Merge remote-tracking branch 'origin/trunk' into update_zig_09

Folkert 2022-04-06 14:48:04 +02:00
commit a69bf971f0
GPG key ID: 1F17F6FFD112B97C
583 changed files with 35930 additions and 13981 deletions


@ -2,3 +2,9 @@
test-gen-llvm = "test -p test_gen"
test-gen-dev = "test -p roc_gen_dev -p test_gen --no-default-features --features gen-dev"
test-gen-wasm = "test -p roc_gen_wasm -p test_gen --no-default-features --features gen-wasm"
[target.wasm32-unknown-unknown]
# Rust compiler flags for minimum-sized .wasm binary in the web REPL
# opt-level=s Optimizations should focus more on size than speed
# lto=fat Spend extra effort on link-time optimization across crates
rustflags = ["-Copt-level=s", "-Clto=fat"]


@ -64,3 +64,11 @@ Jan Van Bruggen <JanCVanB@users.noreply.github.com>
Mats Sigge <mats.sigge@gmail.com>
Drew Lazzeri <dlazzeri1@gmail.com>
Tom Dohrmann <erbse.13@gmx.de>
Elijah Schow <elijah.schow@gmail.com>
Derek Gustafson <degustaf@gmail.com>
Philippe Vinchon <p.vinchon@gmail.com>
Pierre-Henri Trivier <phtrivier@yahoo.fr>
Elliot Waite <1767836+elliotwaite@users.noreply.github.com>
zimt28 <1764689+zimt28@users.noreply.github.com>
Ananda Umamil <zweimach@zweimach.org>
SylvanSign <jake.d.bray@gmail.com>


@ -1,7 +1,97 @@
# Building the Roc compiler from source
## Using Nix
## Installing LLVM, Zig, valgrind, and Python
### On NixOS
[For NixOS, only Linux x86_64 is supported for now](https://github.com/rtfeldman/roc/issues/2734).
NixOS users should make use of the nix flake by [enabling nix flakes](https://nixos.wiki/wiki/Flakes). You can then create a development shell by executing `nix develop` from the root of the repo. NixOS users who do not make use of this flake will get stuck on issue #1846.
### On Linux/MacOS x86_64/aarch64
#### Install
Using [nix](https://nixos.org/download.html) is a quick way to get an environment bootstrapped with a single command.
Anyone having trouble installing the proper version of LLVM themselves might also prefer this method.
If you are running ArchLinux or a derivative like Manjaro, you'll need to run `sudo sysctl -w kernel.unprivileged_userns_clone=1` before installing nix.
Install nix:
`curl -L https://nixos.org/nix/install | sh`
You will need to start a fresh terminal session to use nix.
#### Usage
Now with nix installed, you just need to run one command:
`nix-shell`
> This may not output anything for a little while. This is normal, hang in there. Also make sure you are in the roc project root.
> Also, if you're on NixOS you'll need to enable opengl at the system-wide level. You can do this in configuration.nix with `hardware.opengl.enable = true;`. If you don't do this, nix-shell will fail!
You should be in a shell with everything needed to build already installed.
Use `cargo run help` to see all subcommands.
To use the `repl` subcommand, execute `cargo run repl`.
Use `cargo build` to build the whole project.
#### Extra tips
If you plan on using `nix-shell` regularly, check out [direnv](https://direnv.net/) and [lorri](https://github.com/nix-community/lorri). Whenever you `cd` into `roc/`, they will automatically load the Nix dependencies into your current shell, so you never have to run nix-shell directly!
### Editor
The editor is a :construction:WIP:construction: and not yet ready to replace your favorite editor, although if you want to try it out on nix, read on.
`cargo run edit` should work from NixOS; if you use a nix-shell from inside another OS, follow the instructions below.
#### Nvidia GPU
Outside of a nix shell, execute the following:
```
nix-channel --add https://github.com/guibou/nixGL/archive/main.tar.gz nixgl && nix-channel --update
nix-env -iA nixgl.auto.nixVulkanNvidia
```
Running the editor does not work with `nix-shell --pure`.
```
nix-shell
```
The version `460.91.03` may be different for you; type `nixVulkanNvidia` and press Tab to autocomplete the one that matches your driver version.
```
nixVulkanNvidia-460.91.03 cargo run edit
```
#### Integrated Intel Graphics
:exclamation: **Our Nix setup currently cannot run the editor with integrated Intel graphics, see #1856** :exclamation:
Outside of a nix shell, run:
```bash
git clone https://github.com/guibou/nixGL
cd nixGL
nix-env -f ./ -iA nixVulkanIntel
```
cd to the roc repo, and run (without --pure):
```
nix-shell
nixVulkanIntel cargo run edit
```
#### Other configs
Check the [nixGL repo](https://github.com/guibou/nixGL) for other graphics configurations.
## Troubleshooting
Create an issue if you run into problems not listed here.
That will help us improve this document for everyone who reads it in the future!
## Manual Install
To build the compiler, you need these installed:
@ -85,89 +175,6 @@ Use `cargo build` to build the whole project.
Use `cargo run help` to see all subcommands.
To use the `repl` subcommand, execute `cargo run repl`.
## Using Nix
### Install
Using [nix](https://nixos.org/download.html) is a quick way to get an environment bootstrapped with a single command.
Anyone having trouble installing the proper version of LLVM themselves might also prefer this method.
If you are running ArchLinux or a derivative like Manjaro, you'll need to run `sudo sysctl -w kernel.unprivileged_userns_clone=1` before installing nix.
Install nix:
`curl -L https://nixos.org/nix/install | sh`
You will need to start a fresh terminal session to use nix.
### Usage
Now with nix installed, you just need to run one command:
`nix-shell`
> This may not output anything for a little while. This is normal, hang in there. Also make sure you are in the roc project root.
> Also, if you're on NixOS you'll need to enable opengl at the system-wide level. You can do this in configuration.nix with `hardware.opengl.enable = true;`. If you don't do this, nix-shell will fail!
You should be in a shell with everything needed to build already installed.
Use `cargo run help` to see all subcommands.
To use the `repl` subcommand, execute `cargo run repl`.
Use `cargo build` to build the whole project.
### Extra tips
If you plan on using `nix-shell` regularly, check out [direnv](https://direnv.net/) and [lorri](https://github.com/nix-community/lorri). Whenever you `cd` into `roc/`, they will automatically load the Nix dependencies into your current shell, so you never have to run nix-shell directly!
### Editor
The editor is a WIP and not ready yet to replace your favorite editor, although if you want to try it out on nix, read on.
`cargo run edit` should work from NixOS, if you use a nix-shell from inside another OS, follow the instructions below.
#### Nvidia GPU
Outside of a nix shell, execute the following:
```
nix-channel --add https://github.com/guibou/nixGL/archive/main.tar.gz nixgl && nix-channel --update
nix-env -iA nixgl.auto.nixVulkanNvidia
```
Running the editor does not work with `nix-shell --pure`.
```
nix-shell
```
460.91.03 may be different for you, type nixVulkanNvidia and press tab to autocomplete for your version.
```
nixVulkanNvidia-460.91.03 cargo run edit
```
#### Integrated Intel Graphics
:exclamation: ** Our Nix setup currently cannot run the editor with integrated intel graphics, see #1856 ** :exclamation:
Outside of a nix shell, run:
```bash
git clone https://github.com/guibou/nixGL
cd nixGL
nix-env -f ./ -iA nixVulkanIntel
```
cd to the roc repo, and run (without --pure):
```
nix-shell
nixVulkanIntel cargo run edit
```
#### Other configs
Check the [nixGL repo](https://github.com/guibou/nixGL) for other graphics configurations.
## Troubleshooting
Create an issue if you run into problems not listed here.
That will help us improve this document for everyone who reads it in the future!
### LLVM installation on Linux
For a current list of all dependency versions and their names in apt, see the Earthfile.
@ -197,20 +204,24 @@ export CPPFLAGS="-I/usr/local/opt/llvm/include"
### LLVM installation on Windows
Installing LLVM's prebuilt binaries doesn't seem to be enough for the `llvm-sys` crate that Roc depends on, so I had to build LLVM from source
on Windows. After lots of help from [**@IanMacKenzie**](https://github.com/IanMacKenzie) (thank you, Ian!), here's what worked for me:
**Warning** While `cargo build` works on Windows, linking Roc programs does not yet; see issue #2608. This also means the REPL, the editor, and many tests will not work on Windows.
Installing LLVM's prebuilt binaries doesn't seem to be enough for the `llvm-sys` crate that Roc depends on, so I had to follow the steps below:
1. I downloaded and installed [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools&rel=16) (a full Visual Studio install should work tool; the Build Tools are just the CLI tools, which is all I wanted)
1. In the installation configuration, under "additional components" I had to check both "C++ ATL for latest v142 build tools (x86 & x64)" and also "C++/CLI support for v142 build tools" [note: as of September 2021 this should no longer be necessary - the next time anyone tries this, please try it without this step and make a PR to delete this step if it's no longer needed!]
1. I launched the "x64 Native Tools Command Prompt for Visual Studio 2019" application (note: not the similarly-named "x86" one!)
1. Make sure [Python 2.7](https://www.python.org/) and [CMake 3.17](http://cmake.org/) are installed on your system.
1. I followed most of the steps under LLVM's [building from source instructions](https://github.com/llvm/llvm-project#getting-the-source-code-and-building-llvm) up to the `cmake -G ...` command, which didn't work for me. Instead, at that point I did the following step.
1. I ran `cmake -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ../llvm` to generate a NMake makefile.
1. Once that completed, I ran `nmake` to build LLVM. (This took about 2 hours on my laptop.)
1. Finally, I set an environment variable `LLVM_SYS_100_PREFIX` to point to the `build` directory where I ran the `cmake` command.
1. I downloaded and installed [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools&rel=16) (a full Visual Studio install should work too; the Build Tools are just the CLI tools, which is all I wanted)
1. Download the custom LLVM 7z archive [here](https://github.com/PLC-lang/llvm-package-windows/releases/tag/v12.0.1).
1. [Download 7-zip](https://www.7-zip.org/) to be able to extract this archive.
1. Extract the 7z file to where you want to permanently keep the folder.
1. In PowerShell, set the `LLVM_SYS_120_PREFIX` environment variable (check [here](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-7.2#saving-changes-to-environment-variables) to make this a permanent environment variable):
```
[Environment]::SetEnvironmentVariable(
"Path",
[Environment]::GetEnvironmentVariable("Path", "User") + ";C:\Users\anton\Downloads\LLVM-12.0.1-win64\bin",
"User"
)
```
Once all that was done, `cargo` ran successfully for Roc!
Once all that was done, `cargo build` ran successfully for Roc!
### Build speed on WSL/WSL2


@ -10,7 +10,15 @@ Check [Build from source](BUILDING_FROM_SOURCE.md) for instructions.
## Running Tests
To run all tests and checks as they are run on CI, [install earthly](https://earthly.dev/get-earthly) and run:
Most contributors execute the following commands before pushing their code:
```
cargo test
cargo fmt --all -- --check
cargo clippy -- -D warnings
```
Execute `cargo fmt --all` to fix the formatting.
If you want to run all tests and checks as they are run on CI, [install earthly](https://earthly.dev/get-earthly) and run:
```
earthly +test-all
```

Cargo.lock (generated, 302 changes)

@ -564,6 +564,16 @@ dependencies = [
"serde_yaml",
]
[[package]]
name = "console_error_panic_hook"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen",
]
[[package]]
name = "const_format"
version = "0.2.22"
@ -723,7 +733,7 @@ dependencies = [
"ndk-glue 0.3.0",
"nix 0.20.0",
"oboe",
"parking_lot",
"parking_lot 0.11.2",
"stdweb",
"thiserror",
"web-sys",
@ -1193,6 +1203,38 @@ version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0"
[[package]]
name = "dunce"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453440c271cf5577fd2a40e4942540cb7d0d2f85e27c8d07dd0023c925a67541"
[[package]]
name = "dynasm"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47b1801e630bd336d0bbbdbf814de6cc749c9a400c7e3d995e6adfd455d0c83c"
dependencies = [
"bitflags",
"byteorder",
"lazy_static",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "dynasmrt"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d428afc93ad288f6dffc1fa5f4a78201ad2eec33c5a522e51c181009eb09061"
dependencies = [
"byteorder",
"dynasm",
"memmap2 0.5.3",
]
[[package]]
name = "either"
version = "1.6.1"
@ -1294,7 +1336,7 @@ checksum = "a16910e685088843d53132b04e0f10a571fdb193224fc589685b3ba1ce4cb03d"
dependencies = [
"cfg-if 1.0.0",
"libc",
"windows-sys",
"windows-sys 0.28.0",
]
[[package]]
@ -1677,7 +1719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46e977036f7f5139d580c7f19ad62df9cb8ebd8410bb569e73585226be80a86f"
dependencies = [
"lazy_static",
"static_assertions",
"static_assertions 1.1.0",
]
[[package]]
@ -1738,26 +1780,26 @@ dependencies = [
name = "inkwell"
version = "0.1.0"
dependencies = [
"inkwell 0.1.0 (git+https://github.com/rtfeldman/inkwell?tag=llvm13-0.release1)",
"inkwell 0.1.0 (git+https://github.com/rtfeldman/inkwell?branch=master)",
]
[[package]]
name = "inkwell"
version = "0.1.0"
source = "git+https://github.com/rtfeldman/inkwell?tag=llvm13-0.release1#e15d665227b2acad4ca949820d80048e09f3f4e5"
source = "git+https://github.com/rtfeldman/inkwell?branch=master#accd406858a40ca2a1463ff77d79f3c5e4c96f4e"
dependencies = [
"either",
"inkwell_internals",
"libc",
"llvm-sys",
"once_cell",
"parking_lot",
"parking_lot 0.12.0",
]
[[package]]
name = "inkwell_internals"
version = "0.5.0"
source = "git+https://github.com/rtfeldman/inkwell?tag=llvm13-0.release1#e15d665227b2acad4ca949820d80048e09f3f4e5"
source = "git+https://github.com/rtfeldman/inkwell?branch=master#accd406858a40ca2a1463ff77d79f3c5e4c96f4e"
dependencies = [
"proc-macro2",
"quote",
@ -1954,9 +1996,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3"
[[package]]
name = "llvm-sys"
version = "130.0.1"
version = "130.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "183612ff1acd400cd4faeb1cbf7cc725a868a46282e5c7b112ec5f0a5a49fae7"
checksum = "95eb03b4f7ae21f48ef7c565a3e3aa22c50616aea64645fb1fd7f6f56b51c274"
dependencies = [
"cc",
"lazy_static",
@ -1967,9 +2009,9 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109"
checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b"
dependencies = [
"scopeguard",
]
@ -2063,9 +2105,9 @@ dependencies = [
[[package]]
name = "memmap2"
version = "0.5.0"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e"
checksum = "057a3db23999c867821a7a59feb06a578fcb03685e983dff90daf9e7d24ac08f"
dependencies = [
"libc",
]
@ -2661,7 +2703,17 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
"parking_lot_core 0.8.5",
]
[[package]]
name = "parking_lot"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58"
dependencies = [
"lock_api",
"parking_lot_core 0.9.1",
]
[[package]]
@ -2678,12 +2730,52 @@ dependencies = [
"winapi",
]
[[package]]
name = "parking_lot_core"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954"
dependencies = [
"cfg-if 1.0.0",
"libc",
"redox_syscall",
"smallvec",
"windows-sys 0.32.0",
]
[[package]]
name = "peeking_take_while"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
[[package]]
name = "peg"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af728fe826811af3b38c37e93de6d104485953ea373d656eebae53d6987fcd2c"
dependencies = [
"peg-macros",
"peg-runtime",
]
[[package]]
name = "peg-macros"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4536be147b770b824895cbad934fccce8e49f14b4c4946eaa46a6e4a12fcdc16"
dependencies = [
"peg-runtime",
"proc-macro2",
"quote",
]
[[package]]
name = "peg-runtime"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9b0efd3ba03c3a409d44d60425f279ec442bcf0b9e63ff4e410da31c8b0f69f"
[[package]]
name = "percent-encoding"
version = "2.1.0"
@ -3190,10 +3282,13 @@ name = "repl_test"
version = "0.1.0"
dependencies = [
"indoc",
"lazy_static",
"roc_cli",
"roc_repl_cli",
"roc_test_utils",
"strip-ansi-escapes",
"wasmer",
"wasmer-wasi",
]
[[package]]
@ -3228,6 +3323,16 @@ dependencies = [
"libc",
]
[[package]]
name = "roc_alias_analysis"
version = "0.1.0"
dependencies = [
"morphic_lib",
"roc_collections",
"roc_module",
"roc_mono",
]
[[package]]
name = "roc_ast"
version = "0.1.0"
@ -3251,6 +3356,7 @@ dependencies = [
"roc_unify",
"snafu",
"ven_graph",
"winapi",
]
[[package]]
@ -3288,6 +3394,8 @@ dependencies = [
name = "roc_builtins"
version = "0.1.0"
dependencies = [
"dunce",
"lazy_static",
"roc_collections",
"roc_module",
"roc_region",
@ -3304,11 +3412,13 @@ dependencies = [
"pretty_assertions",
"roc_builtins",
"roc_collections",
"roc_error_macros",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_types",
"static_assertions 1.1.0",
"ven_graph",
]
@ -3378,9 +3488,11 @@ dependencies = [
name = "roc_constrain"
version = "0.1.0"
dependencies = [
"arrayvec 0.7.2",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_error_macros",
"roc_module",
"roc_parse",
"roc_region",
@ -3393,6 +3505,7 @@ version = "0.1.0"
dependencies = [
"bumpalo",
"indoc",
"peg",
"pretty_assertions",
"pulldown-cmark",
"roc_ast",
@ -3400,6 +3513,7 @@ dependencies = [
"roc_can",
"roc_code_markup",
"roc_collections",
"roc_highlight",
"roc_load",
"roc_module",
"roc_parse",
@ -3464,6 +3578,16 @@ dependencies = [
name = "roc_error_macros"
version = "0.1.0"
[[package]]
name = "roc_exhaustive"
version = "0.1.0"
dependencies = [
"roc_collections",
"roc_module",
"roc_region",
"roc_std",
]
[[package]]
name = "roc_fmt"
version = "0.1.0"
@ -3510,6 +3634,7 @@ dependencies = [
"bumpalo",
"inkwell 0.1.0",
"morphic_lib",
"roc_alias_analysis",
"roc_builtins",
"roc_collections",
"roc_error_macros",
@ -3534,9 +3659,20 @@ dependencies = [
"roc_target",
]
[[package]]
name = "roc_highlight"
version = "0.1.0"
dependencies = [
"peg",
"roc_code_markup",
]
[[package]]
name = "roc_ident"
version = "0.1.0"
dependencies = [
"arrayvec 0.7.2",
]
[[package]]
name = "roc_linker"
@ -3546,7 +3682,7 @@ dependencies = [
"bumpalo",
"clap 3.0.0-beta.5",
"iced-x86",
"memmap2 0.5.0",
"memmap2 0.5.3",
"object 0.26.2",
"roc_build",
"roc_collections",
@ -3559,6 +3695,20 @@ dependencies = [
[[package]]
name = "roc_load"
version = "0.1.0"
dependencies = [
"bumpalo",
"roc_builtins",
"roc_collections",
"roc_constrain",
"roc_load_internal",
"roc_module",
"roc_target",
"roc_types",
]
[[package]]
name = "roc_load_internal"
version = "0.1.0"
dependencies = [
"bumpalo",
"crossbeam",
@ -3566,12 +3716,13 @@ dependencies = [
"maplit",
"morphic_lib",
"num_cpus",
"parking_lot",
"parking_lot 0.12.0",
"pretty_assertions",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_error_macros",
"roc_module",
"roc_mono",
"roc_parse",
@ -3582,6 +3733,7 @@ dependencies = [
"roc_target",
"roc_types",
"roc_unify",
"strip-ansi-escapes",
"tempfile",
"ven_pretty",
]
@ -3590,6 +3742,7 @@ dependencies = [
name = "roc_module"
version = "0.1.0"
dependencies = [
"arrayvec 0.7.2",
"bumpalo",
"lazy_static",
"roc_collections",
@ -3597,7 +3750,7 @@ dependencies = [
"roc_ident",
"roc_region",
"snafu",
"static_assertions",
"static_assertions 1.1.0",
]
[[package]]
@ -3611,6 +3764,7 @@ dependencies = [
"roc_can",
"roc_collections",
"roc_error_macros",
"roc_exhaustive",
"roc_module",
"roc_problem",
"roc_region",
@ -3619,7 +3773,7 @@ dependencies = [
"roc_target",
"roc_types",
"roc_unify",
"static_assertions",
"static_assertions 1.1.0",
"ven_graph",
"ven_pretty",
]
@ -3649,13 +3803,14 @@ dependencies = [
"roc_module",
"roc_parse",
"roc_region",
"roc_types",
]
[[package]]
name = "roc_region"
version = "0.1.0"
dependencies = [
"static_assertions",
"static_assertions 1.1.0",
]
[[package]]
@ -3674,6 +3829,8 @@ dependencies = [
"roc_mono",
"roc_parse",
"roc_repl_eval",
"roc_reporting",
"roc_std",
"roc_target",
"roc_types",
"rustyline",
@ -3696,6 +3853,7 @@ dependencies = [
"roc_parse",
"roc_region",
"roc_reporting",
"roc_std",
"roc_target",
"roc_types",
]
@ -3705,6 +3863,8 @@ name = "roc_repl_wasm"
version = "0.1.0"
dependencies = [
"bumpalo",
"console_error_panic_hook",
"futures",
"js-sys",
"roc_builtins",
"roc_collections",
@ -3712,6 +3872,7 @@ dependencies = [
"roc_load",
"roc_parse",
"roc_repl_eval",
"roc_reporting",
"roc_target",
"roc_types",
"wasm-bindgen",
@ -3730,6 +3891,7 @@ dependencies = [
"roc_can",
"roc_collections",
"roc_constrain",
"roc_exhaustive",
"roc_module",
"roc_mono",
"roc_parse",
@ -3769,10 +3931,7 @@ dependencies = [
name = "roc_std"
version = "0.1.0"
dependencies = [
"indoc",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"static_assertions 0.1.1",
]
[[package]]
@ -3798,7 +3957,7 @@ dependencies = [
"roc_error_macros",
"roc_module",
"roc_region",
"static_assertions",
"static_assertions 1.1.0",
"ven_ena",
]
@ -3872,7 +4031,7 @@ checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088"
[[package]]
name = "rustyline"
version = "9.1.1"
source = "git+https://github.com/rtfeldman/rustyline?tag=v9.1.1#7053ae0fe0ee710d38ed5845dd979113382994dc"
source = "git+https://github.com/rtfeldman/rustyline?rev=e74333c#e74333c0d618896b88175bf06645108f996fe6d0"
dependencies = [
"bitflags",
"cfg-if 1.0.0",
@ -3895,7 +4054,7 @@ dependencies = [
[[package]]
name = "rustyline-derive"
version = "0.6.0"
source = "git+https://github.com/rtfeldman/rustyline?tag=v9.1.1#7053ae0fe0ee710d38ed5845dd979113382994dc"
source = "git+https://github.com/rtfeldman/rustyline?rev=e74333c#e74333c0d618896b88175bf06645108f996fe6d0"
dependencies = [
"quote",
"syn",
@ -4039,7 +4198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0bccbcf40c8938196944a3da0e133e031a33f4d6b72db3bda3cc556e361905d"
dependencies = [
"lazy_static",
"parking_lot",
"parking_lot 0.11.2",
"serial_test_derive",
]
@ -4218,6 +4377,12 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "static_assertions"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f406d6ee68db6796e11ffd7b4d171864c58b7451e79ef9460ea33c287a1f89a7"
[[package]]
name = "static_assertions"
version = "1.1.0"
@ -4506,7 +4671,7 @@ checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e"
dependencies = [
"cfg-if 1.0.0",
"rand",
"static_assertions",
"static_assertions 1.1.0",
]
[[package]]
@ -4746,6 +4911,7 @@ dependencies = [
"thiserror",
"wasmer-compiler",
"wasmer-compiler-cranelift",
"wasmer-compiler-singlepass",
"wasmer-derive",
"wasmer-engine",
"wasmer-engine-dylib",
@ -4794,6 +4960,25 @@ dependencies = [
"wasmer-vm",
]
[[package]]
name = "wasmer-compiler-singlepass"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9429b9f7708c582d855b1787f09c7029ff23fb692550d4a1cc351c8ea84c3014"
dependencies = [
"byteorder",
"dynasm",
"dynasmrt",
"lazy_static",
"loupe",
"more-asserts",
"rayon",
"smallvec",
"wasmer-compiler",
"wasmer-types",
"wasmer-vm",
]
[[package]]
name = "wasmer-derive"
version = "2.0.0"
@ -5116,7 +5301,7 @@ dependencies = [
"arrayvec 0.7.2",
"js-sys",
"log",
"parking_lot",
"parking_lot 0.11.2",
"raw-window-handle",
"smallvec",
"wasm-bindgen",
@ -5140,7 +5325,7 @@ dependencies = [
"fxhash",
"log",
"naga",
"parking_lot",
"parking_lot 0.11.2",
"profiling",
"raw-window-handle",
"smallvec",
@ -5175,7 +5360,7 @@ dependencies = [
"metal",
"naga",
"objc",
"parking_lot",
"parking_lot 0.11.2",
"profiling",
"range-alloc",
"raw-window-handle",
@ -5256,11 +5441,24 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82ca39602d5cbfa692c4b67e3bcbb2751477355141c1ed434c94da4186836ff6"
dependencies = [
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_msvc",
"windows_aarch64_msvc 0.28.0",
"windows_i686_gnu 0.28.0",
"windows_i686_msvc 0.28.0",
"windows_x86_64_gnu 0.28.0",
"windows_x86_64_msvc 0.28.0",
]
[[package]]
name = "windows-sys"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6"
dependencies = [
"windows_aarch64_msvc 0.32.0",
"windows_i686_gnu 0.32.0",
"windows_i686_msvc 0.32.0",
"windows_x86_64_gnu 0.32.0",
"windows_x86_64_msvc 0.32.0",
]
[[package]]
@ -5269,30 +5467,60 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52695a41e536859d5308cc613b4a022261a274390b25bd29dfff4bf08505f3c2"
[[package]]
name = "windows_aarch64_msvc"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5"
[[package]]
name = "windows_i686_gnu"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f54725ac23affef038fecb177de6c9bf065787c2f432f79e3c373da92f3e1d8a"
[[package]]
name = "windows_i686_gnu"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615"
[[package]]
name = "windows_i686_msvc"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d5158a43cc43623c0729d1ad6647e62fa384a3d135fd15108d37c683461f64"
[[package]]
name = "windows_i686_msvc"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172"
[[package]]
name = "windows_x86_64_gnu"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc31f409f565611535130cfe7ee8e6655d3fa99c1c61013981e491921b5ce954"
[[package]]
name = "windows_x86_64_gnu"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f2b8c7cbd3bfdddd9ab98769f9746a7fad1bca236554cd032b78d768bc0e89f"
[[package]]
name = "windows_x86_64_msvc"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316"
[[package]]
name = "winit"
version = "0.25.0"
@ -5315,7 +5543,7 @@ dependencies = [
"ndk-glue 0.3.0",
"ndk-sys",
"objc",
"parking_lot",
"parking_lot 0.11.2",
"percent-encoding",
"raw-window-handle",
"scopeguard",


@ -3,6 +3,7 @@ members = [
"compiler/ident",
"compiler/region",
"compiler/collections",
"compiler/exhaustive",
"compiler/module",
"compiler/parse",
"compiler/can",
@ -14,8 +15,10 @@ members = [
"compiler/solve",
"compiler/fmt",
"compiler/mono",
"compiler/alias_analysis",
"compiler/test_mono",
"compiler/load",
"compiler/load_internal",
"compiler/gen_llvm",
"compiler/gen_dev",
"compiler/gen_wasm",
@ -31,13 +34,13 @@ members = [
"ast",
"cli",
"code_markup",
"highlight",
"error_macros",
"reporting",
"repl_cli",
"repl_eval",
"repl_test",
"repl_wasm",
"roc_std",
"test_utils",
"utils",
"docs",
@ -49,6 +52,8 @@ exclude = [
# The tests will still correctly build them.
"cli_utils",
"compiler/test_mono_macros",
# `cargo build` would cause roc_std to be built with default features which errors on windows
"roc_std",
]
# Needed to be able to run `cargo run -p roc_cli --no-default-features` -
# see www/build.sh for more.


@ -1,4 +1,4 @@
FROM rust:1.57.0-slim-bullseye # make sure to update nixpkgs-unstable in sources.json too so that it uses the same rust version > search for cargo on unstable here: https://search.nixos.org/packages
FROM rust:1.58.0-slim-bullseye # make sure to update rust-toolchain.toml and nixpkgs-unstable in sources.json too so that it uses the same rust version > search for cargo on unstable here: https://search.nixos.org/packages
WORKDIR /earthbuild
prep-debian:
@ -13,10 +13,21 @@ install-other-libs:
install-zig-llvm-valgrind-clippy-rustfmt:
FROM +install-other-libs
# editor
RUN apt -y install libxkbcommon-dev
# zig
<<<<<<< HEAD
RUN wget -c https://ziglang.org/download/0.9.1/zig-linux-x86_64-0.9.1.tar.xz --no-check-certificate
RUN tar -xf zig-linux-x86_64-0.9.1.tar.xz
RUN ln -s /earthbuild/zig-linux-x86_64-0.9.1/zig /usr/bin/zig
=======
RUN wget -c https://ziglang.org/download/0.8.0/zig-linux-x86_64-0.8.0.tar.xz --no-check-certificate
RUN tar -xf zig-linux-x86_64-0.8.0.tar.xz
RUN ln -s /earthbuild/zig-linux-x86_64-0.8.0/zig /usr/bin/zig
# zig builtins wasm tests
RUN apt -y install build-essential
RUN cargo install wasmer-cli --features "singlepass"
>>>>>>> origin/trunk
# llvm
RUN apt -y install lsb-release software-properties-common gnupg
RUN wget https://apt.llvm.org/llvm.sh
@ -33,12 +44,13 @@ install-zig-llvm-valgrind-clippy-rustfmt:
RUN rustup component add clippy
# rustfmt
RUN rustup component add rustfmt
# wasm repl & tests
RUN rustup target add wasm32-unknown-unknown wasm32-wasi
RUN apt -y install libssl-dev
RUN OPENSSL_NO_VENDOR=1 cargo install wasm-pack
# criterion
RUN cargo install cargo-criterion
# editor
RUN apt -y install libxkbcommon-dev
# sccache
RUN apt -y install libssl-dev
RUN cargo install sccache
RUN sccache -V
ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache
@ -47,12 +59,12 @@ install-zig-llvm-valgrind-clippy-rustfmt:
copy-dirs:
FROM +install-zig-llvm-valgrind-clippy-rustfmt
COPY --dir cli cli_utils compiler docs editor ast code_markup error_macros utils test_utils reporting repl_cli repl_eval repl_test repl_wasm roc_std vendor examples linker Cargo.toml Cargo.lock version.txt ./
COPY --dir cli cli_utils compiler docs editor ast code_markup error_macros highlight utils test_utils reporting repl_cli repl_eval repl_test repl_wasm roc_std vendor examples linker Cargo.toml Cargo.lock version.txt ./
test-zig:
FROM +install-zig-llvm-valgrind-clippy-rustfmt
COPY --dir compiler/builtins/bitcode ./
RUN cd bitcode && ./run-tests.sh
RUN cd bitcode && ./run-tests.sh && ./run-wasm-tests.sh
check-clippy:
FROM +copy-dirs
@ -67,7 +79,7 @@ check-rustfmt:
check-typos:
RUN cargo install typos-cli --version 1.0.11 # version set to prevent confusion if the version is updated automatically
COPY --dir .github ci cli cli_utils compiler docs editor examples ast code_markup utils linker nightly_benches packages roc_std www *.md LEGAL_DETAILS shell.nix version.txt ./
COPY --dir .github ci cli cli_utils compiler docs editor examples ast code_markup highlight utils linker nightly_benches packages roc_std www *.md LEGAL_DETAILS shell.nix version.txt ./
RUN typos
test-rust:
@ -86,10 +98,14 @@ test-rust:
# gen-wasm has some multithreading problems to do with the wasmer runtime. Run it single-threaded as a separate job
RUN --mount=type=cache,target=$SCCACHE_DIR \
cargo test --locked --release --package test_gen --no-default-features --features gen-wasm -- --test-threads=1 && sccache --show-stats
# run i386 (32-bit linux) cli tests
RUN echo "4" | cargo run --locked --release --features="target-x86" -- --backend=x86_32 examples/benchmarks/NQueens.roc
# repl_test: build the compiler for wasm target, then run the tests on native target
RUN --mount=type=cache,target=$SCCACHE_DIR \
cargo test --locked --release --features with_sound --test cli_run i386 --features="i386-cli-run" && sccache --show-stats
repl_test/test_wasm.sh && sccache --show-stats
# run i386 (32-bit linux) cli tests
# NOTE: disabled until zig 0.9
# RUN echo "4" | cargo run --locked --release --features="target-x86" -- --target=x86_32 examples/benchmarks/NQueens.roc
# RUN --mount=type=cache,target=$SCCACHE_DIR \
# cargo test --locked --release --features with_sound --test cli_run i386 --features="i386-cli-run" && sccache --show-stats
verify-no-git-changes:
FROM +test-rust
@ -118,7 +134,7 @@ build-nightly-release:
RUN printf " on: " >> version.txt
RUN date >> version.txt
RUN RUSTFLAGS="-C target-cpu=x86-64" cargo build --features with_sound --release
RUN cd ./target/release && tar -czvf roc_linux_x86_64.tar.gz ./roc ../../LICENSE ../../LEGAL_DETAILS ../../examples/hello-world ../../examples/hello-rust ../../examples/hello-zig ../../compiler/builtins/bitcode/src/ ../../roc_std
RUN cd ./target/release && tar -czvf roc_linux_x86_64.tar.gz ./roc ../../LICENSE ../../LEGAL_DETAILS ../../examples/hello-world ../../compiler/builtins/bitcode/src/ ../../roc_std
SAVE ARTIFACT ./target/release/roc_linux_x86_64.tar.gz AS LOCAL roc_linux_x86_64.tar.gz
# compile everything needed for benchmarks and output a self-contained dir from which benchmarks can be run.

FAQ.md (new file, 401 changes)

@ -0,0 +1,401 @@
# Frequently Asked Questions
## Is there syntax highlighting for Vim/Emacs/VS Code or a LSP?
Not currently. Although they will presumably exist someday, while Roc is in the early days there's actually a conscious
effort to focus on the Roc Editor *instead of* adding Roc support to other editors - specifically in order to give the Roc
Editor the best possible chance at kickstarting a virtuous cycle of plugin authorship.
This is an unusual approach, but there are more details in [this 2021 interview](https://youtu.be/ITrDd6-PbvY?t=212).
In the meantime, using CoffeeScript syntax highlighting for .roc files turns out to work surprisingly well!
## Why is there no way to specify "import everything this module exposes" in `imports`?
In [Elm](https://elm-lang.org), it's possible to import a module in a way that brings everything that module
exposes into scope. It can be convenient, but like all programming language features, it has downsides.
A minor reason Roc doesn't have this feature is that exposing everything can make it more difficult
outside the editor (e.g. on a website) to tell where something comes from, especially if multiple imports are
using this. ("I don't see `blah` defined in this module, so it must be coming from an import...but which of
these several import-exposing-everything modules could it be? I'll have to check all of them, or
download this code base and open it up in the editor so I can jump to definition!")
The main reason for this design, though, is compiler performance.
Currently, the name resolution step in compilation can be parallelized across modules, because it's possible to
tell if there's a naming error within a module using only the contents of that module. If "expose everything" is
allowed, then it's no longer clear whether anything is a naming error or not, until all the "expose everything"
modules have been processed, so we know exactly which names they expose. Because that feature doesn't exist in Roc,
all modules can do name resolution in parallel.
Of note, allowing this feature would only slow down modules that used it; modules that didn't use it would still be
parallelizable. However, when people find out ways to speed up their builds (in any language), advice starts to
circulate about how to unlock those speed boosts. If Roc had this feature, it's predictable that a commonly-accepted
piece of advice would eventually circulate: "don't use this feature because it slows down your builds."
If a feature exists in a language, but the common recommendation is never to use it, that's cause for reconsidering
whether the feature should be in the language at all. In the case of this feature, I think it's simpler if the
language doesn't have it; that way nobody has to learn (or spend time spreading the word) about the
performance-boosting advice not to use it.
## Why can't functions be compared for equality using the `==` operator?
Function equality has been proven to be undecidable in the general case because of the [halting problem](https://en.wikipedia.org/wiki/Halting_problem).
So while we as humans might be able to look at `\x -> x + 1` and `\x -> 1 + x` and know that they're equivalent,
in the general case it's not possible for a computer to do this reliably.
There are some other potential ways to define function equality, but they all have problems.
One way would be to have two functions be considered equal if their source code is equivalent. (Perhaps disregarding
comments and spaces.) This sounds reasonable, but it means that now revising a function to do
exactly the same thing as before (say, changing `\x -> x + 1` to `\x -> 1 + x`) can cause a bug in a
distant part of the code base. Defining function equality this way means that revising a function's internals
is no longer a safe, local operation - even if it gives all the same outputs for all the same inputs.
Another option would be to define it using "reference equality." This is what JavaScript does, for example.
However, Roc does not use reference equality anywhere else in the language, and it would mean that (for example)
passing `\x -> x + 1` to a function compared to defining `fn = \x -> x + 1` elsewhere and then passing `fn` into
the function might give different answers.
Both of these would make revising code riskier across the entire language, which is very undesirable.
Another option would be to define that function equality always returns `False`. So both of these would evaluate
to `False`:
* `(\x -> x + 1) == (\x -> 1 + x)`
* `(\x -> x + 1) == (\x -> x + 1)`
This makes function equality effectively useless, while still technically allowing it. It has some other downsides:
* Now if you put a function inside a record, using `==` on that record will still type-check, but it will then return `False`. This could lead to bugs if you didn't realize you had accidentally put a function in there - for example, because you were actually storing a different type (e.g. an opaque type) and didn't realize it had a function inside it.
* If you put a function (or a value containing a function) into a `Dict` or `Set`, you'll never be able to get it out again. This is a common problem with [NaN](https://en.wikipedia.org/wiki/NaN), which is also defined not to be equal to itself.
The first of these problems could be addressed by having function equality always return `True` instead of `False` (since that way it would not affect other fields' equality checks in a record), but that design has its own problems:
* Although function equality is still useless, `(\x -> x + 1) == (\x -> x)` returns `True`. Even if it didn't lead to bugs in practice, this would certainly be surprising and confusing to beginners.
* Now if you put several different functions into a `Dict` or `Set`, only one of them will be kept; the others will be discarded or overwritten. This could cause bugs if a value stored a function internally, and then other functions relied on that internal function for correctness.
Each of these designs makes Roc a language that's some combination of more error-prone, more confusing, and more
brittle to change. Disallowing function equality at compile time eliminates all of these drawbacks.
## Why doesn't Roc have a `Maybe` or `Option` or `Optional` type, or `null` or `nil` or `undefined`?
It's common for programming languages to have a [null reference](https://en.wikipedia.org/wiki/Null_pointer)
(e.g. `null` in C, `nil` in Ruby, `None` in Python, or `undefined` in JavaScript).
The inventor of the null reference refers to it as his "[billion dollar mistake](https://en.wikipedia.org/wiki/Null_pointer#History)" because it "has led to innumerable errors, vulnerabilities, and system crashes, which have probably caused a billion dollars of pain and damage in the last forty years."
For this and other reasons, many languages do not include a null reference, but instead have a standard library
data type which can be used in situations where a null reference would otherwise be used. Common names for this
null reference alternative type include `Maybe` (like in Haskell or Elm), `Option` (like in OCaml or Rust),
and `Optional` (like in Java).
By design, Roc does not have one of these. There are several reasons for this.
First, if a function returns a potential error, Roc has the convention to use `Result` with an error type that
has a single tag describing what went wrong. (For example, `List.first : List a -> Result a [ ListWasEmpty ]*`
instead of `List.first : List a -> Maybe a`.) This is not only more self-descriptive, it also composes better with
other operations that can fail; there's no need to have functions like `Result.toMaybe` or `Maybe.toResult`,
because in Roc, the convention is that operations that can fail always use `Result`.
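For example, here is a minimal sketch of that convention (the `firstOrZero` name is made up for illustration), using the `List.first` signature quoted above:
```
# Hypothetical sketch: handle the Result from List.first directly,
# rather than converting it to an optional value first.
firstOrZero = \scores ->
    when List.first scores is
        Ok score -> score
        Err ListWasEmpty -> 0
```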
Second, optional record fields can be handled using Roc's Optional Record Field language feature, so using a type like `Maybe` there would be less ergonomic.
To describe something that's neither an optional field nor an operation that can fail, an explicit tag union can be
more descriptive than something like `Maybe`. For example, if a record type has an `artist` field, but the artist
information may not be available, compare these three alternative ways to represent that:
* `artist : Maybe Artist`
* `artist : [ Loading, Loaded Artist ]`
* `artist : [ Unspecified, Specified Artist ]`
All three versions tell us that we might not have access to an `Artist`. However, the `Maybe` version doesn't
tell us why that might be. The `Loading`/`Loaded` version tells us we don't have one *yet*, because we're
still loading it, whereas the `Unspecified`/`Specified` version tells us we don't have one and shouldn't expect
to have one later if we wait, because it wasn't specified.
Naming aside, using explicit tag unions also makes it easier to transition to richer data models. For example,
after using `[ Loading, Loaded Artist ]` for a while, we might realize that there's another possible state: loading
failed due to an error. If we modify this to be `[ Loading, Loaded Artist, Errored LoadingErr ]`, all
of our code for the `Loading` and `Loaded` states will still work.
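As a small, hypothetical sketch (the `artistLabel` function and the `name` field are made up for illustration): the `Loading` and `Loaded` branches below are exactly what the two-state version would have had, and the richer model only requires the one new `Errored` branch.
```
# Hypothetical sketch: handling the richer data model described above.
artistLabel = \artist ->
    when artist is
        Loading -> "Loading..."
        Loaded a -> a.name
        Errored _ -> "Could not load artist"
```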
In contrast, if we'd had `Maybe Artist` and were using helper functions like `Maybe.isNone` (a common argument
for using `Maybe` even when it's less self-descriptive), we'd have to rewrite all the code which used those
helper functions. As such, a subtle downside of these helper functions is that they discourage any change to
the data model that would break their call sites, even if that change would improve the data model overall.
On a historical note, `Maybe` may have been thought of as a substitute for null references—as opposed to something that emerged organically based on specific motivating use cases after `Result` already existed. That said, in languages that do not have an equivalent of Roc's tag unions, it's much less ergonomic to write something like `Result a [ ListWasEmpty ]*`, so that design would not fit those languages as well as it fits Roc.
## Why doesn't Roc have higher-kinded polymorphism or arbitrary-rank types?
_Since this is a FAQ answer, I'm going to assume familiarity with higher-kinded types and higher-rank types instead of including a primer on them._
A valuable aspect of Roc's type system is that it has decidable [principal](https://en.wikipedia.org/wiki/Principal_type)
type inference. This means that:
* At compile time, Roc can correctly infer the types for every expression in a program, even if you don't annotate any of the types.
* This inference always infers the most general type possible; you couldn't possibly add a valid type annotation that would make the type more flexible than the one that Roc would infer if you deleted the annotation.
It's been proven that any type system which supports either [higher-kinded polymorphism](https://www.cl.cam.ac.uk/~jdy22/papers/lightweight-higher-kinded-polymorphism.pdf) or [arbitrary-rank types](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/putting.pdf) cannot have decidable
principal type inference. With either of those features in the language, there will be situations where the compiler
would be unable to infer a type—and you'd have to write a type annotation. This also means there would be
situations where the editor would not be able to reliably tell you the type of part of your program, unlike today
where it can accurately tell you the type of anything, even if you have no type annotations in your entire code base.
### Arbitrary-rank types
Unlike arbitrary-rank (aka "Rank-N") types, both Rank-1 and Rank-2 type systems are compatible with principal
type inference. Roc currently uses Rank-1 types, and the benefits of Rank-N over Rank-2 don't seem worth
sacrificing principal type inference to attain, so let's focus on the trade-offs between Rank-1 and Rank-2.
Supporting Rank-2 types in Roc has been discussed before, but it has several important downsides:
* It would increase the complexity of the language.
* It would make some compiler error messages more confusing (e.g. they might mention `forall` because that was the most general type that could be inferred, even if that wasn't helpful or related to the actual problem).
* It would substantially increase the complexity of the type checker, which would necessarily slow it down.
No implementation of Rank-2 types can remove any of these downsides. Thus far, we've been able to come up
with sufficiently nice APIs that only require Rank-1 types, and we haven't seen a really compelling use case
where the gap between the Rank-2 and Rank-1 designs was big enough to justify switching to Rank-2.
Since I prefer Roc being simpler and having a faster compiler with nicer error messages, my hope is that Roc
will never get Rank-2 types. However, it may turn out that in the future we learn about currently-unknown
upsides that somehow outweigh these downsides, so I'm open to considering the possibility - while rooting against it.
### Higher-kinded polymorphism
I want to be really clear about this one: the explicit plan is that Roc will never support higher-kinded polymorphism.
On the technical side, the reasons for this are ordinary: I understand the practical benefits and
drawbacks of HKP, and I think the drawbacks outweigh the benefits when it comes to Roc. (Those who come to a
different conclusion may think HKP's drawbacks would be less of a big deal in Roc than I do. That's reasonable;
we programmers often weigh the same trade-offs differently.) To be clear, I think this in the specific context of
Roc; there are plenty of other languages where HKP seems like a great fit. For example, it's hard to imagine Haskell
without it. Similarly, I think lifetime annotations are a great fit for Rust, but don't think they'd be right
for Roc either.
I also think it's important to consider the cultural implications of deciding whether or not to support HKP.
To illustrate what I mean, imagine this conversation:
**Programmer 1:** "How do you feel about higher-kinded polymorphism?"
**Programmer 2:** "I have no idea what that is."
**Programmer 1:** "Okay, how do you feel about monads?"
**Programmer 2:** "OH NO."
I've had several variations of this conversation: I'm talking about higher-kinded types,
another programmer asks what that means, I give monads as an example, and their reaction is strongly negative.
I've also had plenty of conversations with programmers who love HKP and vigorously advocate for its addition
to languages they use which don't have it. Feelings about HKP seem strongly divided, maybe more so
than any other type system feature besides static and dynamic types.
It's impossible for a programming language to be neutral on this. If the language doesn't support HKP, nobody can
implement a Monad typeclass (or equivalent) in any way that can be expected to catch on. Advocacy to add HKP to the
language will inevitably follow. If the language does support HKP, one or more alternate standard libraries built
around monads will inevitably follow, along with corresponding cultural changes. (See Scala for example.)
Culturally, to support HKP is to take a side, and to decline to support it is also to take a side.
Given this, language designers have three options:
* Have HKP and have Monad in the standard library. Embrace them and build a culture and ecosystem around them.
* Have HKP and don't have Monad in the standard library. An alternate standard library built around monads will inevitably emerge, and both the community and ecosystem will divide themselves along pro-monad and anti-monad lines.
* Don't have HKP; build a culture and ecosystem around other things.
Considering that these are the only three options, I think the best choice for Roc—not only on a technical
level, but on a cultural level as well—is to make it clear that the plan is for Roc never to support HKP.
I hope this clarity can save a lot of community members' time that would otherwise be spent on advocacy or
arguing between the two sides of the divide. Again, I think it's completely reasonable for anyone to have a
different preference, but given that language designers can only choose one of these options, I'm confident
I've made the right choice for Roc by designing it never to have higher-kinded polymorphism.
## Why do Roc's syntax and standard library differ from Elm's?
Roc is a direct descendant of [Elm](https://elm-lang.org/). However, there are some differences between the two languages.
Syntactic differences are among these. This is a feature, not a bug; if Roc had identical syntax to Elm, then it's
predictable that people would write code that was designed to work in both languages - and would then rely on
that being true, for example by making a package which advertised "Works in both Elm and Roc!" This in turn
would mean that later if either language were to change its syntax in a way that didn't make sense for the other,
the result would be broken code and sadness.
So why does Roc have the specific syntax changes it does? Here are some brief explanations:
* `#` instead of `--` for comments - this allows [hashbang](https://senthilnayagan.medium.com/shebang-hashbang-10966b8f28a8)s to work without needing special syntax. That isn't a use case Elm supports, but it is one Roc is designed to support.
* `{}` instead of `()` for the unit type - Elm has both, and they can both be used as a unit type. Since `{}` has other uses in the type system, but `()` doesn't, I consider it redundant and took it out.
* No tuples - I wanted to try simplifying the language and seeing how much we'd miss them. Anything that could be represented as a tuple can be represented with either a record or a single-tag union instead (e.g. `Pair x y = ...`), so is it really necessary to have a third syntax for representing a group of fields with potentially different types?
* `when`...`is` instead of `case`...`of` - I predict it will be easier for beginners to pick up, because usually the way I explain `case`...`of` to beginners is by saying the words "when" and "is" out loud - e.g. "when `color` is `Red`, it runs this first branch; when `color` is `Blue`, it runs this other branch..."
* `:` instead of `=` for record field definitions (e.g. `{ foo: bar }` where Elm syntax would be `{ foo = bar }`): I like `=` being reserved for definitions, and `:` is the most popular alternative.
* Backpassing syntax - since Roc is designed to be used for use cases like command-line apps, shell scripts, and servers, I expect chained effects to come up a lot more often than they do in Elm. I think backpassing is nice for those use cases, similarly to how `do` notation is nice for them in Haskell.
* Tag unions instead of Elm's custom types (aka algebraic data types). This isn't just a syntactic change; tag unions are mainly in Roc because they can facilitate errors being accumulated across chained effects, which (as noted a moment ago) I expect to be a lot more common in Roc than in Elm. If you have tag unions, you don't really need a separate language feature for algebraic data types, since closed tag unions essentially work the same way - aside from not giving you a way to selectively expose variants or define phantom types. Roc's opaque types language feature covers those use cases instead.
* No `::` operator, or `::` pattern matching for lists. Both of these are for the same reason: an Elm `List` is a linked list, so both prepending to it and removing an element from the front are very cheap operations. In contrast, a Roc `List` is a flat array, so both prepending to it and removing an element from the front are among the most expensive operations you can possibly do with it! To get good performance, this usage pattern should be encouraged in Elm and discouraged in Roc. Since having special syntax would encourage it, it would not be good for Roc to have that syntax!
* No `<|` operator. In Elm, I almost exclusively found myself wanting to use this in conjunction with anonymous functions (e.g. `foo <| \bar -> ...`) or conditionals (e.g. `foo <| if bar then ...`). In Roc you can do both of these without the `<|`. That means the main remaining use for `<|` is to reduce parentheses, but I tend to think `|>` is better at that (or else the parens are fine), so after the other syntactic changes, I considered `<|` an unnecessary stylistic alternative to `|>` or parens.
* The `|>` operator passes the expression before the `|>` as the *first* argument to the function after the `|>` instead of as the last argument. See the section on currying for details on why this works this way.
* `:` instead of `type alias` - I like to avoid reserved keywords for terms that are desirable in userspace, so that people don't have to name things `typ` because `type` is a reserved keyword, or `clazz` because `class` is reserved. (I couldn't think of satisfactory alternatives for `as`, `when`, `is`, or `if` other than different reserved keywords. I could see an argument for `then`—and maybe even `is`—being replaced with a `->` or `=>` or something, but I don't anticipate missing either of those words much in userspace. `then` is used in JavaScript promises, but I think there are several better names for that function.)
* No underscores in variable names - I've seen Elm beginners reflexively use `snake_case` over `camelCase` and then need to un-learn the habit after the compiler accepted it. I'd rather have the compiler give feedback that this isn't the way to do it in Roc, and suggest a camelCase alternative. I've also seen underscores used for lazy naming, e.g. `foo` and then `foo_`. If lazy naming is the goal, `foo2` is just as concise as `foo_`, but `foo3` is more concise than `foo__`. So in a way, removing `_` is a forcing function for improved laziness. (Of course, more descriptive naming would be even better.)
* Trailing commas - I've seen people walk away (in some cases physically!) from Elm as soon as they saw the leading commas in collection literals. While I think they've made a mistake by not pushing past this aesthetic preference to give the language a chance, I also would prefer not to put them in a position to make such a mistake in the first place. Secondarily, while I'm personally fine with either style, between the two I prefer the look of trailing commas.
* The `!` unary prefix operator. I didn't want to have a `Basics` module (more on that in a moment), and without `Basics`, this would either need to be called fully-qualified (`Bool.not`) or else a module import of `Bool.{ not }` would be necessary. Both seemed less nice than supporting the `!` prefix that's common to so many widely-used languages, especially when we already have a unary prefix operator of `-` for negation (e.g. `-x`).
* `!=` for the inequality operator (instead of Elm's `/=`) - this one pairs more naturally with the `!` prefix operator and is also very common in other languages.
Roc also has a different standard library from Elm. Some of the differences come down to platforms and applications (e.g. having `Task` in Roc's standard library wouldn't make sense), but others do not. Here are some brief explanations:
* No `Basics` module. I wanted to have a simple rule of "all modules in the standard library are imported by default, and so are their exposed types," and that's it. Given that I wanted the comparison operators (e.g. `<`) to work only on numbers, it ended up that having `Num` and `Bool` modules meant that almost nothing would be left for a `Basics` equivalent in Roc except `identity` and `Never`. The Roc type `[]` (empty tag union) is equivalent to `Never`, so that wasn't necessary, and I generally think that `identity` is a good concept but a sign of an incomplete API whenever its use comes up in practice. For example, instead of calling `|> List.filterMap identity` I'd rather have access to a more self-descriptive function like `|> List.dropNothings`. With `Num` and `Bool`, and without `identity` and `Never`, there was nothing left in `Basics`.
* `Str` instead of `String` - after using the `str` type in Rust, I realized I had no issue whatsoever with the more concise name, especially since it was used in so many places (similar to `Msg` and `Cmd` in Elm) - so I decided to save a couple of letters.
* No function composition operators - I stopped using these in Elm so long ago that at one point I forgot they were in the language! See the FAQ entry on currying for details about why.
* No `Char`. What most people think of as a "character" is a rendered glyph. However, rendered glyphs are made up of [grapheme clusters](https://stackoverflow.com/a/27331885), which are a variable number of Unicode code points - and there's no upper bound on how many code points there can be in a single cluster. In a world of emoji, I think this makes `Char` error-prone and it's better to have `Str` be the only first-class unit. For convenience when working with Unicode code points (e.g. for performance-critical tasks like parsing), the single-quote syntax is sugar for the corresponding `U32` code point - for example, writing `'鹏'` is exactly the same as writing `40527`. Like Rust, you get a compiler error if you put something in single quotes that's not a valid [Unicode scalar value](http://www.unicode.org/glossary/#unicode_scalar_value).
* No `Debug.log` - the editor can do a better job at this, or you can write `expect x != x` to see what `x` is when the expectation fails. Using the editor means your code doesn't change, and using `expect` gives a natural reminder to remove the debugging code before shipping: the build will fail.
* No `Debug.todo` - instead you can write a type annotation with no implementation below it; the type checker will treat it normally, but attempting to use the value will cause a runtime exception. This is a feature I've often wanted in Elm, because I like prototyping APIs by writing out the types only, but then when I want the compiler to type-check them for me, I end up having to add `Debug.todo` in various places. (There's a short sketch of this right after this list.)
* No `Maybe`. See the "Why doesn't Roc have a `Maybe`/`Option`/`Optional` type" FAQ question.
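Here's a minimal sketch of the annotation-without-implementation technique from the `Debug.todo` bullet above; `parseConfig` and its types are hypothetical names, not part of any real API:
```elm
# This type annotation has no implementation below it. The type checker
# treats parseConfig normally, so callers still type-check, but evaluating
# it at runtime raises an exception.
parseConfig : Str -> Result Str [ InvalidConfig ]*

# Elsewhere, code that uses it still type-checks while it's unimplemented:
configResult = parseConfig "port: 8080"
```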
## Why aren't Roc functions curried by default?
Although technically any language with first-class functions makes it possible to curry
any function (e.g. I can manually curry a Roc function `\x, y, z ->` by writing `\x -> \y -> \z ->` instead),
typically what people mean when they say Roc isn't a curried language is that Roc functions aren't curried
by default. For the rest of this section, I'll use "currying" as a shorthand for "functions that are curried
by default" for the sake of brevity.
As I see it, currying has one major upside and several major downsides. The upside:
* It makes function calls more concise in some cases.
The downsides:
* It lowers error message quality, because there can no longer be an error for "function called with too few arguments." (Calling a function with fewer arguments is always valid in curried functions; the error you get instead will unavoidably be some other sort of type mismatch, and it will be up to you to figure out that the real problem was that you forgot an argument.)
* It makes the `|>` operator more error-prone in some cases.
* It makes higher-order function calls need more parentheses in some cases.
* It significantly increases the language's learning curve. (More on this later.)
* It facilitates pointfree function composition. (More on why this is listed as a downside later.)
There's also a downside that it would make runtime performance of compiled programs worse by default,
but I assume it would be possible to optimize that away at the cost of slightly longer compile times.
I consider the one upside (conciseness in some places) extremely minor, and have almost never missed it in Roc.
Here are some more details about the downsides as I see them.
### Currying and the `|>` operator
In Roc, this code produces `"Hello, World!"`
```elm
"Hello, World"
|> Str.concat "!"
```
This is because Roc's `|>` operator uses the expression before the `|>` as the *first* argument to the function
after it. For functions where both arguments have the same type, but it's obvious which argument goes where (e.g.
`Str.concat "Hello, " "World!"`, `List.concat [ 1, 2 ] [ 3, 4 ]`), this works out well. Another example would
be `|> Num.sub 1`, which subtracts 1 from whatever came before the `|>`.
For this reason, "pipeline-friendliness" in Roc means that the first argument to each function is typically
the one that's most likely to be built up using a pipeline. For example, `List.map`:
```elm
numbers
|> List.map Num.abs
```
This argument ordering convention also often makes it possible to pass anonymous functions to higher-order
functions without needing parentheses, like so:
```elm
List.map numbers \num -> Num.abs (num - 1)
```
(If the arguments were reversed, this would be `List.map (\num -> Num.abs (num - 1)) numbers` and the
extra parentheses would be required.)
Neither of these benefits is compatible with the argument ordering currying encourages. Currying encourages
`List.map` to take the `List` as its second argument instead of the first, so that you can partially apply it
like `(List.map Num.abs)`; if Roc introduced currying but kept the order of `List.map` the same way it is today,
then partially applying `List.map` (e.g. `(List.map numbers)`) would be much less useful than if the arguments
were swapped - but that in turn would make it less useful with `|>` and would require parentheses when passing
it an anonymous function.
This is a fundamental design tension. One argument order works well with `|>` (at least the way it works in Roc
today) and with passing anonymous functions to higher-order functions, and the other works well with currying.
It's impossible to have both.
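To make that tension concrete, here's a hypothetical sketch of the two argument orders; `hypotheticalMap` stands in for a curry-friendly `List.map` and is not a real Roc function:
```elm
# Roc's actual order: the list comes first, so pipelines and trailing
# anonymous functions both read naturally (assuming numbers is in scope).
doubled = List.map numbers \num -> num * 2

# A curry-friendly order would take the function first and return a
# List-to-List function, which is handy for partial application -
# but then `numbers |> hypotheticalMap ...` and the parenthesis-free
# anonymous-function style above would no longer work out.
doubler = hypotheticalMap \num -> num * 2
```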
Of note, one possible design is to have currying while also having `|>` pass the *last* argument instead of the first.
This is what Elm does, and it makes pipeline-friendliness and curry-friendliness the same thing. However, it also
means that either `|> Str.concat "!"` would add the `"!"` to the front of the string, or else `Str.concat`'s
arguments would have to be flipped - meaning that `Str.concat "Hello, World" "!"` would evaluate to `"!Hello, World"`.
The only way to have `Str.concat` work the way it does in Roc today (where both pipelines and non-pipeline calling
do what you'd want them to) is to order function arguments in a way that is not conducive to currying. This design
tension only exists if there's currying in the language; without it, you can order arguments for pipeline-friendliness
without concern.
### Currying and learning curve
Prior to designing Roc, I taught a lot of beginner [Elm](https://elm-lang.org/) workshops. Sometimes at
conferences, sometimes for [Frontend Masters](https://frontendmasters.com/courses/intro-elm/),
sometimes for free at local coding bootcamps or meetup groups.
In total I've spent well over 100 hours standing in front of a class, introducing the students to their
first pure functional programming language.
Here was my experience teaching currying:
* The only way to avoid teaching it is to refuse to explain why multi-argument functions have multiple `->`s in them. (If you don't explain it, at least one student will ask about it - and many if not all of the others will wonder.)
* Teaching currying properly takes a solid chunk of time, because it requires explaining partial application, explaining how curried functions facilitate partial application, how function signatures accurately reflect that they're curried, and going through examples for all of these.
* Even after doing all this, and iterating on my approach each time to try to explain it more effectively than I had the time before, I'd estimate that under 50% of the class ended up actually understanding currying. I consistently heard that in practice it only "clicked" for most people after spending significantly more time writing code with it.
This is not the end of the world, especially because it's easy enough to think "okay, I still don't totally get this
even after that explanation, but I can remember that function arguments are separated by `->` in this language
and maybe I'll understand the rest later." (Which they almost always do, if they stick with the language.)
Clearly currying doesn't preclude a language from being easy to learn, because Elm has currying, and Elm's learning
curve is famously gentle.
That said, beginners who feel confused while learning the language are less likely to continue with it.
And however easy Roc would be to learn if it had currying, the language is certainly easier to learn without it.
### Pointfree function composition
[Pointfree function composition](https://en.wikipedia.org/wiki/Tacit_programming) is where you define
a new function by composing together two existing functions without naming intermediate arguments.
Here's an example:
```elm
reverseSort : List elem -> List elem
reverseSort = compose List.reverse List.sort
compose : (a -> b), (c -> a) -> (c -> b)
compose = \f, g -> \x -> f (g x)
```
Here's how I would instead write this:
```elm
reverseSort : List elem -> List elem
reverseSort = \list -> List.reverse (List.sort list)
```
I've consistently found that I can more quickly and accurately understand function definitions that use
named arguments, even though the code is longer. I suspect this is because I'm faster at reading than I am at
desugaring, and whenever I read the top version I end up needing to mentally desugar it into the bottom version.
In more complex examples (this is among the tamest pointfree function composition examples I've seen), I sometimes make
a mistake in my mental desugaring and misunderstand what the function is doing - which can cause bugs.
I assumed I would get faster and more accurate at this over time. However, by now it's been about a decade
since I first learned about the technique, and I'm still slower and less accurate at reading code that uses
pointfree function composition (including if I wrote it - but even more so if I didn't) than code written
with named arguments. I've asked a lot of other programmers about their experiences with pointfree function
composition over the years, and the overwhelming majority of responses have been consistent with my experience.
As such, my opinion about pointfree function composition has gotten less and less nuanced over time. I've now moved
past "it's the right tool for the job, sometimes" to concluding it's best thought of as an antipattern. This is
because I realized how much time I was spending evaluating on a case-by-case basis whether it might be the
right fit for a given situation. The time spent on this analysis alone vastly outweighed the sum of all the
benefits I got in the rare cases where I concluded it was a fit. So I've found the way to get the most out of
pointfree function composition is to never even think about using it; every other strategy leads to a worse outcome.
Currying facilitates the antipattern of pointfree function composition, which I view as a downside of currying.
Stacking up all these downsides of currying against the one upside of making certain function calls more concise,
I concluded that it would be a mistake to have it in Roc.
## Why are both Rust and Zig used?
At the start of the project, we did not know Zig well, and it was not yet production-ready. Zig entered the project because it has many different backends (wasm, various assembly formats, LLVM IR) and can create code with minimal dependencies.
Rust has more overhead in terms of code size. It's objectively not a lot, but it's less with Zig.
We think Rust is a nicer language to work in for a project of this size. It has a type system we're more familiar with, a package ecosystem, and excellent tooling.
View file
@ -203,7 +203,7 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI
* Zig - https://ziglang.org
This source code can be found in compiler/builtins/bitcode/src/hash.zig and is licensed under the following terms:
This source code can be found in compiler/builtins/bitcode/src/hash.zig, highlight/tests/peg_grammar.rs and highlight/src/highlight_parser.rs and is licensed under the following terms:
The MIT License (Expat)
@ -515,3 +515,24 @@ See the License for the specific language governing permissions and
limitations under the License.
===========================================================
* iced - https://github.com/iced-rs/iced
Copyright 2019 Héctor Ramón, Iced contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
View file
@ -6,7 +6,7 @@ The [tutorial](TUTORIAL.md) is the best place to learn about how to use the lang
There's also a folder of [examples](https://github.com/rtfeldman/roc/tree/trunk/examples) - the [CLI example](https://github.com/rtfeldman/roc/tree/trunk/examples/cli) in particular is a reasonable starting point to build on.
[Roc Zulip chat](https://roc.zulipchat.com) is the best place to ask questions and get help! It's also where we discuss [ideas](https://roc.zulipchat.com/#narrow/stream/304641-ideas) for the language. If you want to get involved in contributing to the language, Zulip is also a great place to ask about good first projects.
If you have a specific question, the [FAQ](FAQ.md) might have an answer, although [Roc Zulip chat](https://roc.zulipchat.com) is overall the best place to ask questions and get help! It's also where we discuss [ideas](https://roc.zulipchat.com/#narrow/stream/304641-ideas) for the language. If you want to get involved in contributing to the language, Zulip is also a great place to ask about good first projects.
## State of Roc
@ -23,7 +23,7 @@ Many programs can however be compiled correctly. Check out [examples](examples)
Run examples as follows:
```
cargo run examples/hello-world/Hello.roc
cargo run examples/hello-world/helloWorld.roc
```
Some examples like `examples/benchmarks/NQueens.roc` require input after running.
For NQueens, input 10 in the terminal and press enter.
View file
@ -6,6 +6,10 @@ and more!
Enjoy!
## Getting started
Learn how to install roc on your machine [here](https://github.com/rtfeldman/roc#getting-started).
## Strings and Numbers
Lets start by getting acquainted with Rocs Read Eval Print Loop, or REPL for
@ -322,7 +326,7 @@ If we were to use `counts.note` inside `addWithStringify`, then we would get an
because `total` is calling `addAndStringify` passing a record that doesn't have a `note` field.
Record fields can have any combination of types we want. `totalWithNote` uses a record that
has a mixture of numbers and strings, but we can also have record fields that other types of
has a mixture of numbers and strings, but we can also have record fields with other types of
values - including other records, or even functions!
```coffee
@ -523,13 +527,13 @@ stoplightStr =
Green | Yellow ->
if contrast > 75 then
"not red, but very high contrast"
else if saturation > 50 then
else if contrast > 50 then
"not red, but high contrast"
else
"not red"
```
Either style can be a reasonable choice depending on the cirumstances.
Either style can be a reasonable choice depending on the circumstances.
### Tags with payloads
@ -587,7 +591,7 @@ List.append names "Jess"
This returns a **new** list with `"Jess"` after `"Ari"`, and doesn't modify the original list at all.
All values in Roc (including lists, but also records, strings, numbers, and so on) are immutable,
meaning whenever we want to "change" them, we want to instead pass them a function which returns some
meaning whenever we want to "change" them, we want to instead pass them to a function which returns some
variation of what was passed in.
### List.map
@ -1071,7 +1075,7 @@ is 0, we can look at `U8` and know that it goes from `0` (since it's unsigned) t
If we change `U8` to `I8`, making it a *signed* 8-bit integer, the range changes. Because it's still 8 bits, it still
has room to represent 2⁸ (that is, 256) different numbers. However, now in addition to one of those 256 numbers
being zero, about half of rest will be negative, and the others positive. So instead of ranging from, say -255
being zero, about half of the rest will be negative, and the others positive. So instead of ranging from, say -255
to 255 (which, counting zero, would represent 511 different numbers; too many to fit in 8 bits!) an `I8` value
ranges from -128 to 127.
@ -1082,10 +1086,10 @@ of an 8-bit integer) and dividing it in half (half of 256 is 128, so -128 is `I8
highest number, take the positive version of the lowest number (so, convert `-128` to `128`) and then subtract 1
to make room for zero (so, `128` becomes `127`; `I8` ranges from -128 to 127).
Following this pattern, the 16 in `I16` means that it's a signed 16 bit integer.
Following this pattern, the 16 in `I16` means that it's a signed 16-bit integer.
That tells us it has room to represent 2¹⁶ (which is equal to 65536) different numbers. Half of 65536 is 32768,
so the lowest `I16` would be -32768, and the highest would be 32767. Knowing that, we can also quickly tell that
the lowest `U16` would be zero (since it always is for unsigned integers), and the higeest `U16` would be 65536.
the lowest `U16` would be zero (since it always is for unsigned integers), and the highest `U16` would be 65535.
Choosing a size depends on your performance needs and the range of numbers you want to represent. Consider:
@ -1153,10 +1157,10 @@ with 18 decimal places of precision.
This means a `Dec` can represent whole numbers up to slightly over 170
quintillion, along with 18 decimal places. (To be precise, it can store
numbers betwween `-170_141_183_460_469_231_731.687303715884105728`
numbers between `-170_141_183_460_469_231_731.687303715884105728`
and `170_141_183_460_469_231_731.687303715884105727`.) Why 18
decimal places? It's the highest number of decimal places where you can still
convert any `U64] to a `Dec` without losing information.
convert any `U64` to a `Dec` without losing information.
While the fixed-point `Dec` has a fixed range, the floating-point `F32` and `F64` do not.
Instead, outside of a certain range they start to lose precision instead of immediately overflowing
@ -1619,12 +1623,11 @@ If you like, you can always annotate your functions as accepting open records. H
always be the nicest choice. For example, let's say you have a `User` type alias, like so:
```coffee
User :
{
email : Str,
firstName : Str,
lastName : Str,
}
User : {
email : Str,
firstName : Str,
lastName : Str,
}
```
This defines `User` to be a closed record, which in practice is the most common way records named `User`
@ -1657,12 +1660,11 @@ Since open records have a type variable (like `*` in `{ email : Str }*` or `a` i
type variable to the `User` type alias:
```coffee
User a :
{
email : Str,
firstName : Str,
lastName : Str,
}a
User a : {
email : Str,
firstName : Str,
lastName : Str,
}a
```
Notice that the `a` type variable appears not only in `User a` but also in `}a` at the end of the
@ -1823,7 +1825,7 @@ which is an open union (like `Ok "foo"`, which has the type `[ Ok Str ]*`) can b
expecting a tag union (no matter whether it's open or closed), as long as the expected tag union includes at least
the tags in the open union you're providing.
So if I have an `[ Ok Str ]*` value, I can pass it functions with any of these types (among others):
So if I have an `[ Ok Str ]*` value, I can pass it to functions with any of these types (among others):
* `[ Ok Str ]* -> Bool`
* `[ Ok Str ] -> Bool`
View file
@ -21,10 +21,14 @@ roc_target = { path = "../compiler/roc_target" }
roc_error_macros = { path = "../error_macros" }
arrayvec = "0.7.2"
bumpalo = { version = "3.8.0", features = ["collections"] }
libc = "0.2.106"
page_size = "0.4.2"
snafu = { version = "0.6.10", features = ["backtraces"] }
ven_graph = { path = "../vendor/pathfinding" }
libc = "0.2.106"
[dev-dependencies]
indoc = "1.0.3"
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3.9", features = ["memoryapi"]}
View file
@ -154,12 +154,18 @@ fn canonicalize_field<'a>(
let (loc_can_expr, output) =
expr_to_expr2(env, scope, &loc_expr.value, loc_expr.region);
Ok(CanonicalField::LabelAndValue {
label: label.value,
value_expr: loc_can_expr,
value_output: output,
var: field_var,
})
match loc_can_expr {
Expr2::RuntimeError() => Ok(CanonicalField::InvalidLabelOnly {
label: label.value,
var: field_var,
}),
_ => Ok(CanonicalField::LabelAndValue {
label: label.value,
value_expr: loc_can_expr,
value_output: output,
var: field_var,
}),
}
}
OptionalValue(label, _, loc_expr) => Err(CanonicalizeFieldProblem::InvalidOptionalValue {
View file
@ -1,7 +1,7 @@
use bumpalo::{collections::Vec as BumpVec, Bump};
use roc_can::expected::{Expected, PExpected};
use roc_collections::all::{BumpMap, BumpMapDefault, Index, SendMap};
use roc_collections::all::{BumpMap, BumpMapDefault, HumanIndex, SendMap};
use roc_module::{
ident::{Lowercase, TagName},
symbol::Symbol,
@ -163,7 +163,7 @@ pub fn constrain_expr<'a>(
let elem_expected = Expected::ForReason(
Reason::ElemInList {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
},
list_elem_type.shallow_clone(),
region,
@ -339,7 +339,7 @@ pub fn constrain_expr<'a>(
let reason = Reason::FnArg {
name: opt_symbol,
arg_index: Index::zero_based(index),
arg_index: HumanIndex::zero_based(index),
};
let expected_arg = Expected::ForReason(reason, arg_type.shallow_clone(), region);
@ -538,7 +538,7 @@ pub fn constrain_expr<'a>(
name.clone(),
arity,
AnnotationSource::TypedIfBranch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
num_branches,
region: ann_source.region(),
},
@ -559,7 +559,7 @@ pub fn constrain_expr<'a>(
name,
arity,
AnnotationSource::TypedIfBranch {
index: Index::zero_based(branches.len()),
index: HumanIndex::zero_based(branches.len()),
num_branches,
region: ann_source.region(),
},
@ -596,7 +596,7 @@ pub fn constrain_expr<'a>(
body,
Expected::ForReason(
Reason::IfBranch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
total_branches: branches.len(),
},
Type2::Variable(*expr_var),
@ -616,7 +616,7 @@ pub fn constrain_expr<'a>(
final_else_expr,
Expected::ForReason(
Reason::IfBranch {
index: Index::zero_based(branches.len()),
index: HumanIndex::zero_based(branches.len()),
total_branches: branches.len() + 1,
},
Type2::Variable(*expr_var),
@ -691,7 +691,7 @@ pub fn constrain_expr<'a>(
when_branch,
PExpected::ForReason(
PReason::WhenMatch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
},
cond_type.shallow_clone(),
pattern_region,
@ -700,7 +700,7 @@ pub fn constrain_expr<'a>(
name.clone(),
*arity,
AnnotationSource::TypedWhenBranch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
region: ann_source.region(),
},
typ.shallow_clone(),
@ -733,14 +733,14 @@ pub fn constrain_expr<'a>(
when_branch,
PExpected::ForReason(
PReason::WhenMatch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
},
cond_type.shallow_clone(),
pattern_region,
),
Expected::ForReason(
Reason::WhenBranch {
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
},
branch_type.shallow_clone(),
// TODO: when_branch.value.region,
@ -1065,7 +1065,7 @@ pub fn constrain_expr<'a>(
let reason = Reason::LowLevelOpArg {
op: *op,
arg_index: Index::zero_based(index),
arg_index: HumanIndex::zero_based(index),
};
let expected_arg =
Expected::ForReason(reason, arg_type.shallow_clone(), Region::zero());
@ -1474,6 +1474,15 @@ pub fn constrain_pattern<'a>(
));
}
CharacterLiteral(_) => {
state.constraints.push(Constraint::Pattern(
region,
PatternCategory::Character,
num_unsigned32(env.pool),
expected,
));
}
RecordDestructure {
whole_var,
ext_var,
@ -1672,7 +1681,7 @@ fn constrain_tag_pattern<'a>(
let expected = PExpected::ForReason(
PReason::TagArg {
tag_name: tag_name.clone(),
index: Index::zero_based(index),
index: HumanIndex::zero_based(index),
},
pattern_type,
region,
@ -1927,6 +1936,26 @@ fn _num_signed64(pool: &mut Pool) -> Type2 {
)
}
#[inline(always)]
fn num_unsigned32(pool: &mut Pool) -> Type2 {
let alias_content = Type2::TagUnion(
PoolVec::new(
std::iter::once((
TagName::Private(Symbol::NUM_UNSIGNED32),
PoolVec::empty(pool),
)),
pool,
),
pool.add(Type2::EmptyTagUnion),
);
Type2::Alias(
Symbol::NUM_UNSIGNED32,
PoolVec::empty(pool),
pool.add(alias_content),
)
}
#[inline(always)]
fn _num_integer(pool: &mut Pool, range: TypeId) -> Type2 {
let range_type = pool.get(range);
View file
@ -15,6 +15,24 @@ pub struct AST {
pub def_ids: Vec<DefId>,
}
impl AST {
pub fn insert_def_at_index(&mut self, new_def_id: DefId, index: usize) {
self.def_ids.insert(index, new_def_id);
}
// TODO print in tree shape, similar to linux tree command
pub fn ast_to_string(&self, pool: &Pool) -> String {
let mut full_ast_string = String::new();
for def_id in self.def_ids.iter() {
full_ast_string.push_str(&def2_to_string(*def_id, pool));
full_ast_string.push_str("\n\n");
}
full_ast_string
}
}
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum ASTNodeId {
ADefId(DefId),
View file
@ -13,9 +13,10 @@
// use crate::pattern::{bindings_from_patterns, canonicalize_pattern, Pattern};
// use crate::procedure::References;
use roc_collections::all::{default_hasher, ImMap, MutMap, MutSet, SendMap};
use roc_error_macros::{todo_abilities, todo_opaques};
use roc_module::ident::Lowercase;
use roc_module::symbol::Symbol;
use roc_parse::ast::{self, AliasHeader};
use roc_parse::ast::{self, TypeHeader};
use roc_parse::pattern::PatternType;
use roc_problem::can::{Problem, RuntimeError};
use roc_region::all::{Loc, Region};
@ -199,7 +200,7 @@ fn to_pending_def<'a>(
}
roc_parse::ast::Def::Alias {
header: AliasHeader { name, vars },
header: TypeHeader { name, vars },
ann,
} => {
let region = Region::span_across(&name.region, &ann.region);
@ -260,6 +261,9 @@ fn to_pending_def<'a>(
}
}
Opaque { .. } => todo_opaques!(),
Ability { .. } => todo_abilities!(),
Expect(_) => todo!(),
SpaceBefore(sub_def, _) | SpaceAfter(sub_def, _) => {
@ -321,7 +325,7 @@ fn from_pending_alias<'a>(
for loc_lowercase in vars {
if !named_rigids.contains_key(&loc_lowercase.value) {
env.problem(Problem::PhantomTypeArgument {
alias: symbol,
typ: symbol,
variable_region: loc_lowercase.region,
variable_name: loc_lowercase.value.clone(),
});
View file
@ -148,6 +148,9 @@ fn expr2_to_string_helper(
&Expr2::Var { .. } => {
out_string.push_str(&format!("{:?}", expr2,));
}
Expr2::RuntimeError { .. } => {
out_string.push_str("RuntimeError\n");
}
other => todo!("Implement for {:?}", other),
}
View file
@ -50,15 +50,16 @@ pub fn expr_to_expr2<'a>(
region: Region,
) -> (Expr2, self::Output) {
use roc_parse::ast::Expr::*;
//dbg!("{:?}", parse_expr);
match parse_expr {
Float(string) => {
match finish_parsing_float(string) {
Ok((float, _bound)) => {
Ok((string_without_suffix, float, _bound)) => {
let expr = Expr2::Float {
number: FloatVal::F64(float),
var: env.var_store.fresh(),
text: PoolStr::new(string, env.pool),
text: PoolStr::new(string_without_suffix, env.pool),
};
(expr, Output::default())
View file
@ -8,6 +8,7 @@ use roc_can::num::{
finish_parsing_base, finish_parsing_float, finish_parsing_num, ParsedNumResult,
};
use roc_collections::all::BumpMap;
use roc_error_macros::todo_opaques;
use roc_module::symbol::{Interns, Symbol};
use roc_parse::ast::{StrLiteral, StrSegment};
use roc_parse::pattern::PatternType;
@ -38,6 +39,7 @@ pub enum Pattern2 {
IntLiteral(IntVal), // 16B
FloatLiteral(FloatVal), // 16B
StrLiteral(PoolStr), // 8B
CharacterLiteral(char), // 4B
Underscore, // 0B
GlobalTag {
whole_var: Variable, // 4B
@ -185,7 +187,7 @@ pub fn to_pattern2<'a>(
let problem = MalformedPatternProblem::MalformedFloat;
malformed_pattern(env, problem, region)
}
Ok((float, _bound)) => Pattern2::FloatLiteral(FloatVal::F64(float)),
Ok((_, float, _bound)) => Pattern2::FloatLiteral(FloatVal::F64(float)),
},
ptype => unsupported_pattern(env, ptype, region),
},
@ -248,6 +250,26 @@ pub fn to_pattern2<'a>(
ptype => unsupported_pattern(env, ptype, region),
},
SingleQuote(string) => match pattern_type {
WhenBranch => {
let mut it = string.chars().peekable();
if let Some(char) = it.next() {
if it.peek().is_none() {
Pattern2::CharacterLiteral(char)
} else {
// multiple chars is found
let problem = MalformedPatternProblem::MultipleCharsInSingleQuote;
malformed_pattern(env, problem, region)
}
} else {
// no characters found
let problem = MalformedPatternProblem::EmptySingleQuote;
malformed_pattern(env, problem, region)
}
}
ptype => unsupported_pattern(env, ptype, region),
},
GlobalTag(name) => {
// Canonicalize the tag's name.
Pattern2::GlobalTag {
@ -269,6 +291,8 @@ pub fn to_pattern2<'a>(
}
}
OpaqueRef(..) => todo_opaques!(),
Apply(tag, patterns) => {
let can_patterns = PoolVec::with_capacity(patterns.len() as u32, env.pool);
for (loc_pattern, node_id) in (*patterns).iter().zip(can_patterns.iter_node_ids()) {
@ -503,6 +527,7 @@ pub fn symbols_from_pattern(pool: &Pool, initial: &Pattern2) -> Vec<Symbol> {
| IntLiteral(_)
| FloatLiteral(_)
| StrLiteral(_)
| CharacterLiteral(_)
| Underscore
| MalformedPattern(_, _)
| Shadowed { .. }
@ -563,6 +588,7 @@ pub fn symbols_and_variables_from_pattern(
| IntLiteral(_)
| FloatLiteral(_)
| StrLiteral(_)
| CharacterLiteral(_)
| Underscore
| MalformedPattern(_, _)
| Shadowed { .. }
View file
@ -3,6 +3,7 @@
#![allow(unused_imports)]
// use roc_can::expr::Output;
use roc_collections::all::{MutMap, MutSet};
use roc_error_macros::todo_abilities;
use roc_module::ident::{Ident, Lowercase, TagName};
use roc_module::symbol::Symbol;
use roc_region::all::{Loc, Region};
@ -329,9 +330,9 @@ pub fn to_type2<'a>(
annotation: &roc_parse::ast::TypeAnnotation<'a>,
region: Region,
) -> Type2 {
use roc_parse::ast::AliasHeader;
use roc_parse::ast::Pattern;
use roc_parse::ast::TypeAnnotation::*;
use roc_parse::ast::TypeHeader;
match annotation {
Apply(module_name, ident, targs) => {
@ -455,7 +456,7 @@ pub fn to_type2<'a>(
As(
loc_inner,
_spaces,
AliasHeader {
TypeHeader {
name,
vars: loc_vars,
},
@ -570,6 +571,7 @@ pub fn to_type2<'a>(
// }
Type2::AsAlias(symbol, vars, alias.actual)
}
Where { .. } => todo_abilities!(),
SpaceBefore(nested, _) | SpaceAfter(nested, _) => {
to_type2(env, scope, references, nested, region)
}
View file
@ -117,7 +117,7 @@ impl<'a> Env<'a> {
if module_id == self.home {
match self.ident_ids.get_id(&ident) {
Some(ident_id) => {
let symbol = Symbol::new(module_id, *ident_id);
let symbol = Symbol::new(module_id, ident_id);
self.qualified_lookups.insert(symbol);
@ -138,7 +138,7 @@ impl<'a> Env<'a> {
match self.dep_idents.get(&module_id) {
Some(exposed_ids) => match exposed_ids.get_id(&ident) {
Some(ident_id) => {
let symbol = Symbol::new(module_id, *ident_id);
let symbol = Symbol::new(module_id, ident_id);
self.qualified_lookups.insert(symbol);
@ -160,12 +160,17 @@ impl<'a> Env<'a> {
})
}
},
None => {
panic!(
"Module {} exists, but is not recorded in dep_idents",
module_name
)
}
None => Err(RuntimeError::ModuleNotImported {
module_name,
imported_modules: self
.dep_idents
.keys()
.filter_map(|module_id| self.module_ids.get_name(*module_id))
.map(|module_name| module_name.as_ref().into())
.collect(),
region,
module_exists: true,
}),
}
}
}
@ -177,6 +182,7 @@ impl<'a> Env<'a> {
.map(|string| string.as_ref().into())
.collect(),
region,
module_exists: false,
}),
}
}
View file
@ -43,7 +43,8 @@ fn to_type2(
var_store: &mut VarStore,
) -> Type2 {
match solved_type {
SolvedType::Alias(symbol, solved_type_variables, _todo, solved_actual) => {
// TODO(opaques): take opaques into account
SolvedType::Alias(symbol, solved_type_variables, _todo, solved_actual, _kind) => {
let type_variables = PoolVec::with_capacity(solved_type_variables.len() as u32, pool);
for (type_variable_node_id, (lowercase, solved_arg)) in type_variables
@ -243,7 +244,7 @@ impl Scope {
// when the value was exposed in the module header,
// use that existing IdentId. Otherwise, create a fresh one.
let ident_id = match exposed_ident_ids.get_id(&ident) {
Some(ident_id) => *ident_id,
Some(ident_id) => ident_id,
None => all_ident_ids.add(ident.clone().into()),
};
View file
@ -10,12 +10,10 @@
///
/// Pages also use the node value 0 (all 0 bits) to mark nodes as unoccupied.
/// This is important for performance.
use libc::{MAP_ANONYMOUS, MAP_PRIVATE, PROT_READ, PROT_WRITE};
use std::any::type_name;
use std::ffi::c_void;
use std::marker::PhantomData;
use std::mem::{align_of, size_of, MaybeUninit};
use std::ptr::null;
pub const NODE_BYTES: usize = 32;
@ -108,14 +106,32 @@ impl Pool {
// addresses from the OS which will be lazily translated into
// physical memory one 4096-byte page at a time, once we actually
// try to read or write in that page's address range.
libc::mmap(
null::<c_void>() as *mut c_void,
bytes_to_mmap,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
0,
0,
)
#[cfg(unix)]
{
use libc::{MAP_ANONYMOUS, MAP_PRIVATE, PROT_READ, PROT_WRITE};
libc::mmap(
std::ptr::null_mut(),
bytes_to_mmap,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
0,
0,
)
}
#[cfg(windows)]
{
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::PAGE_READWRITE;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE};
VirtualAlloc(
std::ptr::null_mut(),
bytes_to_mmap,
MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE,
)
}
} as *mut [MaybeUninit<u8>; NODE_BYTES];
// This is our actual capacity, in nodes.
@ -230,10 +246,24 @@ impl<T> std::ops::IndexMut<NodeId<T>> for Pool {
impl Drop for Pool {
fn drop(&mut self) {
unsafe {
libc::munmap(
self.nodes as *mut c_void,
NODE_BYTES * self.capacity as usize,
);
#[cfg(unix)]
{
libc::munmap(
self.nodes as *mut c_void,
NODE_BYTES * self.capacity as usize,
);
}
#[cfg(windows)]
{
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
VirtualFree(
self.nodes as *mut c_void,
NODE_BYTES * self.capacity as usize,
MEM_RELEASE,
);
}
}
}
}
View file
@ -1,18 +1,16 @@
use std::path::Path;
use bumpalo::Bump;
use roc_collections::all::MutMap;
use roc_load::file::LoadedModule;
use roc_load::LoadedModule;
use roc_target::TargetInfo;
pub fn load_module(src_file: &Path) -> LoadedModule {
let subs_by_module = MutMap::default();
let subs_by_module = Default::default();
let arena = Bump::new();
let loaded = roc_load::file::load_and_typecheck(
let loaded = roc_load::load_and_typecheck(
&arena,
src_file.to_path_buf(),
arena.alloc(roc_builtins::std::standard_stdlib()),
src_file.parent().unwrap_or_else(|| {
panic!(
"src_file {:?} did not have a parent directory but I need to have one.",
@ -25,9 +23,9 @@ pub fn load_module(src_file: &Path) -> LoadedModule {
match loaded {
Ok(x) => x,
Err(roc_load::file::LoadingProblem::FormattedReport(report)) => {
Err(roc_load::LoadingProblem::FormattedReport(report)) => {
panic!(
"Failed to load module from src_file {:?}. Report: {:?}",
"Failed to load module from src_file {:?}. Report: {}",
src_file, report
);
}
View file
@ -25,7 +25,7 @@ pub fn parse_from_string<'a>(
) -> ASTResult<AST> {
let blank_line_indx = code_str
.find("\n\n")
.expect("I was expecting a double newline to split header and rest of code.");
.expect("I was expecting two newline chars to split header and rest of code.");
let header_str = &code_str[0..blank_line_indx];
let tail_str = &code_str[blank_line_indx..];
View file
@ -4,7 +4,7 @@ use crate::lang::core::{expr::expr2::ExprId, header::AppHeader};
pub fn parse_from_string(_header_str: &str, ast_node_id: ExprId) -> AppHeader {
AppHeader {
app_name: "\"untitled-app\"".to_owned(),
packages_base: "\"platform\"".to_owned(),
packages_base: "\"c-platform\"".to_owned(),
imports: vec![],
provides: vec!["main".to_owned()],
ast_node_id,
View file
@ -1,133 +0,0 @@
use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_fmt::def::fmt_def;
use roc_fmt::module::fmt_module;
use roc_parse::ast::{Def, Module};
use roc_parse::module::module_defs;
use roc_parse::parser;
use roc_parse::parser::{Parser, SyntaxError};
use roc_region::all::Located;
use std::ffi::OsStr;
use std::path::Path;
use std::{fs, io};
#[derive(Debug)]
pub struct File<'a> {
path: &'a Path,
module_header: Module<'a>,
content: Vec<'a, Located<Def<'a>>>,
}
#[derive(Debug)]
pub enum ReadError<'a> {
Read(std::io::Error),
ParseDefs(SyntaxError<'a>),
ParseHeader(SyntaxError<'a>),
DoesntHaveRocExtension,
}
impl<'a> File<'a> {
pub fn read(path: &'a Path, arena: &'a Bump) -> Result<File<'a>, ReadError<'a>> {
if path.extension() != Some(OsStr::new("roc")) {
return Err(ReadError::DoesntHaveRocExtension);
}
let bytes = fs::read(path).map_err(ReadError::Read)?;
let allocation = arena.alloc(bytes);
let module_parse_state = parser::State::new(allocation);
let parsed_module = roc_parse::module::parse_header(arena, module_parse_state);
match parsed_module {
Ok((module, state)) => {
let parsed_defs = module_defs().parse(arena, state);
match parsed_defs {
Ok((_, defs, _)) => Ok(File {
path,
module_header: module,
content: defs,
}),
Err((_, error, _)) => Err(ReadError::ParseDefs(error)),
}
}
Err(error) => Err(ReadError::ParseHeader(SyntaxError::Header(error))),
}
}
pub fn fmt(&self) -> String {
let arena = Bump::new();
let mut formatted_file = String::new();
let mut module_header_buf = bumpalo::collections::String::new_in(&arena);
fmt_module(&mut module_header_buf, &self.module_header);
formatted_file.push_str(module_header_buf.as_str());
for def in &self.content {
let mut def_buf = bumpalo::collections::String::new_in(&arena);
fmt_def(&mut def_buf, &def.value, 0);
formatted_file.push_str(def_buf.as_str());
}
formatted_file
}
pub fn fmt_then_write_to(&self, write_path: &'a Path) -> io::Result<()> {
let formatted_file = self.fmt();
fs::write(write_path, formatted_file)
}
pub fn fmt_then_write_with_name(&self, new_name: &str) -> io::Result<()> {
self.fmt_then_write_to(
self.path
.with_file_name(new_name)
.with_extension("roc")
.as_path(),
)
}
pub fn fmt_then_write(&self) -> io::Result<()> {
self.fmt_then_write_to(self.path)
}
}
#[cfg(test)]
mod test_file {
use crate::lang::roc_file;
use bumpalo::Bump;
use std::path::Path;
#[test]
fn read_and_fmt_simple_roc_module() {
let simple_module_path = Path::new("./tests/modules/SimpleUnformatted.roc");
let arena = Bump::new();
let file = roc_file::File::read(simple_module_path, &arena)
.expect("Could not read SimpleUnformatted.roc in test_file test");
assert_eq!(
file.fmt(),
indoc!(
r#"
interface Simple
exposes [
v, x
]
imports []
v : Str
v = "Value!"
x : Int
x = 4"#
)
);
}
}
View file
@ -12,7 +12,8 @@ use roc_types::subs::{
SubsSlice, UnionTags, Variable, VariableSubsSlice,
};
use roc_types::types::{
gather_fields_unsorted_iter, Alias, Category, ErrorType, PatternCategory, RecordField,
gather_fields_unsorted_iter, Alias, AliasKind, Category, ErrorType, PatternCategory,
RecordField,
};
use roc_unify::unify::unify;
use roc_unify::unify::Mode;
@ -892,7 +893,9 @@ fn type_to_variable<'a>(
let arg_vars = AliasVariables::insert_into_subs(subs, arg_vars, []);
let alias_var = type_to_variable(arena, mempool, subs, rank, pools, cached, alias_type);
let content = Content::Alias(*symbol, arg_vars, alias_var);
// TODO(opaques): take opaques into account
let content = Content::Alias(*symbol, arg_vars, alias_var, AliasKind::Structural);
let result = register(subs, rank, pools, content);
@ -1384,10 +1387,10 @@ fn adjust_rank_content(
}
}
Alias(_, args, real_var) => {
Alias(_, args, real_var, _) => {
let mut rank = Rank::toplevel();
for var_index in args.variables() {
for var_index in args.all_variables() {
let var = subs[var_index];
rank = rank.max(adjust_rank(subs, young_mark, visit_mark, group_rank, var));
}
@ -1544,8 +1547,8 @@ fn instantiate_rigids_help(
subs.set(copy, make_descriptor(FlexVar(Some(name))));
}
Alias(_, args, real_type_var) => {
for var_index in args.variables() {
Alias(_, args, real_type_var, _) => {
for var_index in args.all_variables() {
let var = subs[var_index];
instantiate_rigids_help(subs, max_rank, pools, var);
}
@ -1794,10 +1797,10 @@ fn deep_copy_var_help(
copy
}
Alias(symbol, mut args, real_type_var) => {
let mut new_args = Vec::with_capacity(args.variables().len());
Alias(symbol, mut args, real_type_var, kind) => {
let mut new_args = Vec::with_capacity(args.all_variables().len());
for var_index in args.variables() {
for var_index in args.all_variables() {
let var = subs[var_index];
let new_var = deep_copy_var_help(subs, max_rank, pools, var);
new_args.push(new_var);
@ -1806,7 +1809,7 @@ fn deep_copy_var_help(
args.replace_variables(subs, new_args);
let new_real_type_var = deep_copy_var_help(subs, max_rank, pools, real_type_var);
let new_content = Alias(symbol, args, new_real_type_var);
let new_content = Alias(symbol, args, new_real_type_var, kind);
subs.set(copy, make_descriptor(new_content));
View file
@ -15,21 +15,24 @@ test = false
bench = false
[features]
default = ["target-aarch64", "target-x86_64", "target-wasm32", "editor"]
default = ["target-aarch64", "target-x86_64", "target-wasm32", "editor", "llvm"]
wasm32-cli-run = ["target-wasm32", "run-wasm32"]
i386-cli-run = ["target-x86"]
# TODO: change to roc_repl_cli/llvm once roc_repl can run without llvm.
llvm = ["roc_build/llvm", "roc_repl_cli"]
editor = ["roc_editor"]
run-wasm32 = ["wasmer", "wasmer-wasi"]
# Compiling for a different platform than the host can cause linker errors.
target-arm = ["roc_build/target-arm"]
target-aarch64 = ["roc_build/target-aarch64"]
target-x86 = ["roc_build/target-x86"]
target-x86_64 = ["roc_build/target-x86_64"]
target-wasm32 = ["roc_build/target-wasm32"]
target-arm = ["roc_build/target-arm", "roc_repl_cli/target-arm"]
target-aarch64 = ["roc_build/target-aarch64", "roc_repl_cli/target-aarch64"]
target-x86 = ["roc_build/target-x86", "roc_repl_cli/target-x86"]
target-x86_64 = ["roc_build/target-x86_64", "roc_repl_cli/target-x86_64"]
target-wasm32 = ["roc_build/target-wasm32", "roc_repl_cli/target-wasm32"]
target-all = [
"target-aarch64",
@ -50,27 +53,31 @@ roc_module = { path = "../compiler/module" }
roc_builtins = { path = "../compiler/builtins" }
roc_mono = { path = "../compiler/mono" }
roc_load = { path = "../compiler/load" }
roc_build = { path = "../compiler/build", default-features = false }
roc_build = { path = "../compiler/build" }
roc_fmt = { path = "../compiler/fmt" }
roc_target = { path = "../compiler/roc_target" }
roc_reporting = { path = "../reporting" }
roc_error_macros = { path = "../error_macros" }
roc_editor = { path = "../editor", optional = true }
roc_linker = { path = "../linker" }
roc_repl_cli = { path = "../repl_cli" }
clap = { version = "= 3.0.0-beta.5", default-features = false, features = ["std", "color", "suggestions"] }
roc_repl_cli = { path = "../repl_cli", optional = true }
clap = { version = "3.0.0-beta.5", default-features = false, features = ["std", "color", "suggestions"] }
const_format = "0.2.22"
bumpalo = { version = "3.8.0", features = ["collections"] }
mimalloc = { version = "0.1.26", default-features = false }
target-lexicon = "0.12.2"
tempfile = "3.2.0"
wasmer = { version = "2.0.0", optional = true, default-features = false, features = ["default-cranelift", "default-universal"] }
wasmer-wasi = { version = "2.0.0", optional = true }
# Wasmer singlepass compiler only works on x86_64.
[target.'cfg(target_arch = "x86_64")'.dependencies]
wasmer = { version = "2.0.0", optional = true, default-features = false, features = ["default-singlepass", "default-universal"] }
[target.'cfg(not(target_arch = "x86_64"))'.dependencies]
wasmer = { version = "2.0.0", optional = true, default-features = false, features = ["default-cranelift", "default-universal"] }
[dev-dependencies]
wasmer = { version = "2.0.0", default-features = false, features = ["default-cranelift", "default-universal"] }
wasmer-wasi = "2.0.0"
pretty_assertions = "1.0.0"
roc_test_utils = { path = "../test_utils" }
@ -79,6 +86,13 @@ serial_test = "0.5.1"
criterion = { git = "https://github.com/Anton-4/criterion.rs"}
cli_utils = { path = "../cli_utils" }
# Wasmer singlepass compiler only works on x86_64.
[target.'cfg(target_arch = "x86_64")'.dev-dependencies]
wasmer = { version = "2.0.0", default-features = false, features = ["default-singlepass", "default-universal"] }
[target.'cfg(not(target_arch = "x86_64"))'.dev-dependencies]
wasmer = { version = "2.0.0", default-features = false, features = ["default-cranelift", "default-universal"] }
[[bench]]
name = "time_bench"
harness = false
View file
@ -4,8 +4,7 @@ use roc_build::{
program,
};
use roc_builtins::bitcode;
use roc_collections::all::MutMap;
use roc_load::file::LoadingProblem;
use roc_load::LoadingProblem;
use roc_mono::ir::OptLevel;
use roc_target::TargetInfo;
use std::path::PathBuf;
@ -61,15 +60,11 @@ pub fn build_file<'a>(
let target_info = TargetInfo::from(target);
// Step 1: compile the app and generate the .o file
let subs_by_module = MutMap::default();
let subs_by_module = Default::default();
// Release builds use uniqueness optimizations
let stdlib = arena.alloc(roc_builtins::std::standard_stdlib());
let loaded = roc_load::file::load_and_monomorphize(
let loaded = roc_load::load_and_monomorphize(
arena,
roc_file_path.clone(),
stdlib,
src_dir.as_path(),
subs_by_module,
target_info,
@ -204,7 +199,11 @@ pub fn build_file<'a>(
buf.push_str("Code Generation");
buf.push('\n');
report_timing(buf, "Generate LLVM IR", code_gen_timing.code_gen);
report_timing(
buf,
"Generate Assembly from Mono IR",
code_gen_timing.code_gen,
);
report_timing(buf, "Emit .o file", code_gen_timing.emit_o_file);
let compilation_end = compilation_start.elapsed().unwrap();
@ -362,15 +361,11 @@ pub fn check_file(
let target_info = TargetInfo::default_x86_64();
// Step 1: compile the app and generate the .o file
let subs_by_module = MutMap::default();
let subs_by_module = Default::default();
// Release builds use uniqueness optimizations
let stdlib = arena.alloc(roc_builtins::std::standard_stdlib());
let mut loaded = roc_load::file::load_and_typecheck(
let mut loaded = roc_load::load_and_typecheck(
arena,
roc_file_path,
stdlib,
src_dir.as_path(),
subs_by_module,
target_info,
View file
@ -1,4 +1,5 @@
use std::path::PathBuf;
use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use crate::FormatMode;
use bumpalo::collections::Vec;
@ -9,8 +10,8 @@ use roc_fmt::module::fmt_module;
use roc_fmt::Buf;
use roc_module::called_via::{BinOp, UnaryOp};
use roc_parse::ast::{
AliasHeader, AssignedField, Collection, Expr, Pattern, Spaced, StrLiteral, StrSegment, Tag,
TypeAnnotation, WhenBranch,
AbilityDemand, AssignedField, Collection, Expr, Has, HasClause, Pattern, Spaced, StrLiteral,
StrSegment, Tag, TypeAnnotation, TypeHeader, WhenBranch,
};
use roc_parse::header::{
AppHeader, ExposedName, HostedHeader, ImportsEntry, InterfaceHeader, ModuleName, PackageEntry,
@ -25,7 +26,54 @@ use roc_parse::{
};
use roc_region::all::{Loc, Region};
fn flatten_directories(files: std::vec::Vec<PathBuf>) -> std::vec::Vec<PathBuf> {
let mut to_flatten = files;
let mut files = vec![];
while let Some(path) = to_flatten.pop() {
if path.is_dir() {
match path.read_dir() {
Ok(directory) => {
for item in directory {
match item {
Ok(file) => {
let file_path = file.path();
if file_path.is_dir() {
to_flatten.push(file_path);
} else if is_roc_file(&file_path) {
files.push(file_path);
}
}
Err(error) => internal_error!(
"There was an error while trying to read a file from a directory: {:?}",
error
),
}
}
}
Err(error) => internal_error!(
"There was an error while trying to read the contents of a directory: {:?}",
error
),
}
} else if is_roc_file(&path) {
files.push(path);
}
}
files
}
fn is_roc_file(path: &Path) -> bool {
let ext = path.extension().and_then(OsStr::to_str);
return matches!(ext, Some("roc"));
}
pub fn format(files: std::vec::Vec<PathBuf>, mode: FormatMode) -> Result<(), String> {
let files = flatten_directories(files);
for file in files {
let arena = Bump::new();
@ -138,6 +186,8 @@ fn fmt_all<'a>(arena: &'a Bump, buf: &mut Buf<'a>, ast: &'a Ast) {
for def in &ast.defs {
fmt_def(buf, arena.alloc(def.value), 0);
}
buf.fmt_end_of_file();
}
/// RemoveSpaces normalizes the ast to something that we _expect_ to be invariant under formatting.
@ -402,15 +452,25 @@ impl<'a> RemoveSpaces<'a> for Def<'a> {
Def::Annotation(a.remove_spaces(arena), b.remove_spaces(arena))
}
Def::Alias {
header: AliasHeader { name, vars },
header: TypeHeader { name, vars },
ann,
} => Def::Alias {
header: AliasHeader {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
ann: ann.remove_spaces(arena),
},
Def::Opaque {
header: TypeHeader { name, vars },
typ,
} => Def::Opaque {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
typ: typ.remove_spaces(arena),
},
Def::Body(a, b) => Def::Body(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
@ -428,6 +488,18 @@ impl<'a> RemoveSpaces<'a> for Def<'a> {
body_pattern: arena.alloc(body_pattern.remove_spaces(arena)),
body_expr: arena.alloc(body_expr.remove_spaces(arena)),
},
Def::Ability {
header: TypeHeader { name, vars },
loc_has,
demands,
} => Def::Ability {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
loc_has: loc_has.remove_spaces(arena),
demands: demands.remove_spaces(arena),
},
Def::Expect(a) => Def::Expect(arena.alloc(a.remove_spaces(arena))),
Def::NotYetImplemented(a) => Def::NotYetImplemented(a),
Def::SpaceBefore(a, _) | Def::SpaceAfter(a, _) => a.remove_spaces(arena),
@ -435,6 +507,21 @@ impl<'a> RemoveSpaces<'a> for Def<'a> {
}
}
impl<'a> RemoveSpaces<'a> for Has<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
Has::Has
}
}
impl<'a> RemoveSpaces<'a> for AbilityDemand<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
AbilityDemand {
name: self.name.remove_spaces(arena),
typ: self.typ.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for WhenBranch<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
WhenBranch {
@ -514,6 +601,7 @@ impl<'a> RemoveSpaces<'a> for Expr<'a> {
Expr::Underscore(a) => Expr::Underscore(a),
Expr::GlobalTag(a) => Expr::GlobalTag(a),
Expr::PrivateTag(a) => Expr::PrivateTag(a),
Expr::OpaqueRef(a) => Expr::OpaqueRef(a),
Expr::Closure(a, b) => Expr::Closure(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
@ -554,6 +642,7 @@ impl<'a> RemoveSpaces<'a> for Expr<'a> {
Expr::PrecedenceConflict(a) => Expr::PrecedenceConflict(a),
Expr::SpaceBefore(a, _) => a.remove_spaces(arena),
Expr::SpaceAfter(a, _) => a.remove_spaces(arena),
Expr::SingleQuote(a) => Expr::Num(a),
}
}
}
@ -564,6 +653,7 @@ impl<'a> RemoveSpaces<'a> for Pattern<'a> {
Pattern::Identifier(a) => Pattern::Identifier(a),
Pattern::GlobalTag(a) => Pattern::GlobalTag(a),
Pattern::PrivateTag(a) => Pattern::PrivateTag(a),
Pattern::OpaqueRef(a) => Pattern::OpaqueRef(a),
Pattern::Apply(a, b) => Pattern::Apply(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
@ -595,6 +685,7 @@ impl<'a> RemoveSpaces<'a> for Pattern<'a> {
}
Pattern::SpaceBefore(a, _) => a.remove_spaces(arena),
Pattern::SpaceAfter(a, _) => a.remove_spaces(arena),
Pattern::SingleQuote(a) => Pattern::NumLiteral(a),
}
}
}
@ -621,12 +712,26 @@ impl<'a> RemoveSpaces<'a> for TypeAnnotation<'a> {
},
TypeAnnotation::Inferred => TypeAnnotation::Inferred,
TypeAnnotation::Wildcard => TypeAnnotation::Wildcard,
TypeAnnotation::Where(annot, has_clauses) => TypeAnnotation::Where(
arena.alloc(annot.remove_spaces(arena)),
arena.alloc(has_clauses.remove_spaces(arena)),
),
TypeAnnotation::SpaceBefore(a, _) => a.remove_spaces(arena),
TypeAnnotation::SpaceAfter(a, _) => a.remove_spaces(arena),
TypeAnnotation::Malformed(a) => TypeAnnotation::Malformed(a),
}
}
}
impl<'a> RemoveSpaces<'a> for HasClause<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
HasClause {
var: self.var.remove_spaces(arena),
ability: self.ability.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for Tag<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
View file
@ -5,7 +5,8 @@ use build::{BuildOutcome, BuiltFile};
use bumpalo::Bump;
use clap::{App, AppSettings, Arg, ArgMatches};
use roc_build::link::LinkType;
use roc_load::file::LoadingProblem;
use roc_error_macros::user_error;
use roc_load::LoadingProblem;
use roc_mono::ir::OptLevel;
use std::env;
use std::io;
@ -31,8 +32,9 @@ pub const CMD_FORMAT: &str = "format";
pub const FLAG_DEBUG: &str = "debug";
pub const FLAG_DEV: &str = "dev";
pub const FLAG_OPTIMIZE: &str = "optimize";
pub const FLAG_OPT_SIZE: &str = "opt-size";
pub const FLAG_LIB: &str = "lib";
pub const FLAG_BACKEND: &str = "backend";
pub const FLAG_TARGET: &str = "target";
pub const FLAG_TIME: &str = "time";
pub const FLAG_LINK: &str = "roc-linker";
pub const FLAG_PRECOMPILED: &str = "precompiled-host";
@ -40,7 +42,6 @@ pub const FLAG_VALGRIND: &str = "valgrind";
pub const FLAG_CHECK: &str = "check";
pub const ROC_FILE: &str = "ROC_FILE";
pub const ROC_DIR: &str = "ROC_DIR";
pub const BACKEND: &str = "BACKEND";
pub const DIRECTORY_OR_FILES: &str = "DIRECTORY_OR_FILES";
pub const ARGS_FOR_APP: &str = "ARGS_FOR_APP";
@ -61,6 +62,12 @@ pub fn build_app<'a>() -> App<'a> {
.about("Optimize your compiled Roc program to run faster. (Optimization takes time to complete.)")
.required(false),
)
.arg(
Arg::new(FLAG_OPT_SIZE)
.long(FLAG_OPT_SIZE)
.about("Optimize your compiled Roc program to have a small binary size. (Optimization takes time to complete.)")
.required(false),
)
.arg(
Arg::new(FLAG_DEV)
.long(FLAG_DEV)
@ -68,12 +75,11 @@ pub fn build_app<'a>() -> App<'a> {
.required(false),
)
.arg(
Arg::new(FLAG_BACKEND)
.long(FLAG_BACKEND)
.about("Choose a different backend")
// .requires(BACKEND)
.default_value(Backend::default().as_str())
.possible_values(Backend::OPTIONS)
Arg::new(FLAG_TARGET)
.long(FLAG_TARGET)
.about("Choose a different target")
.default_value(Target::default().as_str())
.possible_values(Target::OPTIONS)
.required(false),
)
.arg(
@ -166,12 +172,18 @@ pub fn build_app<'a>() -> App<'a> {
.requires(ROC_FILE)
.required(false),
)
.arg(
Arg::new(FLAG_DEV)
.long(FLAG_DEV)
.about("Make compilation as fast as possible. (Runtime performance may suffer)")
.required(false),
)
.arg(
Arg::new(FLAG_OPT_SIZE)
.long(FLAG_OPT_SIZE)
.about("Optimize your compiled Roc program to have a small binary size. (Optimization takes time to complete.)")
.required(false),
)
.arg(
Arg::new(FLAG_DEV)
.long(FLAG_DEV)
.about("Make compilation as fast as possible. (Runtime performance may suffer)")
.required(false),
)
.arg(
Arg::new(FLAG_DEBUG)
.long(FLAG_DEBUG)
@ -198,12 +210,11 @@ pub fn build_app<'a>() -> App<'a> {
.required(false),
)
.arg(
Arg::new(FLAG_BACKEND)
.long(FLAG_BACKEND)
.about("Choose a different backend")
// .requires(BACKEND)
.default_value(Backend::default().as_str())
.possible_values(Backend::OPTIONS)
Arg::new(FLAG_TARGET)
.long(FLAG_TARGET)
.about("Choose a different target")
.default_value(Target::default().as_str())
.possible_values(Target::OPTIONS)
.required(false),
)
.arg(
@ -236,11 +247,7 @@ pub fn build_app<'a>() -> App<'a> {
}
pub fn docs(files: Vec<PathBuf>) {
roc_docs::generate_docs_html(
files,
roc_builtins::std::standard_stdlib(),
Path::new("./generated-docs"),
)
roc_docs::generate_docs_html(files, Path::new("./generated-docs"))
}
#[derive(Debug, PartialEq, Eq)]
@ -259,12 +266,12 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
use std::str::FromStr;
use BuildConfig::*;
let backend = match matches.value_of(FLAG_BACKEND) {
Some(name) => Backend::from_str(name).unwrap(),
None => Backend::default(),
let target = match matches.value_of(FLAG_TARGET) {
Some(name) => Target::from_str(name).unwrap(),
None => Target::default(),
};
let target = backend.to_triple();
let triple = target.to_triple();
let arena = Bump::new();
let filename = matches.value_of(ROC_FILE).unwrap();
@ -272,12 +279,14 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
let original_cwd = std::env::current_dir()?;
let opt_level = match (
matches.is_present(FLAG_OPTIMIZE),
matches.is_present(FLAG_OPT_SIZE),
matches.is_present(FLAG_DEV),
) {
(true, false) => OptLevel::Optimize,
(true, true) => panic!("development cannot be optimized!"),
(false, true) => OptLevel::Development,
(false, false) => OptLevel::Normal,
(true, false, false) => OptLevel::Optimize,
(false, true, false) => OptLevel::Size,
(false, false, true) => OptLevel::Development,
(false, false, false) => OptLevel::Normal,
_ => user_error!("build can be only one of `--dev`, `--optimize`, or `--opt-size`"),
};
let emit_debug_info = matches.is_present(FLAG_DEBUG);
let emit_timings = matches.is_present(FLAG_TIME);
@ -290,10 +299,10 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
let surgically_link = matches.is_present(FLAG_LINK);
let precompiled = matches.is_present(FLAG_PRECOMPILED);
if surgically_link && !roc_linker::supported(&link_type, &target) {
if surgically_link && !roc_linker::supported(&link_type, &triple) {
panic!(
"Link type, {:?}, with target, {}, not supported by roc linker",
link_type, target
link_type, triple
);
}
@ -322,7 +331,7 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
let target_valgrind = matches.is_present(FLAG_VALGRIND);
let res_binary_path = build_file(
&arena,
&target,
&triple,
src_dir,
path,
opt_level,
@ -361,7 +370,7 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
Ok(outcome.status_code())
}
BuildAndRun { roc_file_arg_index } => {
let mut cmd = match target.architecture {
let mut cmd = match triple.architecture {
Architecture::Wasm32 => {
// If possible, report the generated executable name relative to the current dir.
let generated_filename = binary_path
@ -382,7 +391,7 @@ pub fn build(matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
_ => Command::new(&binary_path),
};
if let Architecture::Wasm32 = target.architecture {
if let Architecture::Wasm32 = triple.architecture {
cmd.arg(binary_path);
}
@ -487,43 +496,43 @@ fn run_with_wasmer(_wasm_path: &std::path::Path, _args: &[String]) {
println!("Running wasm files not support");
}
enum Backend {
enum Target {
Host,
X86_32,
X86_64,
Wasm32,
}
impl Default for Backend {
impl Default for Target {
fn default() -> Self {
Backend::Host
Target::Host
}
}
impl Backend {
impl Target {
const fn as_str(&self) -> &'static str {
match self {
Backend::Host => "host",
Backend::X86_32 => "x86_32",
Backend::X86_64 => "x86_64",
Backend::Wasm32 => "wasm32",
Target::Host => "host",
Target::X86_32 => "x86_32",
Target::X86_64 => "x86_64",
Target::Wasm32 => "wasm32",
}
}
/// NOTE keep up to date!
const OPTIONS: &'static [&'static str] = &[
Backend::Host.as_str(),
Backend::X86_32.as_str(),
Backend::X86_64.as_str(),
Backend::Wasm32.as_str(),
Target::Host.as_str(),
Target::X86_32.as_str(),
Target::X86_64.as_str(),
Target::Wasm32.as_str(),
];
fn to_triple(&self) -> Triple {
let mut triple = Triple::unknown();
match self {
Backend::Host => Triple::host(),
Backend::X86_32 => {
Target::Host => Triple::host(),
Target::X86_32 => {
triple.architecture = Architecture::X86_32(X86_32Architecture::I386);
triple.binary_format = BinaryFormat::Elf;
@ -532,13 +541,13 @@ impl Backend {
triple
}
Backend::X86_64 => {
Target::X86_64 => {
triple.architecture = Architecture::X86_64;
triple.binary_format = BinaryFormat::Elf;
triple
}
Backend::Wasm32 => {
Target::Wasm32 => {
triple.architecture = Architecture::Wasm32;
triple.binary_format = BinaryFormat::Wasm;
@ -548,21 +557,21 @@ impl Backend {
}
}
impl std::fmt::Display for Backend {
impl std::fmt::Display for Target {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl std::str::FromStr for Backend {
impl std::str::FromStr for Target {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"host" => Ok(Backend::Host),
"x86_32" => Ok(Backend::X86_32),
"x86_64" => Ok(Backend::X86_64),
"wasm32" => Ok(Backend::Wasm32),
"host" => Ok(Target::Host),
"x86_32" => Ok(Target::X86_32),
"x86_64" => Ok(Target::X86_64),
"wasm32" => Ok(Target::Wasm32),
_ => Err(()),
}
}
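Aside: the "NOTE keep up to date!" comment on Target::OPTIONS above suggests a cheap guard. A hypothetical round-trip test (not part of this diff; it assumes the Target items above stay in scope) could look like this:

#[cfg(test)]
mod target_round_trip {
    use super::Target;
    use std::str::FromStr;

    #[test]
    fn options_round_trip() {
        // Every name listed in OPTIONS should parse, and parsing should
        // give back the same string via as_str().
        for name in Target::OPTIONS.iter().copied() {
            let parsed = Target::from_str(name).expect("OPTIONS entry must parse");
            assert_eq!(parsed.as_str(), name);
        }
    }
}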

View file

@ -3,7 +3,7 @@ use roc_cli::{
build_app, docs, format, BuildConfig, FormatMode, CMD_BUILD, CMD_CHECK, CMD_DOCS, CMD_EDIT,
CMD_FORMAT, CMD_REPL, CMD_VERSION, DIRECTORY_OR_FILES, FLAG_CHECK, FLAG_TIME, ROC_FILE,
};
use roc_load::file::LoadingProblem;
use roc_load::LoadingProblem;
use std::fs::{self, FileType};
use std::io;
use std::path::{Path, PathBuf};
@ -63,10 +63,16 @@ fn main() -> io::Result<()> {
}
}
Some((CMD_REPL, _)) => {
roc_repl_cli::main()?;
#[cfg(feature = "llvm")]
{
roc_repl_cli::main()?;
// Exit 0 if the repl exited normally
Ok(0)
// Exit 0 if the repl exited normally
Ok(0)
}
#[cfg(not(feature = "llvm"))]
todo!("enable roc repl without llvm");
}
Some((CMD_EDIT, matches)) => {
match matches

View file

@ -12,9 +12,11 @@ extern crate indoc;
#[cfg(test)]
mod cli_run {
use cli_utils::helpers::{
example_file, examples_dir, extract_valgrind_errors, fixture_file, known_bad_file, run_cmd,
run_roc, run_with_valgrind, ValgrindError, ValgrindErrorXWhat,
example_file, examples_dir, extract_valgrind_errors, fixture_file, fixtures_dir,
known_bad_file, run_cmd, run_roc, run_with_valgrind, Out, ValgrindError,
ValgrindErrorXWhat,
};
use indoc::indoc;
use roc_test_utils::assert_multiline_str_eq;
use serial_test::serial;
use std::path::{Path, PathBuf};
@ -22,11 +24,11 @@ mod cli_run {
#[cfg(not(debug_assertions))]
use roc_collections::all::MutMap;
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
const TEST_SURGICAL_LINKER: bool = true;
// Surgical linker currently only supports linux.
#[cfg(not(target_os = "linux"))]
// Surgical linker currently only supports linux x86_64.
#[cfg(not(all(target_os = "linux", target_arch = "x86_64")))]
const TEST_SURGICAL_LINKER: bool = false;
#[cfg(not(target_os = "macos"))]
@ -49,26 +51,47 @@ mod cli_run {
}
fn strip_colors(str: &str) -> String {
use roc_reporting::report::*;
str.replace(RED_CODE, "")
.replace(WHITE_CODE, "")
.replace(BLUE_CODE, "")
.replace(YELLOW_CODE, "")
.replace(GREEN_CODE, "")
.replace(CYAN_CODE, "")
.replace(MAGENTA_CODE, "")
.replace(RESET_CODE, "")
.replace(BOLD_CODE, "")
.replace(UNDERLINE_CODE, "")
use roc_reporting::report::ANSI_STYLE_CODES;
str.replace(ANSI_STYLE_CODES.red, "")
.replace(ANSI_STYLE_CODES.green, "")
.replace(ANSI_STYLE_CODES.yellow, "")
.replace(ANSI_STYLE_CODES.blue, "")
.replace(ANSI_STYLE_CODES.magenta, "")
.replace(ANSI_STYLE_CODES.cyan, "")
.replace(ANSI_STYLE_CODES.white, "")
.replace(ANSI_STYLE_CODES.bold, "")
.replace(ANSI_STYLE_CODES.underline, "")
.replace(ANSI_STYLE_CODES.reset, "")
}
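Side note: an alternative to listing every style code in strip_colors is to drop ANSI SGR escape sequences generically. A rough, repo-independent sketch of that approach (illustrative only, not what this diff does):

// Drops sequences of the form ESC '[' ... 'm' (SGR color/style codes).
fn strip_ansi(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    let mut chars = s.chars().peekable();
    while let Some(c) = chars.next() {
        if c == '\u{1b}' && chars.peek() == Some(&'[') {
            // Consume up to and including the terminating 'm'.
            for c2 in chars.by_ref() {
                if c2 == 'm' {
                    break;
                }
            }
        } else {
            out.push(c);
        }
    }
    out
}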
fn check_compile_error(file: &Path, flags: &[&str], expected: &str) {
let compile_out = run_roc(&[&["check", file.to_str().unwrap()], &flags[..]].concat());
let compile_out = run_roc(&[&["check", file.to_str().unwrap()], flags].concat());
let err = compile_out.stdout.trim();
let err = strip_colors(&err);
let err = strip_colors(err);
assert_multiline_str_eq!(err, expected.into());
}
fn check_format_check_as_expected(file: &Path, expects_success_exit_code: bool) {
let flags = &["--check"];
let out = run_roc(&[&["format", file.to_str().unwrap()], &flags[..]].concat());
if expects_success_exit_code {
assert!(out.status.success());
} else {
assert!(!out.status.success());
}
}
fn build_example(file: &Path, flags: &[&str]) -> Out {
let compile_out = run_roc(&[&["build", file.to_str().unwrap()], flags].concat());
if !compile_out.stderr.is_empty() {
panic!("roc build had stderr: {}", compile_out.stderr);
}
assert!(compile_out.status.success(), "bad status {:?}", compile_out);
compile_out
}
fn check_output_with_stdin(
file: &Path,
stdin: &[&str],
@ -85,12 +108,7 @@ mod cli_run {
all_flags.extend_from_slice(&["--valgrind"]);
}
let compile_out = run_roc(&[&["build", file.to_str().unwrap()], &all_flags[..]].concat());
if !compile_out.stderr.is_empty() {
panic!("roc build had stderr: {}", compile_out.stderr);
}
assert!(compile_out.status.success(), "bad status {:?}", compile_out);
build_example(file, &all_flags[..]);
let out = if use_valgrind && ALLOW_VALGRIND {
let (valgrind_out, raw_xml) = if let Some(input_file) = input_file {
@ -176,7 +194,7 @@ mod cli_run {
) {
assert_eq!(input_file, None, "Wasm does not support input files");
let mut flags = flags.to_vec();
flags.push("--backend=wasm32");
flags.push("--target=wasm32");
let compile_out = run_roc(&[&["build", file.to_str().unwrap()], flags.as_slice()].concat());
if !compile_out.stderr.is_empty() {
@ -216,17 +234,33 @@ mod cli_run {
let file_name = example_file(dir_name, example.filename);
match example.executable_filename {
"hello-web" => {
"helloWeb" => {
// this is a web webassembly example, but we don't test with JS at the moment
eprintln!("WARNING: skipping testing example {} because the test is broken right now!", example.filename);
return;
}
"hello-swift" => {
"form" => {
// test is skipped until we upgrade to zig 0.9 / llvm 13
eprintln!("WARNING: skipping testing example {} because the test is broken right now!", example.filename);
return;
}
"helloSwift" => {
if cfg!(not(target_os = "macos")) {
eprintln!("WARNING: skipping testing example {} because it only works on MacOS.", example.filename);
return;
}
}
"hello-gui" => {
// Since this one requires opening a window, we do `roc build` on it but don't run it.
if cfg!(all(target_os = "linux", target_arch = "x86_64")) {
// The surgical linker can successfully link this on Linux, but the legacy linker errors!
build_example(&file_name, &["--optimize", "--roc-linker"]);
} else {
build_example(&file_name, &["--optimize"]);
}
return;
}
_ => {}
}
@ -295,56 +329,72 @@ mod cli_run {
// },
// ]
examples! {
hello_world:"hello-world" => Example {
filename: "Hello.roc",
executable_filename: "hello-world",
helloWorld:"hello-world" => Example {
filename: "helloWorld.roc",
executable_filename: "helloWorld",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
hello_zig:"hello-zig" => Example {
filename: "Hello.roc",
executable_filename: "hello-world",
helloC:"hello-world/c-platform" => Example {
filename: "helloC.roc",
executable_filename: "helloC",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
hello_rust:"hello-rust" => Example {
filename: "Hello.roc",
executable_filename: "hello-rust",
helloZig:"hello-world/zig-platform" => Example {
filename: "helloZig.roc",
executable_filename: "helloZig",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
hello_swift:"hello-swift" => Example {
filename: "Hello.roc",
executable_filename: "hello-swift",
stdin: &[],
input_file: None,
expected_ending:"Hello Swift, meet Roc\n",
use_valgrind: true,
},
hello_web:"hello-web" => Example {
filename: "Hello.roc",
executable_filename: "hello-web",
helloRust:"hello-world/rust-platform" => Example {
filename: "helloRust.roc",
executable_filename: "helloRust",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
fib:"fib" => Example {
filename: "Fib.roc",
executable_filename: "fib",
helloSwift:"hello-world/swift-platform" => Example {
filename: "helloSwift.roc",
executable_filename: "helloSwift",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
helloWeb:"hello-world/web-platform" => Example {
filename: "helloWeb.roc",
executable_filename: "helloWeb",
stdin: &[],
input_file: None,
expected_ending:"Hello, World!\n",
use_valgrind: true,
},
fib:"algorithms" => Example {
filename: "fibonacci.roc",
executable_filename: "fibonacci",
stdin: &[],
input_file: None,
expected_ending:"55\n",
use_valgrind: true,
},
quicksort:"quicksort" => Example {
filename: "Quicksort.roc",
gui:"gui" => Example {
filename: "Hello.roc",
executable_filename: "hello-gui",
stdin: &[],
input_file: None,
expected_ending: "",
use_valgrind: false,
},
quicksort:"algorithms" => Example {
filename: "quicksort.roc",
executable_filename: "quicksort",
stdin: &[],
input_file: None,
@ -359,9 +409,9 @@ mod cli_run {
// expected_ending: "[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2]\n",
// use_valgrind: true,
// },
effect:"effect" => Example {
filename: "Main.roc",
executable_filename: "effect-example",
effects:"interactive" => Example {
filename: "effects.roc",
executable_filename: "effects",
stdin: &["hi there!"],
input_file: None,
expected_ending: "hi there!\nIt is known\n",
@ -375,16 +425,16 @@ mod cli_run {
// expected_ending: "",
// use_valgrind: true,
// },
cli:"cli" => Example {
cli:"interactive" => Example {
filename: "form.roc",
executable_filename: "form",
stdin: &["Giovanni\n", "Giorgio\n"],
input_file: None,
expected_ending: "Hi, Giovanni Giorgio! 👋\n",
use_valgrind: true,
use_valgrind: false,
},
tui:"tui" => Example {
filename: "Main.roc",
tui:"interactive" => Example {
filename: "tui.roc",
executable_filename: "tui",
stdin: &["foo\n"], // NOTE: adding more lines leads to memory leaks
input_file: None,
@ -414,7 +464,7 @@ mod cli_run {
stdin: &[],
input_file: Some("examples/hello.false"),
expected_ending:"Hello, World!\n",
use_valgrind: true,
use_valgrind: false,
}
},
}
@ -528,7 +578,7 @@ mod cli_run {
&file_name,
benchmark.stdin,
benchmark.executable_filename,
&["--backend=x86_32"],
&["--target=x86_32"],
benchmark.input_file.and_then(|file| Some(examples_dir("benchmarks").join(file))),
benchmark.expected_ending,
benchmark.use_valgrind,
@ -538,7 +588,7 @@ mod cli_run {
&file_name,
benchmark.stdin,
benchmark.executable_filename,
&["--backend=x86_32", "--optimize"],
&["--target=x86_32", "--optimize"],
benchmark.input_file.and_then(|file| Some(examples_dir("benchmarks").join(file))),
benchmark.expected_ending,
benchmark.use_valgrind,
@ -669,6 +719,22 @@ mod cli_run {
if entry.file_type().unwrap().is_dir() {
let example_dir_name = entry.file_name().into_string().unwrap();
// TODO: Improve this with a more-dynamic approach. (Read all subdirectories?)
// Some hello-world examples live in nested directories
if example_dir_name == "hello-world" {
for sub_dir in [
"c-platform",
"rust-platform",
"swift-platform",
"web-platform",
"zig-platform",
] {
all_examples.remove(format!("{}/{}", example_dir_name, sub_dir).as_str()).unwrap_or_else(|| {
panic!("The example directory {}/{}/{} does not have any corresponding tests in cli_run. Please add one, so if it ever stops working, we'll know about it right away!", examples_dir, example_dir_name, sub_dir);
});
}
}
// We test benchmarks separately
if example_dir_name != "benchmarks" {
all_examples.remove(example_dir_name.as_str()).unwrap_or_else(|| {
@ -863,6 +929,25 @@ mod cli_run {
),
);
}
#[test]
fn format_check_good() {
check_format_check_as_expected(&fixture_file("format", "Formatted.roc"), true);
}
#[test]
fn format_check_reformatting_needed() {
check_format_check_as_expected(&fixture_file("format", "NotFormatted.roc"), false);
}
#[test]
fn format_check_folders() {
// This fails, because "NotFormatted.roc" is present in this folder
check_format_check_as_expected(&fixtures_dir("format"), false);
// This doesn't fail, since only "Formatted.roc" and non-roc files are present in this folder
check_format_check_as_expected(&fixtures_dir("format/formatted_directory"), true);
}
}
#[allow(dead_code)]

View file

@ -0,0 +1,6 @@
app "formatted"
packages { pf: "platform" } imports []
provides [ main ] to pf
main : Str
main = Dep1.value1 {}

View file

@ -0,0 +1 @@
This is not a .roc file, and should be ignored by the formatter.

View file

@ -0,0 +1,6 @@
app "formatted"
packages { pf: "platform" }
provides [ main ] to pf
main : Str
main = Dep1.value1 {}

View file

@ -0,0 +1,6 @@
app "formatted"
packages { pf: "platform" } imports []
provides [ main ] to pf
main : Str
main = Dep1.value1 {}

View file

@ -0,0 +1 @@
This is not a .roc file, and should be ignored by the formatter.

View file

@ -0,0 +1,3 @@
This is not a .roc file, and should be ignored by the formatter.
This file does not have an extension, to ensure the formatter does not simply test with `ends_with(".roc")`

View file

@ -0,0 +1 @@
This is not a .roc file, and should be ignored by the formatter.

View file

@ -1,4 +1,4 @@
platform "examples/multi-module"
platform "multi-module"
requires {}{ main : Str }
exposes []
packages {}

View file

@ -24,10 +24,11 @@ const Allocator = mem.Allocator;
extern fn roc__mainForHost_1_exposed() RocStr;
extern fn malloc(size: usize) callconv(.C) ?*c_void;
extern fn realloc(c_ptr: [*]align(@alignOf(u128)) u8, size: usize) callconv(.C) ?*c_void;
extern fn free(c_ptr: [*]align(@alignOf(u128)) u8) callconv(.C) void;
extern fn memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void;
extern fn memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void;
export fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*c_void {
return malloc(size);
@ -41,6 +42,14 @@ export fn roc_dealloc(c_ptr: *c_void, alignment: u32) callconv(.C) void {
free(@alignCast(16, @ptrCast([*]u8, c_ptr)));
}
export fn roc_memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void {
return memcpy(dst, src, size);
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {
return memset(dst, value, size);
}
export fn roc_panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
const msg = @ptrCast([*:0]const u8, c_ptr);
@ -61,15 +70,15 @@ pub export fn main() i32 {
// actually call roc to populate the callresult
const callresult = roc__mainForHost_1_exposed();
// end time
var ts2: std.os.timespec = undefined;
std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts2) catch unreachable;
// stdout the result
stdout.print("{s}\n", .{callresult.asSlice()}) catch unreachable;
callresult.deinit();
// end time
var ts2: std.os.timespec = undefined;
std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts2) catch unreachable;
const delta = to_seconds(ts2) - to_seconds(ts1);
stderr.print("runtime: {d:.3}ms\n", .{delta * 1000}) catch unreachable;

View file

@ -1,4 +1,4 @@
platform "examples/multi-dep-thunk"
platform "multi-dep-thunk"
requires {}{ main : Str }
exposes []
packages {}

View file

@ -27,6 +27,8 @@ extern fn roc__mainForHost_1_exposed() RocStr;
extern fn malloc(size: usize) callconv(.C) ?*c_void;
extern fn realloc(c_ptr: [*]align(@alignOf(u128)) u8, size: usize) callconv(.C) ?*c_void;
extern fn free(c_ptr: [*]align(@alignOf(u128)) u8) callconv(.C) void;
extern fn memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void;
extern fn memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void;
export fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*c_void {
return malloc(size);
@ -40,6 +42,14 @@ export fn roc_dealloc(c_ptr: *c_void, alignment: u32) callconv(.C) void {
free(@alignCast(16, @ptrCast([*]u8, c_ptr)));
}
export fn roc_memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void {
return memcpy(dst, src, size);
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {
return memset(dst, value, size);
}
export fn roc_panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
const msg = @ptrCast([*:0]const u8, c_ptr);
@ -60,15 +70,15 @@ pub export fn main() i32 {
// actually call roc to populate the callresult
const callresult = roc__mainForHost_1_exposed();
// end time
var ts2: std.os.timespec = undefined;
std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts2) catch unreachable;
// stdout the result
stdout.print("{s}\n", .{callresult.asSlice()}) catch unreachable;
callresult.deinit();
// end time
var ts2: std.os.timespec = undefined;
std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts2) catch unreachable;
const delta = to_seconds(ts2) - to_seconds(ts1);
stderr.print("runtime: {d:.3}ms\n", .{delta * 1000}) catch unreachable;

View file

@ -1 +1 @@
../../../examples/cli/platform
../../../examples/interactive/cli-platform

14 cli_utils/Cargo.lock generated
View file

@ -897,6 +897,12 @@ version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0"
[[package]]
name = "dunce"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453440c271cf5577fd2a40e4942540cb7d0d2f85e27c8d07dd0023c925a67541"
[[package]]
name = "either"
version = "1.6.1"
@ -2504,6 +2510,7 @@ dependencies = [
name = "roc_builtins"
version = "0.1.0"
dependencies = [
"dunce",
"roc_collections",
"roc_module",
"roc_region",
@ -2518,6 +2525,7 @@ dependencies = [
"bumpalo",
"roc_builtins",
"roc_collections",
"roc_error_macros",
"roc_module",
"roc_parse",
"roc_problem",
@ -2587,6 +2595,7 @@ dependencies = [
"roc_builtins",
"roc_can",
"roc_collections",
"roc_error_macros",
"roc_module",
"roc_parse",
"roc_region",
@ -2687,6 +2696,7 @@ dependencies = [
"roc_mono",
"roc_problem",
"roc_region",
"roc_reporting",
"roc_solve",
"roc_target",
"roc_types",
@ -2760,6 +2770,7 @@ dependencies = [
"roc_can",
"roc_collections",
"roc_constrain",
"roc_error_macros",
"roc_module",
"roc_mono",
"roc_parse",
@ -2771,7 +2782,6 @@ dependencies = [
"roc_types",
"roc_unify",
"ven_pretty",
"wasm-bindgen",
]
[[package]]
@ -2880,7 +2890,6 @@ dependencies = [
"roc_reporting",
"roc_target",
"roc_types",
"wasm-bindgen",
]
[[package]]
@ -2913,7 +2922,6 @@ dependencies = [
"roc_region",
"roc_types",
"roc_unify",
"wasm-bindgen",
]
[[package]]

View file

@ -20,4 +20,6 @@ serde = { version = "1.0.130", features = ["derive"] }
serde-xml-rs = "0.5.1"
strip-ansi-escapes = "0.1.1"
tempfile = "3.2.0"
[target.'cfg(unix)'.dependencies]
rlimit = "0.6.2"

View file

@ -1,11 +1,12 @@
use crate::helpers::{example_file, run_cmd, run_roc};
use criterion::{black_box, measurement::Measurement, BenchmarkGroup};
use rlimit::{setrlimit, Resource};
use std::path::Path;
use std::{path::Path, thread};
const CFOLD_STACK_SIZE: usize = 8192 * 100000;
fn exec_bench_w_input<T: Measurement>(
file: &Path,
stdin_str: &str,
stdin_str: &'static str,
executable_filename: &str,
expected_ending: &str,
bench_group_opt: Option<&mut BenchmarkGroup<T>>,
@ -31,7 +32,7 @@ fn exec_bench_w_input<T: Measurement>(
fn check_cmd_output(
file: &Path,
stdin_str: &str,
stdin_str: &'static str,
executable_filename: &str,
expected_ending: &str,
) {
@ -41,11 +42,16 @@ fn check_cmd_output(
.unwrap()
.to_string();
if cmd_str.contains("cfold") {
increase_stack_limit();
}
let out = if cmd_str.contains("cfold") {
let child = thread::Builder::new()
.stack_size(CFOLD_STACK_SIZE)
.spawn(move || run_cmd(&cmd_str, &[stdin_str], &[]))
.unwrap();
let out = run_cmd(&cmd_str, &[stdin_str], &[]);
child.join().unwrap()
} else {
run_cmd(&cmd_str, &[stdin_str], &[])
};
if !&out.stdout.ends_with(expected_ending) {
panic!(
@ -69,7 +75,20 @@ fn bench_cmd<T: Measurement>(
.to_string();
if cmd_str.contains("cfold") {
increase_stack_limit();
#[cfg(unix)]
use rlimit::{setrlimit, Resource};
#[cfg(unix)]
setrlimit(
Resource::STACK,
CFOLD_STACK_SIZE as u64,
CFOLD_STACK_SIZE as u64,
)
.expect("Failed to increase stack limit.");
#[cfg(windows)]
println!("Skipping the cfold benchmark on windows, I can't adjust the stack size and use criterion at the same time.");
#[cfg(windows)]
return;
}
if let Some(bench_group) = bench_group_opt {
@ -85,12 +104,6 @@ fn bench_cmd<T: Measurement>(
}
}
fn increase_stack_limit() {
let new_stack_limit = 8192 * 100000;
setrlimit(Resource::STACK, new_stack_limit, new_stack_limit)
.expect("Failed to increase stack limit.");
}
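For context on the change above: instead of raising the process stack limit with rlimit (which the removed increase_stack_limit did, and which is unix-only), the benchmark now runs the stack-hungry cfold command on a thread spawned with an explicit stack size. A minimal standalone sketch of that pattern, with an illustrative recursion that is not from the repo:

use std::thread;

// Deliberately deep recursion that would overflow a default-sized stack.
fn depth(n: u64) -> u64 {
    if n == 0 { 0 } else { 1 + depth(n - 1) }
}

fn main() {
    let child = thread::Builder::new()
        .stack_size(8192 * 100_000) // same figure as CFOLD_STACK_SIZE above
        .spawn(|| depth(2_000_000))
        .expect("failed to spawn worker thread");
    println!("recursed {} levels deep", child.join().unwrap());
}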
pub fn bench_nqueens<T: Measurement>(bench_group_opt: Option<&mut BenchmarkGroup<T>>) {
exec_bench_w_input(
&example_file("benchmarks", "NQueens.roc"),

View file

@ -4,7 +4,7 @@ version = "0.1.0"
authors = ["The Roc Contributors"]
license = "UPL-1.0"
edition = "2018"
description = "Our own markup language for Roc code. Used by the editor and (soon) the docs."
description = "Our own markup language for Roc code. Used by the editor and the docs."
[dependencies]
roc_ast = { path = "../ast" }

View file

@ -3,3 +3,4 @@ pub mod markup;
pub mod markup_error;
pub mod slow_pool;
pub mod syntax_highlight;
pub mod underline_style;

View file

@ -55,8 +55,13 @@ pub enum Attribute {
HighlightStart { highlight_start: HighlightStart },
HighlightEnd { highlight_end: HighlightEnd },
UnderlineStart { underline_start: UnderlineStart },
UnderlineEnd { underline_end: UnderlineEnd },
Underline { underline_spec: UnderlineSpec },
}
#[derive(Debug)]
pub enum UnderlineSpec {
Partial { start: usize, end: usize },
Full,
}
#[derive(Debug, Default)]

View file

@ -1,164 +1,167 @@
use roc_ast::lang::core::{ast::ASTNodeId, expr::expr2::ExprId};
use crate::{
slow_pool::{MarkNodeId, SlowPool},
syntax_highlight::HighlightStyle,
};
use crate::{slow_pool::MarkNodeId, syntax_highlight::HighlightStyle};
use super::{
attribute::Attributes,
nodes::MarkupNode,
nodes::{self, make_nested_mn},
};
use super::{attribute::Attributes, nodes, nodes::MarkupNode};
pub fn new_equals_mn(ast_node_id: ASTNodeId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::EQUALS.to_owned(),
ast_node_id,
syn_high_style: HighlightStyle::Operator,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_equals_mn() -> MarkupNode {
common_text_node(nodes::EQUALS.to_owned(), HighlightStyle::Operator, 0)
}
pub fn new_comma_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
new_comma_mn_ast(ASTNodeId::AExprId(expr_id), parent_id_opt)
pub fn new_comma_mn() -> MarkupNode {
common_text_node(nodes::COMMA.to_owned(), HighlightStyle::Operator, 0)
}
pub fn new_comma_mn_ast(ast_node_id: ASTNodeId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::COMMA.to_owned(),
ast_node_id,
syn_high_style: HighlightStyle::Comma,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_dot_mn() -> MarkupNode {
common_text_node(nodes::DOT.to_owned(), HighlightStyle::Operator, 0)
}
pub fn new_blank_mn(ast_node_id: ASTNodeId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
pub fn new_blank_mn() -> MarkupNode {
MarkupNode::Blank {
ast_node_id,
attributes: Attributes::default(),
parent_id_opt,
parent_id_opt: None,
newlines_at_end: 0,
}
}
pub fn new_blank_mn_w_nls(
ast_node_id: ASTNodeId,
parent_id_opt: Option<MarkNodeId>,
nr_of_newlines: usize,
) -> MarkupNode {
pub fn new_blank_mn_w_nls(nr_of_newlines: usize) -> MarkupNode {
MarkupNode::Blank {
ast_node_id,
attributes: Attributes::default(),
parent_id_opt,
parent_id_opt: None,
newlines_at_end: nr_of_newlines,
}
}
pub fn new_colon_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
new_operator_mn(nodes::COLON.to_owned(), expr_id, parent_id_opt)
pub fn new_colon_mn() -> MarkupNode {
new_operator_mn(nodes::COLON.to_owned())
}
pub fn new_operator_mn(
content: String,
expr_id: ExprId,
parent_id_opt: Option<MarkNodeId>,
) -> MarkupNode {
MarkupNode::Text {
content,
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::Operator,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_operator_mn(content: String) -> MarkupNode {
common_text_node(content, HighlightStyle::Operator, 0)
}
pub fn new_left_accolade_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::LEFT_ACCOLADE.to_owned(),
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::Bracket,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_left_accolade_mn() -> MarkupNode {
common_text_node(nodes::LEFT_ACCOLADE.to_owned(), HighlightStyle::Bracket, 0)
}
pub fn new_right_accolade_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::RIGHT_ACCOLADE.to_owned(),
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::Bracket,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_right_accolade_mn() -> MarkupNode {
common_text_node(nodes::RIGHT_ACCOLADE.to_owned(), HighlightStyle::Bracket, 0)
}
pub fn new_left_square_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::LEFT_SQUARE_BR.to_owned(),
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::Bracket,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_left_square_mn() -> MarkupNode {
common_text_node(nodes::LEFT_SQUARE_BR.to_owned(), HighlightStyle::Bracket, 0)
}
pub fn new_right_square_mn(expr_id: ExprId, parent_id_opt: Option<MarkNodeId>) -> MarkupNode {
MarkupNode::Text {
content: nodes::RIGHT_SQUARE_BR.to_owned(),
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::Bracket,
attributes: Attributes::default(),
parent_id_opt,
newlines_at_end: 0,
}
pub fn new_right_square_mn() -> MarkupNode {
common_text_node(
nodes::RIGHT_SQUARE_BR.to_owned(),
HighlightStyle::Bracket,
0,
)
}
pub fn new_func_name_mn(content: String, expr_id: ExprId) -> MarkupNode {
MarkupNode::Text {
content,
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::FunctionName,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
}
pub fn new_func_name_mn(content: String) -> MarkupNode {
common_text_node(content, HighlightStyle::FunctionName, 0)
}
pub fn new_arg_name_mn(content: String, expr_id: ExprId) -> MarkupNode {
MarkupNode::Text {
content,
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::FunctionArgName,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
}
pub fn new_arg_name_mn(content: String) -> MarkupNode {
common_text_node(content, HighlightStyle::FunctionArgName, 0)
}
pub fn new_arrow_mn(ast_node_id: ASTNodeId, newlines_at_end: usize) -> MarkupNode {
MarkupNode::Text {
content: nodes::ARROW.to_owned(),
ast_node_id,
syn_high_style: HighlightStyle::Operator,
attributes: Attributes::default(),
parent_id_opt: None,
pub fn new_arrow_mn(newlines_at_end: usize) -> MarkupNode {
common_text_node(
nodes::ARROW.to_owned(),
HighlightStyle::Operator,
newlines_at_end,
}
)
}
pub fn new_comments_mn(
comments: String,
ast_node_id: ASTNodeId,
pub fn new_comments_mn(comment: String, newlines_at_end: usize) -> MarkupNode {
common_text_node(comment, HighlightStyle::Comment, newlines_at_end)
}
fn common_text_node(
content: String,
highlight_style: HighlightStyle,
newlines_at_end: usize,
) -> MarkupNode {
MarkupNode::Text {
content: comments,
ast_node_id,
syn_high_style: HighlightStyle::Comment,
content,
syn_high_style: highlight_style,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end,
}
}
pub const NEW_LINES_AFTER_DEF: usize = 2;
pub fn new_assign_mn(
val_name_mn_id: MarkNodeId,
equals_mn_id: MarkNodeId,
expr_mark_node_id: MarkNodeId,
) -> MarkupNode {
make_nested_mn(
vec![val_name_mn_id, equals_mn_id, expr_mark_node_id],
NEW_LINES_AFTER_DEF,
)
}
pub fn new_module_name_mn_id(mn_ids: Vec<MarkNodeId>, mark_node_pool: &mut SlowPool) -> MarkNodeId {
if mn_ids.len() == 1 {
*mn_ids.get(0).unwrap() // safe because we checked the length before
} else {
let nested_node = make_nested_mn(mn_ids, 0);
mark_node_pool.add(nested_node)
}
}
pub fn new_module_var_mn(
module_name_id: MarkNodeId,
dot_id: MarkNodeId,
ident_id: MarkNodeId,
) -> MarkupNode {
make_nested_mn(vec![module_name_id, dot_id, ident_id], 0)
}
pub fn if_mn() -> MarkupNode {
keyword_mn("if ")
}
pub fn then_mn() -> MarkupNode {
keyword_mn(" then ")
}
pub fn else_mn() -> MarkupNode {
keyword_mn(" else ")
}
fn keyword_mn(keyword: &str) -> MarkupNode {
common_text_node(keyword.to_owned(), HighlightStyle::Keyword, 0)
}
pub fn new_if_expr_mn(
if_mn_id: MarkNodeId,
cond_expr_mn_id: MarkNodeId,
then_mn_id: MarkNodeId,
then_expr_mn_id: MarkNodeId,
else_mn_id: MarkNodeId,
else_expr_mn_id: MarkNodeId,
) -> MarkupNode {
make_nested_mn(
vec![
if_mn_id,
cond_expr_mn_id,
then_mn_id,
then_expr_mn_id,
else_mn_id,
else_expr_mn_id,
],
1,
)
}

View file

@ -7,6 +7,7 @@ use roc_module::symbol::Interns;
use crate::{
markup::{
convert::{from_def2::def2_to_markup, from_header::header_to_markup},
mark_id_ast_id_map::MarkIdAstIdMap,
nodes::set_parent_for_all,
},
slow_pool::{MarkNodeId, SlowPool},
@ -17,8 +18,13 @@ pub fn ast_to_mark_nodes<'a>(
ast: &AST,
mark_node_pool: &mut SlowPool,
interns: &Interns,
) -> ASTResult<Vec<MarkNodeId>> {
let mut all_mark_node_ids = vec![header_to_markup(&ast.header, mark_node_pool)];
) -> ASTResult<(Vec<MarkNodeId>, MarkIdAstIdMap)> {
let mut mark_id_ast_id_map = MarkIdAstIdMap::default();
let mut all_mark_node_ids = vec![header_to_markup(
&ast.header,
mark_node_pool,
&mut mark_id_ast_id_map,
)];
for &def_id in ast.def_ids.iter() {
// for debugging
@ -26,12 +32,19 @@ pub fn ast_to_mark_nodes<'a>(
let def2 = env.pool.get(def_id);
let expr2_markup_id = def2_to_markup(env, def2, def_id, mark_node_pool, interns)?;
let expr2_markup_id = def2_to_markup(
env,
def2,
def_id,
mark_node_pool,
&mut mark_id_ast_id_map,
interns,
)?;
set_parent_for_all(expr2_markup_id, mark_node_pool);
all_mark_node_ids.push(expr2_markup_id);
}
Ok(all_mark_node_ids)
Ok((all_mark_node_ids, mark_id_ast_id_map))
}

View file

@ -1,7 +1,9 @@
use crate::{
markup::{
common_nodes::new_blank_mn_w_nls,
top_level_def::{tld_mark_node, tld_w_comments_mark_node},
mark_id_ast_id_map::MarkIdAstIdMap,
nodes::MarkupNode,
top_level_def::{assignment_mark_node, tld_w_comments_mark_node},
},
slow_pool::{MarkNodeId, SlowPool},
};
@ -20,11 +22,25 @@ use roc_ast::{
};
use roc_module::symbol::Interns;
pub fn add_node(
mark_node: MarkupNode,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> MarkNodeId {
let mark_node_id = mark_node_pool.add(mark_node);
mark_id_ast_id_map.insert(mark_node_id, ast_node_id);
mark_node_id
}
pub fn def2_to_markup<'a>(
env: &mut Env<'a>,
def2: &Def2,
def2_node_id: DefId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
interns: &Interns,
) -> ASTResult<MarkNodeId> {
let ast_node_id = ASTNodeId::ADefId(def2_node_id);
@ -39,45 +55,81 @@ pub fn def2_to_markup<'a>(
env.pool.get(*expr_id),
*expr_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
0,
)?;
let tld_mn =
tld_mark_node(*identifier_id, expr_mn_id, ast_node_id, mark_node_pool, env)?;
let tld_mn = assignment_mark_node(
*identifier_id,
expr_mn_id,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
env,
)?;
mark_node_pool.add(tld_mn)
add_node(tld_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Def2::Blank => mark_node_pool.add(new_blank_mn_w_nls(ast_node_id, None, 2)),
Def2::Blank => add_node(
new_blank_mn_w_nls(2),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
),
Def2::CommentsBefore { comments, def_id } => {
let inner_def = env.pool.get(*def_id);
let inner_def_mark_node_id =
def2_to_markup(env, inner_def, *def_id, mark_node_pool, interns)?;
let inner_def_mark_node_id = def2_to_markup(
env,
inner_def,
*def_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
)?;
let full_mark_node = tld_w_comments_mark_node(
comments.clone(),
inner_def_mark_node_id,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
true,
)?;
mark_node_pool.add(full_mark_node)
add_node(
full_mark_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)
}
Def2::CommentsAfter { def_id, comments } => {
let inner_def = env.pool.get(*def_id);
let inner_def_mark_node_id =
def2_to_markup(env, inner_def, *def_id, mark_node_pool, interns)?;
let inner_def_mark_node_id = def2_to_markup(
env,
inner_def,
*def_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
)?;
let full_mark_node = tld_w_comments_mark_node(
comments.clone(),
inner_def_mark_node_id,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
false,
)?;
mark_node_pool.add(full_mark_node)
add_node(
full_mark_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)
}
};

View file

@ -6,6 +6,7 @@ use crate::{
new_left_accolade_mn, new_left_square_mn, new_operator_mn, new_right_accolade_mn,
new_right_square_mn,
},
mark_id_ast_id_map::MarkIdAstIdMap,
nodes::{
get_string, join_mark_nodes_commas, join_mark_nodes_spaces, new_markup_node, MarkupNode,
},
@ -32,12 +33,15 @@ use roc_ast::{
};
use roc_module::{module_err::ModuleResult, symbol::Interns};
use super::from_def2::add_node;
// make Markup Nodes: generate String representation, assign Highlighting Style
pub fn expr2_to_markup<'a>(
env: &Env<'a>,
expr2: &Expr2,
expr2_node_id: ExprId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
interns: &Interns,
indent_level: usize,
) -> ASTResult<MarkNodeId> {
@ -58,18 +62,30 @@ pub fn expr2_to_markup<'a>(
ast_node_id,
HighlightStyle::Number,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
)
}
Expr2::Str(text) => {
let content = format!("\"{}\"", text.as_str(env.pool));
new_markup_node(
with_indent(indent_level, &content),
ast_node_id,
HighlightStyle::String,
mark_node_pool,
string_mark_node(
&content,
indent_level,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)
}
Expr2::SmallStr(array_str) => {
let content = format!("\"{}\"", array_str.as_str());
string_mark_node(
&content,
indent_level,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)
}
Expr2::GlobalTag { name, .. } => new_markup_node(
@ -77,12 +93,20 @@ pub fn expr2_to_markup<'a>(
ast_node_id,
HighlightStyle::Type,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
),
Expr2::Call { args, expr_id, .. } => {
let expr = env.pool.get(*expr_id);
let fun_call_mark_id =
expr2_to_markup(env, expr, *expr_id, mark_node_pool, interns, indent_level)?;
let fun_call_mark_id = expr2_to_markup(
env,
expr,
*expr_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
indent_level,
)?;
let arg_expr_ids: Vec<ExprId> =
args.iter(env.pool).map(|(_, arg_id)| *arg_id).collect();
@ -92,24 +116,31 @@ pub fn expr2_to_markup<'a>(
.map(|arg_id| {
let arg_expr = env.pool.get(*arg_id);
expr2_to_markup(env, arg_expr, *arg_id, mark_node_pool, interns, 0)
expr2_to_markup(
env,
arg_expr,
*arg_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
0,
)
})
.collect::<ASTResult<Vec<MarkNodeId>>>()?;
let mut args_with_sapces =
join_mark_nodes_spaces(arg_call_mark_ids, true, ast_node_id, mark_node_pool);
join_mark_nodes_spaces(arg_call_mark_ids, true, mark_node_pool);
let mut children_ids = vec![fun_call_mark_id];
children_ids.append(&mut args_with_sapces);
let call_node = MarkupNode::Nested {
ast_node_id,
children_ids,
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(call_node)
add_node(call_node, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Expr2::Var(symbol) => {
let text = symbol.fully_qualified(interns, env.home);
@ -119,12 +150,17 @@ pub fn expr2_to_markup<'a>(
ast_node_id,
HighlightStyle::Value,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
)
}
Expr2::List { elems, .. } => {
let mut children_ids =
vec![mark_node_pool.add(new_left_square_mn(expr2_node_id, None))];
let mut children_ids = vec![add_node(
new_left_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)];
let indexed_node_ids: Vec<(usize, ExprId)> =
elems.iter(env.pool).copied().enumerate().collect();
@ -137,43 +173,66 @@ pub fn expr2_to_markup<'a>(
sub_expr2,
*node_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
indent_level,
)?);
if idx + 1 < elems.len() {
children_ids.push(mark_node_pool.add(new_comma_mn(expr2_node_id, None)));
children_ids.push(add_node(
new_comma_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
));
}
}
children_ids.push(mark_node_pool.add(new_right_square_mn(expr2_node_id, None)));
let list_node = MarkupNode::Nested {
children_ids.push(add_node(
new_right_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
));
let list_mn = MarkupNode::Nested {
children_ids,
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(list_node)
add_node(list_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Expr2::EmptyRecord => {
let children_ids = vec![
mark_node_pool.add(new_left_accolade_mn(expr2_node_id, None)),
mark_node_pool.add(new_right_accolade_mn(expr2_node_id, None)),
add_node(
new_left_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
),
add_node(
new_right_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
),
];
let record_node = MarkupNode::Nested {
ast_node_id,
let record_mn = MarkupNode::Nested {
children_ids,
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(record_node)
add_node(record_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Expr2::Record { fields, .. } => {
let mut children_ids =
vec![mark_node_pool.add(new_left_accolade_mn(expr2_node_id, None))];
let mut children_ids = vec![add_node(
new_left_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
)];
for (idx, field_node_id) in fields.iter_node_ids().enumerate() {
let record_field = env.pool.get(field_node_id);
@ -185,6 +244,7 @@ pub fn expr2_to_markup<'a>(
ast_node_id,
HighlightStyle::RecordField,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
));
@ -192,7 +252,12 @@ pub fn expr2_to_markup<'a>(
RecordField::InvalidLabelOnly(_, _) => (),
RecordField::LabelOnly(_, _, _) => (),
RecordField::LabeledValue(_, _, sub_expr2_node_id) => {
children_ids.push(mark_node_pool.add(new_colon_mn(expr2_node_id, None)));
children_ids.push(add_node(
new_colon_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
));
let sub_expr2 = env.pool.get(*sub_expr2_node_id);
children_ids.push(expr2_to_markup(
@ -200,6 +265,7 @@ pub fn expr2_to_markup<'a>(
sub_expr2,
*sub_expr2_node_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
indent_level,
)?);
@ -207,22 +273,36 @@ pub fn expr2_to_markup<'a>(
}
if idx + 1 < fields.len() {
children_ids.push(mark_node_pool.add(new_comma_mn(expr2_node_id, None)));
children_ids.push(add_node(
new_comma_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
));
}
}
children_ids.push(mark_node_pool.add(new_right_accolade_mn(expr2_node_id, None)));
let record_node = MarkupNode::Nested {
children_ids.push(add_node(
new_right_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
));
let record_mn = MarkupNode::Nested {
children_ids,
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(record_node)
add_node(record_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Expr2::Blank => mark_node_pool.add(new_blank_mn(ast_node_id, None)),
Expr2::Blank => add_node(
new_blank_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
),
Expr2::LetValue {
def_id,
body_id: _,
@ -236,16 +316,21 @@ pub fn expr2_to_markup<'a>(
let val_name_mn = MarkupNode::Text {
content: val_name,
ast_node_id,
syn_high_style: HighlightStyle::Value,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
};
let val_name_mn_id = mark_node_pool.add(val_name_mn);
let val_name_mn_id =
add_node(val_name_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map);
let equals_mn_id = mark_node_pool.add(new_equals_mn(ast_node_id, None));
let equals_mn_id = add_node(
new_equals_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let value_def = env.pool.get(*def_id);
@ -260,6 +345,7 @@ pub fn expr2_to_markup<'a>(
env.pool.get(*expr_id),
*expr_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
indent_level,
)?;
@ -267,14 +353,13 @@ pub fn expr2_to_markup<'a>(
let body_mn = mark_node_pool.get_mut(body_mn_id);
body_mn.add_newline_at_end();
let full_let_node = MarkupNode::Nested {
ast_node_id,
let full_let_mn = MarkupNode::Nested {
children_ids: vec![val_name_mn_id, equals_mn_id, body_mn_id],
parent_id_opt: None,
newlines_at_end: 1,
};
mark_node_pool.add(full_let_node)
add_node(full_let_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
other => {
unimplemented!(
@ -292,8 +377,13 @@ pub fn expr2_to_markup<'a>(
body_id,
extra: _,
} => {
let backslash_mn = new_operator_mn("\\".to_string(), expr2_node_id, None);
let backslash_mn_id = mark_node_pool.add(backslash_mn);
let backslash_mn = new_operator_mn("\\".to_string());
let backslash_mn_id = add_node(
backslash_mn,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let arg_names: Vec<&str> = args
.iter(env.pool)
@ -321,31 +411,31 @@ pub fn expr2_to_markup<'a>(
let arg_mark_nodes = arg_names
.iter()
.map(|arg_name| new_arg_name_mn(arg_name.to_string(), expr2_node_id))
.map(|arg_name| new_arg_name_mn(arg_name.to_string()))
.collect_vec();
let args_with_commas: Vec<MarkupNode> =
join_mark_nodes_commas(arg_mark_nodes, ASTNodeId::AExprId(expr2_node_id));
let args_with_commas: Vec<MarkupNode> = join_mark_nodes_commas(arg_mark_nodes);
let mut args_with_commas_ids: Vec<MarkNodeId> = args_with_commas
.into_iter()
.map(|mark_node| mark_node_pool.add(mark_node))
.map(|mark_node| {
add_node(mark_node, ast_node_id, mark_node_pool, mark_id_ast_id_map)
})
.collect();
let arrow_mn = new_arrow_mn(ASTNodeId::AExprId(expr2_node_id), 1);
let arrow_mn_id = mark_node_pool.add(arrow_mn);
let arrow_mn = new_arrow_mn(1);
let arrow_mn_id = add_node(arrow_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map);
let mut children_ids = vec![backslash_mn_id];
children_ids.append(&mut args_with_commas_ids);
children_ids.push(arrow_mn_id);
let args_mn = MarkupNode::Nested {
ast_node_id: ASTNodeId::AExprId(expr2_node_id),
children_ids,
parent_id_opt: None,
newlines_at_end: 0,
};
let args_mn_id = mark_node_pool.add(args_mn);
let args_mn_id = add_node(args_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map);
let body_expr = env.pool.get(*body_id);
let body_mn_id = expr2_to_markup(
@ -353,24 +443,25 @@ pub fn expr2_to_markup<'a>(
body_expr,
*body_id,
mark_node_pool,
mark_id_ast_id_map,
interns,
indent_level + 1,
)?;
let function_node = MarkupNode::Nested {
ast_node_id,
let function_mn = MarkupNode::Nested {
children_ids: vec![args_mn_id, body_mn_id],
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(function_node)
add_node(function_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
Expr2::RuntimeError() => new_markup_node(
"RunTimeError".to_string(),
ast_node_id,
HighlightStyle::Blank,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
),
rest => todo!("implement expr2_to_markup for {:?}", rest),
@ -387,3 +478,20 @@ fn with_indent(indent_level: usize, some_str: &str) -> String {
full_string
}
fn string_mark_node(
content: &str,
indent_level: usize,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> MarkNodeId {
new_markup_node(
with_indent(indent_level, content),
ast_node_id,
HighlightStyle::String,
mark_node_pool,
mark_id_ast_id_map,
indent_level,
)
}

View file

@ -1,4 +1,4 @@
use roc_ast::lang::core::{ast::ASTNodeId, expr::expr2::ExprId, header::AppHeader};
use roc_ast::lang::core::{ast::ASTNodeId, header::AppHeader};
use crate::{
markup::{
@ -7,54 +7,82 @@ use crate::{
new_comma_mn, new_left_accolade_mn, new_left_square_mn, new_right_accolade_mn,
new_right_square_mn,
},
mark_id_ast_id_map::MarkIdAstIdMap,
nodes::{set_parent_for_all, MarkupNode},
},
slow_pool::{MarkNodeId, SlowPool},
syntax_highlight::HighlightStyle,
};
pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -> MarkNodeId {
use super::from_def2::add_node;
pub fn header_to_markup(
app_header: &AppHeader,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> MarkNodeId {
let expr_id = app_header.ast_node_id;
let ast_node_id = ASTNodeId::AExprId(expr_id);
let app_node_id = header_mn("app ".to_owned(), expr_id, mark_node_pool);
let app_node_id = header_mn(
"app ".to_owned(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let app_name_node_id = header_val_mn(
app_header.app_name.clone(),
expr_id,
ast_node_id,
HighlightStyle::String,
mark_node_pool,
mark_id_ast_id_map,
);
let full_app_node = MarkupNode::Nested {
ast_node_id,
children_ids: vec![app_node_id, app_name_node_id],
parent_id_opt: None,
newlines_at_end: 1,
};
let packages_node_id = header_mn(" packages ".to_owned(), expr_id, mark_node_pool);
let packages_node_id = header_mn(
" packages ".to_owned(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let pack_left_acc_node_id = mark_node_pool.add(new_left_accolade_mn(expr_id, None));
let pack_left_acc_node_id = add_node(
new_left_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let pack_base_node_id = header_val_mn(
"base: ".to_owned(),
expr_id,
ast_node_id,
HighlightStyle::RecordField,
mark_node_pool,
mark_id_ast_id_map,
);
let pack_val_node_id = header_val_mn(
app_header.packages_base.clone(),
expr_id,
ast_node_id,
HighlightStyle::String,
mark_node_pool,
mark_id_ast_id_map,
);
let pack_right_acc_node_id = mark_node_pool.add(new_right_accolade_mn(expr_id, None));
let pack_right_acc_node_id = add_node(
new_right_accolade_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let full_packages_node = MarkupNode::Nested {
ast_node_id,
children_ids: vec![
packages_node_id,
pack_left_acc_node_id,
@ -66,18 +94,34 @@ pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -
newlines_at_end: 1,
};
let imports_node_id = header_mn(" imports ".to_owned(), expr_id, mark_node_pool);
let imports_node_id = header_mn(
" imports ".to_owned(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let imports_left_square_node_id = mark_node_pool.add(new_left_square_mn(expr_id, None));
let imports_left_square_node_id = add_node(
new_left_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let mut import_child_ids: Vec<MarkNodeId> = add_header_mn_list(
&app_header.imports,
expr_id,
ast_node_id,
HighlightStyle::Import,
mark_node_pool,
mark_id_ast_id_map,
);
let imports_right_square_node_id = mark_node_pool.add(new_right_square_mn(expr_id, None));
let imports_right_square_node_id = add_node(
new_right_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let mut full_import_children = vec![imports_node_id, imports_left_square_node_id];
@ -85,26 +129,46 @@ pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -
full_import_children.push(imports_right_square_node_id);
let full_import_node = MarkupNode::Nested {
ast_node_id,
children_ids: full_import_children,
parent_id_opt: None,
newlines_at_end: 1,
};
let provides_node_id = header_mn(" provides ".to_owned(), expr_id, mark_node_pool);
let provides_node_id = header_mn(
" provides ".to_owned(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let provides_left_square_node_id = mark_node_pool.add(new_left_square_mn(expr_id, None));
let provides_left_square_node_id = add_node(
new_left_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let mut provides_val_node_ids: Vec<MarkNodeId> = add_header_mn_list(
&app_header.provides,
expr_id,
ast_node_id,
HighlightStyle::Provides,
mark_node_pool,
mark_id_ast_id_map,
);
let provides_right_square_node_id = mark_node_pool.add(new_right_square_mn(expr_id, None));
let provides_right_square_node_id = add_node(
new_right_square_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let provides_end_node_id = header_mn(" to base".to_owned(), expr_id, mark_node_pool);
let provides_end_node_id = header_mn(
" to base".to_owned(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let mut full_provides_children = vec![provides_node_id, provides_left_square_node_id];
@ -113,19 +177,37 @@ pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -
full_provides_children.push(provides_end_node_id);
let full_provides_node = MarkupNode::Nested {
ast_node_id,
children_ids: full_provides_children,
parent_id_opt: None,
newlines_at_end: 1,
};
let full_app_node_id = mark_node_pool.add(full_app_node);
let full_packages_node = mark_node_pool.add(full_packages_node);
let full_import_node_id = mark_node_pool.add(full_import_node);
let full_provides_node_id = mark_node_pool.add(full_provides_node);
let full_app_node_id = add_node(
full_app_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let full_packages_node = add_node(
full_packages_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let full_import_node_id = add_node(
full_import_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let full_provides_node_id = add_node(
full_provides_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let header_mark_node = MarkupNode::Nested {
ast_node_id,
children_ids: vec![
full_app_node_id,
full_packages_node,
@ -136,7 +218,12 @@ pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -
newlines_at_end: 1,
};
let header_mn_id = mark_node_pool.add(header_mark_node);
let header_mn_id = add_node(
header_mark_node,
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
set_parent_for_all(header_mn_id, mark_node_pool);
@ -146,60 +233,73 @@ pub fn header_to_markup(app_header: &AppHeader, mark_node_pool: &mut SlowPool) -
// Used for provides and imports
fn add_header_mn_list(
str_vec: &[String],
expr_id: ExprId,
ast_node_id: ASTNodeId,
highlight_style: HighlightStyle,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> Vec<MarkNodeId> {
let nr_of_elts = str_vec.len();
str_vec
.iter()
.enumerate()
.map(|(indx, provide_str)| {
.flat_map(|(indx, provide_str)| {
let provide_str = header_val_mn(
provide_str.to_owned(),
expr_id,
ast_node_id,
highlight_style,
mark_node_pool,
mark_id_ast_id_map,
);
if indx != nr_of_elts - 1 {
vec![provide_str, mark_node_pool.add(new_comma_mn(expr_id, None))]
vec![
provide_str,
add_node(
new_comma_mn(),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
),
]
} else {
vec![provide_str]
}
})
.flatten()
.collect()
}
fn header_mn(content: String, expr_id: ExprId, mark_node_pool: &mut SlowPool) -> MarkNodeId {
fn header_mn(
content: String,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> MarkNodeId {
let mark_node = MarkupNode::Text {
content,
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: HighlightStyle::PackageRelated,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(mark_node)
add_node(mark_node, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}
fn header_val_mn(
content: String,
expr_id: ExprId,
ast_node_id: ASTNodeId,
highlight_style: HighlightStyle,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
) -> MarkNodeId {
let mark_node = MarkupNode::Text {
content,
ast_node_id: ASTNodeId::AExprId(expr_id),
syn_high_style: highlight_style,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(mark_node)
add_node(mark_node, ast_node_id, mark_node_pool, mark_id_ast_id_map)
}

View file

@ -0,0 +1,29 @@
use std::collections::HashMap;
use roc_ast::lang::core::ast::ASTNodeId;
use crate::markup_error::MarkNodeIdWithoutCorrespondingASTNodeId;
use crate::{markup_error::MarkResult, slow_pool::MarkNodeId};
/// A HashMap is wrapped to allow for an easy swap-out with more performant alternatives.
#[derive(Debug, Default)]
pub struct MarkIdAstIdMap {
map: HashMap<MarkNodeId, ASTNodeId>,
}
impl MarkIdAstIdMap {
pub fn insert(&mut self, mn_id: MarkNodeId, ast_id: ASTNodeId) {
self.map.insert(mn_id, ast_id);
}
pub fn get(&self, mn_id: MarkNodeId) -> MarkResult<ASTNodeId> {
match self.map.get(&mn_id) {
Some(ast_node_id) => Ok(*ast_node_id),
None => MarkNodeIdWithoutCorrespondingASTNodeId {
node_id: mn_id,
keys_str: format!("{:?}", self.map.keys()),
}
.fail(),
}
}
}
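The pattern in this new file, a thin wrapper around HashMap so the backing store can later be swapped without touching call sites, looks roughly like this in isolation. The names below are illustrative stand-ins, not from the repo:

use std::collections::HashMap;

// Illustrative stand-ins for MarkNodeId / ASTNodeId.
type NodeId = usize;
type AstId = usize;

#[derive(Debug, Default)]
struct IdMap {
    // Private field: only insert/get are exposed, so the HashMap could be
    // replaced by a Vec or a faster map without changing callers.
    map: HashMap<NodeId, AstId>,
}

impl IdMap {
    fn insert(&mut self, node: NodeId, ast: AstId) {
        self.map.insert(node, ast);
    }

    fn get(&self, node: NodeId) -> Result<AstId, String> {
        self.map
            .get(&node)
            .copied()
            .ok_or_else(|| format!("no AST id recorded for mark node {}", node))
    }
}

fn main() {
    let mut ids = IdMap::default();
    ids.insert(7, 42);
    assert_eq!(ids.get(7), Ok(42));
    assert!(ids.get(99).is_err());
}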

View file

@ -1,5 +1,6 @@
pub mod attribute;
pub mod common_nodes;
pub mod convert;
pub mod mark_id_ast_id_map;
pub mod nodes;
pub mod top_level_def;

View file

@ -4,7 +4,10 @@ use crate::{
syntax_highlight::HighlightStyle,
};
use super::{attribute::Attributes, common_nodes::new_comma_mn_ast};
use super::{
attribute::Attributes, common_nodes::new_comma_mn, convert::from_def2::add_node,
mark_id_ast_id_map::MarkIdAstIdMap,
};
use crate::markup_error::{ExpectedTextNode, NestedNodeMissingChild, NestedNodeRequired};
use itertools::Itertools;
@ -18,42 +21,29 @@ use std::fmt;
#[derive(Debug)]
pub enum MarkupNode {
Nested {
ast_node_id: ASTNodeId,
children_ids: Vec<MarkNodeId>,
parent_id_opt: Option<MarkNodeId>,
newlines_at_end: usize,
},
Text {
content: String,
ast_node_id: ASTNodeId,
syn_high_style: HighlightStyle,
attributes: Attributes,
parent_id_opt: Option<MarkNodeId>,
newlines_at_end: usize,
},
Blank {
ast_node_id: ASTNodeId,
attributes: Attributes,
parent_id_opt: Option<MarkNodeId>,
newlines_at_end: usize,
},
Indent {
ast_node_id: ASTNodeId,
indent_level: usize,
parent_id_opt: Option<MarkNodeId>,
},
}
impl MarkupNode {
pub fn get_ast_node_id(&self) -> ASTNodeId {
match self {
MarkupNode::Nested { ast_node_id, .. } => *ast_node_id,
MarkupNode::Text { ast_node_id, .. } => *ast_node_id,
MarkupNode::Blank { ast_node_id, .. } => *ast_node_id,
MarkupNode::Indent { ast_node_id, .. } => *ast_node_id,
}
}
pub fn get_parent_id_opt(&self) -> Option<MarkNodeId> {
match self {
MarkupNode::Nested { parent_id_opt, .. } => *parent_id_opt,
@ -85,24 +75,24 @@ impl MarkupNode {
// return (index of child in list of children, closest ast index of child corresponding to ast node)
pub fn get_child_indices(
&self,
child_id: MarkNodeId,
mark_node_pool: &SlowPool,
mark_node_id: MarkNodeId,
ast_node_id: ASTNodeId,
mark_id_ast_id_map: &MarkIdAstIdMap,
) -> MarkResult<(usize, usize)> {
match self {
MarkupNode::Nested { children_ids, .. } => {
let mut mark_child_index_opt: Option<usize> = None;
let mut child_ids_with_ast: Vec<MarkNodeId> = Vec::new();
let self_ast_id = self.get_ast_node_id();
for (indx, &mark_child_id) in children_ids.iter().enumerate() {
if mark_child_id == child_id {
if mark_child_id == mark_node_id {
mark_child_index_opt = Some(indx);
}
let child_mark_node = mark_node_pool.get(mark_child_id);
let child_ast_node_id = mark_id_ast_id_map.get(mark_child_id)?;
// a node that points to the same ast_node as the parent is a ',', '[', ']'
// those are not "real" ast children
if child_mark_node.get_ast_node_id() != self_ast_id {
if child_ast_node_id != ast_node_id {
child_ids_with_ast.push(mark_child_id)
}
}
@ -145,7 +135,7 @@ impl MarkupNode {
}
} else {
NestedNodeMissingChild {
node_id: child_id,
node_id: mark_node_id,
children_ids: children_ids.clone(),
}
.fail()
@ -258,6 +248,14 @@ impl MarkupNode {
}
}
pub fn make_nested_mn(children_ids: Vec<MarkNodeId>, newlines_at_end: usize) -> MarkupNode {
MarkupNode::Nested {
children_ids,
parent_id_opt: None,
newlines_at_end,
}
}
pub fn get_string<'a>(env: &Env<'a>, pool_str: &PoolStr) -> String {
pool_str.as_str(env.pool).to_owned()
}
@ -269,6 +267,7 @@ pub const LEFT_SQUARE_BR: &str = "[ ";
pub const RIGHT_SQUARE_BR: &str = " ]";
pub const COLON: &str = ": ";
pub const COMMA: &str = ", ";
pub const DOT: &str = ".";
pub const STRING_QUOTES: &str = "\"\"";
pub const EQUALS: &str = " = ";
pub const ARROW: &str = " -> ";
@ -279,36 +278,34 @@ pub fn new_markup_node(
node_id: ASTNodeId,
highlight_style: HighlightStyle,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
indent_level: usize,
) -> MarkNodeId {
let content_node = MarkupNode::Text {
content: text,
ast_node_id: node_id,
syn_high_style: highlight_style,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
};
let content_node_id = mark_node_pool.add(content_node);
let content_node_id = add_node(content_node, node_id, mark_node_pool, mark_id_ast_id_map);
if indent_level > 0 {
let indent_node = MarkupNode::Indent {
ast_node_id: node_id,
indent_level,
parent_id_opt: None,
};
let indent_node_id = mark_node_pool.add(indent_node);
let indent_node_id = add_node(indent_node, node_id, mark_node_pool, mark_id_ast_id_map);
let nested_node = MarkupNode::Nested {
ast_node_id: node_id,
children_ids: vec![indent_node_id, content_node_id],
parent_id_opt: None,
newlines_at_end: 0,
};
mark_node_pool.add(nested_node)
add_node(nested_node, node_id, mark_node_pool, mark_id_ast_id_map)
} else {
content_node_id
}
@ -318,7 +315,6 @@ pub fn set_parent_for_all(markup_node_id: MarkNodeId, mark_node_pool: &mut SlowP
let node = mark_node_pool.get(markup_node_id);
if let MarkupNode::Nested {
ast_node_id: _,
children_ids,
parent_id_opt: _,
newlines_at_end: _,
@ -399,7 +395,7 @@ fn tree_as_string_helper(
.to_owned();
let child = mark_node_pool.get(child_id);
let child_str = format!("{}", mark_node_pool.get(child_id)).replace("\n", "\\n");
let child_str = format!("{}", mark_node_pool.get(child_id)).replace('\n', "\\n");
full_str.push_str(&format!("{} mn_id {}\n", child_str, child_id));
@ -426,7 +422,6 @@ pub fn get_root_mark_node_id(mark_node_id: MarkNodeId, mark_node_pool: &SlowPool
pub fn join_mark_nodes_spaces(
mark_nodes_ids: Vec<MarkNodeId>,
with_prepend: bool,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
) -> Vec<MarkNodeId> {
let space_range_max = if with_prepend {
@ -439,7 +434,6 @@ pub fn join_mark_nodes_spaces(
.map(|_| {
let space_node = MarkupNode::Text {
content: " ".to_string(),
ast_node_id,
syn_high_style: HighlightStyle::Blank,
attributes: Attributes::default(),
parent_id_opt: None,
@ -458,13 +452,41 @@ pub fn join_mark_nodes_spaces(
}
// put comma mark nodes between each node in mark_nodes
pub fn join_mark_nodes_commas(
mark_nodes: Vec<MarkupNode>,
ast_node_id: ASTNodeId,
) -> Vec<MarkupNode> {
pub fn join_mark_nodes_commas(mark_nodes: Vec<MarkupNode>) -> Vec<MarkupNode> {
let join_nodes: Vec<MarkupNode> = (0..(mark_nodes.len() - 1))
.map(|_| new_comma_mn_ast(ast_node_id, None))
.map(|_| new_comma_mn())
.collect();
mark_nodes.into_iter().interleave(join_nodes).collect()
}
pub fn mark_nodes_to_string(markup_node_ids: &[MarkNodeId], mark_node_pool: &SlowPool) -> String {
let mut all_code_string = String::new();
for mark_node_id in markup_node_ids.iter() {
node_to_string_w_children(*mark_node_id, &mut all_code_string, mark_node_pool)
}
all_code_string
}
pub fn node_to_string_w_children(
node_id: MarkNodeId,
str_buffer: &mut String,
mark_node_pool: &SlowPool,
) {
let node = mark_node_pool.get(node_id);
if node.is_nested() {
for child_id in node.get_children_ids() {
node_to_string_w_children(child_id, str_buffer, mark_node_pool);
}
for _ in 0..node.get_newlines_at_end() {
str_buffer.push('\n')
}
} else {
let node_content_str = node.get_full_content();
str_buffer.push_str(&node_content_str);
}
}

View file

@ -14,37 +14,43 @@ use crate::{
syntax_highlight::HighlightStyle,
};
// Top Level Defined Value. example: `main = "Hello, World!"`
pub fn tld_mark_node<'a>(
use super::{
common_nodes::new_assign_mn, convert::from_def2::add_node, mark_id_ast_id_map::MarkIdAstIdMap,
};
// represents for example: `main = "Hello, World!"`
pub fn assignment_mark_node<'a>(
identifier_id: IdentId,
expr_mark_node_id: MarkNodeId,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
env: &Env<'a>,
) -> ASTResult<MarkupNode> {
let val_name = env.ident_ids.get_name_str_res(identifier_id)?;
let val_name_mn = MarkupNode::Text {
content: val_name.to_owned(),
ast_node_id,
syn_high_style: HighlightStyle::Value,
attributes: Attributes::default(),
parent_id_opt: None,
newlines_at_end: 0,
};
let val_name_mn_id = mark_node_pool.add(val_name_mn);
let val_name_mn_id = add_node(val_name_mn, ast_node_id, mark_node_pool, mark_id_ast_id_map);
let equals_mn_id = mark_node_pool.add(new_equals_mn(ast_node_id, None));
let full_let_node = MarkupNode::Nested {
let equals_mn_id = add_node(
new_equals_mn(),
ast_node_id,
children_ids: vec![val_name_mn_id, equals_mn_id, expr_mark_node_id],
parent_id_opt: None,
newlines_at_end: 3,
};
mark_node_pool,
mark_id_ast_id_map,
);
Ok(full_let_node)
Ok(new_assign_mn(
val_name_mn_id,
equals_mn_id,
expr_mark_node_id,
))
}
pub fn tld_w_comments_mark_node(
@ -52,9 +58,15 @@ pub fn tld_w_comments_mark_node(
def_mark_node_id: MarkNodeId,
ast_node_id: ASTNodeId,
mark_node_pool: &mut SlowPool,
mark_id_ast_id_map: &mut MarkIdAstIdMap,
comments_before: bool,
) -> ASTResult<MarkupNode> {
let comment_mn_id = mark_node_pool.add(new_comments_mn(comments, ast_node_id, 1));
let comment_mn_id = add_node(
new_comments_mn(comments, 1),
ast_node_id,
mark_node_pool,
mark_id_ast_id_map,
);
let children_ids = if comments_before {
vec![comment_mn_id, def_mark_node_id]
@ -63,7 +75,6 @@ pub fn tld_w_comments_mark_node(
};
let tld_w_comment_node = MarkupNode::Nested {
ast_node_id,
children_ids,
parent_id_opt: None,
newlines_at_end: 2,

View file

@ -24,6 +24,16 @@ pub enum MarkError {
node_type: String,
backtrace: Backtrace,
},
#[snafu(display(
"MarkNodeIdWithoutCorrespondingASTNodeId: MarkupNode with id {} was not found in MarkIdAstIdMap, available keys are: {}.",
node_id,
keys_str
))]
MarkNodeIdWithoutCorrespondingASTNodeId {
node_id: MarkNodeId,
keys_str: String,
backtrace: Backtrace,
},
#[snafu(display("NestedNodeMissingChild: expected to find child with id {} in Nested MarkupNode, but it was missing. Id's of the children are {:?}.", node_id, children_ids))]
NestedNodeMissingChild {
node_id: MarkNodeId,

View file

@ -1,6 +1,4 @@
use std::fmt;
use crate::markup::nodes::MarkupNode;
use crate::markup::{mark_id_ast_id_map::MarkIdAstIdMap, nodes::MarkupNode};
pub type MarkNodeId = usize;
@ -34,14 +32,15 @@ impl SlowPool {
// TODO: delete children of the old node; this requires SlowPool to be changed to
// make sure the indexes still make sense after removal/compaction
}
}
impl fmt::Display for SlowPool {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "\n\n(mark_node_pool)\n")?;
pub fn debug_string(&self, mark_id_ast_id_map: &MarkIdAstIdMap) -> String {
let mut ret_str = String::new();
for (index, node) in self.nodes.iter().enumerate() {
let ast_node_id_str = format!("{:?}", node.get_ast_node_id());
for (mark_node_id, node) in self.nodes.iter().enumerate() {
let ast_node_id_str = match mark_id_ast_id_map.get(mark_node_id) {
Ok(ast_id) => format!("{:?}", ast_id),
Err(err) => format!("{:?}", err),
};
let ast_node_id: String = ast_node_id_str
.chars()
.filter(|c| c.is_ascii_digit())
@ -55,17 +54,16 @@ impl fmt::Display for SlowPool {
child_str = format!("children: {:?}", node_children);
}
writeln!(
f,
ret_str.push_str(&format!(
"{}: {} ({}) ast_id {:?} {}",
index,
mark_node_id,
node.node_type_as_string(),
node.get_content(),
ast_node_id.parse::<usize>().unwrap(),
child_str
)?;
));
}
Ok(())
ret_str
}
}

View file

@ -6,7 +6,6 @@ use crate::colors::{from_hsb, RgbaTup};
#[derive(Hash, Eq, PartialEq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum HighlightStyle {
Operator, // =+-<>...
Comma,
String,
FunctionName,
FunctionArgName,
@ -21,6 +20,9 @@ pub enum HighlightStyle {
Blank,
Comment,
DocsComment,
UppercaseIdent,
LowercaseIdent, // TODO we probably don't want all lowercase identifiers to have the same color?
Keyword, // if, else, when...
}
pub fn default_highlight_map() -> HashMap<HighlightStyle, RgbaTup> {
@ -31,7 +33,6 @@ pub fn default_highlight_map() -> HashMap<HighlightStyle, RgbaTup> {
let mut highlight_map = HashMap::new();
[
(Operator, from_hsb(185, 50, 75)),
(Comma, from_hsb(258, 50, 90)),
(String, from_hsb(346, 65, 97)),
(FunctionName, almost_white),
(FunctionArgName, from_hsb(225, 50, 100)),
@ -46,6 +47,9 @@ pub fn default_highlight_map() -> HashMap<HighlightStyle, RgbaTup> {
(Blank, from_hsb(258, 50, 90)),
(Comment, from_hsb(258, 50, 90)), // TODO check color
(DocsComment, from_hsb(258, 50, 90)), // TODO check color
(UppercaseIdent, almost_white),
(LowercaseIdent, from_hsb(225, 50, 100)),
(Keyword, almost_white),
]
.iter()
.for_each(|tup| {

View file

@ -0,0 +1,20 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::colors::{from_hsb, RgbaTup};
#[derive(Hash, Eq, PartialEq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum UnderlineStyle {
Error,
Warning,
}
pub fn default_underline_color_map() -> HashMap<UnderlineStyle, RgbaTup> {
let mut underline_colors = HashMap::new();
underline_colors.insert(UnderlineStyle::Error, from_hsb(0, 50, 75));
underline_colors.insert(UnderlineStyle::Warning, from_hsb(60, 50, 75));
underline_colors
}

View file

@ -0,0 +1,13 @@
[package]
authors = ["The Roc Contributors"]
edition = "2018"
license = "UPL-1.0"
name = "roc_alias_analysis"
version = "0.1.0"
[dependencies]
morphic_lib = {path = "../../vendor/morphic_lib"}
roc_collections = {path = "../collections"}
roc_module = {path = "../module"}
roc_mono = {path = "../mono"}

View file

@ -8,11 +8,11 @@ use roc_collections::all::{MutMap, MutSet};
use roc_module::low_level::LowLevel;
use roc_module::symbol::Symbol;
use crate::ir::{
use roc_mono::ir::{
Call, CallType, Expr, HigherOrderLowLevel, HostExposedLayouts, ListLiteralElement, Literal,
ModifyRc, OptLevel, Proc, Stmt,
};
use crate::layout::{Builtin, Layout, RawFunctionLayout, UnionLayout};
use roc_mono::layout::{Builtin, Layout, RawFunctionLayout, UnionLayout};
// just using one module for now
pub const MOD_APP: ModName = ModName(b"UserApp");
@ -110,7 +110,7 @@ fn bytes_as_ascii(bytes: &[u8]) -> String {
pub fn spec_program<'a, I>(
opt_level: OptLevel,
entry_point: crate::ir::EntryPoint<'a>,
entry_point: roc_mono::ir::EntryPoint<'a>,
procs: I,
) -> Result<morphic_lib::Solutions>
where
@ -245,7 +245,7 @@ where
match opt_level {
OptLevel::Development | OptLevel::Normal => morphic_lib::solve_trivial(program),
OptLevel::Optimize => morphic_lib::solve(program),
OptLevel::Optimize | OptLevel::Size => morphic_lib::solve(program),
}
}
@ -266,7 +266,7 @@ fn terrible_hack(builder: &mut FuncDefBuilder, block: BlockId, type_id: TypeId)
}
fn build_entry_point(
layout: crate::ir::ProcLayout,
layout: roc_mono::ir::ProcLayout,
func_name: FuncName,
host_exposed_functions: &[([u8; SIZE], &[Layout])],
) -> Result<FuncDef> {
@ -308,7 +308,7 @@ fn build_entry_point(
let block = builder.add_block();
let type_id = layout_spec(&mut builder, &Layout::Struct(layouts))?;
let type_id = layout_spec(&mut builder, &Layout::struct_no_name_order(layouts))?;
let argument = builder.add_unknown_with(block, &[], type_id)?;
@ -349,7 +349,10 @@ fn proc_spec<'a>(proc: &Proc<'a>) -> Result<(FuncDef, MutSet<UnionLayout<'a>>)>
let value_id = stmt_spec(&mut builder, &mut env, block, &proc.ret_layout, &proc.body)?;
let root = BlockExpr(block, value_id);
let arg_type_id = layout_spec(&mut builder, &Layout::Struct(&argument_layouts))?;
let arg_type_id = layout_spec(
&mut builder,
&Layout::struct_no_name_order(&argument_layouts),
)?;
let ret_type_id = layout_spec(&mut builder, &proc.ret_layout)?;
let spec = builder.build(arg_type_id, ret_type_id, root)?;
@ -360,7 +363,7 @@ fn proc_spec<'a>(proc: &Proc<'a>) -> Result<(FuncDef, MutSet<UnionLayout<'a>>)>
#[derive(Default)]
struct Env<'a> {
symbols: MutMap<Symbol, ValueId>,
join_points: MutMap<crate::ir::JoinPointId, morphic_lib::ContinuationId>,
join_points: MutMap<roc_mono::ir::JoinPointId, morphic_lib::ContinuationId>,
type_names: MutSet<UnionLayout<'a>>,
}
@ -708,7 +711,7 @@ fn call_spec(
passed_function,
..
}) => {
use crate::low_level::HigherOrder::*;
use roc_mono::low_level::HigherOrder::*;
let array = passed_function.specialization_id.to_bytes();
let spec_var = CalleeSpecVar(&array);
@ -1135,7 +1138,7 @@ fn call_spec(
// ListFindUnsafe returns { value: v, found: Bool=Int1 }
let output_layouts = vec![argument_layouts[0], Layout::Builtin(Builtin::Bool)];
let output_layout = Layout::Struct(&output_layouts);
let output_layout = Layout::struct_no_name_order(&output_layouts);
let output_type = layout_spec(builder, &output_layout)?;
let loop_body = |builder: &mut FuncDefBuilder, block, output| {
@ -1193,7 +1196,7 @@ fn lowlevel_spec(
block: BlockId,
layout: &Layout,
op: &LowLevel,
update_mode: crate::ir::UpdateModeId,
update_mode: roc_mono::ir::UpdateModeId,
arguments: &[Symbol],
) -> Result<ValueId> {
use LowLevel::*;
@ -1255,22 +1258,21 @@ fn lowlevel_spec(
builder.add_bag_get(block, bag)
}
ListSet => {
ListReplaceUnsafe => {
let list = env.symbols[&arguments[0]];
let to_insert = env.symbols[&arguments[2]];
let bag = builder.add_get_tuple_field(block, list, LIST_BAG_INDEX)?;
let cell = builder.add_get_tuple_field(block, list, LIST_CELL_INDEX)?;
// decrement the overwritten element
let overwritten = builder.add_bag_get(block, bag)?;
let _unit = builder.add_recursive_touch(block, overwritten)?;
let _unit = builder.add_update(block, update_mode_var, cell)?;
let _unit1 = builder.add_touch(block, cell)?;
let _unit2 = builder.add_update(block, update_mode_var, cell)?;
builder.add_bag_insert(block, bag, to_insert)?;
with_new_heap_cell(builder, block, bag)
let old_value = builder.add_bag_get(block, bag)?;
let new_list = with_new_heap_cell(builder, block, bag)?;
builder.add_make_tuple(block, &[new_list, old_value])
}
ListSwap => {
let list = env.symbols[&arguments[0]];
@ -1509,6 +1511,16 @@ fn expr_spec<'a>(
builder.add_make_named(block, MOD_APP, type_name, tag_value_id)
}
ExprBox { symbol } => {
let value_id = env.symbols[symbol];
with_new_heap_cell(builder, block, value_id)
}
ExprUnbox { symbol } => {
let tuple_id = env.symbols[symbol];
builder.add_get_tuple_field(block, tuple_id, BOX_VALUE_INDEX)
}
Struct(fields) => build_tuple_value(builder, env, block, fields),
UnionAtIndex {
index,
@ -1672,7 +1684,9 @@ fn layout_spec_help(
match layout {
Builtin(builtin) => builtin_spec(builder, builtin, when_recursive),
Struct(fields) => build_recursive_tuple_type(builder, fields, when_recursive),
Struct { field_layouts, .. } => {
build_recursive_tuple_type(builder, field_layouts, when_recursive)
}
LambdaSet(lambda_set) => layout_spec_help(
builder,
&lambda_set.runtime_representation(),
@ -1701,6 +1715,13 @@ fn layout_spec_help(
}
}
}
Boxed(inner_layout) => {
let inner_type = layout_spec_help(builder, inner_layout, when_recursive)?;
let cell_type = builder.add_heap_cell_type();
builder.add_tuple_type(&[cell_type, inner_type])
}
RecursivePointer => match when_recursive {
WhenRecursive::Unreachable => {
unreachable!()
@ -1783,6 +1804,10 @@ const LIST_BAG_INDEX: u32 = 1;
const DICT_CELL_INDEX: u32 = LIST_CELL_INDEX;
const DICT_BAG_INDEX: u32 = LIST_BAG_INDEX;
#[allow(dead_code)]
const BOX_CELL_INDEX: u32 = LIST_CELL_INDEX;
const BOX_VALUE_INDEX: u32 = LIST_BAG_INDEX;
const TAG_CELL_INDEX: u32 = 0;
const TAG_DATA_INDEX: u32 = 1;

View file

@ -24,7 +24,7 @@ roc_gen_llvm = { path = "../gen_llvm", optional = true }
roc_gen_wasm = { path = "../gen_wasm", optional = true }
roc_gen_dev = { path = "../gen_dev", default-features = false }
roc_reporting = { path = "../../reporting" }
roc_std = { path = "../../roc_std" }
roc_std = { path = "../../roc_std", default-features = false }
bumpalo = { version = "3.8.0", features = ["collections"] }
libloading = "0.7.1"
tempfile = "3.2.0"
@ -35,7 +35,6 @@ target-lexicon = "0.12.2"
serde_json = "1.0.69"
[features]
default = ["llvm", "target-aarch64", "target-x86_64", "target-wasm32"]
target-arm = []
target-aarch64 = ["roc_gen_dev/target-aarch64"]
target-x86 = []

View file

@ -1,4 +1,4 @@
use crate::target::{arch_str, target_triple_str};
use crate::target::{arch_str, target_zig_str};
#[cfg(feature = "llvm")]
use libloading::{Error, Library};
use roc_builtins::bitcode;
@ -8,7 +8,7 @@ use std::collections::HashMap;
use std::env;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Output};
use std::process::{self, Child, Command, Output};
use target_lexicon::{Architecture, OperatingSystem, Triple};
fn zig_executable() -> String {
@ -46,6 +46,10 @@ pub fn link(
operating_system: OperatingSystem::Darwin,
..
} => link_macos(target, output_path, input_paths, link_type),
Triple {
operating_system: OperatingSystem::Windows,
..
} => link_windows(target, output_path, input_paths, link_type),
_ => panic!("TODO gracefully handle unsupported target: {:?}", target),
}
}
@ -137,6 +141,8 @@ pub fn build_zig_host_native(
if matches!(opt_level, OptLevel::Optimize) {
command.args(&["-O", "ReleaseSafe"]);
} else if matches!(opt_level, OptLevel::Size) {
command.args(&["-O", "ReleaseSmall"]);
}
command.output().unwrap()
}
@ -231,6 +237,8 @@ pub fn build_zig_host_native(
]);
if matches!(opt_level, OptLevel::Optimize) {
command.args(&["-O", "ReleaseSafe"]);
} else if matches!(opt_level, OptLevel::Size) {
command.args(&["-O", "ReleaseSmall"]);
}
command.output().unwrap()
}
@ -282,6 +290,8 @@ pub fn build_zig_host_wasm32(
]);
if matches!(opt_level, OptLevel::Optimize) {
command.args(&["-O", "ReleaseSafe"]);
} else if matches!(opt_level, OptLevel::Size) {
command.args(&["-O", "ReleaseSmall"]);
}
command.output().unwrap()
}
@ -317,7 +327,9 @@ pub fn build_c_host_native(
command.args(&["-fPIC", "-c"]);
}
if matches!(opt_level, OptLevel::Optimize) {
command.arg("-O2");
command.arg("-O3");
} else if matches!(opt_level, OptLevel::Size) {
command.arg("-Os");
}
command.output().unwrap()
}
@ -351,6 +363,8 @@ pub fn build_swift_host_native(
if matches!(opt_level, OptLevel::Optimize) {
command.arg("-O");
} else if matches!(opt_level, OptLevel::Size) {
command.arg("-Osize");
}
command.output().unwrap()
@ -443,7 +457,7 @@ pub fn rebuild_host(
&emit_bin,
zig_host_src.to_str().unwrap(),
zig_str_path.to_str().unwrap(),
target_triple_str(target),
target_zig_str(target),
opt_level,
shared_lib_path,
target_valgrind,
@ -456,18 +470,18 @@ pub fn rebuild_host(
} else if cargo_host_src.exists() {
// Compile and link Cargo.toml, if it exists
let cargo_dir = host_input_path.parent().unwrap();
let cargo_out_dir =
cargo_dir
.join("target")
.join(if matches!(opt_level, OptLevel::Optimize) {
"release"
} else {
"debug"
});
let cargo_out_dir = cargo_dir.join("target").join(
if matches!(opt_level, OptLevel::Optimize | OptLevel::Size) {
"release"
} else {
"debug"
},
);
let mut command = Command::new("cargo");
command.arg("build").current_dir(cargo_dir);
if matches!(opt_level, OptLevel::Optimize) {
// Cargo doesn't expose a size optimization level without editing Cargo.toml, so just use release instead.
if matches!(opt_level, OptLevel::Optimize | OptLevel::Size) {
command.arg("--release");
}
let source_file = if shared_lib_path.is_some() {
@ -533,6 +547,8 @@ pub fn rebuild_host(
]);
if matches!(opt_level, OptLevel::Optimize) {
command.arg("-O");
} else if matches!(opt_level, OptLevel::Size) {
command.arg("-C opt-level=s");
}
let output = command.output().unwrap();
@ -636,6 +652,33 @@ fn library_path<const N: usize>(segments: [&str; N]) -> Option<PathBuf> {
}
}
/// Given a list of library directories and the name of a library, find the first match
///
/// Each library directory is given as a series of path segments, like
///
/// ["/usr", "lib"]
///
/// Each directory will be checked for a file with the provided filename, and the first
/// match will be returned.
///
/// If there are no matches, [`None`] will be returned.
fn look_for_library(lib_dirs: &[&[&str]], lib_filename: &str) -> Option<PathBuf> {
lib_dirs
.iter()
.map(|lib_dir| {
lib_dir.iter().fold(PathBuf::new(), |mut path, segment| {
path.push(segment);
path
})
})
.map(|mut path| {
path.push(lib_filename);
path
})
.find(|path| path.exists())
}
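As a hedged sketch (not in the diff) of how `link_linux` below uses this helper — the directory list here is illustrative; the real one is built from `nix_path_opt()` and the architecture-specific paths:

```rust
let lib_dirs: &[&[&str]] = &[
    &["/usr", "lib", "x86_64-linux-gnu"],
    &["/usr", "lib"],
    &["/usr", "lib64"],
];

match look_for_library(lib_dirs, "libgcc_s.so.1") {
    Some(path) => println!("using {}", path.display()),
    None => eprintln!("libgcc_s.so.1 not found in any candidate directory"),
}
```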
fn link_linux(
target: &Triple,
output_path: PathBuf,
@ -670,28 +713,75 @@ fn link_linux(
));
}
let libcrt_path =
// Some things we'll need to build a list of dirs to check for libraries
let maybe_nix_path = nix_path_opt();
let usr_lib_arch = ["/usr", "lib", &architecture];
let lib_arch = ["/lib", &architecture];
let nix_path_segments;
let lib_dirs_if_nix: [&[&str]; 5];
let lib_dirs_if_nonix: [&[&str]; 4];
// Build the aforementioned list
let lib_dirs: &[&[&str]] =
// Give preference to nix_path if it's defined; this prevents bugs
if let Some(nix_path) = nix_path_opt() {
library_path([&nix_path])
.unwrap()
if let Some(nix_path) = &maybe_nix_path {
nix_path_segments = [nix_path.as_str()];
lib_dirs_if_nix = [
&nix_path_segments,
&usr_lib_arch,
&lib_arch,
&["/usr", "lib"],
&["/usr", "lib64"],
];
&lib_dirs_if_nix
} else {
library_path(["/usr", "lib", &architecture])
.or_else(|| library_path(["/usr", "lib"]))
.unwrap()
lib_dirs_if_nonix = [
&usr_lib_arch,
&lib_arch,
&["/usr", "lib"],
&["/usr", "lib64"],
];
&lib_dirs_if_nonix
};
// Look for the libraries we'll need
let libgcc_name = "libgcc_s.so.1";
let libgcc_path =
// give preference to nix_path if it's defined, this prevents bugs
if let Some(nix_path) = nix_path_opt() {
library_path([&nix_path, libgcc_name])
.unwrap()
} else {
library_path(["/lib", &architecture, libgcc_name])
.or_else(|| library_path(["/usr", "lib", &architecture, libgcc_name]))
.or_else(|| library_path(["/usr", "lib", libgcc_name]))
.unwrap()
let libgcc_path = look_for_library(lib_dirs, libgcc_name);
let crti_name = "crti.o";
let crti_path = look_for_library(lib_dirs, crti_name);
let crtn_name = "crtn.o";
let crtn_path = look_for_library(lib_dirs, crtn_name);
let scrt1_name = "Scrt1.o";
let scrt1_path = look_for_library(lib_dirs, scrt1_name);
// Unwrap all the paths at once so we can inform the user of all missing libs at once
let (libgcc_path, crti_path, crtn_path, scrt1_path) =
match (libgcc_path, crti_path, crtn_path, scrt1_path) {
(Some(libgcc), Some(crti), Some(crtn), Some(scrt1)) => (libgcc, crti, crtn, scrt1),
(maybe_gcc, maybe_crti, maybe_crtn, maybe_scrt1) => {
if maybe_gcc.is_none() {
eprintln!("Couldn't find libgcc_s.so.1!");
eprintln!("You may need to install libgcc\n");
}
if maybe_crti.is_none() | maybe_crtn.is_none() | maybe_scrt1.is_none() {
eprintln!("Couldn't find the glibc development files!");
eprintln!("We need the objects crti.o, crtn.o, and Scrt1.o");
eprintln!("You may need to install the glibc development package");
eprintln!("(probably called glibc-dev or glibc-devel)\n");
}
let dirs = lib_dirs
.iter()
.map(|segments| segments.join("/"))
.collect::<Vec<String>>()
.join("\n");
eprintln!("We looked in the following directories:\n{}", dirs);
process::exit(1);
}
};
let ld_linux = match target.architecture {
@ -717,7 +807,7 @@ fn link_linux(
LinkType::Executable => (
// Presumably this S stands for Static, since if we include Scrt1.o
// in the linking for dynamic builds, linking fails.
vec![libcrt_path.join("Scrt1.o").to_str().unwrap().to_string()],
vec![scrt1_path.to_string_lossy().into_owned()],
output_path,
),
LinkType::Dylib => {
@ -749,8 +839,6 @@ fn link_linux(
let env_path = env::var("PATH").unwrap_or_else(|_| "".to_string());
init_arch(target);
// NOTE: order of arguments to `ld` matters here!
// The `-l` flags should go after the `.o` arguments
@ -772,8 +860,8 @@ fn link_linux(
"-A",
arch_str(target),
"-pie",
libcrt_path.join("crti.o").to_str().unwrap(),
libcrt_path.join("crtn.o").to_str().unwrap(),
&*crti_path.to_string_lossy(),
&*crtn_path.to_string_lossy(),
])
.args(&base_args)
.args(&["-dynamic-linker", ld_linux])
@ -850,6 +938,18 @@ fn link_macos(
ld_command.arg(format!("-L{}/swift", sdk_path));
};
let roc_link_flags = match env::var("ROC_LINK_FLAGS") {
Ok(flags) => {
println!("⚠️ CAUTION: The ROC_LINK_FLAGS environment variable is a temporary workaround, and will no longer do anything once surgical linking lands! If you're concerned about what this means for your use case, please ask about it on Zulip.");
flags
}
Err(_) => "".to_string(),
};
for roc_link_flag in roc_link_flags.split_whitespace() {
ld_command.arg(roc_link_flag);
}
ld_command.args(&[
// Libraries - see https://github.com/rtfeldman/roc/pull/554#discussion_r496392274
// for discussion and further references
@ -953,6 +1053,15 @@ fn link_wasm32(
Ok((child, output_path))
}
fn link_windows(
_target: &Triple,
_output_path: PathBuf,
_input_paths: &[&str],
_link_type: LinkType,
) -> io::Result<(Child, PathBuf)> {
todo!("Add windows support to the surgical linker. See issue #2608.")
}
#[cfg(feature = "llvm")]
pub fn module_to_dylib(
module: &inkwell::module::Module,
@ -1010,13 +1119,3 @@ fn validate_output(file_name: &str, cmd_name: &str, output: Output) {
}
}
}
#[cfg(feature = "llvm")]
fn init_arch(target: &Triple) {
crate::target::init_arch(target);
}
#[cfg(not(feature = "llvm"))]
fn init_arch(_target: &Triple) {
panic!("Tried to initialize LLVM when crate was not built with `feature = \"llvm\"` enabled");
}

View file

@ -2,12 +2,12 @@
use roc_gen_llvm::llvm::build::module_from_builtins;
#[cfg(feature = "llvm")]
pub use roc_gen_llvm::llvm::build::FunctionIterator;
use roc_load::file::{LoadedModule, MonomorphizedModule};
use roc_load::{LoadedModule, MonomorphizedModule};
use roc_module::symbol::{Interns, ModuleId};
use roc_mono::ir::OptLevel;
use roc_region::all::LineInfo;
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::time::{Duration, SystemTime};
use roc_collections::all::MutMap;
#[cfg(feature = "target-wasm32")]
@ -179,7 +179,7 @@ pub fn gen_from_mono_module(
_emit_debug_info: bool,
) -> CodeGenTiming {
match opt_level {
OptLevel::Optimize => {
OptLevel::Optimize | OptLevel::Size => {
todo!("Return this error message in a better way: optimized builds not supported without llvm backend");
}
OptLevel::Normal | OptLevel::Development => {
@ -199,7 +199,7 @@ pub fn gen_from_mono_module(
emit_debug_info: bool,
) -> CodeGenTiming {
match opt_level {
OptLevel::Normal | OptLevel::Optimize => gen_from_mono_module_llvm(
OptLevel::Normal | OptLevel::Size | OptLevel::Optimize => gen_from_mono_module_llvm(
arena,
loaded,
roc_file_path,
@ -230,7 +230,6 @@ pub fn gen_from_mono_module_llvm(
use inkwell::context::Context;
use inkwell::module::Linkage;
use inkwell::targets::{CodeModel, FileType, RelocMode};
use std::time::SystemTime;
let code_gen_start = SystemTime::now();
@ -267,6 +266,7 @@ pub fn gen_from_mono_module_llvm(
|| name.starts_with("roc_builtins.dec")
|| name.starts_with("list.RocList")
|| name.starts_with("dict.RocDict")
|| name.contains("incref")
|| name.contains("decref")
{
function.add_attribute(AttributeLoc::Function, enum_attr);
@ -486,6 +486,7 @@ fn gen_from_mono_module_dev_wasm32(
loaded: MonomorphizedModule,
app_o_file: &Path,
) -> CodeGenTiming {
let code_gen_start = SystemTime::now();
let MonomorphizedModule {
module_id,
procedures,
@ -519,9 +520,17 @@ fn gen_from_mono_module_dev_wasm32(
procedures,
);
let code_gen = code_gen_start.elapsed().unwrap();
let emit_o_file_start = SystemTime::now();
std::fs::write(&app_o_file, &bytes).expect("failed to write object to file");
CodeGenTiming::default()
let emit_o_file = emit_o_file_start.elapsed().unwrap();
CodeGenTiming {
code_gen,
emit_o_file,
}
}
fn gen_from_mono_module_dev_assembly(
@ -530,6 +539,8 @@ fn gen_from_mono_module_dev_assembly(
target: &target_lexicon::Triple,
app_o_file: &Path,
) -> CodeGenTiming {
let code_gen_start = SystemTime::now();
let lazy_literals = true;
let generate_allocators = false; // provided by the platform
@ -551,10 +562,18 @@ fn gen_from_mono_module_dev_assembly(
let module_object = roc_gen_dev::build_module(&env, &mut interns, target, procedures);
let code_gen = code_gen_start.elapsed().unwrap();
let emit_o_file_start = SystemTime::now();
let module_out = module_object
.write()
.expect("failed to build output object");
std::fs::write(&app_o_file, module_out).expect("failed to write object to file");
CodeGenTiming::default()
let emit_o_file = emit_o_file_start.elapsed().unwrap();
CodeGenTiming {
code_gen,
emit_o_file,
}
}

View file

@ -41,6 +41,37 @@ pub fn target_triple_str(target: &Triple) -> &'static str {
operating_system: OperatingSystem::Darwin,
..
} => "x86_64-unknown-darwin10",
Triple {
architecture: Architecture::X86_64,
operating_system: OperatingSystem::Windows,
..
} => "x86_64-pc-windows-gnu",
_ => panic!("TODO gracefully handle unsupported target: {:?}", target),
}
}
pub fn target_zig_str(target: &Triple) -> &'static str {
// Zig has its own architecture mappings, defined here:
// https://github.com/ziglang/zig/blob/master/tools/process_headers.zig
//
// and an open proposal to unify them with the more typical "target triples":
// https://github.com/ziglang/zig/issues/4911
match target {
Triple {
architecture: Architecture::X86_64,
operating_system: OperatingSystem::Linux,
..
} => "x86_64-linux-gnu",
Triple {
architecture: Architecture::X86_32(target_lexicon::X86_32Architecture::I386),
operating_system: OperatingSystem::Linux,
..
} => "i386-linux-gnu",
Triple {
architecture: Architecture::Aarch64(_),
operating_system: OperatingSystem::Linux,
..
} => "aarch64-linux-gnu",
_ => panic!("TODO gracefully handle unsupported target: {:?}", target),
}
}
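A small usage sketch (an assumption, not in the diff): the host triple must be one of the Linux targets handled above, otherwise the catch-all arm panics.

```rust
let triple = target_lexicon::Triple::host();
// e.g. "x86_64-linux-gnu" on a typical x86_64 Linux machine
let zig_target = target_zig_str(&triple);
println!("passing -target {} to zig", zig_target);
```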
@ -114,6 +145,8 @@ pub fn target_machine(
pub fn convert_opt_level(level: OptLevel) -> OptimizationLevel {
match level {
OptLevel::Development | OptLevel::Normal => OptimizationLevel::None,
// Default is O2/Os. If we want Oz, we have to explicitly turn off loop vectorization as well.
OptLevel::Size => OptimizationLevel::Default,
OptLevel::Optimize => OptimizationLevel::Aggressive,
}
}

View file

@ -11,3 +11,8 @@ roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_types = { path = "../types" }
roc_target = { path = "../roc_target" }
lazy_static = "1.4.0"
[build-dependencies]
# dunce can be removed once ziglang/zig#5109 is fixed
dunce = "1.0.2"

View file

@ -7,7 +7,7 @@ To add a builtin:
2. Make sure the function is public with the `pub` keyword and uses the C calling convention. This is really easy: just add `pub` and `callconv(.C)` to the function declaration like so: `pub fn atan(num: f64) callconv(.C) f64 { ... }`
3. In `src/main.zig`, export the function. This is also organized by module. For example, for a `Num` function, find the `Num` section and add: `comptime { exportNumFn(num.atan, "atan"); }`. The first argument is the function, the second is its name in LLVM.
4. In `compiler/builtins/src/bitcode.rs`, add a constant for the new function. This is how we use it in Rust. Once again, this is organized by module, so just find the relevant area and add your new function.
5. You can now your function in Rust using `call_bitcode_fn` in `llvm/src/build.rs`!
5. You can now use your function in Rust using `call_bitcode_fn` in `llvm/src/build.rs`!
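For the Rust side of steps 4 and 5, a hedged sketch of the kind of constant you would add (the name and string follow the `roc_builtins.num.*` naming scheme visible elsewhere in this diff, but are assumptions rather than the literal contents of `bitcode.rs`):

```rust
// compiler/builtins/src/bitcode.rs (hypothetical addition for step 4)
pub const NUM_ATAN: &str = "roc_builtins.num.atan";
```

In step 5, `call_bitcode_fn` in `llvm/src/build.rs` is then given this constant to emit a call to the Zig implementation.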
## How it works
@ -28,7 +28,7 @@ There will be two directories like `roc_builtins-[some random characters]`, look
> The bitcode is a bunch of bytes that aren't particularly human-readable.
> If you want to take a look at the human-readable LLVM IR, look at
> `target/debug/build/roc_builtins-[some random characters]/out/builtins.ll`
> `compiler/builtins/bitcode/builtins.ll`
## Calling bitcode functions

View file

@ -0,0 +1,9 @@
#!/bin/bash
set -euxo pipefail
# Test failures will always point at the _start function
# Make sure to look at the rest of the stack trace!
warning_about_non_native_binary=$(zig test -target wasm32-wasi-musl -O ReleaseFast src/main.zig 2>&1)
wasm_test_binary=$(echo $warning_about_non_native_binary | cut -d' ' -f 3)
wasmer $wasm_test_binary dummyArgForZigTestBinary

View file

@ -26,21 +26,19 @@ pub const RocDec = extern struct {
return .{ .num = num * one_point_zero_i128 };
}
// TODO: There's got to be a better way to do this other than converting to Str
pub fn fromF64(num: f64) ?RocDec {
var digit_bytes: [19]u8 = undefined; // 19 = max f64 digits + '.' + '-'
var result: f64 = num * comptime @intToFloat(f64, one_point_zero_i128);
var fbs = std.io.fixedBufferStream(digit_bytes[0..]);
std.fmt.formatFloatDecimal(num, .{}, fbs.writer()) catch
return null;
var dec = RocDec.fromStr(RocStr.init(&digit_bytes, fbs.pos));
if (dec) |d| {
return d;
} else {
if (result > comptime @intToFloat(f64, math.maxInt(i128))) {
return null;
}
if (result < comptime @intToFloat(f64, math.minInt(i128))) {
return null;
}
var ret: RocDec = .{ .num = @floatToInt(i128, result) };
return ret;
}
pub fn fromStr(roc_str: RocStr) ?RocDec {
@ -729,6 +727,11 @@ test "fromF64" {
try expectEqual(RocDec{ .num = 25500000000000000000 }, dec.?);
}
test "fromF64 overflow" {
var dec = RocDec.fromF64(1e308);
try expectEqual(dec, null);
}
test "fromStr: empty" {
var roc_str = RocStr.init("", 0);
var dec = RocDec.fromStr(roc_str);
@ -898,11 +901,11 @@ test "toStr: -0.00045" {
try expectEqualSlices(u8, res_slice, res_roc_str.?.asSlice());
}
test "toStr: -111.123456789" {
var dec: RocDec = .{ .num = -111123456789000000000 };
test "toStr: -111.123456" {
var dec: RocDec = .{ .num = -111123456000000000000 };
var res_roc_str = dec.toStr();
const res_slice: []const u8 = "-111.123456789"[0..];
const res_slice: []const u8 = "-111.123456"[0..];
try expectEqualSlices(u8, res_slice, res_roc_str.?.asSlice());
}

View file

@ -408,7 +408,19 @@ const Dec = fn (?[*]u8) callconv(.C) void;
const Caller3 = fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
// Dict.insert : Dict k v, k, v -> Dict k v
pub fn dictInsert(input: RocDict, alignment: Alignment, key: Opaque, key_width: usize, value: Opaque, value_width: usize, hash_fn: HashFn, is_eq: EqFn, dec_key: Dec, dec_value: Dec, output: *RocDict) callconv(.C) void {
pub fn dictInsert(
input: RocDict,
alignment: Alignment,
key: Opaque,
key_width: usize,
value: Opaque,
value_width: usize,
hash_fn: HashFn,
is_eq: EqFn,
dec_key: Dec,
dec_value: Dec,
output: *RocDict,
) callconv(.C) void {
var seed: u64 = INITIAL_SEED;
var result = input.makeUnique(alignment, key_width, value_width);
@ -543,7 +555,13 @@ pub fn elementsRc(dict: RocDict, alignment: Alignment, key_width: usize, value_w
}
}
pub fn dictKeys(dict: RocDict, alignment: Alignment, key_width: usize, value_width: usize, inc_key: Inc, output: *RocList) callconv(.C) void {
pub fn dictKeys(
dict: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
inc_key: Inc,
) callconv(.C) RocList {
const size = dict.capacity();
var length: usize = 0;
@ -558,8 +576,7 @@ pub fn dictKeys(dict: RocDict, alignment: Alignment, key_width: usize, value_wid
}
if (length == 0) {
output.* = RocList.empty();
return;
return RocList.empty();
}
const data_bytes = length * key_width;
@ -581,10 +598,16 @@ pub fn dictKeys(dict: RocDict, alignment: Alignment, key_width: usize, value_wid
}
}
output.* = RocList{ .bytes = ptr, .length = length };
return RocList{ .bytes = ptr, .length = length, .capacity = length };
}
pub fn dictValues(dict: RocDict, alignment: Alignment, key_width: usize, value_width: usize, inc_value: Inc, output: *RocList) callconv(.C) void {
pub fn dictValues(
dict: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
inc_value: Inc,
) callconv(.C) RocList {
const size = dict.capacity();
var length: usize = 0;
@ -599,8 +622,7 @@ pub fn dictValues(dict: RocDict, alignment: Alignment, key_width: usize, value_w
}
if (length == 0) {
output.* = RocList.empty();
return;
return RocList.empty();
}
const data_bytes = length * value_width;
@ -622,14 +644,25 @@ pub fn dictValues(dict: RocDict, alignment: Alignment, key_width: usize, value_w
}
}
output.* = RocList{ .bytes = ptr, .length = length };
return RocList{ .bytes = ptr, .length = length, .capacity = length };
}
fn doNothing(_: Opaque) callconv(.C) void {
return;
}
pub fn dictUnion(dict1: RocDict, dict2: RocDict, alignment: Alignment, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, inc_key: Inc, inc_value: Inc, output: *RocDict) callconv(.C) void {
pub fn dictUnion(
dict1: RocDict,
dict2: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
hash_fn: HashFn,
is_eq: EqFn,
inc_key: Inc,
inc_value: Inc,
output: *RocDict,
) callconv(.C) void {
output.* = dict1.makeUnique(alignment, key_width, value_width);
var i: usize = 0;

View file

@ -16,6 +16,7 @@ const HasTagId = fn (u16, ?[*]u8) callconv(.C) extern struct { matched: bool, da
pub const RocList = extern struct {
bytes: ?[*]u8,
length: usize,
capacity: usize,
pub fn len(self: RocList) usize {
return self.length;
@ -26,7 +27,7 @@ pub const RocList = extern struct {
}
pub fn empty() RocList {
return RocList{ .bytes = null, .length = 0 };
return RocList{ .bytes = null, .length = 0, .capacity = 0 };
}
pub fn isUnique(self: RocList) bool {
@ -50,6 +51,7 @@ pub const RocList = extern struct {
return RocList{
.bytes = utils.allocateWithRefcount(data_bytes, alignment),
.length = length,
.capacity = length,
};
}
@ -96,7 +98,7 @@ pub const RocList = extern struct {
if (self.isUnique()) {
const new_source = utils.unsafeReallocate(source_ptr, alignment, self.len(), new_length, element_width);
return RocList{ .bytes = new_source, .length = new_length };
return RocList{ .bytes = new_source, .length = new_length, .capacity = new_length };
}
}
@ -128,6 +130,7 @@ pub const RocList = extern struct {
const result = RocList{
.bytes = first_slot,
.length = new_length,
.capacity = new_length,
};
utils.decref(self.bytes, old_length * element_width, alignment);
@ -887,14 +890,23 @@ pub fn listSublist(
}
const keep_len = std.math.min(len, size - start);
const drop_len = std.math.max(start, 0);
const drop_start_len = start;
const drop_end_len = size - (start + keep_len);
// Decrement the reference counts of elements before `start`.
var i: usize = 0;
while (i < drop_len) : (i += 1) {
while (i < drop_start_len) : (i += 1) {
const element = source_ptr + i * element_width;
dec(element);
}
// Decrement the reference counts of elements after `start + keep_len`.
i = 0;
while (i < drop_end_len) : (i += 1) {
const element = source_ptr + (start + keep_len + i) * element_width;
dec(element);
}
const output = RocList.allocate(alignment, keep_len, element_width);
const target_ptr = output.bytes orelse unreachable;
@ -1237,7 +1249,7 @@ pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_widt
@memcpy(new_source + list_a.len() * element_width, source_b, list_b.len() * element_width);
}
return RocList{ .bytes = new_source, .length = total_length };
return RocList{ .bytes = new_source, .length = total_length, .capacity = total_length };
}
}
const total_length: usize = list_a.len() + list_b.len();
@ -1256,95 +1268,56 @@ pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_widt
return output;
}
pub fn listSetInPlace(
bytes: ?[*]u8,
pub fn listReplaceInPlace(
list: RocList,
index: usize,
element: Opaque,
element_width: usize,
dec: Dec,
) callconv(.C) ?[*]u8 {
out_element: ?[*]u8,
) callconv(.C) RocList {
// INVARIANT: bounds checking happens on the roc side
//
// at the time of writing, the function is implemented roughly as
// `if inBounds then LowLevelListGet input index item else input`
// `if inBounds then LowLevelListReplace input index item else input`
// so we don't do a bounds check here. Hence, the list is also non-empty,
// because inserting into an empty list is always out of bounds
return listSetInPlaceHelp(bytes, index, element, element_width, dec);
return listReplaceInPlaceHelp(list, index, element, element_width, out_element);
}
pub fn listSet(
bytes: ?[*]u8,
length: usize,
pub fn listReplace(
list: RocList,
alignment: u32,
index: usize,
element: Opaque,
element_width: usize,
dec: Dec,
) callconv(.C) ?[*]u8 {
out_element: ?[*]u8,
) callconv(.C) RocList {
// INVARIANT: bounds checking happens on the roc side
//
// at the time of writing, the function is implemented roughly as
// `if inBounds then LowLevelListGet input index item else input`
// `if inBounds then LowLevelListReplace input index item else input`
// so we don't do a bounds check here. Hence, the list is also non-empty,
// because inserting into an empty list is always out of bounds
const ptr: [*]usize = @ptrCast([*]usize, @alignCast(@alignOf(usize), bytes));
if ((ptr - 1)[0] == utils.REFCOUNT_ONE) {
return listSetInPlaceHelp(bytes, index, element, element_width, dec);
} else {
return listSetImmutable(bytes, length, alignment, index, element, element_width, dec);
}
return listReplaceInPlaceHelp(list.makeUnique(alignment, element_width), index, element, element_width, out_element);
}
inline fn listSetInPlaceHelp(
bytes: ?[*]u8,
inline fn listReplaceInPlaceHelp(
list: RocList,
index: usize,
element: Opaque,
element_width: usize,
dec: Dec,
) ?[*]u8 {
out_element: ?[*]u8,
) RocList {
// the element we will replace
var element_at_index = (bytes orelse undefined) + (index * element_width);
var element_at_index = (list.bytes orelse undefined) + (index * element_width);
// decrement its refcount
dec(element_at_index);
// copy out the old element
@memcpy(out_element orelse undefined, element_at_index, element_width);
// copy in the new element
@memcpy(element_at_index, element orelse undefined, element_width);
return bytes;
}
inline fn listSetImmutable(
old_bytes: ?[*]u8,
length: usize,
alignment: u32,
index: usize,
element: Opaque,
element_width: usize,
dec: Dec,
) ?[*]u8 {
const data_bytes = length * element_width;
var new_bytes = utils.allocateWithRefcount(data_bytes, alignment);
@memcpy(new_bytes, old_bytes orelse undefined, data_bytes);
// the element we will replace
var element_at_index = new_bytes + (index * element_width);
// decrement its refcount
dec(element_at_index);
// copy in the new element
@memcpy(element_at_index, element orelse undefined, element_width);
// consume RC token of original
utils.decref(old_bytes, data_bytes, alignment);
//return list;
return new_bytes;
return list;
}
pub fn listFindUnsafe(

View file

@ -49,8 +49,8 @@ comptime {
exportListFn(list.listConcat, "concat");
exportListFn(list.listSublist, "sublist");
exportListFn(list.listDropAt, "drop_at");
exportListFn(list.listSet, "set");
exportListFn(list.listSetInPlace, "set_in_place");
exportListFn(list.listReplace, "replace");
exportListFn(list.listReplaceInPlace, "replace_in_place");
exportListFn(list.listSwap, "swap");
exportListFn(list.listAny, "any");
exportListFn(list.listAll, "all");
@ -98,6 +98,14 @@ comptime {
num.exportDivCeil(T, ROC_BUILTINS ++ "." ++ NUM ++ ".div_ceil.");
}
inline for (INTEGERS) |FROM| {
inline for (INTEGERS) |TO| {
// We're exporting more than we need here, but that's okay.
num.exportToIntCheckingMax(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max.");
num.exportToIntCheckingMaxAndMin(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max_and_min.");
}
}
inline for (FLOATS) |T| {
num.exportAsin(T, ROC_BUILTINS ++ "." ++ NUM ++ ".asin.");
num.exportAcos(T, ROC_BUILTINS ++ "." ++ NUM ++ ".acos.");
@ -147,6 +155,7 @@ comptime {
exportUtilsFn(utils.increfC, "incref");
exportUtilsFn(utils.decrefC, "decref");
exportUtilsFn(utils.decrefCheckNullC, "decref_check_null");
exportUtilsFn(utils.allocateWithRefcountC, "allocate_with_refcount");
exportExpectFn(expect.expectFailedC, "expect_failed");
exportExpectFn(expect.getExpectFailuresC, "get_expect_failures");
exportExpectFn(expect.deinitFailuresC, "deinit_failures");

View file

@ -108,6 +108,39 @@ pub fn exportDivCeil(comptime T: type, comptime name: []const u8) void {
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn ToIntCheckedResult(comptime T: type) type {
// On the Roc side we sort by alignment; putting the error code last
// always works out (no number has a smaller alignment than 1).
return extern struct {
value: T,
out_of_bounds: bool,
};
}
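Illustratively (not part of the diff), the calling side can read this as a `#[repr(C)]` struct; the sketch below assumes `To = i64`, and the struct name is made up:

```rust
// Mirrors ToIntCheckedResult(i64): the wide value first, the 1-byte
// out-of-bounds flag last, matching the "sort by alignment" comment above.
#[repr(C)]
struct I64CheckedResult {
    value: i64,
    out_of_bounds: bool,
}
```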
pub fn exportToIntCheckingMax(comptime From: type, comptime To: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: From) callconv(.C) ToIntCheckedResult(To) {
if (input > std.math.maxInt(To)) {
return .{ .out_of_bounds = true, .value = 0 };
}
return .{ .out_of_bounds = false, .value = @intCast(To, input) };
}
}.func;
@export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong });
}
pub fn exportToIntCheckingMaxAndMin(comptime From: type, comptime To: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: From) callconv(.C) ToIntCheckedResult(To) {
if (input > std.math.maxInt(To) or input < std.math.minInt(To)) {
return .{ .out_of_bounds = true, .value = 0 };
}
return .{ .out_of_bounds = false, .value = @intCast(To, input) };
}
}.func;
@export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong });
}
pub fn bytesToU16C(arg: RocList, position: usize) callconv(.C) u16 {
return @call(.{ .modifier = always_inline }, bytesToU16, .{ arg, position });
}

View file

@ -15,9 +15,11 @@ const InPlace = enum(u8) {
Clone,
};
const SMALL_STR_MAX_LENGTH = small_string_size - 1;
const small_string_size = @sizeOf(RocStr);
const blank_small_string: [@sizeOf(RocStr)]u8 = init_blank_small_string(small_string_size);
const MASK_ISIZE: isize = std.math.minInt(isize);
const MASK: usize = @bitCast(usize, MASK_ISIZE);
const SMALL_STR_MAX_LENGTH = SMALL_STRING_SIZE - 1;
const SMALL_STRING_SIZE = @sizeOf(RocStr);
fn init_blank_small_string(comptime n: usize) [n]u8 {
var prime_list: [n]u8 = undefined;
@ -33,14 +35,15 @@ fn init_blank_small_string(comptime n: usize) [n]u8 {
pub const RocStr = extern struct {
str_bytes: ?[*]u8,
str_len: usize,
str_capacity: usize,
pub const alignment = @alignOf(usize);
pub inline fn empty() RocStr {
const small_str_flag: isize = std.math.minInt(isize);
return RocStr{
.str_len = @bitCast(usize, small_str_flag),
.str_len = 0,
.str_bytes = null,
.str_capacity = MASK,
};
}
@ -59,24 +62,22 @@ pub const RocStr = extern struct {
return RocStr{
.str_bytes = first_element,
.str_len = number_of_chars,
.str_capacity = number_of_chars,
};
}
// allocate space for a (big or small) RocStr, but put nothing in it yet
pub fn allocate(result_in_place: InPlace, number_of_chars: usize) RocStr {
const result_is_big = number_of_chars >= small_string_size;
const result_is_big = number_of_chars >= SMALL_STRING_SIZE;
if (result_is_big) {
return RocStr.initBig(result_in_place, number_of_chars);
} else {
var t = blank_small_string;
var string = RocStr.empty();
const mask: u8 = 0b1000_0000;
const final_byte = @truncate(u8, number_of_chars) | mask;
string.asU8ptr()[@sizeOf(RocStr) - 1] = @intCast(u8, number_of_chars) | 0b1000_0000;
t[small_string_size - 1] = final_byte;
return @bitCast(RocStr, t);
return string;
}
}
@ -203,23 +204,31 @@ pub const RocStr = extern struct {
// NOTE: returns false for empty string!
pub fn isSmallStr(self: RocStr) bool {
return @bitCast(isize, self.str_len) < 0;
return @bitCast(isize, self.str_capacity) < 0;
}
fn asArray(self: RocStr) [@sizeOf(RocStr)]u8 {
const as_int = @ptrToInt(&self);
const as_ptr = @intToPtr([*]u8, as_int);
const slice = as_ptr[0..@sizeOf(RocStr)];
return slice.*;
}
pub fn len(self: RocStr) usize {
const bytes: [*]const u8 = @ptrCast([*]const u8, &self);
const last_byte = bytes[@sizeOf(RocStr) - 1];
const small_len = @as(usize, last_byte ^ 0b1000_0000);
const big_len = self.str_len;
if (self.isSmallStr()) {
return self.asArray()[@sizeOf(RocStr) - 1] ^ 0b1000_0000;
} else {
return self.str_len;
}
}
// Since this conditional would be prone to branch misprediction,
// make sure it will compile to a cmov.
return if (self.isSmallStr()) small_len else big_len;
pub fn capacity(self: RocStr) usize {
return self.str_capacity ^ MASK;
}
pub fn isEmpty(self: RocStr) bool {
const empty_len = comptime RocStr.empty().str_len;
return self.str_len == empty_len;
return self.len() == 0;
}
// If a string happens to be null-terminated already, then we can pass its
@ -268,36 +277,6 @@ pub const RocStr = extern struct {
}
}
// Returns (@sizeOf(RocStr) - 1) for small strings and the empty string.
// Returns 0 for refcounted strings and immortal strings.
// Returns the stored capacity value for all other strings.
pub fn capacity(self: RocStr) usize {
const length = self.len();
const longest_small_str = @sizeOf(RocStr) - 1;
if (length <= longest_small_str) {
// Note that although empty strings technically have the full
// capacity of a small string available, they aren't marked as small
// strings, so if you want to make use of that capacity, you need
// to first change its flag to mark it as a small string!
return longest_small_str;
} else {
const ptr: [*]usize = @ptrCast([*]usize, @alignCast(@alignOf(usize), self.str_bytes));
const capacity_or_refcount: isize = (ptr - 1)[0];
if (capacity_or_refcount > 0) {
// If capacity_or_refcount is positive, that means it's a
// capacity value.
return capacity_or_refcount;
} else {
// This is either a refcount or else this big string is stored
// in a readonly section; either way, it has no capacity,
// because we cannot mutate it in-place!
return 0;
}
}
}
pub fn isUnique(self: RocStr) bool {
// small strings can be copied
if (self.isSmallStr()) {
@ -512,8 +491,12 @@ fn strFromFloatHelp(comptime T: type, float: T) RocStr {
}
// Str.split
pub fn strSplitInPlaceC(array: [*]RocStr, string: RocStr, delimiter: RocStr) callconv(.C) void {
return @call(.{ .modifier = always_inline }, strSplitInPlace, .{ array, string, delimiter });
pub fn strSplitInPlaceC(opt_array: ?[*]RocStr, string: RocStr, delimiter: RocStr) callconv(.C) void {
if (opt_array) |array| {
return @call(.{ .modifier = always_inline }, strSplitInPlace, .{ array, string, delimiter });
} else {
return;
}
}
fn strSplitInPlace(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
@ -1128,7 +1111,11 @@ fn strConcat(arg1: RocStr, arg2: RocStr) RocStr {
@memcpy(new_source + arg1.len(), arg2.asU8ptr(), arg2.len());
return RocStr{ .str_bytes = new_source, .str_len = combined_length };
return RocStr{
.str_bytes = new_source,
.str_len = combined_length,
.str_capacity = combined_length,
};
}
}
@ -1174,11 +1161,18 @@ test "RocStr.concat: small concat small" {
pub const RocListStr = extern struct {
list_elements: ?[*]RocStr,
list_length: usize,
list_capacity: usize,
};
// Str.joinWith
pub fn strJoinWithC(list: RocListStr, separator: RocStr) callconv(.C) RocStr {
return @call(.{ .modifier = always_inline }, strJoinWith, .{ list, separator });
pub fn strJoinWithC(list: RocList, separator: RocStr) callconv(.C) RocStr {
const roc_list_str = RocListStr{
.list_elements = @ptrCast(?[*]RocStr, @alignCast(@alignOf(usize), list.bytes)),
.list_length = list.length,
.list_capacity = list.capacity,
};
return @call(.{ .modifier = always_inline }, strJoinWith, .{ roc_list_str, separator });
}
fn strJoinWith(list: RocListStr, separator: RocStr) RocStr {
@ -1235,7 +1229,11 @@ test "RocStr.joinWith: result is big" {
var roc_result = RocStr.init(result_ptr, result_len);
var elements: [3]RocStr = .{ roc_elem, roc_elem, roc_elem };
const list = RocListStr{ .list_length = 3, .list_elements = @ptrCast([*]RocStr, &elements) };
const list = RocListStr{
.list_length = 3,
.list_capacity = 3,
.list_elements = @ptrCast([*]RocStr, &elements),
};
defer {
roc_sep.deinit();
@ -1264,9 +1262,9 @@ inline fn strToBytes(arg: RocStr) RocList {
@memcpy(ptr, arg.asU8ptr(), length);
return RocList{ .length = length, .bytes = ptr };
return RocList{ .length = length, .bytes = ptr, .capacity = length };
} else {
return RocList{ .length = arg.len(), .bytes = arg.str_bytes };
return RocList{ .length = arg.len(), .bytes = arg.str_bytes, .capacity = arg.str_capacity };
}
}
@ -1308,7 +1306,11 @@ inline fn fromUtf8(arg: RocList, update_mode: UpdateMode) FromUtf8Result {
} else {
const byte_list = arg.makeUniqueExtra(RocStr.alignment, @sizeOf(u8), update_mode);
const string = RocStr{ .str_bytes = byte_list.bytes, .str_len = byte_list.length };
const string = RocStr{
.str_bytes = byte_list.bytes,
.str_len = byte_list.length,
.str_capacity = byte_list.capacity,
};
return FromUtf8Result{
.is_ok = true,
@ -1412,7 +1414,7 @@ pub const Utf8ByteProblem = enum(u8) {
};
fn validateUtf8Bytes(bytes: [*]u8, length: usize) FromUtf8Result {
return fromUtf8(RocList{ .bytes = bytes, .length = length }, .Immutable);
return fromUtf8(RocList{ .bytes = bytes, .length = length, .capacity = length }, .Immutable);
}
fn validateUtf8BytesX(str: RocList) FromUtf8Result {
@ -1824,13 +1826,13 @@ test "strTrim: blank" {
}
test "strTrim: large to large" {
const original_bytes = " hello giant world ";
const original_bytes = " hello even more giant world ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = "hello giant world";
const expected_bytes = "hello even more giant world";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1842,13 +1844,13 @@ test "strTrim: large to large" {
}
test "strTrim: large to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = "hello world";
const expected_bytes = "hello";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1861,13 +1863,13 @@ test "strTrim: large to small" {
}
test "strTrim: small to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(original.isSmallStr());
const expected_bytes = "hello world";
const expected_bytes = "hello";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1895,13 +1897,13 @@ test "strTrimLeft: blank" {
}
test "strTrimLeft: large to large" {
const original_bytes = " hello giant world ";
const original_bytes = " hello even more giant world ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = "hello giant world ";
const expected_bytes = "hello even more giant world ";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1913,13 +1915,13 @@ test "strTrimLeft: large to large" {
}
test "strTrimLeft: large to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = "hello world ";
const expected_bytes = "hello ";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1932,13 +1934,13 @@ test "strTrimLeft: large to small" {
}
test "strTrimLeft: small to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(original.isSmallStr());
const expected_bytes = "hello world ";
const expected_bytes = "hello ";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1966,13 +1968,13 @@ test "strTrimRight: blank" {
}
test "strTrimRight: large to large" {
const original_bytes = " hello giant world ";
const original_bytes = " hello even more giant world ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = " hello giant world";
const expected_bytes = " hello even more giant world";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -1984,13 +1986,13 @@ test "strTrimRight: large to large" {
}
test "strTrimRight: large to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(!original.isSmallStr());
const expected_bytes = " hello world";
const expected_bytes = " hello";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();
@ -2003,13 +2005,13 @@ test "strTrimRight: large to small" {
}
test "strTrimRight: small to small" {
const original_bytes = " hello world ";
const original_bytes = " hello ";
const original = RocStr.init(original_bytes, original_bytes.len);
defer original.deinit();
try expect(original.isSmallStr());
const expected_bytes = " hello world";
const expected_bytes = " hello";
const expected = RocStr.init(expected_bytes, expected_bytes.len);
defer expected.deinit();

View file

@ -1,5 +1,6 @@
const std = @import("std");
const always_inline = std.builtin.CallOptions.Modifier.always_inline;
const Monotonic = std.builtin.AtomicOrder.Monotonic;
pub fn WithOverflow(comptime T: type) type {
return extern struct { value: T, has_overflowed: bool };
@ -38,14 +39,14 @@ fn testing_roc_alloc(size: usize, _: u32) callconv(.C) ?*anyopaque {
}
fn testing_roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*anyopaque {
const ptr = @ptrCast([*]u8, @alignCast(16, c_ptr));
const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
const slice = ptr[0..old_size];
return @ptrCast(?*anyopaque, std.testing.allocator.realloc(slice, new_size) catch unreachable);
}
fn testing_roc_dealloc(c_ptr: *anyopaque, _: u32) callconv(.C) void {
const ptr = @ptrCast([*]u8, @alignCast(16, c_ptr));
const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
std.testing.allocator.destroy(ptr);
}
@ -120,10 +121,32 @@ pub const IntWidth = enum(u8) {
I128 = 9,
};
const Refcount = enum {
none,
normal,
atomic,
};
const RC_TYPE = Refcount.normal;
pub fn increfC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void {
if (RC_TYPE == Refcount.none) return;
var refcount = ptr_to_refcount.*;
var masked_amount = if (refcount == REFCOUNT_MAX_ISIZE) 0 else amount;
ptr_to_refcount.* = refcount + masked_amount;
if (refcount < REFCOUNT_MAX_ISIZE) {
switch (RC_TYPE) {
Refcount.normal => {
ptr_to_refcount.* = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
},
Refcount.atomic => {
var next = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
while (@cmpxchgWeak(isize, ptr_to_refcount, refcount, next, Monotonic, Monotonic)) |found| {
refcount = found;
next = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
}
},
Refcount.none => unreachable,
}
}
}
pub fn decrefC(
@ -169,71 +192,51 @@ inline fn decref_ptr_to_refcount(
refcount_ptr: [*]isize,
alignment: u32,
) void {
const refcount: isize = refcount_ptr[0];
if (RC_TYPE == Refcount.none) return;
const extra_bytes = std.math.max(alignment, @sizeOf(usize));
if (refcount == REFCOUNT_ONE_ISIZE) {
dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
} else if (refcount < 0) {
refcount_ptr[0] = refcount - 1;
switch (RC_TYPE) {
Refcount.normal => {
const refcount: isize = refcount_ptr[0];
if (refcount == REFCOUNT_ONE_ISIZE) {
dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
} else if (refcount < REFCOUNT_MAX_ISIZE) {
refcount_ptr[0] = refcount - 1;
}
},
Refcount.atomic => {
if (refcount_ptr[0] < REFCOUNT_MAX_ISIZE) {
var last = @atomicRmw(isize, &refcount_ptr[0], std.builtin.AtomicRmwOp.Sub, 1, Monotonic);
if (last == REFCOUNT_ONE_ISIZE) {
dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
}
}
},
Refcount.none => unreachable,
}
}
pub fn allocateWithRefcountC(
data_bytes: usize,
element_alignment: u32,
) callconv(.C) [*]u8 {
return allocateWithRefcount(data_bytes, element_alignment);
}
pub fn allocateWithRefcount(
data_bytes: usize,
element_alignment: u32,
) [*]u8 {
const alignment = std.math.max(@sizeOf(usize), element_alignment);
const first_slot_offset = std.math.max(@sizeOf(usize), element_alignment);
const ptr_width = @sizeOf(usize);
const alignment = std.math.max(ptr_width, element_alignment);
const length = alignment + data_bytes;
switch (alignment) {
16 => {
// TODO handle alloc failing!
var new_bytes: [*]align(16) u8 = @alignCast(16, alloc(length, alignment) orelse unreachable);
var new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable;
var as_usize_array = @ptrCast([*]usize, new_bytes);
as_usize_array[0] = 0;
as_usize_array[1] = REFCOUNT_ONE;
const data_ptr = new_bytes + alignment;
const refcount_ptr = @ptrCast([*]usize, @alignCast(ptr_width, data_ptr) - ptr_width);
refcount_ptr[0] = if (RC_TYPE == Refcount.none) REFCOUNT_MAX_ISIZE else REFCOUNT_ONE;
var as_u8_array = @ptrCast([*]u8, new_bytes);
const first_slot = as_u8_array + first_slot_offset;
return first_slot;
},
8 => {
// TODO handle alloc failing!
var raw = alloc(length, alignment) orelse unreachable;
var new_bytes: [*]align(8) u8 = @alignCast(8, raw);
var as_isize_array = @ptrCast([*]isize, new_bytes);
as_isize_array[0] = REFCOUNT_ONE_ISIZE;
var as_u8_array = @ptrCast([*]u8, new_bytes);
const first_slot = as_u8_array + first_slot_offset;
return first_slot;
},
4 => {
// TODO handle alloc failing!
var raw = alloc(length, alignment) orelse unreachable;
var new_bytes: [*]align(@alignOf(isize)) u8 = @alignCast(@alignOf(isize), raw);
var as_isize_array = @ptrCast([*]isize, new_bytes);
as_isize_array[0] = REFCOUNT_ONE_ISIZE;
var as_u8_array = @ptrCast([*]u8, new_bytes);
const first_slot = as_u8_array + first_slot_offset;
return first_slot;
},
else => {
// const stdout = std.io.getStdOut().writer();
// stdout.print("alignment: {d}", .{alignment}) catch unreachable;
// @panic("allocateWithRefcount with invalid alignment");
unreachable;
},
}
return data_ptr;
}
pub const CSlice = extern struct {

View file

@ -7,6 +7,9 @@ use std::path::Path;
use std::process::Command;
use std::str;
/// To debug the zig code with debug prints, we need to disable the wasm code gen
const DEBUG: bool = false;
fn zig_executable() -> String {
match std::env::var("ROC_ZIG") {
Ok(path) => path,
@ -27,24 +30,32 @@ fn main() {
}
// "." is relative to where "build.rs" is
let build_script_dir_path = fs::canonicalize(Path::new(".")).unwrap();
// dunce can be removed once ziglang/zig#5109 is fixed
let build_script_dir_path = dunce::canonicalize(Path::new(".")).unwrap();
let bitcode_path = build_script_dir_path.join("bitcode");
// LLVM .bc FILES
generate_bc_file(&bitcode_path, "ir", "builtins-host");
generate_bc_file(&bitcode_path, "ir-wasm32", "builtins-wasm32");
if !DEBUG {
generate_bc_file(&bitcode_path, "ir-wasm32", "builtins-wasm32");
}
generate_bc_file(&bitcode_path, "ir-i386", "builtins-i386");
// OBJECT FILES
#[cfg(windows)]
const BUILTINS_HOST_FILE: &str = "builtins-host.obj";
#[cfg(not(windows))]
const BUILTINS_HOST_FILE: &str = "builtins-host.o";
generate_object_file(
&bitcode_path,
"BUILTINS_HOST_O",
"object",
"builtins-host.o",
BUILTINS_HOST_FILE,
);
generate_object_file(
@ -84,16 +95,18 @@ fn generate_object_file(
println!("Compiling zig object `{}` to: {}", zig_object, src_obj);
run_command(
&bitcode_path,
&zig_executable(),
&["build", zig_object, "-Drelease=true"],
);
if !DEBUG {
run_command(
&bitcode_path,
&zig_executable(),
&["build", zig_object, "-Drelease=true"],
);
println!("Moving zig object `{}` to: {}", zig_object, dest_obj);
println!("Moving zig object `{}` to: {}", zig_object, dest_obj);
// we store this .o file in rust's `target` folder
run_command(&bitcode_path, "mv", &[src_obj, dest_obj]);
// we store this .o file in rust's `target` folder (for wasm we need to leave a copy here too)
fs::copy(src_obj, dest_obj).expect("Failed to copy object file.");
}
}
fn generate_bc_file(bitcode_path: &Path, zig_object: &str, file_name: &str) {

View file

@ -18,10 +18,118 @@ interface Dict
]
imports []
## A [dictionary](https://en.wikipedia.org/wiki/Associative_array) that lets you associate keys with values.
##
## ### Inserting
##
## The most basic way to use a dictionary is to start with an empty one and then:
## 1. Call [Dict.insert] passing a key and a value, to associate that key with that value in the dictionary.
## 2. Later, call [Dict.get] passing the same key as before, and it will return the value you stored.
##
## Here's an example of a dictionary which uses a city's name as the key, and its population as the associated value.
##
## populationByCity =
## Dict.empty
## |> Dict.insert "London" 8_961_989
## |> Dict.insert "Philadelphia" 1_603_797
## |> Dict.insert "Shanghai" 24_870_895
## |> Dict.insert "Delhi" 16_787_941
## |> Dict.insert "Amsterdam" 872_680
##
## ### Converting to a [List]
##
## We can call [Dict.toList] on `populationByCity` to turn it into a list of key-value pairs:
##
## Dict.toList populationByCity == [
## { k: "London", v: 8961989 },
## { k: "Philadelphia", v: 1603797 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## { k: "Amsterdam", v: 872680 },
## ]
##
## We can use the similar [Dict.keyList] and [Dict.values] functions to get only the keys or only the values,
## instead of getting these `{ k, v }` records that contain both.
##
## You may notice that these lists have the same order as the original insertion order. This will be true if
## all you ever do is [insert] and [get] operations on the dictionary, but [remove] operations can change this order.
## Let's see how that looks.
##
## ### Removing
##
## We can remove an element from the dictionary, like so:
##
## populationByCity
## |> Dict.remove "Philadelphia"
## |> Dict.toList
## ==
## [
## { k: "London", v: 8961989 },
## { k: "Amsterdam", v: 872680 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## ]
##
## Notice that the order changed! Not only has Philadelphia been removed from the list, but Amsterdam, the last
## entry we inserted, has been moved into the spot where Philadelphia was previously. This is exactly what
## [Dict.remove] does: it removes an element and moves the most recent insertion into the vacated spot.
##
## This move is done as a performance optimization, and it lets [remove] have
## [constant time complexity](https://en.wikipedia.org/wiki/Time_complexity#Constant_time). If you need a removal
## operation which preserves ordering, [Dict.removeShift] will remove the element and then shift everything after it
## over one spot. Be aware that this shifting requires copying every single entry after the removed element, though,
## so it can be massively more costly than [remove]! This makes [remove] the recommended default choice;
## [removeShift] should only be used if maintaining original insertion order is absolutely necessary.
##
## ### Equality
##
## When comparing two dictionaries for equality, they are `==` only if both their contents and their
## orderings match. This preserves the property that if `dict1 == dict2`, you should be able to rely on
## `fn dict1 == fn dict2` also being `True`, even if `fn` relies on the dictionary's ordering (for example, if
## `fn` is `Dict.toList` or calls it internally).
##
## The [Dict.hasSameContents] function gives an alternative to `==` which ignores ordering
## and returns `True` if both dictionaries have the same keys and associated values.
Dict k v : [ @Dict k v ] # TODO k should require a hashing and equating constraint
## An empty dictionary.
empty : Dict * *
size : Dict * * -> Nat
isEmpty : Dict * * -> Bool
## Returns a [List] of the dictionary's key/value pairs.
##
## See [walk] to walk over the key/value pairs without creating an intermediate data structure.
toList : Dict k v -> List { k, v }
## Returns a [List] of the dictionary's keys.
##
## See [keySet] to get a [Set] of keys instead, or [walkKeys] to walk over the keys without creating
## an intermediate data structure.
keyList : Dict key * -> List key
## Returns a [Set] of the dictionary's keys.
##
## See [keyList] to get a [List] of keys instead, or [walkKeys] to walk over the keys without creating
## an intermediate data structure.
keySet : Dict key * -> Set key
## Returns a [List] of the dictionary's values.
##
## See [walkValues] to walk over the values without creating an intermediate data structure.
values : Dict * value -> List value
walk : Dict k v, state, (state, k, v -> state) -> state
walkKeys : Dict key *, state, (state, key -> state) -> state
walkValues : Dict * value, state, (state, value -> state) -> state
## Convert each key and value in the #Dict to something new, by calling a conversion
## function on each of them. Then return a new #Map of the converted keys and values.
##
@ -32,9 +140,9 @@ isEmpty : Dict * * -> Bool
## `map` functions like this are common in Roc, and they all work similarly.
## See for example [List.map], [Result.map], and `Set.map`.
map :
Dict beforeKey beforeValue,
({ key: beforeKey, value: beforeValue } -> { key: afterKey, value: afterValue })
-> Dict afterKey afterValue
Dict beforeKey beforeVal,
({ k: beforeKey, v: beforeVal } -> { k: afterKey, v: afterVal })
-> Dict afterKey afterVal
# DESIGN NOTES: The reason for panicking when given NaN is that:
# * If we allowed NaN in, Dict.insert would no longer be idempotent.
@ -47,3 +155,56 @@ map :
## defined to be unequal to *NaN*, inserting a *NaN* key results in an entry
## that can never be retrieved or removed from the [Dict].
insert : Dict key val, key, val -> Dict key val
## Removes a key from the dictionary in [constant time](https://en.wikipedia.org/wiki/Time_complexity#Constant_time), without preserving insertion order.
##
## Since the internal [List] which determines the order of operations like [toList] and [walk] cannot have gaps in it,
## whenever an element is removed from the middle of that list, something must be done to eliminate the resulting gap.
##
## * [removeShift] eliminates the gap by shifting over every element after the removed one. This takes [linear time](https://en.wikipedia.org/wiki/Time_complexity#Linear_time),
## and preserves the original ordering.
## * [remove] eliminates the gap by replacing the removed element with the one at the end of the list - that is, the most recent insertion. This takes [constant time](https://en.wikipedia.org/wiki/Time_complexity#Constant_time), but does not preserve the original ordering.
##
## For example, suppose we have a `populationByCity` with these contents:
##
## Dict.toList populationByCity == [
## { k: "London", v: 8961989 },
## { k: "Philadelphia", v: 1603797 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## { k: "Amsterdam", v: 872680 },
## ]
##
## Using `Dict.remove "Philadelphia"` on this will replace the `"Philadelphia"` entry with the most recent insertion,
## which is `"Amsterdam"` in this case.
##
## populationByCity
## |> Dict.remove "Philadelphia"
## |> Dict.toList
## ==
## [
## { k: "London", v: 8961989 },
## { k: "Amsterdam", v: 872680 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## ]
##
## Both [remove] and [removeShift] leave the dictionary with the same contents; they only differ in ordering and in
## performance. Since ordering only affects operations like [toList] and [walk], [remove] is the better default
## choice because it has much better performance characteristics; [removeShift] should only be used when it's
## absolutely necessary for operations like [toList] and [walk] to preserve the exact original insertion order.
remove : Dict k v, k -> Dict k v
## Removes a key from the dictionary in [linear time](https://en.wikipedia.org/wiki/Time_complexity#Linear_time), while preserving insertion order.
##
## It's better to use [remove] than this by default, since [remove] has [constant time complexity](https://en.wikipedia.org/wiki/Time_complexity#Constant_time),
## which commonly leads [removeShift] to take many times as long to run as [remove] does. However, [remove] does not
## preserve insertion order, so the slower [removeShift] exists only for use cases where it's absolutely necessary for
## ordering-sensitive functions like [toList] and [walk] to preserve the exact original insertion order.
##
## See the [remove] documentation for more details about the differences between [remove] and [removeShift].
removeShift : Dict k v, k -> Dict k v
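
A minimal sketch of the two removal strategies described above, written in Rust over a plain vector of key-value pairs. This is illustrative only, not the actual Roc Dict representation (which also keeps a hash index for lookups), and the helper names here are made up.

    // Swap-removal (the [remove] analogue): after the entry's index is found,
    // Vec::swap_remove moves the last entry into the vacated slot in O(1),
    // which is why later toList-style traversals see the most recent insertion
    // in the removed entry's old position. The linear scan below stands in for
    // the hash lookup a real dictionary would do.
    fn swap_remove_by_key<K: PartialEq, V>(entries: &mut Vec<(K, V)>, key: &K) -> Option<(K, V)> {
        let index = entries.iter().position(|(k, _)| k == key)?;
        Some(entries.swap_remove(index))
    }

    // Shift-removal (the [removeShift] analogue): Vec::remove shifts every
    // later entry left by one, preserving insertion order at O(n) cost.
    fn shift_remove_by_key<K: PartialEq, V>(entries: &mut Vec<(K, V)>, key: &K) -> Option<(K, V)> {
        let index = entries.iter().position(|(k, _)| k == key)?;
        Some(entries.remove(index))
    }
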
## Returns whether both dictionaries have the same keys, and the same values associated with those keys.
## This is different from `==` in that it disregards the ordering of the keys and values.
hasSameContents : Dict k v, Dict k v -> Bool
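
For the same illustrative representation, content equality can be checked without regard to ordering like so. This is a sketch only; `has_same_contents` is a made-up helper, and it assumes keys are unique within each input, as they are in a dictionary.

    // Order-insensitive content equality (the [hasSameContents] analogue),
    // assuming keys are unique within each input.
    // Order-sensitive equality over this representation would simply be a == b.
    fn has_same_contents<K: PartialEq, V: PartialEq>(a: &[(K, V)], b: &[(K, V)]) -> bool {
        a.len() == b.len()
            && a.iter().all(|(k, v)| b.iter().any(|(k2, v2)| k == k2 && v == v2))
    }
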

View file

@ -100,6 +100,28 @@ interface Num
subWrap,
sqrt,
tan,
toI8,
toI8Checked,
toI16,
toI16Checked,
toI32,
toI32Checked,
toI64,
toI64Checked,
toI128,
toI128Checked,
toU8,
toU8Checked,
toU16,
toU16Checked,
toU32,
toU32Checked,
toU64,
toU64Checked,
toU128,
toU128Checked,
toNat,
toNatChecked,
toFloat,
toStr
]
@ -592,6 +614,35 @@ mulCheckOverflow : Num a, Num a -> Result (Num a) [ Overflow ]*
## Convert
## Convert any [Int] to a specifically-sized [Int], without checking validity.
## These are unchecked bitwise operations,
## so if the source number is outside the target range, then these will
## effectively modulo-wrap around the target range to reach a valid value.
toI8 : Int * -> I8
toI16 : Int * -> I16
toI32 : Int * -> I32
toI64 : Int * -> I64
toI128 : Int * -> I128
toU8 : Int * -> U8
toU16 : Int * -> U16
toU32 : Int * -> U32
toU64 : Int * -> U64
toU128 : Int * -> U128
## Convert any [Int] to a specifically-sized [Int], after checking validity.
## These are checked bitwise operations,
## so if the source number is outside the target range, then these will
## return `Err OutOfBounds`.
toI8Checked : Int * -> Result I8 [ OutOfBounds ]*
toI16Checked : Int * -> Result I16 [ OutOfBounds ]*
toI32Checked : Int * -> Result I32 [ OutOfBounds ]*
toI64Checked : Int * -> Result I64 [ OutOfBounds ]*
toI128Checked : Int * -> Result I128 [ OutOfBounds ]*
toU8Checked : Int * -> Result U8 [ OutOfBounds ]*
toU16Checked : Int * -> Result U16 [ OutOfBounds ]*
toU32Checked : Int * -> Result U32 [ OutOfBounds ]*
toU64Checked : Int * -> Result U64 [ OutOfBounds ]*
toU128Checked : Int * -> Result U128 [ OutOfBounds ]*
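
As a rough sketch of the difference between the wrapping and checked conversion families above, here is the analogous behavior in Rust. The function names are invented for illustration; only the wrap-versus-check behavior is the point.

    // Wrapping conversion, like toU8: keep only the low bits, wrapping around
    // the target range. 300 becomes 44 here (300 mod 256).
    fn to_u8_wrapping(n: i64) -> u8 {
        n as u8
    }

    // Checked conversion, like toU8Checked: reject values outside the target
    // range instead of wrapping. 300 becomes None here.
    fn to_u8_checked(n: i64) -> Option<u8> {
        u8::try_from(n).ok()
    }
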
## Convert a number to a [Str].
##
## This is the same as calling `Num.format {}` - so for more details on

View file

@ -392,27 +392,19 @@ toUtf32Le : Str -> List U8
# Parsing
## If the string begins with a valid [extended grapheme cluster](http://www.unicode.org/glossary/#extended_grapheme_cluster),
## return it along with the rest of the string after that grapheme.
## If the bytes begin with a valid [extended grapheme cluster](http://www.unicode.org/glossary/#extended_grapheme_cluster)
## encoded as [UTF-8](https://en.wikipedia.org/wiki/UTF-8), return it along with the number of bytes it took up.
##
## If the string does not begin with a full grapheme, for example because it was
## empty, return `Err`.
parseGrapheme : Str -> Result { val : Str, rest : Str } [ Expected [ Grapheme ]* Str ]*
## If the bytes do not begin with a valid grapheme, for example because the list was
## empty or began with an invalid grapheme, return `Err`.
parseUtf8Grapheme : List U8 -> Result { grapheme : Str, bytesParsed: Nat } [ InvalidGrapheme ]*
## If the string begins with a valid [Unicode code point](http://www.unicode.org/glossary/#code_point),
## return it along with the rest of the string after that code point.
## If the bytes begin with a valid [Unicode code point](http://www.unicode.org/glossary/#code_point)
## encoded as [UTF-8](https://en.wikipedia.org/wiki/UTF-8), return it along with the number of bytes it took up.
##
## If the string does not begin with a valid code point, for example because it was
## empty, return `Err`.
parseCodePt : Str -> Result { val : U32, rest : Str } [ Expected [ CodePt ]* Str ]*
## If the first string begins with the second, return whatever comes
## after the second.
chomp : Str, Str -> Result Str [ Expected [ ExactStr Str ]* Str ]*
## If the string begins with a [Unicode code point](http://www.unicode.org/glossary/#code_point)
## equal to the given [U32], return whatever comes after that code point.
chompCodePt : Str, U32 -> Result Str [ Expected [ ExactCodePt U32 ]* Str ]*
## If the string does not begin with a valid code point, for example because the list was
## empty or began with an invalid code point, return an `Err`.
parseUtf8CodePt : List U8 -> Result { codePt : U32, bytesParsed: Nat } [ InvalidCodePt ]*
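
To make the "code point plus bytes parsed" contract above concrete, here is a rough Rust sketch of decoding the first UTF-8 code point from a byte slice. It is not the builtin's implementation, and the helper name is made up.

    /// Decode the first Unicode code point from UTF-8 bytes, returning the
    /// code point and how many bytes it occupied, or None if the bytes do not
    /// begin with a valid code point (empty input, continuation byte, etc.).
    fn parse_utf8_code_pt(bytes: &[u8]) -> Option<(u32, usize)> {
        // The lead byte determines how many bytes the code point occupies.
        let len = match *bytes.first()? {
            0x00..=0x7F => 1,
            0xC0..=0xDF => 2,
            0xE0..=0xEF => 3,
            0xF0..=0xF7 => 4,
            _ => return None, // continuation byte or invalid lead byte
        };
        let head = bytes.get(..len)?;
        // Let the standard library reject overlong encodings, surrogate
        // halves, and bad continuation bytes.
        let s = std::str::from_utf8(head).ok()?;
        Some((s.chars().next()? as u32, len))
    }
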
## If the string represents a valid [U8] number, return that number.
##

View file

@ -0,0 +1,14 @@
interface Bool
exposes [ Bool, and, or, not, isEq, isNotEq ]
imports [ ]
Bool : [ True, False ]
and : Bool, Bool -> Bool
or : Bool, Bool -> Bool
# xor : Bool, Bool -> Bool # currently unimplemented
not : Bool -> Bool
isEq : a, a -> Bool
isNotEq : a, a -> Bool

View file

@ -0,0 +1,15 @@
interface Box
exposes [ box, unbox ]
imports [ ]
box : a -> Box a
unbox : Box a -> a
# # we'd need reset/reuse for box for this to be efficient
# # that is currently not implemented
# map : Box a, (a -> b) -> Box b
# map = \boxed, transform =
# boxed
# |> Box.unbox
# |> transform
# |> Box.box

View file

@ -0,0 +1,32 @@
interface Dict
exposes
[
empty,
single,
get,
walk,
insert,
len,
remove,
contains,
keys,
values,
union,
intersection,
difference,
]
imports [ ]
empty : Dict k v
single : k, v -> Dict k v
get : Dict k v, k -> Result v [ KeyNotFound ]*
walk : Dict k v, state, (state, k, v -> state) -> state
insert : Dict k v, k, v -> Dict k v
len : Dict k v -> Nat
remove : Dict k v, k -> Dict k v
contains : Dict k v, k -> Bool
keys : Dict k v -> List k
values : Dict k v -> List v
union : Dict k v, Dict k v -> Dict k v
intersection : Dict k v, Dict k v -> Dict k v
difference : Dict k v, Dict k v -> Dict k v

View file

@ -0,0 +1,121 @@
interface List
exposes
[
isEmpty,
get,
set,
replace,
append,
map,
len,
walkBackwards,
concat,
first,
single,
repeat,
reverse,
prepend,
join,
keepIf,
contains,
sum,
walk,
last,
keepOks,
keepErrs,
mapWithIndex,
map2,
map3,
product,
walkUntil,
range,
sortWith,
drop,
swap,
dropAt,
dropLast,
min,
max,
map4,
dropFirst,
joinMap,
any,
takeFirst,
takeLast,
find,
sublist,
intersperse,
split,
all,
dropIf,
sortAsc,
sortDesc,
]
imports [ ]
isEmpty : List a -> Bool
isEmpty = \list ->
List.len list == 0
get : List a, Nat -> Result a [ OutOfBounds ]*
set : List a, Nat, a -> List a
replace : List a, Nat, a -> { list : List a, value : a }
append : List a, a -> List a
prepend : List a, a -> List a
len : List a -> Nat
concat : List a, List a -> List a
last : List a -> Result a [ ListWasEmpty ]*
single : a -> List a
repeat : a, Nat -> List a
reverse : List a -> List a
join : List (List a) -> List a
contains : List a, a -> Bool
walk : List elem, state, (state, elem -> state) -> state
walkBackwards : List elem, state, (state, elem -> state) -> state
walkUntil : List elem, state, (state, elem -> [ Continue state, Stop state ]) -> state
sum : List (Num a) -> Num a
sum = \list ->
List.walk list 0 Num.add
product : List (Num a) -> Num a
product = \list ->
List.walk list 1 Num.mul
any : List a, (a -> Bool) -> Bool
all : List a, (a -> Bool) -> Bool
keepIf : List a, (a -> Bool) -> List a
dropIf : List a, (a -> Bool) -> List a
keepOks : List before, (before -> Result after *) -> List after
keepErrs: List before, (before -> Result * after) -> List after
map : List a, (a -> b) -> List b
map2 : List a, List b, (a, b -> c) -> List c
map3 : List a, List b, List c, (a, b, c -> d) -> List d
map4 : List a, List b, List c, List d, (a, b, c, d -> e) -> List e
mapWithIndex : List a, (a -> b) -> List b
range : Int a, Int a -> List (Int a)
sortWith : List a, (a, a -> [ LT, EQ, GT ] ) -> List a
sortAsc : List (Num a) -> List (Num a)
sortAsc = \list -> List.sortWith list Num.compare
sortDesc : List (Num a) -> List (Num a)
sortDesc = \list -> List.sortWith list (\a, b -> Num.compare b a)
swap : List a, Nat, Nat -> List a
first : List a -> Result a [ ListWasEmpty ]*
dropFirst : List elem -> List elem
dropLast : List elem -> List elem
takeFirst : List elem, Nat -> List elem
takeLast : List elem, Nat -> List elem
drop : List elem, Nat -> List elem
dropAt : List elem, Nat -> List elem
min : List (Num a) -> Result (Num a) [ ListWasEmpty ]*
max : List (Num a) -> Result (Num a) [ ListWasEmpty ]*
joinMap : List a, (a -> List b) -> List b
find : List elem, (elem -> Bool) -> Result elem [ NotFound ]*
sublist : List elem, { start : Nat, len : Nat } -> List elem
intersperse : List elem, elem -> List elem
split : List elem, Nat -> { before: List elem, others: List elem }

View file

@ -0,0 +1,348 @@
interface Num
exposes
[
Num,
Int,
Float,
Integer,
FloatingPoint,
I128,
I64,
I32,
I16,
I8,
U128,
U64,
U32,
U16,
U8,
Signed128,
Signed64,
Signed32,
Signed16,
Signed8,
Unsigned128,
Unsigned64,
Unsigned32,
Unsigned16,
Unsigned8,
Nat,
Dec,
F32,
F64,
Natural,
Decimal,
Binary32,
Binary64,
maxFloat,
minFloat,
abs,
neg,
add,
sub,
mul,
isLt,
isLte,
isGt,
isGte,
sin,
cos,
tan,
atan,
acos,
asin,
isZero,
isEven,
isOdd,
toFloat,
isPositive,
isNegative,
rem,
div,
modInt,
modFloat,
sqrt,
log,
round,
ceiling,
floor,
compare,
pow,
powInt,
addWrap,
addChecked,
addSaturated,
bitwiseAnd,
bitwiseXor,
bitwiseOr,
shiftLeftBy,
shiftRightBy,
shiftRightZfBy,
subWrap,
subChecked,
subSaturated,
mulWrap,
mulChecked,
intCast,
bytesToU16,
bytesToU32,
divCeil,
divFloor,
toStr,
isMultipleOf,
minI8,
maxI8,
minU8,
maxU8,
minI16,
maxI16,
minU16,
maxU16,
minI32,
maxI32,
minU32,
maxU32,
minI64,
maxI64,
minU64,
maxU64,
minI128,
maxI128,
minU128,
maxU128,
toI8,
toI8Checked,
toI16,
toI16Checked,
toI32,
toI32Checked,
toI64,
toI64Checked,
toI128,
toI128Checked,
toU8,
toU8Checked,
toU16,
toU16Checked,
toU32,
toU32Checked,
toU64,
toU64Checked,
toU128,
toU128Checked,
]
imports [ ]
Num range : [ @Num range ]
Int range : Num (Integer range)
Float range : Num (FloatingPoint range)
Signed128 : [ @Signed128 ]
Signed64 : [ @Signed64 ]
Signed32 : [ @Signed32 ]
Signed16 : [ @Signed16 ]
Signed8 : [ @Signed8 ]
Unsigned128 : [ @Unsigned128 ]
Unsigned64 : [ @Unsigned64 ]
Unsigned32 : [ @Unsigned32 ]
Unsigned16 : [ @Unsigned16 ]
Unsigned8 : [ @Unsigned8 ]
Natural : [ @Natural ]
Integer range : [ @Integer range ]
I128 : Num (Integer Signed128)
I64 : Num (Integer Signed64)
I32 : Num (Integer Signed32)
I16 : Num (Integer Signed16)
I8 : Int Signed8
U128 : Num (Integer Unsigned128)
U64 : Num (Integer Unsigned64)
U32 : Num (Integer Unsigned32)
U16 : Num (Integer Unsigned16)
U8 : Num (Integer Unsigned8)
Nat : Num (Integer Natural)
Decimal : [ @Decimal ]
Binary64 : [ @Binary64 ]
Binary32 : [ @Binary32 ]
FloatingPoint range : [ @FloatingPoint range ]
F64 : Num (FloatingPoint Binary64)
F32 : Num (FloatingPoint Binary32)
Dec : Num (FloatingPoint Decimal)
# ------- Functions
toStr : Num * -> Str
intCast : Int a -> Int b
bytesToU16 : List U8, Nat -> Result U16 [ OutOfBounds ]
bytesToU32 : List U8, Nat -> Result U32 [ OutOfBounds ]
compare : Num a, Num a -> [ LT, EQ, GT ]
isLt : Num a, Num a -> Bool
isGt : Num a, Num a -> Bool
isLte : Num a, Num a -> Bool
isGte : Num a, Num a -> Bool
isZero : Num a -> Bool
isEven : Int a -> Bool
isOdd : Int a -> Bool
isPositive : Num a -> Bool
isNegative : Num a -> Bool
toFloat : Num * -> Float *
abs : Num a -> Num a
neg : Num a -> Num a
add : Num a, Num a -> Num a
sub : Num a, Num a -> Num a
mul : Num a, Num a -> Num a
sin : Float a -> Float a
cos : Float a -> Float a
tan : Float a -> Float a
asin : Float a -> Float a
acos : Float a -> Float a
atan : Float a -> Float a
sqrt : Float a -> Result (Float a) [ SqrtOfNegative ]*
log : Float a -> Result (Float a) [ LogNeedsPositive ]*
div : Float a, Float a -> Result (Float a) [ DivByZero ]*
divCeil: Int a, Int a -> Result (Int a) [ DivByZero ]*
divFloor: Int a, Int a -> Result (Int a) [ DivByZero ]*
# mod : Float a, Float a -> Result (Float a) [ DivByZero ]*
rem : Int a, Int a -> Result (Int a) [ DivByZero ]*
# mod : Int a, Int a -> Result (Int a) [ DivByZero ]*
isMultipleOf : Int a, Int a -> Bool
bitwiseAnd : Int a, Int a -> Int a
bitwiseXor : Int a, Int a -> Int a
bitwiseOr : Int a, Int a -> Int a
shiftLeftBy : Int a, Int a -> Int a
shiftRightBy : Int a, Int a -> Int a
shiftRightZfBy : Int a, Int a -> Int a
round : Float * -> Int *
floor : Float * -> Int *
ceiling : Float * -> Int *
pow : Float a, Float a -> Float a
powInt : Int a, Int a -> Int a
addWrap : Int range, Int range -> Int range
addSaturated : Num a, Num a -> Num a
addChecked : Num a, Num a -> Result (Num a) [ Overflow ]*
subWrap : Int range, Int range -> Int range
subSaturated : Num a, Num a -> Num a
subChecked : Num a, Num a -> Result (Num a) [ Overflow ]*
mulWrap : Int range, Int range -> Int range
# mulSaturated : Num a, Num a -> Num a
mulChecked : Num a, Num a -> Result (Num a) [ Overflow ]*
minI8 : I8
minI8 = -128i8
maxI8 : I8
maxI8 = 127i8
minU8 : U8
minU8 = 0u8
maxU8 : U8
maxU8 = 255u8
minI16 : I16
minI16 = -32768i16
maxI16 : I16
maxI16 = 32767i16
minU16 : U16
minU16 = 0u16
maxU16 : U16
maxU16 = 65535u16
minI32 : I32
minI32 = -2147483648
maxI32 : I32
maxI32 = 2147483647
minU32 : U32
minU32 = 0
maxU32 : U32
maxU32 = 4294967295
minI64 : I64
minI64 = -9223372036854775808
maxI64 : I64
maxI64 = 9223372036854775807
minU64 : U64
minU64 = 0
maxU64 : U64
maxU64 = 18446744073709551615
minI128 : I128
minI128 = -170141183460469231731687303715884105728
maxI128 : I128
maxI128 = 170141183460469231731687303715884105727
minU128 : U128
minU128 = 0
maxU128 : U128
maxU128 = 340282366920938463463374607431768211455
toI8 : Int * -> I8
toI16 : Int * -> I16
toI32 : Int * -> I32
toI64 : Int * -> I64
toI128 : Int * -> I128
toU8 : Int * -> U8
toU16 : Int * -> U16
toU32 : Int * -> U32
toU64 : Int * -> U64
toU128 : Int * -> U128
toI8Checked : Int * -> Result I8 [ OutOfBounds ]*
toI16Checked : Int * -> Result I16 [ OutOfBounds ]*
toI32Checked : Int * -> Result I32 [ OutOfBounds ]*
toI64Checked : Int * -> Result I64 [ OutOfBounds ]*
toI128Checked : Int * -> Result I128 [ OutOfBounds ]*
toU8Checked : Int * -> Result U8 [ OutOfBounds ]*
toU16Checked : Int * -> Result U16 [ OutOfBounds ]*
toU32Checked : Int * -> Result U32 [ OutOfBounds ]*
toU64Checked : Int * -> Result U64 [ OutOfBounds ]*
toU128Checked : Int * -> Result U128 [ OutOfBounds ]*

View file

@ -0,0 +1,41 @@
interface Result
exposes [ Result, isOk, isErr, map, mapErr, after, withDefault ]
imports [ ]
Result ok err : [ Ok ok, Err err ]
isOk : Result ok err -> Bool
isOk = \result ->
when result is
Ok _ -> True
Err _ -> False
isErr : Result ok err -> Bool
isErr = \result ->
when result is
Ok _ -> False
Err _ -> True
withDefault : Result ok err, ok -> ok
withDefault = \result, default ->
when result is
Ok value -> value
Err _ -> default
map : Result a err, (a -> b) -> Result b err
map = \result, transform ->
when result is
Ok v -> Ok (transform v)
Err e -> Err e
mapErr : Result ok a, (a -> b) -> Result ok b
mapErr = \result, transform ->
when result is
Ok v -> Ok v
Err e -> Err (transform e)
after : Result a err, (a -> Result b err) -> Result b err
after = \result, transform ->
when result is
Ok v -> transform v
Err e -> Err e

View file

@ -0,0 +1,38 @@
interface Set
exposes
[
empty,
single,
walk,
insert,
len,
remove,
contains,
toList,
fromList,
union,
intersection,
difference,
]
imports [ ]
empty : Set k
single : k -> Set k
insert : Set k, k -> Set k
len : Set k -> Nat
remove : Set k, k -> Set k
contains : Set k, k -> Bool
# toList = \set -> Dict.keys (toDict set)
toList : Set k -> List k
fromList : List k -> Set k
union : Set k, Set k -> Set k
intersection : Set k, Set k -> Set k
difference : Set k, Set k -> Set k
toDict : Set k -> Dict k {}
walk : Set k, state, (state, k -> state) -> state
walk = \set, state, step ->
Dict.walk (toDict set) state (\s, k, _ -> step s k)

View file

@ -0,0 +1,90 @@
interface Str
exposes
[
concat,
Utf8Problem,
Utf8ByteProblem,
isEmpty,
joinWith,
split,
repeat,
countGraphemes,
startsWithCodePt,
toUtf8,
fromUtf8,
fromUtf8Range,
startsWith,
endsWith,
trim,
trimLeft,
trimRight,
toDec,
toF64,
toF32,
toNat,
toU128,
toI128,
toU64,
toI64,
toU32,
toI32,
toU16,
toI16,
toU8,
toI8,
]
imports [ ]
Utf8ByteProblem :
[
InvalidStartByte,
UnexpectedEndOfSequence,
ExpectedContinuation,
OverlongEncoding,
CodepointTooLarge,
EncodesSurrogateHalf,
]
Utf8Problem : { byteIndex : Nat, problem : Utf8ByteProblem }
isEmpty : Str -> Bool
concat : Str, Str -> Str
joinWith : List Str, Str -> Str
split : Str, Str -> List Str
repeat : Str, Nat -> Str
countGraphemes : Str -> Nat
startsWithCodePt : Str, U32 -> Bool
toUtf8 : Str -> List U8
# fromUtf8 : List U8 -> Result Str [ BadUtf8 Utf8Problem ]*
# fromUtf8Range : List U8 -> Result Str [ BadUtf8 Utf8Problem Nat, OutOfBounds ]*
fromUtf8 : List U8 -> Result Str [ BadUtf8 Utf8ByteProblem Nat ]*
fromUtf8Range : List U8, { start : Nat, count : Nat } -> Result Str [ BadUtf8 Utf8ByteProblem Nat, OutOfBounds ]*
startsWith : Str, Str -> Bool
endsWith : Str, Str -> Bool
trim : Str -> Str
trimLeft : Str -> Str
trimRight : Str -> Str
toDec : Str -> Result Dec [ InvalidNumStr ]*
toF64 : Str -> Result F64 [ InvalidNumStr ]*
toF32 : Str -> Result F32 [ InvalidNumStr ]*
toNat : Str -> Result Nat [ InvalidNumStr ]*
toU128 : Str -> Result U128 [ InvalidNumStr ]*
toI128 : Str -> Result I128 [ InvalidNumStr ]*
toU64 : Str -> Result U64 [ InvalidNumStr ]*
toI64 : Str -> Result I64 [ InvalidNumStr ]*
toU32 : Str -> Result U32 [ InvalidNumStr ]*
toI32 : Str -> Result I32 [ InvalidNumStr ]*
toU16 : Str -> Result U16 [ InvalidNumStr ]*
toI16 : Str -> Result I16 [ InvalidNumStr ]*
toU8 : Str -> Result U8 [ InvalidNumStr ]*
toI8 : Str -> Result I8 [ InvalidNumStr ]*

View file

@ -12,7 +12,7 @@ pub const BUILTINS_WASM32_OBJ_PATH: &str = env!(
"Env var BUILTINS_WASM32_O not found. Is there a problem with the build script?"
);
#[derive(Debug, Default)]
#[derive(Debug, Default, Copy, Clone)]
pub struct IntrinsicName {
pub options: [&'static str; 14],
}
@ -159,6 +159,21 @@ impl IntWidth {
_ => None,
}
}
pub const fn type_name(&self) -> &'static str {
match self {
Self::I8 => "i8",
Self::I16 => "i16",
Self::I32 => "i32",
Self::I64 => "i64",
Self::I128 => "i128",
Self::U8 => "u8",
Self::U16 => "u16",
Self::U32 => "u32",
Self::U64 => "u64",
Self::U128 => "u128",
}
}
}
impl Index<DecWidth> for IntrinsicName {
@ -214,11 +229,12 @@ macro_rules! float_intrinsic {
}
#[macro_export]
macro_rules! int_intrinsic {
macro_rules! llvm_int_intrinsic {
($signed_name:literal, $unsigned_name:literal) => {{
let mut output = IntrinsicName::default();
// The indices align with the `Index` impl for `IntrinsicName`.
// LLVM uses the same types for both signed and unsigned integers.
output.options[4] = concat!($unsigned_name, ".i8");
output.options[5] = concat!($unsigned_name, ".i16");
output.options[6] = concat!($unsigned_name, ".i32");
@ -239,6 +255,28 @@ macro_rules! int_intrinsic {
};
}
#[macro_export]
macro_rules! int_intrinsic {
($name:expr) => {{
let mut output = IntrinsicName::default();
// The indices align with the `Index` impl for `IntrinsicName`.
output.options[4] = concat!($name, ".u8");
output.options[5] = concat!($name, ".u16");
output.options[6] = concat!($name, ".u32");
output.options[7] = concat!($name, ".u64");
output.options[8] = concat!($name, ".u128");
output.options[9] = concat!($name, ".i8");
output.options[10] = concat!($name, ".i16");
output.options[11] = concat!($name, ".i32");
output.options[12] = concat!($name, ".i64");
output.options[13] = concat!($name, ".i128");
output
}};
}
pub const NUM_ASIN: IntrinsicName = float_intrinsic!("roc_builtins.num.asin");
pub const NUM_ACOS: IntrinsicName = float_intrinsic!("roc_builtins.num.acos");
pub const NUM_ATAN: IntrinsicName = float_intrinsic!("roc_builtins.num.atan");
@ -316,8 +354,8 @@ pub const LIST_RANGE: &str = "roc_builtins.list.range";
pub const LIST_REVERSE: &str = "roc_builtins.list.reverse";
pub const LIST_SORT_WITH: &str = "roc_builtins.list.sort_with";
pub const LIST_CONCAT: &str = "roc_builtins.list.concat";
pub const LIST_SET: &str = "roc_builtins.list.set";
pub const LIST_SET_IN_PLACE: &str = "roc_builtins.list.set_in_place";
pub const LIST_REPLACE: &str = "roc_builtins.list.replace";
pub const LIST_REPLACE_IN_PLACE: &str = "roc_builtins.list.replace_in_place";
pub const LIST_ANY: &str = "roc_builtins.list.any";
pub const LIST_ALL: &str = "roc_builtins.list.all";
pub const LIST_FIND_UNSAFE: &str = "roc_builtins.list.find_unsafe";
@ -333,9 +371,57 @@ pub const DEC_MUL_WITH_OVERFLOW: &str = "roc_builtins.dec.mul_with_overflow";
pub const DEC_DIV: &str = "roc_builtins.dec.div";
pub const UTILS_TEST_PANIC: &str = "roc_builtins.utils.test_panic";
pub const UTILS_ALLOCATE_WITH_REFCOUNT: &str = "roc_builtins.utils.allocate_with_refcount";
pub const UTILS_INCREF: &str = "roc_builtins.utils.incref";
pub const UTILS_DECREF: &str = "roc_builtins.utils.decref";
pub const UTILS_DECREF_CHECK_NULL: &str = "roc_builtins.utils.decref_check_null";
pub const UTILS_EXPECT_FAILED: &str = "roc_builtins.expect.expect_failed";
pub const UTILS_GET_EXPECT_FAILURES: &str = "roc_builtins.expect.get_expect_failures";
pub const UTILS_DEINIT_FAILURES: &str = "roc_builtins.expect.deinit_failures";
#[derive(Debug, Default)]
pub struct IntToIntrinsicName {
pub options: [IntrinsicName; 10],
}
impl IntToIntrinsicName {
pub const fn default() -> Self {
Self {
options: [IntrinsicName::default(); 10],
}
}
}
impl Index<IntWidth> for IntToIntrinsicName {
type Output = IntrinsicName;
fn index(&self, index: IntWidth) -> &Self::Output {
&self.options[index as usize]
}
}
#[macro_export]
macro_rules! int_to_int_intrinsic {
($name_prefix:literal, $name_suffix:literal) => {{
let mut output = IntToIntrinsicName::default();
output.options[0] = int_intrinsic!(concat!($name_prefix, "u8", $name_suffix));
output.options[1] = int_intrinsic!(concat!($name_prefix, "u16", $name_suffix));
output.options[2] = int_intrinsic!(concat!($name_prefix, "u32", $name_suffix));
output.options[3] = int_intrinsic!(concat!($name_prefix, "u64", $name_suffix));
output.options[4] = int_intrinsic!(concat!($name_prefix, "u128", $name_suffix));
output.options[5] = int_intrinsic!(concat!($name_prefix, "i8", $name_suffix));
output.options[6] = int_intrinsic!(concat!($name_prefix, "i16", $name_suffix));
output.options[7] = int_intrinsic!(concat!($name_prefix, "i32", $name_suffix));
output.options[8] = int_intrinsic!(concat!($name_prefix, "i64", $name_suffix));
output.options[9] = int_intrinsic!(concat!($name_prefix, "i128", $name_suffix));
output
}};
}
pub const NUM_INT_TO_INT_CHECKING_MAX: IntToIntrinsicName =
int_to_int_intrinsic!("roc_builtins.num.int_to_", "_checking_max");
pub const NUM_INT_TO_INT_CHECKING_MAX_AND_MIN: IntToIntrinsicName =
int_to_int_intrinsic!("roc_builtins.num.int_to_", "_checking_max_and_min");

View file

@ -2,4 +2,5 @@
// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
#![allow(clippy::large_enum_variant)]
pub mod bitcode;
pub mod roc;
pub mod std;

View file

@ -0,0 +1,28 @@
use roc_module::symbol::ModuleId;
#[inline(always)]
pub fn module_source(module_id: ModuleId) -> &'static str {
match module_id {
ModuleId::RESULT => RESULT,
ModuleId::NUM => NUM,
ModuleId::STR => STR,
ModuleId::LIST => LIST,
ModuleId::DICT => DICT,
ModuleId::SET => SET,
ModuleId::BOX => BOX,
ModuleId::BOOL => BOOL,
_ => panic!(
"ModuleId {:?} is not part of the standard library",
module_id
),
}
}
const RESULT: &str = include_str!("../roc/Result.roc");
const NUM: &str = include_str!("../roc/Num.roc");
const STR: &str = include_str!("../roc/Str.roc");
const LIST: &str = include_str!("../roc/List.roc");
const DICT: &str = include_str!("../roc/Dict.roc");
const SET: &str = include_str!("../roc/Set.roc");
const BOX: &str = include_str!("../roc/Box.roc");
const BOOL: &str = include_str!("../roc/Bool.roc");

View file

@ -3,16 +3,25 @@ use roc_module::ident::TagName;
use roc_module::symbol::Symbol;
use roc_region::all::Region;
use roc_types::builtin_aliases::{
bool_type, dec_type, dict_type, f32_type, f64_type, float_type, i128_type, i16_type, i32_type,
i64_type, i8_type, int_type, list_type, nat_type, num_type, ordering_type, result_type,
set_type, str_type, str_utf8_byte_problem_type, u128_type, u16_type, u32_type, u64_type,
u8_type,
bool_type, box_type, dec_type, dict_type, f32_type, f64_type, float_type, i128_type, i16_type,
i32_type, i64_type, i8_type, int_type, list_type, nat_type, num_type, ordering_type,
result_type, set_type, str_type, str_utf8_byte_problem_type, u128_type, u16_type, u32_type,
u64_type, u8_type,
};
use roc_types::solved_types::SolvedType;
use roc_types::subs::VarId;
use roc_types::types::RecordField;
use std::collections::HashMap;
lazy_static::lazy_static! {
static ref STDLIB: StdLib = standard_stdlib();
}
/// A global static that stores our initialized standard library definitions
pub fn borrow_stdlib() -> &'static StdLib {
&STDLIB
}
/// Example:
///
/// let_tvars! { a, b, c }
@ -445,6 +454,170 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
// maxI128 : I128
add_type!(Symbol::NUM_MAX_I128, i128_type());
// toI8 : Int * -> I8
add_top_level_function_type!(
Symbol::NUM_TO_I8,
vec![int_type(flex(TVAR1))],
Box::new(i8_type()),
);
let out_of_bounds = SolvedType::TagUnion(
vec![(TagName::Global("OutOfBounds".into()), vec![])],
Box::new(SolvedType::Wildcard),
);
// toI8Checked : Int * -> Result I8 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_I8_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(i8_type(), out_of_bounds.clone())),
);
// toI16 : Int * -> I16
add_top_level_function_type!(
Symbol::NUM_TO_I16,
vec![int_type(flex(TVAR1))],
Box::new(i16_type()),
);
// toI16Checked : Int * -> Result I16 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_I16_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(i16_type(), out_of_bounds.clone())),
);
// toI32 : Int * -> I32
add_top_level_function_type!(
Symbol::NUM_TO_I32,
vec![int_type(flex(TVAR1))],
Box::new(i32_type()),
);
// toI32Checked : Int * -> Result I32 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_I32_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(i32_type(), out_of_bounds.clone())),
);
// toI64 : Int * -> I64
add_top_level_function_type!(
Symbol::NUM_TO_I64,
vec![int_type(flex(TVAR1))],
Box::new(i64_type()),
);
// toI64Checked : Int * -> Result I64 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_I64_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(i64_type(), out_of_bounds.clone())),
);
// toI128 : Int * -> I128
add_top_level_function_type!(
Symbol::NUM_TO_I128,
vec![int_type(flex(TVAR1))],
Box::new(i128_type()),
);
// toI128Checked : Int * -> Result I128 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_I128_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(i128_type(), out_of_bounds)),
);
// toU8 : Int * -> U8
add_top_level_function_type!(
Symbol::NUM_TO_U8,
vec![int_type(flex(TVAR1))],
Box::new(u8_type()),
);
let out_of_bounds = SolvedType::TagUnion(
vec![(TagName::Global("OutOfBounds".into()), vec![])],
Box::new(SolvedType::Wildcard),
);
// toU8Checked : Int * -> Result U8 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_U8_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(u8_type(), out_of_bounds.clone())),
);
// toU16 : Int * -> U16
add_top_level_function_type!(
Symbol::NUM_TO_U16,
vec![int_type(flex(TVAR1))],
Box::new(u16_type()),
);
// toU16Checked : Int * -> Result U16 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_U16_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(u16_type(), out_of_bounds.clone())),
);
// toU32 : Int * -> U32
add_top_level_function_type!(
Symbol::NUM_TO_U32,
vec![int_type(flex(TVAR1))],
Box::new(u32_type()),
);
// toU32Checked : Int * -> Result U32 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_U32_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(u32_type(), out_of_bounds.clone())),
);
// toU64 : Int * -> U64
add_top_level_function_type!(
Symbol::NUM_TO_U64,
vec![int_type(flex(TVAR1))],
Box::new(u64_type()),
);
// toU64Checked : Int * -> Result U64 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_U64_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(u64_type(), out_of_bounds.clone())),
);
// toU128 : Int * -> U128
add_top_level_function_type!(
Symbol::NUM_TO_U128,
vec![int_type(flex(TVAR1))],
Box::new(u128_type()),
);
// toU128Checked : Int * -> Result U128 [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_U128_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(u128_type(), out_of_bounds.clone())),
);
// toNat : Int * -> Nat
add_top_level_function_type!(
Symbol::NUM_TO_NAT,
vec![int_type(flex(TVAR1))],
Box::new(nat_type()),
);
// toNatChecked : Int * -> Result Nat [ OutOfBounds ]*
add_top_level_function_type!(
Symbol::NUM_TO_NAT_CHECKED,
vec![int_type(flex(TVAR1))],
Box::new(result_type(nat_type(), out_of_bounds)),
);
// toStr : Num a -> Str
add_top_level_function_type!(
Symbol::NUM_TO_STR,
@ -906,6 +1079,19 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
Box::new(result_type(flex(TVAR1), list_was_empty.clone())),
);
// replace : List elem, Nat, elem -> { list: List elem, value: elem }
add_top_level_function_type!(
Symbol::LIST_REPLACE,
vec![list_type(flex(TVAR1)), nat_type(), flex(TVAR1)],
Box::new(SolvedType::Record {
fields: vec![
("list".into(), RecordField::Required(list_type(flex(TVAR1)))),
("value".into(), RecordField::Required(flex(TVAR1))),
],
ext: Box::new(SolvedType::EmptyRecord),
}),
);
// set : List elem, Nat, elem -> List elem
add_top_level_function_type!(
Symbol::LIST_SET,
@ -1619,6 +1805,20 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
Box::new(bool_type()),
);
// Box.box : a -> Box a
add_top_level_function_type!(
Symbol::BOX_BOX_FUNCTION,
vec![flex(TVAR1)],
Box::new(box_type(flex(TVAR1))),
);
// Box.unbox : Box a -> a
add_top_level_function_type!(
Symbol::BOX_UNBOX,
vec![box_type(flex(TVAR1))],
Box::new(flex(TVAR1)),
);
types
}

View file

@ -7,6 +7,7 @@ edition = "2018"
[dependencies]
roc_collections = { path = "../collections" }
roc_error_macros = { path = "../../error_macros" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_parse = { path = "../parse" }
@ -15,6 +16,7 @@ roc_types = { path = "../types" }
roc_builtins = { path = "../builtins" }
ven_graph = { path = "../../vendor/pathfinding" }
bumpalo = { version = "3.8.0", features = ["collections"] }
static_assertions = "1.1.0"
[dev-dependencies]
pretty_assertions = "1.0.0"

View file

@ -1,12 +1,15 @@
use crate::env::Env;
use crate::scope::Scope;
use roc_collections::all::{ImMap, MutMap, MutSet, SendMap};
use roc_error_macros::todo_abilities;
use roc_module::ident::{Ident, Lowercase, TagName};
use roc_module::symbol::{IdentIds, ModuleId, Symbol};
use roc_parse::ast::{AliasHeader, AssignedField, Pattern, Tag, TypeAnnotation};
use roc_parse::ast::{AssignedField, Pattern, Tag, TypeAnnotation, TypeHeader};
use roc_region::all::{Loc, Region};
use roc_types::subs::{VarStore, Variable};
use roc_types::types::{Alias, LambdaSet, Problem, RecordField, Type};
use roc_types::types::{
Alias, AliasCommon, AliasKind, LambdaSet, Problem, RecordField, Type, TypeExtension,
};
#[derive(Clone, Debug, PartialEq)]
pub struct Annotation {
@ -16,52 +19,90 @@ pub struct Annotation {
pub aliases: SendMap<Symbol, Alias>,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct NamedVariable {
pub variable: Variable,
pub name: Lowercase,
// NB: there may be multiple occurrences of a variable
pub first_seen: Region,
}
#[derive(Clone, Debug, PartialEq, Default)]
pub struct IntroducedVariables {
// NOTE on rigids
//
// Rigids must be unique within a type annotation.
// E.g. in `identity : a -> a`, there should only be one
// variable (a rigid one, with name "a").
// Hence `rigids : ImMap<Lowercase, Variable>`
//
// But then between annotations, the same name can occur multiple times,
// but a variable can only have one name. Therefore
// `ftv : SendMap<Variable, Lowercase>`.
pub wildcards: Vec<Variable>,
pub var_by_name: SendMap<Lowercase, Variable>,
pub name_by_var: SendMap<Variable, Lowercase>,
pub wildcards: Vec<Loc<Variable>>,
pub lambda_sets: Vec<Variable>,
pub inferred: Vec<Loc<Variable>>,
pub named: Vec<NamedVariable>,
pub host_exposed_aliases: MutMap<Symbol, Variable>,
}
impl IntroducedVariables {
pub fn insert_named(&mut self, name: Lowercase, var: Variable) {
self.var_by_name.insert(name.clone(), var);
self.name_by_var.insert(var, name);
pub fn insert_named(&mut self, name: Lowercase, var: Loc<Variable>) {
debug_assert!(!self
.named
.iter()
.any(|nv| nv.name == name || nv.variable == var.value));
let named_variable = NamedVariable {
name,
variable: var.value,
first_seen: var.region,
};
self.named.push(named_variable);
}
pub fn insert_wildcard(&mut self, var: Variable) {
pub fn insert_wildcard(&mut self, var: Loc<Variable>) {
self.wildcards.push(var);
}
pub fn insert_inferred(&mut self, var: Loc<Variable>) {
self.inferred.push(var);
}
fn insert_lambda_set(&mut self, var: Variable) {
self.lambda_sets.push(var);
}
pub fn insert_host_exposed_alias(&mut self, symbol: Symbol, var: Variable) {
self.host_exposed_aliases.insert(symbol, var);
}
pub fn union(&mut self, other: &Self) {
self.wildcards.extend(other.wildcards.iter().cloned());
self.var_by_name.extend(other.var_by_name.clone());
self.name_by_var.extend(other.name_by_var.clone());
self.wildcards.extend(other.wildcards.iter().copied());
self.lambda_sets.extend(other.lambda_sets.iter().copied());
self.inferred.extend(other.inferred.iter().copied());
self.host_exposed_aliases
.extend(other.host_exposed_aliases.clone());
self.named.extend(other.named.iter().cloned());
self.named.sort();
self.named.dedup();
}
pub fn union_owned(&mut self, other: Self) {
self.wildcards.extend(other.wildcards);
self.lambda_sets.extend(other.lambda_sets);
self.inferred.extend(other.inferred);
self.host_exposed_aliases.extend(other.host_exposed_aliases);
self.named.extend(other.named);
self.named.sort();
self.named.dedup();
}
pub fn var_by_name(&self, name: &Lowercase) -> Option<&Variable> {
self.var_by_name.get(name)
self.named
.iter()
.find(|nv| &nv.name == name)
.map(|nv| &nv.variable)
}
pub fn name_by_var(&self, var: Variable) -> Option<&Lowercase> {
self.name_by_var.get(&var)
self.named
.iter()
.find(|nv| nv.variable == var)
.map(|nv| &nv.name)
}
}
@ -130,13 +171,20 @@ fn make_apply_symbol(
// it was imported but it doesn't expose this ident.
env.problem(roc_problem::can::Problem::RuntimeError(problem));
Err(Type::Erroneous(Problem::UnrecognizedIdent((*ident).into())))
// A failed import should have already been reported through
// roc_can::env::Env::qualified_lookup's checks
Err(Type::Erroneous(Problem::SolvedTypeError))
}
}
}
}
pub fn find_alias_symbols(
/// Retrieves all symbols in an annotation that reference a type definition, that is, either an
/// alias or an opaque type.
///
/// For example, in `[ A Age U8, B Str {} ]`, there are three type definition references - `Age`,
/// `U8`, and `Str`.
pub fn find_type_def_symbols(
module_id: ModuleId,
ident_ids: &mut IdentIds,
initial_annotation: &roc_parse::ast::TypeAnnotation,
@ -223,6 +271,7 @@ pub fn find_alias_symbols(
SpaceBefore(inner, _) | SpaceAfter(inner, _) => {
stack.push(inner);
}
Where(..) => todo_abilities!(),
Inferred | Wildcard | Malformed(_) => {}
}
}
@ -265,7 +314,7 @@ fn can_annotation_help(
let ret = can_annotation_help(
env,
&return_type.value,
region,
return_type.region,
scope,
var_store,
introduced_variables,
@ -273,7 +322,9 @@ fn can_annotation_help(
references,
);
let closure = Type::Variable(var_store.fresh());
let lambda_set = var_store.fresh();
introduced_variables.insert_lambda_set(lambda_set);
let closure = Type::Variable(lambda_set);
Type::Function(args, Box::new(closure), Box::new(ret))
}
@ -291,7 +342,7 @@ fn can_annotation_help(
let arg_ann = can_annotation_help(
env,
&arg.value,
region,
arg.region,
scope,
var_store,
introduced_variables,
@ -305,9 +356,6 @@ fn can_annotation_help(
match scope.lookup_alias(symbol) {
Some(alias) => {
// use a known alias
let mut actual = alias.typ.clone();
let mut substitutions = ImMap::default();
let mut vars = Vec::new();
if alias.type_variables.len() != args.len() {
let error = Type::Erroneous(Problem::BadTypeArguments {
@ -319,42 +367,50 @@ fn can_annotation_help(
return error;
}
for (loc_var, arg_ann) in alias.type_variables.iter().zip(args.into_iter()) {
let name = loc_var.value.0.clone();
let var = loc_var.value.1;
let is_structural = alias.kind == AliasKind::Structural;
if is_structural {
let mut type_var_to_arg = Vec::new();
substitutions.insert(var, arg_ann.clone());
vars.push((name.clone(), arg_ann));
}
for (loc_var, arg_ann) in alias.type_variables.iter().zip(args) {
let name = loc_var.value.0.clone();
// make sure the recursion variable is freshly instantiated
if let Type::RecursiveTagUnion(rvar, _, _) = &mut actual {
let new = var_store.fresh();
substitutions.insert(*rvar, Type::Variable(new));
*rvar = new;
}
// make sure hidden variables are freshly instantiated
let mut lambda_set_variables =
Vec::with_capacity(alias.lambda_set_variables.len());
for typ in alias.lambda_set_variables.iter() {
if let Type::Variable(var) = typ.0 {
let fresh = var_store.fresh();
substitutions.insert(var, Type::Variable(fresh));
lambda_set_variables.push(LambdaSet(Type::Variable(fresh)));
} else {
unreachable!("at this point there should be only vars in there");
type_var_to_arg.push((name, arg_ann));
}
}
// instantiate variables
actual.substitute(&substitutions);
let mut lambda_set_variables =
Vec::with_capacity(alias.lambda_set_variables.len());
Type::Alias {
symbol,
type_arguments: vars,
lambda_set_variables,
actual: Box::new(actual),
for _ in 0..alias.lambda_set_variables.len() {
let lvar = var_store.fresh();
introduced_variables.insert_lambda_set(lvar);
lambda_set_variables.push(LambdaSet(Type::Variable(lvar)));
}
Type::DelayedAlias(AliasCommon {
symbol,
type_arguments: type_var_to_arg,
lambda_set_variables,
})
} else {
let (type_arguments, lambda_set_variables, actual) =
instantiate_and_freshen_alias_type(
var_store,
introduced_variables,
&alias.type_variables,
args,
&alias.lambda_set_variables,
alias.typ.clone(),
);
Type::Alias {
symbol,
type_arguments,
lambda_set_variables,
actual: Box::new(actual),
kind: alias.kind,
}
}
}
None => Type::Apply(symbol, args, region),
@ -368,7 +424,7 @@ fn can_annotation_help(
None => {
let var = var_store.fresh();
introduced_variables.insert_named(name, var);
introduced_variables.insert_named(name, Loc::at(region, var));
Type::Variable(var)
}
@ -377,8 +433,7 @@ fn can_annotation_help(
As(
loc_inner,
_spaces,
alias_header
@ AliasHeader {
alias_header @ TypeHeader {
name,
vars: loc_vars,
},
@ -433,7 +488,8 @@ fn can_annotation_help(
} else {
let var = var_store.fresh();
introduced_variables.insert_named(var_name.clone(), var);
introduced_variables
.insert_named(var_name.clone(), Loc::at(loc_var.region, var));
vars.push((var_name.clone(), Type::Variable(var)));
lowercase_vars.push(Loc::at(loc_var.region, (var_name, var)));
@ -488,7 +544,13 @@ fn can_annotation_help(
hidden_variables.remove(&loc_var.value.1);
}
scope.add_alias(symbol, region, lowercase_vars, alias_actual);
scope.add_alias(
symbol,
region,
lowercase_vars,
alias_actual,
AliasKind::Structural, // aliases in "as" are never opaque
);
let alias = scope.lookup_alias(symbol).unwrap();
local_aliases.insert(symbol, alias.clone());
@ -511,24 +573,22 @@ fn can_annotation_help(
type_arguments: vars,
lambda_set_variables: alias.lambda_set_variables.clone(),
actual: Box::new(alias.typ.clone()),
kind: alias.kind,
}
}
}
Record { fields, ext } => {
let ext_type = match ext {
Some(loc_ann) => can_annotation_help(
env,
&loc_ann.value,
region,
scope,
var_store,
introduced_variables,
local_aliases,
references,
),
None => Type::EmptyRec,
};
let ext_type = can_extension_type(
env,
scope,
var_store,
introduced_variables,
local_aliases,
references,
ext,
roc_problem::can::ExtensionTypeKind::Record,
);
if fields.is_empty() {
match ext {
@ -536,7 +596,7 @@ fn can_annotation_help(
// just `a` does not mean the same as `{}a`, so even
// if there are no fields, still make this a `Record`,
// not an EmptyRec
Type::Record(Default::default(), Box::new(ext_type))
Type::Record(Default::default(), TypeExtension::from_type(ext_type))
}
None => Type::EmptyRec,
@ -553,23 +613,20 @@ fn can_annotation_help(
references,
);
Type::Record(field_types, Box::new(ext_type))
Type::Record(field_types, TypeExtension::from_type(ext_type))
}
}
TagUnion { tags, ext, .. } => {
let ext_type = match ext {
Some(loc_ann) => can_annotation_help(
env,
&loc_ann.value,
loc_ann.region,
scope,
var_store,
introduced_variables,
local_aliases,
references,
),
None => Type::EmptyTagUnion,
};
let ext_type = can_extension_type(
env,
scope,
var_store,
introduced_variables,
local_aliases,
references,
ext,
roc_problem::can::ExtensionTypeKind::TagUnion,
);
if tags.is_empty() {
match ext {
@ -577,7 +634,7 @@ fn can_annotation_help(
// just `a` does not mean the same as `{}a`, so even
// if there are no fields, still make this a `Record`,
// not an EmptyRec
Type::TagUnion(Default::default(), Box::new(ext_type))
Type::TagUnion(Default::default(), TypeExtension::from_type(ext_type))
}
None => Type::EmptyTagUnion,
@ -599,7 +656,7 @@ fn can_annotation_help(
// in theory we save a lot of time by sorting once here
insertion_sort_by(&mut tag_types, |a, b| a.0.cmp(&b.0));
Type::TagUnion(tag_types, Box::new(ext_type))
Type::TagUnion(tag_types, TypeExtension::from_type(ext_type))
}
}
SpaceBefore(nested, _) | SpaceAfter(nested, _) => can_annotation_help(
@ -615,7 +672,7 @@ fn can_annotation_help(
Wildcard => {
let var = var_store.fresh();
introduced_variables.insert_wildcard(var);
introduced_variables.insert_wildcard(Loc::at(region, var));
Type::Variable(var)
}
@ -623,20 +680,185 @@ fn can_annotation_help(
// Inference variables aren't bound to a rigid or a wildcard, so all we have to do is
// make a fresh unconstrained variable, and let the type solver fill it in for us 🤠
let var = var_store.fresh();
introduced_variables.insert_inferred(Loc::at(region, var));
Type::Variable(var)
}
Where(..) => todo_abilities!(),
Malformed(string) => {
malformed(env, region, string);
let var = var_store.fresh();
introduced_variables.insert_wildcard(var);
introduced_variables.insert_wildcard(Loc::at(region, var));
Type::Variable(var)
}
}
}
#[allow(clippy::too_many_arguments)]
fn can_extension_type<'a>(
env: &mut Env,
scope: &mut Scope,
var_store: &mut VarStore,
introduced_variables: &mut IntroducedVariables,
local_aliases: &mut SendMap<Symbol, Alias>,
references: &mut MutSet<Symbol>,
opt_ext: &Option<&Loc<TypeAnnotation<'a>>>,
ext_problem_kind: roc_problem::can::ExtensionTypeKind,
) -> Type {
fn valid_record_ext_type(typ: &Type) -> bool {
// Include erroneous types so that we don't overreport errors.
matches!(
typ,
Type::EmptyRec | Type::Record(..) | Type::Variable(..) | Type::Erroneous(..)
)
}
fn valid_tag_ext_type(typ: &Type) -> bool {
matches!(
typ,
Type::EmptyTagUnion | Type::TagUnion(..) | Type::Variable(..) | Type::Erroneous(..)
)
}
use roc_problem::can::ExtensionTypeKind;
let (empty_ext_type, valid_extension_type): (_, fn(&Type) -> bool) = match ext_problem_kind {
ExtensionTypeKind::Record => (Type::EmptyRec, valid_record_ext_type),
ExtensionTypeKind::TagUnion => (Type::EmptyTagUnion, valid_tag_ext_type),
};
match opt_ext {
Some(loc_ann) => {
let ext_type = can_annotation_help(
env,
&loc_ann.value,
loc_ann.region,
scope,
var_store,
introduced_variables,
local_aliases,
references,
);
if valid_extension_type(shallow_dealias_with_scope(scope, &ext_type)) {
ext_type
} else {
// Report an error but mark the extension variable to be inferred
// so that we're as permissive as possible.
//
// THEORY: invalid extension types can appear in this position. Otherwise
// they would be caught as errors during unification.
env.problem(roc_problem::can::Problem::InvalidExtensionType {
region: loc_ann.region,
kind: ext_problem_kind,
});
let var = var_store.fresh();
introduced_variables.insert_inferred(Loc::at_zero(var));
Type::Variable(var)
}
}
None => empty_ext_type,
}
}
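// A minimal standalone sketch of the dispatch pattern used above: pick a default
// "empty" type and a validator function per extension kind, then validate the
// candidate extension. `Ty` and `ExtKind` are simplified stand-ins, not the
// compiler's real `Type` and `ExtensionTypeKind`.
#[derive(Debug, Clone)]
enum Ty {
    EmptyRec,
    Record,
    EmptyTagUnion,
    TagUnion,
    Variable(u32),
    Other,
}

enum ExtKind {
    Record,
    TagUnion,
}

fn check_extension(kind: ExtKind, ext: Option<Ty>) -> Result<Ty, &'static str> {
    fn valid_record_ext(t: &Ty) -> bool {
        matches!(t, Ty::EmptyRec | Ty::Record | Ty::Variable(_))
    }
    fn valid_tag_ext(t: &Ty) -> bool {
        matches!(t, Ty::EmptyTagUnion | Ty::TagUnion | Ty::Variable(_))
    }

    // Same shape as above: a (default, validator) pair chosen by kind.
    let (empty, valid): (Ty, fn(&Ty) -> bool) = match kind {
        ExtKind::Record => (Ty::EmptyRec, valid_record_ext),
        ExtKind::TagUnion => (Ty::EmptyTagUnion, valid_tag_ext),
    };

    match ext {
        None => Ok(empty),
        Some(t) if valid(&t) => Ok(t),
        Some(_) => Err("invalid extension type"),
    }
}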
/// A shallow dealias: continue until the first constructor is not an alias.
fn shallow_dealias_with_scope<'a>(scope: &'a mut Scope, typ: &'a Type) -> &'a Type {
let mut result = typ;
loop {
match result {
Type::Alias { actual, .. } => {
// another loop
result = actual;
}
Type::DelayedAlias(AliasCommon { symbol, .. }) => match scope.lookup_alias(*symbol) {
None => unreachable!(),
Some(alias) => {
result = &alias.typ;
}
},
_ => break,
}
}
result
}
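// Sketch of the shallow-dealias loop in isolation, using a simplified alias
// representation (names here are illustrative, not the compiler's types):
// keep unwrapping `Alias` until the outermost constructor is no longer an alias.
enum SimpleTy {
    Alias(Box<SimpleTy>),
    TagUnion(&'static str),
}

fn shallow_dealias(mut ty: &SimpleTy) -> &SimpleTy {
    while let SimpleTy::Alias(inner) = ty {
        ty = &**inner;
    }
    ty
}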
pub fn instantiate_and_freshen_alias_type(
var_store: &mut VarStore,
introduced_variables: &mut IntroducedVariables,
type_variables: &[Loc<(Lowercase, Variable)>],
type_arguments: Vec<Type>,
lambda_set_variables: &[LambdaSet],
mut actual_type: Type,
) -> (Vec<(Lowercase, Type)>, Vec<LambdaSet>, Type) {
let mut substitutions = ImMap::default();
let mut type_var_to_arg = Vec::new();
for (loc_var, arg_ann) in type_variables.iter().zip(type_arguments.into_iter()) {
let name = loc_var.value.0.clone();
let var = loc_var.value.1;
substitutions.insert(var, arg_ann.clone());
type_var_to_arg.push((name.clone(), arg_ann));
}
// make sure the recursion variable is freshly instantiated
if let Type::RecursiveTagUnion(rvar, _, _) = &mut actual_type {
let new = var_store.fresh();
substitutions.insert(*rvar, Type::Variable(new));
*rvar = new;
}
// make sure hidden variables are freshly instantiated
let mut new_lambda_set_variables = Vec::with_capacity(lambda_set_variables.len());
for typ in lambda_set_variables.iter() {
if let Type::Variable(var) = typ.0 {
let fresh = var_store.fresh();
substitutions.insert(var, Type::Variable(fresh));
introduced_variables.insert_lambda_set(fresh);
new_lambda_set_variables.push(LambdaSet(Type::Variable(fresh)));
} else {
unreachable!("at this point there should be only vars in there");
}
}
// instantiate variables
actual_type.substitute(&substitutions);
(type_var_to_arg, new_lambda_set_variables, actual_type)
}
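// A minimal sketch of the instantiate-and-substitute step above, assuming a toy
// type representation: bound variables are mapped to the supplied arguments and
// then replaced throughout the alias body.
use std::collections::HashMap;

#[derive(Clone, Debug)]
enum ToyType {
    Variable(u32),
    Function(Vec<ToyType>, Box<ToyType>),
    Int,
}

fn toy_substitute(ty: &mut ToyType, subs: &HashMap<u32, ToyType>) {
    match ty {
        ToyType::Variable(v) => {
            // Copy the variable id out so the replacement can overwrite `ty`.
            let v = *v;
            if let Some(replacement) = subs.get(&v) {
                *ty = replacement.clone();
            }
        }
        ToyType::Function(args, ret) => {
            for arg in args.iter_mut() {
                toy_substitute(arg, subs);
            }
            toy_substitute(ret, subs);
        }
        ToyType::Int => {}
    }
}

fn toy_instantiate(type_variables: &[u32], type_arguments: Vec<ToyType>, mut body: ToyType) -> ToyType {
    let subs: HashMap<u32, ToyType> = type_variables
        .iter()
        .copied()
        .zip(type_arguments.into_iter())
        .collect();
    toy_substitute(&mut body, &subs);
    body
}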
pub fn freshen_opaque_def(
var_store: &mut VarStore,
opaque: &Alias,
) -> (Vec<(Lowercase, Type)>, Vec<LambdaSet>, Type) {
debug_assert!(opaque.kind == AliasKind::Opaque);
let fresh_arguments = opaque
.type_variables
.iter()
.map(|_| Type::Variable(var_store.fresh()))
.collect();
// TODO this gets ignored; is that a problem?
let mut introduced_variables = IntroducedVariables::default();
instantiate_and_freshen_alias_type(
var_store,
&mut introduced_variables,
&opaque.type_variables,
fresh_arguments,
&opaque.lambda_set_variables,
opaque.typ.clone(),
)
}
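// Sketch of the "freshen" idea in isolation: every bound variable of the opaque
// alias is paired with a newly allocated variable from a monotonically increasing
// store (a stand-in for `VarStore`, assumed here to behave like a plain counter).
struct ToyVarStore(u32);

impl ToyVarStore {
    fn fresh(&mut self) -> u32 {
        let v = self.0;
        self.0 += 1;
        v
    }
}

fn toy_fresh_arguments(bound_vars: &[u32], store: &mut ToyVarStore) -> Vec<(u32, u32)> {
    // (old variable, fresh replacement) pairs
    bound_vars.iter().map(|&old| (old, store.fresh())).collect()
}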
fn insertion_sort_by<T, F>(arr: &mut [T], mut compare: F)
where
F: FnMut(&T, &T) -> std::cmp::Ordering,
@ -727,7 +949,10 @@ fn can_assigned_fields<'a>(
Type::Variable(*var)
} else {
let field_var = var_store.fresh();
introduced_variables.insert_named(field_name.clone(), field_var);
introduced_variables.insert_named(
field_name.clone(),
Loc::at(loc_field_name.region, field_var),
);
Type::Variable(field_var)
}
};


@ -57,6 +57,7 @@ pub fn builtin_dependencies(symbol: Symbol) -> &'static [Symbol] {
Symbol::LIST_PRODUCT => &[Symbol::LIST_WALK, Symbol::NUM_MUL],
Symbol::LIST_SUM => &[Symbol::LIST_WALK, Symbol::NUM_ADD],
Symbol::LIST_JOIN_MAP => &[Symbol::LIST_WALK, Symbol::LIST_CONCAT],
Symbol::LIST_SET => &[Symbol::LIST_REPLACE],
_ => &[],
}
}
@ -102,6 +103,7 @@ pub fn builtin_defs_map(symbol: Symbol, var_store: &mut VarStore) -> Option<Def>
STR_TO_I8 => str_to_num,
LIST_LEN => list_len,
LIST_GET => list_get,
LIST_REPLACE => list_replace,
LIST_SET => list_set,
LIST_APPEND => list_append,
LIST_FIRST => list_first,
@ -242,6 +244,28 @@ pub fn builtin_defs_map(symbol: Symbol, var_store: &mut VarStore) -> Option<Def>
NUM_MAX_U64=> num_max_u64,
NUM_MIN_I128=> num_min_i128,
NUM_MAX_I128=> num_max_i128,
NUM_TO_I8 => num_to_i8,
NUM_TO_I8_CHECKED => num_to_i8_checked,
NUM_TO_I16 => num_to_i16,
NUM_TO_I16_CHECKED => num_to_i16_checked,
NUM_TO_I32 => num_to_i32,
NUM_TO_I32_CHECKED => num_to_i32_checked,
NUM_TO_I64 => num_to_i64,
NUM_TO_I64_CHECKED => num_to_i64_checked,
NUM_TO_I128 => num_to_i128,
NUM_TO_I128_CHECKED => num_to_i128_checked,
NUM_TO_U8 => num_to_u8,
NUM_TO_U8_CHECKED => num_to_u8_checked,
NUM_TO_U16 => num_to_u16,
NUM_TO_U16_CHECKED => num_to_u16_checked,
NUM_TO_U32 => num_to_u32,
NUM_TO_U32_CHECKED => num_to_u32_checked,
NUM_TO_U64 => num_to_u64,
NUM_TO_U64_CHECKED => num_to_u64_checked,
NUM_TO_U128 => num_to_u128,
NUM_TO_U128_CHECKED => num_to_u128_checked,
NUM_TO_NAT => num_to_nat,
NUM_TO_NAT_CHECKED => num_to_nat_checked,
NUM_TO_STR => num_to_str,
RESULT_MAP => result_map,
RESULT_MAP_ERR => result_map_err,
@ -249,6 +273,8 @@ pub fn builtin_defs_map(symbol: Symbol, var_store: &mut VarStore) -> Option<Def>
RESULT_WITH_DEFAULT => result_with_default,
RESULT_IS_OK => result_is_ok,
RESULT_IS_ERR => result_is_err,
BOX_BOX_FUNCTION => box_box,
BOX_UNBOX => box_unbox,
}
}
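// Illustrative sketch of the dependency-table pattern used by
// `builtin_dependencies`: a match over symbols returning a static slice of the
// builtins that must also be loaded (e.g. List.set now pulls in List.replace).
// `Sym` is a toy stand-in for `Symbol`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Sym {
    ListSet,
    ListReplace,
    ListSum,
    ListWalk,
    NumAdd,
}

fn toy_builtin_dependencies(symbol: Sym) -> &'static [Sym] {
    match symbol {
        Sym::ListSet => &[Sym::ListReplace],
        Sym::ListSum => &[Sym::ListWalk, Sym::NumAdd],
        _ => &[],
    }
}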
@ -390,6 +416,181 @@ fn lowlevel_5(symbol: Symbol, op: LowLevel, var_store: &mut VarStore) -> Def {
)
}
// Num.toI8 : Int * -> I8
fn num_to_i8(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toI16 : Int * -> I16
fn num_to_i16(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toI32 : Int * -> I32
fn num_to_i32(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toI64 : Int * -> I64
fn num_to_i64(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toI128 : Int * -> I128
fn num_to_i128(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toU8 : Int * -> U8
fn num_to_u8(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toU16 : Int * -> U16
fn num_to_u16(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toU32 : Int * -> U32
fn num_to_u32(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toU64 : Int * -> U64
fn num_to_u64(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toU128 : Int * -> U128
fn num_to_u128(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
// Num.toNat : Int * -> Nat
fn num_to_nat(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Defer to IntCast
lowlevel_1(symbol, LowLevel::NumIntCast, var_store)
}
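// The unchecked `Num.toXXX` builtins above all defer to the same lowlevel cast.
// A hedged analogy in plain Rust, assuming the lowlevel behaves like an ordinary
// integer cast that truncates/wraps rather than failing:
fn to_u8_unchecked(x: i64) -> u8 {
    // 300 becomes 44 here; out-of-range inputs wrap instead of erroring.
    x as u8
}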
fn to_num_checked(symbol: Symbol, var_store: &mut VarStore, lowlevel: LowLevel) -> Def {
let bool_var = var_store.fresh();
let num_var_1 = var_store.fresh();
let num_var_2 = var_store.fresh();
let ret_var = var_store.fresh();
let record_var = var_store.fresh();
// let arg_2 = RunLowLevel NumToXXXChecked arg_1
// if arg_2.b then
// Err OutOfBounds
// else
// Ok arg_2.a
//
// "a" and "b" because the lowlevel return value looks like { converted_val: XXX, out_of_bounds: bool },
// and codegen will sort by alignment, so "a" will be the first key, etc.
let cont = If {
branch_var: ret_var,
cond_var: bool_var,
branches: vec![(
// if-condition
no_region(
// arg_2.b
Access {
record_var,
ext_var: var_store.fresh(),
field: "b".into(),
field_var: var_store.fresh(),
loc_expr: Box::new(no_region(Var(Symbol::ARG_2))),
},
),
// out of bounds!
no_region(tag(
"Err",
vec![tag("OutOfBounds", Vec::new(), var_store)],
var_store,
)),
)],
final_else: Box::new(
// all is well
no_region(
// Ok arg_2.a
tag(
"Ok",
vec![
// arg_2.a
Access {
record_var,
ext_var: var_store.fresh(),
field: "a".into(),
field_var: num_var_2,
loc_expr: Box::new(no_region(Var(Symbol::ARG_2))),
},
],
var_store,
),
),
),
};
// arg_2 = RunLowLevel NumToXXXChecked arg_1
let def = crate::def::Def {
loc_pattern: no_region(Pattern::Identifier(Symbol::ARG_2)),
loc_expr: no_region(RunLowLevel {
op: lowlevel,
args: vec![(num_var_1, Var(Symbol::ARG_1))],
ret_var: record_var,
}),
expr_var: record_var,
pattern_vars: SendMap::default(),
annotation: None,
};
let body = LetNonRec(Box::new(def), Box::new(no_region(cont)), ret_var);
defn(
symbol,
vec![(num_var_1, Symbol::ARG_1)],
var_store,
body,
ret_var,
)
}
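// A standalone sketch of the runtime behaviour the generated code above encodes,
// assuming the lowlevel returns a record of (converted value, out-of-bounds flag)
// exposed as fields "a" and "b":
struct CheckedCast {
    a: i8,   // converted value
    b: bool, // out of bounds?
}

fn lowlevel_to_i8_checked(x: i64) -> CheckedCast {
    let out_of_bounds = x < i8::MIN as i64 || x > i8::MAX as i64;
    CheckedCast { a: x as i8, b: out_of_bounds }
}

fn to_i8_checked(x: i64) -> Result<i8, &'static str> {
    let result = lowlevel_to_i8_checked(x);
    if result.b {
        Err("OutOfBounds")
    } else {
        Ok(result.a)
    }
}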
macro_rules! num_to_checked {
($($fn:ident)*) => {$(
// Num.toXXXChecked : Int * -> Result XXX [ OutOfBounds ]*
fn $fn(symbol: Symbol, var_store: &mut VarStore) -> Def {
// Use the generic `NumToIntChecked`; we'll figure out exactly what layout(s) we need
// during code generation after types are resolved.
to_num_checked(symbol, var_store, LowLevel::NumToIntChecked)
}
)*}
}
num_to_checked! {
num_to_i8_checked
num_to_i16_checked
num_to_i32_checked
num_to_i64_checked
num_to_i128_checked
num_to_u8_checked
num_to_u16_checked
num_to_u32_checked
num_to_u64_checked
num_to_u128_checked
num_to_nat_checked
}
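// Sketch of the macro technique used above: a `macro_rules!` definition stamping
// out many identical thin wrappers so each checked conversion gets its own named
// function. The helper and names below are illustrative only.
fn to_num_checked_stub(name: &'static str) -> String {
    format!("builtin {}", name)
}

macro_rules! make_checked {
    ($($fn_name:ident)*) => {$(
        fn $fn_name() -> String {
            to_num_checked_stub(stringify!($fn_name))
        }
    )*}
}

make_checked! {
    to_i8_checked_stub
    to_u8_checked_stub
}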
// Num.toStr : Num a -> Str
fn num_to_str(symbol: Symbol, var_store: &mut VarStore) -> Def {
let num_var = var_store.fresh();
@ -2115,6 +2316,91 @@ fn list_get(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// List.replace : List elem, Nat, elem -> { list: List elem, value: elem }
fn list_replace(symbol: Symbol, var_store: &mut VarStore) -> Def {
let arg_list = Symbol::ARG_1;
let arg_index = Symbol::ARG_2;
let arg_elem = Symbol::ARG_3;
let bool_var = var_store.fresh();
let len_var = var_store.fresh();
let elem_var = var_store.fresh();
let list_arg_var = var_store.fresh();
let ret_record_var = var_store.fresh();
let ret_result_var = var_store.fresh();
let list_field = Field {
var: list_arg_var,
region: Region::zero(),
loc_expr: Box::new(Loc::at_zero(Expr::Var(arg_list))),
};
let value_field = Field {
var: elem_var,
region: Region::zero(),
loc_expr: Box::new(Loc::at_zero(Expr::Var(arg_elem))),
};
// Perform a bounds check. If it passes, run LowLevel::ListReplaceUnsafe.
// Otherwise, return the unmodified list together with the element that was passed in.
let body = If {
cond_var: bool_var,
branch_var: ret_result_var,
branches: vec![(
// if-condition
no_region(
// index < List.len list
RunLowLevel {
op: LowLevel::NumLt,
args: vec![
(len_var, Var(arg_index)),
(
len_var,
RunLowLevel {
op: LowLevel::ListLen,
args: vec![(list_arg_var, Var(arg_list))],
ret_var: len_var,
},
),
],
ret_var: bool_var,
},
),
// then-branch
no_region(
// List.replaceUnsafe list index elem
RunLowLevel {
op: LowLevel::ListReplaceUnsafe,
args: vec![
(list_arg_var, Var(arg_list)),
(len_var, Var(arg_index)),
(elem_var, Var(arg_elem)),
],
ret_var: ret_record_var,
},
),
)],
final_else: Box::new(
// else-branch
no_region(record(
vec![("list".into(), list_field), ("value".into(), value_field)],
var_store,
)),
),
};
defn(
symbol,
vec![
(list_arg_var, Symbol::ARG_1),
(len_var, Symbol::ARG_2),
(elem_var, Symbol::ARG_3),
],
var_store,
body,
ret_result_var,
)
}
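// A plain-Rust sketch of the semantics encoded above (illustrative only): replace
// returns the updated list together with a value, namely the displaced element when
// the index is in bounds, otherwise the element that was passed in, with the list
// left untouched.
fn toy_list_replace<T>(mut list: Vec<T>, index: usize, elem: T) -> (Vec<T>, T) {
    if index < list.len() {
        let old = std::mem::replace(&mut list[index], elem);
        (list, old)
    } else {
        (list, elem)
    }
}

// `List.set` is then just `List.replace` with the displaced value thrown away,
// mirroring how the canonical definition below keeps only the `list` field.
fn toy_list_set<T>(list: Vec<T>, index: usize, elem: T) -> Vec<T> {
    toy_list_replace(list, index, elem).0
}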
/// List.set : List elem, Nat, elem -> List elem
///
/// List.set :
@ -2129,9 +2415,27 @@ fn list_set(symbol: Symbol, var_store: &mut VarStore) -> Def {
let bool_var = var_store.fresh();
let len_var = var_store.fresh();
let elem_var = var_store.fresh();
let replace_record_var = var_store.fresh();
let list_arg_var = var_store.fresh(); // Uniqueness type Attr differs between
let list_ret_var = var_store.fresh(); // the arg list and the returned list
let replace_function = (
var_store.fresh(),
Loc::at_zero(Expr::Var(Symbol::LIST_REPLACE)),
var_store.fresh(),
replace_record_var,
);
let replace_call = Expr::Call(
Box::new(replace_function),
vec![
(list_arg_var, Loc::at_zero(Var(arg_list))),
(len_var, Loc::at_zero(Var(arg_index))),
(elem_var, Loc::at_zero(Var(arg_elem))),
],
CalledVia::Space,
);
// Perform a bounds check. If it passes, call List.replace and keep only the `list` field of the result.
// Otherwise, return the list unmodified.
let body = If {
@ -2158,18 +2462,16 @@ fn list_set(symbol: Symbol, var_store: &mut VarStore) -> Def {
},
),
// then-branch
no_region(
// List.setUnsafe list index
RunLowLevel {
op: LowLevel::ListSet,
args: vec![
(list_arg_var, Var(arg_list)),
(len_var, Var(arg_index)),
(elem_var, Var(arg_elem)),
],
ret_var: list_ret_var,
},
),
no_region(Access {
record_var: replace_record_var,
ext_var: var_store.fresh(),
field_var: list_ret_var,
loc_expr: Box::new(no_region(
// List.replace list index elem
replace_call,
)),
field: "list".into(),
}),
)],
final_else: Box::new(
// else-branch
@ -5035,6 +5337,16 @@ fn num_bytes_to(symbol: Symbol, var_store: &mut VarStore, offset: i64, low_level
)
}
/// Box.box : a -> Box a
fn box_box(symbol: Symbol, var_store: &mut VarStore) -> Def {
lowlevel_1(symbol, LowLevel::BoxExpr, var_store)
}
/// Box.unbox : Box a -> a
fn box_unbox(symbol: Symbol, var_store: &mut VarStore) -> Def {
lowlevel_1(symbol, LowLevel::UnboxExpr, var_store)
}
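// Hedged analogy for the two builtins above, assuming `Box.box` heap-allocates a
// value and `Box.unbox` reads it back out, roughly what `Box::new` and dereferencing
// do in Rust:
fn box_box_example(value: i64) -> Box<i64> {
    Box::new(value)
}

fn box_unbox_example(boxed: Box<i64>) -> i64 {
    *boxed
}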
#[inline(always)]
fn defn_help(
fn_name: Symbol,
