Merge branch 'main' of github.com:roc-lang/roc into clippy-1.74

This commit is contained in:
Anton-4 2023-12-25 17:05:37 +01:00
commit cd632fe549
No known key found for this signature in database
GPG key ID: 0971D718C0A9B937
271 changed files with 7741 additions and 7417 deletions

View file

@ -9,15 +9,31 @@ concurrency:
env:
# use .tar.gz for quick testing
ARCHIVE_FORMAT: .tar.gz
BASIC_CLI_BRANCH: main
ARCHIVE_FORMAT: .tar.br
# Make a new basic-cli git tag and set it here before starting this workflow
RELEASE_TAG: 0.7.1
jobs:
fetch-releases:
prepare:
runs-on: [ubuntu-20.04]
steps:
- uses: actions/checkout@v3
with:
repository: roc-lang/basic-cli
- name: check if provided RELEASE_TAG is fresh
run: |
git fetch --tags
TAG_DATE=$(git log -1 --format=%ai ${{ env.RELEASE_TAG }})
CURRENT_DATE=$(date +%Y-%m-%d)
TAG_AGE=$(( ($(date -d $CURRENT_DATE +%s) - $(date -d "$TAG_DATE" +%s) )/(60*60*24) ))
if [ $TAG_AGE -gt 4 ]; then
echo "The provided RELEASE_TAG (${{ env.RELEASE_TAG }}) seems stale, it is $TAG_AGE days old. Did you set it correctly at the top of this workflow?"
exit 1
fi
# get latest nightly releases
- run: curl -fOL https://github.com/roc-lang/roc/releases/download/nightly/roc_nightly-linux_x86_64-latest.tar.gz
- run: curl -fOL https://github.com/roc-lang/roc/releases/download/nightly/roc_nightly-linux_arm64-latest.tar.gz
- run: curl -fOL https://github.com/roc-lang/roc/releases/download/nightly/roc_nightly-macos_x86_64-latest.tar.gz
@ -30,7 +46,7 @@ jobs:
build-linux-x86_64-files:
runs-on: [ubuntu-20.04]
needs: [fetch-releases]
needs: [prepare]
steps:
- uses: actions/checkout@v3
@ -47,14 +63,14 @@ jobs:
with:
name: linux-x86_64-files
path: |
basic-cli/src/metadata_linux-x64.rm
basic-cli/src/linux-x64.rh
basic-cli/src/linux-x64.o
basic-cli/platform/metadata_linux-x64.rm
basic-cli/platform/linux-x64.rh
basic-cli/platform/linux-x64.o
build-linux-arm64-files:
runs-on: [self-hosted, Linux, ARM64]
needs: [fetch-releases]
needs: [prepare]
steps:
- uses: actions/checkout@v3
@ -74,11 +90,11 @@ jobs:
with:
name: linux-arm64-files
path: |
basic-cli/src/linux-arm64.o
basic-cli/platform/linux-arm64.o
build-macos-x86_64-files:
runs-on: [macos-11] # I expect the generated files to work on macOS 12 and up
needs: [fetch-releases]
needs: [prepare]
steps:
- uses: actions/checkout@v3
@ -92,12 +108,12 @@ jobs:
with:
name: macos-x86_64-files
path: |
basic-cli/src/macos-x64.o
basic-cli/platform/macos-x64.o
build-macos-apple-silicon-files:
name: build apple silicon .o file
runs-on: [self-hosted, macOS, ARM64]
needs: [fetch-releases]
needs: [prepare]
steps:
- uses: actions/checkout@v3
@ -111,7 +127,7 @@ jobs:
with:
name: macos-apple-silicon-files
path: |
basic-cli/src/macos-arm64.o
basic-cli/platform/macos-arm64.o
create-release-archive:
needs: [build-linux-x86_64-files, build-linux-arm64-files, build-macos-x86_64-files, build-macos-apple-silicon-files]
@ -140,17 +156,25 @@ jobs:
- run: git clone https://github.com/roc-lang/basic-cli.git
- run: cp macos-apple-silicon-files/* ./basic-cli/src
- run: cp macos-apple-silicon-files/* ./basic-cli/platform
- run: cp linux-x86_64-files/* ./basic-cli/src
- run: cp linux-x86_64-files/* ./basic-cli/platform
- run: cp linux-arm64-files/* ./basic-cli/src
- run: cp linux-arm64-files/* ./basic-cli/platform
- run: cp macos-x86_64-files/* ./basic-cli/src
- run: cp macos-x86_64-files/* ./basic-cli/platform
- run: ./roc_nightly/roc build --bundle=${{ env.ARCHIVE_FORMAT }} ./basic-cli/src/main.roc
- name: bundle basic-cli release archive
run: ./roc_nightly/roc build --bundle=${{ env.ARCHIVE_FORMAT }} ./basic-cli/platform/main.roc
- run: echo "TAR_FILENAME=$(ls -d basic-cli/src/* | grep ${{ env.ARCHIVE_FORMAT }})" >> $GITHUB_ENV
- name: build basic-cli docs
env:
ROC_DOCS_URL_ROOT: /packages/basic-cli/${{ env.RELEASE_TAG }}
run: |
./roc_nightly/roc docs ./basic-cli/platform/main.roc
tar -czvf docs.tar.gz generated-docs/
- run: echo "TAR_FILENAME=$(ls -d basic-cli/platform/* | grep ${{ env.ARCHIVE_FORMAT }})" >> $GITHUB_ENV
- name: Upload platform archive
uses: actions/upload-artifact@v3
@ -159,6 +183,13 @@ jobs:
path: |
${{ env.TAR_FILENAME }}
- name: Upload docs archive
uses: actions/upload-artifact@v3
with:
name: release-assets-docs
path: |
docs.tar.gz
test-release-ubuntu:
needs: [create-release-archive]
runs-on: [ubuntu-20.04]
@ -194,18 +225,19 @@ jobs:
- name: Install ncat for tests if we don't have it yet
run: if ! dpkg -l | grep -qw ncat; then sudo apt install -y ncat; fi
- name: prep testing
- name: prepare testing
run: |
mv roc_nightly basic-cli-platform/.
cd basic-cli-platform
mkdir src
find . -maxdepth 1 -type f -exec mv {} src/ \;
mkdir platform
# move all files to platform dir
find . -maxdepth 1 -type f -exec mv {} platform/ \;
mkdir temp-basic-cli
cd temp-basic-cli
git clone https://github.com/roc-lang/basic-cli.git
cd basic-cli
git checkout ${{ env.BASIC_CLI_BRANCH }}
git checkout ${{ env.RELEASE_TAG }}
cp -r examples ../..
cp -r ci ../..
cp -r LICENSE ../..
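
Note: the tag-freshness check near the top of this workflow turns two timestamps into an age in days. A minimal standalone sketch of the same arithmetic, assuming GNU `date` (as on the ubuntu-20.04 runner) and a hypothetical tag date:

```sh
# hypothetical tag date; the workflow reads the real one via `git log -1 --format=%ai $RELEASE_TAG`
TAG_DATE="2023-12-20 17:05:37 +0100"
# age in whole days: (now - tag time) / seconds per day
TAG_AGE=$(( ( $(date +%s) - $(date -d "$TAG_DATE" +%s) ) / (60*60*24) ))
echo "tag is $TAG_AGE days old"  # anything over 4 fails the workflow
```

This simplifies the workflow's version, which first truncates the current time to a `%Y-%m-%d` date string before converting back to epoch seconds.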

View file

@ -1,6 +1,6 @@
on:
pull_request:
name: CI manager
# cancel current runs when a new commit is pushed
@ -21,7 +21,7 @@ jobs:
id: filecheck
run: |
git fetch origin ${{ github.base_ref }}
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$)'; then
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^AUTHORS$)'; then
echo "run_tests=full" >> $GITHUB_OUTPUT
else
echo "run_tests=none" >> $GITHUB_OUTPUT
@ -52,7 +52,7 @@ jobs:
start-macos-x86-64-tests:
needs: check-changes
if: needs.check-changes.outputs.run_tests == 'full'
uses: ./.github/workflows/ubuntu_x86_64.yml
uses: ./.github/workflows/macos_x86_64.yml
start-ubuntu-x86-64-tests:
needs: check-changes
@ -103,12 +103,16 @@ jobs:
needs: [ran-full, ran-none]
if: |
always()
&& contains(needs.*.result, 'success')
&& !contains(needs.*.result, 'failure')
&& !contains(needs.*.result, 'cancelled')
&& !(needs.ran-full.result == 'skipped' && needs.ran-none.result == 'skipped')
steps:
- run: echo "Workflow succeeded :)"
- name: Check previous job results
run: |
if [ "${{ needs.ran-full.result }}" != "success" ] && [ "${{ needs.ran-none.result }}" != "success" ]; then
echo "One or more jobs failed."
exit 1
fi
- run: echo "Workflow succeeded :)"

View file

@ -25,7 +25,7 @@ jobs:
run: ./ci/write_version.sh
- name: build release with lto
run: cargo build --profile=release-with-lto --locked --bin roc
run: cargo build --profile=release-with-lto --locked --bin roc --bin roc_ls
- name: get commit SHA
run: echo "SHA=$(git rev-parse --short "$GITHUB_SHA")" >> $GITHUB_ENV

View file

@ -25,7 +25,7 @@ jobs:
run: ./ci/write_version.sh
- name: build release with lto
run: RUSTFLAGS="-C target-cpu=x86-64" cargo build --profile=release-with-lto --locked --bin roc
run: RUSTFLAGS="-C target-cpu=x86-64" cargo build --profile=release-with-lto --locked --bin roc --bin roc_ls
# target-cpu=x86-64 -> For maximal compatibility for all CPU's. This was also faster in our tests: https://roc.zulipchat.com/#narrow/stream/231635-compiler-development/topic/.2Ecargo.2Fconfig.2Etoml/near/325726299
- name: get commit SHA

View file

@ -42,11 +42,7 @@ jobs:
run: ./ci/write_version.sh
- name: build nightly release
run: cargo build --locked --profile=release-with-lto --bin roc
# this makes the roc binary a lot smaller
- name: strip debug info
run: strip ./target/release-with-lto/roc
run: cargo build --locked --profile=release-with-lto --bin roc --bin roc_ls
- name: package release
run: ./ci/package_release.sh ${{ env.RELEASE_FOLDER_NAME }}

View file

@ -32,7 +32,7 @@ jobs:
# this issue may be caused by using older versions of XCode
- name: build release
run: RUSTFLAGS="-C target-cpu=x86-64" cargo build --profile=release-with-lto --locked --bin roc
run: RUSTFLAGS="-C target-cpu=x86-64" cargo build --profile=release-with-lto --locked --bin roc --bin roc_ls
# target-cpu=x86-64 -> For maximal compatibility for all CPU's.
- name: get commit SHA

View file

@ -20,6 +20,9 @@ jobs:
- name: execute tests with --release
run: nix develop -c cargo test --locked --release
- name: roc test all builtins
run: nix develop -c ./ci/roc_test_builtins.sh
- name: test wasm32 cli_run
run: nix develop -c cargo test --locked --release --features="wasm32-cli-run"

View file

@ -32,8 +32,12 @@ jobs:
- name: test building default.nix
run: nix-build
# for skipped tests: see issue 6274
- name: execute tests with --release
run: nix develop -c cargo test --locked --release
run: nix develop -c cargo test --locked --release -- --skip cli_run::inspect_gui --skip cli_run::hello_gui
- name: roc test all builtins
run: nix develop -c ./ci/roc_test_builtins.sh
- name: make a libapp.so for the next step
run: nix develop -c cargo run -- gen-stub-lib examples/platform-switching/rocLovesRust.roc

View file

@ -20,6 +20,9 @@ jobs:
run: nix develop -c cargo test --locked --release -p roc_cli -- --skip hello_gui
# see 5932 for hello_gui
- name: roc test all builtins
run: nix develop -c ./ci/roc_test_builtins.sh
- name: make a libapp.so for the next step
run: nix develop -c cargo run -- gen-stub-lib examples/platform-switching/rocLovesRust.roc

View file

@ -47,11 +47,8 @@ jobs:
- name: test gen-wasm single threaded # gen-wasm has some multithreading problems to do with the wasmer runtime
run: cargo test --locked --release --package test_gen --no-default-features --features gen-wasm -- --test-threads=1 && sccache --show-stats
- name: run `roc test` on Str builtins
run: cargo run --locked --release -- test crates/compiler/builtins/roc/Str.roc && sccache --show-stats
- name: run `roc test` on Dict builtins
run: cargo run --locked --release -- test crates/compiler/builtins/roc/Dict.roc && sccache --show-stats
- name: roc test all builtins
run: ./ci/roc_test_builtins.sh
- name: wasm repl test
run: crates/repl_test/test_wasm.sh && sccache --show-stats

View file

@ -123,7 +123,7 @@ Luca Cervello <luca.cervello@gmail.com>
Josh Mak <joshmak@berkeley.edu>
Jakub Kozłowski <kubukoz@gmail.com>
Travis Staloch <twostepted@gmail.com>
Nick Gravgaard <nick@nickgravgaard.com>
Nick Gravgaard <nick@nick-gravgaard.com>
Keerthana Kasthuril <76804118+keerthanak-tw@users.noreply.github.com>
Salman Shaik <salmansiddiq.shaik@gmail.com>
Austin Clements <austinclementsbass@gmail.com>

View file

@ -76,7 +76,7 @@ To run the test suite (via `cargo test`), you additionally need to install:
- [`valgrind`](https://www.valgrind.org/) (needs special treatment to [install on macOS](https://stackoverflow.com/a/61359781))
Alternatively, you can use `cargo test --no-fail-fast` or `cargo test -p specific_tests` to skip over the valgrind failures & tests.
For debugging LLVM IR, we use [DebugIR](https://github.com/vaivaswatha/debugir). This dependency is only required to build with the `--debug` flag, and for normal development you should be fine without it.
For emitting LLVM IR for debugging purposes, the `--emit-llvm-ir` flag can be used.
### libxcb libraries

View file

@ -26,7 +26,7 @@ Check [Building from source](BUILDING_FROM_SOURCE.md) for instructions.
Most contributors execute the following commands before pushing their code:
```sh
cargo test
cargo test --release
cargo fmt --all -- --check
cargo clippy --workspace --tests -- --deny warnings
```

View file

@ -53,9 +53,7 @@ build-nightly-release:
COPY --dir .git LICENSE LEGAL_DETAILS ci ./
# version.txt is used by the CLI: roc --version
RUN ./ci/write_version.sh
RUN RUSTFLAGS=$RUSTFLAGS cargo build --profile=release-with-lto --locked --bin roc
# strip debug info
RUN strip ./target/release-with-lto/roc
RUN RUSTFLAGS=$RUSTFLAGS cargo build --profile=release-with-lto --locked --bin roc --bin roc_ls
RUN ./ci/package_release.sh $RELEASE_FOLDER_NAME
RUN ls
SAVE ARTIFACT ./$RELEASE_FOLDER_NAME.tar.gz AS LOCAL $RELEASE_FOLDER_NAME.tar.gz

View file

@ -277,4 +277,33 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
===========================================================
* ankerl::unordered_dense - https://github.com/martinus/unordered_dense
Our Dict type is currently implemented as a fairly direct port of this source into Roc.
Source code is in crates/compiler/builtins/roc/Dict.roc
MIT License
Copyright (c) 2022 Martin Leitner-Ankerl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -6,7 +6,7 @@
- [**tutorial**](https://roc-lang.org/tutorial)
- [**docs** for the standard library](https://www.roc-lang.org/builtins)
- [**examples**](https://github.com/roc-lang/examples/tree/main/examples)
- [**faq**: frequently asked questions](https://github.com/roc-lang/roc/blob/main/FAQ.md)
- [**faq**: frequently asked questions](https://github.com/roc-lang/roc/blob/main/www/content/faq.md)
- [**group chat**](https://roc.zulipchat.com) for help, questions and discussions
If you'd like to contribute, check out [good first issues](https://github.com/roc-lang/roc/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). Don't hesitate to ask for help on our [group chat](https://roc.zulipchat.com), we're friendly!
@ -17,14 +17,17 @@ You can 💜 **sponsor** 💜 Roc on:
- [GitHub](https://github.com/sponsors/roc-lang)
- [Liberapay](https://liberapay.com/roc_lang)
We are very grateful for our corporate sponsors [Vendr](https://www.vendr.com/), [RWX](https://www.rwx.com), [Tweede golf](https://tweedegolf.nl/en), and [ohne-makler](https://www.ohne-makler.net):
We are very grateful for our corporate sponsors [Vendr](https://www.vendr.com/), [RWX](https://www.rwx.com), [Tweede golf](https://tweedegolf.nl/en), [ohne-makler](https://www.ohne-makler.net), and [Decem](https://www.decem.com.au):
[<img src="https://user-images.githubusercontent.com/1094080/223597445-81755626-a080-4299-a38c-3c92e7548489.png" height="60" alt="Vendr logo"/>](https://www.vendr.com)
&nbsp;&nbsp;&nbsp;&nbsp;
[<img src="https://github.com/roc-lang/roc/assets/1094080/82c0868e-d23f-42a0-ac2d-c6e6b2e16575" height="60" alt="RWX logo"/>](https://www.rwx.com)
&nbsp;&nbsp;&nbsp;&nbsp;
[<img src="https://user-images.githubusercontent.com/1094080/183123052-856815b1-8cc9-410a-83b0-589f03613188.svg" height="60" alt="tweede golf logo"/>](https://tweedegolf.nl/en)
&nbsp;&nbsp;&nbsp;&nbsp;
[<img src="https://www.ohne-makler.net/static/img/brand/logo.svg" height="60" alt="ohne-makler logo"/>](https://www.ohne-makler.net)
&nbsp;&nbsp;&nbsp;&nbsp;
[<img src="https://github.com/roc-lang/roc/assets/1094080/fd2a759c-7f6d-4f57-9eca-9601deba87b6" height="60" alt="Decem logo"/>](https://www.decem.com.au)
If you would like your company to become a corporate sponsor of Roc's development, please [DM Richard Feldman on Zulip](https://roc.zulipchat.com/#narrow/pm-with/281383-user281383)!
@ -39,7 +42,7 @@ We'd also like to express our gratitude to our generous [individual sponsors](ht
* [Lucas Rosa](https://github.com/rvcas)
* [Jonas Schell](https://github.com/Ocupe)
* [Christopher Dolan](https://github.com/cdolan)
* [Nick Gravgaard](https://github.com/nickgravgaard)
* [Nick Gravgaard](https://github.com/nick-gravgaard)
* [Zeljko Nesic](https://github.com/popara)
* [Shritesh Bhattarai](https://github.com/shritesh)
* [Richard Feldman](https://github.com/rtfeldman)

View file

@ -4,6 +4,9 @@
set -euxo pipefail
git clone https://github.com/roc-lang/basic-cli.git
cd basic-cli
git checkout $RELEASE_TAG
cd ..
if [ "$(uname -s)" == "Linux" ]; then
@ -13,7 +16,7 @@ if [ "$(uname -s)" == "Linux" ]; then
timeout 300s sudo apt-get install -y musl-tools
fi
cd basic-cli/src # we cd to install the target for the right rust version
cd basic-cli/platform # we cd to install the target for the right rust version
if [ "$(uname -m)" == "x86_64" ]; then
rustup target add x86_64-unknown-linux-musl
elif [ "$(uname -m)" == "aarch64" ]; then

View file

@ -3,15 +3,21 @@
# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail
cp target/release-with-lto/roc ./roc # to be able to delete "target" later
# this makes the binaries a lot smaller
strip ./target/release-with-lto/roc
strip ./target/release-with-lto/roc_ls
# to be able to delete "target" later
cp target/release-with-lto/roc ./roc
cp target/release-with-lto/roc_ls ./roc_lang_server
# delete unnecessary files and folders
git clean -fdx --exclude roc
git clean -fdx --exclude roc --exclude roc_lang_server
mkdir $1
mv roc LICENSE LEGAL_DETAILS $1
mv roc roc_lang_server LICENSE LEGAL_DETAILS $1
mkdir $1/examples
mv examples/helloWorld.roc examples/platform-switching examples/cli $1/examples
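
Note: with `roc_ls` stripped, renamed to `roc_lang_server`, and kept through `git clean`, each release archive now ships two binaries. A hedged sketch of what listing an archive should show (the archive name follows the nightly URL pattern above; the exact layout inside the tarball is an assumption):

```sh
tar -tzf roc_nightly-linux_x86_64-latest.tar.gz
# expected top-level entries (sketch):
#   roc
#   roc_lang_server
#   LICENSE
#   LEGAL_DETAILS
#   examples/...
```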

ci/roc_test_builtins.sh Executable file
View file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail
for file in crates/compiler/builtins/roc/*.roc; do
if grep -qE '^\s*expect' "$file"; then
cargo run --locked --release -- test "$file"
fi
done
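
Note: this new script replaces the hard-coded `roc test` steps for `Str.roc` and `Dict.roc` in the workflows above: it runs `roc test` on every builtin module that contains a top-level `expect`. Locally it can be run as-is, or a single module can be spot-checked the same way the loop does:

```sh
# from the repo root; the first run builds roc in release mode
./ci/roc_test_builtins.sh

# equivalent manual check for one module (sketch)
grep -qE '^\s*expect' crates/compiler/builtins/roc/Dict.roc &&
  cargo run --locked --release -- test crates/compiler/builtins/roc/Dict.roc
```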

ci/update_basic_cli_url.sh Executable file
View file

@ -0,0 +1,23 @@
#!/usr/bin/env bash
# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail
# Check if the correct number of arguments is given
if [ "$#" -ne 2 ]; then
echo "Usage: ./update_basic_cli_url.sh OLD_URL NEW_URL"
exit 1
fi
OLD_URL=$1
NEW_URL=$2
# Use git ls-files to list all files tracked by Git, excluding those in .gitignore
files=$(git ls-files)
# Use perl to replace OLD_URL with NEW_URL in the files
for file in $files; do
perl -pi -e "s|\Q$OLD_URL\E|$NEW_URL|g" $file
done
echo "Replaced all old basic-cli URLs with the new one."

View file

@ -50,7 +50,8 @@ pub const CMD_GLUE: &str = "glue";
pub const CMD_GEN_STUB_LIB: &str = "gen-stub-lib";
pub const CMD_PREPROCESS_HOST: &str = "preprocess-host";
pub const FLAG_DEBUG: &str = "debug";
pub const FLAG_EMIT_LLVM_IR: &str = "emit-llvm-ir";
pub const FLAG_PROFILING: &str = "profiling";
pub const FLAG_BUNDLE: &str = "bundle";
pub const FLAG_DEV: &str = "dev";
pub const FLAG_OPTIMIZE: &str = "optimize";
@ -102,9 +103,15 @@ pub fn build_app() -> Command {
.action(ArgAction::SetTrue)
.required(false);
let flag_debug = Arg::new(FLAG_DEBUG)
.long(FLAG_DEBUG)
.help("Store LLVM debug information in the generated program")
let flag_emit_llvm_ir = Arg::new(FLAG_EMIT_LLVM_IR)
.long(FLAG_EMIT_LLVM_IR)
.help("Emit a `.ll` file containing the LLVM IR of the program")
.action(ArgAction::SetTrue)
.required(false);
let flag_profiling = Arg::new(FLAG_PROFILING)
.long(FLAG_PROFILING)
.help("Keep debug info in the final generated program even in optmized builds")
.action(ArgAction::SetTrue)
.required(false);
@ -163,7 +170,8 @@ pub fn build_app() -> Command {
.arg(flag_max_threads.clone())
.arg(flag_opt_size.clone())
.arg(flag_dev.clone())
.arg(flag_debug.clone())
.arg(flag_emit_llvm_ir.clone())
.arg(flag_profiling.clone())
.arg(flag_time.clone())
.arg(flag_linker.clone())
.arg(flag_prebuilt.clone())
@ -212,7 +220,8 @@ pub fn build_app() -> Command {
.arg(flag_max_threads.clone())
.arg(flag_opt_size.clone())
.arg(flag_dev.clone())
.arg(flag_debug.clone())
.arg(flag_emit_llvm_ir.clone())
.arg(flag_profiling.clone())
.arg(flag_time.clone())
.arg(flag_linker.clone())
.arg(flag_prebuilt.clone())
@ -234,7 +243,8 @@ pub fn build_app() -> Command {
.arg(flag_max_threads.clone())
.arg(flag_opt_size.clone())
.arg(flag_dev.clone())
.arg(flag_debug.clone())
.arg(flag_emit_llvm_ir.clone())
.arg(flag_profiling.clone())
.arg(flag_time.clone())
.arg(flag_linker.clone())
.arg(flag_prebuilt.clone())
@ -247,7 +257,8 @@ pub fn build_app() -> Command {
.arg(flag_max_threads.clone())
.arg(flag_opt_size.clone())
.arg(flag_dev.clone())
.arg(flag_debug.clone())
.arg(flag_emit_llvm_ir.clone())
.arg(flag_profiling.clone())
.arg(flag_time.clone())
.arg(flag_linker.clone())
.arg(flag_prebuilt.clone())
@ -376,7 +387,8 @@ pub fn build_app() -> Command {
.arg(flag_max_threads)
.arg(flag_opt_size)
.arg(flag_dev)
.arg(flag_debug)
.arg(flag_emit_llvm_ir)
.arg(flag_profiling)
.arg(flag_time)
.arg(flag_linker)
.arg(flag_prebuilt)
@ -695,7 +707,13 @@ pub fn build(
CodeGenBackend::Llvm(backend_mode)
};
let emit_debug_info = matches.get_flag(FLAG_DEBUG);
let emit_llvm_ir = matches.get_flag(FLAG_EMIT_LLVM_IR);
if emit_llvm_ir && !matches!(code_gen_backend, CodeGenBackend::Llvm(_)) {
user_error!("Cannot emit llvm ir while using a dev backend.");
}
let emit_debug_info = matches.get_flag(FLAG_PROFILING)
|| matches!(opt_level, OptLevel::Development | OptLevel::Normal);
let emit_timings = matches.get_flag(FLAG_TIME);
let threading = match matches.get_one::<usize>(FLAG_MAX_THREADS) {
@ -744,6 +762,7 @@ pub fn build(
backend: code_gen_backend,
opt_level,
emit_debug_info,
emit_llvm_ir,
};
let load_config = standard_load_config(&triple, build_ordering, threading);
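
Note: taken together, these changes split the old `--debug` flag in two. A hedged sketch of the resulting invocations (flag names come from the diff; `app.roc` is a placeholder):

```sh
# write app.ll containing the program's LLVM IR (errors on the dev backend)
roc build --emit-llvm-ir app.roc

# keep debug info even in an optimized build;
# unoptimized builds now keep it by default
roc build --optimize --profiling app.roc
```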

View file

@ -562,11 +562,12 @@ mod cli_run {
words : List Str
words = ["this", "will", "for", "sure", "be", "a", "large", "string", "so", "when", "we", "split", "it", "it", "will", "use", "seamless", "slices", "which", "affect", "printing"]
[#UserApp] 42
[#UserApp] "Fjoer en ferdjer frieten oan dyn geve lea"
[#UserApp] "abc"
[#UserApp] 10
[#UserApp] (A (B C))
[<ignored for tests>:22] x = 42
[<ignored for tests>:23] "Fjoer en ferdjer frieten oan dyn geve lea" = "Fjoer en ferdjer frieten oan dyn geve lea"
[<ignored for tests>:24] "this is line 24" = "this is line 24"
[<ignored for tests>:13] x = "abc"
[<ignored for tests>:13] x = 10
[<ignored for tests>:13] x = (A (B C))
Program finished!
"#
),
@ -873,7 +874,7 @@ mod cli_run {
This roc file can print its own source code. The source is:
app "ingested-file"
packages { pf: "https://github.com/roc-lang/basic-cli/releases/download/0.7.0/bkGby8jb0tmZYsy2hg1E_B2QrCgcSTxdUlHtETwm5m4.tar.br" }
packages { pf: "https://github.com/roc-lang/basic-cli/releases/download/0.7.1/Icc3xJoIixF3hCcfXrDwLCu4wQHtNdPyoJkEbkgIElA.tar.br" }
imports [
pf.Stdout,
"ingested-file.roc" as ownCode : Str,
@ -900,7 +901,7 @@ mod cli_run {
&[],
&[],
&[],
"30461\n",
"162088\n",
UseValgrind::No,
TestCliCommands::Run,
)
@ -942,7 +943,7 @@ mod cli_run {
test_roc_app_slim(
"examples",
"inspect-logging.roc",
r#"{friends: [{2}, {2}, {0, 1}], people: [{age: 27, favoriteColor: Blue, firstName: "John", hasBeard: Bool.true, lastName: "Smith"}, {age: 47, favoriteColor: Green, firstName: "Debby", hasBeard: Bool.false, lastName: "Johnson"}, {age: 33, favoriteColor: (RGB (255, 255, 0)), firstName: "Jane", hasBeard: Bool.false, lastName: "Doe"}]}
r#"(@Community {friends: [{2}, {2}, {0, 1}], people: [(@Person {age: 27, favoriteColor: Blue, firstName: "John", hasBeard: Bool.true, lastName: "Smith"}), (@Person {age: 47, favoriteColor: Green, firstName: "Debby", hasBeard: Bool.false, lastName: "Johnson"}), (@Person {age: 33, favoriteColor: (RGB (255, 255, 0)), firstName: "Jane", hasBeard: Bool.false, lastName: "Doe"})]})
"#,
UseValgrind::Yes,
)

View file

@ -52,9 +52,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
extern fn kill(pid: c_int, sig: c_int) c_int;
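
Note: this same signature change repeats for every platform host below: `roc_dbg` gains a third `src` string carrying the source text of the dbg'd expression, and the output format becomes `[location] source = value`. A sketch of the visible difference for a program containing `x = 42` followed by `dbg x` (location shortened):

```sh
roc dev app.roc
# old host output: [app.roc:22] 42
# new host output: [app.roc:22] x = 42
```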

View file

@ -52,9 +52,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
extern fn kill(pid: c_int, sig: c_int) c_int;

View file

@ -52,9 +52,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
extern fn kill(pid: c_int, sig: c_int) c_int;

View file

@ -66,9 +66,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {

View file

@ -63,9 +63,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {

View file

@ -68,9 +68,9 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {

View file

@ -21,6 +21,7 @@ main =
x = 42
dbg x
dbg "Fjoer en ferdjer frieten oan dyn geve lea"
dbg "this is line 24"
r = {x : polyDbg "abc", y: polyDbg 10u8, z : polyDbg (A (B C))}

View file

@ -52,10 +52,10 @@ export fn roc_panic(msg: *RocStr, tag_id: u32) callconv(.C) void {
std.process.exit(1);
}
export fn roc_dbg(loc: *RocStr, msg: *RocStr) callconv(.C) void {
export fn roc_dbg(loc: *RocStr, msg: *RocStr, src: *RocStr) callconv(.C) void {
// This platform uses stdout for testing purposes instead of the normal stderr.
const stdout = std.io.getStdOut().writer();
stdout.print("[{s}] {s}\n", .{ loc.asSlice(), msg.asSlice() }) catch unreachable;
stdout.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
export fn roc_memset(dst: [*]u8, value: i32, size: usize) callconv(.C) void {

View file

@ -85,6 +85,7 @@ pub struct CodeGenOptions {
pub backend: CodeGenBackend,
pub opt_level: OptLevel,
pub emit_debug_info: bool,
pub emit_llvm_ir: bool,
}
type GenFromMono<'a> = (CodeObject, CodeGenTiming, ExpectMetadata<'a>);
@ -101,6 +102,7 @@ pub fn gen_from_mono_module<'a>(
) -> GenFromMono<'a> {
let path = roc_file_path;
let debug = code_gen_options.emit_debug_info;
let emit_llvm_ir = code_gen_options.emit_llvm_ir;
let opt = code_gen_options.opt_level;
match code_gen_options.backend {
@ -120,15 +122,23 @@ pub fn gen_from_mono_module<'a>(
wasm_dev_stack_bytes,
backend_mode,
),
CodeGenBackend::Llvm(backend_mode) => {
gen_from_mono_module_llvm(arena, loaded, path, target, opt, backend_mode, debug)
}
CodeGenBackend::Llvm(backend_mode) => gen_from_mono_module_llvm(
arena,
loaded,
path,
target,
opt,
backend_mode,
debug,
emit_llvm_ir,
),
}
}
// TODO how should imported modules factor into this? What if those use builtins too?
// TODO this should probably use more helper functions
// TODO make this polymorphic in the llvm functions so it can be reused for another backend.
#[allow(clippy::too_many_arguments)]
fn gen_from_mono_module_llvm<'a>(
arena: &'a bumpalo::Bump,
loaded: MonomorphizedModule<'a>,
@ -137,6 +147,7 @@ fn gen_from_mono_module_llvm<'a>(
opt_level: OptLevel,
backend_mode: LlvmBackendMode,
emit_debug_info: bool,
emit_llvm_ir: bool,
) -> GenFromMono<'a> {
use crate::target::{self, convert_opt_level};
use inkwell::attributes::{Attribute, AttributeLoc};
@ -151,9 +162,6 @@ fn gen_from_mono_module_llvm<'a>(
let context = Context::create();
let module = arena.alloc(module_from_builtins(target, &context, "app"));
// strip Zig debug stuff
// module.strip_debug_info();
// mark our zig-defined builtins as internal
let app_ll_file = {
let mut temp = PathBuf::from(roc_file_path);
@ -245,8 +253,9 @@ fn gen_from_mono_module_llvm<'a>(
env.dibuilder.finalize();
// we don't use the debug info, and it causes weird errors.
module.strip_debug_info();
if !emit_debug_info {
module.strip_debug_info();
}
// Uncomment this to see the module's optimized LLVM instruction output:
// env.module.print_to_stderr();
@ -265,6 +274,11 @@ fn gen_from_mono_module_llvm<'a>(
);
}
if emit_llvm_ir {
eprintln!("Emitting LLVM IR to {}", &app_ll_file.display());
module.print_to_file(&app_ll_file).unwrap();
}
// Uncomment this to see the module's optimized LLVM instruction output:
// env.module.print_to_stderr();
@ -359,65 +373,6 @@ fn gen_from_mono_module_llvm<'a>(
assert!(bc_to_object.status.success(), "{bc_to_object:#?}");
MemoryBuffer::create_from_file(&app_o_file).expect("memory buffer creation works")
} else if emit_debug_info {
module.strip_debug_info();
let mut app_ll_dbg_file = PathBuf::from(roc_file_path);
app_ll_dbg_file.set_extension("dbg.ll");
let mut app_o_file = PathBuf::from(roc_file_path);
app_o_file.set_extension("o");
use std::process::Command;
// write the ll code to a file, so we can modify it
module.print_to_file(&app_ll_file).unwrap();
// run the debugir https://github.com/vaivaswatha/debugir tool
match Command::new("debugir")
.args(["-instnamer", app_ll_file.to_str().unwrap()])
.output()
{
Ok(_) => {}
Err(error) => {
use std::io::ErrorKind;
match error.kind() {
ErrorKind::NotFound => internal_error!(
r"I could not find the `debugir` tool on the PATH, install it from https://github.com/vaivaswatha/debugir"
),
_ => internal_error!("{:?}", error),
}
}
}
use target_lexicon::Architecture;
match target.architecture {
Architecture::X86_64
| Architecture::X86_32(_)
| Architecture::Aarch64(_)
| Architecture::Wasm32 => {
// write the .o file. Note that this builds the .o for the local machine,
// and ignores the `target_machine` entirely.
//
// different systems name this executable differently, so we shotgun for
// the most common ones and then give up.
let ll_to_object = Command::new("llc")
.args([
"-relocation-model=pic",
"-filetype=obj",
app_ll_dbg_file.to_str().unwrap(),
"-o",
app_o_file.to_str().unwrap(),
])
.output()
.unwrap();
assert!(ll_to_object.stderr.is_empty(), "{ll_to_object:#?}");
}
_ => unreachable!(),
}
MemoryBuffer::create_from_file(&app_o_file).expect("memory buffer creation works")
} else {
// Emit the .o file
@ -1326,6 +1281,7 @@ pub fn build_str_test<'a>(
backend: CodeGenBackend::Llvm(LlvmBackendMode::Binary),
opt_level: OptLevel::Normal,
emit_debug_info: false,
emit_llvm_ir: false,
};
let emit_timings = false;

View file

@ -11,7 +11,7 @@ fn roc_alloc(_: usize, _: u32) callconv(.C) ?*anyopaque {
fn roc_panic(_: *anyopaque, _: u32) callconv(.C) void {
@panic("Not needed for dec benchmark");
}
fn roc_dbg(_: *anyopaque, _: *anyopaque) callconv(.C) void {
fn roc_dbg(_: *anyopaque, _: *anyopaque, _: *anyopaque) callconv(.C) void {
@panic("Not needed for dec benchmark");
}

View file

@ -7,7 +7,7 @@ const CrossTarget = std.zig.CrossTarget;
const Arch = std.Target.Cpu.Arch;
pub fn build(b: *Build) void {
// const mode = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseFast });
// const mode = b.standardOptimizeOption(.{ .preferred_optimize_mode = .Debug });
const mode = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseFast });
// Options
@ -58,6 +58,9 @@ fn generateLlvmIrFile(
) void {
const obj = b.addObject(.{ .name = object_name, .root_source_file = main_path, .optimize = mode, .target = target, .use_llvm = true });
obj.strip = true;
obj.disable_stack_probing = true;
if (target.cpu_arch != .wasm32)
obj.bundle_compiler_rt = true;
// Generating the bin seems required to get zig to generate the llvm ir.
_ = obj.getEmittedBin();
@ -89,6 +92,9 @@ fn generateObjectFile(
obj.strip = true;
obj.link_function_sections = true;
obj.force_pic = true;
obj.disable_stack_probing = true;
if (target.cpu_arch != .wasm32)
obj.bundle_compiler_rt = true;
const obj_file = obj.getEmittedBin();
@ -110,7 +116,7 @@ fn makeLinux32Target() CrossTarget {
target.cpu_arch = std.Target.Cpu.Arch.x86;
target.os_tag = std.Target.Os.Tag.linux;
target.abi = std.Target.Abi.musl;
target.abi = std.Target.Abi.none;
return target;
}
@ -120,7 +126,7 @@ fn makeLinuxAarch64Target() CrossTarget {
target.cpu_arch = std.Target.Cpu.Arch.aarch64;
target.os_tag = std.Target.Os.Tag.linux;
target.abi = std.Target.Abi.musl;
target.abi = std.Target.Abi.none;
return target;
}
@ -130,7 +136,7 @@ fn makeLinuxX64Target() CrossTarget {
target.cpu_arch = std.Target.Cpu.Arch.x86_64;
target.os_tag = std.Target.Os.Tag.linux;
target.abi = std.Target.Abi.musl;
target.abi = std.Target.Abi.none;
return target;
}
@ -140,7 +146,7 @@ fn makeWindows64Target() CrossTarget {
target.cpu_arch = std.Target.Cpu.Arch.x86_64;
target.os_tag = std.Target.Os.Tag.windows;
target.abi = std.Target.Abi.gnu;
target.abi = std.Target.Abi.none;
return target;
}

View file

@ -1,478 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const math = std.math;
// Eventually, we need to statically ingest compiler-rt and get it working with the surgical linker, then these should not be needed anymore.
// Until then, we are manually ingesting used parts of compiler-rt here.
//
// Taken from
// https://github.com/ziglang/zig/tree/4976b58ab16069f8d3267b69ed030f29685c1abe/lib/compiler_rt/
// Thank you Zig Contributors!
// Libcalls that involve u128 on Windows x86-64 are expected by LLVM to use the
// calling convention of @Vector(2, u64), rather than what's standard.
pub const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64 and @import("builtin").object_format != .c;
const v2u64 = @Vector(2, u64);
// Export it as weak in case it is already linked in by something else.
comptime {
if (!want_windows_v2u64_abi) {
@export(__muloti4, .{ .name = "__muloti4", .linkage = .Weak });
@export(__lshrti3, .{ .name = "__lshrti3", .linkage = .Weak });
@export(__divti3, .{ .name = "__divti3", .linkage = .Weak });
@export(__modti3, .{ .name = "__modti3", .linkage = .Weak });
@export(__umodti3, .{ .name = "__umodti3", .linkage = .Weak });
@export(__udivti3, .{ .name = "__udivti3", .linkage = .Weak });
@export(__fixdfti, .{ .name = "__fixdfti", .linkage = .Weak });
@export(__fixsfti, .{ .name = "__fixsfti", .linkage = .Weak });
@export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = .Weak });
@export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = .Weak });
}
}
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
return muloXi4_genericFast(i128, a, b, overflow);
} else {
return muloXi4_genericSmall(i128, a, b, overflow);
}
}
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
return div(a, b);
}
fn __divti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(div(@as(i128, @bitCast(a)), @as(i128, @bitCast(b)))));
}
inline fn div(a: i128, b: i128) i128 {
const s_a = a >> (128 - 1);
const s_b = b >> (128 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;
const r = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), null);
const s = s_a ^ s_b;
return (@as(i128, @bitCast(r)) ^ s) -% s;
}
pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 {
return udivmod(u128, a, b, null);
}
fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), null)));
}
pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 {
var r: u128 = undefined;
_ = udivmod(u128, a, b, &r);
return r;
}
fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
var r: u128 = undefined;
_ = udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), &r);
return @as(v2u64, @bitCast(r));
}
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
return mod(a, b);
}
fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(mod(@as(i128, @bitCast(a)), @as(i128, @bitCast(b)))));
}
inline fn mod(a: i128, b: i128) i128 {
const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
const an = (a ^ s_a) -% s_a; // negate if s == -1
const bn = (b ^ s_b) -% s_b; // negate if s == -1
var r: u128 = undefined;
_ = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), &r);
return (@as(i128, @bitCast(r)) ^ s_a) -% s_a; // negate if s == -1
}
pub fn __fixdfti(a: f64) callconv(.C) i128 {
return floatToInt(i128, a);
}
fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(floatToInt(i128, a)));
}
pub fn __fixsfti(a: f32) callconv(.C) i128 {
return floatToInt(i128, a);
}
fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(floatToInt(i128, a)));
}
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
return floatToInt(u128, a);
}
fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(floatToInt(u128, a)));
}
pub fn __fixunssfti(a: f32) callconv(.C) u128 {
return floatToInt(u128, a);
}
fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
return @as(v2u64, @bitCast(floatToInt(u128, a)));
}
// mulo - multiplication overflow
// * return a*%b.
// * return if a*b overflows => 1 else => 0
// - muloXi4_genericSmall as default
// - muloXi4_genericFast for 2*bitsize <= usize
inline fn muloXi4_genericSmall(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
overflow.* = 0;
const min = math.minInt(ST);
var res: ST = a *% b;
// Hacker's Delight section Overflow subsection Multiplication
// case a=-2^{31}, b=-1 problem, because
// on some machines a*b = -2^{31} with overflow
// Then -2^{31}/-1 overflows and any result is possible.
// => check with a<0 and b=-2^{31}
if ((a < 0 and b == min) or (a != 0 and @divTrunc(res, a) != b))
overflow.* = 1;
return res;
}
inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
overflow.* = 0;
const EST = switch (ST) {
i32 => i64,
i64 => i128,
i128 => i256,
else => unreachable,
};
const min = math.minInt(ST);
const max = math.maxInt(ST);
var res: EST = @as(EST, a) * @as(EST, b);
//invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1}
if (res < min or max < res)
overflow.* = 1;
return @as(ST, @truncate(res));
}
const native_endian = builtin.cpu.arch.endian();
const low = switch (native_endian) {
.Big => 1,
.Little => 0,
};
const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
// @setRuntimeSafety(builtin.is_test);
const double_int_bits = @typeInfo(DoubleInt).Int.bits;
const single_int_bits = @divExact(double_int_bits, 2);
const SingleInt = std.meta.Int(.unsigned, single_int_bits);
const SignedDoubleInt = std.meta.Int(.signed, double_int_bits);
const Log2SingleInt = std.math.Log2Int(SingleInt);
const n = @as([2]SingleInt, @bitCast(a));
const d = @as([2]SingleInt, @bitCast(b));
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
// special cases, X is unknown, K != 0
if (n[high] == 0) {
if (d[high] == 0) {
// 0 X
// ---
// 0 X
if (maybe_rem) |rem| {
rem.* = n[low] % d[low];
}
return n[low] / d[low];
}
// 0 X
// ---
// K X
if (maybe_rem) |rem| {
rem.* = n[low];
}
return 0;
}
// n[high] != 0
if (d[low] == 0) {
if (d[high] == 0) {
// K X
// ---
// 0 0
if (maybe_rem) |rem| {
rem.* = n[high] % d[low];
}
return n[high] / d[low];
}
// d[high] != 0
if (n[low] == 0) {
// K 0
// ---
// K 0
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
rem.* = @as(DoubleInt, @bitCast(r));
}
return n[high] / d[high];
}
// K K
// ---
// K 0
if ((d[high] & (d[high] - 1)) == 0) {
// d is a power of 2
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
rem.* = @as(DoubleInt, @bitCast(r));
}
return n[high] >> @as(Log2SingleInt, @intCast(@ctz(d[high])));
}
// K K
// ---
// K 0
sr = @as(c_uint, @bitCast(@as(c_int, @clz(d[high])) - @as(c_int, @clz(n[high]))));
// 0 <= sr <= single_int_bits - 2 or sr large
if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= single_int_bits - 1
// q.all = a << (double_int_bits - sr);
q[low] = 0;
q[high] = n[low] << @as(Log2SingleInt, @intCast(single_int_bits - sr));
// r.all = a >> sr;
r[high] = n[high] >> @as(Log2SingleInt, @intCast(sr));
r[low] = (n[high] << @as(Log2SingleInt, @intCast(single_int_bits - sr))) | (n[low] >> @as(Log2SingleInt, @intCast(sr)));
} else {
// d[low] != 0
if (d[high] == 0) {
// K X
// ---
// 0 K
if ((d[low] & (d[low] - 1)) == 0) {
// d is a power of 2
if (maybe_rem) |rem| {
rem.* = n[low] & (d[low] - 1);
}
if (d[low] == 1) {
return a;
}
sr = @ctz(d[low]);
q[high] = n[high] >> @as(Log2SingleInt, @intCast(sr));
q[low] = (n[high] << @as(Log2SingleInt, @intCast(single_int_bits - sr))) | (n[low] >> @as(Log2SingleInt, @intCast(sr)));
return @as(DoubleInt, @bitCast(q));
}
// K X
// ---
// 0 K
sr = 1 + single_int_bits + @as(c_uint, @clz(d[low])) - @as(c_uint, @clz(n[high]));
// 2 <= sr <= double_int_bits - 1
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
if (sr == single_int_bits) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else if (sr < single_int_bits) {
// 2 <= sr <= single_int_bits - 1
q[low] = 0;
q[high] = n[low] << @as(Log2SingleInt, @intCast(single_int_bits - sr));
r[high] = n[high] >> @as(Log2SingleInt, @intCast(sr));
r[low] = (n[high] << @as(Log2SingleInt, @intCast(single_int_bits - sr))) | (n[low] >> @as(Log2SingleInt, @intCast(sr)));
} else {
// single_int_bits + 1 <= sr <= double_int_bits - 1
q[low] = n[low] << @as(Log2SingleInt, @intCast(double_int_bits - sr));
q[high] = (n[high] << @as(Log2SingleInt, @intCast(double_int_bits - sr))) | (n[low] >> @as(Log2SingleInt, @intCast(sr - single_int_bits)));
r[high] = 0;
r[low] = n[high] >> @as(Log2SingleInt, @intCast(sr - single_int_bits));
}
} else {
// K X
// ---
// K K
sr = @as(c_uint, @bitCast(@as(c_int, @clz(d[high])) - @as(c_int, @clz(n[high]))));
// 0 <= sr <= single_int_bits - 1 or sr large
if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= single_int_bits
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
q[low] = 0;
if (sr == single_int_bits) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @as(Log2SingleInt, @intCast(sr));
r[low] = (n[high] << @as(Log2SingleInt, @intCast(single_int_bits - sr))) | (n[low] >> @as(Log2SingleInt, @intCast(sr)));
q[high] = n[low] << @as(Log2SingleInt, @intCast(single_int_bits - sr));
}
}
}
// Not a special case
// q and r are initialized with:
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
// 1 <= sr <= double_int_bits - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1));
r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1));
q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
// {
// r.all -= b;
// carry = 1;
// }
r_all = @as(DoubleInt, @bitCast(r));
const s: SignedDoubleInt = @as(SignedDoubleInt, @bitCast(b -% r_all -% 1)) >> (double_int_bits - 1);
carry = @as(u32, @intCast(s & 1));
r_all -= b & @as(DoubleInt, @bitCast(s));
r = @as([2]SingleInt, @bitCast(r_all));
}
const q_all = (@as(DoubleInt, @bitCast(q)) << 1) | carry;
if (maybe_rem) |rem| {
rem.* = r_all;
}
return q_all;
}
pub inline fn floatToInt(comptime I: type, a: anytype) I {
const Log2Int = math.Log2Int;
const Int = @import("std").meta.Int;
const F = @TypeOf(a);
const float_bits = @typeInfo(F).Float.bits;
const int_bits = @typeInfo(I).Int.bits;
const rep_t = Int(.unsigned, float_bits);
const sig_bits = math.floatMantissaBits(F);
const exp_bits = math.floatExponentBits(F);
const fractional_bits = floatFractionalBits(F);
// const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
const implicit_bit = @as(rep_t, 1) << sig_bits;
const max_exp = (1 << (exp_bits - 1));
const exp_bias = max_exp - 1;
const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
// Break a into sign, exponent, significand
const a_rep: rep_t = @as(rep_t, @bitCast(a));
const negative = (a_rep >> (float_bits - 1)) != 0;
const exponent = @as(i32, @intCast((a_rep << 1) >> (sig_bits + 1))) - exp_bias;
const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
// If the exponent is negative, the result rounds to zero.
if (exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
switch (@typeInfo(I).Int.signedness) {
.unsigned => {
if (negative) return 0;
if (@as(c_uint, @intCast(exponent)) >= @min(int_bits, max_exp)) return math.maxInt(I);
},
.signed => if (@as(c_uint, @intCast(exponent)) >= @min(int_bits - 1, max_exp)) {
return if (negative) math.minInt(I) else math.maxInt(I);
},
}
// If 0 <= exponent < sig_bits, right shift to get the result.
// Otherwise, shift left.
var result: I = undefined;
if (exponent < fractional_bits) {
result = @as(I, @intCast(significand >> @as(Log2Int(rep_t), @intCast(fractional_bits - exponent))));
} else {
result = @as(I, @intCast(significand)) << @as(Log2Int(I), @intCast(exponent - fractional_bits));
}
if ((@typeInfo(I).Int.signedness == .signed) and negative)
return ~result +% 1;
return result;
}
/// Returns the number of fractional bits in the mantissa of floating point type T.
pub inline fn floatFractionalBits(comptime T: type) comptime_int {
comptime std.debug.assert(@typeInfo(T) == .Float);
// standard IEEE floats have an implicit 0.m or 1.m integer part
// f80 is special and has an explicitly stored bit in the MSB
// this function corresponds to `MANT_DIG - 1' from C
return switch (@typeInfo(T).Float.bits) {
16 => 10,
32 => 23,
64 => 52,
80 => 63,
128 => 112,
else => @compileError("unknown floating point type " ++ @typeName(T)),
};
}
pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
return lshrXi3(i128, a, b);
}
// Logical shift right: shift in 0 from left to right
// Precondition: 0 <= b < T.bit_count
inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const word_t = HalveInt(T, false);
const S = std.math.Log2Int(word_t.HalfT);
const input = word_t{ .all = a };
var output: word_t = undefined;
if (b >= word_t.bits) {
output.s.high = 0;
output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits));
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @as(S, @intCast(b));
output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b));
output.s.low |= input.s.low >> @as(S, @intCast(b));
}
return output.all;
}
/// Allows accessing the underlying bits as two equally sized lower and higher
/// signed or unsigned integers.
fn HalveInt(comptime T: type, comptime signed_half: bool) type {
return extern union {
pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
pub const HalfTU = std.meta.Int(.unsigned, bits);
pub const HalfTS = std.meta.Int(.signed, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
extern struct { low: HalfT, high: HalfT }
else
extern struct { high: HalfT, low: HalfT },
};
}

View file

@ -0,0 +1,11 @@
const std = @import("std");
const builtin = @import("builtin");
const RocStr = @import("str.zig").RocStr;
// An optional debug impl to be called during `roc test`
pub fn dbg_impl(loc: *const RocStr, msg: *const RocStr, src: *const RocStr) callconv(.C) void {
if (builtin.target.cpu.arch != .wasm32) {
const stderr = std.io.getStdErr().writer();
stderr.print("[{s}] {s} = {s}\n", .{ loc.asSlice(), src.asSlice(), msg.asSlice() }) catch unreachable;
}
}

View file

@ -252,7 +252,6 @@ pub const RocDec = extern struct {
if (answer.has_overflowed) {
roc_panic("Decimal addition overflowed!", 0);
unreachable;
} else {
return answer.value;
}
@ -283,7 +282,6 @@ pub const RocDec = extern struct {
if (answer.has_overflowed) {
roc_panic("Decimal subtraction overflowed!", 0);
unreachable;
} else {
return answer.value;
}
@ -347,7 +345,6 @@ pub const RocDec = extern struct {
if (answer.has_overflowed) {
roc_panic("Decimal multiplication overflowed!", 0);
unreachable;
} else {
return answer.value;
}
@ -369,7 +366,7 @@ pub const RocDec = extern struct {
// (n / 0) is an error
if (denominator_i128 == 0) {
@panic("TODO runtime exception for dividing by 0!");
roc_panic("Decimal division by 0!", 0);
}
// If they're both negative, or if neither is negative, the final answer
@ -397,7 +394,7 @@ pub const RocDec = extern struct {
if (denominator_i128 == one_point_zero_i128) {
return self;
} else {
@panic("TODO runtime exception for overflow when dividing!");
roc_panic("Decimal division overflow in numerator!", 0);
}
};
const numerator_u128 = @as(u128, @intCast(numerator_abs_i128));
@ -410,7 +407,7 @@ pub const RocDec = extern struct {
if (numerator_i128 == one_point_zero_i128) {
return other;
} else {
@panic("TODO runtime exception for overflow when dividing!");
roc_panic("Decimal division overflow in denominator!", 0);
}
};
const denominator_u128 = @as(u128, @intCast(denominator_abs_i128));
@ -422,7 +419,7 @@ pub const RocDec = extern struct {
if (answer.hi == 0 and answer.lo <= math.maxInt(i128)) {
unsigned_answer = @as(i128, @intCast(answer.lo));
} else {
@panic("TODO runtime exception for overflow when dividing!");
roc_panic("Decimal division overflow!", 0);
}
return RocDec{ .num = if (is_answer_negative) -unsigned_answer else unsigned_answer };
@ -636,7 +633,7 @@ fn mul_and_decimalize(a: u128, b: u128) i128 {
const d = answer[0];
if (overflowed == 1) {
@panic("TODO runtime exception for overflow!");
roc_panic("Decimal multiplication overflow!", 0);
}
// Final 512bit value is d, c, b, a
@ -1216,7 +1213,7 @@ pub fn fromF64C(arg: f64) callconv(.C) i128 {
if (@call(.always_inline, RocDec.fromF64, .{arg})) |dec| {
return dec.num;
} else {
@panic("TODO runtime exception failing convert f64 to RocDec");
roc_panic("Decimal conversion from f64 failed!", 0);
}
}
@ -1225,7 +1222,7 @@ pub fn fromF32C(arg_f32: f32) callconv(.C) i128 {
if (@call(.always_inline, RocDec.fromF64, .{arg_f64})) |dec| {
return dec.num;
} else {
@panic("TODO runtime exception failing convert f64 to RocDec");
roc_panic("Decimal conversion from f32!", 0);
}
}
@ -1240,7 +1237,7 @@ pub fn exportFromInt(comptime T: type, comptime name: []const u8) void {
const answer = @mulWithOverflow(this, RocDec.one_point_zero_i128);
if (answer[1] == 1) {
@panic("TODO runtime exception failing convert integer to RocDec");
roc_panic("Decimal conversion from Integer failed!", 0);
} else {
return answer[0];
}
@ -1266,11 +1263,15 @@ pub fn neqC(arg1: RocDec, arg2: RocDec) callconv(.C) bool {
}
pub fn negateC(arg: RocDec) callconv(.C) i128 {
return if (@call(.always_inline, RocDec.negate, .{arg})) |dec| dec.num else @panic("TODO overflow for negating RocDec");
return if (@call(.always_inline, RocDec.negate, .{arg})) |dec| dec.num else {
roc_panic("Decimal negation overflow!", 0);
};
}
pub fn absC(arg: RocDec) callconv(.C) i128 {
const result = @call(.always_inline, RocDec.abs, .{arg}) catch @panic("TODO overflow for calling absolute value on RocDec");
const result = @call(.always_inline, RocDec.abs, .{arg}) catch {
roc_panic("Decimal absolute value overflow!", 0);
};
return result.num;
}

View file

@ -1,87 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const musl = @import("libc/musl.zig");
const folly = @import("libc/folly.zig");
const cpuid = @import("libc/cpuid.zig");
comptime {
// TODO: remove this workaround.
// Our wasm llvm pipeline always links in memcpy.
// As such, our impl will conflict.
if (builtin.is_test) {
// We don't need memcpy for tests because the tests are built with -lc
} else if (arch != .wasm32) {
@export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
}
}
const Memcpy = *const fn (noalias [*]u8, noalias [*]const u8, len: usize) callconv(.C) [*]u8;
pub var memcpy_target: Memcpy = switch (arch) {
.x86_64 => dispatch_memcpy,
else => unreachable,
};
pub fn memcpy(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.C) [*]u8 {
switch (builtin.os.tag) {
.windows => {
return musl.memcpy(dest, src, len);
},
else => switch (arch) {
// x86_64 has a special optimized memcpy that can use avx2.
.x86_64 => {
return memcpy_target(dest, src, len);
},
else => {
return musl.memcpy(dest, src, len);
},
},
}
}
const MemcpyDecision = enum {
uninitialized,
folly_prefetchw,
folly_prefetcht0,
musl,
};
var memcpy_decision: MemcpyDecision = .uninitialized;
fn dispatch_memcpy(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.C) [*]u8 {
switch (arch) {
.x86_64 => {
// TODO: Switch this to overwrite the memcpy_target pointer once the surgical linker can support it.
// Then dispatch will just happen on the first call instead of every call.
// if (cpuid.supports_avx2()) {
// if (cpuid.supports_prefetchw()) {
// memcpy_target = folly.memcpy_prefetchw;
// } else {
// memcpy_target = folly.memcpy_prefetcht0;
// }
// } else {
// memcpy_target = musl.memcpy;
// }
// return memcpy_target(dest, src, len);
switch (memcpy_decision) {
.uninitialized => {
if (cpuid.supports_avx2()) {
if (cpuid.supports_prefetchw()) {
memcpy_decision = .folly_prefetchw;
} else {
memcpy_decision = .folly_prefetcht0;
}
} else {
memcpy_decision = .musl;
}
return dispatch_memcpy(dest, src, len);
},
.folly_prefetchw => return folly.memcpy_prefetchw(dest, src, len),
.folly_prefetcht0 => return folly.memcpy_prefetcht0(dest, src, len),
.musl => return musl.memcpy(dest, src, len),
}
},
else => unreachable,
}
}

View file

@ -1,7 +0,0 @@
const builtin = @import("builtin");
const os = builtin.os;
pub const function_prefix = switch (os.tag) {
.macos => "_",
else => "",
};

View file

@ -1,53 +0,0 @@
// Check if AVX2 is supported.
// Returns 1 if AVX2 is supported, 0 otherwise.
.global {[function_prefix]s}supports_avx2;
{[function_prefix]s}supports_avx2:
// Save the EBX register.
push %rbx
// Call the CPUID instruction with the EAX register set to 7 and ECX set to 0.
// This will get the CPUID information for the current CPU.
mov $7, %eax
mov $0, %ecx
cpuid
// The AVX2 feature flag is located in the EBX register at bit 5.
bt $5, %ebx
jc .avx2_supported
// AVX2 is not supported.
pop %rbx
mov $0, %eax
ret
.avx2_supported:
pop %rbx
mov $1, %eax
ret
// Check if prefetchw is supported.
// Returns 1 if the prefetchw instruction is supported, 0 otherwise.
.global {[function_prefix]s}supports_prefetchw;
{[function_prefix]s}supports_prefetchw:
// Save the EBX register.
push %rbx
// Call the CPUID instruction with the EAX register set to 0x80000001 and ECX set to 0.
// This will get the CPUID information for the current CPU.
mov $0x80000001, %eax
mov $0, %ecx
cpuid
// The prefetchw feature flag is located in the ECX register at bit 8.
bt $8, %ecx
jc .prefetchw_supported
// prefetchw is not supported.
pop %rbx
mov $0, %eax
ret
.prefetchw_supported:
pop %rbx
mov $1, %eax
ret

View file

@ -1,18 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const function_prefix = @import("assembly_util.zig").function_prefix;
// I couldn't manage to define this in a PIE-friendly way with inline assembly.
// Instead, I am defining them as global assembly functions.
comptime {
switch (arch) {
.x86_64 => {
asm (std.fmt.comptimePrint(@embedFile("cpuid.S"), .{ .function_prefix = function_prefix }));
},
else => unreachable,
}
}
pub extern fn supports_avx2() bool;
pub extern fn supports_prefetchw() bool;
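// These symbols are defined by the assembly above. Each call executes the CPUID
// instruction, which is comparatively expensive, so callers are expected to cache
// the answer (as the memcpy dispatcher does with memcpy_decision).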

View file

@ -1,2 +0,0 @@
pub const memcpy_prefetchw = @import("folly/memcpy.zig").__folly_memcpy_prefetchw;
pub const memcpy_prefetcht0 = @import("folly/memcpy.zig").__folly_memcpy_prefetcht0;

View file

@ -1,437 +0,0 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* __folly_memcpy: An optimized memcpy implementation that uses prefetch and
* AVX2 instructions.
*
* This implementation of memcpy acts as a memmove: while overlapping copies
* are undefined in memcpy, in some implementations they're the same function and
* legacy programs rely on this behavior.
*
* This implementation uses prefetch to avoid dtlb misses. This can
* substantially reduce dtlb store misses in cases where the destination
* location is absent from L1 cache and where the copy size is small enough
* that the hardware prefetcher doesn't have a large impact.
*
* The number of branches is limited by the use of overlapping loads & stores.
* This helps with copies where the source and destination cache lines are already
* present in L1 because there are fewer instructions to execute and fewer
* branches to potentially mispredict.
* e.g. to copy the last 4 <= n <= 7 bytes: copy the first & last 4 bytes (overlapped):
* movl (%rsi), %r8d
* movl -4(%rsi,%rdx), %r9d
* movl %r8d, (%rdi)
* movl %r9d, -4(%rdi,%rdx)
*
*
* For sizes up to 256 all source data is first read into registers and then written:
* - n <= 16: overlapping movs
* - n <= 32: overlapping unaligned 16-byte SSE XMM load/stores
* - n <= 256: overlapping unaligned 32-byte AVX YMM load/stores
*
* Large copies (> 256 bytes) use unaligned loads + aligned stores.
* This is observed to always be faster than rep movsb, so the rep movsb
* instruction is not used.
* - The head & tail may be unaligned => they're always written using unaligned stores.
*
* If the copy size is humongous (> 32 KiB) and the source and destination are both
* aligned, this memcpy will use non-temporal operations (AVX2). This can have
* a substantial speedup for copies where data is absent from L1, but it
* is significantly slower if the source and destination data were already
* in L1. The use of non-temporal operations also has the effect that after
* the copy is complete, the data will be moved out of L1, even if the data was
* present before the copy started.
*
* For n > 256 and overlapping src & dst buffers (memmove):
* - use unaligned loads + aligned stores, but not non-temporal stores
* - for dst < src forward copy in 128 byte batches:
* - unaligned load the first 32 bytes & last 4 x 32 bytes
* - forward copy (unaligned load + aligned stores) 4 x 32 bytes at a time
* - unaligned store the first 32 bytes & last 4 x 32 bytes
* - for dst > src backward copy in 128 byte batches:
* - unaligned load the first 4 x 32 bytes & last 32 bytes
* - backward copy (unaligned load + aligned stores) 4 x 32 bytes at a time
* - unaligned store the first 4 x 32 bytes & last 32 bytes
*
* @author Logan Evans <lpe@fb.com>
*/
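// Note: this file is not assembled directly; it is embedded as a Zig comptimePrint
// template (see folly/memcpy.zig), which substitutes {[function_prefix]s} and
// {[prefetch]s} to stamp out the prefetchw and prefetcht0 variants from one source.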
// .type {[function_prefix]s}__folly_memcpy_short_{[prefetch]s}, @function not supported by windows
{[function_prefix]s}__folly_memcpy_short_{[prefetch]s}:
.cfi_startproc
.L_GE1_LE7_{[prefetch]s}:
cmp $1, %rdx
je .L_EQ1_{[prefetch]s}
cmp $4, %rdx
jae .L_GE4_LE7_{[prefetch]s}
.L_GE2_LE3_{[prefetch]s}:
movw (%rsi), %r8w
movw -2(%rsi,%rdx), %r9w
movw %r8w, (%rdi)
movw %r9w, -2(%rdi,%rdx)
ret
.balign 2
.L_EQ1_{[prefetch]s}:
movb (%rsi), %r8b
movb %r8b, (%rdi)
ret
// Aligning the target of a jump to an even address has a measurable
// speedup in microbenchmarks.
.balign 2
.L_GE4_LE7_{[prefetch]s}:
movl (%rsi), %r8d
movl -4(%rsi,%rdx), %r9d
movl %r8d, (%rdi)
movl %r9d, -4(%rdi,%rdx)
ret
.cfi_endproc
// .size {[function_prefix]s}__folly_memcpy_short_{[prefetch]s}, .-{[function_prefix]s}__folly_memcpy_short_{[prefetch]s} not supported by windows
// memcpy is an alternative entrypoint into the function named __folly_memcpy.
// The compiler is able to call memcpy since the name is global while
// stacktraces will show __folly_memcpy since that is the name of the function.
// This is intended to aid in debugging by making it obvious which version of
// memcpy is being used.
.balign 64
.globl {[function_prefix]s}__folly_memcpy_{[prefetch]s}
// .type {[function_prefix]s}__folly_memcpy_{[prefetch]s}, @function not supported by windows
{[function_prefix]s}__folly_memcpy_{[prefetch]s}:
.cfi_startproc
mov %rdi, %rax // return: $rdi
test %rdx, %rdx
je .L_EQ0_{[prefetch]s}
{[prefetch]s} (%rdi)
{[prefetch]s} -1(%rdi,%rdx)
cmp $8, %rdx
jb .L_GE1_LE7_{[prefetch]s}
.L_GE8_{[prefetch]s}:
cmp $32, %rdx
ja .L_GE33_{[prefetch]s}
.L_GE8_LE32_{[prefetch]s}:
cmp $16, %rdx
ja .L_GE17_LE32_{[prefetch]s}
.L_GE8_LE16_{[prefetch]s}:
mov (%rsi), %r8
mov -8(%rsi,%rdx), %r9
mov %r8, (%rdi)
mov %r9, -8(%rdi,%rdx)
.L_EQ0_{[prefetch]s}:
ret
.balign 2
.L_GE17_LE32_{[prefetch]s}:
movdqu (%rsi), %xmm0
movdqu -16(%rsi,%rdx), %xmm1
movdqu %xmm0, (%rdi)
movdqu %xmm1, -16(%rdi,%rdx)
ret
.balign 2
.L_GE193_LE256_{[prefetch]s}:
vmovdqu %ymm3, 96(%rdi)
vmovdqu %ymm4, -128(%rdi,%rdx)
.L_GE129_LE192_{[prefetch]s}:
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm5, -96(%rdi,%rdx)
.L_GE65_LE128_{[prefetch]s}:
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm6, -64(%rdi,%rdx)
.L_GE33_LE64_{[prefetch]s}:
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm7, -32(%rdi,%rdx)
vzeroupper
ret
.balign 2
.L_GE33_{[prefetch]s}:
vmovdqu (%rsi), %ymm0
vmovdqu -32(%rsi,%rdx), %ymm7
cmp $64, %rdx
jbe .L_GE33_LE64_{[prefetch]s}
{[prefetch]s} 64(%rdi)
vmovdqu 32(%rsi), %ymm1
vmovdqu -64(%rsi,%rdx), %ymm6
cmp $128, %rdx
jbe .L_GE65_LE128_{[prefetch]s}
{[prefetch]s} 128(%rdi)
vmovdqu 64(%rsi), %ymm2
vmovdqu -96(%rsi,%rdx), %ymm5
cmp $192, %rdx
jbe .L_GE129_LE192_{[prefetch]s}
{[prefetch]s} 192(%rdi)
vmovdqu 96(%rsi), %ymm3
vmovdqu -128(%rsi,%rdx), %ymm4
cmp $256, %rdx
jbe .L_GE193_LE256_{[prefetch]s}
.L_GE257_{[prefetch]s}:
{[prefetch]s} 256(%rdi)
// Check if there is an overlap. If there is an overlap then the caller
// has a bug since this is undefined behavior. However, for legacy
// reasons this behavior is expected by some callers.
//
// All copies through 256 bytes will operate as a memmove since for
// those sizes all reads are performed before any writes.
//
// This check uses the idea that there is an overlap if
// (%rdi < (%rsi + %rdx)) && (%rsi < (%rdi + %rdx)),
// or equivalently, there is no overlap if
// ((%rsi + %rdx) <= %rdi) || ((%rdi + %rdx) <= %rsi).
//
// %r9 will be used after .L_ALIGNED_DST_LOOP to calculate how many
// bytes remain to be copied.
// (%rsi + %rdx <= %rdi) => no overlap
lea (%rsi,%rdx), %r9
cmp %rdi, %r9
jbe .L_NO_OVERLAP_{[prefetch]s}
// (%rdi + %rdx <= %rsi) => no overlap
lea (%rdi,%rdx), %r8
cmp %rsi, %r8
// If no info is available in the branch predictor's cache, Intel CPUs assume
// forward jumps are not taken. Use a forward jump as overlapping buffers
// are unlikely.
ja .L_OVERLAP_{[prefetch]s}
.balign 2
.L_NO_OVERLAP_{[prefetch]s}:
vmovdqu %ymm0, (%rdi)
vmovdqu %ymm1, 32(%rdi)
vmovdqu %ymm2, 64(%rdi)
vmovdqu %ymm3, 96(%rdi)
// Align %rdi to a 32 byte boundary.
// %rcx = 128 - (%rdi & 31)
mov $128, %rcx
and $31, %rdi
sub %rdi, %rcx
lea (%rsi,%rcx), %rsi
lea (%rax,%rcx), %rdi
sub %rcx, %rdx
// %r8 is the end condition for the loop.
lea -128(%rsi,%rdx), %r8
// This threshold is half of L1 cache on a Skylake machine, which means that
// potentially all of L1 will be populated by this copy once it is executed
// (dst and src are cached for temporal copies).
// NON_TEMPORAL_STORE_THRESHOLD = $32768
// cmp NON_TEMPORAL_STORE_THRESHOLD, %rdx
cmp $32768, %rdx
jae .L_NON_TEMPORAL_LOOP_{[prefetch]s}
.balign 2
.L_ALIGNED_DST_LOOP_{[prefetch]s}:
{[prefetch]s} 128(%rdi)
{[prefetch]s} 192(%rdi)
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
add $128, %rsi
vmovdqa %ymm0, (%rdi)
vmovdqa %ymm1, 32(%rdi)
vmovdqa %ymm2, 64(%rdi)
vmovdqa %ymm3, 96(%rdi)
add $128, %rdi
cmp %r8, %rsi
jb .L_ALIGNED_DST_LOOP_{[prefetch]s}
.L_ALIGNED_DST_LOOP_END_{[prefetch]s}:
sub %rsi, %r9
mov %r9, %rdx
vmovdqu %ymm4, -128(%rdi,%rdx)
vmovdqu %ymm5, -96(%rdi,%rdx)
vmovdqu %ymm6, -64(%rdi,%rdx)
vmovdqu %ymm7, -32(%rdi,%rdx)
vzeroupper
ret
.balign 2
.L_NON_TEMPORAL_LOOP_{[prefetch]s}:
testb $31, %sil
jne .L_ALIGNED_DST_LOOP_{[prefetch]s}
// This is prefetching the source data unlike ALIGNED_DST_LOOP which
// prefetches the destination data. This choice is again informed by
// benchmarks. With a non-temporal store the entirety of the cache line
// is being written so the previous data can be discarded without being
// fetched.
prefetchnta 128(%rsi)
prefetchnta 196(%rsi)
vmovntdqa (%rsi), %ymm0
vmovntdqa 32(%rsi), %ymm1
vmovntdqa 64(%rsi), %ymm2
vmovntdqa 96(%rsi), %ymm3
add $128, %rsi
vmovntdq %ymm0, (%rdi)
vmovntdq %ymm1, 32(%rdi)
vmovntdq %ymm2, 64(%rdi)
vmovntdq %ymm3, 96(%rdi)
add $128, %rdi
cmp %r8, %rsi
jb .L_NON_TEMPORAL_LOOP_{[prefetch]s}
sfence
jmp .L_ALIGNED_DST_LOOP_END_{[prefetch]s}
.L_OVERLAP_{[prefetch]s}:
.balign 2
cmp %rdi, %rsi
jb .L_OVERLAP_BWD_{[prefetch]s} // %rsi < %rdi => backward-copy
je .L_RET_{[prefetch]s} // %rsi == %rdi => return, nothing to copy
// Source & destination buffers overlap. Forward copy.
vmovdqu (%rsi), %ymm8
// Align %rdi to a 32 byte boundary.
// %rcx = 32 - (%rdi & 31)
mov $32, %rcx
and $31, %rdi
sub %rdi, %rcx
lea (%rsi,%rcx), %rsi
lea (%rax,%rcx), %rdi
sub %rcx, %rdx
// %r8 is the end condition for the loop.
lea -128(%rsi,%rdx), %r8
.L_OVERLAP_FWD_ALIGNED_DST_LOOP_{[prefetch]s}:
{[prefetch]s} 128(%rdi)
{[prefetch]s} 192(%rdi)
vmovdqu (%rsi), %ymm0
vmovdqu 32(%rsi), %ymm1
vmovdqu 64(%rsi), %ymm2
vmovdqu 96(%rsi), %ymm3
add $128, %rsi
vmovdqa %ymm0, (%rdi)
vmovdqa %ymm1, 32(%rdi)
vmovdqa %ymm2, 64(%rdi)
vmovdqa %ymm3, 96(%rdi)
add $128, %rdi
cmp %r8, %rsi
jb .L_OVERLAP_FWD_ALIGNED_DST_LOOP_{[prefetch]s}
sub %rsi, %r9
mov %r9, %rdx
vmovdqu %ymm4, -128(%rdi,%rdx)
vmovdqu %ymm5, -96(%rdi,%rdx)
vmovdqu %ymm6, -64(%rdi,%rdx)
vmovdqu %ymm7, -32(%rdi,%rdx)
vmovdqu %ymm8, (%rax) // %rax == the original (unaligned) %rdi
vzeroupper
.L_RET_{[prefetch]s}:
ret
.L_OVERLAP_BWD_{[prefetch]s}:
// Save last 32 bytes.
vmovdqu -32(%rsi, %rdx), %ymm8
lea -32(%rdi, %rdx), %r9
// %r8 is the end condition for the loop.
lea 128(%rsi), %r8
// Align %rdi+%rdx (destination end) to a 32 byte boundary.
// %rcx = (%rdi + %rdx - 32) & 31
mov %r9, %rcx
and $31, %rcx
// Set %rsi & %rdi to the end of the 32 byte aligned range.
sub %rcx, %rdx
add %rdx, %rsi
add %rdx, %rdi
.L_OVERLAP_BWD_ALIGNED_DST_LOOP_{[prefetch]s}:
{[prefetch]s} -128(%rdi)
{[prefetch]s} -192(%rdi)
vmovdqu -32(%rsi), %ymm4
vmovdqu -64(%rsi), %ymm5
vmovdqu -96(%rsi), %ymm6
vmovdqu -128(%rsi), %ymm7
sub $128, %rsi
vmovdqa %ymm4, -32(%rdi)
vmovdqa %ymm5, -64(%rdi)
vmovdqa %ymm6, -96(%rdi)
vmovdqa %ymm7, -128(%rdi)
sub $128, %rdi
cmp %r8, %rsi
ja .L_OVERLAP_BWD_ALIGNED_DST_LOOP_{[prefetch]s}
vmovdqu %ymm0, (%rax) // %rax == the original unaligned %rdi
vmovdqu %ymm1, 32(%rax)
vmovdqu %ymm2, 64(%rax)
vmovdqu %ymm3, 96(%rax)
vmovdqu %ymm8, (%r9)
vzeroupper
ret
.cfi_endproc
// .size {[function_prefix]s}__folly_memcpy_{[prefetch]s}, .-{[function_prefix]s}__folly_memcpy_{[prefetch]s} not supported by windows

View file

@ -1,18 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const function_prefix = @import("../assembly_util.zig").function_prefix;
comptime {
switch (arch) {
.x86_64 => {
inline for ([_][]const u8{ "prefetchw", "prefetcht0" }) |prefetch| {
asm (std.fmt.comptimePrint(@embedFile("memcpy-x86_64.S"), .{ .prefetch = prefetch, .function_prefix = function_prefix }));
}
},
else => unreachable,
}
}
pub extern fn __folly_memcpy_prefetchw(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.SysV) [*]u8;
pub extern fn __folly_memcpy_prefetcht0(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.SysV) [*]u8;
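// To illustrate the overlapping load/store trick described in memcpy-x86_64.S,
// here is a minimal Zig sketch (a hypothetical helper, not used by the build):
// for 4 <= n <= 7, both 4-byte windows are read before either is written, so the
// middle bytes are simply written twice and no per-length branching is needed.
// Reading everything before writing also makes it safe for overlapping buffers.
fn copy4To7(dest: [*]u8, src: [*]const u8, n: usize) void {
    std.debug.assert(n >= 4 and n <= 7);
    const head = src[0..4].*; // first 4 bytes
    const tail = (src + n - 4)[0..4].*; // last 4 bytes; overlaps head since n < 8
    dest[0..4].* = head;
    (dest + n - 4)[0..4].* = tail;
}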

View file

@ -1 +0,0 @@
pub const memcpy = @import("musl/memcpy.zig").memcpy;

View file

@ -1,193 +0,0 @@
musl as a whole is licensed under the following standard MIT license:
----------------------------------------------------------------------
Copyright © 2005-2020 Rich Felker, et al.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
Authors/contributors include:
A. Wilcox
Ada Worcester
Alex Dowad
Alex Suykov
Alexander Monakov
Andre McCurdy
Andrew Kelley
Anthony G. Basile
Aric Belsito
Arvid Picciani
Bartosz Brachaczek
Benjamin Peterson
Bobby Bingham
Boris Brezillon
Brent Cook
Chris Spiegel
Clément Vasseur
Daniel Micay
Daniel Sabogal
Daurnimator
David Carlier
David Edelsohn
Denys Vlasenko
Dmitry Ivanov
Dmitry V. Levin
Drew DeVault
Emil Renner Berthing
Fangrui Song
Felix Fietkau
Felix Janda
Gianluca Anzolin
Hauke Mehrtens
He X
Hiltjo Posthuma
Isaac Dunham
Jaydeep Patil
Jens Gustedt
Jeremy Huntwork
Jo-Philipp Wich
Joakim Sindholt
John Spencer
Julien Ramseier
Justin Cormack
Kaarle Ritvanen
Khem Raj
Kylie McClain
Leah Neukirchen
Luca Barbato
Luka Perkov
M Farkas-Dyck (Strake)
Mahesh Bodapati
Markus Wichmann
Masanori Ogino
Michael Clark
Michael Forney
Mikhail Kremnyov
Natanael Copa
Nicholas J. Kain
orc
Pascal Cuoq
Patrick Oppenlander
Petr Hosek
Petr Skocik
Pierre Carrier
Reini Urban
Rich Felker
Richard Pennington
Ryan Fairfax
Samuel Holland
Segev Finer
Shiz
sin
Solar Designer
Stefan Kristiansson
Stefan O'Rear
Szabolcs Nagy
Timo Teräs
Trutz Behn
Valentin Ochs
Will Dietz
William Haddon
William Pitcock
Portions of this software are derived from third-party works licensed
under terms compatible with the above MIT license:
The TRE regular expression implementation (src/regex/reg* and
src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
under a 2-clause BSD license (license text in the source files). The
included version has been heavily modified by Rich Felker in 2012, in
the interests of size, simplicity, and namespace cleanliness.
Much of the math library code (src/math/* and src/complex/*) is
Copyright © 1993,2004 Sun Microsystems or
Copyright © 2003-2011 David Schultz or
Copyright © 2003-2009 Steven G. Kargl or
Copyright © 2003-2009 Bruce D. Evans or
Copyright © 2008 Stephen L. Moshier or
Copyright © 2017-2018 Arm Limited
and labelled as such in comments in the individual source files. All
have been licensed under extremely permissive terms.
The ARM memcpy code (src/string/arm/memcpy.S) is Copyright © 2008
The Android Open Source Project and is licensed under a two-clause BSD
license. It was taken from Bionic libc, used on Android.
The AArch64 memcpy and memset code (src/string/aarch64/*) are
Copyright © 1999-2019, Arm Limited.
The implementation of DES for crypt (src/crypt/crypt_des.c) is
Copyright © 1994 David Burren. It is licensed under a BSD license.
The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
originally written by Solar Designer and placed into the public
domain. The code also comes with a fallback permissive license for use
in jurisdictions that may not recognize the public domain.
The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
Valentin Ochs and is licensed under an MIT-style license.
The x86_64 port was written by Nicholas J. Kain and is licensed under
the standard MIT terms.
The mips and microblaze ports were originally written by Richard
Pennington for use in the ellcc project. The original code was adapted
by Rich Felker for build system and code conventions during upstream
integration. It is licensed under the standard MIT terms.
The mips64 port was contributed by Imagination Technologies and is
licensed under the standard MIT terms.
The powerpc port was also originally written by Richard Pennington,
and later supplemented and integrated by John Spencer. It is licensed
under the standard MIT terms.
All other files which have no copyright comments are original works
produced specifically for use as part of this library, written either
by Rich Felker, the main author of the library, or by one or more
contibutors listed above. Details on authorship of individual files
can be found in the git version control history of the project. The
omission of copyright and license comments in each file is in the
interest of source tree size.
In addition, permission is hereby granted for all public header files
(include/* and arch/*/bits/*) and crt files intended to be linked into
applications (crt/*, ldso/dlstart.c, and arch/*/crt_arch.h) to omit
the copyright notice and permission notice otherwise required by the
license, and to use these files without any requirement of
attribution. These files include substantial contributions from:
Bobby Bingham
John Spencer
Nicholas J. Kain
Rich Felker
Richard Pennington
Stefan Kristiansson
Szabolcs Nagy
all of whom have explicitly granted such permission.
This file previously contained text expressing a belief that most of
the files covered by the above exception were sufficiently trivial not
to be subject to copyright, resulting in confusion over whether it
negated the permissions granted in the license. In the spirit of
permissive licensing, and of not having licensing issues being an
obstacle to adoption, that text has been removed.

View file

@ -1,2 +0,0 @@
These files all come from [musl libc](https://musl.libc.org/).
Roc directly uses a few of them instead of depending on musl libc in full.

View file

@ -1,30 +0,0 @@
.global {[function_prefix]s}musl_memcpy
// Windows does not support the type directive.
// .type {[function_prefix]s}musl_memcpy,@function
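// i386 variant (cdecl stack arguments): copies single bytes until the destination
// is 4-byte aligned (when at least 4 bytes remain), then moves 4-byte words with
// rep movsl, and finishes the tail with movsb.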
{[function_prefix]s}musl_memcpy:
push %esi
push %edi
mov 12(%esp),%edi
mov 16(%esp),%esi
mov 20(%esp),%ecx
mov %edi,%eax
cmp $4,%ecx
jc 1f
test $3,%edi
jz 1f
2: movsb
dec %ecx
test $3,%edi
jnz 2b
1: mov %ecx,%edx
shr $2,%ecx
rep
movsl
and $3,%edx
jz 1f
2: movsb
dec %edx
jnz 2b
1: pop %edi
pop %esi
ret

View file

@ -1,23 +0,0 @@
.global {[function_prefix]s}musl_memcpy
// Windows does not support the type directive.
// .type {[function_prefix]s}musl_memcpy,@function
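// x86_64 variant (System V argument registers: dest in %rdi, src in %rsi, len in
// %rdx): copies single bytes until the destination is 8-byte aligned (when at
// least 8 bytes remain), then moves 8-byte words with rep movsq, and finishes
// the tail with movsb.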
{[function_prefix]s}musl_memcpy:
mov %rdi,%rax
cmp $8,%rdx
jc 1f
test $7,%edi
jz 1f
2: movsb
dec %rdx
test $7,%edi
jnz 2b
1: mov %rdx,%rcx
shr $3,%rcx
rep
movsq
and $7,%edx
jz 1f
2: movsb
dec %edx
jnz 2b
1: ret

View file

@ -1,223 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const function_prefix = @import("../assembly_util.zig").function_prefix;
comptime {
switch (arch) {
.x86_64 => {
asm (std.fmt.comptimePrint(@embedFile("memcpy-x86_64.S"), .{ .function_prefix = function_prefix }));
},
.x86 => {
asm (std.fmt.comptimePrint(@embedFile("memcpy-x86.S"), .{ .function_prefix = function_prefix }));
},
// TODO: add assembly implementations for other platforms.
else => {},
}
}
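// Select the implementation at compile time: Windows and architectures without an
// assembly version use the portable Zig fallback below. (Presumably Windows is
// excluded because the x86_64 assembly assumes System V argument registers.)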
pub const memcpy =
switch (builtin.os.tag) {
.windows => fallback_memcpy,
else => switch (arch) {
.x86_64, .x86 => musl_memcpy,
else => fallback_memcpy,
},
};
pub extern fn musl_memcpy(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.C) [*]u8;
// Note: this is written to only support little-endian targets.
// To support big-endian targets, `<<` and `>>` would need to be swapped.
pub fn fallback_memcpy(noalias dest: [*]u8, noalias src: [*]const u8, len: usize) callconv(.C) [*]u8 {
var d = dest;
var s = src;
var n = len;
switch (@min(n, @intFromPtr(s) % 4)) {
1 => {
d[0] = s[0];
d += 1;
s += 1;
n -= 1;
},
2 => {
d[0] = s[0];
d[1] = s[1];
d += 2;
s += 2;
n -= 2;
},
3 => {
d[0] = s[0];
d[1] = s[1];
d[2] = s[2];
d += 3;
s += 3;
n -= 3;
},
else => {},
}
if (@intFromPtr(d) % 4 == 0) {
var d4 = @as([*]align(4) u8, @alignCast(d));
var s4 = @as([*]align(4) const u8, @alignCast(s));
while (n >= 16) : (n -= 16) {
var d_u32 = @as([*]u32, @ptrCast(d4));
var s_u32 = @as([*]const u32, @ptrCast(s4));
d_u32[0] = s_u32[0];
d_u32[1] = s_u32[1];
d_u32[2] = s_u32[2];
d_u32[3] = s_u32[3];
d4 += 16;
s4 += 16;
}
if (n & 8 != 0) {
var d_u32 = @as([*]u32, @ptrCast(d4));
var s_u32 = @as([*]const u32, @ptrCast(s4));
d_u32[0] = s_u32[0];
d_u32[1] = s_u32[1];
d4 += 8;
s4 += 8;
}
if (n & 4 != 0) {
var d_u32 = @as([*]u32, @ptrCast(d4));
var s_u32 = @as([*]const u32, @ptrCast(s4));
d_u32[0] = s_u32[0];
d4 += 4;
s4 += 4;
}
d = d4;
s = s4;
if (n & 2 != 0) {
d[0] = s[0];
d += 1;
s += 1;
d[0] = s[0];
d += 1;
s += 1;
}
if (n & 1 != 0) {
d[0] = s[0];
}
return dest;
}
if (n >= 32) {
switch (@intFromPtr(d) % 4) {
1 => {
var w = @as([*]const u32, @ptrCast(@alignCast(s)))[0];
d[0] = s[0];
d += 1;
s += 1;
d[0] = s[0];
d += 1;
s += 1;
d[0] = s[0];
d += 1;
s += 1;
n -= 3;
while (n >= 17) : (n -= 16) {
var d_u32 = @as([*]u32, @ptrCast(@alignCast(d)));
var s_u32 = @as([*]const u32, @ptrCast(@alignCast(s + 1)));
var x = s_u32[0];
d_u32[0] = (w >> 24) | (x << 8);
w = s_u32[1];
d_u32[1] = (x >> 24) | (w << 8);
x = s_u32[2];
d_u32[2] = (w >> 24) | (x << 8);
w = s_u32[3];
d_u32[3] = (x >> 24) | (w << 8);
d += 16;
s += 16;
}
},
2 => {
var w = @as([*]const u32, @ptrCast(@alignCast(s)))[0];
d[0] = s[0];
d += 1;
s += 1;
d[0] = s[0];
d += 1;
s += 1;
n -= 2;
while (n >= 18) : (n -= 16) {
var d_u32 = @as([*]u32, @ptrCast(@alignCast(d)));
var s_u32 = @as([*]const u32, @ptrCast(@alignCast(s + 2)));
var x = s_u32[0];
d_u32[0] = (w >> 16) | (x << 16);
w = s_u32[1];
d_u32[1] = (x >> 16) | (w << 16);
x = s_u32[2];
d_u32[2] = (w >> 16) | (x << 16);
w = s_u32[3];
d_u32[3] = (x >> 16) | (w << 16);
d += 16;
s += 16;
}
},
3 => {
var w = @as([*]const u32, @ptrCast(@alignCast(s)))[0];
d[0] = s[0];
d += 1;
s += 1;
n -= 1;
while (n >= 19) : (n -= 16) {
var d_u32 = @as([*]u32, @ptrCast(@alignCast(d)));
var s_u32 = @as([*]const u32, @ptrCast(@alignCast(s + 3)));
var x = s_u32[0];
d_u32[0] = (w >> 8) | (x << 24);
w = s_u32[1];
d_u32[1] = (x >> 8) | (w << 24);
x = s_u32[2];
d_u32[2] = (w >> 8) | (x << 24);
w = s_u32[3];
d_u32[3] = (x >> 8) | (w << 24);
d += 16;
s += 16;
}
},
else => unreachable,
}
}
if (n & 16 != 0) {
comptime var i = 0;
inline while (i < 16) : (i += 1) {
d[0] = s[0];
d += 1;
s += 1;
}
}
if (n & 8 != 0) {
comptime var i = 0;
inline while (i < 8) : (i += 1) {
d[0] = s[0];
d += 1;
s += 1;
}
}
if (n & 4 != 0) {
comptime var i = 0;
inline while (i < 4) : (i += 1) {
d[0] = s[0];
d += 1;
s += 1;
}
}
if (n & 2 != 0) {
d[0] = s[0];
d += 1;
s += 1;
d[0] = s[0];
d += 1;
s += 1;
}
if (n & 1 != 0) {
d[0] = s[0];
}
return dest;
}

View file

@ -21,16 +21,18 @@ const SEAMLESS_SLICE_BIT: usize =
pub const RocList = extern struct {
bytes: ?[*]u8,
length: usize,
// This technically points to just after the refcount.
// This is an optimization that enables using one code path for regular lists and seamless slices when getting the refcount pointer.
capacity_or_ref_ptr: usize,
// For normal lists, contains the capacity.
// For seamless slices, contains the pointer to the original allocation.
// This pointer is to the first element of the original list.
// Note: since we are storing an allocation pointer, the pointer must be right-shifted by one.
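// (Right-shifting drops the pointer's low bit, which is always zero because the
// allocation is at least 2-byte aligned, and keeps the top bit free for the
// seamless-slice flag; getAllocationPtr shifts left by one to recover the pointer.)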
capacity_or_alloc_ptr: usize,
pub inline fn len(self: RocList) usize {
return self.length;
}
pub fn getCapacity(self: RocList) usize {
const list_capacity = self.capacity_or_ref_ptr;
const list_capacity = self.capacity_or_alloc_ptr;
const slice_capacity = self.length;
const slice_mask = self.seamlessSliceMask();
const capacity = (list_capacity & ~slice_mask) | (slice_capacity & slice_mask);
@ -38,14 +40,14 @@ pub const RocList = extern struct {
}
pub fn isSeamlessSlice(self: RocList) bool {
return @as(isize, @bitCast(self.capacity_or_ref_ptr)) < 0;
return @as(isize, @bitCast(self.capacity_or_alloc_ptr)) < 0;
}
// This returns all ones if the list is a seamless slice.
// Otherwise, it returns all zeros.
// This is done without branching for optimization purposes.
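// (e.g. on a 64-bit target, an arithmetic shift right by 63 turns any negative
// value into all ones and any non-negative value into all zeros.)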
pub fn seamlessSliceMask(self: RocList) usize {
return @as(usize, @bitCast(@as(isize, @bitCast(self.capacity_or_ref_ptr)) >> (@bitSizeOf(isize) - 1)));
return @as(usize, @bitCast(@as(isize, @bitCast(self.capacity_or_alloc_ptr)) >> (@bitSizeOf(isize) - 1)));
}
pub fn isEmpty(self: RocList) bool {
@ -53,7 +55,7 @@ pub const RocList = extern struct {
}
pub fn empty() RocList {
return RocList{ .bytes = null, .length = 0, .capacity_or_ref_ptr = 0 };
return RocList{ .bytes = null, .length = 0, .capacity_or_alloc_ptr = 0 };
}
pub fn eql(self: RocList, other: RocList) bool {
@ -99,21 +101,22 @@ pub const RocList = extern struct {
return list;
}
// returns a pointer to just after the refcount.
// It is just after the refcount as an optimization for other shared code paths.
// For regular lists, it just returns their bytes pointer.
// For seamless slices, it returns the pointer stored in capacity_or_ref_ptr.
pub fn getRefcountPtr(self: RocList) ?[*]u8 {
const list_ref_ptr = @intFromPtr(self.bytes);
const slice_ref_ptr = self.capacity_or_ref_ptr << 1;
// returns a pointer to the original allocation.
// This pointer points to the first element of the allocation.
// The pointer is to just after the refcount.
// For big lists, it just returns their bytes pointer.
// For seamless slices, it returns the pointer stored in capacity_or_alloc_ptr.
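// The branchless select below, (a & ~mask) | (b & mask), yields b when the mask
// is all ones (seamless slice) and a when it is all zeros (regular list).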
pub fn getAllocationPtr(self: RocList) ?[*]u8 {
const list_alloc_ptr = @intFromPtr(self.bytes);
const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
const slice_mask = self.seamlessSliceMask();
const ref_ptr = (list_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
return @as(?[*]u8, @ptrFromInt(ref_ptr));
const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
return @as(?[*]u8, @ptrFromInt(alloc_ptr));
}
pub fn decref(self: RocList, alignment: u32) void {
// We use the raw capacity to ensure we always decrement the refcount of seamless slices.
utils.decref(self.getRefcountPtr(), self.capacity_or_ref_ptr, alignment);
utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, alignment);
}
pub fn elements(self: RocList, comptime T: type) ?[*]T {
@ -187,7 +190,7 @@ pub const RocList = extern struct {
return RocList{
.bytes = utils.allocateWithRefcount(data_bytes, alignment),
.length = length,
.capacity_or_ref_ptr = capacity,
.capacity_or_alloc_ptr = capacity,
};
}
@ -204,7 +207,7 @@ pub const RocList = extern struct {
return RocList{
.bytes = utils.allocateWithRefcount(data_bytes, alignment),
.length = length,
.capacity_or_ref_ptr = length,
.capacity_or_alloc_ptr = length,
};
}
@ -216,13 +219,13 @@ pub const RocList = extern struct {
) RocList {
if (self.bytes) |source_ptr| {
if (self.isUnique() and !self.isSeamlessSlice()) {
const capacity = self.capacity_or_ref_ptr;
const capacity = self.capacity_or_alloc_ptr;
if (capacity >= new_length) {
return RocList{ .bytes = self.bytes, .length = new_length, .capacity_or_ref_ptr = capacity };
return RocList{ .bytes = self.bytes, .length = new_length, .capacity_or_alloc_ptr = capacity };
} else {
const new_capacity = utils.calculateCapacity(capacity, new_length, element_width);
const new_source = utils.unsafeReallocate(source_ptr, alignment, capacity, new_capacity, element_width);
return RocList{ .bytes = new_source, .length = new_length, .capacity_or_ref_ptr = new_capacity };
return RocList{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity };
}
}
return self.reallocateFresh(alignment, new_length, element_width);
@ -500,8 +503,8 @@ pub fn listReleaseExcessCapacity(
update_mode: UpdateMode,
) callconv(.C) RocList {
const old_length = list.len();
// We use the direct list.capacity_or_ref_ptr to check both that there is no extra capacity and that it isn't a seamless slice.
if ((update_mode == .InPlace or list.isUnique()) and list.capacity_or_ref_ptr == old_length) {
// We use the direct list.capacity_or_alloc_ptr to check both that there is no extra capacity and that it isn't a seamless slice.
if ((update_mode == .InPlace or list.isUnique()) and list.capacity_or_alloc_ptr == old_length) {
return list;
} else if (old_length == 0) {
list.decref(alignment);
@ -649,14 +652,14 @@ pub fn listSublist(
output.length = keep_len;
return output;
} else {
const list_ref_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT;
const slice_ref_ptr = list.capacity_or_ref_ptr;
const list_alloc_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT;
const slice_alloc_ptr = list.capacity_or_alloc_ptr;
const slice_mask = list.seamlessSliceMask();
const ref_ptr = (list_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
return RocList{
.bytes = source_ptr + start * element_width,
.length = keep_len,
.capacity_or_ref_ptr = ref_ptr,
.capacity_or_alloc_ptr = alloc_ptr,
};
}
}
@ -965,10 +968,10 @@ pub fn listCapacity(
return list.getCapacity();
}
pub fn listRefcountPtr(
pub fn listAllocationPtr(
list: RocList,
) callconv(.C) ?[*]u8 {
return list.getRefcountPtr();
return list.getAllocationPtr();
}
test "listConcat: non-unique with unique overlapping" {

View file

@ -4,11 +4,7 @@ const math = std.math;
const utils = @import("utils.zig");
const expect = @import("expect.zig");
const panic_utils = @import("panic.zig");
comptime {
_ = @import("compiler_rt.zig");
_ = @import("libc.zig");
}
const dbg_utils = @import("dbg.zig");
const ROC_BUILTINS = "roc_builtins";
const NUM = "num";
@ -17,6 +13,13 @@ const STR = "str";
// Dec Module
const dec = @import("dec.zig");
var FLTUSED: i32 = 0;
comptime {
if (builtin.os.tag == .windows) {
@export(FLTUSED, .{ .name = "_fltused", .linkage = .Weak });
}
}
comptime {
exportDecFn(dec.absC, "abs");
exportDecFn(dec.acosC, "acos");
@ -73,7 +76,7 @@ comptime {
exportListFn(list.listSwap, "swap");
exportListFn(list.listIsUnique, "is_unique");
exportListFn(list.listCapacity, "capacity");
exportListFn(list.listRefcountPtr, "refcount_ptr");
exportListFn(list.listAllocationPtr, "allocation_ptr");
exportListFn(list.listReleaseExcessCapacity, "release_excess_capacity");
}
@ -215,7 +218,7 @@ comptime {
exportStrFn(str.strCloneTo, "clone_to");
exportStrFn(str.withCapacity, "with_capacity");
exportStrFn(str.strGraphemes, "graphemes");
exportStrFn(str.strRefcountPtr, "refcount_ptr");
exportStrFn(str.strAllocationPtr, "allocation_ptr");
exportStrFn(str.strReleaseExcessCapacity, "release_excess_capacity");
inline for (INTEGERS) |T| {
@ -245,6 +248,7 @@ comptime {
exportUtilsFn(utils.dictPseudoSeed, "dict_pseudo_seed");
@export(panic_utils.panic, .{ .name = "roc_builtins.utils." ++ "panic", .linkage = .Weak });
@export(dbg_utils.dbg_impl, .{ .name = "roc_builtins.utils." ++ "dbg_impl", .linkage = .Weak });
if (builtin.target.cpu.arch != .wasm32) {
exportUtilsFn(expect.expectFailedStartSharedBuffer, "expect_failed_start_shared_buffer");

View file

@ -233,7 +233,9 @@ pub fn exportCeiling(comptime F: type, comptime T: type, comptime name: []const
pub fn exportDivCeil(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(a: T, b: T) callconv(.C) T {
return math.divCeil(T, a, b) catch @panic("TODO runtime exception for dividing by 0!");
return math.divCeil(T, a, b) catch {
roc_panic("Integer division by 0!", 0);
};
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
@ -379,8 +381,7 @@ pub fn exportAddOrPanic(comptime T: type, comptime name: []const u8) void {
fn func(self: T, other: T) callconv(.C) T {
const result = addWithOverflow(T, self, other);
if (result.has_overflowed) {
roc_panic("integer addition overflowed!", 0);
unreachable;
roc_panic("Integer addition overflowed!", 0);
} else {
return result.value;
}
@ -437,8 +438,7 @@ pub fn exportSubOrPanic(comptime T: type, comptime name: []const u8) void {
fn func(self: T, other: T) callconv(.C) T {
const result = subWithOverflow(T, self, other);
if (result.has_overflowed) {
roc_panic("integer subtraction overflowed!", 0);
unreachable;
roc_panic("Integer subtraction overflowed!", 0);
} else {
return result.value;
}
@ -622,8 +622,7 @@ pub fn exportMulOrPanic(comptime T: type, comptime W: type, comptime name: []con
fn func(self: T, other: T) callconv(.C) T {
const result = @call(.always_inline, mulWithOverflow, .{ T, W, self, other });
if (result.has_overflowed) {
roc_panic("integer multiplication overflowed!", 0);
unreachable;
roc_panic("Integer multiplication overflowed!", 0);
} else {
return result.value;
}
@ -634,8 +633,8 @@ pub fn exportMulOrPanic(comptime T: type, comptime W: type, comptime name: []con
pub fn exportCountLeadingZeroBits(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T) callconv(.C) usize {
return @as(usize, @clz(self));
fn func(self: T) callconv(.C) u8 {
return @as(u8, @clz(self));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
@ -643,8 +642,8 @@ pub fn exportCountLeadingZeroBits(comptime T: type, comptime name: []const u8) v
pub fn exportCountTrailingZeroBits(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T) callconv(.C) usize {
return @as(usize, @ctz(self));
fn func(self: T) callconv(.C) u8 {
return @as(u8, @ctz(self));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
@ -652,8 +651,8 @@ pub fn exportCountTrailingZeroBits(comptime T: type, comptime name: []const u8)
pub fn exportCountOneBits(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T) callconv(.C) usize {
return @as(usize, @popCount(self));
fn func(self: T) callconv(.C) u8 {
return @as(u8, @popCount(self));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });

View file

@ -2,14 +2,14 @@ const std = @import("std");
const RocStr = @import("str.zig").RocStr;
// Signals to the host that the program has panicked
extern fn roc_panic(msg: *const RocStr, tag_id: u32) callconv(.C) void;
extern fn roc_panic(msg: *const RocStr, tag_id: u32) callconv(.C) noreturn;
pub fn panic_help(msg: []const u8, tag_id: u32) void {
pub fn panic_help(msg: []const u8, tag_id: u32) noreturn {
var str = RocStr.init(msg.ptr, msg.len);
roc_panic(&str, tag_id);
}
// must export this explicitly because right now it is not used from zig code
pub fn panic(msg: *const RocStr, alignment: u32) callconv(.C) void {
pub fn panic(msg: *const RocStr, alignment: u32) callconv(.C) noreturn {
return roc_panic(msg, alignment);
}

View file

@ -34,17 +34,21 @@ fn init_blank_small_string(comptime n: usize) [n]u8 {
}
pub const RocStr = extern struct {
str_bytes: ?[*]u8,
str_len: usize,
str_capacity: usize,
bytes: ?[*]u8,
length: usize,
// For big strs, contains the capacity.
// For seamless slices, contains the pointer to the original allocation.
// This pointer is to the first character of the original string.
// Note: since we are storing an allocation pointer, the pointer must be right-shifted by one.
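// Unlike lists, the seamless-slice flag for strings lives in the top bit of
// `length` (see isSeamlessSlice); the top bit of this field instead marks a
// small string (see isSmallStr).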
capacity_or_alloc_ptr: usize,
pub const alignment = @alignOf(usize);
pub inline fn empty() RocStr {
return RocStr{
.str_len = 0,
.str_bytes = null,
.str_capacity = MASK,
.length = 0,
.bytes = null,
.capacity_or_alloc_ptr = MASK,
};
}
@ -63,29 +67,29 @@ pub const RocStr = extern struct {
const start_byte = @as([*]u8, @ptrCast(list.bytes)) + start;
if (list.isSeamlessSlice()) {
return RocStr{
.str_bytes = start_byte,
.str_len = count | SEAMLESS_SLICE_BIT,
.str_capacity = list.capacity_or_ref_ptr & (~SEAMLESS_SLICE_BIT),
.bytes = start_byte,
.length = count | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = list.capacity_or_alloc_ptr & (~SEAMLESS_SLICE_BIT),
};
} else if (start == 0 and (update_mode == .InPlace or list.isUnique())) {
// Rare case, we can take over the original list.
return RocStr{
.str_bytes = start_byte,
.str_len = count,
.str_capacity = list.capacity_or_ref_ptr, // This is guaranteed to be a proper capacity.
.bytes = start_byte,
.length = count,
.capacity_or_alloc_ptr = list.capacity_or_alloc_ptr, // This is guaranteed to be a proper capacity.
};
} else {
// Create seamless slice pointing to the list.
return RocStr{
.str_bytes = start_byte,
.str_len = count | SEAMLESS_SLICE_BIT,
.str_capacity = @intFromPtr(list.bytes) >> 1,
.bytes = start_byte,
.length = count | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = @intFromPtr(list.bytes) >> 1,
};
}
}
pub fn isSeamlessSlice(self: RocStr) bool {
return !self.isSmallStr() and @as(isize, @bitCast(self.str_len)) < 0;
return !self.isSmallStr() and @as(isize, @bitCast(self.length)) < 0;
}
pub fn fromSlice(slice: []const u8) RocStr {
@ -96,9 +100,9 @@ pub const RocStr = extern struct {
const first_element = utils.allocateWithRefcount(capacity, @sizeOf(usize));
return RocStr{
.str_bytes = first_element,
.str_len = length,
.str_capacity = capacity,
.bytes = first_element,
.length = length,
.capacity_or_alloc_ptr = capacity,
};
}
@ -140,27 +144,28 @@ pub const RocStr = extern struct {
// Otherwise, it returns all zeros.
// This is done without branching for optimization purposes.
pub fn seamlessSliceMask(self: RocStr) usize {
return @as(usize, @bitCast(@as(isize, @bitCast(self.str_len)) >> (@bitSizeOf(isize) - 1)));
return @as(usize, @bitCast(@as(isize, @bitCast(self.length)) >> (@bitSizeOf(isize) - 1)));
}
// returns a pointer to just after the refcount.
// It is just after the refcount as an optimization for other shared code paths.
// For regular strings, it just returns their bytes pointer.
// For seamless slices, it returns the pointer stored in capacity_or_ref_ptr.
// returns a pointer to the original allocation.
// This pointer points to the first element of the allocation.
// The pointer is to just after the refcount.
// For big strings, it just returns their bytes pointer.
// For seamless slices, it returns the pointer stored in capacity_or_alloc_ptr.
// This does not return a valid value if the input is a small string.
pub fn getRefcountPtr(self: RocStr) ?[*]u8 {
const str_ref_ptr = @intFromPtr(self.str_bytes);
const slice_ref_ptr = self.str_capacity << 1;
pub fn getAllocationPtr(self: RocStr) ?[*]u8 {
const str_alloc_ptr = @intFromPtr(self.bytes);
const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
const slice_mask = self.seamlessSliceMask();
const ref_ptr = (str_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
return @as(?[*]u8, @ptrFromInt(ref_ptr));
const alloc_ptr = (str_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
return @as(?[*]u8, @ptrFromInt(alloc_ptr));
}
pub fn incref(self: RocStr, n: usize) void {
if (!self.isSmallStr()) {
const ref_ptr = self.getRefcountPtr();
if (ref_ptr != null) {
const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(ref_ptr)));
const alloc_ptr = self.getAllocationPtr();
if (alloc_ptr != null) {
const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(alloc_ptr)));
utils.increfRcPtrC(@as(*isize, @ptrCast(isizes - 1)), @as(isize, @intCast(n)));
}
}
@ -168,13 +173,13 @@ pub const RocStr = extern struct {
pub fn decref(self: RocStr) void {
if (!self.isSmallStr()) {
utils.decref(self.getRefcountPtr(), self.str_capacity, RocStr.alignment);
utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, RocStr.alignment);
}
}
pub fn eq(self: RocStr, other: RocStr) bool {
// If they are byte-for-byte equal, they're definitely equal!
if (self.str_bytes == other.str_bytes and self.str_len == other.str_len and self.str_capacity == other.str_capacity) {
if (self.bytes == other.bytes and self.length == other.length and self.capacity_or_alloc_ptr == other.capacity_or_alloc_ptr) {
return true;
}
@ -208,12 +213,12 @@ pub const RocStr = extern struct {
// just return the bytes
return str;
} else {
var new_str = RocStr.allocateBig(str.str_len, str.str_len);
var new_str = RocStr.allocateBig(str.length, str.length);
var old_bytes: [*]u8 = @as([*]u8, @ptrCast(str.str_bytes));
var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_str.str_bytes));
var old_bytes: [*]u8 = @as([*]u8, @ptrCast(str.bytes));
var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_str.bytes));
@memcpy(new_bytes[0..str.str_len], old_bytes[0..str.str_len]);
@memcpy(new_bytes[0..str.length], old_bytes[0..str.length]);
return new_str;
}
@ -230,7 +235,7 @@ pub const RocStr = extern struct {
return self.reallocateFresh(new_length);
}
if (self.str_bytes) |source_ptr| {
if (self.bytes) |source_ptr| {
if (old_capacity > new_length) {
var output = self;
output.setLen(new_length);
@ -245,7 +250,7 @@ pub const RocStr = extern struct {
element_width,
);
return RocStr{ .str_bytes = new_source, .str_len = new_length, .str_capacity = new_capacity };
return RocStr{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity };
}
return self.reallocateFresh(new_length);
}
@ -295,7 +300,7 @@ pub const RocStr = extern struct {
}
pub fn isSmallStr(self: RocStr) bool {
return @as(isize, @bitCast(self.str_capacity)) < 0;
return @as(isize, @bitCast(self.capacity_or_alloc_ptr)) < 0;
}
test "isSmallStr: returns true for empty string" {
@ -313,7 +318,7 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
return self.asArray()[@sizeOf(RocStr) - 1] ^ 0b1000_0000;
} else {
return self.str_len & (~SEAMLESS_SLICE_BIT);
return self.length & (~SEAMLESS_SLICE_BIT);
}
}
@ -321,7 +326,7 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
self.asU8ptrMut()[@sizeOf(RocStr) - 1] = @as(u8, @intCast(length)) | 0b1000_0000;
} else {
self.str_len = length | (SEAMLESS_SLICE_BIT & self.str_len);
self.length = length | (SEAMLESS_SLICE_BIT & self.length);
}
}
@ -329,9 +334,9 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
return SMALL_STR_MAX_LENGTH;
} else if (self.isSeamlessSlice()) {
return self.str_len & (~SEAMLESS_SLICE_BIT);
return self.length & (~SEAMLESS_SLICE_BIT);
} else {
return self.str_capacity;
return self.capacity_or_alloc_ptr;
}
}
@ -340,7 +345,7 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
return self.asArray()[index];
} else {
const bytes = self.str_bytes orelse unreachable;
const bytes = self.bytes orelse unreachable;
return bytes[index];
}
@ -369,7 +374,7 @@ pub const RocStr = extern struct {
return utils.REFCOUNT_ONE;
}
const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(self.str_bytes)));
const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(self.bytes)));
return (ptr - 1)[0];
}
@ -393,7 +398,7 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
return @as([*]const u8, @ptrCast(self));
} else {
return @as([*]const u8, @ptrCast(self.str_bytes));
return @as([*]const u8, @ptrCast(self.bytes));
}
}
@ -401,7 +406,7 @@ pub const RocStr = extern struct {
if (self.isSmallStr()) {
return @as([*]u8, @ptrCast(self));
} else {
return @as([*]u8, @ptrCast(self.str_bytes));
return @as([*]u8, @ptrCast(self.bytes));
}
}
@ -516,13 +521,13 @@ pub const RocStr = extern struct {
const content = "012345678901234567890123456789";
const roc_str1 = RocStr.init(content, content.len);
const roc_str2 = RocStr.init(content, content.len);
try expect(roc_str1.str_bytes != roc_str2.str_bytes);
try expect(roc_str1.bytes != roc_str2.bytes);
// Insert garbage after the end of each string
roc_str1.str_bytes.?[30] = '!';
roc_str1.str_bytes.?[31] = '!';
roc_str2.str_bytes.?[30] = '-';
roc_str2.str_bytes.?[31] = '-';
roc_str1.bytes.?[30] = '!';
roc_str1.bytes.?[31] = '!';
roc_str2.bytes.?[30] = '-';
roc_str2.bytes.?[31] = '-';
defer {
roc_str1.decref();
@ -553,13 +558,13 @@ pub fn strToScalarsC(str: RocStr) callconv(.C) RocList {
}
fn strToScalars(string: RocStr) callconv(.C) RocList {
const str_len = string.len();
const len = string.len();
if (str_len == 0) {
if (len == 0) {
return RocList.empty();
}
var capacity = str_len;
var capacity = len;
if (!string.isSmallStr()) {
capacity = string.getCapacity();
@ -576,7 +581,7 @@ fn strToScalars(string: RocStr) callconv(.C) RocList {
var src_index: usize = 0;
var answer_index: usize = 0;
while (src_index < str_len) {
while (src_index < len) {
src_index += writeNextScalar(string, src_index, answer_elems, answer_index);
answer_index += 1;
}
@ -846,13 +851,13 @@ fn initFromSmallStr(slice_bytes: [*]u8, len: usize, _: usize) RocStr {
return RocStr.init(slice_bytes, len);
}
// The ref_ptr must already be shifted to be ready for storing in a seamless slice.
fn initFromBigStr(slice_bytes: [*]u8, len: usize, ref_ptr: usize) RocStr {
// The alloc_ptr must already be shifted to be ready for storing in a seamless slice.
fn initFromBigStr(slice_bytes: [*]u8, len: usize, alloc_ptr: usize) RocStr {
// Here we can make seamless slices instead of copying to a new small str.
return RocStr{
.str_bytes = slice_bytes,
.str_len = len | SEAMLESS_SLICE_BIT,
.str_capacity = ref_ptr,
.bytes = slice_bytes,
.length = len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = alloc_ptr,
};
}
@ -861,9 +866,9 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
var slice_start_index: usize = 0;
var str_index: usize = 0;
const str_bytes = string.asU8ptr();
const str_len = string.len();
const ref_ptr = @intFromPtr(string.getRefcountPtr()) >> 1;
const bytes = string.asU8ptr();
const len = string.len();
const alloc_ptr = @intFromPtr(string.getAllocationPtr()) >> 1;
const init_fn = if (string.isSmallStr())
&initFromSmallStr
else
@ -872,8 +877,8 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
const delimiter_bytes_ptrs = delimiter.asU8ptr();
const delimiter_len = delimiter.len();
if (str_len >= delimiter_len and delimiter_len > 0) {
const end_index: usize = str_len - delimiter_len + 1;
if (len >= delimiter_len and delimiter_len > 0) {
const end_index: usize = len - delimiter_len + 1;
while (str_index <= end_index) {
var delimiter_index: usize = 0;
var matches_delimiter = true;
@ -881,12 +886,12 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
while (delimiter_index < delimiter_len) {
var delimiterChar = delimiter_bytes_ptrs[delimiter_index];
if (str_index + delimiter_index >= str_len) {
if (str_index + delimiter_index >= len) {
matches_delimiter = false;
break;
}
var strChar = str_bytes[str_index + delimiter_index];
var strChar = bytes[str_index + delimiter_index];
if (delimiterChar != strChar) {
matches_delimiter = false;
@ -899,7 +904,7 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
if (matches_delimiter) {
const segment_len: usize = str_index - slice_start_index;
array[ret_array_index] = init_fn(@constCast(str_bytes) + slice_start_index, segment_len, ref_ptr);
array[ret_array_index] = init_fn(@constCast(bytes) + slice_start_index, segment_len, alloc_ptr);
slice_start_index = str_index + delimiter_len;
ret_array_index += 1;
str_index += delimiter_len;
@ -909,7 +914,7 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
}
}
array[ret_array_index] = init_fn(@constCast(str_bytes) + slice_start_index, str_len - slice_start_index, ref_ptr);
array[ret_array_index] = init_fn(@constCast(bytes) + slice_start_index, len - slice_start_index, alloc_ptr);
if (!string.isSmallStr()) {
// Correct refcount for all of the splits made.
@ -1240,17 +1245,17 @@ test "strSplitHelp: overlapping delimiter 2" {
// needs to be broken into, so that we can allocate an array
// of that size. It always returns at least 1.
pub fn countSegments(string: RocStr, delimiter: RocStr) callconv(.C) usize {
const str_bytes = string.asU8ptr();
const str_len = string.len();
const bytes = string.asU8ptr();
const len = string.len();
const delimiter_bytes_ptrs = delimiter.asU8ptr();
const delimiter_len = delimiter.len();
var count: usize = 1;
if (str_len >= delimiter_len and delimiter_len > 0) {
if (len >= delimiter_len and delimiter_len > 0) {
var str_index: usize = 0;
const end_cond: usize = str_len - delimiter_len + 1;
const end_cond: usize = len - delimiter_len + 1;
while (str_index < end_cond) {
var delimiter_index: usize = 0;
@ -1259,7 +1264,7 @@ pub fn countSegments(string: RocStr, delimiter: RocStr) callconv(.C) usize {
while (delimiter_index < delimiter_len) {
const delimiterChar = delimiter_bytes_ptrs[delimiter_index];
const strChar = str_bytes[str_index + delimiter_index];
const strChar = bytes[str_index + delimiter_index];
if (delimiterChar != strChar) {
matches_delimiter = false;
@ -1409,7 +1414,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
var index: usize = 0;
var last_codepoint_len: u8 = 0;
const ref_ptr = @intFromPtr(roc_str.getRefcountPtr()) >> 1;
const alloc_ptr = @intFromPtr(roc_str.getAllocationPtr()) >> 1;
const init_fn = if (roc_str.isSmallStr())
&initFromSmallStr
else
@ -1425,7 +1430,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
if (opt_last_codepoint) |last_codepoint| {
var did_break = grapheme.isGraphemeBreak(last_codepoint, cur_codepoint, &break_state);
if (did_break) {
graphemes[index] = init_fn(@constCast(slice.ptr), last_codepoint_len, ref_ptr);
graphemes[index] = init_fn(@constCast(slice.ptr), last_codepoint_len, alloc_ptr);
slice = slice[last_codepoint_len..];
index += 1;
break_state = null;
@ -1436,7 +1441,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
opt_last_codepoint = cur_codepoint;
}
// Append last grapheme
graphemes[index] = init_fn(@constCast(slice.ptr), slice.len, ref_ptr);
graphemes[index] = init_fn(@constCast(slice.ptr), slice.len, alloc_ptr);
if (!roc_str.isSmallStr()) {
// Correct refcount for all of the splits made.
@ -1498,8 +1503,35 @@ pub fn getCapacity(string: RocStr) callconv(.C) usize {
}
pub fn substringUnsafe(string: RocStr, start: usize, length: usize) callconv(.C) RocStr {
const slice = string.asSlice()[start .. start + length];
return RocStr.fromSlice(slice);
if (string.isSmallStr()) {
if (start == 0) {
var output = string;
output.setLen(length);
return output;
}
const slice = string.asSlice()[start .. start + length];
return RocStr.fromSlice(slice);
}
if (string.bytes) |source_ptr| {
if (start == 0 and string.isUnique()) {
var output = string;
output.setLen(length);
return output;
} else {
// Shifting right by 1 is required to avoid the highest bit of capacity being set.
// If it were set, the slice would get interpreted as a small string.
const str_alloc_ptr = (@intFromPtr(source_ptr) >> 1);
const slice_alloc_ptr = string.capacity_or_alloc_ptr;
const slice_mask = string.seamlessSliceMask();
const alloc_ptr = (str_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
return RocStr{
.bytes = source_ptr + start,
.length = length | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = alloc_ptr,
};
}
}
return RocStr.empty();
}
pub fn getUnsafe(string: RocStr, index: usize) callconv(.C) u8 {
@ -1584,9 +1616,9 @@ pub fn repeat(string: RocStr, count: usize) callconv(.C) RocStr {
// Str.startsWithScalar
pub fn startsWithScalar(string: RocStr, prefix: u32) callconv(.C) bool {
const str_len = string.len();
const len = string.len();
if (str_len == 0) {
if (len == 0) {
return false;
}
@ -1750,7 +1782,7 @@ test "RocStr.concat: small concat small" {
pub const RocListStr = extern struct {
list_elements: ?[*]RocStr,
list_length: usize,
list_capacity_or_ref_ptr: usize,
list_capacity_or_alloc_ptr: usize,
};
// Str.joinWith
@ -1758,7 +1790,7 @@ pub fn strJoinWithC(list: RocList, separator: RocStr) callconv(.C) RocStr {
const roc_list_str = RocListStr{
.list_elements = @as(?[*]RocStr, @ptrCast(@alignCast(list.bytes))),
.list_length = list.length,
.list_capacity_or_ref_ptr = list.capacity_or_ref_ptr,
.list_capacity_or_alloc_ptr = list.capacity_or_alloc_ptr,
};
return @call(.always_inline, strJoinWith, .{ roc_list_str, separator });
@ -1820,7 +1852,7 @@ test "RocStr.joinWith: result is big" {
var elements: [3]RocStr = .{ roc_elem, roc_elem, roc_elem };
const list = RocListStr{
.list_length = 3,
.list_capacity_or_ref_ptr = 3,
.list_capacity_or_alloc_ptr = 3,
.list_elements = @as([*]RocStr, @ptrCast(&elements)),
};
@ -1851,10 +1883,10 @@ inline fn strToBytes(arg: RocStr) RocList {
@memcpy(ptr[0..length], arg.asU8ptr()[0..length]);
return RocList{ .length = length, .bytes = ptr, .capacity_or_ref_ptr = length };
return RocList{ .length = length, .bytes = ptr, .capacity_or_alloc_ptr = length };
} else {
const is_seamless_slice = arg.str_len & SEAMLESS_SLICE_BIT;
return RocList{ .length = length, .bytes = arg.str_bytes, .capacity_or_ref_ptr = arg.str_capacity | is_seamless_slice };
const is_seamless_slice = arg.length & SEAMLESS_SLICE_BIT;
return RocList{ .length = length, .bytes = arg.bytes, .capacity_or_alloc_ptr = arg.capacity_or_alloc_ptr | is_seamless_slice };
}
}
@ -2015,7 +2047,7 @@ pub const Utf8ByteProblem = enum(u8) {
};
fn validateUtf8Bytes(bytes: [*]u8, length: usize) FromUtf8Result {
return fromUtf8Range(RocList{ .bytes = bytes, .length = length, .capacity_or_ref_ptr = length }, 0, length, .Immutable);
return fromUtf8Range(RocList{ .bytes = bytes, .length = length, .capacity_or_alloc_ptr = length }, 0, length, .Immutable);
}
fn validateUtf8BytesX(str: RocList) FromUtf8Result {
@ -2096,10 +2128,10 @@ test "validateUtf8Bytes: unicode ∆ in middle of array" {
fn expectErr(list: RocList, index: usize, err: Utf8DecodeError, problem: Utf8ByteProblem) !void {
const str_ptr = @as([*]u8, @ptrCast(list.bytes));
const str_len = list.length;
const len = list.length;
try expectError(err, numberOfNextCodepointBytes(str_ptr, str_len, index));
try expectEqual(toErrUtf8ByteResponse(index, problem), validateUtf8Bytes(str_ptr, str_len));
try expectError(err, numberOfNextCodepointBytes(str_ptr, len, index));
try expectEqual(toErrUtf8ByteResponse(index, problem), validateUtf8Bytes(str_ptr, len));
}
test "validateUtf8Bytes: invalid start byte" {
@ -2247,22 +2279,22 @@ pub fn strTrim(input_string: RocStr) callconv(.C) RocStr {
// Big and unique with no leading bytes to remove.
// Just take ownership and shrink the length.
var new_string = string;
new_string.str_len = new_len;
new_string.length = new_len;
return new_string;
} else if (string.isSeamlessSlice()) {
// Already a seamless slice, just update the range.
return RocStr{
.str_bytes = bytes_ptr + leading_bytes,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = string.str_capacity,
.bytes = bytes_ptr + leading_bytes,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
};
} else {
// Not unique or removing leading bytes, just make a slice.
return RocStr{
.str_bytes = bytes_ptr + leading_bytes,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = @intFromPtr(bytes_ptr) >> 1,
.bytes = bytes_ptr + leading_bytes,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
};
}
}
@ -2295,22 +2327,22 @@ pub fn strTrimStart(input_string: RocStr) callconv(.C) RocStr {
// Big and unique with no leading bytes to remove.
// Just take ownership and shrink the length.
var new_string = string;
new_string.str_len = new_len;
new_string.length = new_len;
return new_string;
} else if (string.isSeamlessSlice()) {
// Already a seamless slice, just update the range.
return RocStr{
.str_bytes = bytes_ptr + leading_bytes,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = string.str_capacity,
.bytes = bytes_ptr + leading_bytes,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
};
} else {
// Not unique or removing leading bytes, just make a slice.
return RocStr{
.str_bytes = bytes_ptr + leading_bytes,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = @intFromPtr(bytes_ptr) >> 1,
.bytes = bytes_ptr + leading_bytes,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
};
}
}
@ -2343,22 +2375,22 @@ pub fn strTrimEnd(input_string: RocStr) callconv(.C) RocStr {
// Big and unique with no trailing bytes to remove.
// Just take ownership and shrink the length.
var new_string = string;
new_string.str_len = new_len;
new_string.length = new_len;
return new_string;
} else if (string.isSeamlessSlice()) {
// Already a seamless slice, just update the range.
return RocStr{
.str_bytes = bytes_ptr,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = string.str_capacity,
.bytes = bytes_ptr,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
};
} else {
// Not unique, just make a slice.
return RocStr{
.str_bytes = bytes_ptr,
.str_len = new_len | SEAMLESS_SLICE_BIT,
.str_capacity = @intFromPtr(bytes_ptr) >> 1,
.bytes = bytes_ptr,
.length = new_len | SEAMLESS_SLICE_BIT,
.capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
};
}
}
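
All three trim functions above lean on the representation this commit's rename (`capacity_or_ref_ptr` to `capacity_or_alloc_ptr`) spells out: a seamless slice sets the top bit of `length` (`SEAMLESS_SLICE_BIT`) and reuses the capacity field to carry the original allocation pointer shifted right by one, so the allocation (and its refcount) can still be found later. A minimal Rust sketch of that tagging scheme; the struct and names are invented for illustration, with `usize` standing in for real pointers:

```rust
// A minimal model (not the actual builtins) of the tagging scheme behind
// `capacity_or_alloc_ptr`: the high bit of `length` marks a seamless slice,
// and the capacity field then holds the allocation address shifted right by
// one (allocations are at least 2-byte aligned, so no information is lost).
const SEAMLESS_SLICE_BIT: usize = 1 << (usize::BITS - 1);

struct RocStrModel {
    bytes: usize,                 // address of this string's first byte
    length: usize,                // length, top bit set for a seamless slice
    capacity_or_alloc_ptr: usize, // capacity, or allocation address >> 1
}

impl RocStrModel {
    fn is_seamless_slice(&self) -> bool {
        self.length & SEAMLESS_SLICE_BIT != 0
    }

    fn len(&self) -> usize {
        self.length & !SEAMLESS_SLICE_BIT
    }

    /// Recover the start of the heap allocation (where the refcount lives).
    fn alloc_ptr(&self) -> usize {
        if self.is_seamless_slice() {
            self.capacity_or_alloc_ptr << 1 // undo the `>> 1` tag
        } else {
            self.bytes
        }
    }
}

fn main() {
    let allocation = 0x1000usize; // pretend heap address
    let slice = RocStrModel {
        bytes: allocation + 2,          // slice starts 2 bytes into the data
        length: 5 | SEAMLESS_SLICE_BIT, // 5 bytes long, tagged as a slice
        capacity_or_alloc_ptr: allocation >> 1,
    };
    assert!(slice.is_seamless_slice());
    assert_eq!(slice.len(), 5);
    assert_eq!(slice.alloc_ptr(), allocation);
}
```
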
@ -2858,7 +2890,7 @@ pub fn strCloneTo(
const slice = string.asSlice();
var relative = string;
relative.str_bytes = @as(?[*]u8, @ptrFromInt(extra_offset)); // i.e. just after the string struct
relative.bytes = @as(?[*]u8, @ptrFromInt(extra_offset)); // i.e. just after the string struct
// write the string struct
const array = relative.asArray();
@ -2871,17 +2903,17 @@ pub fn strCloneTo(
}
}
pub fn strRefcountPtr(
pub fn strAllocationPtr(
string: RocStr,
) callconv(.C) ?[*]u8 {
return string.getRefcountPtr();
return string.getAllocationPtr();
}
pub fn strReleaseExcessCapacity(
string: RocStr,
) callconv(.C) RocStr {
const old_length = string.len();
// We use the direct list.capacity_or_ref_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
// We use the direct list.capacity_or_alloc_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
if (string.isSmallStr()) {
// SmallStr has no excess capacity.
return string;

View file

@ -20,11 +20,11 @@ extern fn roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, align
// This should never be passed a null pointer.
extern fn roc_dealloc(c_ptr: *anyopaque, alignment: u32) callconv(.C) void;
extern fn roc_dbg(file_path: *anyopaque, message: *anyopaque) callconv(.C) void;
extern fn roc_dbg(loc: *anyopaque, message: *anyopaque, src: *anyopaque) callconv(.C) void;
// Since roc_dbg is never used by the builtins, we need to export a function that uses it to stop DCE.
pub fn test_dbg(file_path: *anyopaque, message: *anyopaque) callconv(.C) void {
roc_dbg(file_path, message);
pub fn test_dbg(loc: *anyopaque, src: *anyopaque, message: *anyopaque) callconv(.C) void {
roc_dbg(loc, message, src);
}
extern fn kill(pid: c_int, sig: c_int) c_int;
@ -47,9 +47,10 @@ fn testing_roc_mmap(addr: ?*anyopaque, length: c_uint, prot: c_int, flags: c_int
return mmap(addr, length, prot, flags, fd, offset);
}
fn testing_roc_dbg(file_path: *anyopaque, message: *anyopaque) callconv(.C) void {
fn testing_roc_dbg(loc: *anyopaque, message: *anyopaque, src: *anyopaque) callconv(.C) void {
_ = message;
_ = file_path;
_ = src;
_ = loc;
}
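
For platform authors, the visible change here is that `roc_dbg` now takes three opaque pointers, in `(location, message, source)` order. A hypothetical Rust host stub matching the extern declaration above; decoding the pointers as `RocStr` values is platform-specific and omitted:

```rust
use std::ffi::c_void;

// Hypothetical host-side stub; only the signature and argument order matter
// here. A real platform would decode each pointer as a RocStr and print
// something like "[location] message".
#[no_mangle]
pub extern "C" fn roc_dbg(loc: *mut c_void, message: *mut c_void, src: *mut c_void) {
    let _ = (loc, message, src);
}

fn main() {}
```
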
comptime {

View file

@ -57,7 +57,7 @@ DecodeError : [TooShort]
## Return type of a [Decoder].
##
## This is can be useful when creating a [custom](#custom) decoder or when
## This can be useful when creating a [custom](#custom) decoder or when
## using [fromBytesPartial](#fromBytesPartial). For example, writing unit tests
## such as:
## ```

File diff suppressed because it is too large

View file

@ -34,8 +34,7 @@ interface Inspect
custom,
apply,
toInspector,
DbgFormatter,
toDbgStr,
toStr,
]
imports [
Bool.{ Bool },
@ -99,6 +98,12 @@ inspect = \val ->
(@Inspector valFn) = toInspector val
valFn (init {})
toStr : val -> Str where val implements Inspect
toStr = \val ->
val
|> inspect
|> toDbgStr
# The current default formatter for inspect.
# This just returns a simple string for debugging.
# More powerful formatters will likely be wanted in the future.

View file

@ -32,6 +32,7 @@ interface List
product,
walkWithIndex,
walkUntil,
walkWithIndexUntil,
walkFrom,
walkFromUntil,
range,
@ -520,6 +521,25 @@ walkWithIndexHelp = \list, state, f, index, length ->
else
state
## Like [walkUntil], but at each step the function also receives the index of the current element.
walkWithIndexUntil : List elem, state, (state, elem, Nat -> [Continue state, Break state]) -> state
walkWithIndexUntil = \list, state, f ->
when walkWithIndexUntilHelp list state f 0 (List.len list) is
Continue new -> new
Break new -> new
## internal helper
walkWithIndexUntilHelp : List elem, s, (s, elem, Nat -> [Continue s, Break b]), Nat, Nat -> [Continue s, Break b]
walkWithIndexUntilHelp = \list, state, f, index, length ->
if index < length then
when f state (List.getUnsafe list index) index is
Continue nextState ->
walkWithIndexUntilHelp list nextState f (Num.addWrap index 1) length
Break b -> Break b
else
Continue state
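
For readers coming from Rust, `walkWithIndexUntil` is an early-exit indexed fold. A rough, illustrative equivalent using `std::ops::ControlFlow` (not part of this change):

```rust
use std::ops::ControlFlow;

// Early-exit indexed fold: `Continue` carries the next state, `Break` stops
// the walk immediately with a final state.
fn walk_with_index_until<T, S>(
    items: &[T],
    mut state: S,
    mut f: impl FnMut(S, &T, usize) -> ControlFlow<S, S>,
) -> S {
    for (index, item) in items.iter().enumerate() {
        match f(state, item, index) {
            ControlFlow::Continue(next) => state = next, // keep folding
            ControlFlow::Break(done) => return done,     // stop early
        }
    }
    state
}

fn main() {
    // Find the index of the first even number, akin to `Break (FoundAt index)`.
    let found = walk_with_index_until(&[1, 3, 4, 5], None, |state, n, i| {
        if n % 2 == 0 {
            ControlFlow::Break(Some(i))
        } else {
            ControlFlow::Continue(state)
        }
    });
    assert_eq!(found, Some(2));
}
```
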
## Note that in other languages, `walkBackwards` is sometimes called `reduceRight`,
## `fold`, `foldRight`, or `foldr`.
walkBackwards : List elem, state, (state, elem -> state) -> state

View file

@ -560,8 +560,6 @@ tau = 2 * pi
# ------- Functions
## Convert a number to a [Str].
##
## This is the same as calling `Num.format {}` - so for more details on
## exact formatting, see `Num.format`.
## ```
## Num.toStr 42
## ```
@ -573,7 +571,6 @@ tau = 2 * pi
## When this function is given a non-[finite](Num.isFinite)
## [F64] or [F32] value, the returned string will be `"NaN"`, `"∞"`, or `"-∞"`.
##
## To get strings in hexadecimal, octal, or binary format, use `Num.format`.
toStr : Num * -> Str
intCast : Int a -> Int b
@ -1054,7 +1051,7 @@ shiftLeftBy : Int a, U8 -> Int a
##
## The most significant bits are copied from the current.
## ```
## shiftRightBy 0b0000_0011 2 == 0b0000_1100
## shiftRightBy 0b0000_1100 2 == 0b0000_0011
##
## 0b0001_0100 |> shiftRightBy 2 == 0b0000_0101
##
@ -1065,16 +1062,16 @@ shiftRightBy : Int a, U8 -> Int a
## Bitwise logical right shift of a number by another
##
## The most significant bits always become 0. This means that shifting left is
## The most significant bits always become 0. This means that shifting right is
## like dividing by factors of two for unsigned integers.
## ```
## shiftRightBy 0b0010_1000 2 == 0b0000_1010
## shiftRightZfBy 0b0010_1000 2 == 0b0000_1010
##
## 0b0010_1000 |> shiftRightBy 2 == 0b0000_1010
## 0b0010_1000 |> shiftRightZfBy 2 == 0b0000_1010
##
## 0b1001_0000 |> shiftRightBy 2 == 0b0010_0100
## 0b1001_0000 |> shiftRightZfBy 2 == 0b0010_0100
## ```
## In some languages `shiftRightBy` is implemented as a binary operator `>>`.
## In some languages `shiftRightZfBy` is implemented as a binary operator `>>`.
shiftRightZfBy : Int a, U8 -> Int a
## Round off the given fraction to the nearest integer.
@ -1112,7 +1109,7 @@ powInt : Int a, Int a -> Int a
##
## 8
## ```
countLeadingZeroBits : Int a -> Nat
countLeadingZeroBits : Int a -> U8
## Counts the number of least-significant (trailing in a big-Endian sense) zeroes in an integer.
##
@ -1125,7 +1122,7 @@ countLeadingZeroBits : Int a -> Nat
##
## 8
## ```
countTrailingZeroBits : Int a -> Nat
countTrailingZeroBits : Int a -> U8
## Counts the number of set bits in an integer.
##
@ -1138,7 +1135,7 @@ countTrailingZeroBits : Int a -> Nat
##
## 0
## ```
countOneBits : Int a -> Nat
countOneBits : Int a -> U8
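
Narrowing these three results from `Nat` to `U8` is safe because a bit count can never exceed the type's bit width, and even `U128` has only 128 bits. As a quick cross-check of the documented semantics, Rust's standard integer methods compute the same three quantities (illustrative only):

```rust
fn main() {
    let n: u8 = 0b0000_1000;
    assert_eq!(n.leading_zeros(), 4);  // countLeadingZeroBits
    assert_eq!(n.trailing_zeros(), 3); // countTrailingZeroBits
    assert_eq!(n.count_ones(), 1);     // countOneBits

    // A U8 result suffices: the largest possible count is 128.
    assert!(u128::MAX.count_ones() <= 255);
}
```
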
addWrap : Int range, Int range -> Int range
@ -1433,12 +1430,11 @@ toU32 : Int * -> U32
toU64 : Int * -> U64
toU128 : Int * -> U128
## Converts an [Int] to a [Nat]. If the given number doesn't fit in [Nat], it will be truncated.
## Converts an [Int] to a [Nat]. If the given number doesn't fit in [Nat], it will be truncated!
## Since [Nat] has a different maximum number depending on the system you're building
## for, this may give a different answer on different systems.
##
## For example, on a 32-bit system, `Num.maxNat` will return the same answer as
## `Num.maxU32`. This means that calling `Num.toNat 9_000_000_000` on a 32-bit
## For example, calling `Num.toNat 9_000_000_000` on a 32-bit
## system will return `Num.maxU32` instead of 9 billion, because 9 billion is
## higher than `Num.maxU32` and will not fit in a [Nat] on a 32-bit system.
##

View file

@ -2,9 +2,14 @@ interface Set
exposes [
Set,
empty,
withCapacity,
reserve,
releaseExcessCapacity,
single,
walk,
walkUntil,
keepIf,
dropIf,
insert,
len,
isEmpty,
@ -43,7 +48,7 @@ Set k := Dict.Dict k {} where k implements Hash & Eq
},
]
isEq : Set k, Set k -> Bool where k implements Hash & Eq
isEq : Set k, Set k -> Bool
isEq = \xs, ys ->
if len xs != len ys then
Bool.false
@ -54,7 +59,7 @@ isEq = \xs, ys ->
else
Break Bool.false
hashSet : hasher, Set k -> hasher where k implements Hash & Eq, hasher implements Hasher
hashSet : hasher, Set k -> hasher where hasher implements Hasher
hashSet = \hasher, @Set inner -> Hash.hash hasher inner
toInspectorSet : Set k -> Inspector f where k implements Inspect & Hash & Eq, f implements InspectFormatter
@ -72,13 +77,25 @@ toInspectorSet = \set ->
empty : {} -> Set *
empty = \{} -> @Set (Dict.empty {})
## Return a dictionary with space allocated for a number of entries. This
## Return a set with space allocated for a number of entries. This
## may provide a performance optimization if you know how many entries will be
## inserted.
withCapacity : Nat -> Set *
withCapacity = \cap ->
@Set (Dict.withCapacity cap)
## Enlarge the set to hold at least `capacity` additional elements.
reserve : Set k, Nat -> Set k
reserve = \@Set dict, requested ->
@Set (Dict.reserve dict requested)
## Shrink the memory footprint of a set such that capacity is as small as possible.
## This function will require regenerating the metadata if the size changes.
## There will still be some overhead due to dictionary metadata always being a power of 2.
releaseExcessCapacity : Set k -> Set k
releaseExcessCapacity = \@Set dict ->
@Set (Dict.releaseExcessCapacity dict)
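
`reserve` and `releaseExcessCapacity` form the usual grow/shrink pair found on hash-based containers. As a loose analogy only, Rust's `HashSet` exposes the same pair (this says nothing about Roc's own `Dict` internals):

```rust
use std::collections::HashSet;

fn main() {
    let mut set: HashSet<u32> = HashSet::new();
    set.reserve(1000); // like Set.reserve: space for 1000 additional elements
    assert!(set.capacity() >= 1000);

    set.insert(1);
    set.shrink_to_fit(); // like Set.releaseExcessCapacity
    assert!(set.capacity() >= 1); // implementations may round capacity up
}
```
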
## Creates a new `Set` with a single value.
## ```
## singleItemSet = Set.single "Apple"
@ -86,7 +103,7 @@ withCapacity = \cap ->
##
## expect countValues == 1
## ```
single : k -> Set k where k implements Hash & Eq
single : k -> Set k
single = \key ->
Dict.single key {} |> @Set
@ -102,7 +119,7 @@ single = \key ->
##
## expect countValues == 3
## ```
insert : Set k, k -> Set k where k implements Hash & Eq
insert : Set k, k -> Set k
insert = \@Set dict, key ->
Dict.insert dict key {} |> @Set
@ -187,7 +204,7 @@ expect
## expect has10 == Bool.false
## expect has20 == Bool.true
## ```
remove : Set k, k -> Set k where k implements Hash & Eq
remove : Set k, k -> Set k
remove = \@Set dict, key ->
Dict.remove dict key |> @Set
@ -206,7 +223,7 @@ remove = \@Set dict, key ->
## expect hasApple == Bool.true
## expect hasBanana == Bool.false
## ```
contains : Set k, k -> Bool where k implements Hash & Eq
contains : Set k, k -> Bool
contains = \@Set dict, key ->
Dict.contains dict key
@ -219,7 +236,7 @@ contains = \@Set dict, key ->
##
## expect Set.toList numbers == values
## ```
toList : Set k -> List k where k implements Hash & Eq
toList : Set k -> List k
toList = \@Set dict ->
Dict.keys dict
@ -233,11 +250,12 @@ toList = \@Set dict ->
##
## expect Set.fromList [Pear, Apple, Banana] == values
## ```
fromList : List k -> Set k where k implements Hash & Eq
fromList : List k -> Set k
fromList = \list ->
initial = @Set (Dict.withCapacity (List.len list))
List.walk list initial insert
list
|> List.map \k -> (k, {})
|> Dict.fromList
|> @Set
## Combine two `Set` collections by keeping the
## [union](https://en.wikipedia.org/wiki/Union_(set_theory))
@ -249,7 +267,7 @@ fromList = \list ->
##
## expect Set.union set1 set2 == Set.fromList [Left, Right]
## ```
union : Set k, Set k -> Set k where k implements Hash & Eq
union : Set k, Set k -> Set k
union = \@Set dict1, @Set dict2 ->
Dict.insertAll dict1 dict2 |> @Set
@ -262,7 +280,7 @@ union = \@Set dict1, @Set dict2 ->
##
## expect Set.intersection set1 set2 == Set.single Left
## ```
intersection : Set k, Set k -> Set k where k implements Hash & Eq
intersection : Set k, Set k -> Set k
intersection = \@Set dict1, @Set dict2 ->
Dict.keepShared dict1 dict2 |> @Set
@ -276,7 +294,7 @@ intersection = \@Set dict1, @Set dict2 ->
##
## expect Set.difference first second == Set.fromList [Up, Down]
## ```
difference : Set k, Set k -> Set k where k implements Hash & Eq
difference : Set k, Set k -> Set k
difference = \@Set dict1, @Set dict2 ->
Dict.removeAll dict1 dict2 |> @Set
@ -299,14 +317,14 @@ difference = \@Set dict1, @Set dict2 ->
##
## expect result == 2
## ```
walk : Set k, state, (state, k -> state) -> state where k implements Hash & Eq
walk : Set k, state, (state, k -> state) -> state
walk = \@Set dict, state, step ->
Dict.walk dict state (\s, k, _ -> step s k)
## Convert each value in the set to something new, by calling a conversion
## function on each of them which receives the old value. Then return a
## new set containing the converted values.
map : Set a, (a -> b) -> Set b where a implements Hash & Eq, b implements Hash & Eq
map : Set a, (a -> b) -> Set b
map = \set, transform ->
init = withCapacity (capacity set)
@ -318,7 +336,7 @@ map = \set, transform ->
## (using [Set.union]) into one set.
##
## You may know a similar function named `concatMap` in other languages.
joinMap : Set a, (a -> Set b) -> Set b where a implements Hash & Eq, b implements Hash & Eq
joinMap : Set a, (a -> Set b) -> Set b
joinMap = \set, transform ->
init = withCapacity (capacity set) # Might be a pessimization
@ -340,10 +358,32 @@ joinMap = \set, transform ->
##
## expect result == FoundTheAnswer
## ```
walkUntil : Set k, state, (state, k -> [Continue state, Break state]) -> state where k implements Hash & Eq
walkUntil : Set k, state, (state, k -> [Continue state, Break state]) -> state
walkUntil = \@Set dict, state, step ->
Dict.walkUntil dict state (\s, k, _ -> step s k)
## Run the given function on each element in the `Set`, and return
## a `Set` with just the elements for which the function returned `Bool.true`.
## ```
## expect Set.fromList [1,2,3,4,5]
## |> Set.keepIf \k -> k >= 3
## |> Bool.isEq (Set.fromList [3,4,5])
## ```
keepIf : Set k, (k -> Bool) -> Set k
keepIf = \@Set dict, predicate ->
@Set (Dict.keepIf dict (\(k, _v) -> predicate k))
## Run the given function on each element in the `Set`, and return
## a `Set` with just the elements for which the function returned `Bool.false`.
## ```
## expect Set.fromList [1,2,3,4,5]
## |> Set.dropIf \k -> k >= 3
## |> Bool.isEq (Set.fromList [1,2])
## ```
dropIf : Set k, (k -> Bool) -> Set k
dropIf = \@Set dict, predicate ->
@Set (Dict.dropIf dict (\(k, _v) -> predicate k))
expect
first =
single "Keep Me"
@ -443,3 +483,13 @@ expect
|> insert orderOne
wrapperOne == wrapperTwo
expect
Set.fromList [1, 2, 3, 4, 5]
|> Set.keepIf \k -> k >= 3
|> Bool.isEq (Set.fromList [3, 4, 5])
expect
Set.fromList [1, 2, 3, 4, 5]
|> Set.dropIf \k -> k >= 3
|> Bool.isEq (Set.fromList [1, 2])

View file

@ -1,3 +1,3 @@
package "builtins"
exposes [Str, Num, Bool, Result, List, Dict, Set, Decode, Encode, Hash, Box, TotallyNotJson]
exposes [Str, Num, Bool, Result, List, Dict, Set, Decode, Encode, Hash, Box, TotallyNotJson, Inspect]
packages {}

View file

@ -370,7 +370,7 @@ pub const STR_GET_SCALAR_UNSAFE: &str = "roc_builtins.str.get_scalar_unsafe";
pub const STR_CLONE_TO: &str = "roc_builtins.str.clone_to";
pub const STR_WITH_CAPACITY: &str = "roc_builtins.str.with_capacity";
pub const STR_GRAPHEMES: &str = "roc_builtins.str.graphemes";
pub const STR_REFCOUNT_PTR: &str = "roc_builtins.str.refcount_ptr";
pub const STR_ALLOCATION_PTR: &str = "roc_builtins.str.allocation_ptr";
pub const STR_RELEASE_EXCESS_CAPACITY: &str = "roc_builtins.str.release_excess_capacity";
pub const LIST_MAP: &str = "roc_builtins.list.map";
@ -390,7 +390,7 @@ pub const LIST_PREPEND: &str = "roc_builtins.list.prepend";
pub const LIST_APPEND_UNSAFE: &str = "roc_builtins.list.append_unsafe";
pub const LIST_RESERVE: &str = "roc_builtins.list.reserve";
pub const LIST_CAPACITY: &str = "roc_builtins.list.capacity";
pub const LIST_REFCOUNT_PTR: &str = "roc_builtins.list.refcount_ptr";
pub const LIST_ALLOCATION_PTR: &str = "roc_builtins.list.allocation_ptr";
pub const LIST_RELEASE_EXCESS_CAPACITY: &str = "roc_builtins.list.release_excess_capacity";
pub const DEC_ABS: &str = "roc_builtins.dec.abs";
@ -422,6 +422,7 @@ pub const DEC_TAN: &str = "roc_builtins.dec.tan";
pub const DEC_TO_I128: &str = "roc_builtins.dec.to_i128";
pub const DEC_TO_STR: &str = "roc_builtins.dec.to_str";
pub const UTILS_DBG_IMPL: &str = "roc_builtins.utils.dbg_impl";
pub const UTILS_TEST_PANIC: &str = "roc_builtins.utils.test_panic";
pub const UTILS_ALLOCATE_WITH_REFCOUNT: &str = "roc_builtins.utils.allocate_with_refcount";
pub const UTILS_INCREF_RC_PTR: &str = "roc_builtins.utils.incref_rc_ptr";

View file

@ -670,11 +670,15 @@ fn deep_copy_expr_help<C: CopyEnv>(env: &mut C, copied: &mut Vec<Variable>, expr
},
Dbg {
source_location,
source,
loc_message,
loc_continuation,
variable,
symbol,
} => Dbg {
source_location: source_location.clone(),
source: source.clone(),
loc_message: Box::new(loc_message.map(|e| go_help!(e))),
loc_continuation: Box::new(loc_continuation.map(|e| go_help!(e))),
variable: sub!(*variable),

View file

@ -8,7 +8,7 @@
use roc_error_macros::internal_error;
use roc_module::{called_via::CalledVia, symbol::Symbol};
use roc_parse::ast;
use roc_parse::ast::{self, Collection};
use roc_region::all::{Loc, Region};
use crate::{env::Env, pattern::Pattern, scope::Scope};
@ -214,12 +214,13 @@ fn is_eq<'a>(env: &mut Env<'a>, at_opaque: &'a str) -> ast::Expr<'a> {
}
fn to_inspector<'a>(env: &mut Env<'a>, at_opaque: &'a str) -> ast::Expr<'a> {
// Inspect for opaques as a tag so it prints `@Opaque payload`.
let alloc_pat = |it| env.arena.alloc(Loc::at(DERIVED_REGION, it));
let alloc_expr = |it| env.arena.alloc(Loc::at(DERIVED_REGION, it));
let payload = "#payload";
// \@Opaq payload
// \@Opaque payload
let opaque_ref = alloc_pat(ast::Pattern::OpaqueRef(at_opaque));
let opaque_apply_pattern = ast::Pattern::Apply(
opaque_ref,
@ -229,7 +230,7 @@ fn to_inspector<'a>(env: &mut Env<'a>, at_opaque: &'a str) -> ast::Expr<'a> {
);
// Inspect.toInspector payload
let call_member = alloc_expr(ast::Expr::Apply(
let to_inspector_payload = alloc_expr(ast::Expr::Apply(
alloc_expr(ast::Expr::Var {
module_name: "Inspect",
ident: "toInspector",
@ -241,17 +242,61 @@ fn to_inspector<'a>(env: &mut Env<'a>, at_opaque: &'a str) -> ast::Expr<'a> {
roc_module::called_via::CalledVia::Space,
));
// TODO: change the derived implementation to be something that includes the opaque symbol in
// the derivation, e.g. something like
//
// \@Opaq payload ->
// Inspect.opaqueWrapper "toString symbol" payload
// Inspect.tag "@opaque" [Inspect.toInspector payload]
let to_inspector_list = alloc_expr(ast::Expr::List(Collection::with_items(
&*env.arena.alloc([&*to_inspector_payload]),
)));
let opaque_name = alloc_expr(ast::Expr::Str(ast::StrLiteral::PlainLine(at_opaque)));
// \@Opaq payload -> Inspect.toInspector payload
ast::Expr::Closure(
let opaque_inspector = alloc_expr(ast::Expr::Apply(
alloc_expr(ast::Expr::Var {
module_name: "Inspect",
ident: "tag",
}),
&*env.arena.alloc([&*opaque_name, &*to_inspector_list]),
roc_module::called_via::CalledVia::Space,
));
let fmt = "#fmt";
// \fmt -> Inspect.apply opaqueInspector fmt
let apply_opaque_inspector = alloc_expr(ast::Expr::Apply(
alloc_expr(ast::Expr::Var {
module_name: "Inspect",
ident: "apply",
}),
&*env.arena.alloc([
&*opaque_inspector,
&*alloc_expr(ast::Expr::Var {
module_name: "",
ident: fmt,
}),
]),
roc_module::called_via::CalledVia::Space,
));
let custom_closure = alloc_expr(ast::Expr::Closure(
env.arena
.alloc([Loc::at(DERIVED_REGION, ast::Pattern::Identifier(fmt))]),
apply_opaque_inspector,
));
// Inspect.custom \fmt -> ...
let custom = alloc_expr(ast::Expr::Apply(
alloc_expr(ast::Expr::Var {
module_name: "Inspect",
ident: "custom",
}),
env.arena.alloc([&*custom_closure]),
CalledVia::Space,
));
// \@Opaque payload -> (Inspect.custom \fmt -> ...)
ast::Expr::Closure(
&*env
.arena
.alloc([Loc::at(DERIVED_REGION, opaque_apply_pattern)]),
call_member,
custom,
)
}

View file

@ -269,6 +269,8 @@ pub enum Expr {
},
Dbg {
source_location: Box<str>,
source: Box<str>,
loc_message: Box<Loc<Expr>>,
loc_continuation: Box<Loc<Expr>>,
variable: Variable,
@ -1249,7 +1251,7 @@ pub fn canonicalize_expr<'a>(
ast::Expr::Dbg(_, _) => {
internal_error!("Dbg should have been desugared by now")
}
ast::Expr::LowLevelDbg(message, continuation) => {
ast::Expr::LowLevelDbg((source_location, source), message, continuation) => {
let mut output = Output::default();
let (loc_message, output1) =
@ -1276,6 +1278,8 @@ pub fn canonicalize_expr<'a>(
(
Dbg {
source_location: (*source_location).into(),
source: (*source).into(),
loc_message: Box::new(loc_message),
loc_continuation: Box::new(loc_continuation),
variable: var_store.fresh(),
@ -2097,6 +2101,8 @@ pub fn inline_calls(var_store: &mut VarStore, expr: Expr) -> Expr {
}
Dbg {
source_location,
source,
loc_message,
loc_continuation,
variable,
@ -2113,6 +2119,8 @@ pub fn inline_calls(var_store: &mut VarStore, expr: Expr) -> Expr {
};
Dbg {
source_location,
source,
loc_message: Box::new(loc_message),
loc_continuation: Box::new(loc_continuation),
variable,
@ -2398,7 +2406,7 @@ pub fn is_valid_interpolation(expr: &ast::Expr<'_>) -> bool {
| ast::Expr::MalformedClosure => true,
// Newlines are disallowed inside interpolation, and these all require newlines
ast::Expr::Dbg(_, _)
| ast::Expr::LowLevelDbg(_, _)
| ast::Expr::LowLevelDbg(_, _, _)
| ast::Expr::Defs(_, _)
| ast::Expr::Expect(_, _)
| ast::Expr::When(_, _)

View file

@ -275,6 +275,8 @@ pub fn canonicalize_module_defs<'a>(
loc_defs: &'a mut Defs<'a>,
header_type: &roc_parse::header::HeaderType,
home: ModuleId,
module_path: &str,
src: &'a str,
module_ids: &'a ModuleIds,
exposed_ident_ids: IdentIds,
dep_idents: &'a IdentIdsByModule,
@ -310,7 +312,7 @@ pub fn canonicalize_module_defs<'a>(
// visited a BinOp node we'd recursively try to apply this to each of its nested
// operators, and then again on *their* nested operators, ultimately applying the
// rules multiple times unnecessarily.
crate::operator::desugar_defs(arena, loc_defs);
crate::operator::desugar_defs(arena, loc_defs, src, &mut None, module_path);
let mut rigid_variables = RigidVariables::default();

View file

@ -11,7 +11,7 @@ use roc_parse::ast::{
AssignedField, Collection, Pattern, RecordBuilderField, StrLiteral, StrSegment, ValueDef,
WhenBranch,
};
use roc_region::all::{Loc, Region};
use roc_region::all::{LineInfo, Loc, Region};
// BinOp precedence logic adapted from Gluon by Markus Westerlind
// https://github.com/gluon-lang/gluon - license information can be found in
@ -67,13 +67,19 @@ fn new_op_call_expr<'a>(
Loc { region, value }
}
fn desugar_value_def<'a>(arena: &'a Bump, def: &'a ValueDef<'a>) -> ValueDef<'a> {
fn desugar_value_def<'a>(
arena: &'a Bump,
def: &'a ValueDef<'a>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> ValueDef<'a> {
use ValueDef::*;
match def {
Body(loc_pattern, loc_expr) => Body(
desugar_loc_pattern(arena, loc_pattern),
desugar_expr(arena, loc_expr),
desugar_loc_pattern(arena, loc_pattern, src, line_info, module_path),
desugar_expr(arena, loc_expr, src, line_info, module_path),
),
ann @ Annotation(_, _) => *ann,
AnnotatedBody {
@ -87,13 +93,14 @@ fn desugar_value_def<'a>(arena: &'a Bump, def: &'a ValueDef<'a>) -> ValueDef<'a>
ann_type,
comment: *comment,
body_pattern,
body_expr: desugar_expr(arena, body_expr),
body_expr: desugar_expr(arena, body_expr, src, line_info, module_path),
},
Dbg {
condition,
preceding_comment,
} => {
let desugared_condition = &*arena.alloc(desugar_expr(arena, condition));
let desugared_condition =
&*arena.alloc(desugar_expr(arena, condition, src, line_info, module_path));
Dbg {
condition: desugared_condition,
preceding_comment: *preceding_comment,
@ -103,7 +110,8 @@ fn desugar_value_def<'a>(arena: &'a Bump, def: &'a ValueDef<'a>) -> ValueDef<'a>
condition,
preceding_comment,
} => {
let desugared_condition = &*arena.alloc(desugar_expr(arena, condition));
let desugared_condition =
&*arena.alloc(desugar_expr(arena, condition, src, line_info, module_path));
Expect {
condition: desugared_condition,
preceding_comment: *preceding_comment,
@ -113,7 +121,8 @@ fn desugar_value_def<'a>(arena: &'a Bump, def: &'a ValueDef<'a>) -> ValueDef<'a>
condition,
preceding_comment,
} => {
let desugared_condition = &*arena.alloc(desugar_expr(arena, condition));
let desugared_condition =
&*arena.alloc(desugar_expr(arena, condition, src, line_info, module_path));
ExpectFx {
condition: desugared_condition,
preceding_comment: *preceding_comment,
@ -122,15 +131,27 @@ fn desugar_value_def<'a>(arena: &'a Bump, def: &'a ValueDef<'a>) -> ValueDef<'a>
}
}
pub fn desugar_defs<'a>(arena: &'a Bump, defs: &mut roc_parse::ast::Defs<'a>) {
pub fn desugar_defs<'a>(
arena: &'a Bump,
defs: &mut roc_parse::ast::Defs<'a>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) {
for value_def in defs.value_defs.iter_mut() {
*value_def = desugar_value_def(arena, arena.alloc(*value_def));
*value_def = desugar_value_def(arena, arena.alloc(*value_def), src, line_info, module_path);
}
}
/// Reorder the expression tree based on operator precedence and associativity rules,
/// then replace the BinOp nodes with Apply nodes. Also drop SpaceBefore and SpaceAfter nodes.
pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc<Expr<'a>> {
pub fn desugar_expr<'a>(
arena: &'a Bump,
loc_expr: &'a Loc<Expr<'a>>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> &'a Loc<Expr<'a>> {
match &loc_expr.value {
Float(..)
| Num(..)
@ -153,16 +174,22 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
StrLiteral::PlainLine(_) => loc_expr,
StrLiteral::Line(segments) => {
let region = loc_expr.region;
let value = Str(StrLiteral::Line(desugar_str_segments(arena, segments)));
let value = Str(StrLiteral::Line(desugar_str_segments(
arena,
segments,
src,
line_info,
module_path,
)));
arena.alloc(Loc { region, value })
}
StrLiteral::Block(lines) => {
let region = loc_expr.region;
let new_lines = Vec::from_iter_in(
lines
.iter()
.map(|segments| desugar_str_segments(arena, segments)),
lines.iter().map(|segments| {
desugar_str_segments(arena, segments, src, line_info, module_path)
}),
arena,
);
let value = Str(StrLiteral::Block(new_lines.into_bump_slice()));
@ -177,7 +204,17 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
region,
value: **sub_expr,
};
let value = TupleAccess(&desugar_expr(arena, arena.alloc(loc_sub_expr)).value, paths);
let value = TupleAccess(
&desugar_expr(
arena,
arena.alloc(loc_sub_expr),
src,
line_info,
module_path,
)
.value,
paths,
);
arena.alloc(Loc { region, value })
}
@ -187,7 +224,17 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
region,
value: **sub_expr,
};
let value = RecordAccess(&desugar_expr(arena, arena.alloc(loc_sub_expr)).value, paths);
let value = RecordAccess(
&desugar_expr(
arena,
arena.alloc(loc_sub_expr),
src,
line_info,
module_path,
)
.value,
paths,
);
arena.alloc(Loc { region, value })
}
@ -195,7 +242,7 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
let mut new_items = Vec::with_capacity_in(items.len(), arena);
for item in items.iter() {
new_items.push(desugar_expr(arena, item));
new_items.push(desugar_expr(arena, item, src, line_info, module_path));
}
let new_items = new_items.into_bump_slice();
let value: Expr<'a> = List(items.replace_items(new_items));
@ -205,32 +252,47 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
value,
})
}
Record(fields) => arena.alloc(Loc {
region: loc_expr.region,
value: Record(fields.map_items(arena, |field| {
let value = desugar_field(arena, &field.value);
Loc {
Record(fields) => {
let mut allocated = Vec::with_capacity_in(fields.len(), arena);
for field in fields.iter() {
let value = desugar_field(arena, &field.value, src, line_info, module_path);
allocated.push(Loc {
value,
region: field.region,
}
})),
}),
Tuple(fields) => arena.alloc(Loc {
region: loc_expr.region,
value: Tuple(fields.map_items(arena, |field| desugar_expr(arena, field))),
}),
});
}
let fields = fields.replace_items(allocated.into_bump_slice());
arena.alloc(Loc {
region: loc_expr.region,
value: Record(fields),
})
}
Tuple(fields) => {
let mut allocated = Vec::with_capacity_in(fields.len(), arena);
for field in fields.iter() {
let expr = desugar_expr(arena, field, src, line_info, module_path);
allocated.push(expr);
}
let fields = fields.replace_items(allocated.into_bump_slice());
arena.alloc(Loc {
region: loc_expr.region,
value: Tuple(fields),
})
}
RecordUpdate { fields, update } => {
// NOTE the `update` field is always a `Var { .. }`, we only desugar it to get rid of
// any spaces before/after
let new_update = desugar_expr(arena, update);
let new_update = desugar_expr(arena, update, src, line_info, module_path);
let new_fields = fields.map_items(arena, |field| {
let value = desugar_field(arena, &field.value);
Loc {
let mut allocated = Vec::with_capacity_in(fields.len(), arena);
for field in fields.iter() {
let value = desugar_field(arena, &field.value, src, line_info, module_path);
allocated.push(Loc {
value,
region: field.region,
}
});
});
}
let new_fields = fields.replace_items(allocated.into_bump_slice());
arena.alloc(Loc {
region: loc_expr.region,
@ -243,8 +305,8 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
Closure(loc_patterns, loc_ret) => arena.alloc(Loc {
region: loc_expr.region,
value: Closure(
desugar_loc_patterns(arena, loc_patterns),
desugar_expr(arena, loc_ret),
desugar_loc_patterns(arena, loc_patterns, src, line_info, module_path),
desugar_expr(arena, loc_ret, src, line_info, module_path),
),
}),
Backpassing(loc_patterns, loc_body, loc_ret) => {
@ -253,10 +315,11 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
// loc_ret
// first desugar the body, because it may contain |>
let desugared_body = desugar_expr(arena, loc_body);
let desugared_body = desugar_expr(arena, loc_body, src, line_info, module_path);
let desugared_ret = desugar_expr(arena, loc_ret);
let desugared_loc_patterns = desugar_loc_patterns(arena, loc_patterns);
let desugared_ret = desugar_expr(arena, loc_ret, src, line_info, module_path);
let desugared_loc_patterns =
desugar_loc_patterns(arena, loc_patterns, src, line_info, module_path);
let closure = Expr::Closure(desugared_loc_patterns, desugared_ret);
let loc_closure = Loc::at(loc_expr.region, closure);
@ -289,12 +352,20 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
value: UnappliedRecordBuilder(loc_expr),
region: loc_expr.region,
}),
BinOps(lefts, right) => desugar_bin_ops(arena, loc_expr.region, lefts, right),
BinOps(lefts, right) => desugar_bin_ops(
arena,
loc_expr.region,
lefts,
right,
src,
line_info,
module_path,
),
Defs(defs, loc_ret) => {
let mut defs = (*defs).clone();
desugar_defs(arena, &mut defs);
desugar_defs(arena, &mut defs, src, line_info, module_path);
let loc_ret = desugar_expr(arena, loc_ret);
let loc_ret = desugar_expr(arena, loc_ret, src, line_info, module_path);
arena.alloc(Loc::at(loc_expr.region, Defs(arena.alloc(defs), loc_ret)))
}
@ -326,13 +397,17 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
}
};
desugared_args.push(desugar_expr(arena, arg));
desugared_args.push(desugar_expr(arena, arg, src, line_info, module_path));
}
let desugared_args = desugared_args.into_bump_slice();
let mut apply: &Loc<Expr> = arena.alloc(Loc {
value: Apply(desugar_expr(arena, loc_fn), desugared_args, *called_via),
value: Apply(
desugar_expr(arena, loc_fn, src, line_info, module_path),
desugared_args,
*called_via,
),
region: loc_expr.region,
});
@ -341,7 +416,7 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
Some(apply_exprs) => {
for expr in apply_exprs {
let desugared_expr = desugar_expr(arena, expr);
let desugared_expr = desugar_expr(arena, expr, src, line_info, module_path);
let args = std::slice::from_ref(arena.alloc(apply));
@ -356,15 +431,23 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
apply
}
When(loc_cond_expr, branches) => {
let loc_desugared_cond = &*arena.alloc(desugar_expr(arena, loc_cond_expr));
let loc_desugared_cond = &*arena.alloc(desugar_expr(
arena,
loc_cond_expr,
src,
line_info,
module_path,
));
let mut desugared_branches = Vec::with_capacity_in(branches.len(), arena);
for branch in branches.iter() {
let desugared_expr = desugar_expr(arena, &branch.value);
let desugared_patterns = desugar_loc_patterns(arena, branch.patterns);
let desugared_expr =
desugar_expr(arena, &branch.value, src, line_info, module_path);
let desugared_patterns =
desugar_loc_patterns(arena, branch.patterns, src, line_info, module_path);
let desugared_guard = if let Some(guard) = &branch.guard {
Some(*desugar_expr(arena, guard))
Some(*desugar_expr(arena, guard, src, line_info, module_path))
} else {
None
};
@ -402,7 +485,8 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
},
};
let loc_fn_var = arena.alloc(Loc { region, value });
let desugared_args = arena.alloc([desugar_expr(arena, loc_arg)]);
let desugared_args =
arena.alloc([desugar_expr(arena, loc_arg, src, line_info, module_path)]);
arena.alloc(Loc {
value: Apply(loc_fn_var, desugared_args, CalledVia::UnaryOp(op)),
@ -418,6 +502,9 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
value: **expr,
region: loc_expr.region,
}),
src,
line_info,
module_path,
)
}
ParensAround(expr) => {
@ -427,6 +514,9 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
value: **expr,
region: loc_expr.region,
}),
src,
line_info,
module_path,
);
arena.alloc(Loc {
@ -436,14 +526,20 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
}
If(if_thens, final_else_branch) => {
// If does not get desugared into `when` so we can give more targeted error messages during type checking.
let desugared_final_else = &*arena.alloc(desugar_expr(arena, final_else_branch));
let desugared_final_else = &*arena.alloc(desugar_expr(
arena,
final_else_branch,
src,
line_info,
module_path,
));
let mut desugared_if_thens = Vec::with_capacity_in(if_thens.len(), arena);
for (condition, then_branch) in if_thens.iter() {
desugared_if_thens.push((
*desugar_expr(arena, condition),
*desugar_expr(arena, then_branch),
*desugar_expr(arena, condition, src, line_info, module_path),
*desugar_expr(arena, then_branch, src, line_info, module_path),
));
}
@ -453,62 +549,85 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Loc<Expr<'a>>) -> &'a Loc
})
}
Expect(condition, continuation) => {
let desugared_condition = &*arena.alloc(desugar_expr(arena, condition));
let desugared_continuation = &*arena.alloc(desugar_expr(arena, continuation));
let desugared_condition =
&*arena.alloc(desugar_expr(arena, condition, src, line_info, module_path));
let desugared_continuation = &*arena.alloc(desugar_expr(
arena,
continuation,
src,
line_info,
module_path,
));
arena.alloc(Loc {
value: Expect(desugared_condition, desugared_continuation),
region: loc_expr.region,
})
}
Dbg(condition, continuation) => {
// Desugars a `dbg x` statement into
// `roc_dbg (Inspect.toDbgStr (Inspect.inspect x))`
let desugared_continuation = &*arena.alloc(desugar_expr(arena, continuation));
// Desugars a `dbg x` statement into essentially
// Inspect.toStr x |> LowLevelDbg
let desugared_continuation = &*arena.alloc(desugar_expr(
arena,
continuation,
src,
line_info,
module_path,
));
let region = condition.region;
// TODO desugar this in canonicalization instead, so we can work
// in terms of integers exclusively and not need to create strings
// which canonicalization then needs to look up, check if they're exposed, etc
let inspect = Var {
// Inspect.toStr x
let inspect_fn = Var {
module_name: ModuleName::INSPECT,
ident: "inspect",
ident: "toStr",
};
let loc_inspect_fn_var = arena.alloc(Loc {
value: inspect,
value: inspect_fn,
region,
});
let desugared_inspect_args = arena.alloc([desugar_expr(arena, condition)]);
let desugared_inspect_args =
arena.alloc([desugar_expr(arena, condition, src, line_info, module_path)]);
let inspector = arena.alloc(Loc {
let dbg_str = arena.alloc(Loc {
value: Apply(loc_inspect_fn_var, desugared_inspect_args, CalledVia::Space),
region,
});
let to_dbg_str = Var {
module_name: ModuleName::INSPECT,
ident: "toDbgStr",
};
let loc_to_dbg_str_fn_var = arena.alloc(Loc {
value: to_dbg_str,
region,
});
let to_dbg_str_args = arena.alloc([&*inspector]);
let dbg_str = arena.alloc(Loc {
value: Apply(loc_to_dbg_str_fn_var, to_dbg_str_args, CalledVia::Space),
region,
});
// line_info is an option so that we can lazily calculate it.
// That way, if there are no `dbg` statements, we never pay the cost of scanning the source an extra time.
if matches!(line_info, None) {
*line_info = Some(LineInfo::new(src));
}
let line_col = line_info.as_ref().unwrap().convert_pos(region.start());
let dbg_src = src
.split_at(region.start().offset as usize)
.1
.split_at((region.end().offset - region.start().offset) as usize)
.0;
// |> LowLevelDbg
arena.alloc(Loc {
value: LowLevelDbg(dbg_str, desugared_continuation),
value: LowLevelDbg(
arena.alloc((
&*arena.alloc_str(&format!("{}:{}", module_path, line_col.line + 1)),
&*arena.alloc_str(dbg_src),
)),
dbg_str,
desugared_continuation,
),
region: loc_expr.region,
})
}
LowLevelDbg(_, _) => unreachable!("Only exists after desugaring"),
LowLevelDbg(_, _, _) => unreachable!("Only exists after desugaring"),
}
}
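
The new `dbg` desugaring shown above needs two strings per call site: a `module_path:line` location and the source text of the inspected expression, with the line table built lazily so code containing no `dbg` never pays for a source scan. A self-contained sketch of that bookkeeping; the `LineInfo` here is a stand-in for illustration, not the compiler's `roc_region` implementation:

```rust
struct LineInfo {
    line_starts: Vec<usize>, // byte offset where each line begins
}

impl LineInfo {
    fn new(src: &str) -> Self {
        let mut line_starts = vec![0];
        for (i, b) in src.bytes().enumerate() {
            if b == b'\n' {
                line_starts.push(i + 1);
            }
        }
        LineInfo { line_starts }
    }

    /// 0-based line containing the given byte offset.
    fn line_of(&self, offset: usize) -> usize {
        match self.line_starts.binary_search(&offset) {
            Ok(line) => line,
            Err(next) => next - 1,
        }
    }
}

fn dbg_location(
    src: &str,
    module_path: &str,
    line_info: &mut Option<LineInfo>,
    start: usize,
    end: usize,
) -> (String, String) {
    // Build the line table only when a `dbg` is actually encountered.
    let info = line_info.get_or_insert_with(|| LineInfo::new(src));
    let location = format!("{}:{}", module_path, info.line_of(start) + 1);
    let dbg_src = src[start..end].to_string();
    (location, dbg_src)
}

fn main() {
    let src = "main =\n    dbg 1 + 1\n";
    let mut cache = None;
    let start = src.find("1 + 1").unwrap();
    let (loc, text) = dbg_location(src, "Main.roc", &mut cache, start, start + 5);
    assert_eq!(loc, "Main.roc:2");
    assert_eq!(text, "1 + 1");
}
```
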
fn desugar_str_segments<'a>(
arena: &'a Bump,
segments: &'a [StrSegment<'a>],
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> &'a [StrSegment<'a>] {
Vec::from_iter_in(
segments.iter().map(|segment| match segment {
@ -522,6 +641,9 @@ fn desugar_str_segments<'a>(
region: loc_expr.region,
value: *loc_expr.value,
}),
src,
line_info,
module_path,
);
StrSegment::Interpolated(Loc {
region: loc_desugared.region,
@ -537,6 +659,9 @@ fn desugar_str_segments<'a>(
fn desugar_field<'a>(
arena: &'a Bump,
field: &'a AssignedField<'a, Expr<'a>>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> AssignedField<'a, Expr<'a>> {
use roc_parse::ast::AssignedField::*;
@ -547,7 +672,7 @@ fn desugar_field<'a>(
region: loc_str.region,
},
spaces,
desugar_expr(arena, loc_expr),
desugar_expr(arena, loc_expr, src, line_info, module_path),
),
OptionalValue(loc_str, spaces, loc_expr) => OptionalValue(
Loc {
@ -555,7 +680,7 @@ fn desugar_field<'a>(
region: loc_str.region,
},
spaces,
desugar_expr(arena, loc_expr),
desugar_expr(arena, loc_expr, src, line_info, module_path),
),
LabelOnly(loc_str) => {
// Desugar { x } into { x: x }
@ -573,11 +698,11 @@ fn desugar_field<'a>(
region: loc_str.region,
},
&[],
desugar_expr(arena, arena.alloc(loc_expr)),
desugar_expr(arena, arena.alloc(loc_expr), src, line_info, module_path),
)
}
SpaceBefore(field, _spaces) => desugar_field(arena, field),
SpaceAfter(field, _spaces) => desugar_field(arena, field),
SpaceBefore(field, _spaces) => desugar_field(arena, field, src, line_info, module_path),
SpaceAfter(field, _spaces) => desugar_field(arena, field, src, line_info, module_path),
Malformed(string) => Malformed(string),
}
@ -586,11 +711,14 @@ fn desugar_field<'a>(
fn desugar_loc_patterns<'a>(
arena: &'a Bump,
loc_patterns: &'a [Loc<Pattern<'a>>],
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> &'a [Loc<Pattern<'a>>] {
Vec::from_iter_in(
loc_patterns.iter().map(|loc_pattern| Loc {
region: loc_pattern.region,
value: desugar_pattern(arena, loc_pattern.value),
value: desugar_pattern(arena, loc_pattern.value, src, line_info, module_path),
}),
arena,
)
@ -600,14 +728,23 @@ fn desugar_loc_patterns<'a>(
fn desugar_loc_pattern<'a>(
arena: &'a Bump,
loc_pattern: &'a Loc<Pattern<'a>>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> &'a Loc<Pattern<'a>> {
arena.alloc(Loc {
region: loc_pattern.region,
value: desugar_pattern(arena, loc_pattern.value),
value: desugar_pattern(arena, loc_pattern.value, src, line_info, module_path),
})
}
fn desugar_pattern<'a>(arena: &'a Bump, pattern: Pattern<'a>) -> Pattern<'a> {
fn desugar_pattern<'a>(
arena: &'a Bump,
pattern: Pattern<'a>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> Pattern<'a> {
use roc_parse::ast::Pattern::*;
match pattern {
@ -630,7 +767,7 @@ fn desugar_pattern<'a>(arena: &'a Bump, pattern: Pattern<'a>) -> Pattern<'a> {
let desugared_arg_patterns = Vec::from_iter_in(
arg_patterns.iter().map(|arg_pattern| Loc {
region: arg_pattern.region,
value: desugar_pattern(arena, arg_pattern.value),
value: desugar_pattern(arena, arg_pattern.value, src, line_info, module_path),
}),
arena,
)
@ -639,26 +776,62 @@ fn desugar_pattern<'a>(arena: &'a Bump, pattern: Pattern<'a>) -> Pattern<'a> {
Apply(tag, desugared_arg_patterns)
}
RecordDestructure(field_patterns) => {
RecordDestructure(field_patterns.map_items(arena, |field_pattern| Loc {
region: field_pattern.region,
value: desugar_pattern(arena, field_pattern.value),
}))
let mut allocated = Vec::with_capacity_in(field_patterns.len(), arena);
for field_pattern in field_patterns.iter() {
let value =
desugar_pattern(arena, field_pattern.value, src, line_info, module_path);
allocated.push(Loc {
value,
region: field_pattern.region,
});
}
let field_patterns = field_patterns.replace_items(allocated.into_bump_slice());
RecordDestructure(field_patterns)
}
RequiredField(name, field_pattern) => {
RequiredField(name, desugar_loc_pattern(arena, field_pattern))
RequiredField(name, field_pattern) => RequiredField(
name,
desugar_loc_pattern(arena, field_pattern, src, line_info, module_path),
),
OptionalField(name, expr) => {
OptionalField(name, desugar_expr(arena, expr, src, line_info, module_path))
}
Tuple(patterns) => {
let mut allocated = Vec::with_capacity_in(patterns.len(), arena);
for pattern in patterns.iter() {
let value = desugar_pattern(arena, pattern.value, src, line_info, module_path);
allocated.push(Loc {
value,
region: pattern.region,
});
}
let patterns = patterns.replace_items(allocated.into_bump_slice());
Tuple(patterns)
}
List(patterns) => {
let mut allocated = Vec::with_capacity_in(patterns.len(), arena);
for pattern in patterns.iter() {
let value = desugar_pattern(arena, pattern.value, src, line_info, module_path);
allocated.push(Loc {
value,
region: pattern.region,
});
}
let patterns = patterns.replace_items(allocated.into_bump_slice());
List(patterns)
}
As(sub_pattern, symbol) => As(
desugar_loc_pattern(arena, sub_pattern, src, line_info, module_path),
symbol,
),
SpaceBefore(sub_pattern, _spaces) => {
desugar_pattern(arena, *sub_pattern, src, line_info, module_path)
}
SpaceAfter(sub_pattern, _spaces) => {
desugar_pattern(arena, *sub_pattern, src, line_info, module_path)
}
OptionalField(name, expr) => OptionalField(name, desugar_expr(arena, expr)),
Tuple(patterns) => Tuple(patterns.map_items(arena, |elem_pattern| Loc {
region: elem_pattern.region,
value: desugar_pattern(arena, elem_pattern.value),
})),
List(patterns) => List(patterns.map_items(arena, |elem_pattern| Loc {
region: elem_pattern.region,
value: desugar_pattern(arena, elem_pattern.value),
})),
As(sub_pattern, symbol) => As(desugar_loc_pattern(arena, sub_pattern), symbol),
SpaceBefore(sub_pattern, _spaces) => desugar_pattern(arena, *sub_pattern),
SpaceAfter(sub_pattern, _spaces) => desugar_pattern(arena, *sub_pattern),
}
}
@ -787,19 +960,22 @@ fn desugar_bin_ops<'a>(
whole_region: Region,
lefts: &'a [(Loc<Expr<'_>>, Loc<BinOp>)],
right: &'a Loc<Expr<'_>>,
src: &'a str,
line_info: &mut Option<LineInfo>,
module_path: &str,
) -> &'a Loc<Expr<'a>> {
let mut arg_stack: Vec<&'a Loc<Expr>> = Vec::with_capacity_in(lefts.len() + 1, arena);
let mut op_stack: Vec<Loc<BinOp>> = Vec::with_capacity_in(lefts.len(), arena);
for (loc_expr, loc_op) in lefts {
arg_stack.push(desugar_expr(arena, loc_expr));
arg_stack.push(desugar_expr(arena, loc_expr, src, line_info, module_path));
match run_binop_step(arena, whole_region, &mut arg_stack, &mut op_stack, *loc_op) {
Err(problem) => return problem,
Ok(()) => continue,
}
}
let mut expr = desugar_expr(arena, right);
let mut expr = desugar_expr(arena, right, src, line_info, module_path);
for (left, loc_op) in arg_stack.into_iter().zip(op_stack.into_iter()).rev() {
expr = arena.alloc(new_op_call_expr(arena, left, loc_op, expr));

View file

@ -387,6 +387,8 @@ pub fn walk_expr<V: Visitor>(visitor: &mut V, expr: &Expr, var: Variable) {
}
Expr::Dbg {
variable,
source: _,
source_location: _,
loc_message,
loc_continuation,
symbol: _,

View file

@ -52,7 +52,13 @@ pub fn can_expr_with(arena: &Bump, home: ModuleId, expr_str: &str) -> CanExprOut
// visited a BinOp node we'd recursively try to apply this to each of its nested
// operators, and then again on *their* nested operators, ultimately applying the
// rules multiple times unnecessarily.
let loc_expr = operator::desugar_expr(arena, &loc_expr);
let loc_expr = operator::desugar_expr(
arena,
&loc_expr,
expr_str,
&mut None,
arena.alloc("TestPath"),
);
let mut scope = Scope::new(home, IdentIds::default(), Default::default());
scope.add_alias(

View file

@ -741,6 +741,8 @@ pub fn constrain_expr(
}
Dbg {
source_location: _,
source: _,
loc_message,
loc_continuation,
variable,

View file

@ -302,25 +302,14 @@ fn should_outdent(mut rhs: &TypeAnnotation) -> bool {
}
}
fn fmt_dbg_in_def<'a>(
buf: &mut Buf,
condition: &'a Loc<Expr<'a>>,
is_multiline: bool,
indent: u16,
) {
fn fmt_dbg_in_def<'a>(buf: &mut Buf, condition: &'a Loc<Expr<'a>>, _: bool, indent: u16) {
buf.ensure_ends_with_newline();
buf.indent(indent);
buf.push_str("dbg");
let return_indent = if is_multiline {
buf.newline();
indent + INDENT
} else {
buf.spaces(1);
indent
};
buf.spaces(1);
condition.format(buf, return_indent);
condition.format(buf, indent);
}
fn fmt_expect<'a>(buf: &mut Buf, condition: &'a Loc<Expr<'a>>, is_multiline: bool, indent: u16) {

View file

@ -61,8 +61,8 @@ impl<'a> Formattable for Expr<'a> {
Expect(condition, continuation) => {
condition.is_multiline() || continuation.is_multiline()
}
Dbg(condition, continuation) => condition.is_multiline() || continuation.is_multiline(),
LowLevelDbg(_, _) => unreachable!(
Dbg(condition, _) => condition.is_multiline(),
LowLevelDbg(_, _, _) => unreachable!(
"LowLevelDbg should only exist after desugaring, not during formatting"
),
@ -438,7 +438,7 @@ impl<'a> Formattable for Expr<'a> {
Dbg(condition, continuation) => {
fmt_dbg(buf, condition, continuation, self.is_multiline(), indent);
}
LowLevelDbg(_, _) => unreachable!(
LowLevelDbg(_, _, _) => unreachable!(
"LowLevelDbg should only exist after desugaring, not during formatting"
),
If(branches, final_else) => {
@ -956,22 +956,16 @@ fn fmt_dbg<'a>(
buf: &mut Buf,
condition: &'a Loc<Expr<'a>>,
continuation: &'a Loc<Expr<'a>>,
is_multiline: bool,
_: bool,
indent: u16,
) {
buf.ensure_ends_with_newline();
buf.indent(indent);
buf.push_str("dbg");
let return_indent = if is_multiline {
buf.newline();
indent + INDENT
} else {
buf.spaces(1);
indent
};
buf.spaces(1);
condition.format(buf, return_indent);
condition.format(buf, indent);
// Always put a blank line after the `dbg` line(s)
buf.ensure_ends_with_blank_line();

View file

@ -5,8 +5,8 @@ use roc_parse::{
ast::{
AbilityImpls, AbilityMember, AssignedField, Collection, CommentOrNewline, Defs, Expr,
Header, Implements, ImplementsAbilities, ImplementsAbility, ImplementsClause, Module,
Pattern, RecordBuilderField, Spaced, Spaces, StrLiteral, StrSegment, Tag, TypeAnnotation,
TypeDef, TypeHeader, ValueDef, WhenBranch,
Pattern, PatternAs, RecordBuilderField, Spaced, Spaces, StrLiteral, StrSegment, Tag,
TypeAnnotation, TypeDef, TypeHeader, ValueDef, WhenBranch,
},
header::{
AppHeader, ExposedName, HostedHeader, ImportsEntry, InterfaceHeader, KeywordItem,
@ -726,7 +726,7 @@ impl<'a> RemoveSpaces<'a> for Expr<'a> {
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Expr::LowLevelDbg(_, _) => unreachable!(
Expr::LowLevelDbg(_, _, _) => unreachable!(
"LowLevelDbg should only exist after desugaring, not during formatting"
),
Expr::Apply(a, b, c) => Expr::Apply(
@ -800,9 +800,10 @@ impl<'a> RemoveSpaces<'a> for Pattern<'a> {
Pattern::OptionalField(a, b) => {
Pattern::OptionalField(a, arena.alloc(b.remove_spaces(arena)))
}
Pattern::As(pattern, pattern_as) => {
Pattern::As(arena.alloc(pattern.remove_spaces(arena)), pattern_as)
}
Pattern::As(pattern, pattern_as) => Pattern::As(
arena.alloc(pattern.remove_spaces(arena)),
pattern_as.remove_spaces(arena),
),
Pattern::NumLiteral(a) => Pattern::NumLiteral(a),
Pattern::NonBase10Literal {
string,
@ -826,7 +827,10 @@ impl<'a> RemoveSpaces<'a> for Pattern<'a> {
Pattern::SingleQuote(a) => Pattern::SingleQuote(a),
Pattern::List(pats) => Pattern::List(pats.remove_spaces(arena)),
Pattern::Tuple(pats) => Pattern::Tuple(pats.remove_spaces(arena)),
Pattern::ListRest(opt_pattern_as) => Pattern::ListRest(opt_pattern_as),
Pattern::ListRest(opt_pattern_as) => Pattern::ListRest(
opt_pattern_as
.map(|(_, pattern_as)| ([].as_ref(), pattern_as.remove_spaces(arena))),
),
}
}
}
@ -936,3 +940,12 @@ impl<'a> RemoveSpaces<'a> for ImplementsAbilities<'a> {
}
}
}
impl<'a> RemoveSpaces<'a> for PatternAs<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
PatternAs {
spaces_before: &[],
identifier: self.identifier.remove_spaces(arena),
}
}
}

View file

@ -103,7 +103,7 @@ main =
If this file exists somewhere in the repo as `dbg.roc`, we'll be able to compile an object file by issuing the following command:
```console
# `cargo run --` can be replaces with calling the compiled `roc` cli binary.
# `cargo run --` can be replaced with calling the compiled `roc` cli binary.
$ cargo run -- build --dev main.roc --no-link
```
@ -152,7 +152,7 @@ The output lines contain the hexadecimal representation of the x86 opcodes and f
As a note, when dealing with relocations, please make sure to compile with PIC.
- [Online Assembler](https://defuse.ca/online-x86-assembler.htm#disassembly) -
Useful for seeing the actual bytes generated by assembly instructions.
A lot of time it gives on out of multiple options because x86_64 has many ways to do things.
A lot of time it gives one out of multiple options because x86_64 has many ways to do things.
Also, sometimes it doesn't seem to generate things quite as you expect.
- [Alternative Online Assembler](http://shell-storm.org/online/Online-Assembler-and-Disassembler/) -
Like the previous one, but with more architecture options.

View file

@ -1765,6 +1765,21 @@ trait Backend<'a> {
let intrinsic = bitcode::NUM_IS_MULTIPLE_OF[int_width].to_string();
self.build_fn_call(sym, intrinsic, args, arg_layouts, ret_layout);
}
LowLevel::NumCountLeadingZeroBits => {
let int_width = arg_layouts[0].try_int_width().unwrap();
let intrinsic = bitcode::NUM_COUNT_LEADING_ZERO_BITS[int_width].to_string();
self.build_fn_call(sym, intrinsic, args, arg_layouts, ret_layout);
}
LowLevel::NumCountTrailingZeroBits => {
let int_width = arg_layouts[0].try_int_width().unwrap();
let intrinsic = bitcode::NUM_COUNT_TRAILING_ZERO_BITS[int_width].to_string();
self.build_fn_call(sym, intrinsic, args, arg_layouts, ret_layout);
}
LowLevel::NumCountOneBits => {
let int_width = arg_layouts[0].try_int_width().unwrap();
let intrinsic = bitcode::NUM_COUNT_ONE_BITS[int_width].to_string();
self.build_fn_call(sym, intrinsic, args, arg_layouts, ret_layout);
}
LowLevel::ListSublist => {
// list: RocList,
// alignment: u32,

View file

@ -30,12 +30,25 @@ pub fn call_bitcode_fn<'ctx>(
args: &[BasicValueEnum<'ctx>],
fn_name: &str,
) -> BasicValueEnum<'ctx> {
call_bitcode_fn_help(env, args, fn_name)
let ret = call_bitcode_fn_help(env, args, fn_name)
.try_as_basic_value()
.left()
.unwrap_or_else(|| {
panic!("LLVM error: Did not get return value from bitcode function {fn_name:?}")
})
});
if env.target_info.operating_system == roc_target::OperatingSystem::Windows {
// On windows zig uses a vector type <2xi64> instead of a i128 value
let vec_type = env.context.i64_type().vec_type(2);
if ret.get_type() == vec_type.into() {
return env
.builder
.build_bitcast(ret, env.context.i128_type(), "return_i128")
.unwrap();
}
}
ret
}
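
The Windows special-casing above works because LLVM's `<2 x i64>` vector and `i128` share the same 16-byte representation, so converting the return value is a plain bitcast, and on the argument side an `i128` can be handed over as a pointer to a stack copy. A tiny Rust demonstration of the bit-level equivalence (illustrative; endianness decides which half lands where):

```rust
fn main() {
    let value: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;

    // SAFETY: u128 and [u64; 2] have identical size (16 bytes), so this
    // reinterpretation round-trips losslessly.
    let halves: [u64; 2] = unsafe { std::mem::transmute(value) };
    let roundtrip: u128 = unsafe { std::mem::transmute(halves) };

    assert_eq!(value, roundtrip);
    println!("{value:#034x} <-> {halves:#018x?}");
}
```
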
pub fn call_void_bitcode_fn<'ctx>(
@ -54,7 +67,35 @@ fn call_bitcode_fn_help<'ctx>(
args: &[BasicValueEnum<'ctx>],
fn_name: &str,
) -> CallSiteValue<'ctx> {
let it = args.iter().map(|x| (*x).into());
let it = args
.iter()
.map(|x| {
if env.target_info.operating_system == roc_target::OperatingSystem::Windows {
if x.get_type() == env.context.i128_type().into() {
let parent = env
.builder
.get_insert_block()
.and_then(|b| b.get_parent())
.unwrap();
let alloca = create_entry_block_alloca(
env,
parent,
x.get_type(),
"pass_u128_by_reference",
);
env.builder.build_store(alloca, *x).unwrap();
alloca.into()
} else {
*x
}
} else {
*x
}
})
.map(|x| (x).into());
let arguments = bumpalo::collections::Vec::from_iter_in(it, env.arena);
let fn_val = env

View file

@ -710,7 +710,7 @@ impl LlvmBackendMode {
match self {
LlvmBackendMode::Binary => false,
LlvmBackendMode::BinaryDev => false,
LlvmBackendMode::BinaryGlue => false,
LlvmBackendMode::BinaryGlue => true,
LlvmBackendMode::GenTest => true,
LlvmBackendMode::WasmGenTest => true,
LlvmBackendMode::CliTest => true,
@ -910,16 +910,19 @@ impl<'a, 'ctx, 'env> Env<'a, 'ctx, 'env> {
&self,
env: &Env<'a, 'ctx, 'env>,
location: BasicValueEnum<'ctx>,
source: BasicValueEnum<'ctx>,
message: BasicValueEnum<'ctx>,
) {
let function = self.module.get_function("roc_dbg").unwrap();
let loc = self.string_to_arg(env, location);
let src = self.string_to_arg(env, source);
let msg = self.string_to_arg(env, message);
let call = self
.builder
.new_build_call(function, &[loc.into(), msg.into()], "roc_dbg");
// TODO: at some point, flip the order to (loc, src, msg); doing so will be a breaking change
let call =
self.builder
.new_build_call(function, &[loc.into(), msg.into(), src.into()], "roc_dbg");
call.set_call_convention(C_CALL_CONV);
}
@ -1055,6 +1058,55 @@ pub fn module_from_builtins<'ctx>(
let module = Module::parse_bitcode_from_buffer(&memory_buffer, ctx)
.unwrap_or_else(|err| panic!("Unable to import builtins bitcode. LLVM error: {err:?}"));
// In testing, this adds about 20ms extra to compilation.
// Long term it would be best if we could do this on the zig side.
// The core issue is that we have to properly label certain functions as private and DCE them.
// Otherwise, now that zig bundles all of compiler-rt, we would optimize and compile the entire library.
// Anything not depended on by a `roc_builtin.` function could already be DCE'd theoretically.
// That said, this workaround is good enough and fixes compilation times.
// Also, must_keep lists the functions we depend on that would normally be provided by libc.
// They are magically linked to by llvm builtins, so we must specify that they can't be DCE'd.
let must_keep = [
"_fltused",
"floorf",
"memcpy",
"memset",
// I have no idea why this function is special.
// Without it, some tests hang on M1 mac outside of nix.
"__muloti4",
// fixes `Undefined Symbol in relocation`
"__udivti3",
// Roc special functions
"__roc_force_longjmp",
"__roc_force_setjmp",
"set_shared_buffer",
];
for func in module.get_functions() {
let has_definition = func.count_basic_blocks() > 0;
let name = func.get_name().to_string_lossy();
if has_definition
&& !name.starts_with("roc_builtins.")
&& !must_keep.contains(&name.as_ref())
{
func.set_linkage(Linkage::Private);
}
}
// Note, running DCE here is faster than waiting until full app DCE.
let mpm = PassManager::create(());
mpm.add_global_dce_pass();
mpm.run_on(&module);
// Now that the unused compiler-rt functions have been removed,
// mark that the builtin functions are allowed to be DCE'd if they aren't used.
for func in module.get_functions() {
let name = func.get_name().to_string_lossy();
if name.starts_with("roc_builtins.") {
func.set_linkage(Linkage::Private);
}
}
// Add LLVM intrinsics.
add_intrinsics(ctx, &module);
@ -1227,6 +1279,8 @@ fn promote_to_wasm_test_wrapper<'a, 'ctx>(
let subprogram = env.new_subprogram(main_fn_name);
c_function.set_subprogram(subprogram);
debug_info_init!(env, c_function);
// STEP 2: build the exposed function's body
let builder = env.builder;
let context = env.context;
@ -3527,17 +3581,17 @@ pub(crate) fn build_exp_stmt<'a, 'ctx>(
}
Dbg {
source_location,
source,
symbol,
variable: _,
remainder,
} => {
if env.mode.runs_expects() {
// TODO: Change location to `filename:line_number`
// let region = unsafe { std::mem::transmute::<_, roc_region::all::Region>(*symbol) };
let location =
build_string_literal(env, parent, symbol.module_string(&env.interns));
let location = build_string_literal(env, parent, source_location);
let source = build_string_literal(env, parent, source);
let message = scope.load_symbol(symbol);
env.call_dbg(env, location, message);
env.call_dbg(env, location, source, message);
}
build_exp_stmt(
@ -4363,6 +4417,8 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
let subprogram = env.new_subprogram(c_function_name);
c_function.set_subprogram(subprogram);
debug_info_init!(env, c_function);
// STEP 2: build the exposed function's body
let builder = env.builder;
let context = env.context;
@ -4371,8 +4427,6 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
builder.position_at_end(entry);
debug_info_init!(env, c_function);
// drop the first argument, which is the pointer we write the result into
let args_vector = c_function.get_params();
let mut args = args_vector.as_slice();
@ -4413,29 +4467,68 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
}
}
let arguments_for_call = &arguments_for_call.into_bump_slice();
let call_result = if env.mode.returns_roc_result() {
debug_assert_eq!(args.len(), roc_function.get_params().len());
if args.len() == roc_function.get_params().len() {
let arguments_for_call = &arguments_for_call.into_bump_slice();
let roc_wrapper_function =
make_exception_catcher(env, layout_interner, roc_function, return_layout);
debug_assert_eq!(
arguments_for_call.len(),
roc_wrapper_function.get_params().len()
);
let dbg_loc = builder.get_current_debug_location().unwrap();
let roc_wrapper_function =
make_exception_catcher(env, layout_interner, roc_function, return_layout);
debug_assert_eq!(
arguments_for_call.len(),
roc_wrapper_function.get_params().len()
);
builder.position_at_end(entry);
builder.position_at_end(entry);
builder.set_current_debug_location(dbg_loc);
let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
call_direct_roc_function(
env,
layout_interner,
roc_function,
wrapped_layout,
arguments_for_call,
)
let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
call_direct_roc_function(
env,
layout_interner,
roc_function,
wrapped_layout,
arguments_for_call,
)
} else {
debug_assert_eq!(args.len() + 1, roc_function.get_params().len());
arguments_for_call.push(args[0]);
let arguments_for_call = &arguments_for_call.into_bump_slice();
let dbg_loc = builder.get_current_debug_location().unwrap();
let roc_wrapper_function =
make_exception_catcher(env, layout_interner, roc_function, return_layout);
builder.position_at_end(entry);
builder.set_current_debug_location(dbg_loc);
let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
let call_result = call_direct_roc_function(
env,
layout_interner,
roc_wrapper_function,
wrapped_layout,
arguments_for_call,
);
let output_arg_index = 0;
let output_arg = c_function
.get_nth_param(output_arg_index as u32)
.unwrap()
.into_pointer_value();
env.builder.new_build_store(output_arg, call_result);
builder.new_build_return(None);
return c_function;
}
} else {
let arguments_for_call = &arguments_for_call.into_bump_slice();
call_direct_roc_function(
env,
layout_interner,
@ -4459,6 +4552,7 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
output_arg,
call_result,
);
builder.new_build_return(None);
c_function
@ -4511,6 +4605,8 @@ fn expose_function_to_host_help_c_abi_gen_test<'a, 'ctx>(
let subprogram = env.new_subprogram(c_function_name);
c_function.set_subprogram(subprogram);
debug_info_init!(env, c_function);
// STEP 2: build the exposed function's body
let builder = env.builder;
let context = env.context;
@ -4519,8 +4615,6 @@ fn expose_function_to_host_help_c_abi_gen_test<'a, 'ctx>(
builder.position_at_end(entry);
debug_info_init!(env, c_function);
// drop the final argument, which is the pointer we write the result into
let args_vector = c_function.get_params();
let mut args = args_vector.as_slice();
@ -4567,10 +4661,12 @@ fn expose_function_to_host_help_c_abi_gen_test<'a, 'ctx>(
let (call_result, call_result_layout) = {
let last_block = builder.get_insert_block().unwrap();
let dbg_loc = builder.get_current_debug_location().unwrap();
let roc_wrapper_function =
make_exception_catcher(env, layout_interner, roc_function, return_layout);
builder.position_at_end(last_block);
builder.set_current_debug_location(dbg_loc);
let wrapper_result = roc_call_result_layout(env.arena, return_layout);
@ -4622,12 +4718,12 @@ fn expose_function_to_host_help_c_abi_gen_test<'a, 'ctx>(
let subprogram = env.new_subprogram(&size_function_name);
size_function.set_subprogram(subprogram);
debug_info_init!(env, size_function);
let entry = context.append_basic_block(size_function, "entry");
builder.position_at_end(entry);
debug_info_init!(env, size_function);
let size: BasicValueEnum = return_type.size_of().unwrap().into();
builder.new_build_return(Some(&size));
@ -4713,6 +4809,8 @@ fn expose_function_to_host_help_c_abi_v2<'a, 'ctx>(
let subprogram = env.new_subprogram(c_function_name);
c_function.set_subprogram(subprogram);
debug_info_init!(env, c_function);
// STEP 2: build the exposed function's body
let builder = env.builder;
let context = env.context;
@ -4941,12 +5039,12 @@ fn expose_function_to_host_help_c_abi<'a, 'ctx>(
let subprogram = env.new_subprogram(&size_function_name);
size_function.set_subprogram(subprogram);
debug_info_init!(env, size_function);
let entry = env.context.append_basic_block(size_function, "entry");
env.builder.position_at_end(entry);
debug_info_init!(env, size_function);
let return_type = match env.mode {
LlvmBackendMode::GenTest | LlvmBackendMode::WasmGenTest | LlvmBackendMode::CliTest => {
roc_call_result_type(env, roc_function.get_type().get_return_type().unwrap()).into()
@ -5343,6 +5441,8 @@ fn make_exception_catching_wrapper<'a, 'ctx>(
let subprogram = env.new_subprogram(wrapper_function_name);
wrapper_function.set_subprogram(subprogram);
debug_info_init!(env, wrapper_function);
// The exposed main function must adhere to the C calling convention, but the wrapper can still be fastcc.
wrapper_function.set_call_conventions(FAST_CALL_CONV);
@ -5819,6 +5919,8 @@ fn build_proc_header<'a, 'ctx>(
let subprogram = env.new_subprogram(&fn_name);
fn_val.set_subprogram(subprogram);
debug_info_init!(env, fn_val);
if env.exposed_to_host.contains(&symbol) {
let arguments = Vec::from_iter_in(proc.args.iter().map(|(layout, _)| *layout), env.arena);
expose_function_to_host(

View file

@ -450,7 +450,7 @@ pub(crate) fn list_capacity_or_ref_ptr<'ctx>(
// Gets a pointer to just after the refcount for a list or seamless slice.
// The value is just after the refcount so that normal lists and seamless slices can share code paths easily.
pub(crate) fn list_refcount_ptr<'ctx>(
pub(crate) fn list_allocation_ptr<'ctx>(
env: &Env<'_, 'ctx, '_>,
wrapper_struct: StructValue<'ctx>,
) -> PointerValue<'ctx> {
@ -459,7 +459,7 @@ pub(crate) fn list_refcount_ptr<'ctx>(
&[wrapper_struct],
&[],
BitcodeReturns::Basic,
bitcode::LIST_REFCOUNT_PTR,
bitcode::LIST_ALLOCATION_PTR,
)
.into_pointer_value()
}
@ -864,7 +864,7 @@ pub(crate) fn decref<'ctx>(
wrapper_struct: StructValue<'ctx>,
alignment: u32,
) {
let refcount_ptr = list_refcount_ptr(env, wrapper_struct);
let refcount_ptr = list_allocation_ptr(env, wrapper_struct);
crate::llvm::refcounting::decref_pointer_check_null(env, refcount_ptr, alignment);
}

View file

@ -48,7 +48,7 @@ pub(crate) fn str_equal<'ctx>(
// Gets a pointer to just after the refcount for a list or seamless slice.
// The value is just after the refcount so that normal lists and seamless slices can share code paths easily.
pub(crate) fn str_refcount_ptr<'ctx>(
pub(crate) fn str_allocation_ptr<'ctx>(
env: &Env<'_, 'ctx, '_>,
value: BasicValueEnum<'ctx>,
) -> PointerValue<'ctx> {
@ -57,7 +57,7 @@ pub(crate) fn str_refcount_ptr<'ctx>(
&[value],
&[],
BitcodeReturns::Basic,
bitcode::STR_REFCOUNT_PTR,
bitcode::STR_ALLOCATION_PTR,
)
.into_pointer_value()
}
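The renames above (str_refcount_ptr to str_allocation_ptr, and its list twin) make it explicit that the returned pointer identifies the allocation, not just the refcount slot. A purely illustrative sketch of the layout assumption behind that, not compiler code: Roc strings and lists keep the refcount in the word immediately before the element data, so the refcount is reachable from the data pointer at a fixed negative offset.

//   [ refcount : usize ][ elem 0 ][ elem 1 ] ...
//                       ^ data pointer ("just after the refcount")
fn refcount_ptr(data_ptr: *const u8) -> *const usize {
    // step back one usize from the data to land on the refcount
    unsafe { (data_ptr as *const usize).sub(1) }
}

fn main() {
    let buf: [usize; 3] = [1, 0xAA, 0xBB]; // refcount 1, then two "elements"
    let data = unsafe { buf.as_ptr().add(1) } as *const u8;
    assert_eq!(unsafe { *refcount_ptr(data) }, 1);
}
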

View file

@ -469,6 +469,8 @@ fn build_clone_tag<'a, 'ctx>(
let subprogram = env.new_subprogram(&fn_name);
function_value.set_subprogram(subprogram);
debug_info_init!(env, function_value);
env.dibuilder.finalize();
build_clone_tag_help(

View file

@ -1,3 +1,4 @@
use crate::debug_info_init;
use crate::llvm::bitcode::call_void_bitcode_fn;
use crate::llvm::build::{add_func, get_panic_msg_ptr, get_panic_tag_ptr, BuilderExt, C_CALL_CONV};
use crate::llvm::build::{CCReturn, Env, FunctionSpec};
@ -160,8 +161,39 @@ pub fn add_default_roc_externs(env: &Env<'_, '_, '_>) {
}
}
// TODO: generate a valid impl of dbg here.
unreachable_function(env, "roc_dbg");
// roc_dbg
{
// The type of this function (but not the implementation) should have
// already been defined by the builtins, which rely on it.
let fn_val = module.get_function("roc_dbg").unwrap();
let mut params = fn_val.get_param_iter();
let loc_arg = params.next().unwrap();
let msg_arg = params.next().unwrap();
let src_arg = params.next().unwrap();
debug_assert!(params.next().is_none());
// Add a basic block for the entry point
let entry = ctx.append_basic_block(fn_val, "entry");
builder.position_at_end(entry);
// Call utils.dbg_impl()
let dbg_impl = module.get_function(bitcode::UTILS_DBG_IMPL).unwrap();
let call = builder.new_build_call(
dbg_impl,
&[loc_arg.into(), msg_arg.into(), src_arg.into()],
"call_utils_dbg_impl",
);
call.set_call_convention(C_CALL_CONV);
builder.new_build_return(None);
if cfg!(debug_assertions) {
crate::llvm::build::verify_fn(fn_val);
}
}
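Rendered as ordinary code, the shim built above is nothing more than a forwarding call. This is an illustration rather than generated source, and the extern name merely stands in for whatever symbol bitcode::UTILS_DBG_IMPL resolves to:

use core::ffi::c_void;

extern "C" {
    // stand-in for the zig-provided bitcode::UTILS_DBG_IMPL symbol
    fn roc_builtins_utils_dbg_impl(loc: *mut c_void, msg: *mut c_void, src: *mut c_void);
}

// The generated roc_dbg just forwards its three arguments.
#[no_mangle]
pub unsafe extern "C" fn roc_dbg(loc: *mut c_void, msg: *mut c_void, src: *mut c_void) {
    roc_builtins_utils_dbg_impl(loc, msg, src)
}
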
match env.target_info.operating_system {
roc_target::OperatingSystem::Windows => {
@ -223,6 +255,8 @@ pub fn add_sjlj_roc_panic(env: &Env<'_, '_, '_>) {
let subprogram = env.new_subprogram("roc_panic");
fn_val.set_subprogram(subprogram);
debug_info_init!(env, fn_val);
env.dibuilder.finalize();
// Add a basic block for the entry point

View file

@ -80,10 +80,6 @@ pub(crate) fn add_intrinsics<'ctx>(ctx: &'ctx Context, module: &Module<'ctx>) {
let i32_type = ctx.i32_type();
let void_type = ctx.void_type();
if let Some(func) = module.get_function("__muloti4") {
func.set_linkage(Linkage::WeakAny);
}
add_intrinsic(
ctx,
module,

View file

@ -1100,13 +1100,22 @@ pub(crate) fn run_low_level<'a, 'ctx>(
NumBytesToU128 => {
arguments!(list, position);
call_list_bitcode_fn(
let ret = call_list_bitcode_fn(
env,
&[list.into_struct_value()],
&[position],
BitcodeReturns::Basic,
bitcode::NUM_BYTES_TO_U128,
)
);
if env.target_info.operating_system == roc_target::OperatingSystem::Windows {
// On Windows the return type is not an i128, likely due to alignment
env.builder
.build_bitcast(ret, env.context.i128_type(), "empty_string")
.unwrap()
} else {
ret
}
}
NumCompare => {
arguments_with_layouts!((lhs_arg, lhs_layout), (rhs_arg, rhs_layout));
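At the value level that bitcast is a pure reinterpretation: the same 16 bytes, retyped as i128. A worked sketch of the idea, under our assumption that the Windows return slot arrives as two 64-bit halves:

// Recombine two little-endian 64-bit halves into one u128 (x86-64 layout).
fn reinterpret_as_u128(halves: [u64; 2]) -> u128 {
    u128::from(halves[0]) | (u128::from(halves[1]) << 64)
}

fn main() {
    assert_eq!(reinterpret_as_u128([1, 2]), 1 | (2u128 << 64));
}
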
@ -1535,7 +1544,7 @@ fn build_int_binop<'ctx>(
)
.into_struct_value();
throw_on_overflow(env, parent, result, "integer addition overflowed!")
throw_on_overflow(env, parent, result, "Integer addition overflowed!")
}
NumAddWrap => bd.new_build_int_add(lhs, rhs, "add_int_wrap").into(),
NumAddChecked => {
@ -1566,7 +1575,7 @@ fn build_int_binop<'ctx>(
)
.into_struct_value();
throw_on_overflow(env, parent, result, "integer subtraction overflowed!")
throw_on_overflow(env, parent, result, "Integer subtraction overflowed!")
}
NumSubWrap => bd.new_build_int_sub(lhs, rhs, "sub_int").into(),
NumSubChecked => {
@ -1597,7 +1606,7 @@ fn build_int_binop<'ctx>(
)
.into_struct_value();
throw_on_overflow(env, parent, result, "integer multiplication overflowed!")
throw_on_overflow(env, parent, result, "Integer multiplication overflowed!")
}
NumMulWrap => bd.new_build_int_mul(lhs, rhs, "mul_int").into(),
NumMulSaturated => call_bitcode_fn(
@ -2350,7 +2359,7 @@ fn build_dec_binop<'a, 'ctx>(
bitcode::DEC_ADD_WITH_OVERFLOW,
lhs,
rhs,
"decimal addition overflowed",
"Decimal addition overflowed",
),
NumSub => build_dec_binop_throw_on_overflow(
env,
@ -2358,7 +2367,7 @@ fn build_dec_binop<'a, 'ctx>(
bitcode::DEC_SUB_WITH_OVERFLOW,
lhs,
rhs,
"decimal subtraction overflowed",
"Decimal subtraction overflowed",
),
NumMul => build_dec_binop_throw_on_overflow(
env,
@ -2366,7 +2375,7 @@ fn build_dec_binop<'a, 'ctx>(
bitcode::DEC_MUL_WITH_OVERFLOW,
lhs,
rhs,
"decimal multiplication overflowed",
"Decimal multiplication overflowed",
),
NumDivFrac => dec_binop_with_unchecked(env, bitcode::DEC_DIV, lhs, rhs),
@ -2596,7 +2605,16 @@ fn build_int_unary_op<'a, 'ctx, 'env>(
}
}
PtrWidth::Bytes8 => {
if target_int_width.stack_size() as usize > env.target_info.ptr_size() {
let return_by_pointer = {
if env.target_info.operating_system
== roc_target::OperatingSystem::Windows
{
target_int_width.stack_size() as usize >= env.target_info.ptr_size()
} else {
target_int_width.stack_size() as usize > env.target_info.ptr_size()
}
};
if return_by_pointer {
let bitcode_return_type =
zig_to_int_checked_result_type(env, target_int_width.type_name());
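Plugging concrete sizes into that predicate (a worked example of ours, not code from the diff): with an 8-byte pointer, a checked u64 result trips the Windows branch (8 >= 8) but not the other targets (8 > 8 is false), while a 16-byte u128 is returned by pointer everywhere.

fn return_by_pointer(is_windows: bool, int_stack_size: usize, ptr_size: usize) -> bool {
    if is_windows {
        int_stack_size >= ptr_size // Windows: pointer-sized ints already go by pointer
    } else {
        int_stack_size > ptr_size // elsewhere: only wider-than-pointer ints
    }
}

fn main() {
    assert!(return_by_pointer(true, 8, 8));   // u64 on Windows: by pointer
    assert!(!return_by_pointer(false, 8, 8)); // u64 on Linux/macOS: by value
    assert!(return_by_pointer(false, 16, 8)); // u128: by pointer everywhere
}
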
@ -2659,7 +2677,7 @@ fn int_neg_raise_on_overflow<'ctx>(
throw_internal_exception(
env,
parent,
"integer negation overflowed because its argument is the minimum value",
"Integer negation overflowed because its argument is the minimum value",
);
builder.position_at_end(else_block);
@ -2690,7 +2708,7 @@ fn int_abs_raise_on_overflow<'ctx>(
throw_internal_exception(
env,
parent,
"integer absolute overflowed because its argument is the minimum value",
"Integer absolute overflowed because its argument is the minimum value",
);
builder.position_at_end(else_block);

View file

@ -5,9 +5,9 @@ use crate::llvm::build::{
add_func, cast_basic_basic, get_tag_id, tag_pointer_clear_tag_id, Env, FAST_CALL_CONV,
};
use crate::llvm::build_list::{
incrementing_elem_loop, list_capacity_or_ref_ptr, list_refcount_ptr, load_list,
incrementing_elem_loop, list_allocation_ptr, list_capacity_or_ref_ptr, load_list,
};
use crate::llvm::build_str::str_refcount_ptr;
use crate::llvm::build_str::str_allocation_ptr;
use crate::llvm::convert::{basic_type_from_layout, zig_str_type, RocUnion};
use crate::llvm::struct_::RocStruct;
use bumpalo::collections::Vec;
@ -156,6 +156,8 @@ impl<'ctx> PointerToRefcount<'ctx> {
let subprogram = env.new_subprogram(fn_name);
function_value.set_subprogram(subprogram);
debug_info_init!(env, function_value);
Self::build_decrement_function_body(env, function_value, alignment);
function_value
@ -864,7 +866,7 @@ fn modify_refcount_list_help<'a, 'ctx>(
}
let refcount_ptr =
PointerToRefcount::from_ptr_to_data(env, list_refcount_ptr(env, original_wrapper));
PointerToRefcount::from_ptr_to_data(env, list_allocation_ptr(env, original_wrapper));
let call_mode = mode_to_call_mode(fn_val, mode);
refcount_ptr.modify(call_mode, layout, env, layout_interner);
@ -971,7 +973,7 @@ fn modify_refcount_str_help<'a, 'ctx>(
builder.new_build_conditional_branch(is_big_and_non_empty, modification_block, cont_block);
builder.position_at_end(modification_block);
let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, str_refcount_ptr(env, arg_val));
let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, str_allocation_ptr(env, arg_val));
let call_mode = mode_to_call_mode(fn_val, mode);
refcount_ptr.modify(
call_mode,
@ -1049,6 +1051,8 @@ pub fn build_header_help<'ctx>(
let subprogram = env.new_subprogram(fn_name);
fn_val.set_subprogram(subprogram);
debug_info_init!(env, fn_val);
env.dibuilder.finalize();
fn_val

View file

@ -290,7 +290,7 @@ mod dummy_platform_functions {
}
#[no_mangle]
pub unsafe extern "C" fn roc_dbg(_loc: *mut c_void, _msg: *mut c_void) {
pub unsafe extern "C" fn roc_dbg(_loc: *mut c_void, _msg: *mut c_void, _src: *mut c_void) {
unimplemented!("It is not valid to call roc dbg from within the compiler. Please use the \"platform\" feature if this is a platform.")
}

View file

@ -1382,7 +1382,7 @@ impl<'a> LowLevelCall<'a> {
}
NumAbs => {
const PANIC_MSG: &str =
"integer absolute overflowed because its argument is the minimum value";
"Integer absolute overflowed because its argument is the minimum value";
self.load_args(backend);
@ -1446,7 +1446,7 @@ impl<'a> LowLevelCall<'a> {
}
NumNeg => {
const PANIC_MSG: &str =
"integer negation overflowed because its argument is the minimum value";
"Integer negation overflowed because its argument is the minimum value";
self.load_args(backend);
match CodeGenNumType::from(self.ret_layout) {

View file

@ -166,7 +166,13 @@ pub fn can_expr_with<'a>(
// visited a BinOp node we'd recursively try to apply this to each of its nested
// operators, and then again on *their* nested operators, ultimately applying the
// rules multiple times unnecessarily.
let loc_expr = operator::desugar_expr(arena, &loc_expr);
let loc_expr = operator::desugar_expr(
arena,
&loc_expr,
expr_str,
&mut None,
arena.alloc("TestPath"),
);
let mut scope = Scope::new(home, IdentIds::default(), Default::default());

View file

@ -371,6 +371,18 @@ fn start_phase<'a>(
checkmate: _,
} = typechecked;
let our_exposed_types = state
.exposed_types
.get(&module_id)
.unwrap_or_else(|| internal_error!("Exposed types for {:?} missing", module_id))
.clone();
state.world_abilities.insert(
module_id,
abilities_store.clone(),
our_exposed_types.exposed_types_storage_subs,
);
let mut imported_module_thunks = bumpalo::collections::Vec::new_in(arena);
if let Some(imports) = state.module_cache.imports.get(&module_id) {
@ -397,7 +409,7 @@ fn start_phase<'a>(
decls,
ident_ids,
exposed_to_host: state.exposed_to_host.clone(),
abilities_store,
world_abilities: state.world_abilities.clone_ref(),
// TODO: awful, how can we get rid of the clone?
exposed_by_module: state.exposed_types.clone(),
derived_module,
@ -457,23 +469,8 @@ fn start_phase<'a>(
procs_base,
layout_cache,
module_timing,
abilities_store,
expectations,
} = found_specializations;
let our_exposed_types = state
.exposed_types
.get(&module_id)
.unwrap_or_else(|| {
internal_error!("Exposed types for {:?} missing", module_id)
})
.clone();
// Add our abilities to the world.
state.world_abilities.insert(
module_id,
abilities_store,
our_exposed_types.exposed_types_storage_subs,
);
(
ident_ids,
@ -598,7 +595,6 @@ enum Msg<'a> {
procs_base: ProcsBase<'a>,
solved_subs: Solved<Subs>,
module_timing: ModuleTiming,
abilities_store: AbilitiesStore,
toplevel_expects: ToplevelExpects,
expectations: Option<Expectations>,
},
@ -688,6 +684,7 @@ impl MakeSpecializationsPass {
struct State<'a> {
pub root_id: ModuleId,
pub root_subs: Option<Subs>,
pub root_path: PathBuf,
pub cache_dir: PathBuf,
/// If the root is an app module, the shorthand specified in its header's `to` field
pub opt_platform_shorthand: Option<&'a str>,
@ -756,6 +753,7 @@ impl<'a> State<'a> {
fn new(
root_id: ModuleId,
root_path: PathBuf,
opt_platform_shorthand: Option<&'a str>,
target_info: TargetInfo,
function_kind: FunctionKind,
@ -774,6 +772,7 @@ impl<'a> State<'a> {
Self {
root_id,
root_path,
root_subs: None,
opt_platform_shorthand,
cache_dir,
@ -906,7 +905,7 @@ enum BuildTask<'a> {
decls: Declarations,
exposed_to_host: ExposedToHost,
exposed_by_module: ExposedByModule,
abilities_store: AbilitiesStore,
world_abilities: WorldAbilities,
derived_module: SharedDerivedModule,
expectations: Option<Expectations>,
build_expects: bool,
@ -1081,8 +1080,9 @@ pub struct LoadStart<'a> {
arc_modules: Arc<Mutex<PackageModuleIds<'a>>>,
ident_ids_by_module: SharedIdentIdsByModule,
root_id: ModuleId,
opt_platform_shorthand: Option<&'a str>,
root_path: PathBuf,
root_msg: Msg<'a>,
opt_platform_shorthand: Option<&'a str>,
src_dir: PathBuf,
}
@ -1105,7 +1105,7 @@ impl<'a> LoadStart<'a> {
let res_loaded = load_filename(
arena,
filename,
filename.clone(),
true,
None,
None,
@ -1140,6 +1140,7 @@ impl<'a> LoadStart<'a> {
ident_ids_by_module,
src_dir,
root_id: header_output.module_id,
root_path: filename,
root_msg: header_output.msg,
opt_platform_shorthand: header_output.opt_platform_shorthand,
})
@ -1166,7 +1167,7 @@ impl<'a> LoadStart<'a> {
let header_output = load_from_str(
arena,
filename,
filename.clone(),
src,
Arc::clone(&arc_modules),
Arc::clone(&ident_ids_by_module),
@ -1182,6 +1183,7 @@ impl<'a> LoadStart<'a> {
src_dir,
ident_ids_by_module,
root_id,
root_path: filename,
root_msg,
opt_platform_shorthand: opt_platform_id,
})
@ -1356,6 +1358,7 @@ pub fn load_single_threaded<'a>(
arc_modules,
ident_ids_by_module,
root_id,
root_path,
root_msg,
src_dir,
opt_platform_shorthand,
@ -1371,6 +1374,7 @@ pub fn load_single_threaded<'a>(
let number_of_workers = 1;
let mut state = State::new(
root_id,
root_path,
opt_platform_shorthand,
target_info,
function_kind,
@ -1507,7 +1511,7 @@ fn state_thread_step<'a>(
Ok(ControlFlow::Break(LoadResult::Monomorphized(monomorphized)))
}
Msg::FailedToReadFile { filename, error } => {
let buf = to_file_problem_report_string(&filename, error);
let buf = to_file_problem_report_string(filename, error);
Err(LoadingProblem::FormattedReport(buf))
}
@ -1658,7 +1662,7 @@ pub fn report_loading_problem(
}
LoadingProblem::FormattedReport(report) => report,
LoadingProblem::FileProblem { filename, error } => {
to_file_problem_report_string(&filename, error)
to_file_problem_report_string(filename, error)
}
err => todo!("Loading error: {:?}", err),
}
@ -1681,6 +1685,7 @@ fn load_multi_threaded<'a>(
arc_modules,
ident_ids_by_module,
root_id,
root_path,
root_msg,
src_dir,
opt_platform_shorthand,
@ -1711,6 +1716,7 @@ fn load_multi_threaded<'a>(
let mut state = State::new(
root_id,
root_path,
opt_platform_shorthand,
target_info,
function_kind,
@ -2242,6 +2248,7 @@ fn update<'a>(
let buf = to_https_problem_report_string(
url,
Problem::InvalidUrl(url_err),
header.module_path,
);
return Err(LoadingProblem::FormattedReport(buf));
}
@ -2730,7 +2737,6 @@ fn update<'a>(
ident_ids,
layout_cache,
module_timing,
abilities_store,
toplevel_expects,
expectations,
} => {
@ -2754,7 +2760,6 @@ fn update<'a>(
procs_base,
subs,
module_timing,
abilities_store,
expectations,
};
@ -3165,7 +3170,7 @@ fn finish_specialization<'a>(
}
Valid(To::NewPackage(p_or_p)) => PathBuf::from(p_or_p.as_str()),
other => {
let buf = to_missing_platform_report(state.root_id, other);
let buf = report_cannot_run(state.root_id, state.root_path, other);
return Err(LoadingProblem::FormattedReport(buf));
}
};
@ -3519,9 +3524,7 @@ fn load_builtin_module_help<'a>(
) -> (HeaderInfo<'a>, roc_parse::state::State<'a>) {
let is_root_module = false;
let opt_shorthand = None;
let filename = PathBuf::from(filename);
let parse_state = roc_parse::state::State::new(src_bytes.as_bytes());
let parsed = roc_parse::module::parse_header(arena, parse_state.clone());
@ -3974,6 +3977,7 @@ fn parse_header<'a>(
module_timing,
)?;
let filename = resolved_header.module_path.clone();
let mut messages = Vec::with_capacity(packages.len() + 1);
// It's important that the app header is first in the list!
@ -3988,6 +3992,7 @@ fn parse_header<'a>(
module_id,
module_ids,
ident_ids_by_module,
filename,
);
// Look at the app module's `to` keyword to determine which package was the platform.
@ -4090,6 +4095,7 @@ fn load_packages<'a>(
module_id: ModuleId,
module_ids: Arc<Mutex<PackageModuleIds<'a>>>,
ident_ids_by_module: SharedIdentIdsByModule,
filename: PathBuf,
) {
// Load all the packages
for Loc { value: entry, .. } in packages.iter() {
@ -4127,7 +4133,7 @@ fn load_packages<'a>(
}
}
Err(problem) => {
let buf = to_https_problem_report_string(src, problem);
let buf = to_https_problem_report_string(src, problem, filename);
load_messages.push(Msg::FailedToLoad(LoadingProblem::FormattedReport(buf)));
return;
@ -5264,6 +5270,8 @@ fn canonicalize_and_constrain<'a>(
let ParsedModule {
module_id,
module_path,
src,
header_type,
exposed_ident_ids,
parsed_defs,
@ -5286,6 +5294,8 @@ fn canonicalize_and_constrain<'a>(
parsed_defs,
&header_type,
module_id,
&*arena.alloc(module_path.to_string_lossy()),
src,
module_ids,
exposed_ident_ids,
&dep_idents,
@ -5701,7 +5711,7 @@ fn build_pending_specializations<'a>(
target_info: TargetInfo,
exposed_to_host: ExposedToHost,
exposed_by_module: &ExposedByModule,
abilities_store: AbilitiesStore,
world_abilities: WorldAbilities,
derived_module: SharedDerivedModule,
mut expectations: Option<Expectations>,
build_expects: bool,
@ -5734,7 +5744,7 @@ fn build_pending_specializations<'a>(
// NB: for getting pending specializations the module view is enough because we only need
// to know the types and abilities in our modules. Only for building *all* specializations
// do we need a global view.
abilities: AbilitiesView::Module(&abilities_store),
abilities: AbilitiesView::World(&world_abilities),
exposed_by_module,
derived_module: &derived_module,
struct_indexing: UsageTrackingMap::default(),
@ -6134,7 +6144,6 @@ fn build_pending_specializations<'a>(
layout_cache,
procs_base,
module_timing,
abilities_store,
toplevel_expects,
expectations,
}
@ -6374,7 +6383,7 @@ fn run_task<'a>(
solved_subs,
imported_module_thunks,
exposed_to_host,
abilities_store,
world_abilities,
exposed_by_module,
derived_module,
expectations,
@ -6391,7 +6400,7 @@ fn run_task<'a>(
target_info,
exposed_to_host,
&exposed_by_module,
abilities_store,
world_abilities,
derived_module,
expectations,
build_expects,
@ -6584,7 +6593,11 @@ fn to_parse_problem_report<'a>(
buf
}
fn to_missing_platform_report(module_id: ModuleId, other: &PlatformPath) -> String {
fn report_cannot_run(
module_id: ModuleId,
filename: PathBuf,
platform_path: &PlatformPath,
) -> String {
use roc_reporting::report::{Report, RocDocAllocator, DEFAULT_PALETTE};
use ven_pretty::DocAllocator;
use PlatformPath::*;
@ -6594,20 +6607,20 @@ fn to_missing_platform_report(module_id: ModuleId, other: &PlatformPath) -> Stri
let alloc = RocDocAllocator::new(&[], module_id, &interns);
let report = {
match other {
match platform_path {
Valid(_) => unreachable!(),
NotSpecified => {
let doc = alloc.stack([
alloc.reflow("I could not find a platform based on your input file."),
alloc.reflow(r"Does the module header contain an entry that looks like this:"),
alloc.reflow(r"Does the module header have an entry that looks like this?"),
alloc
.parser_suggestion(" packages { pf: \"platform\" }")
.parser_suggestion("packages { blah: \"…path or URL to platform…\" }")
.indent(4),
alloc.reflow("See also TODO."),
alloc.reflow("Tip: The following part of the tutorial has an example of specifying a platform:\n\n<https://www.roc-lang.org/tutorial#building-an-application>"),
]);
Report {
filename: "UNKNOWN.roc".into(),
filename,
doc,
title: "NO PLATFORM".to_string(),
severity: Severity::RuntimeError,
@ -6622,7 +6635,7 @@ fn to_missing_platform_report(module_id: ModuleId, other: &PlatformPath) -> Stri
]);
Report {
filename: "UNKNOWN.roc".into(),
filename,
doc,
title: "NO PLATFORM".to_string(),
severity: Severity::RuntimeError,
@ -6637,7 +6650,7 @@ fn to_missing_platform_report(module_id: ModuleId, other: &PlatformPath) -> Stri
]);
Report {
filename: "UNKNOWN.roc".into(),
filename,
doc,
title: "NO PLATFORM".to_string(),
severity: Severity::RuntimeError,
@ -6652,7 +6665,7 @@ fn to_missing_platform_report(module_id: ModuleId, other: &PlatformPath) -> Stri
]);
Report {
filename: "UNKNOWN.roc".into(),
filename,
doc,
title: "NO PLATFORM".to_string(),
severity: Severity::RuntimeError,

View file

@ -144,7 +144,6 @@ pub(crate) struct FoundSpecializationsModule<'a> {
pub(crate) procs_base: ProcsBase<'a>,
pub(crate) subs: Subs,
pub(crate) module_timing: ModuleTiming,
pub(crate) abilities_store: AbilitiesStore,
pub(crate) expectations: Option<Expectations>,
}

View file

@ -183,7 +183,7 @@ impl LowLevelWrapperType {
/// We use a rust macro to ensure that every LowLevel gets handled
macro_rules! map_symbol_to_lowlevel {
($($lowlevel:ident <= $symbol:ident),* $(,)?) => {
($($lowlevel:ident <= $($symbol:ident),+);* $(;)?) => {
fn for_symbol_help(symbol: Symbol) -> LowLevelWrapperType {
use $crate::low_level::LowLevelWrapperType::*;
@ -191,14 +191,14 @@ macro_rules! map_symbol_to_lowlevel {
// expands to a big (but non-exhaustive) match on symbols and maps them to a lowlevel
match symbol {
$(
Symbol::$symbol => CanBeReplacedBy(LowLevel::$lowlevel),
$(Symbol::$symbol)|+ => CanBeReplacedBy(LowLevel::$lowlevel),
)*
_ => NotALowLevelWrapper,
}
}
fn _enforce_exhaustiveness(lowlevel: LowLevel) -> Symbol {
fn _enforce_exhaustiveness(lowlevel: LowLevel) -> &'static [Symbol] {
// when adding a new lowlevel, this match will stop being exhaustive, and give a
// compiler error. Most likely, you are adding a new lowlevel that maps directly to a
// symbol. For instance, you want `List.foo` to stand for the `ListFoo`
@ -209,7 +209,7 @@ macro_rules! map_symbol_to_lowlevel {
// that it isn't and just see if that works.
match lowlevel {
$(
LowLevel::$lowlevel => Symbol::$symbol,
LowLevel::$lowlevel => &[$(Symbol::$symbol),+],
)*
// these are higher-order lowlevels. these need the surrounding
@ -259,107 +259,107 @@ macro_rules! map_symbol_to_lowlevel {
// pattern of a symbol mapping directly to a lowlevel. In other words, most lowlevels (left) are generated
// by only one specific symbol (right)
map_symbol_to_lowlevel! {
StrConcat <= STR_CONCAT,
StrJoinWith <= STR_JOIN_WITH,
StrIsEmpty <= STR_IS_EMPTY,
StrStartsWith <= STR_STARTS_WITH,
StrStartsWithScalar <= STR_STARTS_WITH_SCALAR,
StrEndsWith <= STR_ENDS_WITH,
StrSplit <= STR_SPLIT,
StrCountGraphemes <= STR_COUNT_GRAPHEMES,
StrCountUtf8Bytes <= STR_COUNT_UTF8_BYTES,
StrFromUtf8Range <= STR_FROM_UTF8_RANGE_LOWLEVEL,
StrToUtf8 <= STR_TO_UTF8,
StrRepeat <= STR_REPEAT,
StrTrim <= STR_TRIM,
StrTrimStart <= STR_TRIM_START,
StrTrimEnd <= STR_TRIM_END,
StrToScalars <= STR_TO_SCALARS,
StrGetUnsafe <= STR_GET_UNSAFE,
StrSubstringUnsafe <= STR_SUBSTRING_UNSAFE,
StrReserve <= STR_RESERVE,
StrAppendScalar <= STR_APPEND_SCALAR_UNSAFE,
StrGetScalarUnsafe <= STR_GET_SCALAR_UNSAFE,
StrToNum <= STR_TO_NUM,
StrGetCapacity <= STR_CAPACITY,
StrWithCapacity <= STR_WITH_CAPACITY,
StrGraphemes <= STR_GRAPHEMES,
StrReleaseExcessCapacity <= STR_RELEASE_EXCESS_CAPACITY,
ListLen <= LIST_LEN,
ListGetCapacity <= LIST_CAPACITY,
ListWithCapacity <= LIST_WITH_CAPACITY,
ListReserve <= LIST_RESERVE,
ListReleaseExcessCapacity <= LIST_RELEASE_EXCESS_CAPACITY,
ListIsUnique <= LIST_IS_UNIQUE,
ListAppendUnsafe <= LIST_APPEND_UNSAFE,
ListPrepend <= LIST_PREPEND,
ListGetUnsafe <= LIST_GET_UNSAFE,
ListReplaceUnsafe <= LIST_REPLACE_UNSAFE,
ListConcat <= LIST_CONCAT,
ListSublist <= LIST_SUBLIST_LOWLEVEL,
ListDropAt <= LIST_DROP_AT,
ListSwap <= LIST_SWAP,
NumAdd <= NUM_ADD,
NumAddWrap <= NUM_ADD_WRAP,
NumAddChecked <= NUM_ADD_CHECKED_LOWLEVEL,
NumAddSaturated <= NUM_ADD_SATURATED,
NumSub <= NUM_SUB,
NumSubWrap <= NUM_SUB_WRAP,
NumSubChecked <= NUM_SUB_CHECKED_LOWLEVEL,
NumSubSaturated <= NUM_SUB_SATURATED,
NumMul <= NUM_MUL,
NumMulWrap <= NUM_MUL_WRAP,
NumMulSaturated <= NUM_MUL_SATURATED,
NumMulChecked <= NUM_MUL_CHECKED_LOWLEVEL,
NumGt <= NUM_GT,
NumGte <= NUM_GTE,
NumLt <= NUM_LT,
NumLte <= NUM_LTE,
NumCompare <= NUM_COMPARE,
NumDivFrac <= NUM_DIV_FRAC,
NumDivCeilUnchecked <= NUM_DIV_CEIL,
NumDivTruncUnchecked <= NUM_DIV_TRUNC,
NumRemUnchecked <= NUM_REM,
NumIsMultipleOf <= NUM_IS_MULTIPLE_OF,
NumAbs <= NUM_ABS,
NumNeg <= NUM_NEG,
NumSin <= NUM_SIN,
NumCos <= NUM_COS,
NumTan <= NUM_TAN,
NumSqrtUnchecked <= NUM_SQRT,
NumLogUnchecked <= NUM_LOG,
NumRound <= NUM_ROUND,
NumToFrac <= NUM_TO_FRAC,
NumIsNan <= NUM_IS_NAN,
NumIsInfinite <= NUM_IS_INFINITE,
NumIsFinite <= NUM_IS_FINITE,
NumPow <= NUM_POW,
NumCeiling <= NUM_CEILING,
NumPowInt <= NUM_POW_INT,
NumFloor <= NUM_FLOOR,
NumAtan <= NUM_ATAN,
NumAcos <= NUM_ACOS,
NumAsin <= NUM_ASIN,
NumBytesToU16 <= NUM_BYTES_TO_U16_LOWLEVEL,
NumBytesToU32 <= NUM_BYTES_TO_U32_LOWLEVEL,
NumBytesToU64 <= NUM_BYTES_TO_U64_LOWLEVEL,
NumBytesToU128 <= NUM_BYTES_TO_U128_LOWLEVEL,
NumBitwiseAnd <= NUM_BITWISE_AND,
NumBitwiseXor <= NUM_BITWISE_XOR,
NumBitwiseOr <= NUM_BITWISE_OR,
NumShiftLeftBy <= NUM_SHIFT_LEFT,
NumShiftRightBy <= NUM_SHIFT_RIGHT,
NumShiftRightZfBy <= NUM_SHIFT_RIGHT_ZERO_FILL,
NumToStr <= NUM_TO_STR,
NumCountLeadingZeroBits <= NUM_COUNT_LEADING_ZERO_BITS,
NumCountTrailingZeroBits <= NUM_COUNT_TRAILING_ZERO_BITS,
NumCountOneBits <= NUM_COUNT_ONE_BITS,
I128OfDec <= I128_OF_DEC,
Eq <= BOOL_STRUCTURAL_EQ,
NotEq <= BOOL_STRUCTURAL_NOT_EQ,
And <= BOOL_AND,
Or <= BOOL_OR,
Not <= BOOL_NOT,
Unreachable <= LIST_UNREACHABLE,
DictPseudoSeed <= DICT_PSEUDO_SEED,
StrConcat <= STR_CONCAT;
StrJoinWith <= STR_JOIN_WITH;
StrIsEmpty <= STR_IS_EMPTY;
StrStartsWith <= STR_STARTS_WITH;
StrStartsWithScalar <= STR_STARTS_WITH_SCALAR;
StrEndsWith <= STR_ENDS_WITH;
StrSplit <= STR_SPLIT;
StrCountGraphemes <= STR_COUNT_GRAPHEMES;
StrCountUtf8Bytes <= STR_COUNT_UTF8_BYTES;
StrFromUtf8Range <= STR_FROM_UTF8_RANGE_LOWLEVEL;
StrToUtf8 <= STR_TO_UTF8;
StrRepeat <= STR_REPEAT;
StrTrim <= STR_TRIM;
StrTrimStart <= STR_TRIM_START;
StrTrimEnd <= STR_TRIM_END;
StrToScalars <= STR_TO_SCALARS;
StrGetUnsafe <= STR_GET_UNSAFE;
StrSubstringUnsafe <= STR_SUBSTRING_UNSAFE;
StrReserve <= STR_RESERVE;
StrAppendScalar <= STR_APPEND_SCALAR_UNSAFE;
StrGetScalarUnsafe <= STR_GET_SCALAR_UNSAFE;
StrToNum <= STR_TO_NUM;
StrGetCapacity <= STR_CAPACITY;
StrWithCapacity <= STR_WITH_CAPACITY;
StrGraphemes <= STR_GRAPHEMES;
StrReleaseExcessCapacity <= STR_RELEASE_EXCESS_CAPACITY;
ListLen <= LIST_LEN;
ListGetCapacity <= LIST_CAPACITY;
ListWithCapacity <= LIST_WITH_CAPACITY;
ListReserve <= LIST_RESERVE;
ListReleaseExcessCapacity <= LIST_RELEASE_EXCESS_CAPACITY;
ListIsUnique <= LIST_IS_UNIQUE;
ListAppendUnsafe <= LIST_APPEND_UNSAFE;
ListPrepend <= LIST_PREPEND;
ListGetUnsafe <= LIST_GET_UNSAFE, DICT_LIST_GET_UNSAFE;
ListReplaceUnsafe <= LIST_REPLACE_UNSAFE;
ListConcat <= LIST_CONCAT;
ListSublist <= LIST_SUBLIST_LOWLEVEL;
ListDropAt <= LIST_DROP_AT;
ListSwap <= LIST_SWAP;
NumAdd <= NUM_ADD;
NumAddWrap <= NUM_ADD_WRAP;
NumAddChecked <= NUM_ADD_CHECKED_LOWLEVEL;
NumAddSaturated <= NUM_ADD_SATURATED;
NumSub <= NUM_SUB;
NumSubWrap <= NUM_SUB_WRAP;
NumSubChecked <= NUM_SUB_CHECKED_LOWLEVEL;
NumSubSaturated <= NUM_SUB_SATURATED;
NumMul <= NUM_MUL;
NumMulWrap <= NUM_MUL_WRAP;
NumMulSaturated <= NUM_MUL_SATURATED;
NumMulChecked <= NUM_MUL_CHECKED_LOWLEVEL;
NumGt <= NUM_GT;
NumGte <= NUM_GTE;
NumLt <= NUM_LT;
NumLte <= NUM_LTE;
NumCompare <= NUM_COMPARE;
NumDivFrac <= NUM_DIV_FRAC;
NumDivCeilUnchecked <= NUM_DIV_CEIL;
NumDivTruncUnchecked <= NUM_DIV_TRUNC;
NumRemUnchecked <= NUM_REM;
NumIsMultipleOf <= NUM_IS_MULTIPLE_OF;
NumAbs <= NUM_ABS;
NumNeg <= NUM_NEG;
NumSin <= NUM_SIN;
NumCos <= NUM_COS;
NumTan <= NUM_TAN;
NumSqrtUnchecked <= NUM_SQRT;
NumLogUnchecked <= NUM_LOG;
NumRound <= NUM_ROUND;
NumToFrac <= NUM_TO_FRAC;
NumIsNan <= NUM_IS_NAN;
NumIsInfinite <= NUM_IS_INFINITE;
NumIsFinite <= NUM_IS_FINITE;
NumPow <= NUM_POW;
NumCeiling <= NUM_CEILING;
NumPowInt <= NUM_POW_INT;
NumFloor <= NUM_FLOOR;
NumAtan <= NUM_ATAN;
NumAcos <= NUM_ACOS;
NumAsin <= NUM_ASIN;
NumBytesToU16 <= NUM_BYTES_TO_U16_LOWLEVEL;
NumBytesToU32 <= NUM_BYTES_TO_U32_LOWLEVEL;
NumBytesToU64 <= NUM_BYTES_TO_U64_LOWLEVEL;
NumBytesToU128 <= NUM_BYTES_TO_U128_LOWLEVEL;
NumBitwiseAnd <= NUM_BITWISE_AND;
NumBitwiseXor <= NUM_BITWISE_XOR;
NumBitwiseOr <= NUM_BITWISE_OR;
NumShiftLeftBy <= NUM_SHIFT_LEFT;
NumShiftRightBy <= NUM_SHIFT_RIGHT;
NumShiftRightZfBy <= NUM_SHIFT_RIGHT_ZERO_FILL;
NumToStr <= NUM_TO_STR;
NumCountLeadingZeroBits <= NUM_COUNT_LEADING_ZERO_BITS;
NumCountTrailingZeroBits <= NUM_COUNT_TRAILING_ZERO_BITS;
NumCountOneBits <= NUM_COUNT_ONE_BITS;
I128OfDec <= I128_OF_DEC;
Eq <= BOOL_STRUCTURAL_EQ;
NotEq <= BOOL_STRUCTURAL_NOT_EQ;
And <= BOOL_AND;
Or <= BOOL_OR;
Not <= BOOL_NOT;
Unreachable <= LIST_UNREACHABLE;
DictPseudoSeed <= DICT_PSEUDO_SEED;
}
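The macro's new grammar (semicolon-separated rows, each mapping one lowlevel to one or more comma-separated symbols) can be exercised in isolation. A self-contained miniature with made-up variants, showing how the inner repetition expands to an or-pattern:

#[allow(non_camel_case_types)]
#[derive(Debug, PartialEq)]
enum Symbol { LIST_GET_UNSAFE, DICT_LIST_GET_UNSAFE, STR_CONCAT }

#[derive(Debug, PartialEq)]
enum LowLevel { ListGetUnsafe, StrConcat }

macro_rules! map_symbol_to_lowlevel {
    ($($lowlevel:ident <= $($symbol:ident),+);* $(;)?) => {
        fn for_symbol(symbol: Symbol) -> LowLevel {
            match symbol {
                // several symbols may share one lowlevel via an or-pattern
                $( $(Symbol::$symbol)|+ => LowLevel::$lowlevel, )*
            }
        }
    };
}

map_symbol_to_lowlevel! {
    ListGetUnsafe <= LIST_GET_UNSAFE, DICT_LIST_GET_UNSAFE;
    StrConcat <= STR_CONCAT;
}

fn main() {
    assert_eq!(for_symbol(Symbol::DICT_LIST_GET_UNSAFE), LowLevel::ListGetUnsafe);
}
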

View file

@ -1439,6 +1439,7 @@ define_builtins! {
83 LIST_WALK_WITH_INDEX: "walkWithIndex"
84 LIST_APPEND_IF_OK: "appendIfOk"
85 LIST_PREPEND_IF_OK: "prependIfOk"
86 LIST_WALK_WITH_INDEX_UNTIL: "walkWithIndexUntil"
}
7 RESULT: "Result" => {
0 RESULT_RESULT: "Result" exposed_type=true // the Result.Result type alias
@ -1484,6 +1485,10 @@ define_builtins! {
24 DICT_IS_EMPTY: "isEmpty"
25 DICT_MAP: "map"
26 DICT_JOINMAP: "joinMap"
27 DICT_KEEP_IF: "keepIf"
28 DICT_DROP_IF: "dropIf"
29 DICT_RESERVE: "reserve"
30 DICT_RELEASE_EXCESS_CAPACITY: "releaseExcessCapacity"
}
9 SET: "Set" => {
0 SET_SET: "Set" exposed_type=true // the Set.Set type alias
@ -1506,6 +1511,11 @@ define_builtins! {
17 SET_IS_EMPTY: "isEmpty"
18 SET_MAP: "map"
19 SET_JOIN_MAP: "joinMap"
20 SET_KEEP_IF: "keepIf"
21 SET_DROP_IF: "dropIf"
22 SET_WITH_CAPACITY: "withCapacity"
23 SET_RESERVE: "reserve"
24 SET_RELEASE_EXCESS_CAPACITY: "releaseExcessCapacity"
}
10 BOX: "Box" => {
0 BOX_BOX_TYPE: "Box" exposed_apply_type=true // the Box.Box opaque type
@ -1631,8 +1641,7 @@ define_builtins! {
31 INSPECT_APPLY: "apply"
32 INSPECT_TO_INSPECTOR: "toInspector"
33 INSPECT_NAT: "nat"
34 INSPECT_DBG_FORMATTER: "DbgFormatter" exposed_type=true
35 INSPECT_TO_DBG_STR: "toDbgStr"
34 INSPECT_TO_STR: "toStr"
}
15 JSON: "TotallyNotJson" => {
0 JSON_JSON: "TotallyNotJson"

View file

@ -631,10 +631,14 @@ fn specialize_drops_stmt<'a, 'i>(
),
}),
Stmt::Dbg {
source_location,
source,
symbol,
variable,
remainder,
} => arena.alloc(Stmt::Dbg {
source_location,
source,
symbol: *symbol,
variable: *variable,
remainder: specialize_drops_stmt(
@ -1536,7 +1540,7 @@ fn low_level_no_rc(lowlevel: &LowLevel) -> RC {
StrGetUnsafe | ListGetUnsafe => RC::NoRc,
ListConcat => RC::Rc,
StrConcat => RC::Rc,
StrSubstringUnsafe => RC::NoRc,
StrSubstringUnsafe => RC::Rc,
StrReserve => RC::Rc,
StrAppendScalar => RC::Rc,
StrGetScalarUnsafe => RC::NoRc,

View file

@ -689,6 +689,8 @@ fn insert_refcount_operations_stmt<'v, 'a>(
})
}
Stmt::Dbg {
source_location,
source,
symbol,
variable,
remainder,
@ -703,6 +705,8 @@ fn insert_refcount_operations_stmt<'v, 'a>(
);
arena.alloc(Stmt::Dbg {
source_location,
source,
symbol: *symbol,
variable: *variable,
remainder: newer_remainder,
@ -1288,7 +1292,7 @@ fn lowlevel_borrow_signature(arena: &Bump, op: LowLevel) -> &[Ownership] {
StrGetUnsafe | ListGetUnsafe => arena.alloc_slice_copy(&[borrowed, irrelevant]),
ListConcat => arena.alloc_slice_copy(&[owned, owned]),
StrConcat => arena.alloc_slice_copy(&[owned, borrowed]),
StrSubstringUnsafe => arena.alloc_slice_copy(&[borrowed, irrelevant, irrelevant]),
StrSubstringUnsafe => arena.alloc_slice_copy(&[owned, irrelevant, irrelevant]),
StrReserve => arena.alloc_slice_copy(&[owned, irrelevant]),
StrAppendScalar => arena.alloc_slice_copy(&[owned, irrelevant]),
StrGetScalarUnsafe => arena.alloc_slice_copy(&[borrowed, irrelevant]),

View file

@ -1531,6 +1531,10 @@ pub enum Stmt<'a> {
remainder: &'a Stmt<'a>,
},
Dbg {
/// The location of this dbg in the source, as a printable string.
source_location: &'a str,
/// The source code of the expression being debugged.
source: &'a str,
/// The expression we're displaying
symbol: Symbol,
/// The specialized variable of the expression
@ -4606,6 +4610,8 @@ pub fn with_hole<'a>(
Expect { .. } => unreachable!("I think this is unreachable"),
ExpectFx { .. } => unreachable!("I think this is unreachable"),
Dbg {
source_location,
source,
loc_message,
loc_continuation,
variable: cond_variable,
@ -4625,6 +4631,8 @@ pub fn with_hole<'a>(
env,
procs,
layout_cache,
&*arena.alloc(source_location),
&*arena.alloc(source),
dbg_symbol,
*loc_message,
cond_variable,
@ -5896,8 +5904,10 @@ fn compile_dbg<'a>(
env: &mut Env<'a, '_>,
procs: &mut Procs<'a>,
layout_cache: &mut LayoutCache<'a>,
source_location: &'a str,
source: &'a str,
dbg_symbol: Symbol,
loc_condition: Loc<roc_can::expr::Expr>,
loc_message: Loc<roc_can::expr::Expr>,
variable: Variable,
continuation: Stmt<'a>,
) -> Stmt<'a> {
@ -5908,6 +5918,8 @@ fn compile_dbg<'a>(
.fresh_unnamed_flex_var();
let dbg_stmt = Stmt::Dbg {
source_location,
source,
symbol: dbg_symbol,
variable: spec_var,
remainder: env.arena.alloc(continuation),
@ -5918,17 +5930,17 @@ fn compile_dbg<'a>(
store_specialized_expectation_lookups(env, [variable], &[spec_var]);
let symbol_is_reused = matches!(
can_reuse_symbol(env, layout_cache, procs, &loc_condition.value, variable),
can_reuse_symbol(env, layout_cache, procs, &loc_message.value, variable),
ReuseSymbol::Value(_)
);
// skip evaluating the condition if it's just a symbol
// skip evaluating the message if it's just a symbol
if symbol_is_reused {
dbg_stmt
} else {
with_hole(
env,
loc_condition.value,
loc_message.value,
variable,
procs,
layout_cache,
@ -7141,6 +7153,8 @@ pub fn from_can<'a>(
}
Dbg {
source_location,
source,
loc_message,
loc_continuation,
variable: cond_variable,
@ -7152,6 +7166,8 @@ pub fn from_can<'a>(
env,
procs,
layout_cache,
&*env.arena.alloc(source_location),
&*env.arena.alloc(source),
dbg_symbol,
*loc_message,
cond_variable,
@ -7625,6 +7641,8 @@ fn substitute_in_stmt_help<'a>(
}
Dbg {
source_location,
source,
symbol,
variable,
remainder,
@ -7633,6 +7651,8 @@ fn substitute_in_stmt_help<'a>(
substitute_in_stmt_help(arena, remainder, subs).unwrap_or(remainder);
let expect = Dbg {
source_location,
source,
symbol: substitute(subs, *symbol).unwrap_or(*symbol),
variable: *variable,
remainder: new_remainder,

View file

@ -1544,7 +1544,13 @@ fn store_list_pattern<'a>(
}
}
stmt = store_list_rest(env, list_sym, list_arity, list_layout, opt_rest, stmt);
stmt = match store_list_rest(env, list_sym, list_arity, list_layout, opt_rest, stmt) {
StorePattern::Productive(new) => {
is_productive = true;
new
}
StorePattern::NotProductive(new) => new,
};
if is_productive {
StorePattern::Productive(stmt)
@ -1560,8 +1566,12 @@ fn store_list_rest<'a>(
list_layout: InLayout<'a>,
opt_rest: &Option<(usize, Option<Symbol>)>,
mut stmt: Stmt<'a>,
) -> Stmt<'a> {
) -> StorePattern<'a> {
let mut is_productive = false;
if let Some((index, Some(rest_sym))) = opt_rest {
is_productive = true;
let usize_layout = Layout::usize(env.target_info);
let total_dropped = list_arity.min_len();
@ -1608,7 +1618,12 @@ fn store_list_rest<'a>(
stmt = Stmt::Let(sym, expr, lay, env.arena.alloc(stmt));
}
}
stmt
if is_productive {
StorePattern::Productive(stmt)
} else {
StorePattern::NotProductive(stmt)
}
}
#[allow(clippy::too_many_arguments)]
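The invariant behind this fix: `.. as rest` binds a symbol, so the stores it emits must mark the pattern productive, while a bare `..` binds nothing and must not. A standalone miniature of that bookkeeping, with hypothetical names:

enum StorePattern<S> {
    Productive(S),    // at least one symbol was bound
    NotProductive(S), // nothing was bound; callers must not flip their flag
}

fn store_rest(rest_binding: Option<&str>, stmt: String) -> StorePattern<String> {
    match rest_binding {
        // `.. as rest` binds a symbol: productive
        Some(name) => StorePattern::Productive(format!("let {name} = rest; {stmt}")),
        // a bare `..` binds nothing: not productive
        None => StorePattern::NotProductive(stmt),
    }
}

fn main() {
    match store_rest(None, "body".to_string()) {
        StorePattern::Productive(_) => unreachable!("bare .. must not be productive"),
        StorePattern::NotProductive(s) => println!("{s}"),
    }
}
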

View file

@ -651,6 +651,8 @@ fn insert_reset_reuse_operations_stmt<'a, 'i>(
})
}
Stmt::Dbg {
source_location,
source,
symbol,
variable,
remainder,
@ -666,6 +668,8 @@ fn insert_reset_reuse_operations_stmt<'a, 'i>(
);
arena.alloc(Stmt::Dbg {
source_location,
source,
symbol: *symbol,
variable: *variable,
remainder: new_remainder,

View file

@ -330,6 +330,8 @@ fn insert_jumps<'a>(
}
Dbg {
source_location,
source,
symbol,
variable,
remainder,
@ -342,6 +344,8 @@ fn insert_jumps<'a>(
needle_result,
) {
Some(cont) => Some(arena.alloc(Dbg {
source_location,
source,
symbol: *symbol,
variable: *variable,
remainder: cont,
@ -1020,10 +1024,14 @@ impl<'a> TrmcEnv<'a> {
remainder: arena.alloc(self.walk_stmt(env, remainder)),
},
Stmt::Dbg {
source_location,
source,
symbol,
variable,
remainder,
} => Stmt::Dbg {
source_location,
source,
symbol: *symbol,
variable: *variable,
remainder: arena.alloc(self.walk_stmt(env, remainder)),
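The same mechanical pattern repeats across the four passes above (drop specialization, refcount insertion, reset/reuse, tail recursion): the new `source_location` and `source` fields are threaded through untouched while `remainder` is rebuilt. A self-contained miniature of that shape, with illustrative types rather than the compiler's real ones:

#[derive(Debug)]
enum Stmt<'a> {
    Dbg {
        source_location: &'a str,
        source: &'a str,
        remainder: Box<Stmt<'a>>,
    },
    Done,
}

fn walk(stmt: Stmt<'_>) -> Stmt<'_> {
    match stmt {
        Stmt::Dbg { source_location, source, remainder } => Stmt::Dbg {
            source_location, // threaded through untouched
            source,          // threaded through untouched
            remainder: Box::new(walk(*remainder)),
        },
        Stmt::Done => Stmt::Done,
    }
}

fn main() {
    let s = Stmt::Dbg { source_location: "main.roc:12", source: "x + y", remainder: Box::new(Stmt::Done) };
    println!("{:?}", walk(s));
}
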

View file

@ -302,7 +302,7 @@ pub enum Expr<'a> {
Expect(&'a Loc<Expr<'a>>, &'a Loc<Expr<'a>>),
Dbg(&'a Loc<Expr<'a>>, &'a Loc<Expr<'a>>),
// This form of debug is a desugared call to roc_dbg
LowLevelDbg(&'a Loc<Expr<'a>>, &'a Loc<Expr<'a>>),
LowLevelDbg(&'a (&'a str, &'a str), &'a Loc<Expr<'a>>, &'a Loc<Expr<'a>>),
// Application
/// To apply by name, do Apply(Var(...), ...)
@ -1537,7 +1537,7 @@ impl<'a> Malformed for Expr<'a> {
Backpassing(args, call, body) => args.iter().any(|arg| arg.is_malformed()) || call.is_malformed() || body.is_malformed(),
Expect(condition, continuation) |
Dbg(condition, continuation) => condition.is_malformed() || continuation.is_malformed(),
LowLevelDbg(condition, continuation) => condition.is_malformed() || continuation.is_malformed(),
LowLevelDbg(_, condition, continuation) => condition.is_malformed() || continuation.is_malformed(),
Apply(func, args, _) => func.is_malformed() || args.iter().any(|arg| arg.is_malformed()),
BinOps(firsts, last) => firsts.iter().any(|(expr, _)| expr.is_malformed()) || last.is_malformed(),
UnaryOp(expr, _) => expr.is_malformed(),

View file

@ -1933,7 +1933,7 @@ fn expr_to_pattern_help<'a>(arena: &'a Bump, expr: &Expr<'a>) -> Result<Pattern<
| Expr::When(_, _)
| Expr::Expect(_, _)
| Expr::Dbg(_, _)
| Expr::LowLevelDbg(_, _)
| Expr::LowLevelDbg(_, _, _)
| Expr::MalformedClosure
| Expr::PrecedenceConflict { .. }
| Expr::MultipleRecordBuilders { .. }
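Since `LowLevelDbg` now carries its (location, source) pair as plain metadata, consumers match on it positionally and recurse only into the subexpressions. A simplified, self-contained sketch with owned types instead of arena references:

enum Expr {
    // ((location, source), message, continuation)
    LowLevelDbg((String, String), Box<Expr>, Box<Expr>),
    Other,
}

fn is_malformed(e: &Expr) -> bool {
    match e {
        // the (location, source) pair is metadata; only subexpressions matter
        Expr::LowLevelDbg(_, message, continuation) => {
            is_malformed(message) || is_malformed(continuation)
        }
        Expr::Other => false,
    }
}

fn main() {
    let e = Expr::LowLevelDbg(
        ("main.roc:3".into(), "x".into()),
        Box::new(Expr::Other),
        Box::new(Expr::Other),
    );
    assert!(!is_malformed(&e));
}
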

Some files were not shown because too many files have changed in this diff.