Merge remote-tracking branch 'origin/trunk' into name-functions-with-morphic

This commit is contained in:
Folkert 2021-06-09 14:57:09 +02:00
commit 20f6e03cfb
104 changed files with 1707 additions and 704 deletions

4
.earthignore Normal file
View file

@ -0,0 +1,4 @@
AUTHORS
nix
.envrc
.gitignore

View file

@ -22,4 +22,3 @@ jobs:
- name: install dependencies, build, run zig tests, rustfmt, clippy, cargo test --release - name: install dependencies, build, run zig tests, rustfmt, clippy, cargo test --release
run: ./ci/safe-earthly.sh +test-all run: ./ci/safe-earthly.sh +test-all

View file

@ -1,4 +1,4 @@
on: on:
schedule: schedule:
- cron: '0 0 * * *' - cron: '0 0 * * *'

22
.github/workflows/www.yml vendored Normal file
View file

@ -0,0 +1,22 @@
name: deploy www.roc-lang.org
# Whenever a commit lands on trunk, deploy the site
on:
push:
branches:
- deploy-www # TODO change to trunk
jobs:
deploy:
name: 'Deploy to Netlify'
runs-on: [self-hosted]
steps:
- uses: jsmrcaga/action-netlify-deploy@v1.6.0
with:
install_command: 'pwd; cd ../../www'
build_command: 'bash build.sh'
build_directory: 'build'
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
NETLIFY_DEPLOY_MESSAGE: "Deploy git ref ${{ github.ref }}"
NETLIFY_DEPLOY_TO_PROD: true

169
Cargo.lock generated
View file

@ -1,7 +1,5 @@
# This file is automatically @generated by Cargo. # This file is automatically @generated by Cargo.
# It is not intended for manual editing. # It is not intended for manual editing.
version = 3
[[package]] [[package]]
name = "ab_glyph" name = "ab_glyph"
version = "0.2.11" version = "0.2.11"
@ -20,9 +18,9 @@ checksum = "d9fe5e32de01730eb1f6b7f5b51c17e03e2325bf40a74f754f04f130043affff"
[[package]] [[package]]
name = "addr2line" name = "addr2line"
version = "0.15.1" version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a" checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a"
dependencies = [ dependencies = [
"gimli", "gimli",
] ]
@ -99,6 +97,15 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "approx"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e"
dependencies = [
"num-traits",
]
[[package]] [[package]]
name = "arena-pool" name = "arena-pool"
version = "0.1.0" version = "0.1.0"
@ -155,16 +162,16 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]] [[package]]
name = "backtrace" name = "backtrace"
version = "0.3.59" version = "0.3.60"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4717cfcbfaa661a0fd48f8453951837ae7e8f81e481fbb136e3202d72805a744" checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282"
dependencies = [ dependencies = [
"addr2line", "addr2line",
"cc", "cc",
"cfg-if 1.0.0", "cfg-if 1.0.0",
"libc", "libc",
"miniz_oxide", "miniz_oxide",
"object", "object 0.25.2",
"rustc-demangle", "rustc-demangle",
] ]
@ -689,7 +696,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"crossbeam-utils 0.8.4", "crossbeam-utils 0.8.5",
] ]
[[package]] [[package]]
@ -710,8 +717,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"crossbeam-epoch 0.9.4", "crossbeam-epoch 0.9.5",
"crossbeam-utils 0.8.4", "crossbeam-utils 0.8.5",
] ]
[[package]] [[package]]
@ -731,12 +738,12 @@ dependencies = [
[[package]] [[package]]
name = "crossbeam-epoch" name = "crossbeam-epoch"
version = "0.9.4" version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"crossbeam-utils 0.8.4", "crossbeam-utils 0.8.5",
"lazy_static", "lazy_static",
"memoffset 0.6.4", "memoffset 0.6.4",
"scopeguard", "scopeguard",
@ -766,11 +773,10 @@ dependencies = [
[[package]] [[package]]
name = "crossbeam-utils" name = "crossbeam-utils"
version = "0.8.4" version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db"
dependencies = [ dependencies = [
"autocfg 1.0.1",
"cfg-if 1.0.0", "cfg-if 1.0.0",
"lazy_static", "lazy_static",
] ]
@ -1407,12 +1413,12 @@ dependencies = [
[[package]] [[package]]
name = "glyph_brush_layout" name = "glyph_brush_layout"
version = "0.2.1" version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10bc06d530bf20c1902f1b02799ab7372ff43f6119770c49b0bc3f21bd148820" checksum = "15cf18cf985bd942f05e14552b63c9d08f7d0ed1ec79a977eb9747c9e065f497"
dependencies = [ dependencies = [
"ab_glyph", "ab_glyph",
"approx 0.4.0", "approx 0.5.0",
"xi-unicode", "xi-unicode",
] ]
@ -1482,9 +1488,9 @@ dependencies = [
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.3.2" version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
dependencies = [ dependencies = [
"unicode-segmentation", "unicode-segmentation",
] ]
@ -1612,13 +1618,13 @@ dependencies = [
name = "inkwell" name = "inkwell"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"inkwell 0.1.0 (git+https://github.com/rtfeldman/inkwell?tag=llvm10-0.release5)", "inkwell 0.1.0 (git+https://github.com/rtfeldman/inkwell?tag=llvm12-0.release2)",
] ]
[[package]] [[package]]
name = "inkwell" name = "inkwell"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/rtfeldman/inkwell?tag=llvm10-0.release5#f0a32d3a0595bda3777a7b7e08a2e46c97eba6f4" source = "git+https://github.com/rtfeldman/inkwell?tag=llvm12-0.release2#2f9bfb728cb5713b39fe527838969dcc8e184c4d"
dependencies = [ dependencies = [
"either", "either",
"inkwell_internals", "inkwell_internals",
@ -1632,7 +1638,7 @@ dependencies = [
[[package]] [[package]]
name = "inkwell_internals" name = "inkwell_internals"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/rtfeldman/inkwell?tag=llvm10-0.release5#f0a32d3a0595bda3777a7b7e08a2e46c97eba6f4" source = "git+https://github.com/rtfeldman/inkwell?tag=llvm12-0.release2#2f9bfb728cb5713b39fe527838969dcc8e184c4d"
dependencies = [ dependencies = [
"proc-macro2 1.0.27", "proc-macro2 1.0.27",
"quote 1.0.9", "quote 1.0.9",
@ -1757,9 +1763,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.95" version = "0.2.96"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" checksum = "5600b4e6efc5421841a2138a6b082e07fe12f9aaa12783d50e5d13325b26b4fc"
[[package]] [[package]]
name = "libloading" name = "libloading"
@ -2165,6 +2171,15 @@ dependencies = [
"indexmap", "indexmap",
] ]
[[package]]
name = "object"
version = "0.25.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8bc1d42047cf336f0f939c99e97183cf31551bf0f2865a2ec9c8d91fd4ffb5e"
dependencies = [
"memchr",
]
[[package]] [[package]]
name = "once_cell" name = "once_cell"
version = "1.7.2" version = "1.7.2"
@ -2191,12 +2206,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
[[package]] [[package]]
name = "ordered-float" name = "ordered-float"
version = "2.5.0" version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809348965973b261c3e504c8d0434e465274f78c880e10039914f2c5dcf49461" checksum = "f100fcfb41e5385e0991f74981732049f9b896821542a219420491046baafdc2"
dependencies = [ dependencies = [
"num-traits", "num-traits",
"rand 0.8.3",
] ]
[[package]] [[package]]
@ -2531,9 +2545,9 @@ dependencies = [
[[package]] [[package]]
name = "profiling" name = "profiling"
version = "1.0.1" version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3a66d5e88679f2720126c11ee29da07a08f094eac52306b066edd7d393752d6" checksum = "0a7c000c0ce9d9bb94c0fbacdf20e5087fbe652c556ffb2c9387d980e17d51fb"
[[package]] [[package]]
name = "pulldown-cmark" name = "pulldown-cmark"
@ -2850,7 +2864,7 @@ checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
dependencies = [ dependencies = [
"crossbeam-channel 0.5.1", "crossbeam-channel 0.5.1",
"crossbeam-deque 0.8.0", "crossbeam-deque 0.8.0",
"crossbeam-utils 0.8.4", "crossbeam-utils 0.8.5",
"lazy_static", "lazy_static",
"num_cpus", "num_cpus",
] ]
@ -2902,12 +2916,9 @@ dependencies = [
[[package]] [[package]]
name = "regex-automata" name = "regex-automata"
version = "0.1.9" version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"byteorder",
]
[[package]] [[package]]
name = "regex-syntax" name = "regex-syntax"
@ -2943,7 +2954,7 @@ dependencies = [
"roc_can", "roc_can",
"roc_collections", "roc_collections",
"roc_constrain", "roc_constrain",
"roc_gen", "roc_gen_llvm",
"roc_load", "roc_load",
"roc_module", "roc_module",
"roc_mono", "roc_mono",
@ -3025,7 +3036,7 @@ dependencies = [
"roc_docs", "roc_docs",
"roc_editor", "roc_editor",
"roc_fmt", "roc_fmt",
"roc_gen", "roc_gen_llvm",
"roc_load", "roc_load",
"roc_module", "roc_module",
"roc_mono", "roc_mono",
@ -3159,7 +3170,45 @@ dependencies = [
] ]
[[package]] [[package]]
name = "roc_gen" name = "roc_gen_dev"
version = "0.1.0"
dependencies = [
"bumpalo",
"im 14.3.0",
"im-rc 14.3.0",
"indoc 0.3.6",
"inlinable_string",
"itertools 0.9.0",
"libc",
"libloading 0.6.7",
"maplit",
"object 0.24.0",
"pretty_assertions 0.5.1",
"quickcheck 0.8.5",
"quickcheck_macros 0.8.0",
"roc_build",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_load",
"roc_module",
"roc_mono",
"roc_parse",
"roc_problem",
"roc_region",
"roc_reporting",
"roc_solve",
"roc_std",
"roc_types",
"roc_unify",
"target-lexicon",
"tempfile",
"tokio",
]
[[package]]
name = "roc_gen_llvm"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bumpalo", "bumpalo",
@ -3194,44 +3243,6 @@ dependencies = [
"tokio", "tokio",
] ]
[[package]]
name = "roc_gen_dev"
version = "0.1.0"
dependencies = [
"bumpalo",
"im 14.3.0",
"im-rc 14.3.0",
"indoc 0.3.6",
"inlinable_string",
"itertools 0.9.0",
"libc",
"libloading 0.6.7",
"maplit",
"object",
"pretty_assertions 0.5.1",
"quickcheck 0.8.5",
"quickcheck_macros 0.8.0",
"roc_build",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_load",
"roc_module",
"roc_mono",
"roc_parse",
"roc_problem",
"roc_region",
"roc_reporting",
"roc_solve",
"roc_std",
"roc_types",
"roc_unify",
"target-lexicon",
"tempfile",
"tokio",
]
[[package]] [[package]]
name = "roc_load" name = "roc_load"
version = "0.1.0" version = "0.1.0"
@ -3911,7 +3922,7 @@ dependencies = [
"roc_can", "roc_can",
"roc_collections", "roc_collections",
"roc_constrain", "roc_constrain",
"roc_gen", "roc_gen_llvm",
"roc_load", "roc_load",
"roc_module", "roc_module",
"roc_mono", "roc_mono",

View file

@ -17,7 +17,7 @@ members = [
"compiler/test_mono_macros", "compiler/test_mono_macros",
"compiler/test_mono", "compiler/test_mono",
"compiler/load", "compiler/load",
"compiler/gen", "compiler/gen_llvm",
"compiler/gen_dev", "compiler/gen_dev",
"compiler/build", "compiler/build",
"compiler/arena_pool", "compiler/arena_pool",
@ -32,6 +32,12 @@ members = [
"roc_std", "roc_std",
"docs" "docs"
] ]
# Needed to be able to run `cargo run -p roc_cli --no-default-features` -
# see www/build.sh for more.
#
# Without the `-p` flag, cargo ignores `--no-default-features` when you have a
# workspace, and without `resolver = "2"` here, you can't use `-p` like this.
resolver = "2"
# Optimizations based on https://deterministic.space/high-performance-rust.html # Optimizations based on https://deterministic.space/high-performance-rust.html
[profile.release] [profile.release]

View file

@ -99,22 +99,41 @@ check-rustfmt:
RUN cargo fmt --version RUN cargo fmt --version
RUN cargo fmt --all -- --check RUN cargo fmt --all -- --check
check-typos:
RUN cargo install typos-cli --version 1.0.4 # use latest version on resolution of issue crate-ci/typos#277
COPY --dir .github ci cli compiler docs editor examples packages roc_std www *.md LEGAL_DETAILS shell.nix ./
RUN typos
test-rust: test-rust:
FROM +copy-dirs-and-cache FROM +copy-dirs-and-cache
ENV RUST_BACKTRACE=1 ENV RUST_BACKTRACE=1
RUN --mount=type=cache,target=$SCCACHE_DIR \ RUN --mount=type=cache,target=$SCCACHE_DIR \
cargo test --release && sccache --show-stats cargo test --release && sccache --show-stats
verify-no-git-changes:
FROM +test-rust
# If running tests caused anything to be changed or added (without being
# included in a .gitignore somewhere), fail the build!
#
# How it works: the `git ls-files` command lists all the modified or
# uncommitted files in the working tree, the `| grep -E .` command returns a
# zero exit code if it listed any files and nonzero otherwise (which is the
# opposite of what we want), and the `!` at the start inverts the exit code.
RUN ! git ls-files --deleted --modified --others --exclude-standard | grep -E .
test-all: test-all:
BUILD +test-zig BUILD +test-zig
BUILD +check-rustfmt BUILD +check-rustfmt
BUILD +check-clippy BUILD +check-clippy
BUILD +check-typos
BUILD +test-rust BUILD +test-rust
BUILD +verify-no-git-changes
bench-roc: bench-roc:
FROM +copy-dirs-and-cache FROM +copy-dirs-and-cache
ENV RUST_BACKTRACE=full ENV RUST_BACKTRACE=full
RUN cargo criterion -V RUN cargo criterion -V
# ulimit -s unlimited to prevent stack overflow errors for CFold
RUN --privileged --mount=type=cache,target=$SCCACHE_DIR \ RUN --privileged --mount=type=cache,target=$SCCACHE_DIR \
cd cli && cargo criterion && sccache --show-stats ulimit -s unlimited && cd cli && cargo criterion && sccache --show-stats
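The `verify-no-git-changes` target added above boils down to "fail if `git ls-files` reports anything". As a minimal Rust sketch of the same check (assuming `git` is on `PATH` and the current directory is a checkout; this is illustrative, not part of the actual CI):

```
use std::process::Command;

// List deleted/modified/untracked files the same way the Earthfile's
// `verify-no-git-changes` target does, and fail if anything is reported.
fn verify_no_git_changes() -> Result<(), String> {
    let output = Command::new("git")
        .args([
            "ls-files",
            "--deleted",
            "--modified",
            "--others",
            "--exclude-standard",
        ])
        .output()
        .map_err(|e| format!("failed to run git: {}", e))?;

    let listed = String::from_utf8_lossy(&output.stdout);
    if listed.trim().is_empty() {
        Ok(()) // the working tree is clean, so the build may pass
    } else {
        Err(format!("working tree is not clean:\n{}", listed))
    }
}

fn main() {
    if let Err(msg) = verify_no_git_changes() {
        eprintln!("{}", msg);
        std::process::exit(1);
    }
}
```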

View file

@ -46,7 +46,7 @@ By using systems-level programming languages like C and C++, platform authors sa
Roc is designed to make the "systems-level platform, higher-level application" experience as nice as possible. Roc is designed to make the "systems-level platform, higher-level application" experience as nice as possible.
* **Application** authors code exclusively in Roc. It's a language designed for nice ergonomics. The syntax resembles Ruby or CoffeeScript, and it has a fast compiler with full type inference. * **Application** authors code exclusively in Roc. It's a language designed for nice ergonomics. The syntax resembles Ruby or CoffeeScript, and it has a fast compiler with full type inference.
* **Platform** authors code almost exclusively in a systems-level langauge like C, C++, Rust, or [Zig](https://ziglang.org/), except for the thin Roc API they expose to application authors. Roc application code compiles to machine code, and production builds of Roc apps benefit from the same [LLVM](https://llvm.org/) optimizations that C++, Rust, and Zig do. Roc application authors do not need to know this lower-level code exists; all they have to interact with is the platform's API, which is exposed as an ordinary Roc API. * **Platform** authors code almost exclusively in a systems-level language like C, C++, Rust, or [Zig](https://ziglang.org/), except for the thin Roc API they expose to application authors. Roc application code compiles to machine code, and production builds of Roc apps benefit from the same [LLVM](https://llvm.org/) optimizations that C++, Rust, and Zig do. Roc application authors do not need to know this lower-level code exists; all they have to interact with is the platform's API, which is exposed as an ordinary Roc API.
Every Roc application is built on top of exactly one Roc platform. There is no such thing as a Roc application that runs without a platform, and there is no default platform. You must choose one! Every Roc application is built on top of exactly one Roc platform. There is no such thing as a Roc application that runs without a platform, and there is no default platform. You must choose one!

View file

@ -15,7 +15,12 @@ test = false
bench = false bench = false
[features] [features]
default = ["target-x86"] default = ["target-x86", "llvm", "editor"]
# This is a separate feature because when we generate docs on Netlify,
# it doesn't have LLVM installed. (Also, it doesn't need to do code gen.)
llvm = ["inkwell", "roc_gen_llvm", "roc_build/llvm"]
editor = ["roc_editor"]
target-x86 = [] target-x86 = []
@ -45,11 +50,11 @@ roc_unify = { path = "../compiler/unify" }
roc_solve = { path = "../compiler/solve" } roc_solve = { path = "../compiler/solve" }
roc_mono = { path = "../compiler/mono" } roc_mono = { path = "../compiler/mono" }
roc_load = { path = "../compiler/load" } roc_load = { path = "../compiler/load" }
roc_gen = { path = "../compiler/gen" } roc_gen_llvm = { path = "../compiler/gen_llvm", optional = true }
roc_build = { path = "../compiler/build" } roc_build = { path = "../compiler/build", default-features = false }
roc_fmt = { path = "../compiler/fmt" } roc_fmt = { path = "../compiler/fmt" }
roc_reporting = { path = "../compiler/reporting" } roc_reporting = { path = "../compiler/reporting" }
roc_editor = { path = "../editor" } roc_editor = { path = "../editor", optional = true }
# TODO switch to clap 3.0.0 once it's out. Tried adding clap = "~3.0.0-beta.1" and cargo wouldn't accept it # TODO switch to clap 3.0.0 once it's out. Tried adding clap = "~3.0.0-beta.1" and cargo wouldn't accept it
clap = { git = "https://github.com/rtfeldman/clap", branch = "master" } clap = { git = "https://github.com/rtfeldman/clap", branch = "master" }
const_format = "0.2.8" const_format = "0.2.8"
@ -62,7 +67,7 @@ inlinable_string = "0.1"
libc = "0.2" libc = "0.2"
libloading = "0.6" libloading = "0.6"
inkwell = { path = "../vendor/inkwell" } inkwell = { path = "../vendor/inkwell", optional = true }
target-lexicon = "0.10" target-lexicon = "0.10"
tempfile = "3.1.0" tempfile = "3.1.0"
@ -74,7 +79,7 @@ quickcheck = "0.8"
quickcheck_macros = "0.8" quickcheck_macros = "0.8"
serial_test = "0.5" serial_test = "0.5"
tempfile = "3.1.0" tempfile = "3.1.0"
criterion = { git = "https://github.com/Anton-4/criterion.rs"} criterion = { git = "https://github.com/Anton-4/criterion.rs"}
cli_utils = { path = "cli_utils" } cli_utils = { path = "cli_utils" }
# Keep the commented deps, they are commented because they require nightly rust # Keep the commented deps, they are commented because they require nightly rust
# criterion-perf-events = "0.1.3" # criterion-perf-events = "0.1.3"
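The optional `llvm` and `editor` features introduced in this Cargo.toml rely on the usual `cfg`-gating pattern (the same one the `cli/src/main.rs` changes below apply); a minimal sketch of that pattern, not the project's actual code:

```
// Compile the LLVM-dependent path only when the `llvm` feature is enabled,
// and keep the crate buildable without it (e.g. for docs-only builds on
// Netlify, which have no LLVM installed) by providing a fallback.
#[cfg(feature = "llvm")]
fn code_gen() {
    // ...calls into inkwell / roc_gen_llvm would go here...
}

#[cfg(not(feature = "llvm"))]
fn code_gen() {
    panic!("this build was compiled without the `llvm` feature");
}

fn main() {
    code_gen();
}
```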

17
cli/benches/README.md Normal file
View file

@ -0,0 +1,17 @@
# Running the benchmarks
Install cargo criterion:
```
cargo install cargo-criterion
```
To prevent stack overflow on the `CFold` benchmark:
```
ulimit -s unlimited
```
In the `cli` folder execute:
```
cargo criterion
```

View file

@ -1,5 +1,5 @@
use cli_utils::bench_utils::{ use cli_utils::bench_utils::{
bench_cfold, bench_deriv, bench_nqueens, bench_rbtree_ck, bench_rbtree_delete, bench_cfold, bench_deriv, bench_nqueens, bench_quicksort, bench_rbtree_ck, bench_rbtree_delete,
}; };
use criterion::{ use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, SamplingMode, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, SamplingMode,
@ -7,17 +7,17 @@ use criterion::{
fn bench_group_wall_time(c: &mut Criterion) { fn bench_group_wall_time(c: &mut Criterion) {
let mut group = c.benchmark_group("bench-group_wall-time"); let mut group = c.benchmark_group("bench-group_wall-time");
// calculate statistics based on a fixed(flat) 100 runs // calculate statistics based on a fixed(flat) 200 runs
group.sampling_mode(SamplingMode::Flat); group.sampling_mode(SamplingMode::Flat);
group.sample_size(200); group.sample_size(200);
let bench_funcs: Vec<fn(Option<&mut BenchmarkGroup<WallTime>>) -> ()> = vec![ let bench_funcs: Vec<fn(Option<&mut BenchmarkGroup<WallTime>>) -> ()> = vec![
bench_nqueens, // queens 11 bench_nqueens, // queens 11
bench_cfold, // e = mkExpr 12 1 bench_cfold, // e = mkExpr 17 1
bench_deriv, // nest deriv 7 f bench_deriv, // nest deriv 8 f
bench_rbtree_ck, // ms = makeMap 5 5600 bench_rbtree_ck, // ms = makeMap 5 80000
bench_rbtree_delete, // m = makeMap 6000 bench_rbtree_delete, // m = makeMap 100000
// TODO quicksort bench_quicksort, // list size 10000
]; ];
for bench_func in bench_funcs.iter() { for bench_func in bench_funcs.iter() {

File diff suppressed because one or more lines are too long

View file

@ -5,8 +5,8 @@ use roc_build::{
}; };
use roc_can::builtins::builtin_defs_map; use roc_can::builtins::builtin_defs_map;
use roc_collections::all::MutMap; use roc_collections::all::MutMap;
use roc_gen::llvm::build::OptLevel;
use roc_load::file::LoadingProblem; use roc_load::file::LoadingProblem;
use roc_mono::ir::OptLevel;
use std::path::PathBuf; use std::path::PathBuf;
use std::time::{Duration, SystemTime}; use std::time::{Duration, SystemTime};
use target_lexicon::Triple; use target_lexicon::Triple;
@ -32,6 +32,7 @@ pub struct BuiltFile {
pub total_time: Duration, pub total_time: Duration,
} }
#[cfg(feature = "llvm")]
pub fn build_file<'a>( pub fn build_file<'a>(
arena: &'a Bump, arena: &'a Bump,
target: &Triple, target: &Triple,

View file

@ -1,12 +1,12 @@
#[macro_use] #[macro_use]
extern crate clap; extern crate clap;
use build::{build_file, BuildOutcome, BuiltFile}; use build::{BuildOutcome, BuiltFile};
use bumpalo::Bump; use bumpalo::Bump;
use clap::{App, AppSettings, Arg, ArgMatches}; use clap::{App, AppSettings, Arg, ArgMatches};
use roc_build::link::LinkType; use roc_build::link::LinkType;
use roc_gen::llvm::build::OptLevel;
use roc_load::file::LoadingProblem; use roc_load::file::LoadingProblem;
use roc_mono::ir::OptLevel;
use std::env; use std::env;
use std::io; use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -25,12 +25,13 @@ pub const CMD_DOCS: &str = "docs";
pub const FLAG_DEBUG: &str = "debug"; pub const FLAG_DEBUG: &str = "debug";
pub const FLAG_OPTIMIZE: &str = "optimize"; pub const FLAG_OPTIMIZE: &str = "optimize";
pub const FLAG_LIB: &str = "lib";
pub const ROC_FILE: &str = "ROC_FILE"; pub const ROC_FILE: &str = "ROC_FILE";
pub const DIRECTORY_OR_FILES: &str = "DIRECTORY_OR_FILES"; pub const DIRECTORY_OR_FILES: &str = "DIRECTORY_OR_FILES";
pub const ARGS_FOR_APP: &str = "ARGS_FOR_APP"; pub const ARGS_FOR_APP: &str = "ARGS_FOR_APP";
pub fn build_app<'a>() -> App<'a> { pub fn build_app<'a>() -> App<'a> {
App::new("roc") let app = App::new("roc")
.version(crate_version!()) .version(crate_version!())
.subcommand(App::new(CMD_BUILD) .subcommand(App::new(CMD_BUILD)
.about("Build a program") .about("Build a program")
@ -45,6 +46,12 @@ pub fn build_app<'a>() -> App<'a> {
.help("Optimize the compiled program to run faster. (Optimization takes time to complete.)") .help("Optimize the compiled program to run faster. (Optimization takes time to complete.)")
.required(false), .required(false),
) )
.arg(
Arg::with_name(FLAG_LIB)
.long(FLAG_LIB)
.help("Build a C library instead of an executable.")
.required(false),
)
.arg( .arg(
Arg::with_name(FLAG_DEBUG) Arg::with_name(FLAG_DEBUG)
.long(FLAG_DEBUG) .long(FLAG_DEBUG)
@ -81,15 +88,6 @@ pub fn build_app<'a>() -> App<'a> {
.subcommand(App::new(CMD_REPL) .subcommand(App::new(CMD_REPL)
.about("Launch the interactive Read Eval Print Loop (REPL)") .about("Launch the interactive Read Eval Print Loop (REPL)")
) )
.subcommand(App::new(CMD_EDIT)
.about("Launch the Roc editor")
.arg(Arg::with_name(DIRECTORY_OR_FILES)
.index(1)
.multiple(true)
.required(false)
.help("(optional) The directory or files to open on launch.")
)
)
.subcommand( .subcommand(
App::new(CMD_DOCS) App::new(CMD_DOCS)
.about("Generate documentation for Roc modules") .about("Generate documentation for Roc modules")
@ -100,7 +98,21 @@ pub fn build_app<'a>() -> App<'a> {
.help("The directory or files to build documentation for") .help("The directory or files to build documentation for")
) )
);
if cfg!(feature = "edit") {
app.subcommand(
App::new(CMD_EDIT).about("Launch the Roc editor").arg(
Arg::with_name(DIRECTORY_OR_FILES)
.index(1)
.multiple(true)
.required(false)
.help("(optional) The directory or files to open on launch."),
),
) )
} else {
app
}
} }
pub fn docs(files: Vec<PathBuf>) { pub fn docs(files: Vec<PathBuf>) {
@ -111,12 +123,15 @@ pub fn docs(files: Vec<PathBuf>) {
) )
} }
#[derive(Debug, PartialEq, Eq)]
pub enum BuildConfig { pub enum BuildConfig {
BuildOnly, BuildOnly,
BuildAndRun { roc_file_arg_index: usize }, BuildAndRun { roc_file_arg_index: usize },
} }
#[cfg(feature = "llvm")]
pub fn build(target: &Triple, matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> { pub fn build(target: &Triple, matches: &ArgMatches, config: BuildConfig) -> io::Result<i32> {
use build::build_file;
use BuildConfig::*; use BuildConfig::*;
let arena = Bump::new(); let arena = Bump::new();
@ -130,6 +145,12 @@ pub fn build(target: &Triple, matches: &ArgMatches, config: BuildConfig) -> io::
}; };
let emit_debug_info = matches.is_present(FLAG_DEBUG); let emit_debug_info = matches.is_present(FLAG_DEBUG);
let link_type = if matches.is_present(FLAG_LIB) {
LinkType::Dylib
} else {
LinkType::Executable
};
let path = Path::new(filename).canonicalize().unwrap(); let path = Path::new(filename).canonicalize().unwrap();
let src_dir = path.parent().unwrap().canonicalize().unwrap(); let src_dir = path.parent().unwrap().canonicalize().unwrap();
@ -159,7 +180,7 @@ pub fn build(target: &Triple, matches: &ArgMatches, config: BuildConfig) -> io::
path, path,
opt_level, opt_level,
emit_debug_info, emit_debug_info,
LinkType::Executable, link_type,
); );
match res_binary_path { match res_binary_path {

View file

@ -1,17 +1,26 @@
use roc_cli::{ use roc_cli::{
build, build_app, docs, repl, BuildConfig, CMD_BUILD, CMD_DOCS, CMD_EDIT, CMD_REPL, CMD_RUN, build_app, docs, repl, BuildConfig, CMD_BUILD, CMD_DOCS, CMD_EDIT, CMD_REPL, CMD_RUN,
DIRECTORY_OR_FILES, ROC_FILE, DIRECTORY_OR_FILES, ROC_FILE,
}; };
use std::fs::{self, FileType};
use std::io; use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use target_lexicon::Triple; use target_lexicon::Triple;
#[cfg(feature = "llvm")]
use roc_cli::build;
#[cfg(not(feature = "llvm"))]
fn build(_target: &Triple, _matches: &clap::ArgMatches, _config: BuildConfig) -> io::Result<i32> {
panic!("Building without LLVM is not currently supported.");
}
fn main() -> io::Result<()> { fn main() -> io::Result<()> {
let matches = build_app().get_matches(); let matches = build_app().get_matches();
let exit_code = match matches.subcommand_name() { let exit_code = match matches.subcommand_name() {
None => { None => {
roc_editor::launch(&[])?; launch_editor(&[])?;
// rustc couldn't infer the error type here // rustc couldn't infer the error type here
Result::<i32, io::Error>::Ok(0) Result::<i32, io::Error>::Ok(0)
@ -44,14 +53,14 @@ fn main() -> io::Result<()> {
.values_of_os(DIRECTORY_OR_FILES) .values_of_os(DIRECTORY_OR_FILES)
{ {
None => { None => {
roc_editor::launch(&[])?; launch_editor(&[])?;
} }
Some(values) => { Some(values) => {
let paths = values let paths = values
.map(|os_str| Path::new(os_str)) .map(|os_str| Path::new(os_str))
.collect::<Vec<&Path>>(); .collect::<Vec<&Path>>();
roc_editor::launch(&paths)?; launch_editor(&paths)?;
} }
} }
@ -65,11 +74,16 @@ fn main() -> io::Result<()> {
.values_of_os(DIRECTORY_OR_FILES) .values_of_os(DIRECTORY_OR_FILES)
.unwrap(); .unwrap();
let paths = values let mut roc_files = Vec::new();
.map(|os_str| Path::new(os_str).to_path_buf())
.collect::<Vec<PathBuf>>();
docs(paths); // Populate roc_files
for os_str in values {
let metadata = fs::metadata(os_str)?;
roc_files_recursive(os_str, metadata.file_type(), &mut roc_files)?;
}
docs(roc_files);
Ok(0) Ok(0)
} }
@ -78,3 +92,31 @@ fn main() -> io::Result<()> {
std::process::exit(exit_code); std::process::exit(exit_code);
} }
fn roc_files_recursive<P: AsRef<Path>>(
path: P,
file_type: FileType,
roc_files: &mut Vec<PathBuf>,
) -> io::Result<()> {
if file_type.is_dir() {
for entry_res in fs::read_dir(path)? {
let entry = entry_res?;
roc_files_recursive(entry.path(), entry.file_type()?, roc_files)?;
}
} else {
roc_files.push(path.as_ref().to_path_buf());
}
Ok(())
}
#[cfg(feature = "editor")]
fn launch_editor(filepaths: &[&Path]) -> io::Result<()> {
roc_editor::launch(filepaths)
}
#[cfg(not(feature = "editor"))]
fn launch_editor(_filepaths: &[&Path]) -> io::Result<()> {
panic!("Cannot launch the editor because this build of roc did not include `feature = \"editor\"`!");
}

View file

@ -1,15 +1,12 @@
use const_format::concatcp; use const_format::concatcp;
#[cfg(feature = "llvm")]
use gen::{gen_and_eval, ReplOutput}; use gen::{gen_and_eval, ReplOutput};
use roc_gen::llvm::build::OptLevel;
use roc_parse::parser::{EExpr, SyntaxError}; use roc_parse::parser::{EExpr, SyntaxError};
use rustyline::error::ReadlineError;
use rustyline::highlight::{Highlighter, PromptInfo}; use rustyline::highlight::{Highlighter, PromptInfo};
use rustyline::validate::{self, ValidationContext, ValidationResult, Validator}; use rustyline::validate::{self, ValidationContext, ValidationResult, Validator};
use rustyline::Editor;
use rustyline_derive::{Completer, Helper, Hinter}; use rustyline_derive::{Completer, Helper, Hinter};
use std::borrow::Cow; use std::borrow::Cow;
use std::io; use std::io;
use target_lexicon::Triple;
const BLUE: &str = "\u{001b}[36m"; const BLUE: &str = "\u{001b}[36m";
const PINK: &str = "\u{001b}[35m"; const PINK: &str = "\u{001b}[35m";
@ -30,7 +27,9 @@ pub const INSTRUCTIONS: &str = "Enter an expression, or :help, or :exit/:q.\n";
pub const PROMPT: &str = concatcp!("\n", BLUE, "»", END_COL, " "); pub const PROMPT: &str = concatcp!("\n", BLUE, "»", END_COL, " ");
pub const CONT_PROMPT: &str = concatcp!(BLUE, "", END_COL, " "); pub const CONT_PROMPT: &str = concatcp!(BLUE, "", END_COL, " ");
#[cfg(feature = "llvm")]
mod eval; mod eval;
#[cfg(feature = "llvm")]
mod gen; mod gen;
#[derive(Completer, Helper, Hinter)] #[derive(Completer, Helper, Hinter)]
@ -107,7 +106,16 @@ impl Validator for InputValidator {
} }
} }
#[cfg(not(feature = "llvm"))]
pub fn main() -> io::Result<()> { pub fn main() -> io::Result<()> {
panic!("The REPL currently requires being built with LLVM.");
}
#[cfg(feature = "llvm")]
pub fn main() -> io::Result<()> {
use rustyline::error::ReadlineError;
use rustyline::Editor;
// To debug rustyline: // To debug rustyline:
// <UNCOMMENT> env_logger::init(); // <UNCOMMENT> env_logger::init();
// <RUN WITH:> RUST_LOG=rustyline=debug cargo run repl 2> debug.log // <RUN WITH:> RUST_LOG=rustyline=debug cargo run repl 2> debug.log
@ -226,7 +234,11 @@ fn report_parse_error(fail: SyntaxError) {
println!("TODO Gracefully report parse error in repl: {:?}", fail); println!("TODO Gracefully report parse error in repl: {:?}", fail);
} }
#[cfg(feature = "llvm")]
fn eval_and_format<'a>(src: &str) -> Result<String, SyntaxError<'a>> { fn eval_and_format<'a>(src: &str) -> Result<String, SyntaxError<'a>> {
use roc_mono::ir::OptLevel;
use target_lexicon::Triple;
gen_and_eval(src.as_bytes(), Triple::host(), OptLevel::Normal).map(|output| match output { gen_and_eval(src.as_bytes(), Triple::host(), OptLevel::Normal).map(|output| match output {
ReplOutput::NoProblems { expr, expr_type } => { ReplOutput::NoProblems { expr, expr_type } => {
format!("\n{} {}:{} {}", expr, PINK, END_COL, expr_type) format!("\n{} {}:{} {}", expr, PINK, END_COL, expr_type)

View file

@ -2,7 +2,7 @@ use bumpalo::collections::Vec;
use bumpalo::Bump; use bumpalo::Bump;
use libloading::Library; use libloading::Library;
use roc_collections::all::MutMap; use roc_collections::all::MutMap;
use roc_gen::{run_jit_function, run_jit_function_dynamic_type}; use roc_gen_llvm::{run_jit_function, run_jit_function_dynamic_type};
use roc_module::ident::{Lowercase, TagName}; use roc_module::ident::{Lowercase, TagName};
use roc_module::operator::CalledVia; use roc_module::operator::CalledVia;
use roc_module::symbol::{Interns, ModuleId, Symbol}; use roc_module::symbol::{Interns, ModuleId, Symbol};

View file

@ -8,9 +8,9 @@ use roc_can::builtins::builtin_defs_map;
use roc_collections::all::{MutMap, MutSet}; use roc_collections::all::{MutMap, MutSet};
use roc_fmt::annotation::Formattable; use roc_fmt::annotation::Formattable;
use roc_fmt::annotation::{Newlines, Parens}; use roc_fmt::annotation::{Newlines, Parens};
use roc_gen::llvm::build::OptLevel; use roc_gen_llvm::llvm::externs::add_default_roc_externs;
use roc_gen::llvm::externs::add_default_roc_externs;
use roc_load::file::LoadingProblem; use roc_load::file::LoadingProblem;
use roc_mono::ir::OptLevel;
use roc_parse::parser::SyntaxError; use roc_parse::parser::SyntaxError;
use roc_types::pretty_print::{content_to_string, name_all_type_vars}; use roc_types::pretty_print::{content_to_string, name_all_type_vars};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -130,7 +130,9 @@ pub fn gen_and_eval<'a>(
let context = Context::create(); let context = Context::create();
let builder = context.create_builder(); let builder = context.create_builder();
let ptr_bytes = target.pointer_width().unwrap().bytes() as u32; let ptr_bytes = target.pointer_width().unwrap().bytes() as u32;
let module = arena.alloc(roc_gen::llvm::build::module_from_builtins(&context, "")); let module = arena.alloc(roc_gen_llvm::llvm::build::module_from_builtins(
&context, "",
));
// Add roc_alloc, roc_realloc, and roc_dealloc, since the repl has no // Add roc_alloc, roc_realloc, and roc_dealloc, since the repl has no
// platform to provide them. // platform to provide them.
@ -166,12 +168,12 @@ pub fn gen_and_eval<'a>(
let module = arena.alloc(module); let module = arena.alloc(module);
let (module_pass, function_pass) = let (module_pass, function_pass) =
roc_gen::llvm::build::construct_optimization_passes(module, opt_level); roc_gen_llvm::llvm::build::construct_optimization_passes(module, opt_level);
let (dibuilder, compile_unit) = roc_gen::llvm::build::Env::new_debug_info(module); let (dibuilder, compile_unit) = roc_gen_llvm::llvm::build::Env::new_debug_info(module);
// Compile and add all the Procs before adding main // Compile and add all the Procs before adding main
let env = roc_gen::llvm::build::Env { let env = roc_gen_llvm::llvm::build::Env {
arena: &arena, arena: &arena,
builder: &builder, builder: &builder,
dibuilder: &dibuilder, dibuilder: &dibuilder,
@ -185,7 +187,7 @@ pub fn gen_and_eval<'a>(
exposed_to_host: MutSet::default(), exposed_to_host: MutSet::default(),
}; };
let (main_fn_name, main_fn) = roc_gen::llvm::build::build_procedures_return_main( let (main_fn_name, main_fn) = roc_gen_llvm::llvm::build::build_procedures_return_main(
&env, &env,
opt_level, opt_level,
procedures, procedures,

View file

@ -7,7 +7,7 @@ extern crate indoc;
#[cfg(test)] #[cfg(test)]
mod repl_eval { mod repl_eval {
use cli_utils::helpers; use cli_utils::helpers;
use roc_gen::run_roc::RocCallResult; use roc_gen_llvm::run_roc::RocCallResult;
#[test] #[test]
fn check_discriminant_size() { fn check_discriminant_size() {
@ -16,7 +16,8 @@ mod repl_eval {
let value: i64 = 1234; let value: i64 = 1234;
assert_eq!( assert_eq!(
std::mem::size_of_val(&RocCallResult::Success(value)), std::mem::size_of_val(&RocCallResult::Success(value)),
roc_gen::run_roc::ROC_CALL_RESULT_DISCRIMINANT_SIZE + std::mem::size_of_val(&value) roc_gen_llvm::run_roc::ROC_CALL_RESULT_DISCRIMINANT_SIZE
+ std::mem::size_of_val(&value)
) )
} }

View file

@ -19,7 +19,7 @@ roc_unify = { path = "../unify" }
roc_solve = { path = "../solve" } roc_solve = { path = "../solve" }
roc_mono = { path = "../mono" } roc_mono = { path = "../mono" }
roc_load = { path = "../load" } roc_load = { path = "../load" }
roc_gen = { path = "../gen" } roc_gen_llvm = { path = "../gen_llvm", optional = true }
roc_reporting = { path = "../reporting" } roc_reporting = { path = "../reporting" }
im = "14" # im and im-rc should always have the same version! im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version! im-rc = "14" # im and im-rc should always have the same version!
@ -28,7 +28,7 @@ inlinable_string = "0.1.0"
libloading = "0.6" libloading = "0.6"
tempfile = "3.1.0" tempfile = "3.1.0"
serde_json = "1.0" serde_json = "1.0"
inkwell = { path = "../../vendor/inkwell" } inkwell = { path = "../../vendor/inkwell", optional = true }
target-lexicon = "0.10" target-lexicon = "0.10"
[dev-dependencies] [dev-dependencies]
@ -39,6 +39,10 @@ quickcheck = "0.8"
quickcheck_macros = "0.8" quickcheck_macros = "0.8"
[features] [features]
default = ["llvm"]
target-arm = [] target-arm = []
target-aarch64 = [] target-aarch64 = []
target-webassembly = [] target-webassembly = []
# This is a separate feature because when we generate docs on Netlify,
# it doesn't have LLVM installed. (Also, it doesn't need to do code gen.)
llvm = ["inkwell", "roc_gen_llvm"]

View file

@ -1,21 +1,21 @@
use crate::target;
use crate::target::arch_str; use crate::target::arch_str;
use inkwell::module::Module; #[cfg(feature = "llvm")]
use inkwell::targets::{CodeModel, FileType, RelocMode};
use libloading::{Error, Library}; use libloading::{Error, Library};
use roc_gen::llvm::build::OptLevel; #[cfg(feature = "llvm")]
use roc_mono::ir::OptLevel;
use std::collections::HashMap; use std::collections::HashMap;
use std::env; use std::env;
use std::io; use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::{Child, Command, Output}; use std::process::{Child, Command, Output};
use target_lexicon::{Architecture, OperatingSystem, Triple}; use target_lexicon::{Architecture, OperatingSystem, Triple};
use tempfile::tempdir;
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum LinkType { pub enum LinkType {
Executable, // These numbers correspond to the --lib flag; if it's present
Dylib, // (e.g. is_present returns `1 as bool`), this will be 1 as well.
Executable = 0,
Dylib = 1,
} }
/// input_paths can include the host as well as the app. e.g. &["host.o", "roc_app.o"] /// input_paths can include the host as well as the app. e.g. &["host.o", "roc_app.o"]
@ -360,6 +360,9 @@ fn link_linux(
}; };
let env_path = env::var("PATH").unwrap_or_else(|_| "".to_string()); let env_path = env::var("PATH").unwrap_or_else(|_| "".to_string());
init_arch(target);
// NOTE: order of arguments to `ld` matters here! // NOTE: order of arguments to `ld` matters here!
// The `-l` flags should go after the `.o` arguments // The `-l` flags should go after the `.o` arguments
Ok(( Ok((
@ -477,12 +480,16 @@ fn link_macos(
)) ))
} }
#[cfg(feature = "llvm")]
pub fn module_to_dylib( pub fn module_to_dylib(
module: &Module, module: &inkwell::module::Module,
target: &Triple, target: &Triple,
opt_level: OptLevel, opt_level: OptLevel,
) -> Result<Library, Error> { ) -> Result<Library, Error> {
let dir = tempdir().unwrap(); use crate::target::{self, convert_opt_level};
use inkwell::targets::{CodeModel, FileType, RelocMode};
let dir = tempfile::tempdir().unwrap();
let filename = PathBuf::from("Test.roc"); let filename = PathBuf::from("Test.roc");
let file_path = dir.path().join(filename); let file_path = dir.path().join(filename);
let mut app_o_file = file_path; let mut app_o_file = file_path;
@ -492,7 +499,8 @@ pub fn module_to_dylib(
// Emit the .o file using position-independent code (PIC) - needed for dylibs // Emit the .o file using position-independent code (PIC) - needed for dylibs
let reloc = RelocMode::PIC; let reloc = RelocMode::PIC;
let model = CodeModel::Default; let model = CodeModel::Default;
let target_machine = target::target_machine(target, opt_level.into(), reloc, model).unwrap(); let target_machine =
target::target_machine(target, convert_opt_level(opt_level), reloc, model).unwrap();
target_machine target_machine
.write_to_file(module, FileType::Object, &app_o_file) .write_to_file(module, FileType::Object, &app_o_file)
@ -529,3 +537,13 @@ fn validate_output(file_name: &str, cmd_name: &str, output: Output) {
} }
} }
} }
#[cfg(feature = "llvm")]
fn init_arch(target: &Triple) {
crate::target::init_arch(target);
}
#[cfg(not(feature = "llvm"))]
fn init_arch(_target: &Triple) {
panic!("Tried to initialize LLVM when crate was not built with `feature = \"llvm\"` enabled");
}

View file

@ -1,13 +1,14 @@
use crate::target; #[cfg(feature = "llvm")]
use bumpalo::Bump; use roc_gen_llvm::llvm::build::module_from_builtins;
use inkwell::context::Context; #[cfg(feature = "llvm")]
use inkwell::targets::{CodeModel, FileType, RelocMode}; pub use roc_gen_llvm::llvm::build::FunctionIterator;
pub use roc_gen::llvm::build::FunctionIterator; #[cfg(feature = "llvm")]
use roc_gen::llvm::build::{module_from_builtins, OptLevel};
use roc_load::file::MonomorphizedModule; use roc_load::file::MonomorphizedModule;
#[cfg(feature = "llvm")]
use roc_mono::ir::OptLevel;
#[cfg(feature = "llvm")]
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime}; use std::time::Duration;
use target_lexicon::Triple;
#[derive(Debug, Clone, Copy, Default)] #[derive(Debug, Clone, Copy, Default)]
pub struct CodeGenTiming { pub struct CodeGenTiming {
@ -18,16 +19,24 @@ pub struct CodeGenTiming {
// TODO how should imported modules factor into this? What if those use builtins too? // TODO how should imported modules factor into this? What if those use builtins too?
// TODO this should probably use more helper functions // TODO this should probably use more helper functions
// TODO make this polymorphic in the llvm functions so it can be reused for another backend. // TODO make this polymorphic in the llvm functions so it can be reused for another backend.
#[cfg(feature = "llvm")]
#[allow(clippy::cognitive_complexity)] #[allow(clippy::cognitive_complexity)]
pub fn gen_from_mono_module( pub fn gen_from_mono_module(
arena: &Bump, arena: &bumpalo::Bump,
mut loaded: MonomorphizedModule, mut loaded: MonomorphizedModule,
roc_file_path: &Path, roc_file_path: &Path,
target: Triple, target: target_lexicon::Triple,
app_o_file: &Path, app_o_file: &Path,
opt_level: OptLevel, opt_level: OptLevel,
emit_debug_info: bool, emit_debug_info: bool,
) -> CodeGenTiming { ) -> CodeGenTiming {
use crate::target::{self, convert_opt_level};
use inkwell::attributes::{Attribute, AttributeLoc};
use inkwell::context::Context;
use inkwell::module::Linkage;
use inkwell::targets::{CodeModel, FileType, RelocMode};
use std::time::SystemTime;
use roc_reporting::report::{ use roc_reporting::report::{
can_problem, mono_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE, can_problem, mono_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE,
}; };
@ -87,9 +96,6 @@ pub fn gen_from_mono_module(
// module.strip_debug_info(); // module.strip_debug_info();
// mark our zig-defined builtins as internal // mark our zig-defined builtins as internal
use inkwell::attributes::{Attribute, AttributeLoc};
use inkwell::module::Linkage;
let app_ll_file = { let app_ll_file = {
let mut temp = PathBuf::from(roc_file_path); let mut temp = PathBuf::from(roc_file_path);
temp.set_extension("ll"); temp.set_extension("ll");
@ -119,12 +125,12 @@ pub fn gen_from_mono_module(
} }
let builder = context.create_builder(); let builder = context.create_builder();
let (dibuilder, compile_unit) = roc_gen::llvm::build::Env::new_debug_info(module); let (dibuilder, compile_unit) = roc_gen_llvm::llvm::build::Env::new_debug_info(module);
let (mpm, _fpm) = roc_gen::llvm::build::construct_optimization_passes(module, opt_level); let (mpm, _fpm) = roc_gen_llvm::llvm::build::construct_optimization_passes(module, opt_level);
// Compile and add all the Procs before adding main // Compile and add all the Procs before adding main
let ptr_bytes = target.pointer_width().unwrap().bytes() as u32; let ptr_bytes = target.pointer_width().unwrap().bytes() as u32;
let env = roc_gen::llvm::build::Env { let env = roc_gen_llvm::llvm::build::Env {
arena: &arena, arena: &arena,
builder: &builder, builder: &builder,
dibuilder: &dibuilder, dibuilder: &dibuilder,
@ -137,7 +143,7 @@ pub fn gen_from_mono_module(
exposed_to_host: loaded.exposed_to_host.keys().copied().collect(), exposed_to_host: loaded.exposed_to_host.keys().copied().collect(),
}; };
roc_gen::llvm::build::build_procedures(&env, opt_level, loaded.procedures); roc_gen_llvm::llvm::build::build_procedures(&env, opt_level, loaded.procedures);
env.dibuilder.finalize(); env.dibuilder.finalize();
@ -226,7 +232,7 @@ pub fn gen_from_mono_module(
let reloc = RelocMode::Default; let reloc = RelocMode::Default;
let model = CodeModel::Default; let model = CodeModel::Default;
let target_machine = let target_machine =
target::target_machine(&target, opt_level.into(), reloc, model).unwrap(); target::target_machine(&target, convert_opt_level(opt_level), reloc, model).unwrap();
target_machine target_machine
.write_to_file(&env.module, FileType::Object, &app_o_file) .write_to_file(&env.module, FileType::Object, &app_o_file)

View file

@ -1,7 +1,10 @@
use inkwell::targets::{ #[cfg(feature = "llvm")]
CodeModel, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple, use inkwell::{
targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple},
OptimizationLevel,
}; };
use inkwell::OptimizationLevel; #[cfg(feature = "llvm")]
use roc_mono::ir::OptLevel;
use target_lexicon::{Architecture, OperatingSystem, Triple}; use target_lexicon::{Architecture, OperatingSystem, Triple};
pub fn target_triple_str(target: &Triple) -> &'static str { pub fn target_triple_str(target: &Triple) -> &'static str {
@ -28,36 +31,20 @@ pub fn target_triple_str(target: &Triple) -> &'static str {
} }
} }
/// NOTE: arch_str is *not* the same as the beginning of the magic target triple #[cfg(feature = "llvm")]
/// string! For example, if it's "x86-64" here, the magic target triple string pub fn init_arch(target: &Triple) {
/// will begin with "x86_64" (with an underscore) instead.
pub fn arch_str(target: &Triple) -> &'static str {
// Best guide I've found on how to determine these magic strings:
//
// https://stackoverflow.com/questions/15036909/clang-how-to-list-supported-target-architectures
match target.architecture { match target.architecture {
Architecture::X86_64 => { Architecture::X86_64 => {
Target::initialize_x86(&InitializationConfig::default()); Target::initialize_x86(&InitializationConfig::default());
"x86-64"
} }
Architecture::Aarch64(_) if cfg!(feature = "target-aarch64") => { Architecture::Aarch64(_) if cfg!(feature = "target-aarch64") => {
Target::initialize_aarch64(&InitializationConfig::default()); Target::initialize_aarch64(&InitializationConfig::default());
"aarch64"
} }
Architecture::Arm(_) if cfg!(feature = "target-arm") => { Architecture::Arm(_) if cfg!(feature = "target-arm") => {
// NOTE: why not enable arm and wasm by default?
//
// We had some trouble getting them to link properly. This may be resolved in the
// future, or maybe it was just some weird configuration on one machine.
Target::initialize_arm(&InitializationConfig::default()); Target::initialize_arm(&InitializationConfig::default());
"arm"
} }
Architecture::Wasm32 if cfg!(feature = "target-webassembly") => { Architecture::Wasm32 if cfg!(feature = "target-webassembly") => {
Target::initialize_webassembly(&InitializationConfig::default()); Target::initialize_webassembly(&InitializationConfig::default());
"wasm32"
} }
_ => panic!( _ => panic!(
"TODO gracefully handle unsupported target architecture: {:?}", "TODO gracefully handle unsupported target architecture: {:?}",
@ -66,6 +53,26 @@ pub fn arch_str(target: &Triple) -> &'static str {
} }
} }
/// NOTE: arch_str is *not* the same as the beginning of the magic target triple
/// string! For example, if it's "x86-64" here, the magic target triple string
/// will begin with "x86_64" (with an underscore) instead.
pub fn arch_str(target: &Triple) -> &'static str {
// Best guide I've found on how to determine these magic strings:
//
// https://stackoverflow.com/questions/15036909/clang-how-to-list-supported-target-architectures
match target.architecture {
Architecture::X86_64 => "x86-64",
Architecture::Aarch64(_) if cfg!(feature = "target-aarch64") => "aarch64",
Architecture::Arm(_) if cfg!(feature = "target-arm") => "arm",
Architecture::Wasm32 if cfg!(feature = "target-webassembly") => "wasm32",
_ => panic!(
"TODO gracefully handle unsupported target architecture: {:?}",
target.architecture
),
}
}
#[cfg(feature = "llvm")]
pub fn target_machine( pub fn target_machine(
target: &Triple, target: &Triple,
opt: OptimizationLevel, opt: OptimizationLevel,
@ -74,6 +81,8 @@ pub fn target_machine(
) -> Option<TargetMachine> { ) -> Option<TargetMachine> {
let arch = arch_str(target); let arch = arch_str(target);
init_arch(target);
Target::from_name(arch).unwrap().create_target_machine( Target::from_name(arch).unwrap().create_target_machine(
&TargetTriple::create(target_triple_str(target)), &TargetTriple::create(target_triple_str(target)),
"generic", "generic",
@ -83,3 +92,11 @@ pub fn target_machine(
model, model,
) )
} }
#[cfg(feature = "llvm")]
pub fn convert_opt_level(level: OptLevel) -> OptimizationLevel {
match level {
OptLevel::Normal => OptimizationLevel::None,
OptLevel::Optimize => OptimizationLevel::Aggressive,
}
}

View file

@ -9,15 +9,15 @@ Towards the bottom of `symbol.rs` there is a `define_builtins!` macro being used
Some of these have `#` inside their name (`first#list`, `#lt` ..). This is a trick we are doing to hide implementation details from Roc programmers. To a Roc programmer, a name with `#` in it is invalid, because `#` means everything after it is parsed to a comment. We are constructing these functions manually, so we are circumventing the parsing step and dont have such restrictions. We get to make functions and values with `#` which as a consequence are not accessible to Roc programmers. Roc programmers simply cannot reference them. Some of these have `#` inside their name (`first#list`, `#lt` ..). This is a trick we are doing to hide implementation details from Roc programmers. To a Roc programmer, a name with `#` in it is invalid, because `#` means everything after it is parsed to a comment. We are constructing these functions manually, so we are circumventing the parsing step and dont have such restrictions. We get to make functions and values with `#` which as a consequence are not accessible to Roc programmers. Roc programmers simply cannot reference them.
But we can use these values and some of these are necessary for implementing builtins. For example, `List.get` returns tags, and it is not easy for us to create tags when composing LLVM. What is easier however, is: But we can use these values and some of these are necessary for implementing builtins. For example, `List.get` returns tags, and it is not easy for us to create tags when composing LLVM. What is easier however, is:
- ..writing `List.#getUnsafe` that has the dangerous signature of `List elem, Int -> elem` in LLVM - ..writing `List.#getUnsafe` that has the dangerous signature of `List elem, Nat -> elem` in LLVM
- ..writing `List elem, Int -> Result elem [ OutOfBounds ]*` in a type safe way that uses `getUnsafe` internally, only after it checks if the `elem` at `Int` index exists. - ..writing `List elem, Nat -> Result elem [ OutOfBounds ]*` in a type safe way that uses `getUnsafe` internally, only after it checks if the `elem` at `Nat` index exists.
### can/src/builtins.rs ### can/src/builtins.rs
Right at the top of this module is a function called `builtin_defs`. All this is doing is mapping the `Symbol` defined in `module/src/symbol.rs` to its implementation. Some of the builtins are quite complex, such as `list_get`. What makes `list_get` complex is that it returns tags, and in order to return tags it first has to defer to lower-level functions via an if statement. Right at the top of this module is a function called `builtin_defs`. All this is doing is mapping the `Symbol` defined in `module/src/symbol.rs` to its implementation. Some of the builtins are quite complex, such as `list_get`. What makes `list_get` complex is that it returns tags, and in order to return tags it first has to defer to lower-level functions via an if statement.
Lets look at `List.repeat : elem, Int -> List elem`, which is more straight-forward, and points directly to its lower level implementation: Lets look at `List.repeat : elem, Nat -> List elem`, which is more straight-forward, and points directly to its lower level implementation:
``` ```
fn list_repeat(symbol: Symbol, var_store: &mut VarStore) -> Def { fn list_repeat(symbol: Symbol, var_store: &mut VarStore) -> Def {
let elem_var = var_store.fresh(); let elem_var = var_store.fresh();
@ -42,7 +42,7 @@ fn list_repeat(symbol: Symbol, var_store: &mut VarStore) -> Def {
) )
} }
``` ```
In these builtin definitions you will need to allocate for and list the arguments. For `List.repeat`, the arguments are the `elem_var` and the `len_var`. So in both the `body` and `defn` we list these arguments in a vector, with the `Symbol::ARG_1` adn `Symbol::ARG_2` designating which argument is which. In these builtin definitions you will need to allocate for and list the arguments. For `List.repeat`, the arguments are the `elem_var` and the `len_var`. So in both the `body` and `defn` we list these arguments in a vector, with the `Symbol::ARG_1` and `Symbol::ARG_2` designating which argument is which.
Since `List.repeat` is implemented entirely as low level functions, its `body` is a `RunLowLevel`, and the `op` is `LowLevel::ListRepeat`. Lets talk about `LowLevel` in the next section. Since `List.repeat` is implemented entirely as low level functions, its `body` is a `RunLowLevel`, and the `op` is `LowLevel::ListRepeat`. Lets talk about `LowLevel` in the next section.
@ -60,7 +60,7 @@ Its one thing to actually write these functions, its _another_ thing to let the
## Specifying how we pass args to the function ## Specifying how we pass args to the function
### builtins/mono/src/borrow.rs ### builtins/mono/src/borrow.rs
After we have all of this, we need to specify if the arguements we're passing are owned, borrowed or irrelevant. Towards the bottom of this file, add a new case for your builtin and specify each arg. Be sure to read the comment, as it explains this in more detail. After we have all of this, we need to specify if the arguments we're passing are owned, borrowed or irrelevant. Towards the bottom of this file, add a new case for your builtin and specify each arg. Be sure to read the comment, as it explains this in more detail.
## Specifying the uniqueness of a function ## Specifying the uniqueness of a function
### builtins/src/unique.rs ### builtins/src/unique.rs

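The `builtin_defs` mapping described in this README excerpt is, at its core, a lookup from a builtin's `Symbol` to the `Def` that implements it. The following sketch uses hypothetical, simplified types purely for illustration; it is not the actual `roc_can` API:

```
// Hypothetical, simplified stand-ins; the real code uses the compiler's own
// Symbol, VarStore, and Def types.
#[derive(Debug)]
enum LowLevel {
    ListRepeat,
}

#[derive(Debug)]
struct Def {
    name: &'static str,
    op: LowLevel,
    // The arguments the body passes on (ARG_1 = the element, ARG_2 = the length).
    args: Vec<&'static str>,
}

// Mirrors the idea of `builtin_defs`: map a builtin to its implementation.
fn builtin_def(name: &str) -> Option<Def> {
    match name {
        "List.repeat" => Some(Def {
            name: "List.repeat",
            op: LowLevel::ListRepeat,
            args: vec!["ARG_1", "ARG_2"],
        }),
        _ => None,
    }
}

fn main() {
    println!("{:?}", builtin_def("List.repeat"));
}
```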
View file

@ -3,10 +3,10 @@
## Adding a bitcode builtin ## Adding a bitcode builtin
To add a builtin: To add a builtin:
1. Add the function to the relevent module. For `Num` builtins use `src/num.zig`, for `Str` builtins use `src/str.zig`, and so on. **For anything you add, you must add tests for it!** Not only does this make the builtins more maintainable, it's the easiest way to test these functions on Zig. To run the tests, run: `zig build test` 1. Add the function to the relevant module. For `Num` builtins use `src/num.zig`, for `Str` builtins use `src/str.zig`, and so on. **For anything you add, you must add tests for it!** Not only does this make the builtins more maintainable, it's the easiest way to test these functions on Zig. To run the tests, run: `zig build test`
2. Make sure the function is public with the `pub` keyword and uses the C calling convention. This is really easy, just add `pub` and `callconv(.C)` to the function declaration like so: `pub fn atan(num: f64) callconv(.C) f64 { ... }` 2. Make sure the function is public with the `pub` keyword and uses the C calling convention. This is really easy, just add `pub` and `callconv(.C)` to the function declaration like so: `pub fn atan(num: f64) callconv(.C) f64 { ... }`
3. In `src/main.zig`, export the function. This is also organized by module. For example, for a `Num` function find the `Num` section and add: `comptime { exportNumFn(num.atan, "atan"); }`. The first arguement is the function, the second is the name of it in LLVM. 3. In `src/main.zig`, export the function. This is also organized by module. For example, for a `Num` function find the `Num` section and add: `comptime { exportNumFn(num.atan, "atan"); }`. The first argument is the function, the second is the name of it in LLVM.
4. In `compiler/builtins/src/bitcode.rs`, add a constant for the new function. This is how we use it in Rust. Once again, this is organized by module, so just find the relevent area and add your new function. 4. In `compiler/builtins/src/bitcode.rs`, add a constant for the new function. This is how we use it in Rust. Once again, this is organized by module, so just find the relevant area and add your new function.
5. You can now your function in Rust using `call_bitcode_fn` in `llvm/src/build.rs`! 5. You can now your function in Rust using `call_bitcode_fn` in `llvm/src/build.rs`!
## How it works ## How it works
@ -32,4 +32,4 @@ There will be two directories like `roc_builtins-[some random characters]`, look
## Calling bitcode functions ## Calling bitcode functions
use the `call_bitcode_fn` function defined in `llvm/src/build.rs` to call bitcode funcitons. use the `call_bitcode_fn` function defined in `llvm/src/build.rs` to call bitcode functions.
@@ -6,4 +6,4 @@ set -euxo pipefail
zig build test
# fmt every zig
find src/*.zig -type f -print0 | xargs -n 1 -0 zig fmt --check || (echo "zig fmt --check FAILED! Check the previous lines to see which files were improperly formatted." && exit 1)
@@ -176,7 +176,7 @@ fn mul_and_decimalize(a: u128, b: u128) i128 {
// floor(2^315/10^18) is 66749594872528440074844428317798503581334516323645399060845050244444366430645
// Add 1.
// This can't overflow because the initial numbers are only 127bit due to removing the sign bit.
var overflowed = @addWithOverflow(u128, lhs_lo, 1, &lhs_lo);
lhs_hi = blk: {
if (overflowed) {
@@ -9,6 +9,20 @@ use std::str;
fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
let dest_obj_path = Path::new(&out_dir).join("builtins.o");
let dest_obj = dest_obj_path.to_str().expect("Invalid dest object path");
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rustc-env=BUILTINS_O={}", dest_obj);
// When we build on Netlify, zig is not installed (but also not used,
// since all we're doing is generating docs), so we can skip the steps
// that require having zig installed.
if env::var_os("NO_ZIG_INSTALLED").is_some() {
// We still need to do the other things before this point, because
// setting the env vars is needed for other parts of the build.
return;
}
let big_sur_path = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/lib";
let use_build_script = Path::new(big_sur_path).exists();
@@ -34,8 +48,6 @@ fn main() {
run_command(&bitcode_path, "zig", &["build", "ir", "-Drelease=true"]);
}
let dest_obj_path = Path::new(&out_dir).join("builtins.o");
let dest_obj = dest_obj_path.to_str().expect("Invalid dest object path");
println!("Moving zig object to: {}", dest_obj); println!("Moving zig object to: {}", dest_obj);
run_command(&bitcode_path, "mv", &[src_obj, dest_obj]); run_command(&bitcode_path, "mv", &[src_obj, dest_obj]);
@ -50,8 +62,6 @@ fn main() {
&[dest_ir, "-o", dest_bc], &[dest_ir, "-o", dest_bc],
); );
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rustc-env=BUILTINS_O={}", dest_obj);
get_zig_files(bitcode_path.as_path(), &|path| {
let path: &Path = path;
println!(
@@ -55,36 +55,36 @@ and : Bool, Bool -> Bool
##
## In some languages, `&&` and `||` are special-cased in the compiler to skip
## evaluating the expression after the operator under certain circumstances.
## In Roc, this is not the case. See the performance notes for [Bool.and] for details.
or : Bool, Bool -> Bool
## Exclusive or
xor : Bool, Bool -> Bool
## Returns `True` if the two values are *structurally equal*, and `False` otherwise.
##
## `a == b` is shorthand for `Bool.isEq a b`
##
## Structural equality works as follows:
##
## 1. Global tags are equal if they are the same tag, and also their contents (if any) are equal.
## 2. Private tags are equal if they are the same tag, in the same module, and also their contents (if any) are equal.
## 3. Records are equal if all their fields are equal.
## 4. Collections ([Str], [List], [Dict], and [Set]) are equal if they are the same length, and also all their corresponding elements are equal.
## 5. [Num] values are equal if their numbers are equal, with one exception: if both arguments to `isEq` are *NaN*, then `isEq` returns `False`. See [Num.isNaN] for more about *NaN*.
##
## Note that `isEq` takes `'val` instead of `val`, which means `isEq` does not
## accept arguments whose types contain functions.
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `isEq : 'val, 'val -> Bool`
isEq : val, val -> Bool
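The *NaN* exception in rule 5 comes straight from IEEE-754. A quick Rust sketch (Rust floats follow the same standard; this is not Roc code) of *NaN* comparing unequal even to itself:

```
fn main() {
    let nan = f64::NAN;

    // IEEE-754: NaN is not equal to anything, including itself.
    assert!(nan != nan);
    assert_eq!(nan == nan, false);

    // Ordinary finite values compare structurally as expected.
    assert_eq!(1.5_f64 == 1.5_f64, true);
}
```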
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `isNotEq : 'val, 'val -> Bool`
## Calls [isEq] on the given values, then calls [not] on the result.
##
## `a != b` is shorthand for `Bool.isNotEq a b`
##
## Note that `isNotEq` takes `'val` instead of `val`, which means `isNotEq` does not
## accept arguments whose types contain functions.
isNotEq : val, val -> Bool
@@ -1,7 +0,0 @@
interface Defaults
exposes []
imports [
Dict.{ Dict },
Set.{ Set },
Num.{ Num, Int, Float }
]
@@ -19,3 +19,15 @@ map :
Dict beforeKey beforeValue,
({ key: beforeKey, value: beforeValue } -> { key: afterKey, value: afterValue })
-> Dict afterKey afterValue
# DESIGN NOTES: The reason for panicking when given NaN is that:
# * If we allowed NaN in, Dict.insert would no longer be idempotent.
# * If we allowed NaN but overrode its semantics to make it feel like "NaN == NaN" we'd need isNaN checks in all hashing operations as well as all equality checks (during collision detection), not just insert. This would be much worse for performance than panicking on insert, which only requires one extra conditional on insert.
# * It's obviously invalid; the whole point of NaN is that an error occurred. Giving a runtime error notifies you when this problem happens. Giving it only on insert is the best for performance, because it means you aren't paying for isNaN checks on lookups as well.
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: insert : Dict 'key val, 'key, val -> Dict 'key val
## Make sure never to insert a key of *NaN* into a [Dict]! Because *NaN* is
## defined to be unequal to *NaN*, inserting a *NaN* key results in an entry
## that can never be retrieved or removed from the [Dict].
insert : Dict key val, key, val -> Dict key val
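As a sketch of the failure mode described above, here is a tiny Rust stand-in for an equality-based key lookup (not Roc's actual `Dict` implementation), showing that a *NaN* key can never be found again:

```
// A toy association list standing in for a dictionary, to show why a
// NaN key can never be retrieved by an equality-based lookup.
fn lookup(entries: &[(f64, &str)], key: f64) -> Option<&str> {
    entries
        .iter()
        .find(|(k, _)| *k == key) // NaN == NaN is false, so this never matches
        .map(|(_, v)| *v)
}

fn main() {
    let entries = vec![(1.0, "one"), (f64::NAN, "oops")];

    assert_eq!(lookup(&entries, 1.0), Some("one"));
    // The NaN entry is unreachable, even when searched for with NaN itself.
    assert_eq!(lookup(&entries, f64::NAN), None);
}
```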
@@ -62,7 +62,7 @@ interface List2
## the same type. If you want to put a mix of #Int and #Str values into a list, try this:
##
## ```
## mixedList : List [ IntElem I64, StrElem Str ]*
## mixedList = [ IntElem 1, IntElem 2, StrElem "a", StrElem "b" ]
## ```
##
@@ -180,7 +180,7 @@ interface List2
## we can free it immediately because there are no other refcounts. However,
## in the case of `lists`, we have to iterate through the list and decrement
## the refcounts of each of its contained lists - because they, too, have
## refcounts! Importantly, because the first element had its refcount incremented
## because the function returned `first`, that element will actually end up
## *not* getting freed at the end - but all the others will be.
##
@@ -232,9 +232,28 @@ reverse : List elem -> List elem
## Sorts a list using a function which specifies how two elements are ordered.
##
## When sorting by numeric values, it's more efficient to use [sortAsc] or
## [sortDesc] instead.
sort : List elem, (elem, elem -> [ Lt, Eq, Gt ]) -> List elem
## Sorts a list in ascending order (lowest to highest), using a function which
## specifies a way to represent each element as a number.
##
## This is more efficient than [sort] because it skips
## calculating the `[ Lt, Eq, Gt ]` value and uses the number directly instead.
##
## To sort in descending order (highest to lowest), use [List.sortDesc] instead.
sortAsc : List elem, (elem -> Num *) -> List elem
## Sorts a list in descending order (highest to lowest), using a function which
## specifies a way to represent each element as a number.
##
## This is more efficient than [sort] because it skips
## calculating the `[ Lt, Eq, Gt ]` value and uses the number directly instead.
##
## To sort in ascending order (lowest to highest), use [List.sortAsc] instead.
sortDesc : List elem, (elem -> Num *) -> List elem
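The idea of sorting by "a number derived from each element" may be familiar from other languages. A small Rust sketch (not Roc) of the same pattern, using `sort_by_key`:

```
use std::cmp::Reverse;

fn main() {
    // Sort ascending by a numeric key derived from each element
    // (here: the string's length), analogous to sortAsc.
    let mut words = vec!["pear", "fig", "banana"];
    words.sort_by_key(|w| w.len());
    assert_eq!(words, vec!["fig", "pear", "banana"]);

    // Sort descending on the same key, analogous to sortDesc.
    words.sort_by_key(|w| Reverse(w.len()));
    assert_eq!(words, vec!["banana", "pear", "fig"]);
}
```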
## Convert each element in the list to something new, by calling a conversion
## function on each of them. Then return a new list of the converted values.
##
@@ -248,7 +267,7 @@ map : List before, (before -> after) -> List after
## This works like #List.map, except it also passes the index
## of the element to the conversion function.
mapWithIndex : List before, (before, Nat -> after) -> List after
## This works like #List.map, except at any time you can return `Err` to
## cancel the entire operation immediately, and return that #Err.
@@ -279,7 +298,7 @@ update : List elem, Nat, (elem -> elem) -> List elem
## A more flexible version of #List.update, which returns an "updater" function
## that lets you delay performing the update until later.
updater : List elem, Nat -> { elem, new : (elem -> List elem) }
## If all the elements in the list are #Ok, return a new list containing the
## contents of those #Ok tags. If any elements are #Err, return #Err.
@@ -629,7 +648,7 @@ walk : List elem, { start : state, step : (state, elem -> state) } -> state
## Note that in other languages, `walkBackwards` is sometimes called `reduceRight`,
## `fold`, `foldRight`, or `foldr`.
walkBackwards : List elem, { start : state, step : (state, elem -> state) } -> state
## Same as #List.walk, except you can stop walking early.
##
@@ -9,13 +9,24 @@ interface Num2
## This is useful for functions that can work on either, for example #Num.add, whose type is:
##
## ```
## add : Num a, Num a -> Num a
## ```
##
## The number 1.5 technically has the type `Num (Fraction *)`, so when you pass
## two of them to [Num.add], the answer you get is `3.0 : Num (Fraction *)`.
##
## Similarly, the number 0x1 (that is, the integer 1 in hexadecimal notation)
## technically has the type `Num (Integer *)`, so when you pass two of them to
## [Num.add], the answer you get is `2 : Num (Integer *)`.
##
## The type [`Frac a`](#Frac) is defined to be an alias for `Num (Fraction a)`,
## so `3.0 : Num (Fraction *)` is the same value as `3.0 : Frac *`.
## Similarly, the type [`Int a`](#Int) is defined to be an alias for
## `Num (Integer a)`, so `2 : Num (Integer *)` is the same value as
## `2 : Int *`.
##
## In this way, the [Num] type makes it possible to have `1 + 0x1` return
## `2 : Int *` and `1.5 + 1.5` return `3.0 : Frac`.
##
## ## Number Literals
##
@@ -29,29 +40,30 @@ interface Num2
## ends up having the type `Nat`.
##
## Sometimes number literals don't become more specific. For example,
## the [Num.toStr] function has the type `Num * -> Str`. This means that
## when calling `Num.toStr (5 + 6)`, the expression `(5 + 6)`
## still has the type `Num *`. When this happens, `Num *` defaults to
## being an [I64] - so this addition expression would overflow
## if either 5 or 6 were replaced with a number big enough to cause
## addition overflow on an [I64] value.
##
## If this default of [I64] is not big enough for your purposes,
## you can add an `i128` to the end of the number literal, like so:
##
## >>> Num.toStr 5_000_000_000i128
##
## This `i128` suffix specifies that you want this number literal to be
## an [I128] instead of a `Num *`. All the other numeric types have
## suffixes just like `i128`; here are some other examples:
##
## * `215u8` is a `215` value of type [U8]
## * `76.4f32` is a `76.4` value of type [F32]
## * `123.45dec` is a `123.45` value of type [Dec]
## * `12345nat` is a `12345` value of type [Nat]
##
## In practice, these are rarely needed. It's most common to write
## number literals without any suffix.
Num a : [ @Num a ]
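If you know Rust, its literal suffixes work much the same way as the Roc suffixes described above; a short comparison sketch (Rust, not Roc):

```
fn main() {
    // Rust's literal suffixes play the same role as the Roc suffixes above:
    let big = 5_000_000_000i64; // explicitly a 64-bit signed integer
    let byte = 215u8;           // explicitly an 8-bit unsigned integer
    let ratio = 76.4f32;        // explicitly a 32-bit binary float

    // Unsuffixed literals get a default type when nothing else pins them
    // down (i32 for integers, f64 for floats in Rust).
    let n = 5 + 6;

    println!("{big} {byte} {ratio} {n}");
}
```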
## A decimal number.
##
@@ -110,6 +122,30 @@ Dec : Frac [ @Decimal128 ]
## been done in a base-2 floating point calculation, which causes noticeable
## precision loss in this case.
##
## The floating-point numbers ([F32] and [F64]) also have three values which
## are not ordinary [finite numbers](https://en.wikipedia.org/wiki/Finite_number).
## They are:
## * ∞ ([infinity](https://en.wikipedia.org/wiki/Infinity))
## * -∞ (negative infinity)
## * *NaN* ([not a number](https://en.wikipedia.org/wiki/NaN))
##
## These values are different from ordinary numbers in that they only occur
## when a floating-point calculation encounters an error. For example:
## * Dividing a positive [F64] by `0.0` returns ∞.
## * Dividing a negative [F64] by `0.0` returns -∞.
## * Dividing a [F64] of `0.0` by `0.0` returns [*NaN*](Num.isNaN).
##
## These rules come from the [IEEE-754](https://en.wikipedia.org/wiki/IEEE_754)
## floating point standard. Because almost all modern processors are built to
## this standard, deviating from these rules has a significant performance
## cost! Since the most common reason to choose [F64] or [F32] over [Dec] is
## access to hardware-accelerated performance, Roc follows these rules exactly.
##
## There's no literal syntax for these error values, but you can check to see if
## you ended up with one of them by using [isNaN], [isFinite], and [isInfinite].
## Whenever a function in this module could return one of these values, that
## possibility is noted in the function's documentation.
##
## ## Performance Notes
##
## On typical modern CPUs, performance is similar between [Dec], [F64], and [F32]
@@ -128,7 +164,7 @@ Dec : Frac [ @Decimal128 ]
## an even bigger performance difference. [F32] and [F64] can do these in a
## single instruction, whereas [Dec] needs entire custom procedures - which use
## loops and conditionals. If you need to do performance-critical trigonometry
## or square roots, either [F64] or [F32] is probably a better choice than the
## usual default choice of [Dec], despite the precision problems they bring.
Frac a : Num [ @Fraction a ]
@@ -194,6 +230,17 @@ I64 : Int [ @Signed64 ]
U64 : Int [ @Unsigned64 ]
I128 : Int [ @Signed128 ]
U128 : Int [ @Unsigned128 ]
## A [natural number](https://en.wikipedia.org/wiki/Natural_number) represented
## as a 64-bit unsigned integer on 64-bit systems, a 32-bit unsigned integer
## on 32-bit systems, and so on.
##
## This system-specific size makes it useful for certain data structure
## functions like [List.len], because the number of elements many data structures
## can hold is also system-specific. For example, the maximum number of elements
## a [List] can hold on a 64-bit system fits in a 64-bit unsigned integer, and
## on a 32-bit system it fits in a 32-bit unsigned integer. This makes [Nat] a
## good fit for [List.len] regardless of system.
Nat : Int [ @Natural ]
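Rust's `usize` plays the same role that [Nat] plays here, which may make the motivation above concrete; a short Rust sketch (not Roc):

```
fn main() {
    // usize is an unsigned integer whose width matches the target:
    // 8 bytes on 64-bit targets, 4 bytes on 32-bit targets.
    println!("usize is {} bytes here", std::mem::size_of::<usize>());

    // Collection lengths are usize for exactly the reason described above:
    // the maximum number of elements is itself target-dependent.
    let xs = vec![1, 2, 3];
    let len: usize = xs.len();
    assert_eq!(len, 3);
}
```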
## A 64-bit signed integer. All number literals without decimal points are compatible with #Int values.
@@ -272,148 +319,6 @@ Nat : Int [ @Natural ]
## If you need to do math outside these bounds, consider using a larger numeric size.
Int size : Num [ @Int size ]
## A 64-bit floating-point number. All number literals with decimal points are #Float values.
##
## >>> 0.1
##
## >>> 1.0
##
## >>> 0.0
##
## If you like, you can put underscores in your #Float literals.
## They have no effect on the number's value, but can make things easier to read.
##
## >>> 1_000_000.000_000_001
##
## Roc supports two types of floating-point numbers:
##
## - *Decimal* floating-point numbers
## - *Binary* floating-point numbers
##
## Decimal floats are precise for decimal calculations. For example:
##
## >>> 0.1 + 0.2
##
## Operations on binary floats tend to run *much* faster than operations on
## decimal floats, because almost all processors have dedicated instructions
## for binary floats and not for decimal floats.
## However, binary floats are less precise for decimal calculations.
##
## For example, here is the same `0.1 + 0.2` calculation again, this time putting
## `f64` after the numbers to specify that they should be #F64 binary floats
## instead of the default of decimal floats.
##
## >>> 0.1f64 + 0.2f64
##
## If decimal precision is unimportant, binary floats give better performance.
## If decimal precision is important - for example, when representing money -
## decimal floats tend to be worth the performance cost.
##
## Usually, Roc's compiler can infer a more specific type than #Float for
## a particular float value, based on how it is used with other numbers. For example:
##
## >>> coordinates : { x : F32, y : F32 }
## >>> coordinates = { x: 1, y: 2.5 }
## >>>
## >>> coordinates.x + 1
##
## On the last line, the compiler infers that the `1` in `+ 1` is an #F32
## because it's being added to `coordinates.x`, which was defined to be an #F32
## on the first line.
##
## Sometimes the compiler has no information about which specific type to pick.
## For example:
##
## >>> 0.1 + 0.2 == 0.3
##
## When this happens, the compiler defaults to choosing #D64 decimal floats.
## If you want something else, you can write (for example) `0.1f32 + 0.2 == 0.3`
## to compare them as #F32 values instead.
##
## Both decimal and binary #Float values conform to the [IEEE-754](https://en.wikipedia.org/wiki/IEEE_754#Interchange_formats)
## specification for floating point numbers. Conforming to this specification
## means Roc's binary floats have nearly universal hardware support, and its
## decimal floats have [some hardware support](http://speleotrove.com/decimal/)
## among the rare processors which support decimal float instructions at all.
##
## This specification covers several float formats. Here are the ones Roc supports:
##
## - #F32 (32-bit binary float)
## - #F64 (64-bit binary float)
## - #D32 (32-bit decimal float)
## - #D64 (64-bit decimal float) # TODO show a table like we do with ints, with the min/max ranges
##
## Like #Int, it's possible for #Float operations to overflow. Like with ints,
## you'll typically get a crash when this happens.
##
## * In a development build, you'll get an assertion failure.
## * In an optimized build, you'll get [`Infinity` or `-Infinity`](https://en.wikipedia.org/wiki/IEEE_754-1985#Positive_and_negative_infinity).
##
## Although some languages have first-class representations for
## `-Infinity`, `Infinity`, and the special `NaN` ("not a number")
## floating-point values described in the IEEE-754, Roc does not.
## Instead, Roc treats all of these as errors. If any Float operation
## in a development build encounters one of these values, it will
## result in an assertion failure.
##
## Still, it's possible that these values may accidentally arise in
## release builds. If this happens, they will behave according to the usual
## IEEE-754 rules: any operation involving `NaN` will output `NaN`,
## any operation involving `Infinity` or `-Infinity` will output either
## `Infinity`, `-Infinity`, or `NaN`, and `NaN` is defined to be not
## equal to itself - meaning `(x == x)` returns `False` if `x` is `NaN`.
##
## These are very error-prone values, so if you see an assertion fail in
## development because of one of them, take it seriously - and try to fix
## the code so that it can't come up in a release!
##
## ## Loud versus Quiet errors
##
## Besides precision problems, another reason floats are error-prone
## is that they have quiet error handling built in. For example, in
## a 64-bit floating point number, there are certain patterns of those
## 64 bits which do not represent valid floats; instead, they represent
## invalid results of previous operations.
##
## Whenever any arithmetic operation is performed on an invalid float,
## the result is also invalid. This is called *error propagation*, and
## it is notoriously error-prone. In Roc, using equality operations like
## `==` and `!=` on an invalid float causes a crash. (See #Float.verify
## to check the validity of your float.)
##
## Because invalid floats are so error-prone, Roc discourages using them.
## Instead, by default it treats them the same way as overflow: by
## crashing whenever any #Float function would otherwise return one.
## You can also use functions like #Float.tryAdd to get an `Ok` or an error
## back so you can gracefully recover from invalid values.
##
## Quiet errors can be useful sometimes. For example, you might want to
## do three floating point calculations in a row, and then gracefully handle
## the situation where any one of the three was invalid. In that situation,
## quiet errors can be more efficient than using three `try` functions, because
## it can have one condition at the end instead of three along the way.
##
## Another potential use for quiet errors is for risky performance optimizations.
## When you are absolutely certain there is no chance of overflow or other
## errors, using a *quiet* operation may save an entry in the instruction cache
## by removing a branch that would always have been predicted correctly.
## Always [measure the performance change](https://youtu.be/r-TLSBdHe1A)
## if you do this! The compiler can optimize away those branches behind the scenes,
## so you may find that using the quiet version explicitly
## makes the code riskier to future change, without actually affecting performance.
##
## ## Performance Notes
##
## Currently, loud errors are implemented using an extra conditional. Although
## this conditional will always be correctly branch-predicted unless an error
## occurs, there is a small effect on the instruction cache, which means
## quiet errors are very slightly more efficient.
##
## Long-term, it's possible that the Roc compiler may be able to implement
## loud errors using *signalling errors* in some situations, which could
## eliminate the performance difference between loud and quiet errors in
## the situation where no error occurs.
## Convert
## Return a negative number when given a positive one, and vice versa.
@@ -426,16 +331,16 @@ Int size : Num [ @Int size ]
##
## >>> Num.neg 0.0
##
## This is safe to use with any #Frac, but it can cause overflow when used with certain #Int values.
##
## For example, calling #Num.neg on the lowest value of a signed integer (such as #Int.lowestI64 or #Int.lowestI32) will cause overflow.
## This is because, for any given size of signed integer (32-bit, 64-bit, etc.) its negated lowest value turns out to be 1 higher than
## the highest value it can represent. (For this reason, calling #Num.abs on the lowest signed value will also cause overflow.)
##
## Additionally, calling #Num.neg on any unsigned integer (such as any #U64 or #U32 value) other than zero will cause overflow.
##
## (It will never crash when given a #Frac, however, because of how floating point numbers represent positive and negative numbers.)
neg : Num a -> Num a
## Return the absolute value of the number.
##
@@ -451,14 +356,14 @@ neg : Num range -> Num range
##
## >>> Num.abs 0.0
##
## This is safe to use with any #Frac, but it can cause overflow when used with certain #Int values.
##
## For example, calling #Num.abs on the lowest value of a signed integer (such as #Int.lowestI64 or #Int.lowestI32) will cause overflow.
## This is because, for any given size of signed integer (32-bit, 64-bit, etc.) its negated lowest value turns out to be 1 higher than
## the highest value it can represent. (For this reason, calling #Num.neg on the lowest signed value will also cause overflow.)
##
## Calling this on an unsigned integer (like #U32 or #U64) never does anything.
abs : Num a -> Num a
## Check
@@ -485,7 +390,7 @@ isOdd : Num * -> Bool
## Add two numbers of the same type.
##
## (To add an #Int and a #Frac, first convert one so that they both have the same type. There are functions in the [`Frac`](/Frac) module that can convert both #Int to #Frac and the other way around.)
##
## `a + b` is shorthand for `Num.add a b`.
##
@@ -495,13 +400,24 @@ isOdd : Num * -> Bool
##
## `Num.add` can be convenient in pipelines.
##
## >>> Frac.pi
## >>> |> Num.add 1.0
##
## If the answer to this operation can't fit in the return value (e.g. an
## [I8] answer that's higher than 127 or lower than -128), the result is an
## *overflow*. For [F64] and [F32], overflow results in an answer of either
## ∞ or -∞. For all other number types, overflow results in a panic.
add : Num a, Num a -> Num a
## Add two numbers and check for overflow.
##
## This is the same as [Num.add] except if the operation overflows, instead of
## panicking or returning ∞ or -∞, it will return `Err Overflow`.
addCheckOverflow : Num a, Num a -> Result (Num a) [ Overflow ]*
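The two overflow behaviours described above - a checked result for integers versus saturation to ∞/-∞ for binary floats - can be sketched in Rust (not Roc), since Rust exposes both behaviours directly:

```
fn main() {
    // Integer overflow: the checked variant reports it instead of panicking,
    // much like addCheckOverflow returning `Err Overflow`.
    assert_eq!(i8::MAX.checked_add(1), None);
    assert_eq!(100i8.checked_add(27), Some(127));

    // Binary floats never panic on overflow; per IEEE-754 they saturate
    // to +/- infinity, matching the F64/F32 behaviour described above.
    let huge = f64::MAX + f64::MAX;
    assert!(huge.is_infinite() && huge.is_sign_positive());
}
```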
## Subtract two numbers of the same type.
##
## (To subtract an #Int and a #Frac, first convert one so that they both have the same type. There are functions in the [`Frac`](/Frac) module that can convert both #Int to #Frac and the other way around.)
##
## `a - b` is shorthand for `Num.sub a b`.
##
@@ -511,13 +427,24 @@ add : Num range, Num range -> Num range
##
## `Num.sub` can be convenient in pipelines.
##
## >>> Frac.pi
## >>> |> Num.sub 2.0
##
## If the answer to this operation can't fit in the return value (e.g. an
## [I8] answer that's higher than 127 or lower than -128), the result is an
## *overflow*. For [F64] and [F32], overflow results in an answer of either
## ∞ or -∞. For all other number types, overflow results in a panic.
sub : Num a, Num a -> Num a
## Subtract two numbers and check for overflow.
##
## This is the same as [Num.sub] except if the operation overflows, instead of
## panicking or returning ∞ or -∞, it will return `Err Overflow`.
subCheckOverflow : Num a, Num a -> Result (Num a) [ Overflow ]*
## Multiply two numbers of the same type.
##
## (To multiply an #Int and a #Frac, first convert one so that they both have the same type. There are functions in the [`Frac`](/Frac) module that can convert both #Int to #Frac and the other way around.)
##
## `a * b` is shorthand for `Num.mul a b`.
##
@@ -527,9 +454,20 @@ sub : Num range, Num range -> Num range
##
## `Num.mul` can be convenient in pipelines.
##
## >>> Frac.pi
## >>> |> Num.mul 2.0
##
## If the answer to this operation can't fit in the return value (e.g. an
## [I8] answer that's higher than 127 or lower than -128), the result is an
## *overflow*. For [F64] and [F32], overflow results in an answer of either
## ∞ or -∞. For all other number types, overflow results in a panic.
mul : Num a, Num a -> Num a
## Multiply two numbers and check for overflow.
##
## This is the same as [Num.mul] except if the operation overflows, instead of
## panicking or returning ∞ or -∞, it will return `Err Overflow`.
mulCheckOverflow : Num a, Num a -> Result (Num a) [ Overflow ]*
## Convert
@@ -540,13 +478,16 @@ mul : Num range, Num range -> Num range
##
## >>> Num.toStr 42
##
## Only #Frac values will include a decimal point, and they will always include one.
##
## >>> Num.toStr 4.2
##
## >>> Num.toStr 4.0
##
## When this function is given a non-[finite](Num.isFinite)
## [F64] or [F32] value, the returned string will be `"NaN"`, `"∞"`, or `"-∞"`.
##
## To get strings in hexadecimal, octal, or binary format, use [Num.format].
toStr : Num * -> Str
## Convert a number into a [Str], formatted with the given options.
@@ -612,16 +553,16 @@ format :
-> Str
## Round off the given float to the nearest integer.
round : Frac * -> Int *
ceil : Frac * -> Int *
floor : Frac * -> Int *
trunc : Frac * -> Int *
## Convert an #Int to a #Nat. If the given number doesn't fit in #Nat, it will be truncated.
## Since #Nat has a different maximum number depending on the system you're building
## for, this may give a different answer on different systems.
##
## For example, on a 32-bit system, #Num.maxNat will return the same answer as
## #Num.maxU32. This means that calling `Num.toNat 9_000_000_000` on a 32-bit
## system will return #Num.maxU32 instead of 9 billion, because 9 billion is
## higher than #Num.maxU32 and will not fit in a #Nat on a 32-bit system.
@@ -630,13 +571,13 @@ trunc : Float * -> Int *
## the #Nat value of 9_000_000_000. This is because on a 64-bit system, #Nat can
## hold up to #Num.maxU64, and 9_000_000_000 is lower than #Num.maxU64.
##
## To convert a #Frac to a #Nat, first call either #Num.round, #Num.ceil, or #Num.floor
## on it, then call this on the resulting #Int.
toNat : Int * -> Nat
## Convert an #Int to an #I8. If the given number doesn't fit in #I8, it will be truncated.
##
## To convert a #Frac to an #I8, first call either #Num.round, #Num.ceil, or #Num.floor
## on it, then call this on the resulting #Int.
toI8 : Int * -> I8
toI16 : Int * -> I16
@@ -660,13 +601,9 @@ toF32 : Num * -> F32
## there will be a loss of precision.
toF64 : Num * -> F64
## Convert a #Num to a #Dec. If the given number can't be precisely represented in a #Dec,
## there will be a loss of precision.
toDec : Num * -> Dec
## Convert a #Num to a #D64. If the given number can't be precisely represented in a #D64,
## there will be a loss of precision.
toD64 : Num * -> D64
## Divide two integers and #Num.round the result.
##
@@ -689,16 +626,16 @@ toD64 : Num * -> D64
## >>> Num.divRound 8 -3
##
## This is the same as the #// operator.
divRound : Int a, Int a -> Int a
## Perform flooring modulo on two integers.
##
## Modulo is the same as remainder when working with positive numbers,
## but if either number is negative, then modulo works differently.
##
## Additionally, flooring modulo uses #Frac.floor on the result.
##
## (Use #Frac.mod for non-flooring modulo.)
##
## Return `Err DivByZero` if the second integer is zero, because division by zero is undefined in mathematics.
##
@@ -711,40 +648,16 @@ divRound : Int, Int -> Int
## >>> -8 %% -3
##
## >>> Int.modFloor -8 -3
#modFloor : Int a, Int a -> Result (Int a) [ DivByZero ]*
## Bitwise
xor : Int a, Int a -> Int a
and : Int a, Int a -> Int a
not : Int a -> Int a
## Sort ascending - that is, with the lowest first, and the highest last.
##
## List.sort Num.asc [ 3, 6, 0 ]
##
asc : Num a, Num a -> [ Eq, Lt, Gt ]
## Sort descending - that is, with the highest first, and the lowest last.
##
## List.sort Num.desc [ 3, 6, 0 ]
##
desc : Num a, Num a -> [ Eq, Lt, Gt ]
## TODO should we offer hash32 etc even if someday it has to do a hash64 and truncate?
##
## This function can crash under these circumstances:
##
## * It receives a function, or any type that contains a function (for example a record, tag, or #List containing a function)
## * It receives an erroneous #Float (`NaN`, `Infinity`, or `-Infinity` - these values can only originate from hosts)
##
## CAUTION: This function may give different answers in future releases of Roc,
## so be aware that if you rely on the exact answer this gives today, your
## code may break in a future Roc release.
hash64 : a -> U64
## Limits
@@ -803,88 +716,81 @@ maxU32 : U32
## and zero is the lowest unsigned number. Unsigned numbers cannot be negative.
minU32 : U32
## The highest supported #F64 value you can have, which is approximately 1.8 × 10^308.
##
## If you go higher than this, your running Roc code will crash - so be careful not to!
maxF64 : F64
## The lowest supported #F64 value you can have, which is approximately -1.8 × 10^308.
##
## If you go lower than this, your running Roc code will crash - so be careful not to!
minF64 : F64
## The highest supported #F32 value you can have, which is approximately 3.4 × 10^38.
## It is equal to 2^53, which is approximately 9 × 10^15.
##
## If you go higher than this, your running Roc code will crash - so be careful not to!
maxF32 : F32
## >>> Float.highestInt
##
## >>> Float.highestInt + 100 # Increasing may lose precision
##
## >>> Float.highestInt - 100 # Decreasing is fine - but watch out for lowestLosslessInt!
maxPreciseInt : Float *
## The lowest supported #F32 value you can have, which is approximately -3.4 × 10^38.
## It is equal to -2^53, which is approximately -9 × 10^15.
##
## If you go lower than this, your running Roc code will crash - so be careful not to!
minF32 : F32
## The highest supported #Dec value you can have, which is approximately 1.7 × 10^20.
##
## If you go higher than this, your running Roc code will crash - so be careful not to!
maxDec : Dec
## The lowest supported #Dec value you can have, which is approximately -1.7 × 10^20.
##
## If you go lower than this, your running Roc code will crash - so be careful not to!
minDec : Dec
## >>> Float.lowestIntVal + 100 # Increasing is fine - but watch out for highestInt!
maxPreciseInt : Float *
## Constants
## An approximation of e, specifically 2.718281828459045.
e : Frac *
## An approximation of pi, specifically 3.141592653589793.
pi : Frac *
#ceiling : Float -> Int
#floor : Float -> Int
## Trigonometry
cos : Frac a -> Frac a
acos : Frac a -> Frac a
sin : Frac a -> Frac a
asin : Frac a -> Frac a
tan : Frac a -> Frac a
atan : Frac a -> Frac a
## Other Calculations (arithmetic?)
## Divide one [Frac] by another.
##
## `a / b` is shorthand for `Num.div a b`.
##
## [Division by zero is undefined in mathematics](https://en.wikipedia.org/wiki/Division_by_zero).
## As such, you should make sure never to pass zero as the denominator to this function!
## Calling [div] on a [Dec] denominator of zero will cause a panic.
##
## Calling [div] on [F32] and [F64] values follows these rules:
## * Dividing a positive [F64] or [F32] by zero returns ∞.
## * Dividing a negative [F64] or [F32] by zero returns -∞.
## * Dividing a zero [F64] or [F32] by zero returns [*NaN*](Num.isNaN).
##
## > These rules come from the [IEEE-754](https://en.wikipedia.org/wiki/IEEE_754)
## > floating point standard. Because almost all modern processors are built to
## > this standard, deviating from these rules has a significant performance
## > cost! Since the most common reason to choose [F64] or [F32] over [Dec] is
## > access to hardware-accelerated performance, Roc follows these rules exactly.
##
## To divide an [Int] and a [Frac], first convert the [Int] to a [Frac] using
## one of the functions in this module like [toDec].
##
## >>> 5.0 / 7.0
##
@@ -892,45 +798,47 @@ pi : Float *
##
## `Num.div` can be convenient in pipelines.
##
## >>> Num.pi
## >>> |> Num.div 2.0
div : Frac a, Frac a -> Frac a
div = \numerator, denominator ->
when numerator is
0.0 -> 0.0 # TODO return Result!
_ -> denominator
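The [F64]/[F32] division-by-zero rules listed above are plain IEEE-754 behaviour; a quick Rust check (not Roc) of the three cases:

```
fn main() {
    // IEEE-754 division by zero, as described for F64/F32 above:
    assert_eq!(1.0_f64 / 0.0, f64::INFINITY);
    assert_eq!(-1.0_f64 / 0.0, f64::NEG_INFINITY);
    assert!((0.0_f64 / 0.0).is_nan()); // 0/0 is NaN, and NaN != NaN
}
```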
## Perform modulo on two [Frac]s.
##
## Modulo is the same as remainder when working with positive numbers,
## but if either number is negative, then modulo works differently.
##
## `a % b` is shorthand for `Num.mod a b`.
##
## [Division by zero is undefined in mathematics](https://en.wikipedia.org/wiki/Division_by_zero),
## and as such, so is modulo by zero. Because of this, you should make sure never
## to pass zero for the second argument to this function!
##
## Passing [mod] a [Dec] value of zero for its second argument will cause a panic.
## Passing [mod] an [F32] or [F64] value of zero for its second argument will cause it
## to return [*NaN*](Num.isNaN).
##
## >>> 5.0 % 7.0
##
## >>> Num.mod 5 7
##
## `Num.mod` can be convenient in pipelines.
##
## >>> Num.pi
## >>> |> Num.mod 2.0
mod : Frac a, Frac a -> Frac a
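A short Rust sketch (not Roc) of the difference between remainder and a flooring-style modulo, plus the zero-divisor case for floats described above (the example sticks to positive divisors, where flooring and Euclidean modulo agree):

```
fn main() {
    // Remainder and flooring modulo agree for positive operands...
    assert_eq!(8 % 3, 2);
    assert_eq!(8_i32.rem_euclid(3), 2);

    // ...but differ when the first operand is negative.
    assert_eq!(-8 % 3, -2);                // remainder keeps the dividend's sign
    assert_eq!((-8_i32).rem_euclid(3), 1); // Euclidean modulo is non-negative

    // A float modulo with a zero divisor is NaN under IEEE-754.
    assert!((5.0_f64 % 0.0).is_nan());
}
```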
## Raises a #Frac to the power of another #Frac.
##
##
## For an #Int alternative to this function, see #Num.raise.
pow : Frac a, Frac a -> Frac a
## Raises an integer to the power of another, by multiplying the integer by
## itself the given number of times.
##
## This process is known as [exponentiation by squaring](https://en.wikipedia.org/wiki/Exponentiation_by_squaring).
##
## For a #Frac alternative to this function, which supports negative exponents,
## see #Num.exp.
##
## >>> Num.exp 5 0
@@ -947,33 +855,187 @@ pow : Float a, Float a -> Float a
## overflow
expBySquaring : Int a, U8 -> Int a
## Returns an approximation of the absolute value of a [Frac]'s square root.
##
## The square root of a negative number is not a real number, and [Frac] only
## supports real numbers. As such, you should make sure never to pass this
## function a negative number! Calling [sqrt] on a negative [Dec] will cause a panic.
##
## Calling [sqrt] on [F32] and [F64] values follows these rules:
## * Passing a negative [F64] or [F32] returns [*NaN*](Num.isNaN).
## * Passing [*NaN*](Num.isNaN) or -∞ also returns [*NaN*](Num.isNaN).
## * Passing ∞ returns ∞.
##
## > These rules come from the [IEEE-754](https://en.wikipedia.org/wiki/IEEE_754)
## > floating point standard. Because almost all modern processors are built to
## > this standard, deviating from these rules has a significant performance
## > cost! Since the most common reason to choose [F64] or [F32] over [Dec] is
## > access to hardware-accelerated performance, Roc follows these rules exactly.
##
## >>> Frac.sqrt 4.0
##
## >>> Frac.sqrt 1.5
##
## >>> Frac.sqrt 0.0
##
## >>> Frac.sqrt -4.0f64
##
## >>> Frac.sqrt -4.0dec
sqrt : Frac a -> Frac a
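The [sqrt] rules above are standard IEEE-754 behaviour; a quick Rust check (not Roc):

```
fn main() {
    assert_eq!(4.0_f64.sqrt(), 2.0);

    // Negative inputs, NaN, and negative infinity all produce NaN...
    assert!((-4.0_f64).sqrt().is_nan());
    assert!(f64::NAN.sqrt().is_nan());
    assert!(f64::NEG_INFINITY.sqrt().is_nan());

    // ...while positive infinity maps to positive infinity.
    assert_eq!(f64::INFINITY.sqrt(), f64::INFINITY);
}
```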
## Bit shifts
## This should be like (for example) recipTry so that it's more discoverable
## in documentation and editor autocomplete when you type "recip"
tryRecip : Float a -> Result (Float a) [ DivByZero ]*
## [Logical bit shift](https://en.wikipedia.org/wiki/Bitwise_operation#Logical_shift) left.
##
## `a << b` is shorthand for `Num.shl a b`.
shl : Int a, Int a -> Int a
## [Arithmetic bit shift](https://en.wikipedia.org/wiki/Bitwise_operation#Arithmetic_shift) left.
##
## >>> Float.sqrt 4.0 ## This is called `shlWrap` because any bits shifted
## off the beginning of the number will be wrapped around to
## the end. (In contrast, [shl] replaces discarded bits with zeroes.)
shlWrap : Int a, Int a -> Int a
## [Logical bit shift](https://en.wikipedia.org/wiki/Bitwise_operation#Logical_shift) right.
##
## `a >> b` is shorthand for `Num.shr a b`.
shr : Int a, Int a -> Int a
## [Arithmetic bit shift](https://en.wikipedia.org/wiki/Bitwise_operation#Arithmetic_shift) right.
##
## This is called `shrWrap` because any bits shifted
## off the end of the number will be wrapped around to
## the beginning. (In contrast, [shr] replaces discarded bits with zeroes.)
shrWrap : Int a, Int a -> Int a
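A short Rust sketch (not Roc) of the difference between a plain logical shift, which fills with zeroes, and the wrap-around behaviour described for [shlWrap] and [shrWrap], which corresponds to a bit rotation:

```
fn main() {
    let x: u8 = 0b1001_0001;

    // Plain logical shifts discard the bits that fall off and fill with zeroes.
    assert_eq!(x << 1, 0b0010_0010);
    assert_eq!(x >> 1, 0b0100_1000);

    // The wrapping variants carry the shifted-off bits around to the other end,
    // which is what a bit rotation does.
    assert_eq!(x.rotate_left(1), 0b0010_0011);
    assert_eq!(x.rotate_right(1), 0b1100_1000);
}
```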
## [Endianness](https://en.wikipedia.org/wiki/Endianness)
Endi : [ Big, Little ]
## The [Endi] argument does not matter for [U8] and [I8], since they have
## only one byte.
toBytes : Num *, Endi -> List U8 toBytes : Num *, Endi -> List U8
## when Num.parseBytes bytes Big is
## Ok { val: f64, rest } -> ...
## Err (ExpectedNum (Float Binary64)) -> ...
parseBytes : List U8, Endi -> Result { val : Num a, rest : List U8 } [ ExpectedNum a ]*
## when Num.fromBytes bytes Big is
## Ok f64 -> ...
## Err (ExpectedNum (Float Binary64)) -> ...
fromBytes : List U8, Endi -> Result (Num a) [ ExpectedNum a ]*
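What `Big` versus `Little` means at the byte level can be seen with Rust's built-in conversions (Rust, not Roc):

```
fn main() {
    let n: u32 = 0x1122_3344;

    // Big-endian puts the most significant byte first...
    assert_eq!(n.to_be_bytes(), [0x11, 0x22, 0x33, 0x44]);
    // ...little-endian puts the least significant byte first.
    assert_eq!(n.to_le_bytes(), [0x44, 0x33, 0x22, 0x11]);

    // Round-tripping from bytes requires agreeing on the same endianness.
    assert_eq!(u32::from_be_bytes([0x11, 0x22, 0x33, 0x44]), n);
    assert_eq!(u32::from_le_bytes([0x44, 0x33, 0x22, 0x11]), n);
}
```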
## Comparison
## Returns `True` if the first number is less than the second.
##
## `a < b` is shorthand for `Num.isLt a b`.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
##
## >>> 5
## >>> |> Num.isLt 6
isLt : Num a, Num a -> Bool
## Returns `True` if the first number is less than or equal to the second.
##
## `a <= b` is shorthand for `Num.isLte a b`.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
isLte : Num a, Num a -> Bool
## Returns `True` if the first number is greater than the second.
##
## `a > b` is shorthand for `Num.isGt a b`.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
##
## >>> 6
## >>> |> Num.isGt 5
isGt : Num a, Num a -> Bool
## Returns `True` if the first number is greater than or equal to the second.
##
## `a >= b` is shorthand for `Num.isGte a b`.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
isGte : Num a, Num a -> Bool
## Returns the higher of two numbers.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
higher : Num a, Num a -> Num a
## Returns the lower of two numbers.
##
## If either argument is [*NaN*](Num.isNaN), returns `False` no matter what. (*NaN*
## is [defined to be unordered](https://en.wikipedia.org/wiki/NaN#Comparison_with_NaN).)
lower : Num a, Num a -> Num a
# Branchless implementation that works for all numeric types:
#
# let is_lt = arg1 < arg2;
# let is_eq = arg1 == arg2;
# return (is_lt as i8 - is_eq as i8) + 1;
#
# 1, 1 -> (0 - 1) + 1 == 0 # Eq
# 5, 1 -> (0 - 0) + 1 == 1 # Gt
# 1, 5 -> (1 - 0) + 1 == 2 # Lt
## Returns `Lt` if the first number is less than the second, `Gt` if
## the first is greater than the second, and `Eq` if they're equal.
##
## Although this can be passed to [List.sort], you'll get better performance
## by using [List.sortAsc] or [List.sortDesc] instead.
compare : Num a, Num a -> [ Lt, Eq, Gt ]
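The branchless scheme in the comment above can be checked directly; here is a Rust sketch of it (illustration only, not from the diff), using the same 0 = Eq, 1 = Gt, 2 = Lt encoding:

```rust
// Illustration only: the branchless comparison sketched in the comment above.
fn compare_branchless(a: i64, b: i64) -> i8 {
    let is_lt = (a < b) as i8;
    let is_eq = (a == b) as i8;
    (is_lt - is_eq) + 1
}

fn main() {
    assert_eq!(compare_branchless(1, 1), 0); // Eq
    assert_eq!(compare_branchless(5, 1), 1); // Gt
    assert_eq!(compare_branchless(1, 5), 2); // Lt
}
```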
## Special Floating-Point Values
## When given a [F64] or [F32] value, returns `False` if that value is
## [*NaN*](Num.isNaN), ∞ or -∞, and `True` otherwise.
##
## Always returns `True` when given a [Dec].
##
## This is the opposite of [isInfinite], except when given [*NaN*](Num.isNaN). Both
## [isFinite] and [isInfinite] return `False` for [*NaN*](Num.isNaN).
isFinite : Frac * -> Bool
## When given a [F64] or [F32] value, returns `True` if that value is either
## ∞ or -∞, and `False` otherwise.
##
## Always returns `False` when given a [Dec].
##
## This is the opposite of [isFinite], except when given [*NaN*](Num.isNaN). Both
## [isFinite] and [isInfinite] return `False` for [*NaN*](Num.isNaN).
isInfinite : Frac * -> Bool
## When given a [F64] or [F32] value, returns `True` if that value is
## *NaN* ([not a number](https://en.wikipedia.org/wiki/NaN)), and `False` otherwise.
##
## Always returns `False` when given a [Dec].
##
## >>> Num.isNaN 12.3
##
## >>> Num.isNaN (Num.sqrt -2)
##
## *NaN* is unusual among numeric values in that:
## * *NaN* is not equal to any other number, even itself. [Bool.isEq] always returns `False` if either argument is *NaN*.
## * *NaN* has no ordering, so [isLt], [isLte], [isGt], and [isGte] always return `False` if either argument is *NaN*.
##
## These rules come from the [IEEE-754](https://en.wikipedia.org/wiki/IEEE_754)
## floating point standard. Because almost all modern processors are built to
## this standard, deviating from these rules has a significant performance
## cost! Since the most common reason to choose [F64] or [F32] over [Dec] is
## access to hardware-accelerated performance, Roc follows these rules exactly.
##
## Note that you should never put a *NaN* into a [Set], or use it as the key in
## a [Dict]. The result is entries that can never be removed from those
## collections! See the documentation for [Set.add] and [Dict.insert] for details.
isNaN : Frac * -> Bool
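The *NaN* and infinity behaviors these docs rely on can be seen with any IEEE-754 double; a Rust sketch for reference (illustration only, not from the diff):

```rust
// Illustration only: IEEE-754 NaN and infinity behavior referenced above.
fn main() {
    let nan = f64::NAN;
    assert!(nan != nan); // NaN is not equal to anything, itself included
    assert!(!(nan < 1.0) && !(nan > 1.0) && !(nan <= 1.0)); // NaN is unordered
    assert!(nan.is_nan() && !nan.is_finite() && !nan.is_infinite());
    assert!(f64::INFINITY.is_infinite() && !f64::INFINITY.is_finite());
    assert!(12.3_f64.is_finite());
}
```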

View file

@ -18,6 +18,9 @@ len : Set * -> Nat
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `add : Set 'elem, 'elem -> Set 'elem`
## Make sure never to add a *NaN* to a [Set]! Because *NaN* is defined to be
## unequal to *NaN*, adding a *NaN* results in an entry that can never be
## retrieved or removed from the [Set].
add : Set elem, elem -> Set elem
## Drops the given element from the set.

View file

@ -194,7 +194,7 @@ startsWith : Str, Str -> Bool
## if you want to check whether a string begins with something that's representable
## in a single code point, you can use (for example) `Str.startsWithCodePoint '鹏'`
## instead of `Str.startsWithCodePoint "鹏"`. ('鹏' evaluates to the [U32]
## value `40527`.) This will not work for graphemes which take up multiple code
## points, however; `Str.startsWithCodePoint '👩‍👩‍👦‍👦'` would be a compiler error
## because 👩‍👩‍👦‍👦 takes up multiple code points and cannot be represented as a
## single [U32]. You'd need to use `Str.startsWithCodePoint "🕊"` instead.
@ -444,21 +444,38 @@ toF64 : Str -> Result U128 [ InvalidF64 ]*
toF32 : Str -> Result I128 [ InvalidF32 ]*
toDec : Str -> Result Dec [ InvalidDec ]*
## If the string represents a valid number, return that number.
##
## The exact number type to look for will be inferred from usage. Here's an
## example where the `Err` branch matches `Integer Signed64`, which causes this to
## parse an [I64] because [I64] is defined as `I64 : Num [ Integer [ Signed64 ] ]`.
##
## >>> when Str.toNum "12345" is
## >>> Ok i64 -> "The I64 was: \(i64)"
## >>> Err (ExpectedNum (Integer Signed64)) -> "Not a valid I64!"
##
## If the string is exactly `"NaN"`, `"∞"`, or `"-∞"`, they will be accepted
## only when converting to [F64] or [F32] numbers, and will be translated accordingly.
##
## This never accepts numbers with underscores or commas in them. For more
## advanced options, see [parseNum].
toNum : Str -> Result (Num a) [ ExpectedNum a ]*
## If the string begins with an [Int] or a [finite](Num.isFinite) [Frac], return
## that number along with the rest of the string after it.
##
## The exact number type to look for will be inferred from usage. Here's an
## example where the `Err` branch matches `Float Binary64`, which causes this to
## parse an [F64] because [F64] is defined as `F64 : Num [ Fraction [ Float64 ] ]`.
##
## >>> when Str.parseNum input {} is
## >>> Ok { val: f64, rest } -> "The F64 was: \(f64)"
## >>> Err (ExpectedNum (Fraction Float64)) -> "Not a valid F64!"
##
## If the string begins with `"NaN"`, `"∞"`, or `"-∞"` (which do not represent
## [finite](Num.isFinite) numbers), they will be accepted only when parsing
## [F64] or [F32] numbers, and translated accordingly.
parseNum : Str, NumParseConfig -> Result { val : Num a, rest : Str } [ ExpectedNum a ]*
## Notes:
## * You can allow a decimal mark for integers; they'll only parse if the numbers after it are all 0.
@ -475,4 +492,3 @@ NumParseConfig :
trailingZeroes ? [ Allowed, Disallowed ],
wholeSep ? { mark : Str, policy : [ Allowed, Required U64 ] }
}
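The `parseNum` contract (a parsed value plus the unconsumed rest of the string) is the usual prefix-parsing shape; here is a hedged Rust sketch of that shape (illustration only; `parse_i64_prefix` is a hypothetical helper, not the Roc implementation, and it ignores the configuration options above):

```rust
// Illustration only: parse a signed integer prefix and return it with the rest.
fn parse_i64_prefix(input: &str) -> Result<(i64, &str), String> {
    let end = input
        .char_indices()
        .take_while(|&(i, c)| c.is_ascii_digit() || (i == 0 && (c == '-' || c == '+')))
        .map(|(i, c)| i + c.len_utf8())
        .last()
        .unwrap_or(0);
    let (digits, rest) = input.split_at(end);
    digits
        .parse::<i64>()
        .map(|val| (val, rest))
        .map_err(|_| format!("expected a number at the start of {:?}", input))
}

fn main() {
    assert_eq!(parse_i64_prefix("12345 apples"), Ok((12345, " apples")));
    assert_eq!(parse_i64_prefix("-7,5"), Ok((-7, ",5")));
    assert!(parse_i64_prefix("NaN").is_err()); // integers reject NaN, like the doc says
}
```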

View file

@ -3,12 +3,6 @@ pub const OBJ_PATH: &str = env!(
"Env var BUILTINS_O not found. Is there a problem with the build script?" "Env var BUILTINS_O not found. Is there a problem with the build script?"
); );
pub fn as_bytes() -> &'static [u8] {
// In the build script for the builtins module,
// we compile the builtins into LLVM bitcode
include_bytes!("../bitcode/builtins.bc")
}
pub const NUM_ASIN: &str = "roc_builtins.num.asin";
pub const NUM_ACOS: &str = "roc_builtins.num.acos";
pub const NUM_ATAN: &str = "roc_builtins.num.atan";

View file

@ -292,7 +292,7 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
// minInt : Int range
add_type!(Symbol::NUM_MIN_INT, int_type(flex(TVAR1)));
// divInt : Int a, Int a -> Result (Int a) [ DivByZero ]*
let div_by_zero = SolvedType::TagUnion(
vec![(TagName::Global("DivByZero".into()), vec![])],
Box::new(SolvedType::Wildcard),

View file

@ -30,7 +30,7 @@ macro_rules! macro_magic {
/// Some builtins cannot be constructed in code gen alone, and need to be defined
/// as separate Roc defs. For example, List.get has this type:
///
/// List.get : List elem, Nat -> Result elem [ OutOfBounds ]*
///
/// Because this returns an open tag union for its Err type, it's not possible
/// for code gen to return a hardcoded value for OutOfBounds. For example,
@ -450,7 +450,7 @@ fn num_add(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumAdd)
}
/// Num.addWrap : Int a, Int a -> Int a
fn num_add_wrap(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumAddWrap)
}
@ -549,7 +549,7 @@ fn num_sub(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumSub)
}
/// Num.subWrap : Int a, Int a -> Int a
fn num_sub_wrap(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumSubWrap)
}
@ -648,7 +648,7 @@ fn num_mul(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumMul)
}
/// Num.mulWrap : Int a, Int a -> Int a
fn num_mul_wrap(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumMulWrap)
}
@ -1152,7 +1152,7 @@ fn num_ceiling(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// Num.powInt : Int a, Int a -> Int a
fn num_pow_int(symbol: Symbol, var_store: &mut VarStore) -> Def {
let int_var = var_store.fresh();
@ -1251,17 +1251,17 @@ fn num_asin(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// Num.bitwiseAnd : Int a, Int a -> Int a
fn num_bitwise_and(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumBitwiseAnd)
}
/// Num.bitwiseXor : Int a, Int a -> Int a
fn num_bitwise_xor(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumBitwiseXor)
}
/// Num.bitwiseOr: Int a, Int a -> Int a
fn num_bitwise_or(symbol: Symbol, var_store: &mut VarStore) -> Def {
num_binop(symbol, var_store, LowLevel::NumBitwiseOr)
}
@ -1667,7 +1667,7 @@ fn list_concat(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// List.repeat : elem, Nat -> List elem
fn list_repeat(symbol: Symbol, var_store: &mut VarStore) -> Def {
let elem_var = var_store.fresh();
let len_var = var_store.fresh();
@ -1809,7 +1809,7 @@ fn list_get(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// List.set : List elem, Nat, elem -> List elem
///
/// List.set :
/// Attr (w | u | v) (List (Attr u a)),
@ -2531,7 +2531,7 @@ fn set_walk(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// Num.rem : Int a, Int a -> Result (Int a) [ DivByZero ]*
fn num_rem(symbol: Symbol, var_store: &mut VarStore) -> Def {
let num_var = var_store.fresh();
let unbound_zero_var = var_store.fresh();
@ -2590,7 +2590,7 @@ fn num_rem(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// Num.isMultipleOf : Int a, Int a -> Bool
fn num_is_multiple_of(symbol: Symbol, var_store: &mut VarStore) -> Def {
lowlevel_2(symbol, LowLevel::NumIsMultipleOf, var_store)
}
@ -2696,7 +2696,7 @@ fn num_div_float(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// Num.div : Int a, Int a -> Result (Int a) [ DivByZero ]*
fn num_div_int(symbol: Symbol, var_store: &mut VarStore) -> Def {
let bool_var = var_store.fresh();
let num_var = var_store.fresh();

View file

@ -603,7 +603,7 @@ pub fn canonicalize_expr<'a>(
// A "when" with no branches is a runtime error, but it will mess things up
// if code gen mistakenly thinks this is a tail call just because its condition
// happened to be one. (The condition gave us our initial output value.)
if branches.is_empty() {
output.tail_call = None;
}

View file

@ -345,7 +345,7 @@ pub fn desugar_expr<'a>(arena: &'a Bump, loc_expr: &'a Located<Expr<'a>>) -> &'a
)
}
If(if_thens, final_else_branch) => {
// If does not get desugared into `when` so we can give more targeted error messages during type checking.
let desugared_final_else = &*arena.alloc(desugar_expr(arena, &final_else_branch));
let mut desugared_if_thens = Vec::with_capacity_in(if_thens.len(), arena);

View file

@ -13,7 +13,7 @@ use roc_types::types::Type::{self, *};
#[inline(always)]
pub fn int_literal(
num_var: Variable,
precision_var: Variable,
expected: Expected<Type>,
region: Region,
) -> Constraint {
@ -25,7 +25,7 @@ pub fn int_literal(
And(vec![
Eq(
num_type.clone(),
ForReason(reason, num_int(Type::Variable(precision_var)), region),
Category::Int,
region,
),

View file

@ -96,7 +96,7 @@ pub fn constrain_expr(
expected: Expected<Type>,
) -> Constraint {
match expr {
Int(var, precision, _) => int_literal(*var, *precision, expected, region),
Num(var, _) => exists(
vec![*var],
Eq(
@ -106,7 +106,7 @@ pub fn constrain_expr(
region,
),
),
Float(var, precision, _) => float_literal(*var, *precision, expected, region),
EmptyRecord => constrain_empty_record(region, expected),
Expr::Record { record_var, fields } => {
if fields.is_empty() {

View file

@ -146,7 +146,7 @@ pub fn pre_constrain_imports(
// Translate referenced symbols into constraints. We do this on the main
// thread because we need exclusive access to the exposed_types map, in order
// to get the necessary constraint info for any aliases we imported. We also
// resolve builtin types now, so we can use a reference to stdlib instead of
// having to either clone it or recreate it from scratch on the other thread.
for &symbol in references.iter() {
let module_id = symbol.module_id();

View file

@ -116,7 +116,7 @@ mod test_fmt {
}
#[test]
fn force_space_at_beginning_of_comment() {
expr_formats_to(
indoc!(
r#"

View file

@ -1,5 +1,6 @@
[package]
name = "roc_gen_dev"
description = "The development backend for the Roc compiler"
version = "0.1.0"
authors = ["The Roc Contributors"]
license = "UPL-1.0"

View file

@ -5,7 +5,7 @@ It goes from Roc's [mono ir](https://github.com/rtfeldman/roc/blob/trunk/compile
## General Process
The backend is essentially defined as two recursive match statements over the mono ir.
The first pass is used to do simple linear scan lifetime analysis.
In the future it may be expanded to add a few other quick optimizations.
The second pass is the actual meat of the backend that generates the byte buffer of output binary.
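To make the two-pass shape concrete, here is a schematic Rust sketch (names, the toy IR, and the opcodes are invented for illustration; this is not the actual roc_gen_dev code): the first recursive pass records last uses for a linear-scan-style lifetime analysis, and the second recursive pass appends bytes to the output buffer.

```rust
use std::collections::HashMap;

// Toy IR standing in for the mono ir; the real one is far richer.
enum Ir {
    Load(u32),     // reference a symbol
    Add(u32, u32), // combine two symbols
    Seq(Vec<Ir>),  // a sequence of statements
}

// Pass 1: linear-scan-style lifetime analysis (record each symbol's last use).
fn pass1_last_uses(ir: &Ir, pos: &mut usize, last_use: &mut HashMap<u32, usize>) {
    *pos += 1;
    match ir {
        Ir::Load(s) => {
            last_use.insert(*s, *pos);
        }
        Ir::Add(a, b) => {
            last_use.insert(*a, *pos);
            last_use.insert(*b, *pos);
        }
        Ir::Seq(items) => {
            for item in items {
                pass1_last_uses(item, pos, last_use);
            }
        }
    }
}

// Pass 2: emit bytes into the output buffer (opcodes here are made up).
fn pass2_emit(ir: &Ir, buf: &mut Vec<u8>) {
    match ir {
        Ir::Load(_) => buf.push(0x01),
        Ir::Add(_, _) => buf.push(0x02),
        Ir::Seq(items) => {
            for item in items {
                pass2_emit(item, buf);
            }
        }
    }
}

fn main() {
    let ir = Ir::Seq(vec![Ir::Load(0), Ir::Load(1), Ir::Add(0, 1)]);
    let (mut pos, mut last_use) = (0, HashMap::new());
    pass1_last_uses(&ir, &mut pos, &mut last_use);
    let mut buf = Vec::new();
    pass2_emit(&ir, &mut buf);
    assert_eq!(buf, vec![0x01, 0x01, 0x02]);
}
```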
@ -62,7 +62,7 @@ Here are example implementations for [arm](https://github.com/rtfeldman/roc/blob
Adding a new builtin to the dev backend can be pretty simple.
Here is [an example](https://github.com/rtfeldman/roc/pull/893/files) of adding `Num.Sub`.
This is the general procedure I follow with some helpful links:
1. Find a feature that is just n+1.
For example, since we already have integers, adding a builtin that functions on them should be n+1.

View file

@ -205,7 +205,7 @@ pub struct Backend64Bit<
float_used_callee_saved_regs: MutSet<FloatReg>,
stack_size: u32,
// The amount of stack space needed to pass args for function calling.
fn_call_stack_size: u32,
}
@ -409,7 +409,7 @@ impl<
Ok(())
}
x => Err(format!(
"receiving return type, {:?}, is not yet implemented",
x
)),
}

View file

@ -236,7 +236,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
Layout::Builtin(Builtin::Float64) => {}
x => {
return Err(format!(
"receiving return type, {:?}, is not yet implemented",
x
));
}
@ -530,7 +530,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
Layout::Builtin(Builtin::Float64) => {}
x => {
return Err(format!(
"receiving return type, {:?}, is not yet implemented",
x
));
}

View file

@ -145,7 +145,7 @@ where
) -> Result<(), String>;
/// build_expr builds the expressions for the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_expr(
&mut self,
sym: &Symbol,
@ -230,7 +230,7 @@ where
}
/// build_run_low_level builds the low level operation and outputs to the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_run_low_level(
&mut self,
sym: &Symbol,

View file

@ -1,5 +1,6 @@
[package]
name = "roc_gen_llvm"
description = "The LLVM backend for the Roc compiler"
version = "0.1.0"
authors = ["The Roc Contributors"]
license = "UPL-1.0"

View file

@ -50,7 +50,8 @@ use roc_module::ident::TagName;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, ModuleId, Symbol};
use roc_mono::ir::{
BranchInfo, CallType, ExceptionId, JoinPointId, ModifyRc, OptLevel, TopLevelFunctionLayout,
Wrapped,
};
use roc_mono::layout::{Builtin, InPlace, LambdaSet, Layout, LayoutIds, UnionLayout};
use target_lexicon::CallingConvention;
@ -87,21 +88,6 @@ macro_rules! debug_info_init {
}};
}
#[derive(Debug, Clone, Copy)]
pub enum OptLevel {
Normal,
Optimize,
}
impl From<OptLevel> for OptimizationLevel {
fn from(level: OptLevel) -> Self {
match level {
OptLevel::Normal => OptimizationLevel::None,
OptLevel::Optimize => OptimizationLevel::Aggressive,
}
}
}
/// Iterate over all functions in an llvm module
pub struct FunctionIterator<'ctx> {
next: Option<FunctionValue<'ctx>>,
@ -356,7 +342,9 @@ impl<'a, 'ctx, 'env> Env<'a, 'ctx, 'env> {
}
pub fn module_from_builtins<'ctx>(ctx: &'ctx Context, module_name: &str) -> Module<'ctx> {
// In the build script for the builtins module,
// we compile the builtins into LLVM bitcode
let bitcode_bytes: &[u8] = include_bytes!("../../../builtins/bitcode/builtins.bc");
let memory_buffer = MemoryBuffer::create_from_memory_range(&bitcode_bytes, module_name);
@ -1490,7 +1478,7 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
let builder = env.builder;
// Determine types, assumes the discriminant is in the field layouts
let num_fields = field_layouts.len();
let mut field_types = Vec::with_capacity_in(num_fields, env.arena);
@ -4685,7 +4673,7 @@ fn run_low_level<'a, 'ctx, 'env>(
BasicValueEnum::IntValue(bool_val)
}
ListGetUnsafe => {
// List.get : List elem, Nat -> [ Ok elem, OutOfBounds ]*
debug_assert_eq!(args.len(), 2);
let (wrapper_struct, list_layout) = load_symbol_and_layout(scope, &args[0]);
@ -5275,7 +5263,7 @@ fn build_int_binop<'a, 'ctx, 'env>(
// rem == 0
// }
//
// NOTE we'd like the branches to be swapped for better branch prediction,
// but llvm normalizes to the above ordering in -O3
let zero = rhs.get_type().const_zero();
let neg_1 = rhs.get_type().const_int(-1i64 as u64, false);
@ -5622,7 +5610,7 @@ fn build_int_unary_op<'a, 'ctx, 'env>(
int_abs_raise_on_overflow(env, arg, arg_layout)
}
NumToFloat => {
// TODO: Handle different sized numbers
// This is an Int, so we need to convert it.
bd.build_cast(
InstructionOpcode::SIToFP,
@ -5746,7 +5734,7 @@ fn build_float_unary_op<'a, 'ctx, 'env>(
let bd = env.builder;
// TODO: Handle different sized floats
match op {
NumNeg => bd.build_float_neg(arg, "negate_float").into(),
NumAbs => env.call_intrinsic(LLVM_FABS_F64, &[arg.into()]),
@ -5887,7 +5875,7 @@ fn throw_exception<'a, 'ctx, 'env>(env: &Env<'a, 'ctx, 'env>, message: &str) {
let builder = env.builder;
let info = {
// we represented both void and char pointers with `u8*`
let u8_ptr = context.i8_type().ptr_type(AddressSpace::Generic);
// allocate an exception (that can hold a pointer to a string)

View file

@ -38,7 +38,7 @@ macro_rules! run_jit_function {
($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr, $errors:expr) => {{ ($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr, $errors:expr) => {{
use inkwell::context::Context; use inkwell::context::Context;
use roc_gen::run_roc::RocCallResult; use roc_gen_llvm::run_roc::RocCallResult;
use std::mem::MaybeUninit; use std::mem::MaybeUninit;
unsafe { unsafe {
@ -77,7 +77,7 @@ macro_rules! run_jit_function_dynamic_type {
($lib: expr, $main_fn_name: expr, $bytes:expr, $transform:expr, $errors:expr) => {{ ($lib: expr, $main_fn_name: expr, $bytes:expr, $transform:expr, $errors:expr) => {{
use inkwell::context::Context; use inkwell::context::Context;
use roc_gen::run_roc::RocCallResult; use roc_gen_llvm::run_roc::RocCallResult;
unsafe { unsafe {
let main: libloading::Symbol<unsafe extern "C" fn(*const u8)> = $lib let main: libloading::Symbol<unsafe extern "C" fn(*const u8)> = $lib
@ -86,7 +86,7 @@ macro_rules! run_jit_function_dynamic_type {
.ok_or(format!("Unable to JIT compile `{}`", $main_fn_name)) .ok_or(format!("Unable to JIT compile `{}`", $main_fn_name))
.expect("errored"); .expect("errored");
let size = roc_gen::run_roc::ROC_CALL_RESULT_DISCRIMINANT_SIZE + $bytes; let size = roc_gen_llvm::run_roc::ROC_CALL_RESULT_DISCRIMINANT_SIZE + $bytes;
let layout = std::alloc::Layout::array::<u8>(size).unwrap(); let layout = std::alloc::Layout::array::<u8>(size).unwrap();
let result = std::alloc::alloc(layout); let result = std::alloc::alloc(layout);
main(result); main(result);

View file

@ -1,4 +1,4 @@
use crate::docs::DocEntry::DetatchedDoc; use crate::docs::DocEntry::DetachedDoc;
use crate::docs::TypeAnnotation::{ use crate::docs::TypeAnnotation::{
Apply, BoundVariable, Function, NoTypeAnn, ObscuredRecord, ObscuredTagUnion, Record, TagUnion, Apply, BoundVariable, Function, NoTypeAnn, ObscuredRecord, ObscuredTagUnion, Record, TagUnion,
}; };
@ -32,7 +32,7 @@ pub struct ModuleDocumentation {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum DocEntry { pub enum DocEntry {
DocDef(DocDef), DocDef(DocDef),
DetatchedDoc(String), DetachedDoc(String),
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -108,10 +108,10 @@ pub fn generate_module_docs<'a>(
} }
} }
fn detatched_docs_from_comments_and_new_lines<'a>( fn detached_docs_from_comments_and_new_lines<'a>(
comments_or_new_lines: &'a [roc_parse::ast::CommentOrNewline<'a>], comments_or_new_lines: &'a [roc_parse::ast::CommentOrNewline<'a>],
) -> Vec<String> { ) -> Vec<String> {
let mut detatched_docs: Vec<String> = Vec::new(); let mut detached_docs: Vec<String> = Vec::new();
let mut docs = String::new(); let mut docs = String::new();
@ -123,13 +123,13 @@ fn detatched_docs_from_comments_and_new_lines<'a>(
} }
CommentOrNewline::LineComment(_) | CommentOrNewline::Newline => { CommentOrNewline::LineComment(_) | CommentOrNewline::Newline => {
detatched_docs.push(docs.clone()); detached_docs.push(docs.clone());
docs = String::new(); docs = String::new();
} }
} }
} }
detatched_docs detached_docs
} }
fn generate_entry_doc<'a>( fn generate_entry_doc<'a>(
@ -147,8 +147,8 @@ fn generate_entry_doc<'a>(
Def::SpaceBefore(sub_def, comments_or_new_lines) => { Def::SpaceBefore(sub_def, comments_or_new_lines) => {
// Comments before a definition are attached to the current defition // Comments before a definition are attached to the current defition
for detatched_doc in detatched_docs_from_comments_and_new_lines(comments_or_new_lines) { for detached_doc in detached_docs_from_comments_and_new_lines(comments_or_new_lines) {
acc.push(DetatchedDoc(detatched_doc)); acc.push(DetachedDoc(detached_doc));
} }
generate_entry_doc(ident_ids, acc, Some(comments_or_new_lines), sub_def) generate_entry_doc(ident_ids, acc, Some(comments_or_new_lines), sub_def)

View file

@ -40,7 +40,7 @@ pub fn infer_borrow<'a>(
// This is a fixed-point analysis
//
// all functions initially own all their parameters
// through a series of checks and heuristics, some arguments are set to borrowed
// when that doesn't lead to conflicts the change is kept, otherwise it may be reverted
//
@ -348,7 +348,7 @@ impl<'a> BorrowInfState<'a> {
/// let z = e in ...
///
/// and determines whether z and which of the symbols used in e
/// must be taken as owned parameters
fn collect_call(&mut self, z: Symbol, e: &crate::ir::Call<'a>) {
use crate::ir::CallType::*;

View file

@ -7,16 +7,16 @@ use roc_collections::all::{MutMap, MutSet};
use roc_module::symbol::Symbol; use roc_module::symbol::Symbol;
pub fn free_variables(stmt: &Stmt<'_>) -> MutSet<Symbol> { pub fn free_variables(stmt: &Stmt<'_>) -> MutSet<Symbol> {
let (mut occuring, bound) = occuring_variables(stmt); let (mut occurring, bound) = occurring_variables(stmt);
for ref s in bound { for ref s in bound {
occuring.remove(s); occurring.remove(s);
} }
occuring occurring
} }
pub fn occuring_variables(stmt: &Stmt<'_>) -> (MutSet<Symbol>, MutSet<Symbol>) { pub fn occurring_variables(stmt: &Stmt<'_>) -> (MutSet<Symbol>, MutSet<Symbol>) {
let mut stack = std::vec![stmt]; let mut stack = std::vec![stmt];
let mut result = MutSet::default(); let mut result = MutSet::default();
let mut bound_variables = MutSet::default(); let mut bound_variables = MutSet::default();
@ -26,7 +26,7 @@ pub fn occuring_variables(stmt: &Stmt<'_>) -> (MutSet<Symbol>, MutSet<Symbol>) {
match stmt { match stmt {
Let(symbol, expr, _, cont) => { Let(symbol, expr, _, cont) => {
occuring_variables_expr(expr, &mut result); occurring_variables_expr(expr, &mut result);
result.insert(*symbol); result.insert(*symbol);
bound_variables.insert(*symbol); bound_variables.insert(*symbol);
stack.push(cont); stack.push(cont);
@ -39,7 +39,7 @@ pub fn occuring_variables(stmt: &Stmt<'_>) -> (MutSet<Symbol>, MutSet<Symbol>) {
fail, fail,
.. ..
} => { } => {
occuring_variables_call(call, &mut result); occurring_variables_call(call, &mut result);
result.insert(*symbol); result.insert(*symbol);
bound_variables.insert(*symbol); bound_variables.insert(*symbol);
stack.push(pass); stack.push(pass);
@ -93,13 +93,13 @@ pub fn occuring_variables(stmt: &Stmt<'_>) -> (MutSet<Symbol>, MutSet<Symbol>) {
(result, bound_variables) (result, bound_variables)
} }
fn occuring_variables_call(call: &crate::ir::Call<'_>, result: &mut MutSet<Symbol>) { fn occurring_variables_call(call: &crate::ir::Call<'_>, result: &mut MutSet<Symbol>) {
// NOTE though the function name does occur, it is a static constant in the program // NOTE though the function name does occur, it is a static constant in the program
// for liveness, it should not be included here. // for liveness, it should not be included here.
result.extend(call.arguments.iter().copied()); result.extend(call.arguments.iter().copied());
} }
pub fn occuring_variables_expr(expr: &Expr<'_>, result: &mut MutSet<Symbol>) { pub fn occurring_variables_expr(expr: &Expr<'_>, result: &mut MutSet<Symbol>) {
use Expr::*; use Expr::*;
match expr { match expr {
@ -109,7 +109,7 @@ pub fn occuring_variables_expr(expr: &Expr<'_>, result: &mut MutSet<Symbol>) {
result.insert(*symbol); result.insert(*symbol);
} }
Call(call) => occuring_variables_call(call, result), Call(call) => occurring_variables_call(call, result),
Tag { arguments, .. } Tag { arguments, .. }
| Struct(arguments) | Struct(arguments)
@ -160,13 +160,13 @@ struct Context<'a> {
fn update_live_vars<'a>(expr: &Expr<'a>, v: &LiveVarSet) -> LiveVarSet { fn update_live_vars<'a>(expr: &Expr<'a>, v: &LiveVarSet) -> LiveVarSet {
let mut v = v.clone(); let mut v = v.clone();
occuring_variables_expr(expr, &mut v); occurring_variables_expr(expr, &mut v);
v v
} }
/// `isFirstOcc xs x i = true` if `xs[i]` is the first occurrence of `xs[i]` in `xs` /// `isFirstOcc xs x i = true` if `xs[i]` is the first occurrence of `xs[i]` in `xs`
fn is_first_occurence(xs: &[Symbol], i: usize) -> bool { fn is_first_occurrence(xs: &[Symbol], i: usize) -> bool {
match xs.get(i) { match xs.get(i) {
None => unreachable!(), None => unreachable!(),
Some(s) => i == xs.iter().position(|v| s == v).unwrap(), Some(s) => i == xs.iter().position(|v| s == v).unwrap(),
@ -319,7 +319,7 @@ impl<'a> Context<'a> {
{ {
for (i, x) in xs.iter().enumerate() { for (i, x) in xs.iter().enumerate() {
let info = self.get_var_info(*x); let info = self.get_var_info(*x);
if !info.reference || !is_first_occurence(xs, i) { if !info.reference || !is_first_occurrence(xs, i) {
// do nothing // do nothing
} else { } else {
let num_consumptions = get_num_consumptions(*x, xs, consume_param_pred.clone()); // number of times the argument is used let num_consumptions = get_num_consumptions(*x, xs, consume_param_pred.clone()); // number of times the argument is used
@ -393,7 +393,7 @@ impl<'a> Context<'a> {
// Remark: `x` may occur multiple times in the application (e.g., `f x y x`). // Remark: `x` may occur multiple times in the application (e.g., `f x y x`).
// This is why we check whether it is the first occurrence. // This is why we check whether it is the first occurrence.
if self.must_consume(*x) if self.must_consume(*x)
&& is_first_occurence(xs, i) && is_first_occurrence(xs, i)
&& is_borrow_param(*x, xs, ps) && is_borrow_param(*x, xs, ps)
&& !b_live_vars.contains(x) && !b_live_vars.contains(x)
{ {
@ -418,7 +418,7 @@ impl<'a> Context<'a> {
This is why we check whether it is the first occurrence. */ This is why we check whether it is the first occurrence. */
if self.must_consume(*x) if self.must_consume(*x)
&& is_first_occurence(xs, i) && is_first_occurrence(xs, i)
&& *is_borrow && *is_borrow
&& !b_live_vars.contains(x) && !b_live_vars.contains(x)
{ {
@ -1096,7 +1096,7 @@ pub fn collect_stmt(
vars = collect_stmt(cont, jp_live_vars, vars); vars = collect_stmt(cont, jp_live_vars, vars);
vars.remove(symbol); vars.remove(symbol);
let mut result = MutSet::default(); let mut result = MutSet::default();
occuring_variables_expr(expr, &mut result); occurring_variables_expr(expr, &mut result);
vars.extend(result); vars.extend(result);
vars vars
@ -1114,7 +1114,7 @@ pub fn collect_stmt(
vars.remove(symbol); vars.remove(symbol);
let mut result = MutSet::default(); let mut result = MutSet::default();
occuring_variables_call(call, &mut result); occurring_variables_call(call, &mut result);
vars.extend(result); vars.extend(result);

View file

@ -43,6 +43,12 @@ macro_rules! return_on_layout_error {
};
}
#[derive(Debug, Clone, Copy)]
pub enum OptLevel {
Normal,
Optimize,
}
#[derive(Clone, Debug, PartialEq)]
pub enum MonoProblem {
PatternProblem(crate::exhaustive::Error),
@ -7663,7 +7669,7 @@ pub fn num_argument_to_int_or_float(
4 => IntOrFloat::UnsignedIntType(IntPrecision::I32),
8 => IntOrFloat::UnsignedIntType(IntPrecision::I64),
_ => panic!(
"Invalid target for Num type argument: Roc doesn't support compiling to {}-bit systems.",
ptr_bytes * 8
),
}

View file

@ -418,7 +418,7 @@ impl<'a> ExprState<'a> {
F: Fn(Region, Row, Col) -> EExpr<'a>, F: Fn(Region, Row, Col) -> EExpr<'a>,
{ {
if !self.operators.is_empty() { if !self.operators.is_empty() {
// this `=` or `<-` likely occured inline; treat it as an invalid operator // this `=` or `<-` likely occurred inline; treat it as an invalid operator
let opchar = match loc_op.value { let opchar = match loc_op.value {
BinOp::Assignment => arena.alloc([b'=']) as &[_], BinOp::Assignment => arena.alloc([b'=']) as &[_],
BinOp::Backpassing => arena.alloc([b'<', b'-']) as &[_], BinOp::Backpassing => arena.alloc([b'<', b'-']) as &[_],
@ -451,7 +451,7 @@ impl<'a> ExprState<'a> {
debug_assert_eq!(loc_op.value, BinOp::HasType); debug_assert_eq!(loc_op.value, BinOp::HasType);
if !self.operators.is_empty() { if !self.operators.is_empty() {
// this `:` likely occured inline; treat it as an invalid operator // this `:` likely occurred inline; treat it as an invalid operator
let opchar = arena.alloc([b':']) as &[_]; let opchar = arena.alloc([b':']) as &[_];
let fail = let fail =
@ -1000,7 +1000,7 @@ fn parse_expr_operator<'a>(
(&*arena.alloc(Located::at(alias_region, alias)), state) (&*arena.alloc(Located::at(alias_region, alias)), state)
} }
Err(_) => { Err(_) => {
// this `=` likely occured inline; treat it as an invalid operator // this `=` likely occurred inline; treat it as an invalid operator
let fail = EExpr::BadOperator( let fail = EExpr::BadOperator(
arena.alloc([b'=']), arena.alloc([b'=']),
loc_op.region.start_line, loc_op.region.start_line,
@ -1044,7 +1044,7 @@ fn parse_expr_operator<'a>(
(Located::at(expr_region, good), ann_type, state) (Located::at(expr_region, good), ann_type, state)
} }
Err(_) => { Err(_) => {
// this `=` likely occured inline; treat it as an invalid operator // this `=` likely occurred inline; treat it as an invalid operator
let fail = EExpr::BadOperator( let fail = EExpr::BadOperator(
arena.alloc([b'=']), arena.alloc([b'=']),
loc_op.region.start_line, loc_op.region.start_line,
@ -1153,7 +1153,7 @@ fn parse_expr_operator<'a>(
} }
} }
Err(_) => { Err(_) => {
// this `:` likely occured inline; treat it as an invalid operator // this `:` likely occurred inline; treat it as an invalid operator
let fail = EExpr::BadOperator( let fail = EExpr::BadOperator(
arena.alloc([b':']), arena.alloc([b':']),
loc_op.region.start_line, loc_op.region.start_line,

View file

@ -3385,7 +3385,7 @@ mod test_parse {
// Reproducing this bug requires a bizarre set of things to all be true:
//
// * Must be parsing a *module* def (nested expr defs don't repro this)
// * That top-level module def contains a def inside it
// * That inner def is defining a function
// * The name of the inner def begins with a keyword (`if`, `then`, `else`, `when`, `is`)
//

View file

@ -1973,7 +1973,7 @@ fn to_type_report<'a>(
let region = Region::from_row_col(*row, *col);
let doc = alloc.stack(vec![
alloc.reflow(r"I just started parsing a function argument type, but I encountered two commas in a row:"),
alloc.region_with_subregion(surroundings, region),
alloc.concat(vec![alloc.reflow("Try removing one of them.")]),
]);

View file

@ -1046,7 +1046,7 @@ fn to_pattern_report<'b>(
let doc = alloc.stack(vec![ let doc = alloc.stack(vec![
alloc.text("This pattern is being used in an unexpected way:"), alloc.text("This pattern is being used in an unexpected way:"),
alloc.region(expr_region), alloc.region(expr_region),
pattern_type_comparision( pattern_type_comparison(
alloc, alloc,
found, found,
expected_type, expected_type,
@ -1078,7 +1078,7 @@ fn to_pattern_report<'b>(
.append(name.clone()) .append(name.clone())
.append(alloc.text(" is weird:")), .append(alloc.text(" is weird:")),
alloc.region(region), alloc.region(region),
pattern_type_comparision( pattern_type_comparison(
alloc, alloc,
found, found,
expected_type, expected_type,
@ -1112,7 +1112,7 @@ fn to_pattern_report<'b>(
.append(alloc.keyword("when")) .append(alloc.keyword("when"))
.append(alloc.text(" is causing a mismatch:")), .append(alloc.text(" is causing a mismatch:")),
alloc.region(region), alloc.region(region),
pattern_type_comparision( pattern_type_comparison(
alloc, alloc,
found, found,
expected_type, expected_type,
@ -1144,7 +1144,7 @@ fn to_pattern_report<'b>(
.append(alloc.keyword("when")) .append(alloc.keyword("when"))
.append(alloc.text(" does not match the previous ones:")), .append(alloc.text(" does not match the previous ones:")),
alloc.region(region), alloc.region(region),
pattern_type_comparision( pattern_type_comparison(
alloc, alloc,
found, found,
expected_type, expected_type,
@ -1175,7 +1175,7 @@ fn to_pattern_report<'b>(
} }
} }
fn pattern_type_comparision<'b>( fn pattern_type_comparison<'b>(
alloc: &'b RocDocAllocator<'b>, alloc: &'b RocDocAllocator<'b>,
actual: ErrorType, actual: ErrorType,
expected: ErrorType, expected: ErrorType,

View file

@ -450,11 +450,11 @@ impl<'a> RocDocAllocator<'a> {
} else {
ERROR_UNDERLINE.repeat((sub_region2.end_col - sub_region2.start_col) as usize)
};
let in_between = " "
.repeat((sub_region2.start_col.saturating_sub(sub_region1.end_col)) as usize);
self.text(highlight1)
.append(self.text(in_between))
.append(self.text(highlight2))
};

View file

@ -4774,7 +4774,7 @@ mod test_reporting {
r#"
DOUBLE COMMA
I just started parsing a function argument type, but I encountered two
commas in a row:
1 f : I64,,I64 -> I64

View file

@ -793,7 +793,7 @@ fn type_to_variable(
Alias(symbol, args, alias_type) => {
// TODO cache in uniqueness inference gives problems! all Int's get the same uniqueness var!
// Cache aliases without type arguments. Commonly used aliases like `Int` would otherwise get O(n)
// different variables (once for each occurrence). The recursion restriction is required
// for uniqueness types only: recursive aliases "introduce" an unbound uniqueness
// attribute in the body, when
//

View file

@ -82,7 +82,7 @@ When calling `List.append list1 list2` on a unique `list1`, first we'll check to
If there is not enough capacity to fit both lists, then we can try to call [`realloc`](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/realloc?view=vs-2019) to hopefully extend the size of our allocated memory. If `realloc` succeeds (meaning there happened to be enough free memory right after our current allocation), then we update `capacity` to reflect the new amount of space, and move on.
> **Note:** The reason we store capacity right after the last element in the list is because of how memory cache lines work. Whenever we need to access `capacity`, it's because we're about to increase the length of the list, which means that we will most certainly be writing to the memory location right after its last element. That in turn means that we'll need to have that memory location in cache, which in turn means that looking up the `capacity` there is guaranteed not to cause a cache miss. (It's possible that writing the new capacity value to a later address could cause a cache miss, but this strategy minimizes the chance of that happening.) An alternate design would be where we store the capacity right before the first element in the list. In that design we wouldn't have to re-write the capacity value at the end of the list every time we grew it, but we'd be much more likely to incur more cache misses that way - because we're working at the end of the list, not at the beginning. Cache misses are many times more expensive than an extra write to a memory address that's in cache already, not to mention the potential extra load instruction to add the length to the memory address of the first element (instead of subtracting 1 from that address), so we optimize for minimizing the highly expensive cache misses by always paying a tiny additional cost when increasing the length of the list, as well as a potential even tinier cost (zero, if the length already happens to be in a register) when looking up its capacity or refcount.
If `realloc` fails, then we have to fall back on the same "allocate new memory and copy everything" strategy that we do with shared lists.
@ -107,7 +107,7 @@ Since each bool value is a byte, it's okay for them to be packed side-by-side ev
Note that unlike in the `List Str` example before, there wouldn't be any unused memory between the refcount (or capacity, depending on whether the list was shared or unique) and the first element in the list. That will always be the case when the size of the refcount is no bigger than the alignment of the list's elements.
## Distinguishing between refcount and capacity in the host
If I'm a platform author, and I receive a `List` from the application, it's important that I be able to tell whether I'm dealing with a refcount or a capacity. (The uniqueness type information will have been erased by this time, because some applications will return a Unique list while others return a Shared list, so I need to be able to tell using runtime information only which is which.) This way, I can know to either increment the refcount, or to feel free to mutate it in-place using the capacity value.

View file

@ -6,7 +6,7 @@ license = "UPL-1.0"
edition = "2018"
[dependencies]
roc_gen_llvm = { path = "../gen_llvm" }
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }

View file

@ -383,7 +383,7 @@ fn list_walk_with_str() {
}
#[test]
fn list_walk_subtraction() {
assert_evals_to!(r#"List.walk [ 1, 2 ] Num.sub 1"#, 2, i64);
}

View file

@ -4,8 +4,9 @@ use roc_build::program::FunctionIterator;
use roc_can::builtins::builtin_defs_map; use roc_can::builtins::builtin_defs_map;
use roc_can::def::Def; use roc_can::def::Def;
use roc_collections::all::{MutMap, MutSet}; use roc_collections::all::{MutMap, MutSet};
use roc_gen::llvm::externs::add_default_roc_externs; use roc_gen_llvm::llvm::externs::add_default_roc_externs;
use roc_module::symbol::Symbol; use roc_module::symbol::Symbol;
use roc_mono::ir::OptLevel;
use roc_types::subs::VarStore; use roc_types::subs::VarStore;
fn promote_expr_to_module(src: &str) -> String { fn promote_expr_to_module(src: &str) -> String {
@ -180,7 +181,7 @@ pub fn helper<'a>(
}; };
let builder = context.create_builder(); let builder = context.create_builder();
let module = roc_gen::llvm::build::module_from_builtins(context, "app"); let module = roc_gen_llvm::llvm::build::module_from_builtins(context, "app");
// Add roc_alloc, roc_realloc, and roc_dealloc, since the repl has no // Add roc_alloc, roc_realloc, and roc_dealloc, since the repl has no
// platform to provide them. // platform to provide them.
@ -190,16 +191,16 @@ pub fn helper<'a>(
module.strip_debug_info(); module.strip_debug_info();
let opt_level = if cfg!(debug_assertions) { let opt_level = if cfg!(debug_assertions) {
roc_gen::llvm::build::OptLevel::Normal OptLevel::Normal
} else { } else {
roc_gen::llvm::build::OptLevel::Optimize OptLevel::Optimize
}; };
let module = arena.alloc(module); let module = arena.alloc(module);
let (module_pass, function_pass) = let (module_pass, function_pass) =
roc_gen::llvm::build::construct_optimization_passes(module, opt_level); roc_gen_llvm::llvm::build::construct_optimization_passes(module, opt_level);
let (dibuilder, compile_unit) = roc_gen::llvm::build::Env::new_debug_info(module); let (dibuilder, compile_unit) = roc_gen_llvm::llvm::build::Env::new_debug_info(module);
// mark our zig-defined builtins as internal // mark our zig-defined builtins as internal
use inkwell::attributes::{Attribute, AttributeLoc}; use inkwell::attributes::{Attribute, AttributeLoc};
@ -221,7 +222,7 @@ pub fn helper<'a>(
} }
// Compile and add all the Procs before adding main // Compile and add all the Procs before adding main
let env = roc_gen::llvm::build::Env { let env = roc_gen_llvm::llvm::build::Env {
arena: &arena, arena: &arena,
builder: &builder, builder: &builder,
dibuilder: &dibuilder, dibuilder: &dibuilder,
@ -235,7 +236,7 @@ pub fn helper<'a>(
exposed_to_host: MutSet::default(), exposed_to_host: MutSet::default(),
}; };
let (main_fn_name, main_fn) = roc_gen::llvm::build::build_procedures_return_main( let (main_fn_name, main_fn) = roc_gen_llvm::llvm::build::build_procedures_return_main(
&env, &env,
opt_level, opt_level,
procedures, procedures,
@ -275,7 +276,7 @@ macro_rules! assert_llvm_evals_to {
($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr, $ignore_problems:expr) => { ($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr, $ignore_problems:expr) => {
use bumpalo::Bump; use bumpalo::Bump;
use inkwell::context::Context; use inkwell::context::Context;
use roc_gen::run_jit_function; use roc_gen_llvm::run_jit_function;
let arena = Bump::new(); let arena = Bump::new();
let context = Context::create(); let context = Context::create();

View file

@ -63,9 +63,10 @@ pub fn generate(filenames: Vec<PathBuf>, std_lib: StdLib, build_dir: &Path) {
// Write each package's module docs html file // Write each package's module docs html file
for (docs_by_id, interns) in package.modules.iter_mut() { for (docs_by_id, interns) in package.modules.iter_mut() {
for module in docs_by_id.values_mut() { for module in docs_by_id.values_mut() {
let mut filename = String::new(); let module_dir = build_dir.join(module.name.replace(".", "/").as_str());
filename.push_str(module.name.as_str());
filename.push_str(".html"); fs::create_dir_all(&module_dir)
.expect("TODO gracefully handle not being able to create the module dir");
let rendered_module = template_html let rendered_module = template_html
.replace( .replace(
@ -78,7 +79,7 @@ pub fn generate(filenames: Vec<PathBuf>, std_lib: StdLib, build_dir: &Path) {
render_main_content(interns, module).as_str(), render_main_content(interns, module).as_str(),
); );
fs::write(build_dir.join(filename), rendered_module) fs::write(module_dir.join("index.html"), rendered_module)
.expect("TODO gracefully handle failing to write html"); .expect("TODO gracefully handle failing to write html");
} }
} }
@ -142,7 +143,7 @@ fn render_main_content(interns: &Interns, module: &mut ModuleDocumentation) -> S
); );
} }
} }
DocEntry::DetatchedDoc(docs) => { DocEntry::DetachedDoc(docs) => {
buf.push_str( buf.push_str(
markdown_to_html(&mut module.scope, interns, docs.to_string()).as_str(), markdown_to_html(&mut module.scope, interns, docs.to_string()).as_str(),
); );
@ -228,7 +229,6 @@ fn render_sidebar<'a, I: Iterator<Item = &'a ModuleDocumentation>>(modules: I) -
let href = { let href = {
let mut href_buf = String::new(); let mut href_buf = String::new();
href_buf.push_str(name); href_buf.push_str(name);
href_buf.push_str(".html");
href_buf href_buf
}; };
@ -305,7 +305,7 @@ pub fn files_to_documentations(
&std_lib, &std_lib,
src_dir.as_path(), src_dir.as_path(),
MutMap::default(), MutMap::default(),
8, // TODO: Is it okay to hardcode ptr_bytes here? I think it should be fine since we'er only type checking (also, 8 => 32bit system) std::mem::size_of::<usize>() as u32, // This is just type-checking for docs, so "target" doesn't matter
builtin_defs_map, builtin_defs_map,
) { ) {
Ok(loaded) => files_docs.push((loaded.documentation, loaded.interns)), Ok(loaded) => files_docs.push((loaded.documentation, loaded.interns)),
@ -567,7 +567,7 @@ fn make_doc_link(scope: &mut Scope, interns: &Interns, doc_item: &str) -> String
let mut link = String::new(); let mut link = String::new();
link.push_str(module_str); link.push_str(module_str);
link.push_str(".html#"); link.push('#');
link.push_str(ident_str); link.push_str(ident_str);
let mut buf = String::new(); let mut buf = String::new();
@ -583,8 +583,8 @@ fn make_doc_link(scope: &mut Scope, interns: &Interns, doc_item: &str) -> String
} }
Err(_) => { Err(_) => {
panic!( panic!(
"Could not find symbol in scope for module link : {}", "Tried to generate an automatic link in docs for symbol `{}`, but that symbol was not in scope in this module. Scope was: {:?}",
doc_item doc_item, scope
) )
} }
} }

View file

@ -6,9 +6,9 @@
<!-- <title>TODO populate this based on the module's name, e.g. "Parser - roc/parser"</title> --> <!-- <title>TODO populate this based on the module's name, e.g. "Parser - roc/parser"</title> -->
<!-- <meta name="description" content="TODO populate this based on the module's description"> --> <!-- <meta name="description" content="TODO populate this based on the module's description"> -->
<meta name="viewport" content="width=device-width"> <meta name="viewport" content="width=device-width">
<link rel="icon" href="favicon.svg"> <link rel="icon" href="/favicon.svg">
<script type="text/javascript" src="search.js" defer></script> <script type="text/javascript" src="/search.js" defer></script>
<link rel="stylesheet" href="styles.css"> <link rel="stylesheet" href="/styles.css">
</head> </head>
<body> <body>

View file

@ -39,9 +39,11 @@ Nice collection of research on innovative editors, [link](https://futureofcoding
* [godbolt.org Compiler Explorer](https://godbolt.org/) * [godbolt.org Compiler Explorer](https://godbolt.org/)
* [whitebox debug visualization](https://vimeo.com/483795097) * [whitebox debug visualization](https://vimeo.com/483795097)
* [Hest](https://ivanish.ca/hest-time-travel/) tool for making highly interactive simulations. * [Hest](https://ivanish.ca/hest-time-travel/) tool for making highly interactive simulations.
* [replit](https://replit.com/) collaborative browser based IDE.
* Say you have a failing test that used to work, it would be very valuable to see all code that was changed that was used only by that test. * Say you have a failing test that used to work, it would be very valuable to see all code that was changed that was used only by that test.
e.g. you have a test `calculate_sum_test` that only uses the function `add`, when the test fails you should be able to see a diff showing only what changed for the function `add`. It would also be great to have a diff of [expression values](https://homepages.cwi.nl/~storm/livelit/images/bret.png) Bret Victor style. An ambitious project would be to suggest or automatically try fixes based on these diffs. e.g. you have a test `calculate_sum_test` that only uses the function `add`, when the test fails you should be able to see a diff showing only what changed for the function `add`. It would also be great to have a diff of [expression values](https://homepages.cwi.nl/~storm/livelit/images/bret.png) Bret Victor style. An ambitious project would be to suggest or automatically try fixes based on these diffs.
* I think it could be possible to create a minimal reproduction of a program / block of code / code used by a single test. So for a failing unit test I would expect it to extract imports, the platform, types and functions that are necessary to run only that unit test and put them in a standalone roc project. This would be useful for sharing bugs with library+application authors and colleagues, for profiling or debugging with all "clutter" removed. * I think it could be possible to create a minimal reproduction of a program / block of code / code used by a single test. So for a failing unit test I would expect it to extract imports, the platform, types and functions that are necessary to run only that unit test and put them in a standalone roc project. This would be useful for sharing bugs with library+application authors and colleagues, for profiling or debugging with all "clutter" removed.
* Ability to share program state at a breakpoint with someone else.
### Cool regular editors ### Cool regular editors
@ -102,7 +104,7 @@ e.g. you have a test `calculate_sum_test` that only uses the function `add`, whe
* When refactoring; * When refactoring;
- Cutting and pasting code to a new file should automatically add imports to the new file and delete them from the old file. - Cutting and pasting code to a new file should automatically add imports to the new file and delete them from the old file.
- Ability to link e.g. variable name in comments to actual variable name. Comment is automatically updated when variable name is changed. - Ability to link e.g. variable name in comments to actual variable name. Comment is automatically updated when variable name is changed.
- When updating dependencies with breaking changes; show similar diffs from github projects that have succesfully updated that dependency. - When updating dependencies with breaking changes; show similar diffs from github projects that have successfully updated that dependency.
- AST backed renaming, changing variable/function/type name should change it all over the codebase. - AST backed renaming, changing variable/function/type name should change it all over the codebase.
* Automatically create all "arms" when pattern matching after entering `when var is` based on the type. * Automatically create all "arms" when pattern matching after entering `when var is` based on the type.
- All `when ... is` should be updated if the type is changed, e.g. adding Indigo to the Color type should add an arm everywhere where `when color is` is used. - All `when ... is` should be updated if the type is changed, e.g. adding Indigo to the Color type should add an arm everywhere where `when color is` is used.
@ -133,7 +135,7 @@ e.g. you have a test `calculate_sum_test` that only uses the function `add`, whe
- Webcam based eye tracking for quick selection. - Webcam based eye tracking for quick selection.
- Machine Learning: - Machine Learning:
* GPT-3 can generate correct python functions based on a comment describing the functionality, video [here](https://www.youtube.com/watch?v=utuz7wBGjKM). It's possible that training a model using ast's may lead to better results than text based models. * GPT-3 can generate correct python functions based on a comment describing the functionality, video [here](https://www.youtube.com/watch?v=utuz7wBGjKM). It's possible that training a model using ast's may lead to better results than text based models.
- Current autocomplete lacks flow, moving through suggestions with arrows is slow. Being able to code by weaving together autocomplete suggestions layed out in rows using eye tracking, that could flow. - Current autocomplete lacks flow, moving through suggestions with arrows is slow. Being able to code by weaving together autocomplete suggestions laid out in rows using eye tracking, that could flow.
#### Productivity Inspiration #### Productivity Inspiration
@ -205,9 +207,9 @@ Thoughts and ideas possibly taken from above inspirations or separate.
Or Total blindness where we need to trough sound to communicate to the user Or Total blindness where we need to trough sound to communicate to the user
Screen readers read trees of labeled elements. Each platform has different apis, but I think they are horrible. Just close your eyes and imagine listening to screen reader all day while you are using this majectic machines called computers. Screen readers read trees of labeled elements. Each platform has different apis, but I think they are horrible. Just close your eyes and imagine listening to screen reader all day while you are using this majectic machines called computers.
But blind people walk with a tool and they can react much better to sound/space relations than full on visal majority does. They are acute to sound as a spatial hint. And a hand for most of them is a very sensitive tool that can make sounds in space. But blind people walk with a tool and they can react much better to sound/space relations than full on visal majority does. They are acute to sound as a spatial hint. And a hand for most of them is a very sensitive tool that can make sounds in space.
Imagine if everytime for the user doesnt want to rely on shining rendered pixels on the screen for a feedback from machine, we make a accoustic room simulation, where with moving the "stick", either with mouse or with key arrows, we bump into one of the objects and that produces certain contextually appropriate sound (clean)*ding* Imagine if everytime for the user doesnt want to rely on shining rendered pixels on the screen for a feedback from machine, we make a acoustic room simulation, where with moving the "stick", either with mouse or with key arrows, we bump into one of the objects and that produces certain contextually appropriate sound (clean)*ding*
On the each level of abstraction they can make sounds more deeper, so then when you type letters you feel like you are playing with the sand (soft)*shh*. We would need help from some sound engeneer about it, but imagine moving down, which can be voice trigered command for motion impaired, you hear (soft)*pup* and the name of the module, and then you have options and commands appropriate for the module, they could map to those basic 4 buttons that we trained user on, and he would shortcut all the soft talk with click of a button. Think of the satisfaction when you can skip the dialog of the game and get straight into action. (X) Open functions! each function would make a sound and say its name, unless you press search and start searching for a specific function inside module, if you want one you select or move to next. On the each level of abstraction they can make sounds more deeper, so then when you type letters you feel like you are playing with the sand (soft)*shh*. We would need help from some sound engineer about it, but imagine moving down, which can be voice trigered command for motion impaired, you hear (soft)*pup* and the name of the module, and then you have options and commands appropriate for the module, they could map to those basic 4 buttons that we trained user on, and he would shortcut all the soft talk with click of a button. Think of the satisfaction when you can skip the dialog of the game and get straight into action. (X) Open functions! each function would make a sound and say its name, unless you press search and start searching for a specific function inside module, if you want one you select or move to next.
* Motor impariments * Motor impariments
[rant]BACKS OF CODERS ARE NOT HEALTHY! We need to change that![/neverstop] [rant]BACKS OF CODERS ARE NOT HEALTHY! We need to change that![/neverstop]

View file

@ -105,7 +105,7 @@ impl GridNodeMap {
} }
} }
// get position of first occurence of node_id if get_first_pos, else get the last occurence // get position of first occurrence of node_id if get_first_pos, else get the last occurrence
pub fn get_node_position(&self, node_id: MarkNodeId, get_first_pos: bool) -> EdResult<TextPos> { pub fn get_node_position(&self, node_id: MarkNodeId, get_first_pos: bool) -> EdResult<TextPos> {
let mut last_pos_opt = None; let mut last_pos_opt = None;

View file

@ -81,7 +81,7 @@ pub fn set_clipboard_txt(clipboard_opt: &mut Option<Clipboard>, txt: &str) -> Ed
clipboard.set_content(txt.to_owned())?; clipboard.set_content(txt.to_owned())?;
} else { } else {
return Err(ClipboardWriteFailed { return Err(ClipboardWriteFailed {
err_msg: "Clipboard was never initialized succesfully.".to_owned(), err_msg: "Clipboard was never initialized successfully.".to_owned(),
}); });
} }
@ -93,7 +93,7 @@ pub fn get_clipboard_txt(clipboard_opt: &mut Option<Clipboard>) -> EdResult<Stri
clipboard.get_content() clipboard.get_content()
} else { } else {
Err(ClipboardReadFailed { Err(ClipboardReadFailed {
err_msg: "Clipboard was never initialized succesfully.".to_owned(), err_msg: "Clipboard was never initialized successfully.".to_owned(),
}) })
} }
} }

View file

@ -32,7 +32,7 @@ pub fn index_of<T: ::std::fmt::Debug + std::cmp::Eq>(elt: T, slice: &[T]) -> EdR
Ok(index) Ok(index)
} }
// returns the index of the first occurence of element and index of the last occurence // returns the index of the first occurrence of element and index of the last occurrence
pub fn first_last_index_of<T: ::std::fmt::Debug + std::cmp::Eq>( pub fn first_last_index_of<T: ::std::fmt::Debug + std::cmp::Eq>(
elt: T, elt: T,
slice: &[T], slice: &[T],

View file

@ -5,7 +5,7 @@ use wgpu::{
ShaderStage, ShaderStage,
}; };
// orthographic projection is used to transfrom pixel coords to the coordinate system used by wgpu // orthographic projection is used to transform pixel coords to the coordinate system used by wgpu
#[repr(C)] #[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]

View file

@ -630,7 +630,7 @@ pub fn constrain_expr<'a>(
} }
} }
} }
_ => todo!("implement constaints for {:?}", expr), _ => todo!("implement constraints for {:?}", expr),
} }
} }

View file

@ -555,7 +555,7 @@ pub fn to_expr2<'a>(
// A "when" with no branches is a runtime error, but it will mess things up // A "when" with no branches is a runtime error, but it will mess things up
// if code gen mistakenly thinks this is a tail call just because its condition // if code gen mistakenly thinks this is a tail call just because its condition
// happend to be one. (The condition gave us our initial output value.) // happened to be one. (The condition gave us our initial output value.)
if branches.is_empty() { if branches.is_empty() {
output.tail_call = None; output.tail_call = None;
} }

View file

@ -42,6 +42,23 @@ pub const NODE_BYTES: usize = 32;
// usize pointers, which would be too big for us to have 16B nodes. // usize pointers, which would be too big for us to have 16B nodes.
// On the plus side, we could be okay with higher memory usage early on, // On the plus side, we could be okay with higher memory usage early on,
// and then later use the Mesh strategy to reduce long-running memory usage. // and then later use the Mesh strategy to reduce long-running memory usage.
//
// With this system, we can allocate up to 4B nodes. If we wanted to keep
// a generational index in there, like https://crates.io/crates/sharded-slab
// does, we could use some of the 32 bits for that. For example, if we wanted
// to have a 5-bit generational index (supporting up to 32 generations), then
// we would have 27 bits remaining, meaning we could only support at most
// 134M nodes. Since the editor has a separate Pool for each module, is that
// enough for any single module we'll encounter in practice? Probably, and
// especially if we allocate super large collection literals on the heap instead
// of in the pool.
//
// Another possible design is to try to catch reuse bugs using an "ASan" like
// approach: in development builds, whenever we "free" a particular slot, we
// can add it to a dev-build-only "freed nodes" list and don't hand it back
// out (so, we leak the memory.) Then we can (again, in development builds only)
// check to see if we're about to store something in zeroed-out memory; if so, check
// to see if it was
#[derive(Debug, Eq)] #[derive(Debug, Eq)]
pub struct NodeId<T> { pub struct NodeId<T> {

View file

@ -776,7 +776,7 @@ fn type_to_variable<'a>(
Alias(symbol, args, alias_type_id) => { Alias(symbol, args, alias_type_id) => {
// TODO cache in uniqueness inference gives problems! all Int's get the same uniqueness var! // TODO cache in uniqueness inference gives problems! all Int's get the same uniqueness var!
// Cache aliases without type arguments. Commonly used aliases like `Int` would otherwise get O(n) // Cache aliases without type arguments. Commonly used aliases like `Int` would otherwise get O(n)
// different variables (once for each occurence). The recursion restriction is required // different variables (once for each occurrence). The recursion restriction is required
// for uniqueness types only: recursive aliases "introduce" an unbound uniqueness // for uniqueness types only: recursive aliases "introduce" an unbound uniqueness
// attribute in the body, when // attribute in the body, when
// //

View file

@ -1,4 +1,4 @@
interface Quicksort exposes [ sortBy, show ] imports [] interface Quicksort exposes [ sortBy, sortWith, show ] imports []
show : List I64 -> Str show : List I64 -> Str
show = \list -> show = \list ->

File diff suppressed because one or more lines are too long

View file

@ -26,7 +26,7 @@ toStr = \@Scalar u32
Ok str -> str Ok str -> str
Err _ -> Err _ ->
# This will quickly crash if it ever runs, but we're confident # This will quickly crash if it ever runs, but we're confident
# this Err branch will never run. That's becasue it only runs # this Err branch will never run. That's because it only runs
# if Str.fromScalar receives an invalid scalar value, and we've # if Str.fromScalar receives an invalid scalar value, and we've
# already validated this! # already validated this!
toStr (@Scalar (scalar * 256)) toStr (@Scalar (scalar * 256))

View file

@ -1427,6 +1427,8 @@ Here are various Roc expressions involving operators, and what they desugar to.
| `a ^ b` | `Num.pow a b` | | `a ^ b` | `Num.pow a b` |
| `a % b` | `Num.rem a b` | | `a % b` | `Num.rem a b` |
| `a %% b` | `Num.mod a b` | | `a %% b` | `Num.mod a b` |
| `a >> b` | `Num.shr a b` |
| `a << b` | `Num.shl a b` |
| `-a` | `Num.neg a` | | `-a` | `Num.neg a` |
| `-f x y` | `Num.neg (f x y)` | | `-f x y` | `Num.neg (f x y)` |
| `a == b` | `Bool.isEq a b` | | `a == b` | `Bool.isEq a b` |

View file

@ -23,7 +23,7 @@ edition = "2018"
# commit of TheDan64/inkwell, push a new tag which points to the latest commit, # commit of TheDan64/inkwell, push a new tag which points to the latest commit,
# change the tag value in this Cargo.toml to point to that tag, and `cargo update`. # change the tag value in this Cargo.toml to point to that tag, and `cargo update`.
# This way, GitHub Actions works and nobody's builds get broken. # This way, GitHub Actions works and nobody's builds get broken.
inkwell = { git = "https://github.com/rtfeldman/inkwell", tag = "llvm10-0.release5", features = [ "llvm10-0" ] } inkwell = { git = "https://github.com/rtfeldman/inkwell", tag = "llvm12-0.release2", features = [ "llvm10-0" ] }
[features] [features]
target-arm = [] target-arm = []

View file

@ -185,9 +185,9 @@ macro_rules! impl_doc {
/// Mark this document as a group. /// Mark this document as a group.
/// ///
/// Groups are layed out on a single line if possible. Within a group, all basic documents with /// Groups are laid out on a single line if possible. Within a group, all basic documents with
/// several possible layouts are assigned the same layout, that is, they are all layed out /// several possible layouts are assigned the same layout, that is, they are all laid out
/// horizontally and combined into a one single line, or they are each layed out on their own /// horizontally and combined into a one single line, or they are each laid out on their own
/// line. /// line.
#[inline] #[inline]
pub fn group(self) -> Self { pub fn group(self) -> Self {
@ -714,9 +714,9 @@ where
/// Mark this document as a group. /// Mark this document as a group.
/// ///
/// Groups are layed out on a single line if possible. Within a group, all basic documents with /// Groups are laid out on a single line if possible. Within a group, all basic documents with
/// several possible layouts are assigned the same layout, that is, they are all layed out /// several possible layouts are assigned the same layout, that is, they are all laid out
/// horizontally and combined into a one single line, or they are each layed out on their own /// horizontally and combined into a one single line, or they are each laid out on their own
/// line. /// line.
#[inline] #[inline]
pub fn group(self) -> DocBuilder<'a, D, A> { pub fn group(self) -> DocBuilder<'a, D, A> {

1
www/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
build

25
www/build.sh Executable file
View file

@ -0,0 +1,25 @@
#!/bin/bash
set -euxo pipefail
# cd into the directory where this script lives.
# This allows us to run this script from the root project directory,
# which is what Netlify wants to do.
SCRIPT_RELATIVE_DIR=$(dirname "${BASH_SOURCE[0]}")
cd $SCRIPT_RELATIVE_DIR
rm -rf build/
cp -r public/ build/
pushd ..
echo 'Generating docs...'
# We run the CLI with --no-default-features because that way we don't have the
# "llvm" feature and therefore don't depend on LLVM being installed on the
# system. (Netlify's build servers have Rust installed, but not LLVM.)
#
# We set RUSTFLAGS to -Awarnings to ignore warnings during this build,
# because when building without "the" llvm feature (which is only ever done
# for this exact use case), the result is lots of "unused" warnings!
RUSTFLAGS=-Awarnings cargo run -p roc_cli --no-default-features docs compiler/builtins/docs/Bool.roc
mv generated-docs/ www/build/builtins
popd

30
www/netlify.toml Normal file
View file

@ -0,0 +1,30 @@
# This is the file that generates and deploys https://www.roc-lang.org,
# which is served on Netlify.
#
# Netlify's docs for how this configuration file works:
# https://docs.netlify.com/routing/headers/#syntax-for-the-netlify-configuration-file
[build]
publish = "build/"
command = "bash build.sh"
[[headers]]
for = "/*"
[headers.values]
X-Frame-Options = "DENY"
X-XSS-Protection = "1; mode=block"
Content-Security-Policy = "default-src 'self'; img-src *;"
X-Content-Type-Options = "nosniff"
# Redirect roc-lang.org/authors to the AUTHORS file in this repo
#
# This is referenced in the LICENSE file, which says to see roc-lang.org/authors
# for a list of authors!
[[redirects]]
from = "/authors"
to = "https://github.com/rtfeldman/roc/blob/trunk/AUTHORS"
force = true
status = 302 # TODO once the repo is public, use status = 200 and this URL:
# https://raw.githubusercontent.com/rtfeldman/roc/trunk/AUTHORS
#
# This way, roc-lang.org/authors will show the authors directly,
# proxied from the current AUTHORS file on GitHub, no redirects.

4
www/public/favicon.svg Normal file
View file

@ -0,0 +1,4 @@
<svg viewBox="0 0 52 53" xmlns="http://www.w3.org/2000/svg">
<style>polygon {fill: #5c0bff;}@media (prefers-color-scheme: dark) {polygon {fill: #7733ff;}} </style>
<polygon points="0,0 23.8834,3.21052 37.2438,19.0101 45.9665,16.6324 50.5,22 45,22 44.0315,26.3689 26.4673,39.3424 27.4527,45.2132 17.655,53 23.6751,22.7086"/>
</svg>

23
www/public/index.html Normal file
View file

@ -0,0 +1,23 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>The Roc Programming Language</title>
<!-- <meta name="description" content="A language for making delightful software."> -->
<meta name="viewport" content="width=device-width">
<!-- <link rel="icon" href="/favicon.svg"> -->
</head>
<body>
<h1>Work in Progress</h1>
<p>Roc's initial release is still under development, and this website is a placeholder until that release is ready.</p>
<p>In the meantime, if you'd like to learn more about Roc, here are some videos:</p>
<ul>
<li><a href="https://youtu.be/FMyyYdFSOHA">Roc on Zig Showtime</a> - April 24, 2021</li>
<li><a href="https://youtu.be/ZnYa99QoznE?t=4790">Roc at the Berlin FP Meetup</a> - September 1, 2020 (this one has details on how to try out Roc or get involved)</li>
</ul>
</body>
</html>

Some files were not shown because too many files have changed in this diff.