Merge branch 'main' into main

Olof Blomqvist, 2025-04-08 00:33:06 +02:00, committed by GitHub
commit 53e127da27 (GPG key ID: B5690EEEBB952194)
113 changed files with 1977 additions and 849 deletions


@ -7,6 +7,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
# use .tar.gz for quick testing
ARCHIVE_FORMAT: .tar.br
@ -183,7 +186,7 @@ jobs:
- name: build basic-cli docs
env:
ROC_DOCS_URL_ROOT: /packages/basic-cli/${{ env.RELEASE_TAG }}
ROC_DOCS_URL_ROOT: /basic-cli/${{ env.RELEASE_TAG }}
run: |
./roc_nightly/roc docs ./basic-cli/platform/main.roc
tar -czvf docs.tar.gz generated-docs/


@ -7,6 +7,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
test-basic-cli-release-arm64:
runs-on: [self-hosted, Linux, ARM64]


@ -7,6 +7,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
# use .tar.gz for quick testing
ARCHIVE_FORMAT: .tar.br
@ -168,3 +171,17 @@ jobs:
name: basic-webserver-platform
path: |
${{ env.TAR_FILENAME }}
- name: build basic-webserver docs
env:
ROC_DOCS_URL_ROOT: /basic-webserver/${{ env.RELEASE_TAG }}
run: |
./roc_nightly/roc docs ./basic-webserver/platform/main.roc
tar -czvf docs.tar.gz generated-docs/
- name: Upload docs archive
uses: actions/upload-artifact@v4
with:
name: release-assets-docs
path: |
docs.tar.gz


@ -3,6 +3,9 @@ on:
name: Benchmarks
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1
ROC_NUM_WORKERS: 1


@ -4,6 +4,9 @@ on:
- cron: '0 5 * * 1'
name: Garbage collect nix store
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
clean-big-ci:


@ -4,6 +4,9 @@ on:
- cron: '0 5 * * *'
name: Clean up nix on mac mini m1
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
clean-mac-mini-arm64:


@ -1,12 +1,15 @@
name: CI Manager
on:
pull_request:
# cancel current runs when a new commit is pushed
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
pull_request:
# Do not add permissions here! Configure them at the job level!
permissions: {}
name: CI Manager
jobs:
check-changes:
@ -45,8 +48,8 @@ jobs:
# Files that ci manager workflows should not run on.
- '!.gitignore'
- '!.reuse'
- '!AUTHORS'
- '!LEGAL_DETAILS'
- '!authors'
- '!legal_details'
- '!LICENSE'
- '!*.md'
- '!src/**/*.md'


@ -3,6 +3,9 @@ on:
name: Old CI manager
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
check-changes:
runs-on: ubuntu-22.04
@ -16,7 +19,7 @@ jobs:
id: check_ignored_files
run: |
git fetch origin ${{ github.base_ref }}
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^AUTHORS$)'; then
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^authors$)'; then
echo "should_run_tests=y" >> $GITHUB_OUTPUT
else
echo "should_run_tests=n" >> $GITHUB_OUTPUT
@ -36,7 +39,7 @@ jobs:
id: check_rs_comments
run: |
git fetch origin ${{ github.base_ref }}
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^AUTHORS$|\.rs|\.roc)'; then
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^authors$|\.rs|\.roc)'; then
echo "should_run_tests=y" >> $GITHUB_OUTPUT
else
if git diff --unified=0 origin/${{ github.base_ref }} HEAD '*.rs' | grep -E --color=never '^[+-]' | grep -qvE '^(\+\+\+|\-\-\-|[+-]\s*($|\/\/[^\/]|\/\*.*\*\/\s*$))'; then
@ -50,7 +53,7 @@ jobs:
id: check_roc_comments
run: |
git fetch origin ${{ github.base_ref }}
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^AUTHORS$|\.rs|\.roc)'; then
if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -qvE '(\.md$|\.css$|\.html$|^authors$|\.rs|\.roc)'; then
echo "should_run_tests=y" >> $GITHUB_OUTPUT
else
if git diff --unified=0 origin/${{ github.base_ref }} HEAD '*.roc' | grep -E --color=never '^[+-]' | grep -qvE '^(\+\+\+|\-\-\-|[+-]\s*($|#))'; then


@ -3,6 +3,9 @@ on:
name: CI New Compiler
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
check-zig:
runs-on: ubuntu-22.04


@ -3,6 +3,9 @@ on:
name: devtools nix files test - linux
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
devtools-test-linux:
name: devtools-test-linux


@ -3,6 +3,9 @@ on:
name: devtools nix files test - macos
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
devtools-test-macos:
name: devtools-test-mac


@ -4,6 +4,9 @@ on:
name: Docker images tests
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
nightly-ubuntu-latest:
name: nightly-ubuntu-latest


@ -3,6 +3,9 @@ on:
name: CI
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -3,6 +3,9 @@ on:
name: Macos x86-64 rust tests
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -3,6 +3,9 @@ on:
schedule:
- cron: '0 9 * * *' # 9=9am utc+0
# Do not add permissions here! Configure them at the job level!
permissions: {}
name: Check Markdown links
jobs:


@ -6,6 +6,9 @@ on:
name: Nightly Release Linux arm64/aarch64
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
build:
name: build and package nightly release


@ -6,6 +6,9 @@ on:
name: Nightly Release Linux x86_64
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
build:
name: build and package nightly release


@ -6,6 +6,9 @@ on:
name: Nightly Release macOS Apple Silicon
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1
LLVM_SYS_180_PREFIX: /opt/homebrew/opt/llvm@18


@ -6,6 +6,9 @@ on:
name: Nightly Release macOS x86_64
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
LLVM_SYS_180_PREFIX: /usr/local/opt/llvm@18


@ -3,6 +3,9 @@ on:
- cron: '0 9 * * *'
name: Nightly netlify build and deploy
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
build:


@ -6,6 +6,9 @@ name: test cargo build on linux arm64 inside nix
env:
RUST_BACKTRACE: 1
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
nix-linux-arm64-cargo:
name: nix-linux-arm64


@ -6,6 +6,9 @@ name: test default.nix on linux arm64
env:
RUST_BACKTRACE: 1
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
nix-linux-arm64-default:
name: nix-linux-arm64


@ -6,6 +6,9 @@ name: Nix linux x86_64 cargo test
env:
RUST_BACKTRACE: 1
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
nix-linux-x86:
name: nix-linux-x86


@ -3,6 +3,9 @@ on:
name: Nix apple silicon cargo test
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1
@ -22,23 +25,23 @@ jobs:
run: nix develop -c cargo fmt --all -- --check
- name: check code style with clippy
run: nix develop -c cargo clippy --workspace --tests -- --deny warnings
run: nix develop -c cargo clippy -j 2 --workspace --tests -- --deny warnings
- name: check code style with clippy --release
run: nix develop -c cargo clippy --workspace --tests --release -- --deny warnings
run: nix develop -c cargo clippy -j 2 --workspace --tests --release -- --deny warnings
- name: test building default.nix
run: nix-build
# for skipped tests: see issue 6274
- name: execute tests with --release
run: nix develop -c cargo test --locked --release -- --skip cli_tests::inspect_gui --skip cli_tests::hello_gui
run: nix develop -c cargo test -j 1 --locked --release -- --skip cli_tests::inspect_gui --skip cli_tests::hello_gui
- name: roc test all builtins
run: nix develop -c ./ci/roc_test_builtins.sh
- name: test aarch64 dev backend
run: nix develop -c cargo nextest-gen-dev --locked --release --no-fail-fast
run: nix develop -c cargo nextest-gen-dev -j 2 --locked --release --no-fail-fast
# we run the llvm wasm tests only on this machine because it is fast and wasm should be cross-target
- name: execute llvm wasm tests with --release


@ -3,6 +3,9 @@ on:
name: Nix macOS x86_64 cargo test
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -7,6 +7,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -1,8 +1,12 @@
name: 'Close stale PRs'
on:
schedule:
- cron: '30 1 * * *'
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
stale:
runs-on: ubuntu-latest


@ -4,6 +4,9 @@ on:
name: Test latest alpha releases for macOS and Linux
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
test-alpha:
name: test alpha macos 13 (x64), macos 14 (aarch64), ubuntu 22.04-24.04 (x64), ubuntu 22.04-24.04 (aarch64)


@ -4,6 +4,9 @@ on:
name: Test latest nightly releases for macOS and Linux
# Do not add permissions here! Configure them at the job level!
permissions: {}
jobs:
test-nightly:
name: test nightly macos 13 (x64), macos 14 (aarch64), ubuntu 22.04-24.04 (x64), ubuntu 22.04-24.04 (aarch64)


@ -3,6 +3,9 @@ on:
name: CI
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1
@ -14,8 +17,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Check for duplicate AUTHORS
run: diff <(sort AUTHORS) <(sort AUTHORS | uniq) # The < operator treats a string as a file. diff 'succeeds' if no difference.
- name: Check for duplicate authors
run: diff <(sort authors) <(sort authors | uniq) # The < operator treats a string as a file. diff 'succeeds' if no difference.
- name: Update PATH to use zig 13
run: |


@ -3,6 +3,9 @@ on:
name: cargo test debug nix
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -3,6 +3,9 @@ on:
name: windows - release build
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -3,6 +3,9 @@ on:
name: windows - subset of tests
# Do not add permissions here! Configure them at the job level!
permissions: {}
env:
RUST_BACKTRACE: 1


@ -1,5 +1,8 @@
name: deploy www.roc-lang.org
# Do not add permissions here! Configure them at the job level!
permissions: {}
on:
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:


@ -1,5 +1,8 @@
name: Notify zulip high priority issues
# Do not add permissions here! Configure them at the job level!
permissions: {}
on:
issues:
types: [labeled]

.gitignore (vendored, 4 changes)

@ -8,9 +8,9 @@
# Specifically keep these files with no extension
!Earthfile
!AUTHORS
!authors
!LICENSE*
!LEGAL*
!legal*
!Dockerfile
# .reuse/dep5 see https://reuse.software/
!dep5


@ -2,7 +2,7 @@
## Code of Conduct
We are committed to providing a friendly, safe and welcoming environment for all. See our [Code of Conduct](CODE_OF_CONDUCT.md) for details.
We are committed to providing a friendly, safe and welcoming environment for all. See our [Code of Conduct](code_of_conduct.md) for details.
## How to contribute

Glossary.md (new file, 162 additions)

@ -0,0 +1,162 @@
Here you can find definitions for words that are commonly used in the **compiler** along
with links to the codebase. Check https://www.roc-lang.org/tutorial if you want to know
about general Roc terms. Feel free to ask for a term to be added or add one yourself!
Contributor note: definitions should be roughly ordered as in a tutorial, e.g. the
Parser should be explained before Canonicalization.
## CLI
Command Line Interface. The entrypoint of the compiler that brings together all
functionality in the Roc toolset and makes it accessible to the user through the
terminal, e.g. `roc build main.roc`.
- new compiler: [src/main.zig](src/main.zig)
- old compiler: [crates/cli/src/main.rs](crates/cli/src/main.rs)
## Module
A .roc file forms one module.
Types of modules:
- app [(example)](https://github.com/roc-lang/examples/blob/main/examples/HelloWorld/main.roc): Applications are combined with a platform and compiled into an executable.
- module [(example)](https://github.com/roc-lang/examples/blob/main/examples/MultipleRocFiles/Hello.roc): Provide types and functions which can be imported into other modules.
- package [(example)](https://github.com/lukewilliamboswell/roc-json/blob/main/package/main.roc): Organises modules to share functionality across applications and platforms.
- platform [(example)](https://github.com/roc-lang/basic-cli/blob/main/platform/main.roc): Provides memory management and effects (like writing to files, network communication, etc.) to interface with the outside world. [Detailed explanation](https://www.roc-lang.org/platforms).
- hosted [(example)](https://github.com/roc-lang/basic-cli/blob/main/platform/Host.roc): Lists all Roc types and functions provided by the platform.
Implementation:
- new compiler:
- [processing of modules](src/coordinate.zig)
- [folder with lots of module related things](src/base)
- old compiler:
- [module folder](crates/compiler/module)
## Identifier
## Keyword
## Operator
## Syntax
## Compiler Phase
A compiler phase is a distinct stage in the process the compiler goes through to translate high-level source code into machine code that a computer can execute. Compilers don't do this in one big step; they break it down into several phases, each handling a specific task. Some examples of phases: [tokenization](#tokenization), [parsing](#parsing), and [code generation](#code-gen).
## Compiler Pass
## Tokenization
The process of breaking down source code into smaller units called tokens. These tokens are the basic building blocks of a programming language, such as [keywords](#keyword), [identifiers](#identifier), [operators](#operator), and [symbols](#symbol). The input code is scanned character by character and grouped into meaningful sequences based on the language's syntax rules.
This step makes [parsing](#parsing) simpler.
Example source code:
```roc
module []
foo : U64
```
Corresponding tokens:
```
KwModule(1:1-1:7),OpenSquare(1:8-1:9),CloseSquare(1:9-1:10),Newline(1:1-1:1),
Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpColon(3:5-3:6),UpperIdent(3:7-3:10),Newline(1:1-1:1)
```
New compiler:
- [tokenize.zig](src/check/parse/tokenize.zig)
Old compiler:
- We did not do a separate tokenization step; everything happened in the [parser](crates/compiler/parse/src/parser.rs).
## AST
(Abstract Syntax Tree)
An AST organizes and represents the source code as a tree-like structure.
So for the code below:
```roc
module []
foo : U64
```
The AST is:
```
(file
(module (1:1-1:10))
(type_anno (3:1-4:4)
"foo"
(tag (3:7-3:10) "U64")))
```
It captures the meaning of the code, while ignoring purely syntactic details like parentheses, commas, and semicolons.
Compared to raw source code, this structured format is much easier for the next compiler phase to analyze and manipulate programmatically.
The AST is created by the [parser](#parsing).
New compiler:
- See the `Node` struct in [this file](src/check/parse/IR.zig).
- You can see examples of ASTs in the .txt files in [this folder](src/snapshots).
Old compiler:
- See `FullAst` [here](crates/compiler/parse/src/ast.rs)
- [Some tests](crates/compiler/parse/tests/test_parse.rs)
- [Many snapshot tests](crates/compiler/test_syntax/tests/snapshots)
## Parsing
## Symbol
## Closure
## Canonicalization
## Lambda Set
## Type Inference
## Monomorphization
(mono, specialization)
Monomorphization, also known as type specialization, is the process of creating a distinct copy
of each instance of a generic function or value based on all specific usages in a program.
For example, a function with the type `Num a -> Num a` may only be called in the program with a
`U64` and an `I64`. Specialization will then create two functions with the types `U64 -> U64` and
`I64 -> I64`.
This trades off some compile time for much better runtime performance, since we don't need to
look up which implementation to call at runtime (AKA dynamic dispatch).
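As a minimal sketch of the same idea in Zig (the new compiler's implementation language, used here purely for illustration; this is not compiler code), comptime generics monomorphize the same way:
```zig
const std = @import("std");

// A generic function: `T` plays the role of `Num a`.
fn double(comptime T: type, x: T) T {
    return x + x;
}

pub fn main() void {
    // Only u64 and i64 are ever used, so the compiler emits exactly two
    // specializations (u64 -> u64 and i64 -> i64), and each call site
    // jumps directly to its own copy; no dynamic dispatch is needed.
    std.debug.print("{} {}\n", .{ double(u64, 21), double(i64, -21) });
}
```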
Related Files:
- new compiler:
- [specialize_functions.zig](src/build/specialize_functions.zig)
- [specialize_functions folder](src/build/specialize_functions)
- [specialize_types.zig](src/build/specialize_types.zig)
- [specialize types folder](src/build/specialize_types)
- old compiler:
- [mono folder](crates/compiler/mono)
- [mono tests](crates/compiler/test_mono)
- [mono macro tests](crates/compiler/test_mono_macros)
## Type Checking
## Reference Count
## Alias Analysis
## Code Gen
## Host
## Linking
### Surgical Linker
### Legacy Linker
## Glue
## WASM


@ -6,7 +6,7 @@
- [**tutorial**](https://roc-lang.org/tutorial)
- [**docs** for the standard library](https://www.roc-lang.org/builtins)
- [**examples**](https://www.roc-lang.org/examples)
- [**faq**: frequently asked questions](https://github.com/roc-lang/roc/blob/main/www/content/faq.md)
- [**faq**: frequently asked questions](https://www.roc-lang.org/faq)
- [**group chat**](https://roc.zulipchat.com) for help, questions and discussions
If you'd like to contribute, [get started here](CONTRIBUTING.md). Don't hesitate to ask for help on our [group chat](https://roc.zulipchat.com), we're friendly!


@ -43,9 +43,6 @@ pub fn build(b: *std.Build) void {
const tracy_callstack = b.option(bool, "tracy-callstack", "Include callstack information with Tracy data. Does nothing if -Dtracy is not provided") orelse (tracy != null);
const tracy_allocation = b.option(bool, "tracy-allocation", "Include allocation information with Tracy data. Does nothing if -Dtracy is not provided") orelse (tracy != null);
const tracy_callstack_depth: u32 = b.option(u32, "tracy-callstack-depth", "Declare callstack depth for Tracy data. Does nothing if -Dtracy_callstack is not provided") orelse 10;
if (tracy != null and target.result.os.tag == .macos) {
std.log.warn("Tracy has significantly more overhead on MacOS. Be cautious when generating timing and analyzing results.", .{});
}
if (tracy_callstack) {
std.log.warn("Tracy callstack is enable. This can significantly skew timings, but is important for understanding source location. Be cautious when generating timing and analyzing results.", .{});
}
@ -53,7 +50,12 @@ pub fn build(b: *std.Build) void {
// Create compile time build options
const build_options = b.addOptions();
build_options.addOption(bool, "enable_tracy", tracy != null);
build_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
if (target.result.os.tag == .macos and tracy_callstack) {
std.log.warn("Tracy callstack does not work on MacOS, disabling.", .{});
build_options.addOption(bool, "enable_tracy_callstack", false);
} else {
build_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
}
build_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
build_options.addOption(u32, "tracy_callstack_depth", tracy_callstack_depth);
@ -271,6 +273,7 @@ fn add_tracy(
base.root_module.addIncludePath(.{ .cwd_relative = tracy_path });
base.root_module.addCSourceFile(.{ .file = .{ .cwd_relative = client_cpp }, .flags = tracy_c_flags });
base.root_module.addCSourceFile(.{ .file = .{ .cwd_relative = "src/tracy-shutdown.cpp" }, .flags = tracy_c_flags });
if (!links_llvm) {
base.root_module.linkSystemLibrary("c++", .{ .use_pkg_config = .no });
}
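For context on how these flags surface in the source tree: options registered with `b.addOptions()` become an importable module. A minimal sketch, assuming the module is wired up under the name `build_options` (the name is an assumption, not shown in this diff):
```zig
const build_options = @import("build_options"); // assumed module name

pub fn maybeCaptureCallstack() void {
    // The flag is comptime-known, so when build.zig pins it to false
    // (as it now does on macOS), this branch compiles away entirely.
    if (comptime build_options.enable_tracy_callstack) {
        // ... collect a callstack sample for Tracy ...
    }
}
```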


@ -55,6 +55,6 @@
"build.zig.zon",
"src",
"LICENSE",
"LEGAL_DETAILS",
"legal_details",
},
}


@ -10,7 +10,7 @@ strip ./target/release-with-lto/roc_language_server
mkdir -p $1 $1/examples
mv target/release-with-lto/{roc,roc_language_server,lib} $1
mv LICENSE LEGAL_DETAILS $1
mv LICENSE legal_details $1
mv crates/cli/tests/platform-switching $1/examples
mv examples/README.md $1/examples


@ -17,7 +17,7 @@ use roc_region::all::{Loc, Region};
// BinOp precedence logic adapted from Gluon by Markus Westerlind
// https://github.com/gluon-lang/gluon - license information can be found in
// the LEGAL_DETAILS file in the root directory of this distribution.
// the legal_details file in the root directory of this distribution.
//
// Thank you, Markus!


@ -15,7 +15,7 @@ pub fn path_to_roc_binary() -> PathBuf {
pub fn path_to_binary(binary_name: &str) -> PathBuf {
// Adapted from https://github.com/volta-cli/volta/blob/cefdf7436a15af3ce3a38b8fe53bb0cfdb37d3dd/tests/acceptance/support/sandbox.rs#L680
// by the Volta Contributors - license information can be found in
// the LEGAL_DETAILS file in the root directory of this distribution.
// the legal_details file in the root directory of this distribution.
//
// Thank you, Volta contributors!
let mut path = env::var_os("CARGO_BIN_PATH")


@ -179,10 +179,6 @@ pub const IncN = fn (?[*]u8, u64) callconv(.C) void;
pub const Dec = fn (?[*]u8) callconv(.C) void;
const REFCOUNT_MAX_ISIZE: isize = 0;
// Only top bit set.
const REFCOUNT_IS_ATOMIC_MASK: isize = std.math.minInt(isize);
// All other bits of the refcount.
const REFCOUNT_VALUE_MASK = ~REFCOUNT_IS_ATOMIC_MASK;
pub const IntWidth = enum(u8) {
U8 = 0,
@ -203,7 +199,7 @@ const Refcount = enum {
atomic,
};
const RC_TYPE: Refcount = .normal;
const RC_TYPE: Refcount = .atomic;
pub fn increfRcPtrC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void {
if (RC_TYPE == .none) return;
@ -229,12 +225,7 @@ pub fn increfRcPtrC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void {
ptr_to_refcount.* = refcount +% amount;
},
.atomic => {
// If the first bit of the refcount is set, this variable is atomic.
if (refcount & REFCOUNT_IS_ATOMIC_MASK != 0) {
_ = @atomicRmw(isize, ptr_to_refcount, .Add, amount, .monotonic);
} else {
ptr_to_refcount.* = refcount +% amount;
}
_ = @atomicRmw(isize, ptr_to_refcount, .Add, amount, .monotonic);
},
.none => unreachable,
}
@ -393,17 +384,9 @@ inline fn decref_ptr_to_refcount(
}
},
.atomic => {
// If the first bit of the refcount is set, this variable is atomic.
if (refcount_ptr[0] & REFCOUNT_IS_ATOMIC_MASK != 0) {
const last = @atomicRmw(isize, &refcount_ptr[0], .Sub, 1, .monotonic);
if (last & REFCOUNT_VALUE_MASK == 1) {
free_ptr_to_refcount(refcount_ptr, alignment, elements_refcounted);
}
} else {
refcount_ptr[0] = refcount -% 1;
if (refcount == 1) {
free_ptr_to_refcount(refcount_ptr, alignment, elements_refcounted);
}
const last = @atomicRmw(isize, &refcount_ptr[0], .Sub, 1, .monotonic);
if (last == 1) {
free_ptr_to_refcount(refcount_ptr, alignment, elements_refcounted);
}
},
.none => unreachable,
@ -437,7 +420,7 @@ pub inline fn rcUnique(refcount: isize) bool {
return refcount == 1;
},
.atomic => {
return refcount & REFCOUNT_VALUE_MASK == 1;
return refcount == 1;
},
.none => {
return false;
@ -451,7 +434,7 @@ pub inline fn rcConstant(refcount: isize) bool {
return refcount == REFCOUNT_MAX_ISIZE;
},
.atomic => {
return refcount & REFCOUNT_VALUE_MASK == REFCOUNT_MAX_ISIZE & REFCOUNT_VALUE_MASK;
return refcount == REFCOUNT_MAX_ISIZE;
},
.none => {
return true;
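In short, the new code always takes the atomic path. A minimal sketch of the resulting decrement-and-free pattern (helper name hypothetical; the real code also handles element refcounts and alignment):
```zig
/// Returns true when the caller just released the last reference
/// and should free the allocation.
fn atomicDecref(refcount_ptr: *isize) bool {
    // @atomicRmw returns the value held *before* the subtraction,
    // so a result of 1 means the count has now reached 0.
    const last = @atomicRmw(isize, refcount_ptr, .Sub, 1, .monotonic);
    return last == 1;
}
```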


@ -211,7 +211,7 @@ fn parse_literal_suffix(num_str: &str) -> (Option<ParsedWidth>, &str) {
///
/// The Rust Project is dual-licensed under either Apache 2.0 or MIT,
/// at the user's choice. License information can be found in
/// the LEGAL_DETAILS file in the root directory of this distribution.
/// the legal_details file in the root directory of this distribution.
///
/// Thanks to the Rust project and its contributors!
fn from_str_radix(src: &str, radix: u32) -> Result<ParsedNumResult, IntErrorKind> {


@ -246,38 +246,25 @@ fn render_package_index(docs_by_module: &[(ModuleId, ModuleDocumentation)]) -> S
push_html(&mut module_list_buf, "li", [], link_buf.as_str());
}
let header = {
let mut header_buf = String::new();
push_html(
&mut header_buf,
"h2",
[("class", "module-name")],
"Exposed Modules",
);
push_html(
&mut header_buf,
"a",
[
("class", "llm-prompt-link"),
("title", "Documentation in a LLM-friendly format"),
("href", "llms.txt"),
],
"LLM docs",
);
header_buf
};
// The HTML for the index page
let mut index_buf = String::new();
push_html(
&mut index_buf,
"div",
[("class", "module-header-container")],
&header,
"a",
[
("class", "ai-docs-link"),
("title", "Documentation in an AI-friendly format"),
("href", "llms.txt"),
],
"<span style=\"vertical-align: 2px;\">🤖</span> AI docs",
);
push_html(
&mut index_buf,
"h2",
[("class", "module-name")],
"Exposed Modules",
);
push_html(


@ -206,6 +206,7 @@ main {
scrollbar-color: var(--violet) var(--body-bg-color);
scrollbar-gutter: stable both-edges;
scroll-padding-top: calc(16px + 16px + 1lh + 16px + 16px);
align-content: start;
}
main > * {
@ -714,19 +715,8 @@ pre>samp {
line-height: 1.3em;
}
.module-header-container {
display: flex;
justify-content: space-between;
align-items: flex-end;
margin-bottom: 48px;
}
.llm-prompt-link {
flex-shrink: 0;
}
.module-name {
flex-grow: 1;
.ai-docs-link {
margin-bottom: 10px;
}
@media (prefers-color-scheme: dark) {


@ -1458,7 +1458,7 @@ fn surgery_elf_help(
}
} else {
internal_error!(
"Undefined Symbol in relocation, {:+x?}: {:+x?} try compiling with --linker legacy",
"Undefined Symbol in relocation, {:+x?}: {:+x?}\n\nTIP: try compiling with `--linker legacy`",
rel,
app_obj.symbol_by_index(index)
);


@ -18,7 +18,7 @@ pub struct Out {
fn path_to_roc_binary() -> PathBuf {
// Adapted from https://github.com/volta-cli/volta/blob/cefdf7436a15af3ce3a38b8fe53bb0cfdb37d3dd/tests/acceptance/support/sandbox.rs#L680
// by the Volta Contributors - license information can be found in
// the LEGAL_DETAILS file in the root directory of this distribution.
// the legal_details file in the root directory of this distribution.
//
// Thank you, Volta contributors!
let mut path = env::var_os("CARGO_BIN_PATH")


@ -31,10 +31,11 @@ Note that the addresses shown in objdump may use a different offset compared to
1. [Download here](https://hex-rays.com/ida-free/)
2. Build your roc app with the legacy linker if it does not error only with the surgical linker: `roc build myApp.roc --linker=legacy`
3. Open the produced executable with IDA free, don't change any of the suggested settings.
4. You probably want to go to the function you saw in valgrind like `List_walkTryHelp_...` [here](https://github.com/roc-lang/examples/pull/192#issuecomment-2269571439). You can use Ctrl+F in the Functions window in IDA Free.
5. Right click and choose `Add Breakpoint` at the first instruction of the function you clicked on the previous step.
6. Run the debugger by pressing F9
7. Use step into (F7) and step over (F8) to see what's going on. Keep an eye on the `General Registers` and `Stack view` windows while you're stepping.
4. If IDA ever asks for the path for roc_app, just click cancel.
5. You probably want to go to the function you saw in valgrind like `List_walkTryHelp_...` [here](https://github.com/roc-lang/examples/pull/192#issuecomment-2269571439). You can use Ctrl+F in the Functions window in IDA Free.
6. Right click and choose `Add Breakpoint` at the first instruction of the function you clicked on the previous step.
7. Run the debugger by pressing F9
8. Use step into (F7) and step over (F8) to see what's going on. Keep an eye on the `General Registers` and `Stack view` windows while you're stepping.
#### gdb


@ -161,62 +161,89 @@
function processTrace(trace, otherTrace, resultId) {
const lines = trace.trim().split('\n');
const otherLines = otherTrace.trim().split('\n');
let contentHtml = '';
let lineNumbersHtml = '';
let indentLevel = 0;
let blockStartLine = -1;
const container = document.createElement('div');
const lineNumbersDiv = document.createElement('div');
const contentAreaDiv = document.createElement('div');
lineNumbersDiv.className = 'line-numbers';
contentAreaDiv.className = 'content-area';
container.appendChild(lineNumbersDiv);
container.appendChild(contentAreaDiv);
// Generate line numbers
for (let i = 1; i <= lines.length; i++) {
lineNumbersHtml += `${i}\n`;
const lineNum = document.createElement('div');
lineNum.textContent = i;
lineNumbersDiv.appendChild(lineNum);
}
let indentLevel = 0;
let currentBlock = contentAreaDiv;
for (let i = 0; i < lines.length; i++) {
const line = lines[i].trim();
const shouldHighlight = !otherLines.some(otherLine => otherLine.trim() === line);
const highlightClass = shouldHighlight ? 'highlight' : '';
const isBlockStart = line.endsWith('{') && i < lines.length - 1;
if (isBlockStart) {
blockStartLine = i;
const functionName = line;
contentHtml += `<div class="function-block" style="margin-left: ${indentLevel * 20}px">
<div class="function-header ${highlightClass}">
<span class="toggle-btn"></span>
<span class="function-name">${functionName}</span>
</div>
<div class="function-content">`;
const functionBlock = document.createElement('div');
const header = document.createElement('div');
const toggleBtn = document.createElement('span');
const functionName = document.createElement('span');
const content = document.createElement('div');
functionBlock.className = 'function-block';
functionBlock.style.marginLeft = `${indentLevel * 20}px`;
header.className = `function-header ${highlightClass}`;
toggleBtn.className = 'toggle-btn';
toggleBtn.textContent = '▼';
functionName.className = 'function-name';
functionName.textContent = line;
content.className = 'function-content';
header.appendChild(toggleBtn);
header.appendChild(functionName);
functionBlock.appendChild(header);
functionBlock.appendChild(content);
currentBlock.appendChild(functionBlock);
indentLevel++;
currentBlock = content;
} else if (line.includes('}')) {
if (indentLevel > 0) {
indentLevel--;
contentHtml += `</div><span class="function-end ${highlightClass}">${line}</span></div>`;
const endSpan = document.createElement('span');
endSpan.className = `function-end ${highlightClass}`;
endSpan.textContent = line;
currentBlock.appendChild(endSpan);
currentBlock = currentBlock.parentElement.parentElement; // Move up to parent block
} else {
contentHtml += `<div class="line ${highlightClass}">${line}</div>`;
const lineDiv = document.createElement('div');
lineDiv.className = `line ${highlightClass}`;
lineDiv.textContent = line;
currentBlock.appendChild(lineDiv);
}
} else {
const isLastLineBlock = line.endsWith('{') && i === lines.length - 1;
if (isLastLineBlock) {
contentHtml += `<div class="line ${highlightClass}">${line}</div>`;
} else {
contentHtml += `<div class="line ${highlightClass}">${line}</div>`;
}
const lineDiv = document.createElement('div');
lineDiv.className = `line ${highlightClass}`;
lineDiv.textContent = line;
currentBlock.appendChild(lineDiv);
}
}
return `<div class="line-numbers">${lineNumbersHtml}</div><div class="content-area">${contentHtml}</div>`;
return container;
}
function initializeCollapsible(containerId) {
const container = document.getElementById(containerId);
const functionBlocks = container.querySelectorAll('.function-block');
functionBlocks.forEach(block => {
const header = block.querySelector('.function-header');
const toggleBtn = block.querySelector('.toggle-btn');
header.addEventListener('click', (e) => {
header.addEventListener('click', () => {
block.classList.toggle('collapsed');
toggleBtn.textContent = block.classList.contains('collapsed') ? '▶' : '▼';
});
@ -227,8 +254,13 @@
const trace1 = document.getElementById('input1').value;
const trace2 = document.getElementById('input2').value;
document.getElementById('result1').innerHTML = processTrace(trace1, trace2, 'result1');
document.getElementById('result2').innerHTML = processTrace(trace2, trace1, 'result2');
const result1 = document.getElementById('result1');
const result2 = document.getElementById('result2');
result1.innerHTML = ''; // Clear previous content
result2.innerHTML = ''; // Clear previous content
result1.appendChild(processTrace(trace1, trace2, 'result1'));
result2.appendChild(processTrace(trace2, trace1, 'result2'));
initializeCollapsible('result1');
initializeCollapsible('result2');


@ -1,3 +1,3 @@
# Examples
Check out the [roc examples site](https://github.com/roc-lang/examples) to see examples of using roc.
Check out the [roc examples site](https://www.roc-lang.org/examples) to see examples of using roc.


@ -97,6 +97,7 @@
zls # zig language server
watchexec
simple-http-server # to view the website locally
]);
aliases = ''


@ -1,6 +1,14 @@
{ rustPlatform, fetchFromGitHub }:
{
lib,
stdenv,
rustPlatform,
fetchFromGitHub,
pkg-config,
openssl,
darwin,
}:
rustPlatform.buildRustPackage {
rustPlatform.buildRustPackage rec {
pname = "simple-http-server";
version = "0.1.0"; # adjust version as needed
@ -8,9 +16,18 @@ rustPlatform.buildRustPackage {
owner = "Anton-4";
repo = "simple-http-server";
rev = "f3089e5736a1e8abdb69ba9e7618fe5e518a2df0";
sha256 = "sha256-Vcckv75hmJV7F9mqPtM3piSIZg9MvKI/oU7/tv4viy4=";
};
cargoLock = {
lockFile = "${src}/Cargo.lock";
};
nativeBuildInputs = [ pkg-config ];
buildInputs =
[ openssl ]
++ lib.optionals stdenv.hostPlatform.isDarwin [
darwin.apple_sdk.frameworks.Security
];
}


@ -80,11 +80,9 @@ Often times, it is worth profiling once with `-Dtracy-callstack=false` to have m
Now that I have the compiler built with tracy, I can launch the tracy server on my mac machine to record the result (you can also run it on the same machine if wanted).
Run `tracy-profiler`, input the correct ip address, and press connect.
Then run the instrumented version of zig: `./zig-out/bin/roc format /tmp/new.roc`.
For best results, `export TRACY_NO_EXIT=1` before running. This ensures that all tracing data is uploaded before roc exits.
Also, run as the root user to capture more information.
In this case, I ran:
```
export TRACY_NO_EXIT=1
sudo ./zig-out/bin/roc format /tmp/new.roc
```


@ -62,6 +62,15 @@ pub const Store = struct {
attributes: std.ArrayListUnmanaged(Attributes) = .{},
next_unique_name: u32 = 0,
/// Initialize the memory for an `Ident.Store` with a specific capacity.
pub fn initCapacity(gpa: std.mem.Allocator, capacity: usize) Store {
return .{
.interner = SmallStringInterner.initCapacity(gpa, capacity),
.exposing_modules = std.ArrayListUnmanaged(ModuleImport.Idx).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
.attributes = std.ArrayListUnmanaged(Attributes).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
};
}
/// Deinitialize the memory for an `Ident.Store`.
pub fn deinit(self: *Store, gpa: std.mem.Allocator) void {
self.interner.deinit(gpa);


@ -25,12 +25,13 @@ problems: Problem.List,
/// Initialize the module environment.
pub fn init(gpa: std.mem.Allocator) Self {
// TODO: maybe wire in smarter default based on the initial input text size.
return Self{
.gpa = gpa,
.idents = .{},
.ident_ids_for_slicing = .{},
.strings = .{},
.problems = .{},
.idents = Ident.Store.initCapacity(gpa, 256),
.ident_ids_for_slicing = collections.SafeList(Ident.Idx).initCapacity(gpa, 64),
.strings = StringLiteral.Store.initCapacityBytes(gpa, 256),
.problems = Problem.List.initCapacity(gpa, 16),
};
}


@ -33,6 +33,15 @@ pub const Store = struct {
/// continues to the previous byte
buffer: std.ArrayListUnmanaged(u8) = .{},
/// Initializes a `StringLiteral.Store` with capacity `bytes` of space.
/// Note this specifically is the number of bytes for storing strings.
/// The string `hello, world!` will use 14 bytes including the null terminator.
pub fn initCapacityBytes(gpa: std.mem.Allocator, bytes: usize) Store {
return .{
.buffer = std.ArrayListUnmanaged(u8).initCapacity(gpa, bytes) catch |err| exitOnOom(err),
};
}
/// Deinitialize a `StringLiteral.Store`'s memory.
pub fn deinit(self: *Store, gpa: std.mem.Allocator) void {
self.buffer.deinit(gpa);
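These stores all share the same pre-allocation idiom. A self-contained sketch of the pattern, assuming (as in this codebase) an `exitOnOom` helper that aborts on allocation failure:
```zig
const std = @import("std");

fn exitOnOom(err: std.mem.Allocator.Error) noreturn {
    std.debug.panic("out of memory: {}", .{err});
}

const Scratch = struct {
    items: std.ArrayListUnmanaged(u32) = .{},

    /// Reserve space up front so the common case never reallocates.
    fn initCapacity(gpa: std.mem.Allocator, capacity: usize) Scratch {
        return .{
            .items = std.ArrayListUnmanaged(u32).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
        };
    }

    fn deinit(self: *Scratch, gpa: std.mem.Allocator) void {
        self.items.deinit(gpa);
    }
};
```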


@ -1,13 +1,5 @@
//! The intermediate representation (IR) for a Roc module that has been monomorphized.
//!
//! Monomorphization, also known as type specialization, is the process of creating a distinct copy
//! of each instance of a generic function or value based on all specific usages in a program.
//! For example; a function with the type `Num a -> Num a` may only be called in the program with a
//! `U64` and a `I64`. Specialization will then create two functions with the types `U64 -> U64` and
//! `I64 -> I64`.
//! This trades off some compile time for a much better runtime performance, since we don't need to
//! look up which implementation to call at runtime (AKA dynamic dispatch).
//!
//! Doing type specialization as the first build stage helps simplify compilation of lambda sets, or
//! values captured by closures.
//!


@ -31,7 +31,7 @@ tokens: TokenizedBuffer,
store: NodeStore,
errors: []const Diagnostic,
/// Returns true if the given region spans multiple lines.
/// Calculate whether this region is - or will be - multiline
pub fn regionIsMultiline(self: *IR, region: Region) bool {
var i = region.start;
const tags = self.tokens.tokens.items(.tag);
@ -112,6 +112,10 @@ pub const Diagnostic = struct {
pub const Region = struct {
start: TokenIdx,
end: TokenIdx,
pub fn spanAcross(self: Region, other: Region) Region {
return .{ .start = self.start, .end = other.end };
}
};
/// Unstructured information about a Node. These
@ -502,39 +506,26 @@ pub const NodeStore = struct {
pub fn initWithCapacity(gpa: std.mem.Allocator, capacity: usize) NodeStore {
var store: NodeStore = .{
.gpa = gpa,
.nodes = .{},
.extra_data = .{},
.scratch_statements = .{},
.scratch_tokens = .{},
.scratch_exprs = .{},
.scratch_patterns = .{},
.scratch_record_fields = .{},
.scratch_pattern_record_fields = .{},
.scratch_when_branches = .{},
.scratch_type_annos = .{},
.scratch_anno_record_fields = .{},
.scratch_exposed_items = .{},
.nodes = Node.List.initCapacity(gpa, capacity),
.extra_data = std.ArrayListUnmanaged(u32).initCapacity(gpa, capacity / 2) catch |err| exitOnOom(err),
.scratch_statements = std.ArrayListUnmanaged(StatementIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_tokens = std.ArrayListUnmanaged(TokenIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_exprs = std.ArrayListUnmanaged(ExprIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_patterns = std.ArrayListUnmanaged(PatternIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_record_fields = std.ArrayListUnmanaged(RecordFieldIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_pattern_record_fields = std.ArrayListUnmanaged(PatternRecordFieldIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_when_branches = std.ArrayListUnmanaged(WhenBranchIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_type_annos = std.ArrayListUnmanaged(TypeAnnoIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_anno_record_fields = std.ArrayListUnmanaged(AnnoRecordFieldIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
.scratch_exposed_items = std.ArrayListUnmanaged(ExposedItemIdx).initCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err),
};
store.nodes.ensureTotalCapacity(gpa, capacity);
_ = store.nodes.append(gpa, .{
.tag = .root,
.main_token = 0,
.data = .{ .lhs = 0, .rhs = 0 },
.region = .{ .start = 0, .end = 0 },
});
store.extra_data.ensureTotalCapacity(gpa, capacity / 2) catch |err| exitOnOom(err);
store.scratch_statements.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_tokens.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_exprs.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_patterns.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_record_fields.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_pattern_record_fields.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_when_branches.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_type_annos.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_anno_record_fields.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
store.scratch_exposed_items.ensureTotalCapacity(gpa, scratch_90th_percentile_capacity) catch |err| exitOnOom(err);
return store;
}
@ -543,7 +534,7 @@ pub const NodeStore = struct {
// will only have to be resized in >90th percentile case.
// It is not scientific, and should be tuned when we have enough
// Roc code to instrument this and determine a real 90th percentile.
const scratch_90th_percentile_capacity = 10;
const scratch_90th_percentile_capacity = std.math.ceilPowerOfTwoAssert(usize, 10);
/// Deinitializes all data owned by the store.
/// A caller should ensure that they have taken
@ -673,7 +664,7 @@ pub const NodeStore = struct {
node.region = mod.region;
},
.malformed => {
@panic("use addMalformed instead");
@panic("Use addMalformed instead");
},
else => {},
}
@ -804,7 +795,7 @@ pub const NodeStore = struct {
node.data.rhs = a.anno.id;
},
.malformed => {
@panic("use addMalformed instead");
@panic("Use addMalformed instead");
},
}
const nid = store.nodes.append(store.gpa, node);
@ -875,16 +866,14 @@ pub const NodeStore = struct {
node.region = u.region;
},
.alternatives => |a| {
// disabled because it was hit by a fuzz test
// for a repro see src/snapshots/fuzz_crash_012.txt
// std.debug.assert(a.patterns.span.len > 1);
std.debug.assert(a.patterns.span.len > 1);
node.region = a.region;
node.tag = .alternatives_patt;
node.data.lhs = a.patterns.span.start;
node.data.rhs = a.patterns.span.len;
},
.malformed => {
@panic("use addMalformed instead");
@panic("Use addMalformed instead");
},
}
const nid = store.nodes.append(store.gpa, node);
@ -1033,7 +1022,7 @@ pub const NodeStore = struct {
node.region = e.region;
},
.malformed => {
@panic("use addMalformed instead");
@panic("Use addMalformed instead");
},
}
const nid = store.nodes.append(store.gpa, node);
@ -1207,7 +1196,7 @@ pub const NodeStore = struct {
node.data.lhs = p.anno.id;
},
.malformed => {
@panic("use addMalformed instead");
@panic("Use addMalformed instead");
},
}
@ -1397,6 +1386,12 @@ pub const NodeStore = struct {
.anno = .{ .id = node.data.rhs },
} };
},
.malformed => {
return .{ .malformed = .{
.reason = @enumFromInt(node.data.lhs),
.region = node.region,
} };
},
else => {
std.debug.panic("Expected a valid statement tag, got {s}", .{@tagName(node.tag)});
},
@ -1482,6 +1477,12 @@ pub const NodeStore = struct {
.region = node.region,
} };
},
.malformed => {
return .{ .malformed = .{
.reason = @enumFromInt(node.data.lhs),
.region = node.region,
} };
},
else => {
std.debug.panic("Expected a valid pattern tag, got {s}", .{@tagName(node.tag)});
},
@ -1804,22 +1805,20 @@ pub const NodeStore = struct {
region: Region,
pub fn toSExpr(self: @This(), env: *base.ModuleEnv, ir: *IR, line_starts: std.ArrayList(u32)) sexpr.Expr {
var node = sexpr.Expr.init(env.gpa, "file");
node.appendRegionChild(env.gpa, ir.regionInfo(self.region, line_starts));
var file_node = sexpr.Expr.init(env.gpa, "file");
const header = ir.store.getHeader(self.header);
var header_node = header.toSExpr(env, ir, line_starts);
node.appendNodeChild(env.gpa, &header_node);
file_node.appendNodeChild(env.gpa, &header_node);
for (ir.store.statementSlice(self.statements)) |stmt_id| {
const stmt = ir.store.getStatement(stmt_id);
var stmt_node = stmt.toSExpr(env, ir, line_starts);
node.appendNodeChild(env.gpa, &stmt_node);
file_node.appendNodeChild(env.gpa, &stmt_node);
}
return node;
return file_node;
}
};
@ -1830,19 +1829,17 @@ pub const NodeStore = struct {
region: Region,
pub fn toSExpr(self: @This(), env: *base.ModuleEnv, ir: *IR, line_starts: std.ArrayList(u32)) sexpr.Expr {
var node = sexpr.Expr.init(env.gpa, "block");
node.appendRegionChild(env.gpa, ir.regionInfo(self.region, line_starts));
var block_node = sexpr.Expr.init(env.gpa, "block");
for (ir.store.statementSlice(self.statements)) |stmt_idx| {
const stmt = ir.store.getStatement(stmt_idx);
var stmt_node = stmt.toSExpr(env, ir, line_starts);
node.appendNodeChild(env.gpa, &stmt_node);
block_node.appendNodeChild(env.gpa, &stmt_node);
}
return node;
return block_node;
}
};
@ -1942,12 +1939,11 @@ pub const NodeStore = struct {
},
pub fn toSExpr(self: @This(), env: *base.ModuleEnv, ir: *IR, line_starts: std.ArrayList(u32)) sexpr.Expr {
_ = line_starts;
var node = sexpr.Expr.init(env.gpa, "exposed_item");
var inner_node = sexpr.Expr.init(env.gpa, @tagName(self));
switch (self) {
.lower_ident => |i| {
node.appendRegionChild(env.gpa, ir.regionInfo(i.region, line_starts));
const token = ir.tokens.tokens.get(i.ident);
const text = env.idents.getText(token.extra.interned);
inner_node.appendStringChild(env.gpa, text);
@ -1958,7 +1954,6 @@ pub const NodeStore = struct {
}
},
.upper_ident => |i| {
node.appendRegionChild(env.gpa, ir.regionInfo(i.region, line_starts));
const token = ir.tokens.tokens.get(i.ident);
const text = env.idents.getText(token.extra.interned);
inner_node.appendStringChild(env.gpa, text);
@ -1969,7 +1964,6 @@ pub const NodeStore = struct {
}
},
.upper_ident_star => |i| {
node.appendRegionChild(env.gpa, ir.regionInfo(i.region, line_starts));
const token = ir.tokens.tokens.get(i.ident);
const text = env.idents.getText(token.extra.interned);
inner_node.appendStringChild(env.gpa, text);


@ -58,7 +58,7 @@ fn test_parser(source: []const u8, run: fn (parser: Parser) TestError!void) Test
/// helper to advance the parser until a non-newline token is encountered
pub fn advance(self: *Parser) void {
while (true) {
while (true and self.peek() != .EndOfFile) {
self.pos += 1;
if (self.peek() != .Newline) {
break;
@ -101,12 +101,17 @@ pub fn peekLast(self: Parser) ?Token.Tag {
}
/// peek at the next available token
pub fn peekNext(self: Parser) Token.Tag {
const next = self.pos + 1;
var next = self.pos + 1;
const tags = self.tok_buf.tokens.items(.tag);
while (next < self.tok_buf.tokens.len and tags[next] == .Newline) {
next += 1;
}
if (next >= self.tok_buf.tokens.len) {
return .EndOfFile;
}
return self.tok_buf.tokens.items(.tag)[next];
return tags[next];
}
/// add a diagnostic error
pub fn pushDiagnostic(self: *Parser, tag: IR.Diagnostic.Tag, region: IR.Region) void {
self.diagnostics.append(self.gpa, .{
@ -144,6 +149,10 @@ pub fn parseFile(self: *Parser) void {
.region = .{ .start = 0, .end = 0 },
});
while (self.peek() == .Newline) {
self.advanceOne();
}
const header = self.parseHeader();
const scratch_top = self.store.scratchStatementTop();
@ -315,6 +324,8 @@ pub fn parseAppHeader(self: *Parser) IR.NodeStore.HeaderIdx {
self.store.clearScratchRecordFieldsFrom(fields_scratch_top);
return self.pushMalformed(IR.NodeStore.HeaderIdx, .expected_package_platform_close_curly, start);
}
self.advanceOne(); // Advance past CloseCurly
const end = self.pos;
const packages = self.store.recordFieldSpanFrom(fields_scratch_top);
self.advance();
@ -326,7 +337,7 @@ pub fn parseAppHeader(self: *Parser) IR.NodeStore.HeaderIdx {
.platform_name = pn,
.provides = provides,
.packages = packages,
.region = .{ .start = 0, .end = 0 },
.region = .{ .start = start, .end = end },
},
};
const idx = self.store.addHeader(header);
@ -410,13 +421,15 @@ pub fn parseStmt(self: *Parser) ?IR.NodeStore.StatementIdx {
qualifier = self.pos;
self.advance(); // Advance past LowerIdent
}
if (self.peek() == .UpperIdent or (qualifier != null and self.peek() == .NoSpaceDotUpperIdent)) {
if (self.peek() == .UpperIdent or (qualifier != null and (self.peek() == .NoSpaceDotUpperIdent or self.peek() == .DotUpperIdent))) {
var exposes: IR.NodeStore.ExposedItemSpan = .{ .span = .{ .start = 0, .len = 0 } };
const module_name_tok = self.pos;
var end = self.pos;
if (self.peekNext() == .KwAs) {
self.advance(); // Advance past UpperIdent
self.advance(); // Advance past KwAs
alias_tok = self.pos;
end = self.pos;
self.expect(.UpperIdent) catch {
const malformed = self.pushMalformed(IR.NodeStore.StatementIdx, .unexpected_token, start);
self.advance();
@ -429,7 +442,7 @@ pub fn parseStmt(self: *Parser) ?IR.NodeStore.StatementIdx {
return self.pushMalformed(IR.NodeStore.StatementIdx, .import_exposing_no_open, start);
};
const scratch_top = self.store.scratchExposedItemTop();
_ = self.parseCollectionSpan(IR.NodeStore.ExposedItemIdx, .CloseSquare, IR.NodeStore.addScratchExposedItem, Parser.parseExposedItem) catch {
end = self.parseCollectionSpan(IR.NodeStore.ExposedItemIdx, .CloseSquare, IR.NodeStore.addScratchExposedItem, Parser.parseExposedItem) catch {
while (self.peek() != .CloseSquare and self.peek() != .EndOfFile) {
self.advance();
}
@ -446,7 +459,7 @@ pub fn parseStmt(self: *Parser) ?IR.NodeStore.StatementIdx {
.qualifier_tok = qualifier,
.alias_tok = alias_tok,
.exposes = exposes,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
if (self.peek() == .Newline) {
self.advance();
@ -503,20 +516,16 @@ pub fn parseStmt(self: *Parser) ?IR.NodeStore.StatementIdx {
self.advance(); // Advance past LowerIdent
self.advance(); // Advance past OpAssign
const idx = self.parseExpr();
const expr_region = self.store.nodes.items.items(.region)[idx.id];
const patt_idx = self.store.addPattern(.{ .ident = .{
.ident_tok = start,
.region = .{ .start = start, .end = start },
} });
const statement_idx = self.store.addStatement(.{
.decl = .{
.pattern = patt_idx,
.body = idx,
.region = .{
.start = start,
.end = self.pos - 1, // we want the end of the previous token
},
},
});
const statement_idx = self.store.addStatement(.{ .decl = .{
.pattern = patt_idx,
.body = idx,
.region = .{ .start = start, .end = expr_region.end },
} });
if (self.peek() == .Newline) {
self.advance();
}
@ -609,7 +618,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
self.advance(); // Advance past NoSpaceOpenRound
// Parse args
const scratch_top = self.store.scratchPatternTop();
_ = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseRound, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
const args_end = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseRound, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
while (self.peek() != .CloseRound and self.peek() != .EndOfFile) {
self.advance();
}
@ -618,7 +627,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
};
const args = self.store.patternSpanFrom(scratch_top);
pattern = self.store.addPattern(.{ .tag = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = args_end },
.args = args,
.tag_tok = start,
} });
@ -647,7 +656,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
// List
self.advance();
const scratch_top = self.store.scratchPatternTop();
_ = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseSquare, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
const end = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseSquare, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
while (self.peek() != .CloseSquare and self.peek() != .EndOfFile) {
self.advance();
}
@ -657,7 +666,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
const patterns = self.store.patternSpanFrom(scratch_top);
pattern = self.store.addPattern(.{ .list = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.patterns = patterns,
} });
},
@ -675,9 +684,10 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
if (self.peek() != .CloseCurly) {
return self.pushMalformed(IR.NodeStore.PatternIdx, .pattern_unexpected_token, start);
}
const end = self.pos;
self.advance();
pattern = self.store.addPattern(.{ .record = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.fields = fields,
} });
},
@ -708,7 +718,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
.OpenRound, .NoSpaceOpenRound => {
self.advance();
const scratch_top = self.store.scratchPatternTop();
_ = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseRound, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
const end = self.parseCollectionSpan(IR.NodeStore.PatternIdx, .CloseRound, IR.NodeStore.addScratchPattern, parsePatternWithAlts) catch {
while (self.peek() != .CloseRound and self.peek() != .EndOfFile) {
self.advance();
}
@ -719,7 +729,7 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
pattern = self.store.addPattern(.{ .tuple = .{
.patterns = patterns,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
},
else => {
@ -745,9 +755,11 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) IR.NodeStore.Patt
if ((self.store.scratchPatternTop() - patterns_scratch_top) == 0) {
std.debug.panic("Should have gotten a valid pattern, pos={d} peek={s}\n", .{ self.pos, @tagName(self.peek()) });
}
const last_pattern = self.store.scratch_patterns.items[self.store.scratchPatternTop() - 1];
const last_pattern_region = self.store.nodes.items.items(.region)[last_pattern.id];
const patterns = self.store.patternSpanFrom(patterns_scratch_top);
return self.store.addPattern(.{ .alternatives = .{
.region = .{ .start = outer_start, .end = self.pos },
.region = .{ .start = outer_start, .end = last_pattern_region.end },
.patterns = patterns,
} });
}
@ -886,7 +898,7 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
self.advance();
// TODO: Parenthesized expressions
const scratch_top = self.store.scratchExprTop();
_ = self.parseCollectionSpan(IR.NodeStore.ExprIdx, .CloseRound, IR.NodeStore.addScratchExpr, parseExpr) catch {
const end = self.parseCollectionSpan(IR.NodeStore.ExprIdx, .CloseRound, IR.NodeStore.addScratchExpr, parseExpr) catch {
while (self.peek() != .CloseRound and self.peek() != .EndOfFile) {
self.advance();
}
@ -896,7 +908,7 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
const items = self.store.exprSpanFrom(scratch_top);
expr = self.store.addExpr(.{ .tuple = .{
.items = items,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
},
.OpenCurly => {
@ -906,21 +918,23 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
// This is the best guesstimation of this being a Record for now. I believe we have to have a NoSpaceOpColon
// for this to be foolproof without backtracking.
const scratch_top = self.store.scratchRecordFieldTop();
_ = self.parseCollectionSpan(IR.NodeStore.RecordFieldIdx, .CloseCurly, IR.NodeStore.addScratchRecordField, parseRecordField) catch {
const end = self.parseCollectionSpan(IR.NodeStore.RecordFieldIdx, .CloseCurly, IR.NodeStore.addScratchRecordField, parseRecordField) catch {
self.store.clearScratchRecordFieldsFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.ExprIdx, .unexpected_token, start);
};
const fields = self.store.recordFieldSpanFrom(scratch_top);
expr = self.store.addExpr(.{ .record = .{
.fields = fields,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
} else {
const scratch_top = self.store.scratchStatementTop();
var end = self.pos;
while (self.peek() != .EndOfFile) {
const statement = self.parseStmt() orelse break;
self.store.addScratchStatement(statement);
end = self.pos;
if (self.peek() == .CloseCurly) {
self.advance();
break;
@ -931,7 +945,7 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
expr = self.store.addExpr(.{ .block = .{
.statements = statements,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
}
},
@@ -945,10 +959,11 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
const args = self.store.patternSpanFrom(scratch_top);
const body = self.parseExpr();
const body_region = self.store.nodes.items.items(.region)[body.id];
expr = self.store.addExpr(.{ .lambda = .{
.body = body,
.args = args,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = body_region.end },
} });
},
.KwIf => {
@@ -1012,7 +1027,7 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) IR.NodeStore.ExprIdx {
}
if (expr) |e| {
var expression = self.parseExprSuffix(start, e);
while (self.peek() == .NoSpaceDotInt or self.peek() == .NoSpaceDotLowerIdent) {
while (self.peek() == .NoSpaceDotInt or self.peek() == .NoSpaceDotLowerIdent or self.peek() == .DotLowerIdent) {
const tok = self.peek();
if (tok == .NoSpaceDotInt) {
return self.pushMalformed(IR.NodeStore.ExprIdx, .expr_no_space_dot_int, self.pos);
@@ -1062,7 +1077,7 @@ fn parseExprSuffix(self: *Parser, start: u32, e: IR.NodeStore.ExprIdx) IR.NodeSt
if (self.peek() == .NoSpaceOpenRound) {
self.advance();
const scratch_top = self.store.scratchExprTop();
_ = self.parseCollectionSpan(IR.NodeStore.ExprIdx, .CloseRound, IR.NodeStore.addScratchExpr, parseExpr) catch {
const end = self.parseCollectionSpan(IR.NodeStore.ExprIdx, .CloseRound, IR.NodeStore.addScratchExpr, parseExpr) catch {
self.store.clearScratchExprsFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.ExprIdx, .unexpected_token, start);
};
@@ -1071,7 +1086,7 @@ fn parseExprSuffix(self: *Parser, start: u32, e: IR.NodeStore.ExprIdx) IR.NodeSt
expression = self.store.addExpr(.{ .apply = .{
.args = args,
.@"fn" = e,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
}
if (self.peek() == .NoSpaceOpQuestion) {
@@ -1131,8 +1146,10 @@ pub fn parseStringExpr(self: *Parser) IR.NodeStore.ExprIdx {
self.advanceOne();
const scratch_top = self.store.scratchExprTop();
while (self.peek() != .EndOfFile) {
var string_end = self.pos;
switch (self.peek()) {
.StringEnd => {
string_end = self.pos;
self.advanceOne();
break;
},
@@ -1145,11 +1162,14 @@ pub fn parseStringExpr(self: *Parser) IR.NodeStore.ExprIdx {
self.store.addScratchExpr(index);
},
.OpenStringInterpolation => {
self.advanceOne();
self.advance();
const ex = self.parseExpr();
self.store.addScratchExpr(ex);
while (self.peek() == .Newline) {
self.advanceOne();
}
if (self.peek() != .CloseStringInterpolation) {
return self.pushMalformed(IR.NodeStore.ExprIdx, .string_expected_close_interpolation, self.pos);
return self.pushMalformed(IR.NodeStore.ExprIdx, .string_expected_close_interpolation, start);
}
self.advanceOne();
},
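The added loop makes interpolation tolerant of line breaks between the interpolated expression and its closing brace. The same pattern in isolation, a minimal sketch with illustrative token tags and helper name rather than this parser's actual API:

const Tag = enum { Newline, CloseStringInterpolation, Other };

// Advance past any Newline tokens so a multi-line interpolation still
// finds its closing brace afterwards.
fn skipNewlines(toks: []const Tag, pos: *usize) void {
    while (pos.* < toks.len and toks[pos.*] == .Newline) pos.* += 1;
}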
@@ -1201,15 +1221,17 @@ pub fn parseTypeHeader(self: *Parser) IR.NodeStore.TypeHeaderIdx {
});
}
const scratch_top = self.store.scratchTokenTop();
var end = self.pos;
while (self.peek() == .LowerIdent) {
self.store.addScratchToken(self.pos);
end = self.pos;
self.advance(); // Advance past LowerIdent
}
const args = self.store.tokenSpanFrom(scratch_top);
return self.store.addTypeHeader(.{
.name = start,
.args = args,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
});
}
@@ -1228,10 +1250,12 @@ pub fn parseTypeAnno(self: *Parser, looking_for_args: TyFnArgs) IR.NodeStore.Typ
if (self.peekNext() != .NoSpaceOpenRound) {
anno = self.store.addTypeAnno(.{ .tag = .{
.tok = self.pos,
.args = .{ .span = .{
.start = 0,
.len = 0,
} },
.args = .{
.span = .{
.start = 0,
.len = 0,
},
},
.region = .{ .start = start, .end = self.pos },
} });
self.advance(); // Advance past UpperIdent
@@ -1239,13 +1263,13 @@ pub fn parseTypeAnno(self: *Parser, looking_for_args: TyFnArgs) IR.NodeStore.Typ
self.advance(); // Advance past UpperIdent
self.advance(); // Advance past NoSpaceOpenRound
const scratch_top = self.store.scratchTypeAnnoTop();
_ = self.parseCollectionSpan(IR.NodeStore.TypeAnnoIdx, .CloseRound, IR.NodeStore.addScratchTypeAnno, parseTypeAnnoInCollection) catch {
const end = self.parseCollectionSpan(IR.NodeStore.TypeAnnoIdx, .CloseRound, IR.NodeStore.addScratchTypeAnno, parseTypeAnnoInCollection) catch {
self.store.clearScratchTypeAnnosFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.TypeAnnoIdx, .unexpected_token, start);
};
const args = self.store.typeAnnoSpanFrom(scratch_top);
anno = self.store.addTypeAnno(.{ .tag = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.tok = start,
.args = args,
} });
@@ -1277,6 +1301,7 @@ pub fn parseTypeAnno(self: *Parser, looking_for_args: TyFnArgs) IR.NodeStore.Typ
const args = self.store.typeAnnoSpanFrom(scratch_top);
self.advance();
const ret = self.parseTypeAnno(.not_looking_for_args);
const ret_region = self.store.nodes.items.items(.region)[ret.id];
if (self.peek() != .CloseRound) {
self.store.clearScratchTypeAnnosFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.TypeAnnoIdx, .unexpected_token, start);
@@ -1284,48 +1309,50 @@ pub fn parseTypeAnno(self: *Parser, looking_for_args: TyFnArgs) IR.NodeStore.Typ
const function = self.store.addTypeAnno(.{ .@"fn" = .{
.args = args,
.ret = ret,
.region = .{ .start = after_round, .end = self.pos },
.region = .{ .start = after_round, .end = ret_region.end },
} });
const end = self.pos;
self.advance();
return self.store.addTypeAnno(.{ .parens = .{
.anno = function,
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
} });
}
if (self.peek() != .CloseRound) {
self.store.clearScratchTypeAnnosFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.TypeAnnoIdx, .unexpected_token, start);
}
const end = self.pos;
self.advance(); // Advance past CloseRound
const annos = self.store.typeAnnoSpanFrom(scratch_top);
anno = self.store.addTypeAnno(.{ .tuple = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.annos = annos,
} });
},
.OpenCurly => {
self.advance(); // Advance past OpenCurly
const scratch_top = self.store.scratchAnnoRecordFieldTop();
_ = self.parseCollectionSpan(IR.NodeStore.AnnoRecordFieldIdx, .CloseCurly, IR.NodeStore.addScratchAnnoRecordField, parseAnnoRecordField) catch {
const end = self.parseCollectionSpan(IR.NodeStore.AnnoRecordFieldIdx, .CloseCurly, IR.NodeStore.addScratchAnnoRecordField, parseAnnoRecordField) catch {
self.store.clearScratchAnnoRecordFieldsFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.TypeAnnoIdx, .unexpected_token, start);
};
const fields = self.store.annoRecordFieldSpanFrom(scratch_top);
anno = self.store.addTypeAnno(.{ .record = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.fields = fields,
} });
},
.OpenSquare => {
self.advance(); // Advance past OpenSquare
const scratch_top = self.store.scratchTypeAnnoTop();
_ = self.parseCollectionSpan(IR.NodeStore.TypeAnnoIdx, .CloseSquare, IR.NodeStore.addScratchTypeAnno, parseTypeAnnoInCollection) catch {
const end = self.parseCollectionSpan(IR.NodeStore.TypeAnnoIdx, .CloseSquare, IR.NodeStore.addScratchTypeAnno, parseTypeAnnoInCollection) catch {
self.store.clearScratchTypeAnnosFrom(scratch_top);
return self.pushMalformed(IR.NodeStore.TypeAnnoIdx, .unexpected_token, start);
};
const tags = self.store.typeAnnoSpanFrom(scratch_top);
anno = self.store.addTypeAnno(.{ .tag_union = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = end },
.open_anno = null,
.tags = tags,
} });
@@ -1356,8 +1383,9 @@ pub fn parseTypeAnno(self: *Parser, looking_for_args: TyFnArgs) IR.NodeStore.Typ
self.advance(); // Advance past arrow
// TODO: Handle thin vs fat arrow
const ret = self.parseTypeAnno(.not_looking_for_args);
const region = self.store.nodes.items.items(.region)[ret.id];
return self.store.addTypeAnno(.{ .@"fn" = .{
.region = .{ .start = start, .end = self.pos },
.region = .{ .start = start, .end = region.end },
.args = args,
.ret = ret,
} });
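The region fixes above all apply one rule: end a node's region at a token that provably belongs to the node (the closing delimiter reported by parseCollectionSpan, the last scratch item's region, or a child node's own region end) instead of at self.pos, which may already sit past trailing newlines. A minimal, self-contained sketch of the closing-delimiter case, with illustrative names rather than the compiler's actual API:

const std = @import("std");

const Tag = enum { item, comma, close, eof };

const MiniParser = struct {
    toks: []const Tag,
    pos: u32 = 0,

    fn peek(self: *MiniParser) Tag {
        return if (self.pos < self.toks.len) self.toks[self.pos] else .eof;
    }

    // Mirrors the new parseCollectionSpan contract: consume items up to the
    // closing delimiter and report the delimiter's position, so callers can
    // end the node's region exactly there.
    fn parseCollection(self: *MiniParser) error{UnexpectedToken}!u32 {
        while (self.peek() != .close) {
            switch (self.peek()) {
                .item, .comma => self.pos += 1,
                else => return error.UnexpectedToken,
            }
        }
        const end = self.pos; // the region ends at `.close`...
        self.pos += 1; // ...while the cursor moves past it
        return end;
    }
};

test "collection region ends at the closing delimiter" {
    var p = MiniParser{ .toks = &.{ .item, .comma, .item, .close } };
    try std.testing.expectEqual(@as(u32, 3), try p.parseCollection());
    try std.testing.expectEqual(@as(u32, 4), p.pos);
}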

View file

@@ -361,9 +361,11 @@ pub const TokenizedBuffer = struct {
tokens: Token.List,
env: *base.ModuleEnv,
pub fn init(env: *base.ModuleEnv) TokenizedBuffer {
pub fn initCapacity(env: *base.ModuleEnv, capacity: usize) TokenizedBuffer {
var tokens = Token.List{};
tokens.ensureTotalCapacity(env.gpa, capacity) catch |err| exitOnOom(err);
return TokenizedBuffer{
.tokens = Token.List{},
.tokens = tokens,
.env = env,
};
}
@@ -404,12 +406,17 @@ pub const TokenizedBuffer = struct {
}) catch |err| exitOnOom(err);
}
pub fn pushNewline(self: *TokenizedBuffer, indent: u32) void {
self.tokens.append(self.env.gpa, .{
pub fn pushNewline(self: *TokenizedBuffer, comment: ?Comment) void {
var token = Token{
.tag = .Newline,
.offset = indent, // store the indent in the offset field
.offset = 0, // store the Comment start - if it is exists here
.extra = .{ .length = 0 },
}) catch |err| exitOnOom(err);
};
if (comment) |c| {
token.offset = c.begin;
token.extra = .{ .length = if (c.end > c.begin) c.end - c.begin else 0 };
}
self.tokens.append(self.env.gpa, token) catch |err| exitOnOom(err);
}
/// Returns the offset of the token at index `idx`.
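A Newline token now doubles as a comment carrier: offset holds the comment's start and extra.length its byte length, with zero meaning a bare newline. A hedged sketch of how a consumer could unpack that, using a simplified Token layout; newlineComment is a hypothetical helper, not part of this diff:

const Extra = union { length: u32 };

const Token = struct {
    tag: enum { Newline, Other },
    offset: u32,
    extra: Extra,
};

// Returns the comment text carried by a Newline token, or null for a
// bare newline (length == 0) or a non-newline token.
fn newlineComment(src: []const u8, tok: Token) ?[]const u8 {
    if (tok.tag != .Newline) return null;
    const len = tok.extra.length;
    if (len == 0) return null;
    return src[tok.offset .. tok.offset + len];
}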
@@ -522,6 +529,7 @@ pub const Cursor = struct {
messages: []Diagnostic,
message_count: u32,
tab_width: u8 = 4, // TODO: make this configurable
comment: ?Comment = null,
/// Initialize a Cursor with the given input buffer and a pre-allocated messages slice.
pub fn init(buf: []const u8, messages: []Diagnostic) Cursor {
@@ -582,6 +590,14 @@ pub const Cursor = struct {
}
}
pub fn popComment(self: *Cursor) ?Comment {
if (self.comment) |c| {
self.comment = null;
return c;
}
return null;
}
/// Chomps trivia (whitespace, comments, etc.) and returns an optional indent.
/// If the chomped trivia includes a newline, returns the indent of the next (real) line.
/// Otherwise, returns null.
@@ -604,20 +620,24 @@ pub const Cursor = struct {
self.pos += 1;
sawNewline = true;
indent = 0;
return indent;
} else if (b == '\r') {
self.pos += 1;
sawNewline = true;
indent = 0;
if (self.pos < self.buf.len and self.buf[self.pos] == '\n') {
self.pos += 1;
return indent;
} else {
self.pushMessageHere(.MisplacedCarriageReturn);
}
} else if (b == '#') {
self.pos += 1;
const comment_start = self.pos;
while (self.pos < self.buf.len and self.buf[self.pos] != '\n' and self.buf[self.pos] != '\r') {
self.pos += 1;
}
self.comment = Comment{ .begin = comment_start, .end = self.pos };
} else if (b >= 0 and b <= 31) {
self.pushMessageHere(.AsciiControl);
self.pos += 1;
@@ -625,9 +645,6 @@ pub const Cursor = struct {
break;
}
}
if (sawNewline) {
return indent;
}
return null;
}
@@ -988,7 +1005,9 @@ pub const Tokenizer = struct {
/// Note that the caller must also provide a pre-allocated messages buffer.
pub fn init(env: *base.ModuleEnv, text: []const u8, messages: []Diagnostic) Tokenizer {
const cursor = Cursor.init(text, messages);
const output = TokenizedBuffer.init(env);
// TODO: tune this more. Syntax grab bag is 3:1.
// Generally, roc code will be less dense than that.
const output = TokenizedBuffer.initCapacity(env, @max(text.len / 8, 64));
return Tokenizer{
.cursor = cursor,
.output = output,
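The sizing guess standalone: the 3:1 byte-to-token ratio in the TODO comes from the dense syntax-grab-bag test, so ordinary code is assumed sparser at roughly 8 bytes per token, with a floor of 64 so tiny files still start with useful capacity. The function name below is illustrative:

fn estimatedTokenCapacity(source_len: usize) usize {
    // ~8 source bytes per token is an assumption to be tuned, per the TODO.
    return @max(source_len / 8, 64);
}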
@@ -1086,8 +1105,8 @@ pub const Tokenizer = struct {
switch (b) {
// Whitespace & control characters
0...32, '#' => {
if (self.cursor.chompTrivia()) |indent| {
self.output.pushNewline(indent);
if (self.cursor.chompTrivia()) |_| {
self.output.pushNewline(self.cursor.popComment());
}
sawWhitespace = true;
},
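This dispatch pairs the two new pieces: chompTrivia records at most one comment on the cursor, and popComment hands it over exactly once, so consecutive newlines cannot both claim it. The take-and-clear move in miniature (names are illustrative):

const Comment = struct { begin: u32, end: u32 };

const MiniCursor = struct {
    comment: ?Comment = null,

    // Returns the pending comment (if any) and clears it in one step.
    fn popComment(self: *MiniCursor) ?Comment {
        defer self.comment = null;
        return self.comment;
    }
};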

View file

@@ -29,6 +29,21 @@ regions: std.ArrayListUnmanaged(Region) = .{},
/// A unique index for a deduped string in this interner.
pub const Idx = enum(u32) { _ };
/// Initialize a `SmallStringInterner` with the specified capacity.
pub fn initCapacity(gpa: std.mem.Allocator, capacity: usize) Self {
// TODO: tune this. Rough assumption that average small string is 4 bytes.
const bytes_per_string = 4;
var self = Self{
.bytes = std.ArrayListUnmanaged(u8).initCapacity(gpa, capacity * bytes_per_string) catch |err| exitOnOom(err),
.strings = .{},
.outer_indices = std.ArrayListUnmanaged(StringIdx).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
.regions = std.ArrayListUnmanaged(Region).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
};
self.strings.ensureTotalCapacityContext(gpa, @intCast(capacity), StringIdx.TableContext{ .bytes = &self.bytes }) catch |err| exitOnOom(err);
return self;
}
/// Free all memory consumed by this interner.
/// Will invalidate all slices referencing the interner.
pub fn deinit(self: *Self, gpa: std.mem.Allocator) void {
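The byte-buffer sizing in isolation, keeping the diff's explicitly rough guess of 4 bytes per interned string. A sketch under that assumption, not the interner itself:

const std = @import("std");

// Reserve the interner's byte buffer up front so interning during a
// parse never grows it mid-flight.
fn preallocInternerBytes(
    gpa: std.mem.Allocator,
    expected_strings: usize,
) std.mem.Allocator.Error!std.ArrayListUnmanaged(u8) {
    const bytes_per_string = 4; // same tunable assumption as the TODO above
    return std.ArrayListUnmanaged(u8).initCapacity(gpa, expected_strings * bytes_per_string);
}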

View file

@@ -49,6 +49,13 @@ pub fn SafeList(comptime T: type) type {
}
};
/// Initialize the `SafeList` with the specified capacity.
pub fn initCapacity(gpa: Allocator, capacity: usize) SafeList(T) {
return .{
.items = std.ArrayListUnmanaged(T).initCapacity(gpa, capacity) catch |err| exitOnOom(err),
};
}
/// Deinitialize the memory of this `SafeList`.
pub fn deinit(self: *SafeList(T), gpa: Allocator) void {
self.items.deinit(gpa);
@@ -163,6 +170,15 @@ pub fn SafeMultiList(comptime T: type) type {
return self.items.items(field_name)[@as(usize, @intFromEnum(idx))];
}
/// Initialize the `SafeMultiList` with the specified capacity.
pub fn initCapacity(gpa: Allocator, capacity: usize) SafeMultiList(T) {
var items = std.MultiArrayList(T){};
items.ensureTotalCapacity(gpa, capacity) catch |err| exitOnOom(err);
return .{
.items = items,
};
}
/// Deinitialize the memory of a `SafeMultiList`.
pub fn deinit(self: *SafeMultiList(T), gpa: Allocator) void {
self.items.deinit(gpa);
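Both wrappers defer to std containers, so the payoff is the usual one: reserve once, then append without reallocation. A hedged example against std.ArrayListUnmanaged directly, since that is what SafeList wraps (the 1024 bound is illustrative):

const std = @import("std");

test "reserving up front lets hot loops append without reallocating" {
    const gpa = std.testing.allocator;
    var list = try std.ArrayListUnmanaged(u32).initCapacity(gpa, 1024);
    defer list.deinit(gpa);
    var i: u32 = 0;
    while (i < 1024) : (i += 1) {
        list.appendAssumeCapacity(i); // safe: capacity was reserved above
    }
    try std.testing.expectEqual(@as(usize, 1024), list.items.len);
}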

View file

@@ -1,4 +1,5 @@
const std = @import("std");
const tracy = @import("../tracy.zig");
/// Exit the current process when we hit an out-of-memory error.
///
@@ -21,5 +22,8 @@ pub fn exitOnOom(err: std.mem.Allocator.Error) noreturn {
/// Log a fatal error and exit the process with a non-zero code.
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
std.io.getStdErr().writer().print(format, args) catch unreachable;
if (tracy.enable) {
tracy.waitForShutdown() catch unreachable;
}
std.process.exit(1);
}

File diff suppressed because it is too large

View file

@@ -39,20 +39,28 @@ const usage =
/// The CLI entrypoint for the Roc compiler.
pub fn main() !void {
const gpa = std.heap.c_allocator;
const args = try std.process.argsAlloc(gpa);
defer gpa.free(args);
var gpa_tracy: tracy.TracyAllocator(null) = undefined;
var gpa = std.heap.c_allocator;
if (tracy.enable_allocation) {
var gpa_tracy = tracy.tracyAllocator(gpa);
return mainArgs(gpa_tracy.allocator(), args);
gpa_tracy = tracy.tracyAllocator(gpa);
gpa = gpa_tracy.allocator();
}
return mainArgs(gpa, args);
var arena_impl = std.heap.ArenaAllocator.init(gpa);
defer arena_impl.deinit();
const arena = arena_impl.allocator();
const args = try std.process.argsAlloc(arena);
const result = mainArgs(gpa, arena, args);
if (tracy.enable) {
try tracy.waitForShutdown();
}
return result;
}
fn mainArgs(gpa: Allocator, args: []const []const u8) !void {
fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const trace = tracy.trace(@src());
defer trace.end();
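The rewired main separates lifetimes: per-invocation scratch such as argv lives in an arena freed with one deinit, the general-purpose allocator (optionally wrapped by Tracy) serves everything longer-lived, and the Tracy shutdown waits until mainArgs has returned. A reduced sketch of that shape, omitting the Tracy branch; run stands in for mainArgs:

const std = @import("std");

fn run(gpa: std.mem.Allocator, arena: std.mem.Allocator, args: []const [:0]u8) !void {
    _ = gpa; // long-lived allocations would come from here
    _ = arena; // further scratch can share the arena's single free
    for (args) |arg| std.debug.print("{s}\n", .{arg});
}

pub fn main() !void {
    const gpa = std.heap.c_allocator;

    var arena_impl = std.heap.ArenaAllocator.init(gpa);
    defer arena_impl.deinit(); // frees argv and all other scratch at once
    const arena = arena_impl.allocator();

    const args = try std.process.argsAlloc(arena);
    try run(gpa, arena, args);
}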
@@ -74,7 +82,7 @@ fn mainArgs(gpa: Allocator, args: []const []const u8) !void {
.roc_build => try rocBuild(gpa, opt, cmd_args),
.roc_test => try rocTest(gpa, opt, cmd_args),
.roc_repl => try rocRepl(gpa, opt, cmd_args),
.roc_format => try rocFormat(gpa, cmd_args),
.roc_format => try rocFormat(gpa, arena, cmd_args),
.roc_version => try rocVersion(gpa, cmd_args),
.roc_check => rocCheck(gpa, opt, cmd_args),
.roc_docs => try rocDocs(gpa, opt, cmd_args),
@@ -130,19 +138,24 @@ fn rocRepl(gpa: Allocator, opt: RocOpt, args: []const []const u8) !void {
/// Reads, parses, formats, and overwrites all Roc files at the given paths.
/// Recurses into directories to search for Roc files.
fn rocFormat(gpa: Allocator, args: []const []const u8) !void {
fn rocFormat(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var timer = try std.time.Timer.start();
var count: usize = 0;
var count = fmt.SuccessFailCount{ .success = 0, .failure = 0 };
if (args.len > 0) {
for (args) |arg| {
count += try fmt.formatPath(gpa, std.fs.cwd(), arg);
const inner_count = try fmt.formatPath(gpa, arena, std.fs.cwd(), arg);
count.success += inner_count.success;
count.failure += inner_count.failure;
}
} else {
count = try fmt.formatPath(gpa, std.fs.cwd(), "main.roc");
count = try fmt.formatPath(gpa, arena, std.fs.cwd(), "main.roc");
}
const elapsed = timer.read() / std.time.ns_per_ms;
try std.io.getStdOut().writer().print("Successfully formatted {} files in {} ms.\n", .{ count, elapsed });
try std.io.getStdOut().writer().print("Successfully formatted {} files\n", .{count.success});
if (count.failure > 0) {
try std.io.getStdOut().writer().print("Failed to format {} files.\n", .{count.failure});
}
try std.io.getStdOut().writer().print("Took {} ms.\n", .{elapsed});
}
fn rocVersion(gpa: Allocator, args: []const []const u8) !void {
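rocFormat now distinguishes failures instead of counting every file as formatted. The aggregation step in isolation: field names match fmt.SuccessFailCount per the diff, while the field types and foldCounts itself are illustrative assumptions:

const SuccessFailCount = struct { success: usize, failure: usize };

// Fold per-path results into one running total, as the loop over
// `args` does above.
fn foldCounts(counts: []const SuccessFailCount) SuccessFailCount {
    var total = SuccessFailCount{ .success = 0, .failure = 0 };
    for (counts) |c| {
        total.success += c.success;
        total.failure += c.failure;
    }
    return total;
}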

View file

@@ -12,20 +12,27 @@ foo =
~~~PROBLEMS
NIL
~~~TOKENS
KwModule(1:1-1:7),OpenSquare(1:12-1:13),Newline(1:2-1:2),
LowerIdent(3:2-3:5),Comma(3:5-3:6),Newline(1:6-1:6),
KwModule(1:1-1:7),OpenSquare(1:12-1:13),Newline(1:1-1:1),
Newline(2:2-2:24),
LowerIdent(3:2-3:5),Comma(3:5-3:6),Newline(1:1-1:1),
CloseSquare(4:6-4:7),Newline(1:1-1:1),
LowerIdent(6:1-6:4),OpAssign(6:5-6:6),Newline(1:5-1:5),
Newline(1:1-1:1),
LowerIdent(6:1-6:4),OpAssign(6:5-6:6),Newline(1:1-1:1),
Newline(1:1-1:1),
StringStart(8:5-8:6),StringPart(8:6-8:9),StringEnd(8:9-8:10),EndOfFile(8:10-8:10),
~~~PARSE
(file (1:1-8:10)
(module (1:1-4:7)
(exposed_item (3:2-3:5) (lower_ident "foo")))
(file
(module (1:1-4:7) (exposed_item (lower_ident "foo")))
(decl (6:1-8:10)
(ident (6:1-6:4) "foo")
(string (8:5-8:10) (string_part (8:6-8:9) "one"))))
~~~FORMATTED
module [foo]
module [
# some crazy formatting
foo,
]
foo = "one"
foo =
"one"
~~~END

View file

@@ -8,11 +8,11 @@ add2 = x + 2
NIL
~~~TOKENS
KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:13),CloseSquare(1:13-1:14),Newline(1:1-1:1),
Newline(1:1-1:1),
LowerIdent(3:1-3:5),OpAssign(3:6-3:7),LowerIdent(3:8-3:9),OpPlus(3:10-3:11),Int(3:17-3:18),EndOfFile(3:18-3:18),
~~~PARSE
(file (1:1-3:18)
(module (1:1-1:14)
(exposed_item (1:9-1:13) (lower_ident "add2")))
(file
(module (1:1-1:14) (exposed_item (lower_ident "add2")))
(decl (3:1-3:18)
(ident (3:1-3:5) "add2")
(binop

View file

@@ -8,9 +8,10 @@ foo = if tru then 0
PARSER: no_else
~~~TOKENS
KwModule(1:1-1:7),OpenSquare(1:8-1:9),CloseSquare(1:9-1:10),Newline(1:1-1:1),
Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),KwIf(3:7-3:9),LowerIdent(3:10-3:13),LowerIdent(3:14-3:18),Int(3:19-3:20),EndOfFile(3:20-3:20),
~~~PARSE
(file (1:1-3:20)
(file
(module (1:1-1:10))
(decl (3:1-3:20)
(ident (3:1-3:4) "foo")

View file

@@ -8,9 +8,10 @@ foo = asd.0
PARSER: expr_no_space_dot_int
~~~TOKENS
KwModule(1:1-1:7),OpenSquare(1:8-1:9),CloseSquare(1:9-1:10),Newline(1:1-1:1),
Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),LowerIdent(3:7-3:10),NoSpaceDotInt(3:10-3:12),EndOfFile(3:12-3:12),
~~~PARSE
(file (1:1-3:12)
(file
(module (1:1-1:10))
(decl (3:1-3:12)
(ident (3:1-3:4) "foo")

View file

@@ -10,7 +10,7 @@ PARSER: unexpected_token
~~~TOKENS
LowerIdent(1:1-1:3),OpBar(1:3-1:4),OpPercent(1:4-1:5),EndOfFile(1:5-1:5),
~~~PARSE
(file (1:1-1:5)
(file
(malformed_header (1:1-1:3) "missing_header")
(malformed_expr (1:3-1:5) "unexpected_token"))
~~~FORMATTED

View file

@@ -26,7 +26,7 @@ PARSER: unexpected_token
~~~TOKENS
LowerIdent(1:1-1:5),OpColon(1:5-1:6),MalformedUnknownToken(1:6-1:7),OpColon(1:7-1:8),OpColon(1:8-1:9),OpColon(1:9-1:10),OpColon(1:10-1:11),OpColon(1:11-1:12),OpColon(1:12-1:13),OpColon(1:13-1:14),OpColon(1:14-1:15),OpColon(1:15-1:16),OpColon(1:16-1:17),OpColon(1:17-1:18),OpColon(1:18-1:19),OpColon(1:19-1:20),OpColon(1:20-1:21),LowerIdent(1:21-1:23),OpenSquare(1:23-1:24),OpPercent(1:24-1:25),EndOfFile(1:25-1:25),
~~~PARSE
(file (1:1-1:25)
(file
(malformed_header (1:1-1:5) "missing_header")
(malformed_expr (1:5-1:6) "unexpected_token")
(malformed_expr (1:6-1:7) "unexpected_token")
@@ -47,23 +47,5 @@ LowerIdent(1:1-1:5),OpColon(1:5-1:6),MalformedUnknownToken(1:6-1:7),OpColon(1:7-
(ident (1:21-1:23) "" "le")
(malformed_expr (1:23-1:25) "unexpected_token"))
~~~FORMATTED
le
~~~END

View file

@@ -11,7 +11,7 @@ PARSER: missing_header
~~~TOKENS
OpAssign(1:2-1:3),StringStart(1:4-1:5),StringPart(1:5-1:7),EndOfFile(1:7-1:7),
~~~PARSE
(file (1:2-1:7)
(file
(malformed_header (1:2-1:3) "missing_header")
(string (1:4-1:7) (string_part (1:5-1:7) "te")))
~~~FORMATTED

View file

@@ -8,7 +8,7 @@ PARSER: missing_header
~~~TOKENS
UpperIdent(1:1-1:2),EndOfFile(1:2-1:2),
~~~PARSE
(file (1:1-1:2) (malformed_header (1:1-1:2) "missing_header"))
(file (malformed_header (1:1-1:2) "missing_header"))
~~~FORMATTED
~~~END

View file

@@ -8,7 +8,7 @@ PARSER: missing_header
~~~TOKENS
LowerIdent(1:1-1:5),EndOfFile(1:5-1:5),
~~~PARSE
(file (1:1-1:5) (malformed_header (1:1-1:5) "missing_header"))
(file (malformed_header (1:1-1:5) "missing_header"))
~~~FORMATTED
~~~END

View file

@@ -10,11 +10,10 @@ PARSER: unexpected_token
~~~TOKENS
LowerIdent(1:1-1:4),NoSpaceDotInt(1:4-1:6),NoSpaceDotLowerIdent(1:6-1:8),EndOfFile(1:8-1:8),
~~~PARSE
(file (1:1-1:8)
(file
(malformed_header (1:1-1:4) "missing_header")
(malformed_expr (1:4-1:6) "unexpected_token")
(malformed_expr (1:6-1:8) "unexpected_token"))
~~~FORMATTED
~~~END

View file

@@ -12,7 +12,7 @@ PARSER: unexpected_token
~~~TOKENS
OpBar(1:1-1:2),OpBar(1:3-1:4),Int(1:4-1:5),EndOfFile(1:5-1:5),
~~~PARSE
(file (1:1-1:5)
(file
(malformed_header (1:1-1:2) "missing_header")
(malformed_expr (1:3-1:5) "unexpected_token"))
~~~FORMATTED

View file

@@ -17,19 +17,25 @@ TOKENIZE: (6:6-6:12) UnclosedString:
^^^^^^
PARSER: missing_header
~~~TOKENS
LowerIdent(1:2-1:3),OpenCurly(1:3-1:4),LowerIdent(1:4-1:5),Comma(1:5-1:6),Newline(1:6-1:6),
LowerIdent(1:2-1:3),OpenCurly(1:3-1:4),LowerIdent(1:4-1:5),Comma(1:5-1:6),Newline(1:1-1:1),
CloseCurly(2:6-2:7),Newline(1:1-1:1),
LowerIdent(4:1-4:4),OpAssign(4:5-4:6),Newline(1:5-1:5),
Newline(1:1-1:1),
LowerIdent(4:1-4:4),OpAssign(4:5-4:6),Newline(1:1-1:1),
Newline(1:1-1:1),
StringStart(6:5-6:6),StringPart(6:6-6:12),EndOfFile(6:12-6:12),
~~~PARSE
(file (1:2-6:12)
(file
(malformed_header (1:2-1:3) "missing_header")
(record (1:3-4:4) (field "o"))
(record (1:3-2:7) (field "o"))
(decl (4:1-6:12)
(ident (4:1-4:4) "foo")
(string (6:5-6:12) (string_part (6:6-6:12) "onmo %"))))
~~~FORMATTED
{ o }
{
o,
}
foo = "onmo %"
foo =
"onmo %"
~~~END

View file

@@ -19,19 +19,23 @@ TOKENIZE: (5:6-5:35) UnclosedString:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PARSER: missing_header
~~~TOKENS
UpperIdent(1:1-1:2),OpenCurly(1:2-1:3),LowerIdent(1:3-1:4),Comma(1:4-1:5),Newline(1:5-1:5),
UpperIdent(1:1-1:2),OpenCurly(1:2-1:3),LowerIdent(1:3-1:4),Comma(1:4-1:5),Newline(1:1-1:1),
CloseCurly(2:6-2:7),Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),Newline(1:5-1:5),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),Newline(1:1-1:1),
Newline(1:1-1:1),
StringStart(5:5-5:6),StringPart(5:6-5:35),EndOfFile(5:35-5:35),
~~~PARSE
(file (1:1-5:35)
(file
(malformed_header (1:1-1:2) "missing_header")
(record (1:2-3:4) (field "o"))
(record (1:2-2:7) (field "o"))
(decl (3:1-5:35)
(ident (3:1-3:4) "foo")
(string (5:5-5:35) (string_part (5:6-5:35) "on (string 'onmo %')))"))))
~~~FORMATTED
{ o }
{
o,
}
foo =
foo = "on (string 'onmo %')))"
"on (string 'onmo %')))"
~~~END

View file

@@ -11,7 +11,7 @@ PARSER: header_expected_open_square
~~~TOKENS
KwModule(1:1-1:7),UpperIdent(1:8-1:9),UpperIdent(1:10-1:11),EndOfFile(1:11-1:11),
~~~PARSE
(file (1:1-1:11)
(file
(malformed_header (1:8-1:9) "header_expected_open_square")
(tag (1:10-1:11) "F"))
~~~FORMATTED

View file

@@ -11,7 +11,7 @@ PARSER: unexpected_token
~~~TOKENS
OpBar(1:2-1:3),OpBar(1:3-1:4),NoSpaceOpenRound(1:4-1:5),OpBar(1:5-1:6),NoSpaceOpenRound(1:6-1:7),LowerIdent(1:7-1:17),OpBar(1:17-1:18),EndOfFile(1:18-1:18),
~~~PARSE
(file (1:2-1:18)
(file
(malformed_header (1:2-1:3) "missing_header")
(malformed_expr (1:3-1:18) "unexpected_token"))
~~~FORMATTED

View file

@@ -7,9 +7,7 @@ PARSER: missing_header
~~~TOKENS
Int(1:1-1:2),OpenCurly(1:2-1:3),EndOfFile(1:3-1:3),
~~~PARSE
(file (1:1-1:3)
(malformed_header (1:1-1:2) "missing_header")
(block (1:2-1:3)))
(file (malformed_header (1:1-1:2) "missing_header") (block))
~~~FORMATTED
{}
~~~END

View file

@@ -14,7 +14,7 @@ MalformedNumberNoDigits(1:1-1:3),NoSpaceDotInt(1:3-1:5),Newline(1:1-1:1),
MalformedNumberNoDigits(2:1-2:6),Newline(1:1-1:1),
MalformedNumberBadSuffix(3:1-3:5),EndOfFile(3:5-3:5),
~~~PARSE
(file (1:1-3:5)
(file
(malformed_header (1:1-1:3) "missing_header")
(malformed_expr (1:3-1:5) "unexpected_token")
(malformed_expr (2:1-2:6) "unexpected_token")

View file

@@ -18,7 +18,7 @@ Int(2:1-2:4),Newline(1:1-1:1),
Int(3:1-3:4),NoSpaceDotInt(3:4-3:6),Newline(1:1-1:1),
Int(4:1-4:3),EndOfFile(4:3-4:3),
~~~PARSE
(file (1:1-4:3)
(file
(malformed_header (1:1-1:4) "missing_header")
(malformed_expr (1:4-1:6) "unexpected_token")
(int (2:1-2:4) "0_0")
@@ -28,6 +28,5 @@ Int(4:1-4:3),EndOfFile(4:3-4:3),
0_0
0_
~~~END

View file

@@ -8,7 +8,7 @@ PARSER: unexpected_token
~~~TOKENS
Int(1:1-1:2),OpBar(1:2-1:3),EndOfFile(1:3-1:3),
~~~PARSE
(file (1:1-1:3)
(file
(malformed_header (1:1-1:2) "missing_header")
(malformed_expr (1:2-1:3) "unexpected_token"))
~~~FORMATTED

View file

@@ -11,16 +11,14 @@ PARSER: string_expected_close_interpolation
LowerIdent(1:1-1:3),OpAssign(1:4-1:5),StringStart(1:6-1:7),StringPart(1:7-1:10),StringEnd(1:10-1:11),Newline(1:1-1:1),
LowerIdent(2:1-2:4),OpAssign(2:5-2:6),StringStart(2:7-2:8),StringPart(2:8-2:14),OpenStringInterpolation(2:14-2:16),LowerIdent(2:16-2:20),EndOfFile(2:20-2:20),
~~~PARSE
(file (1:1-2:20)
(file
(malformed_header (1:1-1:3) "missing_header")
(malformed_expr (1:4-1:5) "unexpected_token")
(string (1:6-1:11) (string_part (1:7-1:10) "luc"))
(decl (2:1-2:20)
(ident (2:1-2:4) "foo")
(malformed_expr (2:20-2:20) "string_expected_close_interpolation")))
(malformed_expr (2:7-2:20) "string_expected_close_interpolation")))
~~~FORMATTED
"luc"
foo =
~~~END

View file

@@ -0,0 +1,17 @@
~~~META
description=fuzz crash
verbose-tokens
~~~SOURCE
0 (
~~~PROBLEMS
PARSER: missing_header
PARSER: unexpected_token
~~~TOKENS
Int(1:1-1:2),OpenRound(1:3-1:4),EndOfFile(1:4-1:4),
~~~PARSE
(file
(malformed_header (1:1-1:2) "missing_header")
(malformed_expr (1:3-1:4) "unexpected_token"))
~~~FORMATTED
~~~END

View file

@@ -7,7 +7,7 @@ PARSER: header_expected_open_square
~~~TOKENS
KwModule(1:1-1:7),EndOfFile(1:7-1:7),
~~~PARSE
(file (1:1-1:7) (malformed_header (1:7-1:7) "header_expected_open_square"))
(file (malformed_header (1:7-1:7) "header_expected_open_square"))
~~~FORMATTED
~~~END

View file

@@ -10,12 +10,14 @@ main! = |_| Stdout.line!("Hello, world!")
NIL
~~~TOKENS
KwApp(1:1-1:4),OpenSquare(1:5-1:6),LowerIdent(1:6-1:11),CloseSquare(1:11-1:12),OpenCurly(1:13-1:14),LowerIdent(1:15-1:17),OpColon(1:17-1:18),KwPlatform(1:19-1:27),StringStart(1:28-1:29),StringPart(1:29-1:54),StringEnd(1:54-1:55),CloseCurly(1:56-1:57),Newline(1:1-1:1),
Newline(1:1-1:1),
KwImport(3:1-3:7),LowerIdent(3:8-3:10),NoSpaceDotUpperIdent(3:10-3:17),Newline(1:1-1:1),
Newline(1:1-1:1),
LowerIdent(5:1-5:6),OpAssign(5:7-5:8),OpBar(5:9-5:10),Underscore(5:10-5:11),OpBar(5:11-5:12),UpperIdent(5:13-5:19),NoSpaceDotLowerIdent(5:19-5:25),NoSpaceOpenRound(5:25-5:26),StringStart(5:26-5:27),StringPart(5:27-5:40),StringEnd(5:40-5:41),CloseRound(5:41-5:42),EndOfFile(5:42-5:42),
~~~PARSE
(file (1:1-5:42)
(app (1:1-1:4) "TODO implement toSExpr for app module header")
(import (3:1-5:6) ".Stdout" (qualifier "pf"))
(file
(app (1:1-1:1) "TODO implement toSExpr for app module header")
(import (3:1-3:17) ".Stdout" (qualifier "pf"))
(decl (5:1-5:42)
(ident (5:1-5:6) "main!")
(lambda (5:9-5:42)

View file

@@ -12,22 +12,27 @@ foo = if true A
NIL
~~~TOKENS
KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:12),CloseSquare(1:12-1:13),Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),KwIf(3:7-3:9),LowerIdent(3:10-3:14),UpperIdent(3:15-3:16),Newline(1:5-1:5),
KwElse(5:5-5:9),OpenCurly(5:10-5:11),Newline(1:5-1:5),
UpperIdent(6:5-6:6),Newline(1:5-1:5),
Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),KwIf(3:7-3:9),LowerIdent(3:10-3:14),UpperIdent(3:15-3:16),Newline(1:1-1:1),
Newline(1:1-1:1),
KwElse(5:5-5:9),OpenCurly(5:10-5:11),Newline(1:1-1:1),
UpperIdent(6:5-6:6),Newline(1:1-1:1),
CloseCurly(7:5-7:6),EndOfFile(7:6-7:6),
~~~PARSE
(file (1:1-7:6)
(module (1:1-1:13)
(exposed_item (1:9-1:12) (lower_ident "foo")))
(file
(module (1:1-1:13) (exposed_item (lower_ident "foo")))
(decl (3:1-7:6)
(ident (3:1-3:4) "foo")
(if_then_else (3:7-7:6)
(ident (3:10-3:14) "" "true")
(tag (3:15-3:16) "A")
(block (5:10-7:6) (tag (6:5-6:6) "B")))))
(block (tag (6:5-6:6) "B")))))
~~~FORMATTED
module [foo]
foo = if true A else B
foo = if true A
else {
B
}
~~~END

View file

@@ -10,14 +10,11 @@ NIL
KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:12),CloseSquare(1:12-1:13),Newline(1:1-1:1),
LowerIdent(2:1-2:4),OpAssign(2:5-2:6),Float(2:7-2:12),EndOfFile(2:12-2:12),
~~~PARSE
(file (1:1-2:12)
(module (1:1-1:13)
(exposed_item (1:9-1:12) (lower_ident "foo")))
(file
(module (1:1-1:13) (exposed_item (lower_ident "foo")))
(decl (2:1-2:12)
(ident (2:1-2:4) "foo")
(float (2:7-2:12) "12.34")))
~~~FORMATTED
module [foo]
foo = 12.34
NO CHANGE
~~~END

View file

@@ -10,14 +10,11 @@ NIL
KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:12),CloseSquare(1:12-1:13),Newline(1:1-1:1),
LowerIdent(2:1-2:4),OpAssign(2:5-2:6),Int(2:7-2:9),EndOfFile(2:9-2:9),
~~~PARSE
(file (1:1-2:9)
(module (1:1-1:13)
(exposed_item (1:9-1:12) (lower_ident "foo")))
(file
(module (1:1-1:13) (exposed_item (lower_ident "foo")))
(decl (2:1-2:9)
(ident (2:1-2:4) "foo")
(int (2:7-2:9) "42")))
~~~FORMATTED
module [foo]
foo = 42
NO CHANGE
~~~END

View file

@@ -12,9 +12,8 @@ KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:12),CloseSquare(1:12-1:13
LowerIdent(2:1-2:5),OpAssign(2:6-2:7),StringStart(2:8-2:9),StringPart(2:9-2:12),StringEnd(2:12-2:13),Newline(1:1-1:1),
LowerIdent(3:1-3:4),OpAssign(3:5-3:6),StringStart(3:7-3:8),StringPart(3:8-3:14),OpenStringInterpolation(3:14-3:16),LowerIdent(3:16-3:20),CloseStringInterpolation(3:20-3:21),StringPart(3:21-3:21),StringEnd(3:21-3:22),EndOfFile(3:22-3:22),
~~~PARSE
(file (1:1-3:22)
(module (1:1-1:13)
(exposed_item (1:9-1:12) (lower_ident "foo")))
(file
(module (1:1-1:13) (exposed_item (lower_ident "foo")))
(decl (2:1-2:13)
(ident (2:1-2:5) "name")
(string (2:8-2:13) (string_part (2:9-2:12) "luc")))
@@ -25,9 +24,5 @@ LowerIdent(3:1-3:4),OpAssign(3:5-3:6),StringStart(3:7-3:8),StringPart(3:8-3:14),
(ident (3:16-3:20) "" "name")
(string_part (3:21-3:21) ""))))
~~~FORMATTED
module [foo]
name = "luc"
foo = "hello ${name}"
NO CHANGE
~~~END

View file

@@ -10,14 +10,11 @@ NIL
KwModule(1:1-1:7),OpenSquare(1:8-1:9),LowerIdent(1:9-1:12),CloseSquare(1:12-1:13),Newline(1:1-1:1),
LowerIdent(2:1-2:4),OpAssign(2:5-2:6),UpperIdent(2:7-2:15),EndOfFile(2:15-2:15),
~~~PARSE
(file (1:1-2:15)
(module (1:1-1:13)
(exposed_item (1:9-1:12) (lower_ident "foo")))
(file
(module (1:1-1:13) (exposed_item (lower_ident "foo")))
(decl (2:1-2:15)
(ident (2:1-2:4) "foo")
(tag (2:7-2:15) "FortyTwo")))
~~~FORMATTED
module [foo]
foo = FortyTwo
NO CHANGE
~~~END

Some files were not shown because too many files have changed in this diff