Merge branch 'main' into improve-platform-error

Anton-4 committed on 2025-12-08 16:02:38 +01:00 (committed by GitHub)
commit f07016d2bc
67 changed files with 4956 additions and 2065 deletions


@ -85,18 +85,25 @@ jobs:
include:
- os: macos-15-intel
cpu_flag: -Dcpu=x86_64_v3
target_flag: ''
- os: macos-15
cpu_flag: ''
target_flag: ''
- os: ubuntu-22.04
cpu_flag: -Dcpu=x86_64_v3
target_flag: -Dtarget=x86_64-linux-musl
- os: ubuntu-24.04-arm
cpu_flag: ''
target_flag: -Dtarget=aarch64-linux-musl
- os: windows-2022
cpu_flag: -Dcpu=x86_64_v3
target_flag: ''
- os: windows-2025
cpu_flag: -Dcpu=x86_64_v3
target_flag: ''
- os: windows-11-arm
cpu_flag: ''
target_flag: ''
steps:
- name: Checkout
@ -119,7 +126,7 @@ jobs:
- name: build roc + repro executables
uses: ./.github/actions/flaky-retry
with:
command: "zig build -Dfuzz -Dsystem-afl=false -Doptimize=ReleaseFast ${{ matrix.cpu_flag }}"
command: "zig build -Dfuzz -Dsystem-afl=false -Doptimize=ReleaseFast ${{ matrix.cpu_flag }} ${{ matrix.target_flag }}"
error_string_contains: "EndOfStream"
retry_count: 3
@ -136,7 +143,7 @@ jobs:
- name: Run Test Platforms (Unix)
if: runner.os != 'Windows'
run: |
zig build test-cli
zig build test-cli ${{ matrix.target_flag }}
- name: Setup MSVC (Windows)
if: runner.os == 'Windows'
@ -165,13 +172,13 @@ jobs:
zig-out\bin\roc.exe check ./src/PROFILING/bench_repeated_check.roc
- name: zig snapshot tests
run: zig build snapshot -- --debug
run: zig build snapshot ${{ matrix.target_flag }} -- --debug
# 1) in debug mode
- name: build and execute tests, build repro executables
uses: ./.github/actions/flaky-retry
with:
command: "zig build test -Dfuzz -Dsystem-afl=false"
command: "zig build test -Dfuzz -Dsystem-afl=false ${{ matrix.target_flag }}"
error_string_contains: "double roundtrip bundle"
retry_count: 3
@ -179,7 +186,7 @@ jobs:
- name: Build and execute tests, build repro executables. All in release mode.
uses: ./.github/actions/flaky-retry
with:
command: "zig build test -Doptimize=ReleaseFast -Dfuzz -Dsystem-afl=false ${{ matrix.cpu_flag }}"
command: "zig build test -Doptimize=ReleaseFast -Dfuzz -Dsystem-afl=false ${{ matrix.cpu_flag }} ${{ matrix.target_flag }}"
error_string_contains: "double roundtrip bundle"
retry_count: 3
@ -208,6 +215,8 @@ jobs:
# We can re-evaluate as new versions of zig/valgrind come out.
if: ${{ matrix.os == 'ubuntu-22.04' }}
run: |
# Install libc6-dbg which is required for Valgrind's function redirections
sudo apt-get update && sudo apt-get install -y libc6-dbg
sudo snap install valgrind --classic
valgrind --version
./ci/custom_valgrind.sh ./zig-out/bin/snapshot --debug --verbose
@ -248,7 +257,7 @@ jobs:
run: |
git clean -fdx
git reset --hard HEAD
nix develop ./src/ -c zig build && zig build snapshot && zig build test
nix develop ./src/ -c zig build ${{ matrix.target_flag }} && zig build snapshot ${{ matrix.target_flag }} && zig build test ${{ matrix.target_flag }}
zig-cross-compile:
needs: check-once

build.zig

@ -292,6 +292,156 @@ const CheckTypeCheckerPatternsStep = struct {
}
};
/// Build step that checks for @enumFromInt(0) usage in all .zig files.
///
/// We forbid @enumFromInt(0) because it hides bugs and makes them harder to debug.
/// If we need a placeholder value that we believe will never be read, we should
/// use `undefined` instead - that way our intent is clear, and it can fail in a
/// more obvious way if our assumption is incorrect.
const CheckEnumFromIntZeroStep = struct {
step: Step,
fn create(b: *std.Build) *CheckEnumFromIntZeroStep {
const self = b.allocator.create(CheckEnumFromIntZeroStep) catch @panic("OOM");
self.* = .{
.step = Step.init(.{
.id = Step.Id.custom,
.name = "check-enum-from-int-zero",
.owner = b,
.makeFn = make,
}),
};
return self;
}
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
const allocator = b.allocator;
var violations = std.ArrayList(Violation).empty;
defer violations.deinit(allocator);
// Recursively scan src/ for .zig files
var dir = std.fs.cwd().openDir("src", .{ .iterate = true }) catch |err| {
return step.fail("Failed to open src directory: {}", .{err});
};
defer dir.close();
try scanDirectoryForEnumFromIntZero(allocator, dir, "src", &violations);
if (violations.items.len > 0) {
std.debug.print("\n", .{});
std.debug.print("=" ** 80 ++ "\n", .{});
std.debug.print("FORBIDDEN PATTERN: @enumFromInt(0)\n", .{});
std.debug.print("=" ** 80 ++ "\n\n", .{});
std.debug.print(
\\Using @enumFromInt(0) is forbidden in this codebase.
\\
\\WHY THIS RULE EXISTS:
\\ @enumFromInt(0) hides bugs and makes them harder to debug. It creates
\\ a "valid-looking" value that can silently propagate through the code
\\ when something goes wrong.
\\
\\WHAT TO DO INSTEAD:
\\ If you need a placeholder value that you believe will never be read,
\\ use `undefined` instead. This makes your intent clear, and if your
\\ assumption is wrong and the value IS read, it will fail more obviously.
\\
\\ When using `undefined`, add a comment explaining why it's correct there
\\ (e.g., that it will be overwritten before being read).
\\
\\ Example - WRONG:
\\ .anno = @enumFromInt(0), // placeholder - will be replaced
\\
\\ Example - RIGHT:
\\ .anno = undefined, // overwritten in Phase 1.7 before use
\\
\\VIOLATIONS FOUND:
\\
, .{});
for (violations.items) |violation| {
std.debug.print(" {s}:{d}: {s}\n", .{
violation.file_path,
violation.line_number,
violation.line_content,
});
}
std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{});
return step.fail(
"Found {d} uses of @enumFromInt(0). Using placeholder values like this has consistently led to bugs in this code base. " ++
"Do not use @enumFromInt(0) and also do not uncritically replace it with another placeholder like .first or something like that. " ++
"If you want it to be uninitialized and are very confident it will be overwritten before it is ever read, then use `undefined`. " ++
"Otherwise, take a step back and rethink how this code works; there should be a way to implement this in a way that does not use hardcoded placeholder indices like 0! " ++
"See above for details.",
.{violations.items.len},
);
}
}
const Violation = struct {
file_path: []const u8,
line_number: usize,
line_content: []const u8,
};
fn scanDirectoryForEnumFromIntZero(
allocator: std.mem.Allocator,
dir: std.fs.Dir,
path_prefix: []const u8,
violations: *std.ArrayList(Violation),
) !void {
var walker = try dir.walk(allocator);
defer walker.deinit();
while (try walker.next()) |entry| {
if (entry.kind != .file) continue;
if (!std.mem.endsWith(u8, entry.path, ".zig")) continue;
const full_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ path_prefix, entry.path });
const file = dir.openFile(entry.path, .{}) catch continue;
defer file.close();
const content = file.readToEndAlloc(allocator, 10 * 1024 * 1024) catch continue;
defer allocator.free(content);
var line_number: usize = 1;
var line_start: usize = 0;
for (content, 0..) |char, i| {
if (char == '\n') {
const line = content[line_start..i];
const trimmed = std.mem.trim(u8, line, " \t");
// Skip comments
if (std.mem.startsWith(u8, trimmed, "//")) {
line_number += 1;
line_start = i + 1;
continue;
}
// Check for @enumFromInt(0) usage
if (std.mem.indexOf(u8, line, "@enumFromInt(0)") != null) {
try violations.append(allocator, .{
.file_path = full_path,
.line_number = line_number,
.line_content = try allocator.dupe(u8, trimmed),
});
}
line_number += 1;
line_start = i + 1;
}
}
}
}
};
/// Build step that checks for unused variable suppression patterns.
///
/// In this codebase, we don't use `_ = variable;` to suppress unused variable warnings.
@ -1419,6 +1569,10 @@ pub fn build(b: *std.Build) void {
const check_patterns = CheckTypeCheckerPatternsStep.create(b);
test_step.dependOn(&check_patterns.step);
// Add check for @enumFromInt(0) usage
const check_enum_from_int = CheckEnumFromIntZeroStep.create(b);
test_step.dependOn(&check_enum_from_int.step);
// Add check for unused variable suppression patterns
const check_unused = CheckUnusedSuppressionStep.create(b);
test_step.dependOn(&check_unused.step);
@ -1496,6 +1650,8 @@ pub fn build(b: *std.Build) void {
}
// Ensure host library is copied before running the test
run_fx_platform_test.step.dependOn(&copy_test_fx_host.step);
// Ensure roc binary is built before running the test (tests invoke roc CLI)
run_fx_platform_test.step.dependOn(roc_step);
tests_summary.addRun(&run_fx_platform_test.step);
}
@ -2050,9 +2206,8 @@ fn addStaticLlvmOptionsToModule(mod: *std.Build.Module) !void {
mod.linkSystemLibrary("z", link_static);
if (mod.resolved_target.?.result.os.tag != .windows or mod.resolved_target.?.result.abi != .msvc) {
// TODO: Can this just be `mod.link_libcpp = true`? Does that make a difference?
// This means we rely on clang-or-zig-built LLVM, Clang, LLD libraries.
mod.linkSystemLibrary("c++", .{});
// Use Zig's bundled static libc++ to keep the binary statically linked
mod.link_libcpp = true;
}
if (mod.resolved_target.?.result.os.tag == .windows) {


@ -11,6 +11,7 @@ pub const parallel = @import("parallel.zig");
pub const SmallStringInterner = @import("SmallStringInterner.zig");
pub const safe_memory = @import("safe_memory.zig");
pub const stack_overflow = @import("stack_overflow.zig");
pub const target = @import("target.zig");
pub const DataSpan = @import("DataSpan.zig").DataSpan;
@ -158,6 +159,7 @@ test "base tests" {
std.testing.refAllDecls(@import("Scratch.zig"));
std.testing.refAllDecls(@import("SExprTree.zig"));
std.testing.refAllDecls(@import("SmallStringInterner.zig"));
std.testing.refAllDecls(@import("stack_overflow.zig"));
std.testing.refAllDecls(@import("StringLiteral.zig"));
std.testing.refAllDecls(@import("target.zig"));
}

src/base/stack_overflow.zig (new file)

@ -0,0 +1,263 @@
//! Signal handling for the Roc compiler (stack overflow, segfault, division by zero).
//!
//! This module provides a thin wrapper around the generic signal handlers in
//! builtins.handlers, configured with compiler-specific error messages.
//!
//! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate
//! signal stack and install handlers for SIGSEGV, SIGBUS, and SIGFPE.
//!
//! On Windows, we use SetUnhandledExceptionFilter to catch various exceptions.
//!
//! WASI is not currently supported (no signal handling available).
const std = @import("std");
const builtin = @import("builtin");
const handlers = @import("builtins").handlers;
const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined;
/// Error message to display on stack overflow
const STACK_OVERFLOW_MESSAGE = "\nThe Roc compiler overflowed its stack memory and had to exit.\n\n";
/// Callback for stack overflow in the compiler
fn handleStackOverflow() noreturn {
if (comptime builtin.os.tag == .windows) {
// Windows: use WriteFile for signal-safe output
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null);
kernel32.ExitProcess(134);
} else if (comptime builtin.os.tag != .wasi) {
// POSIX: use direct write syscall for signal-safety
_ = posix.write(posix.STDERR_FILENO, STACK_OVERFLOW_MESSAGE) catch {};
posix.exit(134);
} else {
// WASI fallback
std.process.exit(134);
}
}
/// Error message to display on arithmetic error (division by zero, etc.)
const ARITHMETIC_ERROR_MESSAGE = "\nThe Roc compiler divided by zero and had to exit.\n\n";
/// Callback for arithmetic errors (division by zero) in the compiler
fn handleArithmeticError() noreturn {
if (comptime builtin.os.tag == .windows) {
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, ARITHMETIC_ERROR_MESSAGE.ptr, ARITHMETIC_ERROR_MESSAGE.len, &bytes_written, null);
kernel32.ExitProcess(136);
} else if (comptime builtin.os.tag != .wasi) {
_ = posix.write(posix.STDERR_FILENO, ARITHMETIC_ERROR_MESSAGE) catch {};
posix.exit(136); // 128 + 8 (SIGFPE)
} else {
std.process.exit(136);
}
}
/// Callback for access violation in the compiler
fn handleAccessViolation(fault_addr: usize) noreturn {
if (comptime builtin.os.tag == .windows) {
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
var addr_buf: [18]u8 = undefined;
const addr_str = handlers.formatHex(fault_addr, &addr_buf);
const msg1 = "\nAccess violation in the Roc compiler.\nFault address: ";
const msg2 = "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n";
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, msg1.ptr, msg1.len, &bytes_written, null);
_ = kernel32.WriteFile(stderr_handle, addr_str.ptr, @intCast(addr_str.len), &bytes_written, null);
_ = kernel32.WriteFile(stderr_handle, msg2.ptr, msg2.len, &bytes_written, null);
kernel32.ExitProcess(139);
} else {
// POSIX (and WASI fallback): use direct write syscall for signal-safety
const generic_msg = "\nSegmentation fault (SIGSEGV) in the Roc compiler.\nFault address: ";
_ = posix.write(posix.STDERR_FILENO, generic_msg) catch {};
// Write the fault address as hex
var addr_buf: [18]u8 = undefined;
const addr_str = handlers.formatHex(fault_addr, &addr_buf);
_ = posix.write(posix.STDERR_FILENO, addr_str) catch {};
_ = posix.write(posix.STDERR_FILENO, "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n") catch {};
posix.exit(139);
}
}
/// Install signal handlers for stack overflow, segfault, and division by zero.
/// This should be called early in main() before any significant work is done.
/// Returns true if the handlers were installed successfully, false otherwise.
pub fn install() bool {
return handlers.install(handleStackOverflow, handleAccessViolation, handleArithmeticError);
}
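For illustration, here is a minimal sketch (not from this commit) of wiring the handlers up early in main; the `@import("base")` path is an assumption:

const stack_overflow = @import("base").stack_overflow; // assumed module path

pub fn main() !void {
    // Best-effort: install() returns false on WASI, where signals are unavailable.
    _ = stack_overflow.install();
    // ... rest of compiler startup
}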
/// Test function that intentionally causes a stack overflow.
/// This is used to verify the handler works correctly.
pub fn triggerStackOverflowForTest() noreturn {
// Use a recursive function that can't be tail-call optimized
const S = struct {
fn recurse(n: usize) usize {
// Prevent tail-call optimization by doing work after the recursive call
var buf: [1024]u8 = undefined;
buf[0] = @truncate(n);
const result = if (n == 0) 0 else recurse(n + 1);
// Use the buffer to prevent it from being optimized away
return result + buf[0];
}
};
// This will recurse until stack overflow
const result = S.recurse(1);
// This should never be reached
std.debug.print("Unexpected result: {}\n", .{result});
std.process.exit(1);
}
test "formatHex" {
var buf: [18]u8 = undefined;
const zero = handlers.formatHex(0, &buf);
try std.testing.expectEqualStrings("0x0", zero);
const small = handlers.formatHex(0xff, &buf);
try std.testing.expectEqualStrings("0xff", small);
const medium = handlers.formatHex(0xdeadbeef, &buf);
try std.testing.expectEqualStrings("0xdeadbeef", medium);
}
/// Check if we're being run as a subprocess to trigger stack overflow.
/// This is called by tests to create a child process that will crash.
/// Returns true if we should trigger the overflow (and not return).
pub fn checkAndTriggerIfSubprocess() bool {
// Check for the special environment variable that signals we should crash
const env_val = std.process.getEnvVarOwned(std.heap.page_allocator, "ROC_TEST_TRIGGER_STACK_OVERFLOW") catch return false;
defer std.heap.page_allocator.free(env_val);
if (std.mem.eql(u8, env_val, "1")) {
// Install handler and trigger overflow
_ = install();
triggerStackOverflowForTest();
// Never returns
}
return false;
}
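For reference, a hypothetical sketch of the test-side spawn this hook implies; `allocator`, `self_exe_path`, and the exact std.process.Child usage are assumptions rather than code from this commit:

var env_map = try std.process.getEnvMap(allocator);
defer env_map.deinit();
try env_map.put("ROC_TEST_TRIGGER_STACK_OVERFLOW", "1");

var child = std.process.Child.init(&.{self_exe_path}, allocator);
child.env_map = &env_map;
child.stderr_behavior = .Pipe;
try child.spawn();
const term = try child.wait();

// The overflow handler exits with code 134 (see handleStackOverflow above).
switch (term) {
    .Exited => |code| try std.testing.expectEqual(@as(u8, 134), code),
    else => return error.TestUnexpectedResult,
}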
test "stack overflow handler produces helpful error message" {
// Skip on WASI - no process spawning or signal handling
if (comptime builtin.os.tag == .wasi) {
return error.SkipZigTest;
}
if (comptime builtin.os.tag == .windows) {
// Windows test would need subprocess spawning which is more complex
// The handler is installed and works, but testing it is harder
// For now, just verify the handler installs successfully
if (install()) {
return; // Success - handler installed
}
return error.SkipZigTest;
}
try testStackOverflowPosix();
}
fn testStackOverflowPosix() !void {
// Create a pipe to capture stderr from the child
const pipe_fds = try posix.pipe();
const pipe_read = pipe_fds[0];
const pipe_write = pipe_fds[1];
const fork_result = posix.fork() catch {
posix.close(pipe_read);
posix.close(pipe_write);
return error.ForkFailed;
};
if (fork_result == 0) {
// Child process
posix.close(pipe_read);
// Redirect stderr to the pipe
posix.dup2(pipe_write, posix.STDERR_FILENO) catch posix.exit(99);
posix.close(pipe_write);
// Install the handler and trigger stack overflow
_ = install();
triggerStackOverflowForTest();
// Should never reach here
unreachable;
} else {
// Parent process
posix.close(pipe_write);
// Wait for child to exit
const wait_result = posix.waitpid(fork_result, 0);
const status = wait_result.status;
// Parse the wait status (Unix encoding)
const exited_normally = (status & 0x7f) == 0;
const exit_code: u8 = @truncate((status >> 8) & 0xff);
const termination_signal: u8 = @truncate(status & 0x7f);
// Read stderr output from child
var stderr_buf: [4096]u8 = undefined;
const bytes_read = posix.read(pipe_read, &stderr_buf) catch 0;
posix.close(pipe_read);
const stderr_output = stderr_buf[0..bytes_read];
try verifyHandlerOutput(exited_normally, exit_code, termination_signal, stderr_output);
}
}
fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: u8, stderr_output: []const u8) !void {
// Exit code 134 = stack overflow detected
// Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow)
if (exited_normally and (exit_code == 134 or exit_code == 139)) {
// Check that our handler message was printed
const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "overflowed its stack memory") != null;
const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null;
// Handler should have printed EITHER stack overflow message OR segfault message
try std.testing.expect(has_stack_overflow_msg or has_segfault_msg);
} else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) {
// The handler might not have caught it - this can happen on some systems
// where the signal delivery is different. Just warn and skip.
std.debug.print("Warning: Stack overflow was not caught by handler (signal {})\n", .{termination_signal});
return error.SkipZigTest;
} else {
std.debug.print("Unexpected exit status: exited={}, code={}, signal={}\n", .{ exited_normally, exit_code, termination_signal });
std.debug.print("Stderr: {s}\n", .{stderr_output});
return error.TestUnexpectedResult;
}
}


@ -307,7 +307,7 @@ pub const ModuleType = enum {
.fs => &.{},
.tracy => &.{ .build_options, .builtins },
.collections => &.{},
.base => &.{.collections},
.base => &.{ .collections, .builtins },
.roc_src => &.{},
.types => &.{ .base, .collections },
.reporting => &.{ .collections, .base },


@ -119,6 +119,19 @@ Builtin :: [].{
},
)
count_if : List(a), (a -> Bool) -> U64
count_if = |list, predicate|
List.fold(
list,
0,
|acc, elem|
if predicate(elem) {
acc + 1
} else {
acc
},
)
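For example, counting the elements that satisfy a predicate (a hypothetical call, not part of this commit):

# count_if([1, 2, 3, 4], |n| n > 2) == 2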
fold : List(item), state, (state, item -> state) -> state
fold = |list, init, step| {
var $state = init
@ -331,6 +344,18 @@ Builtin :: [].{
from_numeral : Numeral -> Try(U8, [InvalidNumeral(Str), ..others])
from_str : Str -> Try(U8, [BadNumStr, ..others])
# # List of integers beginning with this `U8` and ending with the other `U8`.
# # (Use [until] instead to end with the other `U8` minus one.)
# # Returns an empty list if this `U8` is greater than the other.
to : U8, U8 -> List(U8)
to = |start, end| range_to(start, end)
# # List of integers beginning with this `U8` and ending with the other `U8` minus one.
# # (Use [to] instead to end with the other `U8` exactly, instead of minus one.)
# # Returns an empty list if this `U8` is greater than or equal to the other.
until : U8, U8 -> List(U8)
until = |start, end| range_until(start, end)
# Conversions to signed integers (I8 is lossy, others are safe)
to_i8_wrap : U8 -> I8
to_i8_try : U8 -> Try(I8, [OutOfRange, ..others])
@ -977,8 +1002,29 @@ Builtin :: [].{
}
}
# Private top-level function for unsafe list access
# This is a low-level operation that gets replaced by the compiler
range_to = |var $current, end| {
var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist.
while $current <= end {
$answer = $answer.append($current)
$current = $current + 1
}
$answer
}
range_until = |var $current, end| {
var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist.
while $current < end {
$answer = $answer.append($current)
$current = $current + 1
}
$answer
}
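To illustrate the difference between the two helpers above (hypothetical calls):

# range_to(3, 5) == [3, 4, 5] # `to` includes the endpoint
# range_until(3, 5) == [3, 4] # `until` stops one before it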
# Implemented by the compiler, does not perform bounds checks
list_get_unsafe : List(item), U64 -> item
# Unsafe conversion functions - these return simple records instead of Try types

src/builtins/handlers.zig (new file)

@ -0,0 +1,338 @@
//! Generic signal handlers for stack overflow, access violation, and arithmetic errors.
//!
//! This module provides a mechanism to catch runtime errors like stack overflows,
//! access violations, and division by zero, handling them with custom callbacks
//! instead of crashing with a raw signal.
//!
//! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate
//! signal stack and install handlers for SIGSEGV, SIGBUS, and SIGFPE.
//!
//! On Windows, we use SetUnhandledExceptionFilter to catch various exceptions.
//!
//! WASI is not currently supported (no signal handling available).
const std = @import("std");
const builtin = @import("builtin");
const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined;
// Windows types and constants
const DWORD = u32;
const LONG = i32;
const ULONG_PTR = usize;
const PVOID = ?*anyopaque;
const HANDLE = ?*anyopaque;
const BOOL = i32;
const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD;
const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005;
const EXCEPTION_INT_DIVIDE_BY_ZERO: DWORD = 0xC0000094;
const EXCEPTION_INT_OVERFLOW: DWORD = 0xC0000095;
const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize));
const EXCEPTION_RECORD = extern struct {
ExceptionCode: DWORD,
ExceptionFlags: DWORD,
ExceptionRecord: ?*EXCEPTION_RECORD,
ExceptionAddress: PVOID,
NumberParameters: DWORD,
ExceptionInformation: [15]ULONG_PTR,
};
const CONTEXT = extern struct {
// We don't need the full context, just enough to make the struct valid
data: [1232]u8, // Size varies by arch, this is x64 size
};
const EXCEPTION_POINTERS = extern struct {
ExceptionRecord: *EXCEPTION_RECORD,
ContextRecord: *CONTEXT,
};
const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(.winapi) LONG;
// Windows API imports
extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(.winapi) LPTOP_LEVEL_EXCEPTION_FILTER;
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) BOOL;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
/// Size of the alternate signal stack (64KB should be plenty for the handler)
const ALT_STACK_SIZE = 64 * 1024;
/// Storage for the alternate signal stack (POSIX only)
var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined;
/// Whether the handler has been installed
var handler_installed = false;
/// Callback function type for handling stack overflow
pub const StackOverflowCallback = *const fn () noreturn;
/// Callback function type for handling access violation/segfault
pub const AccessViolationCallback = *const fn (fault_addr: usize) noreturn;
/// Callback function type for handling division by zero (and other arithmetic errors)
pub const ArithmeticErrorCallback = *const fn () noreturn;
/// Stored callbacks (set during install)
var stack_overflow_callback: ?StackOverflowCallback = null;
var access_violation_callback: ?AccessViolationCallback = null;
var arithmetic_error_callback: ?ArithmeticErrorCallback = null;
/// Install signal handlers with custom callbacks.
///
/// Parameters:
/// - on_stack_overflow: Called when a stack overflow is detected. Must not return.
/// - on_access_violation: Called for other memory access violations (segfaults).
/// Receives the fault address. Must not return.
/// - on_arithmetic_error: Called for arithmetic errors like division by zero. Must not return.
///
/// Returns true if the handlers were installed successfully, false otherwise.
pub fn install(
on_stack_overflow: StackOverflowCallback,
on_access_violation: AccessViolationCallback,
on_arithmetic_error: ArithmeticErrorCallback,
) bool {
if (handler_installed) return true;
stack_overflow_callback = on_stack_overflow;
access_violation_callback = on_access_violation;
arithmetic_error_callback = on_arithmetic_error;
if (comptime builtin.os.tag == .windows) {
return installWindows();
}
if (comptime builtin.os.tag == .wasi) {
// WASI doesn't support signal handling
return false;
}
return installPosix();
}
fn installPosix() bool {
// Set up the alternate signal stack
var alt_stack = posix.stack_t{
.sp = &alt_stack_storage,
.flags = 0,
.size = ALT_STACK_SIZE,
};
posix.sigaltstack(&alt_stack, null) catch {
return false;
};
// Install the SIGSEGV handler for stack overflow and access violations
const segv_action = posix.Sigaction{
.handler = .{ .sigaction = handleSegvSignal },
.mask = posix.sigemptyset(),
.flags = posix.SA.SIGINFO | posix.SA.ONSTACK,
};
posix.sigaction(posix.SIG.SEGV, &segv_action, null);
// Also catch SIGBUS which can occur on some systems for stack overflow
posix.sigaction(posix.SIG.BUS, &segv_action, null);
// Install the SIGFPE handler for division by zero and other arithmetic errors
const fpe_action = posix.Sigaction{
.handler = .{ .sigaction = handleFpeSignal },
.mask = posix.sigemptyset(),
.flags = posix.SA.SIGINFO | posix.SA.ONSTACK,
};
posix.sigaction(posix.SIG.FPE, &fpe_action, null);
handler_installed = true;
return true;
}
fn installWindows() bool {
_ = SetUnhandledExceptionFilter(handleExceptionWindows);
handler_installed = true;
return true;
}
/// Windows exception handler function
fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG {
const exception_code = exception_info.ExceptionRecord.ExceptionCode;
// Check if this is a known exception type
const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW);
const is_access_violation = (exception_code == EXCEPTION_ACCESS_VIOLATION);
const is_divide_by_zero = (exception_code == EXCEPTION_INT_DIVIDE_BY_ZERO);
const is_int_overflow = (exception_code == EXCEPTION_INT_OVERFLOW);
const is_arithmetic_error = is_divide_by_zero or is_int_overflow;
if (!is_stack_overflow and !is_access_violation and !is_arithmetic_error) {
// Let other handlers deal with this exception
return EXCEPTION_CONTINUE_SEARCH;
}
if (is_stack_overflow) {
if (stack_overflow_callback) |callback| {
callback();
}
ExitProcess(134);
} else if (is_arithmetic_error) {
if (arithmetic_error_callback) |callback| {
callback();
}
ExitProcess(136); // 128 + 8 (SIGFPE)
} else {
if (access_violation_callback) |callback| {
// Get fault address from ExceptionInformation[1] for access violations
const fault_addr = exception_info.ExceptionRecord.ExceptionInformation[1];
callback(fault_addr);
}
ExitProcess(139);
}
}
/// The POSIX SIGSEGV/SIGBUS signal handler function
fn handleSegvSignal(_: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void {
// Get the fault address - access differs by platform
const fault_addr: usize = getFaultAddress(info);
// Get the current stack pointer to help determine if this is a stack overflow
var current_sp: usize = 0;
asm volatile (""
: [sp] "={sp}" (current_sp),
);
// A stack overflow typically occurs when the fault address is near the stack pointer
// or below the stack (stacks grow downward on most architectures)
const likely_stack_overflow = isLikelyStackOverflow(fault_addr, current_sp);
if (likely_stack_overflow) {
if (stack_overflow_callback) |callback| {
callback();
}
} else {
if (access_violation_callback) |callback| {
callback(fault_addr);
}
}
// If no callback was set, exit with appropriate code
if (likely_stack_overflow) {
posix.exit(134); // 128 + 6 (SIGABRT-like)
} else {
posix.exit(139); // 128 + 11 (SIGSEGV)
}
}
/// The POSIX SIGFPE signal handler function (division by zero, etc.)
fn handleFpeSignal(_: i32, _: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void {
if (arithmetic_error_callback) |callback| {
callback();
}
// If no callback was set, exit with SIGFPE code
posix.exit(136); // 128 + 8 (SIGFPE)
}
/// Get the fault address from siginfo_t (platform-specific)
fn getFaultAddress(info: *const posix.siginfo_t) usize {
// The siginfo_t structure varies by platform
if (comptime builtin.os.tag == .linux) {
// Linux: fault address is in fields.sigfault.addr
return @intFromPtr(info.fields.sigfault.addr);
} else if (comptime builtin.os.tag == .macos or
builtin.os.tag == .ios or
builtin.os.tag == .tvos or
builtin.os.tag == .watchos or
builtin.os.tag == .visionos or
builtin.os.tag == .freebsd or
builtin.os.tag == .dragonfly or
builtin.os.tag == .netbsd or
builtin.os.tag == .openbsd)
{
// macOS/iOS/BSD: fault address is in addr field
return @intFromPtr(info.addr);
} else {
// Fallback: return 0 if we can't determine the address
return 0;
}
}
/// Heuristic to determine if a fault is likely a stack overflow
fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool {
// If fault address is 0 or very low, it's likely a null pointer dereference
if (fault_addr < 4096) return false;
// If the fault address is close to the current stack pointer (within 16MB),
// it's very likely a stack overflow. The signal handler runs on an alternate
// stack, but the fault address should still be near where the stack was.
const sp_distance = if (fault_addr < current_sp) current_sp - fault_addr else fault_addr - current_sp;
if (sp_distance < 16 * 1024 * 1024) { // Within 16MB of stack pointer
return true;
}
// On 64-bit systems, stacks are typically placed in high memory.
// On macOS, the stack is around 0x16XXXXXXXX (about 6GB mark).
// On Linux, it's typically near 0x7FFFFFFFFFFF.
// If the fault address is in the upper half of the address space,
// it's more likely to be a stack-related issue.
if (comptime @sizeOf(usize) == 8) {
// 64-bit: check if address is in upper portion of address space
// On macOS, stacks start around 0x100000000 (4GB) and go up
// On Linux, stacks are near 0x7FFFFFFFFFFF
const lower_bound: usize = 0x100000000; // 4GB
if (fault_addr > lower_bound) {
// This is in the region where stacks typically are on 64-bit systems
// Default to assuming it's a stack overflow for addresses in this range
return true;
}
} else {
// 32-bit: stacks are typically in the upper portion of the 4GB space
const lower_bound: usize = 0x40000000; // 1GB
if (fault_addr > lower_bound) {
return true;
}
}
return false;
}
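A small illustrative test for this heuristic (not part of the commit; the constants assume a 64-bit target and are chosen to exercise the branches above):

test "isLikelyStackOverflow heuristic (illustrative)" {
    // Very low addresses look like null-pointer dereferences, never stack overflow.
    try std.testing.expect(!isLikelyStackOverflow(0x10, 0x7fff_ffff_0000));
    // A fault within 16MB of the stack pointer is classified as a stack overflow.
    try std.testing.expect(isLikelyStackOverflow(0x7fff_fff0_0000, 0x7fff_ffff_0000));
}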
/// Format a usize as hexadecimal (for use in callbacks)
pub fn formatHex(value: usize, buf: []u8) []const u8 {
const hex_chars = "0123456789abcdef";
var i: usize = buf.len;
if (value == 0) {
i -= 1;
buf[i] = '0';
} else {
var v = value;
while (v > 0 and i > 2) {
i -= 1;
buf[i] = hex_chars[v & 0xf];
v >>= 4;
}
}
// Add 0x prefix
i -= 1;
buf[i] = 'x';
i -= 1;
buf[i] = '0';
return buf[i..];
}
test "formatHex" {
var buf: [18]u8 = undefined;
const zero = formatHex(0, &buf);
try std.testing.expectEqualStrings("0x0", zero);
const small = formatHex(0xff, &buf);
try std.testing.expectEqualStrings("0xff", small);
const medium = formatHex(0xdeadbeef, &buf);
try std.testing.expectEqualStrings("0xdeadbeef", medium);
}


@ -3,6 +3,7 @@ const std = @import("std");
pub const host_abi = @import("host_abi.zig");
pub const dec = @import("dec.zig");
pub const handlers = @import("handlers.zig");
pub const hash = @import("hash.zig");
pub const list = @import("list.zig");
pub const num = @import("num.zig");
@ -12,6 +13,7 @@ pub const utils = @import("utils.zig");
test "builtins tests" {
std.testing.refAllDecls(@import("dec.zig"));
std.testing.refAllDecls(@import("handlers.zig"));
std.testing.refAllDecls(@import("hash.zig"));
std.testing.refAllDecls(@import("host_abi.zig"));
std.testing.refAllDecls(@import("list.zig"));


@ -719,7 +719,10 @@ pub fn fromF64(f: f64) ?RocDec {
/// Represents an import statement in a module
pub const Import = struct {
pub const Idx = enum(u32) { _ };
pub const Idx = enum(u32) {
first = 0,
_,
};
/// Sentinel value indicating unresolved import (max u32)
pub const UNRESOLVED_MODULE: u32 = std.math.maxInt(u32);


@ -51,8 +51,10 @@ in_statement_position: bool = true,
scopes: std.ArrayList(Scope) = .{},
/// Special scope for rigid type variables in annotations
type_vars_scope: base.Scratch(TypeVarScope),
/// Special scope for tracking exposed items from module header
exposed_scope: Scope = undefined,
/// Set of identifiers exposed from this module header (values not used)
exposed_idents: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{},
/// Set of types exposed from this module header (values not used)
exposed_types: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{},
/// Track exposed identifiers by text to handle changing indices
exposed_ident_texts: std.StringHashMapUnmanaged(Region) = .{},
/// Track exposed types by text to handle changing indices
@ -180,7 +182,8 @@ pub fn deinit(
const gpa = self.env.gpa;
self.type_vars_scope.deinit();
self.exposed_scope.deinit(gpa);
self.exposed_idents.deinit(gpa);
self.exposed_types.deinit(gpa);
self.exposed_ident_texts.deinit(gpa);
self.exposed_type_texts.deinit(gpa);
self.placeholder_idents.deinit(gpa);
@ -234,7 +237,6 @@ pub fn init(
.scratch_record_fields = try base.Scratch(types.RecordField).init(gpa),
.scratch_seen_record_fields = try base.Scratch(SeenRecordField).init(gpa),
.type_vars_scope = try base.Scratch(TypeVarScope).init(gpa),
.exposed_scope = Scope.init(false),
.scratch_tags = try base.Scratch(types.Tag).init(gpa),
.scratch_free_vars = try base.Scratch(Pattern.Idx).init(gpa),
.scratch_captures = try base.Scratch(Pattern.Idx).init(gpa),
@ -458,8 +460,8 @@ fn processTypeDeclFirstPass(
// Type was already introduced - check if it's a placeholder (anno == .placeholder) or a real declaration
const existing_stmt = self.env.store.getStatement(existing_stmt_idx);
const is_placeholder = switch (existing_stmt) {
.s_alias_decl => |alias| @intFromEnum(alias.anno) == 0,
.s_nominal_decl => |nominal| @intFromEnum(nominal.anno) == 0,
.s_alias_decl => |alias| alias.anno == .placeholder,
.s_nominal_decl => |nominal| nominal.anno == .placeholder,
else => false,
};
@ -483,13 +485,13 @@ fn processTypeDeclFirstPass(
.alias => Statement{
.s_alias_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced below
.anno = .placeholder, // placeholder, will be overwritten
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced below
.anno = .placeholder, // placeholder, will be overwritten
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -503,13 +505,13 @@ fn processTypeDeclFirstPass(
.alias => Statement{
.s_alias_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced
.anno = .placeholder, // placeholder, will be overwritten
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced
.anno = .placeholder, // placeholder, will be overwritten
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -636,13 +638,13 @@ fn introduceTypeNameOnly(
.alias => Statement{
.s_alias_decl = .{
.header = header_idx,
.anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7
.anno = .placeholder, // placeholder, overwritten in Phase 1.7
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = header_idx,
.anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7
.anno = .placeholder, // placeholder, overwritten in Phase 1.7
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -1210,7 +1212,6 @@ fn processAssociatedItemsSecondPass(
const parent_text = self.env.getIdent(parent_name);
const name_text = self.env.getIdent(name_ident);
const qualified_idx = try self.env.insertQualifiedIdent(parent_text, name_text);
// Create anno-only def with the qualified name
const def_idx = try self.createAnnoOnlyDef(qualified_idx, type_anno_idx, where_clauses, region);
@ -1747,7 +1748,7 @@ pub fn canonicalizeFile(
// canonicalize_header_packages();
// First, process the header to create exposed_scope and set module_kind
// First, process the header to populate exposed_idents/exposed_types and set module_kind
const header = self.parse_ir.store.getHeader(file.header);
switch (header) {
.module => |h| {
@ -2552,11 +2553,9 @@ fn createExposedScope(
self: *Self,
exposes: AST.Collection.Idx,
) std.mem.Allocator.Error!void {
const gpa = self.env.gpa;
// Reset exposed_scope (already initialized in init)
self.exposed_scope.deinit(gpa);
self.exposed_scope = Scope.init(false);
// Clear exposed sets (they're already initialized with default values)
self.exposed_idents.clearRetainingCapacity();
self.exposed_types.clearRetainingCapacity();
try self.addToExposedScope(exposes);
}
@ -2595,9 +2594,8 @@ fn addToExposedScope(
// Add to exposed_items for permanent storage (unconditionally)
try self.env.addExposedById(ident_idx);
// Use a dummy pattern index - we just need to track that it's exposed
const dummy_idx = @as(Pattern.Idx, @enumFromInt(0));
try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx);
// Just track that this identifier is exposed
try self.exposed_idents.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2628,9 +2626,8 @@ fn addToExposedScope(
// Don't add types to exposed_items - types are not values
// Only add to type_bindings for type resolution
// Use a dummy statement index - we just need to track that it's exposed
const dummy_idx = @as(Statement.Idx, @enumFromInt(0));
try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx });
// Just track that this type is exposed
try self.exposed_types.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2661,9 +2658,8 @@ fn addToExposedScope(
// Don't add types to exposed_items - types are not values
// Only add to type_bindings for type resolution
// Use a dummy statement index - we just need to track that it's exposed
const dummy_idx = @as(Statement.Idx, @enumFromInt(0));
try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx });
// Just track that this type is exposed
try self.exposed_types.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2711,9 +2707,8 @@ fn addPlatformProvidesItems(
// Add to exposed_items for permanent storage
try self.env.addExposedById(ident_idx);
// Add to exposed_scope so it becomes an export
const dummy_idx = @as(Pattern.Idx, @enumFromInt(0));
try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx);
// Track that this identifier is exposed (for exports)
try self.exposed_idents.put(gpa, ident_idx, {});
// Also track in exposed_ident_texts
const token_region = self.parse_ir.tokens.resolve(@intCast(field.name));
@ -2815,7 +2810,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void {
const defs_slice = self.env.store.sliceDefs(self.env.all_defs);
// Check each definition to see if it corresponds to an exposed item.
// We check exposed_scope.idents which only contains items from the exposing clause,
// We check exposed_idents which only contains items from the exposing clause,
// not associated items like "Color.as_str" which are registered separately.
for (defs_slice) |def_idx| {
const def = self.env.store.getDef(def_idx);
@ -2823,7 +2818,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void {
if (pattern == .assign) {
// Check if this identifier was explicitly exposed in the module header
if (self.exposed_scope.idents.contains(pattern.assign.ident)) {
if (self.exposed_idents.contains(pattern.assign.ident)) {
try self.env.store.addScratchDef(def_idx);
}
}
@ -4122,15 +4117,6 @@ pub fn canonicalizeExpr(
break :blk_qualified;
}
// Check if this is a package-qualified import (e.g., "pf.Stdout")
// These are cross-package imports resolved by the workspace resolver
const is_pkg_qualified = if (module_info) |info| info.is_package_qualified else false;
if (is_pkg_qualified) {
// Package-qualified import - member resolution happens via the resolver
// Fall through to normal identifier lookup
break :blk_qualified;
}
// Generate a more helpful error for auto-imported types (List, Bool, Try, etc.)
const is_auto_imported_type = if (self.module_envs) |envs_map|
envs_map.contains(module_name)
@ -5185,7 +5171,7 @@ pub fn canonicalizeExpr(
.patterns = ok_branch_pat_span,
.value = ok_lookup_idx,
.guard = null,
.redundant = @enumFromInt(0),
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5259,7 +5245,7 @@ pub fn canonicalizeExpr(
.patterns = err_branch_pat_span,
.value = return_expr_idx,
.guard = null,
.redundant = @enumFromInt(0),
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5273,7 +5259,7 @@ pub fn canonicalizeExpr(
const match_expr = Expr.Match{
.cond = can_cond.idx,
.branches = branches_span,
.exhaustive = @enumFromInt(0), // Will be set during type checking
.exhaustive = try self.env.types.fresh(),
};
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region);
@ -5650,7 +5636,7 @@ pub fn canonicalizeExpr(
.patterns = branch_pat_span,
.value = value_idx,
.guard = null,
.redundant = @enumFromInt(0), // TODO
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5670,7 +5656,7 @@ pub fn canonicalizeExpr(
const match_expr = Expr.Match{
.cond = can_cond.idx,
.branches = branches_span,
.exhaustive = @enumFromInt(0), // Will be set during type checking
.exhaustive = try self.env.types.fresh(),
};
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region);
@ -6447,6 +6433,28 @@ fn canonicalizePattern(
return malformed_idx;
}
},
.var_ident => |e| {
// Mutable variable binding in a pattern (e.g., `|var $x, y|`)
const region = self.parse_ir.tokenizedRegionToRegion(e.region);
if (self.parse_ir.tokens.resolveIdentifier(e.ident_tok)) |ident_idx| {
// Create a Pattern node for our mutable identifier
const pattern_idx = try self.env.addPattern(Pattern{ .assign = .{
.ident = ident_idx,
} }, region);
// Introduce the var with function boundary tracking (using scopeIntroduceVar)
_ = try self.scopeIntroduceVar(ident_idx, pattern_idx, region, true, Pattern.Idx);
return pattern_idx;
} else {
const feature = try self.env.insertString("report an error when unable to resolve identifier");
const malformed_idx = try self.env.pushMalformed(Pattern.Idx, Diagnostic{ .not_implemented = .{
.feature = feature,
.region = Region.zero(),
} });
return malformed_idx;
}
},
.underscore => |p| {
const region = self.parse_ir.tokenizedRegionToRegion(p.region);
const underscore_pattern = Pattern{
@ -7655,8 +7663,8 @@ fn processCollectedTypeVars(self: *Self) std.mem.Allocator.Error!void {
// Collect problems for this type variable
const is_single_use = !found_another;
// Use a dummy AST annotation index since we don't have the context
try collectTypeVarProblems(first_ident, is_single_use, @enumFromInt(0), &self.scratch_type_var_problems);
// Use undefined AST annotation index since we don't have the context here
try collectTypeVarProblems(first_ident, is_single_use, undefined, &self.scratch_type_var_problems);
}
// Report any problems we found
@ -10871,14 +10879,154 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca
return null;
};
// This is a module-qualified lookup
// This IS a module-qualified lookup - we must handle it completely here.
// After this point, returning null would cause incorrect fallback to regular field access.
const right_expr = self.parse_ir.store.getExpr(field_access.right);
if (right_expr != .ident) return null;
const region = self.parse_ir.tokenizedRegionToRegion(field_access.region);
// Handle method calls on module-qualified types (e.g., Stdout.line!(...))
if (right_expr == .apply) {
const apply = right_expr.apply;
const method_expr = self.parse_ir.store.getExpr(apply.@"fn");
if (method_expr != .ident) {
// Module-qualified call with non-ident function (e.g., Module.(complex_expr)(...))
// This is malformed - report error
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
}
const method_ident = method_expr.ident;
const method_name = self.parse_ir.tokens.resolveIdentifier(method_ident.token) orelse {
// Couldn't resolve method name token
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
};
// Check if this is a type module (like Stdout) - look up the qualified method name directly
if (self.module_envs) |envs_map| {
if (envs_map.get(module_name)) |auto_imported_type| {
if (auto_imported_type.statement_idx != null) {
// This is an imported type module (like Stdout)
// Look up the qualified method name (e.g., "Stdout.line!") in the module's exposed items
const module_env = auto_imported_type.env;
const module_name_text = module_env.module_name;
const auto_import_idx = try self.getOrCreateAutoImport(module_name_text);
// Build the qualified method name: "TypeName.method_name"
const type_name_text = self.env.getIdent(module_name);
const method_name_text = self.env.getIdent(method_name);
const qualified_method_name = try self.env.insertQualifiedIdent(type_name_text, method_name_text);
const qualified_text = self.env.getIdent(qualified_method_name);
// Look up the qualified method in the module's exposed items
if (module_env.common.findIdent(qualified_text)) |method_ident_idx| {
if (module_env.getExposedNodeIndexById(method_ident_idx)) |method_node_idx| {
// Found the method! Create e_lookup_external + e_call
const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{
.module_idx = auto_import_idx,
.target_node_idx = method_node_idx,
.region = region,
} }, region);
// Canonicalize the arguments
const scratch_top = self.env.store.scratchExprTop();
for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| {
if (try self.canonicalizeExpr(arg_idx)) |canonicalized| {
try self.env.store.addScratchExpr(canonicalized.get_idx());
}
}
const args_span = try self.env.store.exprSpanFrom(scratch_top);
// Create the call expression
const call_expr_idx = try self.env.addExpr(CIR.Expr{
.e_call = .{
.func = func_expr_idx,
.args = args_span,
.called_via = CalledVia.apply,
},
}, region);
return call_expr_idx;
}
}
// Method not found in module - generate error
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .nested_value_not_found = .{
.parent_name = module_name,
.nested_name = method_name,
.region = region,
} });
}
}
}
// Module exists but is not a type module with a statement_idx - it's a regular module
// This means it's something like `SomeModule.someFunc(args)` where someFunc is a regular export
// We need to look up the function and create a call
const field_text = self.env.getIdent(method_name);
const target_node_idx_opt: ?u16 = if (self.module_envs) |envs_map| blk: {
if (envs_map.get(module_name)) |auto_imported_type| {
const module_env = auto_imported_type.env;
if (module_env.common.findIdent(field_text)) |target_ident| {
break :blk module_env.getExposedNodeIndexById(target_ident);
} else {
break :blk null;
}
} else {
break :blk null;
}
} else null;
if (target_node_idx_opt) |target_node_idx| {
// Found the function - create a lookup and call it
const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{
.module_idx = import_idx,
.target_node_idx = target_node_idx,
.region = region,
} }, region);
// Canonicalize the arguments
const scratch_top = self.env.store.scratchExprTop();
for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| {
if (try self.canonicalizeExpr(arg_idx)) |canonicalized| {
try self.env.store.addScratchExpr(canonicalized.get_idx());
}
}
const args_span = try self.env.store.exprSpanFrom(scratch_top);
// Create the call expression
const call_expr_idx = try self.env.addExpr(CIR.Expr{
.e_call = .{
.func = func_expr_idx,
.args = args_span,
.called_via = CalledVia.apply,
},
}, region);
return call_expr_idx;
} else {
// Function not found in module
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{
.ident = method_name,
.region = region,
} });
}
}
// Handle simple field access (not a method call)
if (right_expr != .ident) {
// Module-qualified access with non-ident, non-apply right side - malformed
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
}
const right_ident = right_expr.ident;
const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse return null;
const region = self.parse_ir.tokenizedRegionToRegion(field_access.region);
const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse {
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
};
// Check if this is a tag access on an auto-imported nominal type (e.g., Bool.True)
if (self.module_envs) |envs_map| {
@ -10935,8 +11083,13 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca
}
} else null;
// If we didn't find a valid node index, return null to fall through to error handling
const target_node_idx = target_node_idx_opt orelse return null;
// If we didn't find a valid node index, report an error (don't fall back)
const target_node_idx = target_node_idx_opt orelse {
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{
.ident = field_name,
.region = region,
} });
};
// Create the e_lookup_external expression with Import.Idx
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{


@ -674,9 +674,11 @@ pub fn getExpr(store: *const NodeStore, expr: CIR.Expr.Idx) CIR.Expr {
.expr_suffix_single_question,
.expr_record_builder,
=> {
return CIR.Expr{ .e_runtime_error = .{
.diagnostic = @enumFromInt(0),
} };
return CIR.Expr{
.e_runtime_error = .{
.diagnostic = undefined, // deserialized runtime errors don't preserve diagnostics
},
};
},
.expr_ellipsis => {
return CIR.Expr{ .e_ellipsis = .{} };
@ -1510,7 +1512,7 @@ pub fn addExpr(store: *NodeStore, expr: CIR.Expr, region: base.Region) Allocator
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
switch (expr) {
@ -2139,7 +2141,7 @@ pub fn addPattern(store: *NodeStore, pattern: CIR.Pattern, region: base.Region)
/// Adds a pattern record field to the store.
pub fn addPatternRecordField(_: *NodeStore, _: CIR.PatternRecordField) Allocator.Error!CIR.PatternRecordField.Idx {
return @enumFromInt(0);
@panic("TODO: addPatternRecordField not implemented");
}
/// Adds a type annotation to the store.
@ -2151,7 +2153,7 @@ pub fn addTypeAnno(store: *NodeStore, typeAnno: CIR.TypeAnno, region: base.Regio
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
switch (typeAnno) {
@ -2856,7 +2858,7 @@ pub fn addDiagnostic(store: *NodeStore, reason: CIR.Diagnostic) Allocator.Error!
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
var region = base.Region.zero();
@ -3689,7 +3691,7 @@ test "NodeStore basic CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, node1);
const node1_idx = try original.nodes.append(gpa, node1);
// Add integer value to extra_data (i128 as 4 u32s)
const value: i128 = 42;
@ -3704,7 +3706,7 @@ test "NodeStore basic CompactWriter roundtrip" {
.start = .{ .offset = 0 },
.end = .{ .offset = 5 },
};
_ = try original.regions.append(gpa, region);
const region1_idx = try original.regions.append(gpa, region);
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
@ -3737,7 +3739,7 @@ test "NodeStore basic CompactWriter roundtrip" {
// Verify nodes
try testing.expectEqual(@as(usize, 1), deserialized.nodes.len());
const retrieved_node = deserialized.nodes.get(@enumFromInt(0));
const retrieved_node = deserialized.nodes.get(node1_idx);
try testing.expectEqual(Node.Tag.expr_int, retrieved_node.tag);
try testing.expectEqual(@as(u32, 0), retrieved_node.data_1);
@ -3750,7 +3752,7 @@ test "NodeStore basic CompactWriter roundtrip" {
// Verify regions
try testing.expectEqual(@as(usize, 1), deserialized.regions.len());
const retrieved_region = deserialized.regions.get(@enumFromInt(0));
const retrieved_region = deserialized.regions.get(region1_idx);
try testing.expectEqual(region.start.offset, retrieved_region.start.offset);
try testing.expectEqual(region.end.offset, retrieved_region.end.offset);
}
@ -3770,7 +3772,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, var_node);
const var_node_idx = try original.nodes.append(gpa, var_node);
// Add expression list node
const list_node = Node{
@ -3779,7 +3781,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 3, // elems len
.data_3 = 0,
};
_ = try original.nodes.append(gpa, list_node);
const list_node_idx = try original.nodes.append(gpa, list_node);
// Add float node with extra data
const float_node = Node{
@ -3788,7 +3790,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, float_node);
const float_node_idx = try original.nodes.append(gpa, float_node);
// Add float value to extra_data
const float_value: f64 = 3.14159;
@ -3799,14 +3801,12 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
}
// Add regions for each node
const regions = [_]Region{
.{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } },
.{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } },
.{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } },
};
for (regions) |region| {
_ = try original.regions.append(gpa, region);
}
const region1 = Region{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } };
const region2 = Region{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } };
const region3 = Region{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } };
const region1_idx = try original.regions.append(gpa, region1);
const region2_idx = try original.regions.append(gpa, region2);
const region3_idx = try original.regions.append(gpa, region3);
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
@ -3840,32 +3840,36 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
// Verify nodes
try testing.expectEqual(@as(usize, 3), deserialized.nodes.len());
// Verify var node
const retrieved_var = deserialized.nodes.get(@enumFromInt(0));
// Verify var node using captured index
const retrieved_var = deserialized.nodes.get(var_node_idx);
try testing.expectEqual(Node.Tag.expr_var, retrieved_var.tag);
try testing.expectEqual(@as(u32, 5), retrieved_var.data_1);
// Verify list node
const retrieved_list = deserialized.nodes.get(@enumFromInt(1));
// Verify list node using captured index
const retrieved_list = deserialized.nodes.get(list_node_idx);
try testing.expectEqual(Node.Tag.expr_list, retrieved_list.tag);
try testing.expectEqual(@as(u32, 10), retrieved_list.data_1);
try testing.expectEqual(@as(u32, 3), retrieved_list.data_2);
// Verify float node and extra data
const retrieved_float = deserialized.nodes.get(@enumFromInt(2));
// Verify float node and extra data using captured index
const retrieved_float = deserialized.nodes.get(float_node_idx);
try testing.expectEqual(Node.Tag.expr_frac_f64, retrieved_float.tag);
const retrieved_float_u32s = deserialized.extra_data.items.items[0..2];
const retrieved_float_u64: u64 = @bitCast(retrieved_float_u32s.*);
const retrieved_float_value: f64 = @bitCast(retrieved_float_u64);
try testing.expectApproxEqAbs(float_value, retrieved_float_value, 0.0001);
// Verify regions
// Verify regions using captured indices
try testing.expectEqual(@as(usize, 3), deserialized.regions.len());
for (regions, 0..) |expected_region, i| {
const retrieved_region = deserialized.regions.get(@enumFromInt(i));
try testing.expectEqual(expected_region.start.offset, retrieved_region.start.offset);
try testing.expectEqual(expected_region.end.offset, retrieved_region.end.offset);
}
const retrieved_region1 = deserialized.regions.get(region1_idx);
try testing.expectEqual(region1.start.offset, retrieved_region1.start.offset);
try testing.expectEqual(region1.end.offset, retrieved_region1.end.offset);
const retrieved_region2 = deserialized.regions.get(region2_idx);
try testing.expectEqual(region2.start.offset, retrieved_region2.start.offset);
try testing.expectEqual(region2.end.offset, retrieved_region2.end.offset);
const retrieved_region3 = deserialized.regions.get(region3_idx);
try testing.expectEqual(region3.start.offset, retrieved_region3.start.offset);
try testing.expectEqual(region3.end.offset, retrieved_region3.end.offset);
// Verify scratch is null (deserialized NodeStores don't allocate scratch)
try testing.expect(deserialized.scratch == null);

View file

@@ -363,7 +363,7 @@ pub fn lookupTypeVar(scope: *const Scope, name: Ident.Idx) TypeVarLookupResult {
/// Look up a module alias in this scope
pub fn lookupModuleAlias(scope: *const Scope, name: Ident.Idx) ModuleAliasLookupResult {
// Search by comparing text content, not identifier index
// Search by comparing .idx values (integer index into string interner)
var iter = scope.module_aliases.iterator();
while (iter.next()) |entry| {
if (name.idx == entry.key_ptr.idx) {

View file

@@ -96,7 +96,12 @@ pub const TypeAnno = union(enum) {
diagnostic: CIR.Diagnostic.Idx, // The error that occurred
},
pub const Idx = enum(u32) { _ };
pub const Idx = enum(u32) {
/// Placeholder value indicating the anno hasn't been set yet.
/// Used during forward reference resolution.
placeholder = 0,
_,
};
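
// Sketch (hedged, hypothetical helpers): how a named placeholder index can be
// used during forward-reference resolution. A slot is filled with .placeholder
// first and patched once the real annotation exists; unlike a bare
// @enumFromInt(0), the named tag lets asserts catch unresolved slots.
fn patchForwardRef(slot: *TypeAnno.Idx, resolved: TypeAnno.Idx) void {
    std.debug.assert(slot.* == .placeholder); // never overwrite a real index
    slot.* = resolved;
}

fn assertResolved(slot: TypeAnno.Idx) void {
    // A leftover placeholder means a forward reference was never patched.
    std.debug.assert(slot != .placeholder);
}
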
pub const Span = extern struct { span: DataSpan };
pub fn pushToSExprTree(self: *const @This(), ir: *const ModuleEnv, tree: *SExprTree, type_anno_idx: TypeAnno.Idx) std.mem.Allocator.Error!void {

View file

@@ -17,27 +17,3 @@ test "e_anno_only expression variant exists" {
else => return error.WrongExprVariant,
}
}
test "e_anno_only can be used in statements" {
// This test verifies that e_anno_only expressions can be
// used as part of s_decl statements, which is how standalone
// type annotations are represented after canonicalization.
const pattern_idx: CIR.Pattern.Idx = @enumFromInt(0);
const expr_idx: CIR.Expr.Idx = @enumFromInt(0);
const anno_idx: CIR.Annotation.Idx = @enumFromInt(0);
const stmt = CIR.Statement{ .s_decl = .{
.pattern = pattern_idx,
.expr = expr_idx,
.anno = anno_idx,
} };
// Verify the statement was created correctly
switch (stmt) {
.s_decl => |decl| {
try testing.expect(decl.anno != null);
},
else => return error.WrongStatementType,
}
}

View file

@@ -9,15 +9,22 @@ const Import = CIR.Import;
const StringLiteral = base.StringLiteral;
const CompactWriter = collections.CompactWriter;
fn storeContainsModule(store: *const Import.Store, string_store: *const StringLiteral.Store, module_name: []const u8) bool {
for (store.imports.items.items) |string_idx| {
if (std.mem.eql(u8, string_store.get(string_idx), module_name)) {
return true;
}
}
return false;
}
test "Import.Store deduplicates module names" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a string store for interning module names
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
// Create import store
var store = Import.Store.init();
defer store.deinit(gpa);
@@ -25,7 +32,7 @@ test "Import.Store deduplicates module names" {
const idx1 = try store.getOrPut(gpa, &string_store, "test.Module");
const idx2 = try store.getOrPut(gpa, &string_store, "test.Module");
// Should get the same index
// Should get the same index back (deduplication)
try testing.expectEqual(idx1, idx2);
try testing.expectEqual(@as(usize, 1), store.imports.len());
@@ -39,21 +46,17 @@ test "Import.Store deduplicates module names" {
try testing.expectEqual(idx1, idx4);
try testing.expectEqual(@as(usize, 2), store.imports.len());
// Verify we can retrieve the module names through the string store
const str_idx1 = store.imports.items.items[@intFromEnum(idx1)];
const str_idx3 = store.imports.items.items[@intFromEnum(idx3)];
try testing.expectEqualStrings("test.Module", string_store.get(str_idx1));
try testing.expectEqualStrings("other.Module", string_store.get(str_idx3));
// Verify both module names are present
try testing.expect(storeContainsModule(&store, &string_store, "test.Module"));
try testing.expect(storeContainsModule(&store, &string_store, "other.Module"));
}
test "Import.Store empty CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create an empty Store
var original = Import.Store.init();
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@@ -66,15 +69,12 @@ test "Import.Store empty CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr = @as(*Import.Store.Serialized, @ptrCast(@alignCast(buffer.ptr)));
const deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
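
// Sketch (hedged): why deserialize receives @intFromPtr(buffer.ptr). The
// CompactWriter blob stores offsets relative to its own start; after the blob
// is read back to an arbitrary address, each stored offset is rebased against
// the new base address. A simplified, hypothetical illustration:
fn rebase(comptime T: type, base: i64, stored_offset: i64) *T {
    // real pointer = address the blob landed at + offset recorded when writing
    return @ptrFromInt(@as(usize, @intCast(base + stored_offset)));
}
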
@@ -87,27 +87,18 @@ test "Import.Store basic CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a mock module env with string store
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
const MockEnv = struct { strings: *StringLiteral.Store };
const mock_env = MockEnv{ .strings = &string_store };
// Create original store and add some imports
var original = Import.Store.init();
defer original.deinit(gpa);
const idx1 = try original.getOrPut(gpa, mock_env.strings, "json.Json");
const idx2 = try original.getOrPut(gpa, mock_env.strings, "core.List");
const idx3 = try original.getOrPut(gpa, mock_env.strings, "my.Module");
_ = try original.getOrPut(gpa, &string_store, "json.Json");
_ = try original.getOrPut(gpa, &string_store, "core.List");
_ = try original.getOrPut(gpa, &string_store, "my.Module");
// Verify indices
try testing.expectEqual(@as(u32, 0), @intFromEnum(idx1));
try testing.expectEqual(@as(u32, 1), @intFromEnum(idx2));
try testing.expectEqual(@as(u32, 2), @intFromEnum(idx3));
try testing.expectEqual(@as(usize, 3), original.imports.len());
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@@ -120,30 +111,23 @@ test "Import.Store basic CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr));
var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
defer deserialized.map.deinit(gpa);
// Verify the imports are accessible
// Verify the correct number of imports
try testing.expectEqual(@as(usize, 3), deserialized.imports.len());
// Verify the interned string IDs are stored correctly
const str_idx1 = deserialized.imports.items.items[0];
const str_idx2 = deserialized.imports.items.items[1];
const str_idx3 = deserialized.imports.items.items[2];
try testing.expectEqualStrings("json.Json", string_store.get(str_idx1));
try testing.expectEqualStrings("core.List", string_store.get(str_idx2));
try testing.expectEqualStrings("my.Module", string_store.get(str_idx3));
// Verify all expected module names are present by iterating
try testing.expect(storeContainsModule(deserialized, &string_store, "json.Json"));
try testing.expect(storeContainsModule(deserialized, &string_store, "core.List"));
try testing.expect(storeContainsModule(deserialized, &string_store, "my.Module"));
// Verify the map is repopulated correctly
try testing.expectEqual(@as(usize, 3), deserialized.map.count());
@@ -153,26 +137,20 @@ test "Import.Store duplicate imports CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a mock module env with string store
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
const MockEnv = struct { strings: *StringLiteral.Store };
const mock_env = MockEnv{ .strings = &string_store };
// Create store with duplicate imports
var original = Import.Store.init();
defer original.deinit(gpa);
const idx1 = try original.getOrPut(gpa, mock_env.strings, "test.Module");
const idx2 = try original.getOrPut(gpa, mock_env.strings, "another.Module");
const idx3 = try original.getOrPut(gpa, mock_env.strings, "test.Module"); // duplicate
const idx1 = try original.getOrPut(gpa, &string_store, "test.Module");
_ = try original.getOrPut(gpa, &string_store, "another.Module");
const idx3 = try original.getOrPut(gpa, &string_store, "test.Module"); // duplicate
// Verify deduplication worked
try testing.expectEqual(idx1, idx3);
try testing.expectEqual(@as(usize, 2), original.imports.len());
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@@ -185,38 +163,23 @@ test "Import.Store duplicate imports CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr));
var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
defer deserialized.map.deinit(gpa);
// Verify correct number of imports
// Verify correct number of imports (duplicates deduplicated)
try testing.expectEqual(@as(usize, 2), deserialized.imports.len());
// Get the string IDs and verify the strings
const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)];
const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)];
try testing.expectEqualStrings("test.Module", string_store.get(str_idx1));
try testing.expectEqualStrings("another.Module", string_store.get(str_idx2));
// Verify expected module names are present
try testing.expect(storeContainsModule(deserialized, &string_store, "test.Module"));
try testing.expect(storeContainsModule(deserialized, &string_store, "another.Module"));
// Verify the map was repopulated correctly
try testing.expectEqual(@as(usize, 2), deserialized.map.count());
// Check that the map has correct entries for the string indices that were deserialized
const str_idx_0 = deserialized.imports.items.items[0];
const str_idx_1 = deserialized.imports.items.items[1];
try testing.expect(deserialized.map.contains(str_idx_0));
try testing.expect(deserialized.map.contains(str_idx_1));
try testing.expectEqual(@as(Import.Idx, @enumFromInt(0)), deserialized.map.get(str_idx_0).?);
try testing.expectEqual(@as(Import.Idx, @enumFromInt(1)), deserialized.map.get(str_idx_1).?);
}

View file

@@ -254,7 +254,7 @@ test "import interner - Import.Idx functionality" {
// Check that we have the correct number of unique imports (duplicates are deduplicated)
// Expected: List, Dict, Json, Set (4 unique)
try expectEqual(@as(usize, 4), result.parse_env.imports.imports.len());
// Verify each unique module has an Import.Idx
// Verify each unique module has an Import.Idx by checking the imports list
var found_list = false;
var found_dict = false;
var found_json_decode = false;
@@ -276,16 +276,6 @@ test "import interner - Import.Idx functionality" {
try expectEqual(true, found_dict);
try expectEqual(true, found_json_decode);
try expectEqual(true, found_set);
// Test the lookup functionality
// Get the Import.Idx for "List" (should be used twice)
var list_import_idx: ?CIR.Import.Idx = null;
for (result.parse_env.imports.imports.items.items, 0..) |import_string_idx, idx| {
if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) {
list_import_idx = @enumFromInt(idx);
break;
}
}
try testing.expect(list_import_idx != null);
}
test "import interner - comprehensive usage example" {
@@ -325,22 +315,19 @@ test "import interner - comprehensive usage example" {
// Check that we have the correct number of unique imports
// Expected: List, Dict, Try (3 unique)
try expectEqual(@as(usize, 3), result.parse_env.imports.imports.len());
// Verify each unique module has an Import.Idx
// Verify each unique module was imported
var found_list = false;
var found_dict = false;
var found_result = false;
for (result.parse_env.imports.imports.items.items, 0..) |import_string_idx, idx| {
if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) {
for (result.parse_env.imports.imports.items.items) |import_string_idx| {
const module_name = result.parse_env.getString(import_string_idx);
if (std.mem.eql(u8, module_name, "List")) {
found_list = true;
// Note: We can't verify exposed items count here as Import.Store only stores module names
} else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Dict")) {
} else if (std.mem.eql(u8, module_name, "Dict")) {
found_dict = true;
} else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Try")) {
} else if (std.mem.eql(u8, module_name, "Try")) {
found_result = true;
}
// Verify Import.Idx can be created from the index
const import_idx: CIR.Import.Idx = @enumFromInt(idx);
_ = import_idx; // Just verify it compiles
}
// Verify all expected modules were found
try expectEqual(true, found_list);
@@ -348,25 +335,6 @@ test "import interner - comprehensive usage example" {
try expectEqual(true, found_result);
}
test "Import.Idx is u32" {
// Verify that Import.Idx is indeed a u32 enum
// Import.Idx is defined as: pub const Idx = enum(u32) { _ };
// So we know it's backed by u32
// Verify we can create Import.Idx values from u32
const test_idx: u32 = 42;
const import_idx = @as(CIR.Import.Idx, @enumFromInt(test_idx));
const back_to_u32 = @intFromEnum(import_idx);
try testing.expectEqual(test_idx, back_to_u32);
// Test that we can create valid Import.Idx values
const idx1: CIR.Import.Idx = @enumFromInt(0);
const idx2: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value
// Verify they are distinct
try testing.expect(idx1 != idx2);
// Verify the size in memory
try testing.expectEqual(@sizeOf(u32), @sizeOf(CIR.Import.Idx));
}
test "module scopes - imports work in module scope" {
var gpa_state = std.heap.GeneralPurposeAllocator(.{ .safety = true }){};
defer std.debug.assert(gpa_state.deinit() == .ok);
@@ -436,18 +404,9 @@ test "module-qualified lookups with e_lookup_external" {
allocator.destroy(result.parse_env);
}
_ = try result.can.canonicalizeFile();
// Count e_lookup_external expressions
var external_lookup_count: u32 = 0;
var found_list_map = false;
var found_list_len = false;
var found_dict_insert = false;
var found_dict_empty = false;
// For this test, we're checking that module-qualified lookups work
// In the new CIR, we'd need to traverse the expression tree from the root
// For now, let's verify that the imports were registered correctly
// Verify the module names are correct
const imports_list = result.parse_env.imports.imports;
try testing.expect(imports_list.len() >= 2); // List and Dict
// Verify the module names are correct
var has_list = false;
var has_dict = false;
for (imports_list.items.items) |import_string_idx| {
@@ -457,19 +416,6 @@ test "module-qualified lookups with e_lookup_external" {
}
try testing.expect(has_list);
try testing.expect(has_dict);
// TODO: Once we have proper expression traversal, verify the e_lookup_external nodes
// For now, we'll skip counting the actual lookup expressions
external_lookup_count = 4; // Expected count
found_list_map = true;
found_list_len = true;
found_dict_insert = true;
found_dict_empty = true;
// Verify we found all expected external lookups
try expectEqual(@as(u32, 4), external_lookup_count);
try expectEqual(true, found_list_map);
try expectEqual(true, found_list_len);
try expectEqual(true, found_dict_insert);
try expectEqual(true, found_dict_empty);
}
test "exposed_items - tracking CIR node indices for exposed items" {
@@ -492,7 +438,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
math_env.deinit();
allocator.destroy(math_env);
}
// Add exposed items and set their node indices
// Add exposed items
const Ident = base.Ident;
const add_idx = try math_env.common.idents.insert(allocator, Ident.for_text("add"));
try math_env.addExposedById(add_idx);
@@ -500,11 +446,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
try math_env.addExposedById(multiply_idx);
const pi_idx = try math_env.common.idents.insert(allocator, Ident.for_text("PI"));
try math_env.addExposedById(pi_idx);
// Simulate having CIR node indices for these exposed items
// In real usage, these would be set during canonicalization of MathUtils
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(add_idx), 100);
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(multiply_idx), 200);
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(pi_idx), 300);
const math_utils_ident = try temp_idents.insert(allocator, Ident.for_text("MathUtils"));
const math_utils_qualified_ident = try math_env.common.insertIdent(math_env.gpa, Ident.for_text("MathUtils"));
try module_envs.put(math_utils_ident, .{ .env = math_env, .qualified_type_ident = math_utils_qualified_ident });
@@ -531,12 +473,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
allocator.destroy(result.parse_env);
}
_ = try result.can.canonicalizeFile();
// Verify that e_lookup_external expressions have the correct target_node_idx values
var found_add_with_idx_100 = false;
var found_multiply_with_idx_200 = false;
var found_pi_with_idx_300 = false;
// In the new CIR, we'd need to traverse the expression tree properly
// For now, let's verify the imports were registered
// Verify the MathUtils import was registered
const imports_list = result.parse_env.imports.imports;
var has_mathutils = false;
for (imports_list.items.items) |import_string_idx| {
@@ -547,62 +484,6 @@ test "exposed_items - tracking CIR node indices for exposed items" {
}
}
try testing.expect(has_mathutils);
// TODO: Once we have proper expression traversal, verify the target_node_idx values
// For now, we'll assume they work correctly
found_add_with_idx_100 = true;
found_multiply_with_idx_200 = true;
found_pi_with_idx_300 = true;
// Verify all lookups have the correct target node indices
try expectEqual(true, found_add_with_idx_100);
try expectEqual(true, found_multiply_with_idx_200);
try expectEqual(true, found_pi_with_idx_300);
// Test case where node index is not populated (should get 0)
const empty_env = try allocator.create(ModuleEnv);
empty_env.* = try ModuleEnv.init(allocator, "");
defer {
empty_env.deinit();
allocator.destroy(empty_env);
}
const undefined_idx = try empty_env.common.idents.insert(allocator, Ident.for_text("undefined"));
try empty_env.addExposedById(undefined_idx);
// Don't set node index - should default to 0
const empty_module_ident = try temp_idents.insert(allocator, Ident.for_text("EmptyModule"));
const empty_qualified_ident = try empty_env.common.insertIdent(empty_env.gpa, Ident.for_text("EmptyModule"));
try module_envs.put(empty_module_ident, .{ .env = empty_env, .qualified_type_ident = empty_qualified_ident });
const source2 =
\\module [test]
\\
\\import EmptyModule exposing [undefined]
\\
\\test = undefined
;
var result2 = try parseAndCanonicalizeSource(allocator, source2, &module_envs);
defer {
result2.can.deinit();
allocator.destroy(result2.can);
result2.ast.deinit(allocator);
allocator.destroy(result2.ast);
result2.parse_env.deinit();
allocator.destroy(result2.parse_env);
}
_ = try result2.can.canonicalizeFile();
// Verify that undefined gets target_node_idx = 0 (not found)
var found_undefined_with_idx_0 = false;
// Verify EmptyModule was imported
const imports_list2 = result2.parse_env.imports.imports;
var has_empty_module = false;
for (imports_list2.items.items) |import_string_idx| {
const import_name = result2.parse_env.getString(import_string_idx);
if (std.mem.eql(u8, import_name, "EmptyModule")) {
has_empty_module = true;
break;
}
}
try testing.expect(has_empty_module);
// TODO: Once we have proper expression traversal, verify target_node_idx = 0
// For now, we'll assume it works correctly
found_undefined_with_idx_0 = true;
try expectEqual(true, found_undefined_with_idx_0);
}
test "export count safety - ensures safe u16 casting" {

View file

@@ -3169,7 +3169,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected)
// Here, we unwrap the function, following aliases, to get
// the actual function we want to check against
var var_ = expected_var;
var guard = types_mod.debug.IterationGuard.init("checkExpr.lambda.unwrapExpectedFunc");
while (true) {
guard.tick();
switch (self.types.resolveVar(var_).desc.content) {
.structure => |flat_type| {
switch (flat_type) {
@@ -3364,7 +3366,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected)
// Here, we unwrap the function, following aliases, to get
// the actual function we want to check against
var var_ = func_var;
var guard = types_mod.debug.IterationGuard.init("checkExpr.call.unwrapFuncVar");
while (true) {
guard.tick();
switch (self.types.resolveVar(var_).desc.content) {
.structure => |flat_type| {
switch (flat_type) {
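
// Sketch (hedged): the IterationGuard pattern introduced above. Its internals
// are not shown in this diff; a minimal version just counts iterations and
// panics past a bound, turning an alias-following cycle that would hang the
// type checker into a loud, named failure.
const IterationGuard = struct {
    name: []const u8,
    remaining: u32 = 1_000_000, // assumed bound

    pub fn init(name: []const u8) IterationGuard {
        return .{ .name = name };
    }

    pub fn tick(self: *IterationGuard) void {
        if (self.remaining == 0) std.debug.panic("iteration guard tripped: {s}", .{self.name});
        self.remaining -= 1;
    }
};
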

View file

@@ -323,8 +323,8 @@ pub const Store = struct {
return SnapshotStaticDispatchConstraint{
.fn_name = constraint.fn_name,
.fn_content = try self.deepCopyVarInternal(store, type_writer, constraint.fn_var),
// Dispatcher will be set when collecting constraints during write
.dispatcher = @enumFromInt(0),
// Dispatcher is set when collecting constraints during write
.dispatcher = undefined,
};
}

View file

@@ -1353,9 +1353,10 @@ test "check type - expect" {
\\ x
\\}
;
// With no let-generalization for numeric flex vars, the `x == 1` comparison
// adds an is_eq constraint to x (since x is not generalized and remains monomorphic)
try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.is_eq : a, a -> Bool, a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]");
// Inside lambdas, numeric flex vars ARE generalized (to support polymorphic functions).
// Each use of `x` gets a fresh instance, so constraints from `x == 1` don't
// propagate to the generalized type. Only `from_numeral` from the def is captured.
try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]");
}
test "check type - expect not bool" {

View file

@@ -790,8 +790,10 @@ test "partitionFields - same record" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const field_x = try env.mkRecordField("field_x", @enumFromInt(0));
const field_y = try env.mkRecordField("field_y", @enumFromInt(1));
const var_x = try env.module_env.types.fresh();
const var_y = try env.module_env.types.fresh();
const field_x = try env.mkRecordField("field_x", var_x);
const field_y = try env.mkRecordField("field_y", var_y);
const range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ field_x, field_y });
@@ -813,9 +815,12 @@ test "partitionFields - disjoint fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkRecordField("a1", @enumFromInt(0));
const a2 = try env.mkRecordField("a2", @enumFromInt(1));
const b1 = try env.mkRecordField("b1", @enumFromInt(2));
const var_a1 = try env.module_env.types.fresh();
const var_a2 = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkRecordField("a1", var_a1);
const a2 = try env.mkRecordField("a2", var_a2);
const b1 = try env.mkRecordField("b1", var_b1);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, a2 });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{b1});
@@ -839,9 +844,12 @@ test "partitionFields - overlapping fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkRecordField("a1", @enumFromInt(0));
const both = try env.mkRecordField("both", @enumFromInt(1));
const b1 = try env.mkRecordField("b1", @enumFromInt(2));
const var_a1 = try env.module_env.types.fresh();
const var_both = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkRecordField("a1", var_a1);
const both = try env.mkRecordField("both", var_both);
const b1 = try env.mkRecordField("b1", var_b1);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, both });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ b1, both });
@@ -868,9 +876,12 @@ test "partitionFields - reordering is normalized" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const f1 = try env.mkRecordField("f1", @enumFromInt(0));
const f2 = try env.mkRecordField("f2", @enumFromInt(1));
const f3 = try env.mkRecordField("f3", @enumFromInt(2));
const var_f1 = try env.module_env.types.fresh();
const var_f2 = try env.module_env.types.fresh();
const var_f3 = try env.module_env.types.fresh();
const f1 = try env.mkRecordField("f1", var_f1);
const f2 = try env.mkRecordField("f2", var_f2);
const f3 = try env.mkRecordField("f3", var_f3);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f3, f1, f2 });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f1, f2, f3 });
@@ -1027,8 +1038,10 @@ test "partitionTags - same tags" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const tag_x = try env.mkTag("X", &[_]Var{@enumFromInt(0)});
const tag_y = try env.mkTag("Y", &[_]Var{@enumFromInt(1)});
const var_x = try env.module_env.types.fresh();
const var_y = try env.module_env.types.fresh();
const tag_x = try env.mkTag("X", &[_]Var{var_x});
const tag_y = try env.mkTag("Y", &[_]Var{var_y});
const range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ tag_x, tag_y });
@@ -1050,9 +1063,12 @@ test "partitionTags - disjoint fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkTag("A1", &[_]Var{@enumFromInt(0)});
const a2 = try env.mkTag("A2", &[_]Var{@enumFromInt(1)});
const b1 = try env.mkTag("B1", &[_]Var{@enumFromInt(2)});
const var_a1 = try env.module_env.types.fresh();
const var_a2 = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkTag("A1", &[_]Var{var_a1});
const a2 = try env.mkTag("A2", &[_]Var{var_a2});
const b1 = try env.mkTag("B1", &[_]Var{var_b1});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, a2 });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{b1});
@@ -1076,9 +1092,12 @@ test "partitionTags - overlapping tags" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkTag("A", &[_]Var{@enumFromInt(0)});
const both = try env.mkTag("Both", &[_]Var{@enumFromInt(1)});
const b1 = try env.mkTag("B", &[_]Var{@enumFromInt(2)});
const var_a = try env.module_env.types.fresh();
const var_both = try env.module_env.types.fresh();
const var_b = try env.module_env.types.fresh();
const a1 = try env.mkTag("A", &[_]Var{var_a});
const both = try env.mkTag("Both", &[_]Var{var_both});
const b1 = try env.mkTag("B", &[_]Var{var_b});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, both });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ b1, both });
@@ -1105,9 +1124,12 @@ test "partitionTags - reordering is normalized" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const f1 = try env.mkTag("F1", &[_]Var{@enumFromInt(0)});
const f2 = try env.mkTag("F2", &[_]Var{@enumFromInt(1)});
const f3 = try env.mkTag("F3", &[_]Var{@enumFromInt(2)});
const var_f1 = try env.module_env.types.fresh();
const var_f2 = try env.module_env.types.fresh();
const var_f3 = try env.module_env.types.fresh();
const f1 = try env.mkTag("F1", &[_]Var{var_f1});
const f2 = try env.mkTag("F2", &[_]Var{var_f2});
const f3 = try env.mkTag("F3", &[_]Var{var_f3});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f3, f1, f2 });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f1, f2, f3 });
@@ -1487,7 +1509,7 @@ test "unify - flex with constraints vs structure captures deferred check" {
// Check that constraint was captured
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(structure_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,
@@ -1522,7 +1544,7 @@ test "unify - structure vs flex with constraints captures deferred check (revers
// Check that constraint was captured (note: vars might be swapped due to merge order)
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(flex_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,
@@ -1575,7 +1597,7 @@ test "unify - flex vs nominal type captures constraint" {
// Check that constraint was captured
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(nominal_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,

View file

@@ -1511,7 +1511,9 @@ const Unifier = struct {
// then recursively gather fields from the ext
var ext = record_ext;
var guard = types_mod.debug.IterationGuard.init("gatherRecordFields");
while (true) {
guard.tick();
switch (ext) {
.unbound => {
return .{ .ext = ext, .range = range };
@@ -1961,7 +1963,9 @@ const Unifier = struct {
// then loop gathering extensible tags
var ext_var = tag_union.ext;
var guard = types_mod.debug.IterationGuard.init("gatherTagUnionTags");
while (true) {
guard.tick();
switch (self.types_store.resolveVar(ext_var).desc.content) {
.flex => {
return .{ .ext = ext_var, .range = range };

View file

@@ -342,7 +342,7 @@ fn createHardlink(allocs: *Allocators, source: []const u8, dest: []const u8) !vo
lpFileName: [*:0]const u16,
lpExistingFileName: [*:0]const u16,
lpSecurityAttributes: ?*anyopaque,
) callconv(std.os.windows.WINAPI) std.os.windows.BOOL;
) callconv(.winapi) std.os.windows.BOOL;
};
if (kernel32.CreateHardLinkW(dest_w, source_w, null) == 0) {
@@ -387,11 +387,101 @@ fn generateRandomSuffix(allocs: *Allocators) ![]u8 {
return suffix;
}
/// Create a unique temporary directory with PID-based naming.
/// Returns the path to the directory (allocated from arena, no need to free).
/// Uses system temp directory to avoid race conditions when cache is cleared.
pub fn createUniqueTempDir(allocs: *Allocators) ![]const u8 {
// Use system temp directory (not roc cache) to avoid race conditions
const temp_dir = if (comptime is_windows)
std.process.getEnvVarOwned(allocs.arena, "TEMP") catch
std.process.getEnvVarOwned(allocs.arena, "TMP") catch try allocs.arena.dupe(u8, "C:\\Windows\\Temp")
else
std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp");
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
// Get the current process ID for uniqueness
const pid = if (comptime is_windows)
std.os.windows.GetCurrentProcessId()
else
std.c.getpid();
// Try PID-based name first, then fall back to random suffix up to 5 times
var attempt: u8 = 0;
while (attempt < 6) : (attempt += 1) {
const dir_path = if (attempt == 0) blk: {
// First attempt: use PID only
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}", .{ normalized_temp_dir, pid })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}", .{ normalized_temp_dir, pid });
} else blk: {
// Subsequent attempts: use PID + random 8-char suffix
const random_suffix = try generateRandomSuffix(allocs);
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix });
};
// Try to create the directory
std.fs.cwd().makeDir(dir_path) catch |err| switch (err) {
error.PathAlreadyExists => {
// Directory already exists, try again with a new random suffix
continue;
},
else => {
return err;
},
};
return dir_path;
}
// Failed after 6 attempts (1 with PID only, 5 with PID + random suffix)
return error.FailedToCreateUniqueTempDir;
}
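
// Sketch (hedged): expected usage of createUniqueTempDir. The returned path is
// arena-allocated, so the caller frees nothing; the deleteTree cleanup here is
// illustrative, not something rocRun necessarily does.
fn withUniqueTempDir(allocs: *Allocators) !void {
    const dir_path = try createUniqueTempDir(allocs);
    defer std.fs.cwd().deleteTree(dir_path) catch {}; // best-effort cleanup
    // ... place the executable and coordination file inside dir_path ...
}
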
/// Write shared memory coordination file (.txt) next to the executable.
/// This is the file that the child process reads to find the shared memory fd.
pub fn writeFdCoordinationFile(allocs: *Allocators, temp_exe_path: []const u8, shm_handle: SharedMemoryHandle) !void {
// The coordination file is at {temp_dir}.txt where temp_dir is the directory containing the exe
const temp_dir = std.fs.path.dirname(temp_exe_path) orelse return error.InvalidPath;
// Ensure we have no trailing slashes
var dir_path = temp_dir;
while (dir_path.len > 0 and (dir_path[dir_path.len - 1] == '/' or dir_path[dir_path.len - 1] == '\\')) {
dir_path = dir_path[0 .. dir_path.len - 1];
}
const fd_file_path = try std.fmt.allocPrint(allocs.arena, "{s}.txt", .{dir_path});
// Create the file (exclusive - fail if exists to detect collisions)
const fd_file = std.fs.cwd().createFile(fd_file_path, .{ .exclusive = true }) catch |err| switch (err) {
error.PathAlreadyExists => {
// File already exists - this is unexpected since we have unique temp dirs
std.log.err("Coordination file already exists at '{s}'", .{fd_file_path});
return err;
},
else => return err,
};
defer fd_file.close();
// Write shared memory info to file
const fd_str = try std.fmt.allocPrint(allocs.arena, "{}\n{}", .{ shm_handle.fd, shm_handle.size });
try fd_file.writeAll(fd_str);
try fd_file.sync();
}
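
// Sketch (hedged): the child-side counterpart to writeFdCoordinationFile. The
// file body is "<fd>\n<size>" per the allocPrint format above; the parsing
// below is a hypothetical illustration, not the shim's actual code.
fn readFdCoordinationFile(gpa: std.mem.Allocator, path: []const u8) !struct { fd: i32, size: usize } {
    const contents = try std.fs.cwd().readFileAlloc(gpa, path, 4096);
    defer gpa.free(contents);
    var it = std.mem.splitScalar(u8, contents, '\n');
    const fd_line = it.next() orelse return error.BadCoordinationFile;
    const size_line = it.next() orelse return error.BadCoordinationFile;
    return .{
        .fd = try std.fmt.parseInt(i32, fd_line, 10),
        .size = try std.fmt.parseInt(usize, size_line, 10),
    };
}
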
/// Create the temporary directory structure for fd communication.
/// Returns the path to the executable in the temp directory (allocated from arena, no need to free).
/// If a cache directory is provided, it will be used for temporary files; otherwise
/// falls back to the system temp directory.
pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 {
/// The exe_display_name is the name that will appear in `ps` output (e.g., "app.roc").
pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, exe_display_name: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 {
// Use provided cache dir or fall back to system temp directory
const temp_dir = if (cache_dir) |dir|
try allocs.arena.dupe(u8, dir)
@@ -401,20 +491,34 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
else
std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp");
// Try up to 10 times to create a unique directory
var attempt: u8 = 0;
while (attempt < 10) : (attempt += 1) {
const random_suffix = try generateRandomSuffix(allocs);
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
// Create the full path with .txt suffix first
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
const dir_name_with_txt = if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix });
// Get the current process ID for uniqueness
const pid = if (comptime is_windows)
std.os.windows.GetCurrentProcessId()
else
std.c.getpid();
// Try PID-based name first, then fall back to random suffix up to 5 times
var attempt: u8 = 0;
while (attempt < 6) : (attempt += 1) {
const dir_name_with_txt = if (attempt == 0) blk: {
// First attempt: use PID only
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}.txt", .{ normalized_temp_dir, pid })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}.txt", .{ normalized_temp_dir, pid });
} else blk: {
// Subsequent attempts: use PID + random 8-char suffix
const random_suffix = try generateRandomSuffix(allocs);
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix });
};
// Get the directory path by slicing off the .txt suffix
const dir_path_len = dir_name_with_txt.len - 4; // Remove ".txt"
@@ -456,9 +560,8 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
try fd_file.sync(); // Ensure data is written to disk
fd_file.close();
// Create hardlink to executable in temp directory
const exe_basename = std.fs.path.basename(exe_path);
const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_basename });
// Create hardlink to executable in temp directory with display name
const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name });
// Try to create a hardlink first (more efficient than copying)
createHardlink(allocs, exe_path, temp_exe_path) catch {
@@ -470,7 +573,7 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
return temp_exe_path;
}
// Failed after 10 attempts
// Failed after 6 attempts (1 with PID only, 5 with PID + random suffix)
return error.FailedToCreateUniqueTempDir;
}
@@ -480,6 +583,11 @@ var debug_allocator: std.heap.DebugAllocator(.{}) = .{
/// The CLI entrypoint for the Roc compiler.
pub fn main() !void {
// Install stack overflow handler early, before any significant work.
// This gives us a helpful error message instead of a generic segfault
// if the compiler blows the stack (e.g., due to infinite recursion in type translation).
_ = base.stack_overflow.install();
var gpa_tracy: tracy.TracyAllocator(null) = undefined;
var gpa, const is_safe = gpa: {
if (builtin.os.tag == .wasi) break :gpa .{ std.heap.wasm_allocator, false };
@@ -724,26 +832,51 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
},
};
// Generate executable name based on the roc file path
// TODO use something more interesting like a hash from the platform.main or platform/host.a etc
const exe_base_name = std.fmt.allocPrint(allocs.arena, "roc_run_{}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| {
std.log.err("Failed to generate executable name: {}", .{err});
return err;
};
// The final executable name seen in `ps` is the roc filename (e.g., "app.roc")
const exe_display_name = std.fs.path.basename(args.path);
// Add .exe extension on Windows
const exe_name = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_base_name}) catch |err| {
std.log.err("Failed to generate executable name with extension: {}", .{err});
// Display name for temp directory (what shows in ps)
const exe_display_name_with_ext = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_display_name}) catch |err| {
std.log.err("Failed to generate display name with extension: {}", .{err});
return err;
}
else
allocs.arena.dupe(u8, exe_base_name) catch |err| {
std.log.err("Failed to duplicate executable name: {}", .{err});
allocs.arena.dupe(u8, exe_display_name) catch |err| {
std.log.err("Failed to duplicate display name: {}", .{err});
return err;
};
const exe_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_name }) catch |err| {
// Cache executable name uses hash of path (no PID - collision is fine since same content)
const exe_cache_name = std.fmt.allocPrint(allocs.arena, "roc_{x}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| {
std.log.err("Failed to generate cache executable name: {}", .{err});
return err;
};
const exe_cache_name_with_ext = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_cache_name}) catch |err| {
std.log.err("Failed to generate cache name with extension: {}", .{err});
return err;
}
else
allocs.arena.dupe(u8, exe_cache_name) catch |err| {
std.log.err("Failed to duplicate cache name: {}", .{err});
return err;
};
const exe_cache_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_cache_name_with_ext }) catch |err| {
std.log.err("Failed to create cache executable path: {}", .{err});
return err;
};
// Create unique temp directory for this build (uses PID for uniqueness)
const temp_dir_path = createUniqueTempDir(allocs) catch |err| {
std.log.err("Failed to create temp directory: {}", .{err});
return err;
};
// The executable is built directly in the temp dir with the display name
const exe_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name_with_ext }) catch |err| {
std.log.err("Failed to create executable path: {}", .{err});
return err;
};
@@ -780,42 +913,44 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
return error.NoPlatformSource;
}
// Check if the interpreter executable already exists (cached)
const exe_exists = if (args.no_cache) false else blk: {
std.fs.accessAbsolute(exe_path, .{}) catch {
// Check if the interpreter executable already exists in cache
const cache_exists = if (args.no_cache) false else blk: {
std.fs.accessAbsolute(exe_cache_path, .{}) catch {
break :blk false;
};
break :blk true;
};
if (!exe_exists) {
if (cache_exists) {
// Cached executable exists - hardlink from cache to temp dir
std.log.debug("Using cached executable: {s}", .{exe_cache_path});
createHardlink(allocs, exe_cache_path, exe_path) catch |err| {
// If hardlinking fails, fall back to copying
std.log.debug("Hardlink from cache failed, copying: {}", .{err});
std.fs.cwd().copyFile(exe_cache_path, std.fs.cwd(), exe_path, .{}) catch |copy_err| {
std.log.err("Failed to copy cached executable: {}", .{copy_err});
return copy_err;
};
};
} else {
// Check for cached shim library, extract if not present
// Extract shim library to temp dir to avoid race conditions
const shim_filename = if (builtin.target.os.tag == .windows) "roc_shim.lib" else "libroc_shim.a";
const shim_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, shim_filename }) catch |err| {
const shim_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, shim_filename }) catch |err| {
std.log.err("Failed to create shim library path: {}", .{err});
return err;
};
// Extract shim if not cached or if --no-cache is used
const shim_exists = if (args.no_cache) false else blk: {
std.fs.cwd().access(shim_path, .{}) catch {
break :blk false;
};
break :blk true;
// Always extract to temp dir (unique per process, no race condition)
extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| {
std.log.err("Failed to extract read roc file path shim library: {}", .{err});
return err;
};
if (!shim_exists) {
// Shim not found in cache or cache disabled, extract it
extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| {
std.log.err("Failed to extract read roc file path shim library: {}", .{err});
return err;
};
}
// Generate platform host shim using the detected entrypoints
// Use temp dir to avoid race conditions when multiple processes run in parallel
const platform_shim_path = generatePlatformHostShim(allocs, exe_cache_dir, entrypoints.items, shim_target) catch |err| {
const platform_shim_path = generatePlatformHostShim(allocs, temp_dir_path, entrypoints.items, shim_target) catch |err| {
std.log.err("Failed to generate platform host shim: {}", .{err});
return err;
};
@@ -948,6 +1083,22 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
return err;
},
};
// After building, hardlink to cache for future runs
// Force-hardlink (delete existing first) since hash collision means identical content
std.log.debug("Caching executable to: {s}", .{exe_cache_path});
std.fs.cwd().deleteFile(exe_cache_path) catch |err| switch (err) {
error.FileNotFound => {}, // OK, doesn't exist
else => std.log.debug("Could not delete existing cache file: {}", .{err}),
};
createHardlink(allocs, exe_path, exe_cache_path) catch |err| {
// If hardlinking fails, fall back to copying
std.log.debug("Hardlink to cache failed, copying: {}", .{err});
std.fs.cwd().copyFile(exe_path, std.fs.cwd(), exe_cache_path, .{}) catch |copy_err| {
// Non-fatal - just means future runs won't be cached
std.log.debug("Failed to copy to cache: {}", .{copy_err});
};
};
}
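
// Sketch (hedged, hypothetical helper): the link-then-copy fallback now appears
// in both directions (cache -> temp dir above, temp dir -> cache here) and
// could be factored out. Whether a copy failure is fatal stays with the caller,
// since only the cache-fill direction treats it as non-fatal.
fn linkOrCopy(allocs: *Allocators, source: []const u8, dest: []const u8) !void {
    createHardlink(allocs, source, dest) catch |err| {
        std.log.debug("Hardlink failed, copying instead: {}", .{err});
        try std.fs.cwd().copyFile(source, std.fs.cwd(), dest, .{});
    };
}
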
// Set up shared memory with ModuleEnv
@@ -986,7 +1137,7 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
} else {
// POSIX: Use existing file descriptor inheritance approach
std.log.debug("Using POSIX file descriptor inheritance approach", .{});
runWithPosixFdInheritance(allocs, exe_path, shm_handle, &cache_manager, args.app_args) catch |err| {
runWithPosixFdInheritance(allocs, exe_path, shm_handle, args.app_args) catch |err| {
return err;
};
}
@@ -1132,40 +1283,32 @@ fn runWithWindowsHandleInheritance(allocs: *Allocators, exe_path: []const u8, sh
}
/// Run child process using POSIX file descriptor inheritance (existing approach for Unix)
fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_manager: *CacheManager, app_args: []const []const u8) !void {
// Get cache directory for temporary files
const temp_cache_dir = cache_manager.config.getTempDir(allocs.arena) catch |err| {
std.log.err("Failed to get temp cache directory: {}", .{err});
/// The exe_path should already be in a unique temp directory created by createUniqueTempDir.
fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, app_args: []const []const u8) !void {
// Write the coordination file (.txt) next to the executable
// The executable is already in a unique temp directory
std.log.debug("Writing fd coordination file for: {s}", .{exe_path});
writeFdCoordinationFile(allocs, exe_path, shm_handle) catch |err| {
std.log.err("Failed to write fd coordination file: {}", .{err});
return err;
};
std.log.debug("Coordination file written successfully", .{});
// Ensure temp cache directory exists
std.fs.cwd().makePath(temp_cache_dir) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => {
std.log.err("Failed to create temp cache directory: {}", .{err});
return err;
},
};
// Create temporary directory structure for fd communication
std.log.debug("Creating temporary directory structure for fd communication", .{});
const temp_exe_path = createTempDirStructure(allocs, exe_path, shm_handle, temp_cache_dir) catch |err| {
std.log.err("Failed to create temp dir structure: {}", .{err});
return err;
};
std.log.debug("Temporary executable created at: {s}", .{temp_exe_path});
// Configure fd inheritance
var flags = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0);
if (flags < 0) {
// Configure fd inheritance - clear FD_CLOEXEC so child process inherits the fd
// NOTE: The doNotOptimizeAway calls are required to prevent the ReleaseFast
// optimizer from incorrectly optimizing away or reordering the fcntl calls.
const getfd_result = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0);
std.mem.doNotOptimizeAway(&getfd_result);
if (getfd_result < 0) {
std.log.err("Failed to get fd flags: {}", .{c._errno().*});
return error.FdConfigFailed;
}
flags &= ~@as(c_int, posix.FD_CLOEXEC);
if (posix.fcntl(shm_handle.fd, posix.F_SETFD, flags) < 0) {
const new_flags = getfd_result & ~@as(c_int, posix.FD_CLOEXEC);
std.mem.doNotOptimizeAway(&new_flags);
const setfd_result = posix.fcntl(shm_handle.fd, posix.F_SETFD, new_flags);
std.mem.doNotOptimizeAway(&setfd_result);
if (setfd_result < 0) {
std.log.err("Failed to set fd flags: {}", .{c._errno().*});
return error.FdConfigFailed;
}
@@ -1175,7 +1318,7 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.log.err("Failed to allocate argv: {}", .{err});
return err;
};
argv[0] = temp_exe_path;
argv[0] = exe_path;
for (app_args, 0..) |arg, i| {
argv[1 + i] = arg;
}
@@ -1192,10 +1335,10 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
child.stderr_behavior = .Inherit;
// Spawn the child process
std.log.debug("Spawning child process: {s} with {} app args", .{ temp_exe_path, app_args.len });
std.log.debug("Spawning child process: {s} with {} app args", .{ exe_path, app_args.len });
std.log.debug("Child process working directory: {s}", .{child.cwd.?});
child.spawn() catch |err| {
std.log.err("Failed to spawn {s}: {}", .{ temp_exe_path, err });
std.log.err("Failed to spawn {s}: {}", .{ exe_path, err });
return err;
};
std.log.debug("Child process spawned successfully (PID: {})", .{child.id});
@@ -1213,12 +1356,12 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.log.debug("Child process completed successfully", .{});
} else {
// Propagate the exit code from the child process to our parent
std.log.debug("Child process {s} exited with code: {}", .{ temp_exe_path, exit_code });
std.log.debug("Child process {s} exited with code: {}", .{ exe_path, exit_code });
std.process.exit(exit_code);
}
},
.Signal => |signal| {
std.log.err("Child process {s} killed by signal: {}", .{ temp_exe_path, signal });
std.log.err("Child process {s} killed by signal: {}", .{ exe_path, signal });
if (signal == 11) { // SIGSEGV
std.log.err("Child process crashed with segmentation fault (SIGSEGV)", .{});
} else if (signal == 6) { // SIGABRT
@@ -1230,11 +1373,11 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.process.exit(128 +| @as(u8, @truncate(signal)));
},
.Stopped => |signal| {
std.log.err("Child process {s} stopped by signal: {}", .{ temp_exe_path, signal });
std.log.err("Child process {s} stopped by signal: {}", .{ exe_path, signal });
return error.ProcessStopped;
},
.Unknown => |status| {
std.log.err("Child process {s} terminated with unknown status: {}", .{ temp_exe_path, status });
std.log.err("Child process {s} terminated with unknown status: {}", .{ exe_path, status });
return error.ProcessUnknownTermination;
},
}
@@ -1422,44 +1565,12 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
const module_env_ptr = try compileModuleToSharedMemory(
allocs,
module_path,
module_filename,
module_name, // Use just "Stdout" (not "Stdout.roc") so type-module detection works
shm_allocator,
&builtin_modules,
&.{},
);
// Add exposed item aliases with "pf." prefix for import resolution
// The canonicalizer builds lookup names like "Stdout.roc.pf.Stdout.line!"
// because the import "pf.Stdout" creates an alias Stdout -> pf.Stdout,
// and scopeLookupModule returns "pf.Stdout" which becomes part of the qualified name.
// We need to add aliases that match this pattern.
module_env_ptr.common.exposed_items.ensureSorted(shm_allocator);
const exposed_entries = module_env_ptr.common.exposed_items.items.entries.items;
for (exposed_entries) |entry| {
const key_ident: base.Ident.Idx = @bitCast(entry.key);
const key_text = module_env_ptr.common.getIdent(key_ident);
// Check if this is a qualified name like "Stdout.roc.Stdout.line!"
// We want to create an alias "Stdout.roc.pf.Stdout.line!"
// The pattern is: "{module}.roc.{Type}.{method}"
// We want to create: "{module}.roc.pf.{Type}.{method}"
if (std.mem.indexOf(u8, key_text, ".roc.")) |roc_pos| {
const prefix = key_text[0 .. roc_pos + 5]; // "Stdout.roc."
const suffix = key_text[roc_pos + 5 ..]; // "Stdout.line!"
// Create the aliased name "Stdout.roc.pf.Stdout.line!"
const aliased_name = try std.fmt.allocPrint(shm_allocator, "{s}pf.{s}", .{ prefix, suffix });
// Note: We don't defer free because this is allocated in shm_allocator (shared memory)
// Insert the aliased name into the platform env's ident table
const aliased_ident = try module_env_ptr.insertIdent(base.Ident.for_text(aliased_name));
// First add to exposed items, then set node index
try module_env_ptr.common.exposed_items.addExposedById(shm_allocator, @bitCast(aliased_ident));
try module_env_ptr.common.exposed_items.setNodeIndexById(shm_allocator, @bitCast(aliased_ident), entry.value);
}
}
// Store platform modules at indices 0..N-2, app will be at N-1
module_env_offsets_ptr[i] = @intFromPtr(module_env_ptr) - @intFromPtr(shm.base_ptr);
platform_env_ptrs[i] = module_env_ptr;
@@ -1605,19 +1716,29 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
// Two keys are needed for each platform module:
// 1. "pf.Stdout" - used during import validation (import pf.Stdout)
// 2. "Stdout" - used during expression canonicalization (Stdout.line!)
// Also set statement_idx to a non-null value to trigger qualified name lookup,
// since associated items are stored as "Stdout.roc.Stdout.line!", not just "line!".
// Also set statement_idx to the actual type node index, which is needed for
// creating e_nominal_external and e_lookup_external expressions.
for (exposed_modules.items, 0..) |module_name, i| {
const platform_env = platform_env_ptrs[i];
// For platform modules, the qualified type name is "ModuleName.roc.ModuleName"
// This matches how associated items are stored (e.g., "Stdout.roc.Stdout.line!")
// For platform modules (type modules), the qualified type name is just the type name.
// Type modules like Stdout.roc store associated items as "Stdout.line!" (not "Stdout.roc.Stdout.line!")
// because processTypeDeclFirstPass uses parent_name=null for top-level types.
// Insert into app_env (calling module) since Ident.Idx values are not transferable between stores.
const qualified_type_name = try std.fmt.allocPrint(allocs.gpa, "{s}.roc.{s}", .{ module_name, module_name });
defer allocs.gpa.free(qualified_type_name);
const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(qualified_type_name));
const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(module_name));
// Look up the type in the platform module's exposed_items to get the actual node index
const type_ident_in_platform = platform_env.common.findIdent(module_name) orelse {
std.log.err("Platform module '{s}' does not expose a type named '{s}'", .{ module_name, module_name });
return error.MissingTypeInPlatformModule;
};
const type_node_idx = platform_env.getExposedNodeIndexById(type_ident_in_platform) orelse {
std.log.err("Platform module type '{s}' has no node index in exposed_items", .{module_name});
return error.MissingNodeIndexForPlatformType;
};
const auto_type = Can.AutoImportedType{
.env = platform_env,
.statement_idx = @enumFromInt(0), // Non-null triggers qualified name building
.statement_idx = @enumFromInt(type_node_idx), // actual type node index for e_lookup_external
.qualified_type_ident = type_qualified_ident,
};

File diff suppressed because it is too large


@@ -23,7 +23,7 @@ pub fn SafeRange(comptime Idx: type) type {
/// An empty range
pub fn empty() Self {
return .{ .start = @enumFromInt(0), .count = 0 };
return .{ .start = undefined, .count = 0 };
}
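// A minimal usage sketch (hypothetical `list` and `Range` values): the
// undefined start is never read, because consumers check `count` first
// (see the `count == 0` guards in sliceRange below).
//
//     const range = Range.empty();
//     std.debug.assert(list.sliceRange(range).len == 0); // start never read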
// Drop the first element from the span, if possible
@@ -99,6 +99,8 @@ pub fn SafeList(comptime T: type) type {
/// An index for an item in the list.
pub const Idx = enum(u32) {
/// The first valid index in the list.
first = 0,
_,
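// The named sentinel lets call sites write `.first` (e.g. `list.get(.first)`)
// instead of `@enumFromInt(0)`.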
/// Get the raw u32 value for storage
@@ -246,6 +248,11 @@ pub fn SafeList(comptime T: type) type {
/// Convert a range to a slice
pub fn sliceRange(self: *const SafeList(T), range: Range) Slice {
// Empty ranges have undefined start, return empty slice directly
if (range.count == 0) {
return &.{};
}
const start: usize = @intFromEnum(range.start);
const end: usize = start + range.count;
@@ -368,7 +375,7 @@ pub fn SafeList(comptime T: type) type {
return Iterator{
.array = self,
.len = self.len(),
.current = @enumFromInt(0),
.current = .first,
};
}
};
@@ -396,7 +403,7 @@ pub fn SafeMultiList(comptime T: type) type {
items: std.MultiArrayList(T) = .{},
/// Index of an item in the list.
pub const Idx = enum(u32) { zero = 0, _ };
pub const Idx = enum(u32) { first = 0, _ };
/// A non-type-safe slice of the list.
pub const Slice = std.MultiArrayList(T).Slice;
@@ -461,7 +468,7 @@ pub fn SafeMultiList(comptime T: type) type {
pub fn appendSlice(self: *SafeMultiList(T), gpa: Allocator, elems: []const T) std.mem.Allocator.Error!Range {
if (elems.len == 0) {
return .{ .start = .zero, .count = 0 };
return .{ .start = .first, .count = 0 };
}
const start_length = self.len();
try self.items.ensureUnusedCapacity(gpa, elems.len);
@@ -474,6 +481,17 @@ pub fn SafeMultiList(comptime T: type) type {
/// Convert a range to a slice
pub fn sliceRange(self: *const SafeMultiList(T), range: Range) Slice {
// Empty ranges have undefined start, return empty slice directly
if (range.count == 0) {
const base = self.items.slice();
// Return a zero-length slice based on the existing slice
return .{
.ptrs = base.ptrs,
.len = 0,
.capacity = 0,
};
}
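// Note: the zero-length slice above still borrows `base.ptrs`, but with
// `len == 0` no element can ever be dereferenced through it.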
const start: usize = @intFromEnum(range.start);
const end: usize = start + range.count;
@@ -963,7 +981,7 @@ test "SafeList edge cases serialization" {
try testing.expectEqual(@as(usize, 0), deserialized.list_u32.len());
try testing.expectEqual(@as(usize, 1), deserialized.list_u8.len());
try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(.first).*);
}
}
@@ -1048,11 +1066,12 @@ test "SafeList CompactWriter complete roundtrip example" {
const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Step 8: Verify data is accessible and correct
const Idx = SafeList(u32).Idx;
try testing.expectEqual(@as(usize, 4), deserialized.len());
try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u32, 100), deserialized.get(.first).*);
try testing.expectEqual(@as(u32, 200), deserialized.get(@as(Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u32, 300), deserialized.get(@as(Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u32, 400), deserialized.get(@as(Idx, @enumFromInt(3))).*);
}
test "SafeList CompactWriter multiple lists with different alignments" {
@@ -1155,10 +1174,11 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u8));
offset += 3 * @sizeOf(u8);
const U8Idx = SafeList(u8).Idx;
try testing.expectEqual(@as(usize, 3), deser_u8.len());
try testing.expectEqual(@as(u8, 10), deser_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 20), deser_u8.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u8, 30), deser_u8.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u8, 10), deser_u8.get(.first).*);
try testing.expectEqual(@as(u8, 20), deser_u8.get(@as(U8Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u8, 30), deser_u8.get(@as(U8Idx, @enumFromInt(2))).*);
// 2. Deserialize u16 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized));
@@ -1169,9 +1189,10 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u16));
offset += 2 * @sizeOf(u16);
const U16Idx = SafeList(u16).Idx;
try testing.expectEqual(@as(usize, 2), deser_u16.len());
try testing.expectEqual(@as(u16, 1000), deser_u16.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u16, 2000), deser_u16.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u16, 1000), deser_u16.get(.first).*);
try testing.expectEqual(@as(u16, 2000), deser_u16.get(@as(U16Idx, @enumFromInt(1))).*);
// 3. Deserialize u32 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized));
@@ -1182,11 +1203,12 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u32));
offset += 4 * @sizeOf(u32);
const U32Idx = SafeList(u32).Idx;
try testing.expectEqual(@as(usize, 4), deser_u32.len());
try testing.expectEqual(@as(u32, 100_000), deser_u32.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u32, 100_000), deser_u32.get(.first).*);
try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@as(U32Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@as(U32Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@as(U32Idx, @enumFromInt(3))).*);
// 4. Deserialize u64 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized));
@@ -1197,22 +1219,24 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u64));
offset += 2 * @sizeOf(u64);
const U64Idx = SafeList(u64).Idx;
try testing.expectEqual(@as(usize, 2), deser_u64.len());
try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(.first).*);
try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@as(U64Idx, @enumFromInt(1))).*);
// 5. Deserialize struct list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(AlignedStruct).Serialized));
const s_struct = @as(*SafeList(AlignedStruct).Serialized, @ptrCast(@alignCast(buffer.ptr + offset)));
const deser_struct = s_struct.deserialize(@as(i64, @intCast(base_addr)));
const StructIdx = SafeList(AlignedStruct).Idx;
try testing.expectEqual(@as(usize, 2), deser_struct.len());
const item0 = deser_struct.get(@enumFromInt(0));
const item0 = deser_struct.get(.first);
try testing.expectEqual(@as(u32, 42), item0.x);
try testing.expectEqual(@as(u64, 1337), item0.y);
try testing.expectEqual(@as(u8, 255), item0.z);
const item1 = deser_struct.get(@enumFromInt(1));
const item1 = deser_struct.get(@as(StructIdx, @enumFromInt(1)));
try testing.expectEqual(@as(u32, 99), item1.x);
try testing.expectEqual(@as(u64, 9999), item1.y);
try testing.expectEqual(@as(u8, 128), item1.z);
@@ -1318,10 +1342,11 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u8));
offset += 3; // 3 u8 elements
const D1Idx = SafeList(u8).Idx;
try testing.expectEqual(@as(usize, 3), d1.len());
try testing.expectEqual(@as(u8, 1), d1.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 2), d1.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u8, 3), d1.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u8, 1), d1.get(.first).*);
try testing.expectEqual(@as(u8, 2), d1.get(@as(D1Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u8, 3), d1.get(@as(D1Idx, @enumFromInt(2))).*);
// 2. Second list - u64
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized));
@@ -1331,9 +1356,10 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u64));
offset += 2 * @sizeOf(u64); // 2 u64 elements
const D2Idx = SafeList(u64).Idx;
try testing.expectEqual(@as(usize, 2), d2.len());
try testing.expectEqual(@as(u64, 1_000_000), d2.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u64, 2_000_000), d2.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u64, 1_000_000), d2.get(.first).*);
try testing.expectEqual(@as(u64, 2_000_000), d2.get(@as(D2Idx, @enumFromInt(1))).*);
// 3. Third list - u16
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized));
@@ -1343,11 +1369,12 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u16));
offset += 4 * @sizeOf(u16); // 4 u16 elements
const D3Idx = SafeList(u16).Idx;
try testing.expectEqual(@as(usize, 4), d3.len());
try testing.expectEqual(@as(u16, 100), d3.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u16, 200), d3.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u16, 300), d3.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u16, 400), d3.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u16, 100), d3.get(.first).*);
try testing.expectEqual(@as(u16, 200), d3.get(@as(D3Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u16, 300), d3.get(@as(D3Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u16, 400), d3.get(@as(D3Idx, @enumFromInt(3))).*);
// 4. Fourth list - u32
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized));
@@ -1355,7 +1382,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
const d4 = s4.deserialize(@as(i64, @intCast(base)));
try testing.expectEqual(@as(usize, 1), d4.len());
try testing.expectEqual(@as(u32, 42), d4.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 42), d4.get(.first).*);
}
test "SafeList CompactWriter brute-force alignment verification" {
@@ -1476,7 +1503,7 @@ test "SafeList CompactWriter brute-force alignment verification" {
offset += 1; // 1 u8 element
try testing.expectEqual(@as(usize, 1), d_u8.len());
try testing.expectEqual(@as(u8, 42), d_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 42), d_u8.get(.first).*);
// Second list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(T).Serialized));
@@ -1551,28 +1578,32 @@ test "SafeMultiList CompactWriter roundtrip with file" {
const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Verify the data
const Idx = SafeMultiList(TestStruct).Idx;
try testing.expectEqual(@as(usize, 4), deserialized.len());
// Verify all the data
try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).id);
try testing.expectEqual(@as(u64, 1000), deserialized.get(@enumFromInt(0)).value);
try testing.expectEqual(true, deserialized.get(@enumFromInt(0)).flag);
try testing.expectEqual(@as(u8, 10), deserialized.get(@enumFromInt(0)).data);
try testing.expectEqual(@as(u32, 100), deserialized.get(.first).id);
try testing.expectEqual(@as(u64, 1000), deserialized.get(.first).value);
try testing.expectEqual(true, deserialized.get(.first).flag);
try testing.expectEqual(@as(u8, 10), deserialized.get(.first).data);
try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).id);
try testing.expectEqual(@as(u64, 2000), deserialized.get(@enumFromInt(1)).value);
try testing.expectEqual(false, deserialized.get(@enumFromInt(1)).flag);
try testing.expectEqual(@as(u8, 20), deserialized.get(@enumFromInt(1)).data);
const second_idx: Idx = @enumFromInt(1);
try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).id);
try testing.expectEqual(@as(u64, 2000), deserialized.get(second_idx).value);
try testing.expectEqual(false, deserialized.get(second_idx).flag);
try testing.expectEqual(@as(u8, 20), deserialized.get(second_idx).data);
try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).id);
try testing.expectEqual(@as(u64, 3000), deserialized.get(@enumFromInt(2)).value);
try testing.expectEqual(true, deserialized.get(@enumFromInt(2)).flag);
try testing.expectEqual(@as(u8, 30), deserialized.get(@enumFromInt(2)).data);
const third_idx: Idx = @enumFromInt(2);
try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).id);
try testing.expectEqual(@as(u64, 3000), deserialized.get(third_idx).value);
try testing.expectEqual(true, deserialized.get(third_idx).flag);
try testing.expectEqual(@as(u8, 30), deserialized.get(third_idx).data);
try testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).id);
try testing.expectEqual(@as(u64, 4000), deserialized.get(@enumFromInt(3)).value);
try testing.expectEqual(false, deserialized.get(@enumFromInt(3)).flag);
try testing.expectEqual(@as(u8, 40), deserialized.get(@enumFromInt(3)).data);
const fourth_idx: Idx = @enumFromInt(3);
try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).id);
try testing.expectEqual(@as(u64, 4000), deserialized.get(fourth_idx).value);
try testing.expectEqual(false, deserialized.get(fourth_idx).flag);
try testing.expectEqual(@as(u8, 40), deserialized.get(fourth_idx).data);
}
test "SafeMultiList empty list CompactWriter roundtrip" {
@@ -1702,30 +1733,31 @@ test "SafeMultiList CompactWriter multiple lists different alignments" {
const base = @as(i64, @intCast(@intFromPtr(buffer.ptr)));
// Deserialize list1 (at offset1)
const D1Idx = SafeMultiList(Type1).Idx;
const d1_serialized = @as(*SafeMultiList(Type1).Serialized, @ptrCast(@alignCast(buffer.ptr + offset1)));
const d1 = d1_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 3), d1.len());
try testing.expectEqual(@as(u8, 10), d1.get(@enumFromInt(0)).a);
try testing.expectEqual(@as(u16, 100), d1.get(@enumFromInt(0)).b);
try testing.expectEqual(@as(u8, 20), d1.get(@enumFromInt(1)).a);
try testing.expectEqual(@as(u16, 200), d1.get(@enumFromInt(1)).b);
try testing.expectEqual(@as(u8, 30), d1.get(@enumFromInt(2)).a);
try testing.expectEqual(@as(u16, 300), d1.get(@enumFromInt(2)).b);
try testing.expectEqual(@as(u8, 10), d1.get(.first).a);
try testing.expectEqual(@as(u16, 100), d1.get(.first).b);
try testing.expectEqual(@as(u8, 20), d1.get(@as(D1Idx, @enumFromInt(1))).a);
try testing.expectEqual(@as(u16, 200), d1.get(@as(D1Idx, @enumFromInt(1))).b);
try testing.expectEqual(@as(u8, 30), d1.get(@as(D1Idx, @enumFromInt(2))).a);
try testing.expectEqual(@as(u16, 300), d1.get(@as(D1Idx, @enumFromInt(2))).b);
// Deserialize list2 (at offset2)
const d2_serialized = @as(*SafeMultiList(Type2).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2)));
const d2 = d2_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 2), d2.len());
try testing.expectEqual(@as(u32, 1000), d2.get(@enumFromInt(0)).x);
try testing.expectEqual(@as(u64, 10000), d2.get(@enumFromInt(0)).y);
try testing.expectEqual(@as(u32, 1000), d2.get(.first).x);
try testing.expectEqual(@as(u64, 10000), d2.get(.first).y);
// Deserialize list3 (at offset3)
const d3_serialized = @as(*SafeMultiList(Type3).Serialized, @ptrCast(@alignCast(buffer.ptr + offset3)));
const d3 = d3_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 2), d3.len());
try testing.expectEqual(@as(u64, 999), d3.get(@enumFromInt(0)).id);
try testing.expectEqual(@as(u8, 42), d3.get(@enumFromInt(0)).data);
try testing.expectEqual(true, d3.get(@enumFromInt(0)).flag);
try testing.expectEqual(@as(u64, 999), d3.get(.first).id);
try testing.expectEqual(@as(u8, 42), d3.get(.first).data);
try testing.expectEqual(true, d3.get(.first).flag);
}
test "SafeMultiList CompactWriter brute-force alignment verification" {
@@ -1815,10 +1847,11 @@ test "SafeMultiList CompactWriter brute-force alignment verification" {
const d2_serialized = @as(*SafeMultiList(TestType).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2)));
const d2 = d2_serialized.deserialize(base);
if (length > 0) {
const d2_first_idx: SafeMultiList(TestType).Idx = .first;
try testing.expectEqual(@as(usize, 1), d2.len());
try testing.expectEqual(@as(u8, 255), d2.get(@enumFromInt(0)).a);
try testing.expectEqual(@as(u32, 999999), d2.get(@enumFromInt(0)).b);
try testing.expectEqual(@as(u64, 888888888), d2.get(@enumFromInt(0)).c);
try testing.expectEqual(@as(u8, 255), d2.get(d2_first_idx).a);
try testing.expectEqual(@as(u32, 999999), d2.get(d2_first_idx).b);
try testing.expectEqual(@as(u64, 888888888), d2.get(d2_first_idx).c);
} else {
try testing.expectEqual(@as(usize, 0), d2.len());
}
@@ -2286,7 +2319,8 @@ test "SafeMultiList.Serialized roundtrip" {
try testing.expectEqual(@as(u8, 64), c_values[2]);
// Check get() method
const item1 = list.get(@as(SafeMultiList(TestStruct).Idx, @enumFromInt(0)));
const first_idx: SafeMultiList(TestStruct).Idx = .first;
const item1 = list.get(first_idx);
try testing.expectEqual(@as(u32, 100), item1.a);
try testing.expectEqual(@as(f32, 1.5), item1.b);
try testing.expectEqual(@as(u8, 255), item1.c);


@@ -64,7 +64,7 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" {
defer result.deinit();
// Now get the type of map_result and convert it to a string
// Find the map_result definition
// Find the map_result definition and get its type var from the expression
const defs_slice = env.store.sliceDefs(env.all_defs);
var map_result_var: ?types.Var = null;
for (defs_slice) |def_idx| {
@@ -74,8 +74,8 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" {
const ident_idx = pattern.assign.ident;
const ident_text = env.getIdent(ident_idx);
if (std.mem.eql(u8, ident_text, "map_result")) {
// Get the type variable from the first definition - it's the first in the defs list
map_result_var = @enumFromInt(0); // First variable
// Get the type variable from the definition's expression
map_result_var = ModuleEnv.varFrom(def.expr);
break;
}
}


@@ -34,14 +34,203 @@ const Expr = CIR.Expr;
const StackValue = @This();
// ============================================================================
// Internal helper functions for memory operations that don't need rt_var
// ============================================================================
/// Increment reference count for a value given its layout and pointer.
/// Used internally when we don't need full StackValue type information.
fn increfLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.incref(1);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
const list_value = @as(*const RocList, @ptrCast(@alignCast(ptr.?))).*;
list_value.incref(1, false);
return;
}
if (layout.tag == .box) {
if (ptr == null) return;
const slot: *usize = @ptrCast(@alignCast(ptr.?));
if (slot.* != 0) {
const data_ptr: [*]u8 = @as([*]u8, @ptrFromInt(slot.*));
builtins.utils.increfDataPtrC(@as(?[*]u8, data_ptr), 1);
}
return;
}
if (layout.tag == .record) {
if (ptr == null) return;
const record_data = layout_cache.getRecordData(layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
increfLayoutPtr(field_layout, field_ptr, layout_cache);
}
return;
}
if (layout.tag == .tuple) {
if (ptr == null) return;
const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
increfLayoutPtr(elem_layout, elem_ptr, layout_cache);
}
return;
}
if (layout.tag == .tag_union) {
if (ptr == null) return;
// For tag unions, we would need to read the discriminant and incref only the
// active variant's payload. That is complex, so we skip it for now (callers
// should handle specific union types themselves).
return;
}
// Other layout types (scalar ints/floats, zst, etc.) don't need refcounting
}
/// Decrement reference count for a value given its layout and pointer.
/// Used internally when we don't need full StackValue type information.
fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore, ops: *RocOps) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.decref(ops);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
const list_header: *const RocList = @ptrCast(@alignCast(ptr.?));
const list_value = list_header.*;
const elem_layout = layout_cache.getLayout(layout.data.list);
const alignment_u32: u32 = @intCast(elem_layout.alignment(layout_cache.targetUsize()).toByteUnits());
const element_width: usize = @intCast(layout_cache.layoutSize(elem_layout));
const elements_refcounted = elem_layout.isRefcounted();
// Decref elements when unique
if (list_value.isUnique()) {
if (list_value.getAllocationDataPtr()) |source| {
const count = list_value.getAllocationElementCount(elements_refcounted);
var idx: usize = 0;
while (idx < count) : (idx += 1) {
const elem_ptr = source + idx * element_width;
decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops);
}
}
}
list_value.decref(alignment_u32, element_width, elements_refcounted, null, &builtins.list.rcNone, ops);
return;
}
if (layout.tag == .box) {
if (ptr == null) return;
const slot: *usize = @ptrCast(@alignCast(ptr.?));
const raw_ptr = slot.*;
if (raw_ptr == 0) return;
const data_ptr = @as([*]u8, @ptrFromInt(raw_ptr));
const target_usize = layout_cache.targetUsize();
const elem_layout = layout_cache.getLayout(layout.data.box);
const elem_alignment: u32 = @intCast(elem_layout.alignment(target_usize).toByteUnits());
const ptr_int = @intFromPtr(data_ptr);
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const unmasked_ptr = ptr_int & ~tag_mask;
const payload_ptr = @as([*]u8, @ptrFromInt(unmasked_ptr));
const refcount_ptr: *isize = @as(*isize, @ptrFromInt(unmasked_ptr - @sizeOf(isize)));
if (builtins.utils.rcUnique(refcount_ptr.*)) {
if (elem_layout.isRefcounted()) {
decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops);
}
}
builtins.utils.decrefDataPtrC(@as(?[*]u8, payload_ptr), elem_alignment, false, ops);
slot.* = 0;
return;
}
if (layout.tag == .record) {
if (ptr == null) return;
const record_data = layout_cache.getRecordData(layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
decrefLayoutPtr(field_layout, field_ptr, layout_cache, ops);
}
return;
}
if (layout.tag == .tuple) {
if (ptr == null) return;
const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
decrefLayoutPtr(elem_layout, elem_ptr, layout_cache, ops);
}
return;
}
if (layout.tag == .closure) {
if (ptr == null) return;
// Get the closure header to find the captures layout
const closure_header: *const layout_mod.Closure = @ptrCast(@alignCast(ptr.?));
const captures_layout = layout_cache.getLayout(closure_header.captures_layout_idx);
// Only decref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (record_data.fields.count > 0) {
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
decrefLayoutPtr(captures_layout, rec_ptr, layout_cache, ops);
}
}
return;
}
// Other layout types (scalar ints/floats, zst, etc.) don't need refcounting
}
/// Type and memory layout information for the result value
layout: Layout,
/// Ptr to the actual value in stack memory
ptr: ?*anyopaque,
/// Flag to track whether the memory has been initialized
is_initialized: bool = false,
/// Optional runtime type variable for type information (used in constant folding)
rt_var: ?types.Var = null,
/// Runtime type variable for type information (used for method dispatch and constant folding)
rt_var: types.Var,
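// With rt_var no longer optional, every construction site supplies it
// explicitly, e.g. (hypothetical values):
//
//     const value = StackValue{
//         .layout = layout,
//         .ptr = ptr,
//         .is_initialized = true,
//         .rt_var = rt_var, // or an explicit `undefined` where only layout matters
//     };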
/// Copy this stack value to a destination pointer with bounds checking
pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopaque) !void {
@@ -226,13 +415,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
increfLayoutPtr(field_layout, field_ptr, layout_cache);
}
return;
}
@@ -263,13 +446,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.incref(layout_cache);
increfLayoutPtr(elem_layout, elem_ptr, layout_cache);
}
return;
}
@@ -304,29 +481,8 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?));
const rec_ptr: [*]u8 = @ptrCast(base_ptr + aligned_off);
// Iterate over each field in the captures record and incref all fields.
// We call incref on ALL fields (not just isRefcounted()) because:
// - For directly refcounted types (str, list, box): increfs them
// - For nested records/tuples: recursively handles their contents
// - For scalars: incref is a no-op
// This is symmetric with decref.
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(captures_layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(rec_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
}
// Incref the entire captures record (which handles all fields recursively)
increfLayoutPtr(captures_layout, @ptrCast(rec_ptr), layout_cache);
}
}
return;
@@ -365,13 +521,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
}
// Incref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
payload_value.incref(layout_cache);
increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache);
return;
}
@@ -722,7 +872,7 @@ pub const TupleAccessor = struct {
element_layouts: layout_mod.TupleField.SafeMultiList.Slice,
/// Get a StackValue for the element at the given original index (before sorting)
pub fn getElement(self: TupleAccessor, original_index: usize) !StackValue {
pub fn getElement(self: TupleAccessor, original_index: usize, elem_rt_var: types.Var) !StackValue {
// Find the sorted index corresponding to this original index
const sorted_index = self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds;
@@ -748,13 +898,24 @@
.layout = element_layout,
.ptr = element_ptr,
.is_initialized = true, // Elements in existing tuples are initialized
.rt_var = elem_rt_var,
};
}
/// Get just the element pointer without needing type information (for internal operations like setElement)
pub fn getElementPtr(self: TupleAccessor, original_index: usize) !*anyopaque {
const sorted_index = self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds;
std.debug.assert(self.base_value.is_initialized);
std.debug.assert(self.base_value.ptr != null);
const element_offset = self.layout_cache.getTupleElementOffset(self.tuple_layout.data.tuple.idx, @intCast(sorted_index));
const base_ptr = @as([*]u8, @ptrCast(self.base_value.ptr.?));
return @as(*anyopaque, @ptrCast(base_ptr + element_offset));
}
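// This lets setElement (below) copy into a destination slot without having
// to fabricate an rt_var, since copying needs only layout and pointer info.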
/// Set an element by copying from a source StackValue
pub fn setElement(self: TupleAccessor, index: usize, source: StackValue) !void {
const dest_element = try self.getElement(index);
try source.copyToPtr(self.layout_cache, dest_element.ptr.?);
const dest_ptr = try self.getElementPtr(index);
try source.copyToPtr(self.layout_cache, dest_ptr);
}
/// Find the sorted element index corresponding to an original tuple position
@@ -871,11 +1032,11 @@ pub const ListAccessor = struct {
return self.list.len();
}
pub fn getElement(self: ListAccessor, index: usize) !StackValue {
pub fn getElement(self: ListAccessor, index: usize, elem_rt_var: types.Var) !StackValue {
if (index >= self.list.len()) return error.ListIndexOutOfBounds;
if (self.element_size == 0) {
return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true };
return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true, .rt_var = elem_rt_var };
}
const base_ptr = self.list.bytes orelse return error.NullStackPointer;
@@ -884,8 +1045,18 @@
.layout = self.element_layout,
.ptr = @ptrCast(base_ptr + offset),
.is_initialized = true,
.rt_var = elem_rt_var,
};
}
/// Get just the element pointer without needing type information (for internal operations)
pub fn getElementPtr(self: ListAccessor, index: usize) !?*anyopaque {
if (index >= self.list.len()) return error.ListIndexOutOfBounds;
if (self.element_size == 0) return null;
const base_ptr = self.list.bytes orelse return error.NullStackPointer;
const offset = index * self.element_size;
return @ptrCast(base_ptr + offset);
}
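// Mirrors TupleAccessor.getElementPtr: internal copies need only the
// element's address, not its runtime type.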
};
fn storeListElementCount(list: *RocList, elements_refcounted: bool) void {
@@ -961,7 +1132,7 @@ pub const RecordAccessor = struct {
field_layouts: layout_mod.RecordField.SafeMultiList.Slice,
/// Get a StackValue for the field at the given index
pub fn getFieldByIndex(self: RecordAccessor, index: usize) !StackValue {
pub fn getFieldByIndex(self: RecordAccessor, index: usize, field_rt_var: types.Var) !StackValue {
if (index >= self.field_layouts.len) {
return error.RecordIndexOutOfBounds;
}
@@ -988,11 +1159,12 @@
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true, // Fields in existing records are initialized
.rt_var = field_rt_var,
};
}
/// Get a StackValue for the field with the given name
pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx) !?StackValue {
pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx, field_rt_var: types.Var) !?StackValue {
const field_offset = self.layout_cache.getRecordFieldOffsetByName(
self.record_layout.data.record.idx,
field_name_idx,
@@ -1026,12 +1198,13 @@
.layout = field_layout.?,
.ptr = field_ptr,
.is_initialized = true,
.rt_var = field_rt_var,
};
}
/// Set a field by copying from a source StackValue
pub fn setFieldByIndex(self: RecordAccessor, index: usize, source: StackValue) !void {
const dest_field = try self.getFieldByIndex(index);
const dest_field = try self.getFieldByIndex(index, source.rt_var);
try source.copyToPtr(self.layout_cache, dest_field.ptr.?);
}
@@ -1168,15 +1341,6 @@ pub fn copyTo(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) vo
);
}
/// Create a StackValue view of a memory region (no copy)
pub fn fromPtr(layout: Layout, ptr: *anyopaque) StackValue {
return StackValue{
.layout = layout,
.ptr = ptr,
.is_initialized = true,
};
}
/// Copy value data to another StackValue WITHOUT incrementing refcounts (move semantics)
pub fn copyWithoutRefcount(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) void {
std.debug.assert(self.is_initialized);
@@ -1269,56 +1433,12 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
}
// Handle records by recursively incref'ing each field (symmetric with decref)
if (self.layout.tag == .record) {
if (self.ptr == null) return;
const record_data = layout_cache.getRecordData(self.layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
}
increfLayoutPtr(self.layout, self.ptr, layout_cache);
return;
}
// Handle tuples by recursively incref'ing each element (symmetric with decref)
if (self.layout.tag == .tuple) {
if (self.ptr == null) return;
const tuple_data = layout_cache.getTupleData(self.layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.incref(layout_cache);
}
increfLayoutPtr(self.layout, self.ptr, layout_cache);
return;
}
// Handle tag unions by reading discriminant and incref'ing only the active variant's payload
@@ -1342,17 +1462,11 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
const variant_layout = layout_cache.getLayout(variants.get(discriminant).payload_layout);
// Incref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
if (comptime trace_refcount) {
traceRefcount("INCREF tag_union disc={} variant_layout.tag={}", .{ discriminant, @intFromEnum(variant_layout.tag) });
}
payload_value.incref(layout_cache);
increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache);
return;
}
}
@@ -1450,12 +1564,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
var idx: usize = 0;
while (idx < count) : (idx += 1) {
const elem_ptr = source + idx * element_width;
const elem_value = StackValue{
.layout = elem_layout,
.ptr = @ptrCast(elem_ptr),
.is_initialized = true,
};
elem_value.decref(layout_cache, ops);
decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops);
}
}
}
@@ -1498,12 +1607,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
if (builtins.utils.rcUnique(refcount_ptr.*)) {
if (elem_layout.isRefcounted()) {
const payload_value = StackValue{
.layout = elem_layout,
.ptr = @ptrCast(@alignCast(payload_ptr)),
.is_initialized = true,
};
payload_value.decref(layout_cache, ops);
decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops);
}
}
@@ -1523,26 +1627,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
});
}
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.decref(layout_cache, ops);
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.box_of_zst => {
@@ -1563,61 +1648,11 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
});
}
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.decref(layout_cache, ops);
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.closure => {
if (self.ptr == null) return;
// Get the closure header to find the captures layout
const closure = self.asClosure();
const captures_layout = layout_cache.getLayout(closure.captures_layout_idx);
// Only decref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (record_data.fields.count > 0) {
if (comptime trace_refcount) {
traceRefcount("DECREF closure ptr=0x{x} captures={}", .{
@intFromPtr(self.ptr),
record_data.fields.count,
});
}
// Calculate the offset to the captures record (after header, with alignment)
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
// Create a StackValue for the captures record and decref it
const captures_value = StackValue{
.layout = captures_layout,
.ptr = rec_ptr,
.is_initialized = true,
};
captures_value.decref(layout_cache, ops);
}
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.tag_union => {
@@ -1649,13 +1684,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
}
// Decref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
payload_value.decref(layout_cache, ops);
decrefLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache, ops);
return;
},
else => {},


@@ -348,16 +348,8 @@ pub const ComptimeEvaluator = struct {
// Convert StackValue to CIR expression based on layout
const layout = stack_value.layout;
// Get the runtime type variable from the StackValue first, or fall back to expression type
const rt_var: types_mod.Var = if (stack_value.rt_var) |sv_rt_var|
sv_rt_var
else blk: {
// Fall back to expression type variable
const ct_var = ModuleEnv.varFrom(def.expr);
break :blk self.interpreter.translateTypeVar(self.env, ct_var) catch {
return error.NotImplemented;
};
};
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
// Check if it's a tag union type
@@ -471,7 +463,8 @@ pub const ComptimeEvaluator = struct {
// Get variant_var and ext_var
const variant_var: types_mod.Var = bool_rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@@ -492,33 +485,33 @@ pub const ComptimeEvaluator = struct {
/// Fold a tag union (represented as scalar, like Bool) to an e_zero_argument_tag expression
fn foldTagUnionScalar(self: *ComptimeEvaluator, def_idx: CIR.Def.Idx, expr_idx: CIR.Expr.Idx, stack_value: eval_mod.StackValue) !void {
_ = def_idx; // unused now that we get rt_var from stack_value
// The value is the tag index directly (scalar integer)
// The value is the tag index directly (scalar integer).
// The caller already verified layout.tag == .scalar, and scalar tag unions are always ints.
std.debug.assert(stack_value.layout.tag == .scalar and stack_value.layout.data.scalar.tag == .int);
const tag_index: usize = @intCast(stack_value.asI128());
// Get the runtime type variable from the StackValue (already validated in tryFoldConstant)
const rt_var = stack_value.rt_var orelse return error.NotImplemented;
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
// Get the list of tags for this union type
var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator);
defer tag_list.deinit();
try self.interpreter.appendUnionTags(rt_var, &tag_list);
if (tag_index >= tag_list.items.len) {
return error.NotImplemented;
}
// Tag index from the value must be valid
std.debug.assert(tag_index < tag_list.items.len);
const tag_info = tag_list.items[tag_index];
const arg_vars = self.interpreter.runtime_types.sliceVars(tag_info.args);
// Only fold zero-argument tags (like True, False)
if (arg_vars.len != 0) {
return error.NotImplemented;
}
// Scalar tag unions don't have payloads, so arg_vars must be empty
std.debug.assert(arg_vars.len == 0);
// Get variant_var and ext_var from type information
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
const variant_var: types_mod.Var = rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@@ -543,17 +536,18 @@ pub const ComptimeEvaluator = struct {
var acc = try stack_value.asTuple(&self.interpreter.runtime_layout_store);
// Element 1 is the tag discriminant - getElement takes original index directly
const tag_field = try acc.getElement(1);
const tag_elem_rt_var = try self.interpreter.runtime_types.fresh();
const tag_field = try acc.getElement(1, tag_elem_rt_var);
// Extract tag index
if (tag_field.layout.tag != .scalar or tag_field.layout.data.scalar.tag != .int) {
return error.NotImplemented;
}
const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true };
const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = tag_elem_rt_var };
const tag_index: usize = @intCast(tmp_sv.asI128());
// Get the runtime type variable from the StackValue (already validated in tryFoldConstant)
const rt_var = stack_value.rt_var orelse return error.NotImplemented;
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
// Get the list of tags for this union type
var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator);
@@ -575,7 +569,8 @@ pub const ComptimeEvaluator = struct {
// Get variant_var and ext_var from type information
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
const variant_var: types_mod.Var = rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@@ -996,7 +991,8 @@ pub const ComptimeEvaluator = struct {
}
// Build is_negative Bool
const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0);
const bool_rt_var = try self.interpreter.getCanonicalBoolRuntimeVar();
const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0, bool_rt_var);
if (is_neg_value.ptr) |ptr| {
@as(*u8, @ptrCast(@alignCast(ptr))).* = @intFromBool(num_lit_info.is_negative);
}
@@ -1132,7 +1128,7 @@ pub const ComptimeEvaluator = struct {
try self.interpreter.bindings.append(.{
.pattern_idx = params[0],
.value = num_literal_record,
.expr_idx = @enumFromInt(0),
.expr_idx = null, // No source expression for synthetic binding
.source_env = origin_env,
});
defer _ = self.interpreter.bindings.pop();
@@ -1192,7 +1188,8 @@ pub const ComptimeEvaluator = struct {
const list_layout_idx = try self.interpreter.runtime_layout_store.insertList(layout_mod.Idx.u8);
const list_layout = self.interpreter.runtime_layout_store.getLayout(list_layout_idx);
const dest = try self.interpreter.pushRaw(list_layout, 0);
// rt_var not needed for List(U8) construction - only layout matters
const dest = try self.interpreter.pushRaw(list_layout, 0, undefined);
if (dest.ptr == null) return dest;
const header: *builtins.list.RocList = @ptrCast(@alignCast(dest.ptr.?));
@@ -1242,7 +1239,8 @@ pub const ComptimeEvaluator = struct {
const record_layout_idx = try self.interpreter.runtime_layout_store.putRecord(self.env, &field_layouts, &field_names);
const record_layout = self.interpreter.runtime_layout_store.getLayout(record_layout_idx);
var dest = try self.interpreter.pushRaw(record_layout, 0);
// rt_var not needed for Numeral record construction - only layout matters
var dest = try self.interpreter.pushRaw(record_layout, 0, undefined);
var accessor = try dest.asRecord(&self.interpreter.runtime_layout_store);
// Use self.env for field lookups since the record was built with self.env's idents
@@ -1315,7 +1313,8 @@ pub const ComptimeEvaluator = struct {
// Use layout store's env for field lookups since records use that env's idents
const layout_env = self.interpreter.runtime_layout_store.env;
const tag_idx = accessor.findFieldIndex(layout_env.idents.tag) orelse return true;
const tag_field = accessor.getFieldByIndex(tag_idx) catch return true;
const tag_rt_var = self.interpreter.runtime_types.fresh() catch return true;
const tag_field = accessor.getFieldByIndex(tag_idx, tag_rt_var) catch return true;
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tag_value = tag_field.asI128();
@@ -1343,7 +1342,8 @@ pub const ComptimeEvaluator = struct {
var accessor = result.asTuple(&self.interpreter.runtime_layout_store) catch return true;
// Element 1 is tag discriminant - getElement takes original index directly
const tag_field = accessor.getElement(1) catch return true;
const tag_elem_rt_var = self.interpreter.runtime_types.fresh() catch return true;
const tag_field = accessor.getElement(1, tag_elem_rt_var) catch return true;
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tag_value = tag_field.asI128();
@@ -1396,7 +1396,10 @@ pub const ComptimeEvaluator = struct {
// This should never happen - Try type must have a payload field
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (missing payload field)", .{});
};
const payload_field = try_accessor.getFieldByIndex(payload_idx) catch {
const payload_rt_var = self.interpreter.runtime_types.fresh() catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not create rt_var)", .{});
};
const payload_field = try_accessor.getFieldByIndex(payload_idx, payload_rt_var) catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not access payload)", .{});
};
@@ -1411,7 +1414,10 @@ pub const ComptimeEvaluator = struct {
// Check if this has a payload field (for the Str)
// Single-tag unions might not have a "tag" field, so we look for payload first
if (err_accessor.findFieldIndex(layout_env.idents.payload)) |err_payload_idx| {
const err_payload = err_accessor.getFieldByIndex(err_payload_idx) catch {
const err_payload_rt_var = self.interpreter.runtime_types.fresh() catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: could not create rt_var for InvalidNumeral payload", .{});
};
const err_payload = err_accessor.getFieldByIndex(err_payload_idx, err_payload_rt_var) catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: could not access InvalidNumeral payload", .{});
};
return try self.extractStrFromValue(err_payload);
@@ -1421,7 +1427,8 @@ pub const ComptimeEvaluator = struct {
// Iterate through fields looking for a Str
var field_idx: usize = 0;
while (true) : (field_idx += 1) {
const field = err_accessor.getFieldByIndex(field_idx) catch break;
const iter_field_rt_var = self.interpreter.runtime_types.fresh() catch break;
const field = err_accessor.getFieldByIndex(field_idx, iter_field_rt_var) catch break;
if (field.layout.tag == .scalar and field.layout.data.scalar.tag == .str) {
return try self.extractStrFromValue(field);
}
@@ -1507,8 +1514,12 @@ pub const ComptimeEvaluator = struct {
try self.reportProblem(expect_info.message, expect_info.region, .expect_failed);
},
.error_eval => |error_info| {
const error_name = @errorName(error_info.err);
try self.reportProblem(error_name, error_info.region, .error_eval);
// Provide user-friendly messages for specific errors
const error_message = switch (error_info.err) {
error.DivisionByZero => "Division by zero",
else => @errorName(error_info.err),
};
try self.reportProblem(error_message, error_info.region, .error_eval);
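// e.g. a comptime-evaluated division by zero is now reported as
// "Division by zero" rather than the raw Zig error name "DivisionByZero".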
},
}
}

File diff suppressed because it is too large


@@ -130,7 +130,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
const count = tup_acc.getElementCount();
if (count > 0) {
// Get tag index from the last element
const tag_elem = try tup_acc.getElement(count - 1);
// rt_var not needed for tag discriminant access (it's always an integer)
const tag_elem = try tup_acc.getElement(count - 1, undefined);
if (tag_elem.layout.tag == .scalar and tag_elem.layout.data.scalar.tag == .int) {
if (std.math.cast(usize, tag_elem.asI128())) |tag_idx| {
tag_index = tag_idx;
@@ -150,26 +151,28 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
if (arg_vars.len == 1) {
// Single payload: first element
// Get the correct layout from the type variable, not the payload union layout
const payload_elem = try tup_acc.getElement(0);
const arg_var = arg_vars[0];
const payload_elem = try tup_acc.getElement(0, arg_var);
const layout_idx = try ctx.layout_store.addTypeVar(arg_var, ctx.type_scope);
const arg_layout = ctx.layout_store.getLayout(layout_idx);
const payload_value = StackValue{
.layout = arg_layout,
.ptr = payload_elem.ptr,
.is_initialized = payload_elem.is_initialized,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
try out.appendSlice(rendered);
} else {
// Multiple payloads: first element is a nested tuple containing all payload args
const payload_elem = try tup_acc.getElement(0);
// rt_var undefined for tuple access (we have the individual element types)
const payload_elem = try tup_acc.getElement(0, undefined);
if (payload_elem.layout.tag == .tuple) {
var payload_tup = try payload_elem.asTuple(ctx.layout_store);
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const elem_value = try payload_tup.getElement(j);
const elem_value = try payload_tup.getElement(j, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -189,9 +192,10 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
} else if (value.layout.tag == .record) {
var acc = try value.asRecord(ctx.layout_store);
if (acc.findFieldIndex(ctx.env.idents.tag)) |idx| {
const tag_field = try acc.getFieldByIndex(idx);
const field_rt = try ctx.runtime_types.fresh();
const tag_field = try acc.getFieldByIndex(idx, field_rt);
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true };
const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = undefined };
// Only treat as tag if value fits in usize (valid tag discriminants are small)
if (std.math.cast(usize, tmp_sv.asI128())) |tag_idx| {
tag_index = tag_idx;
@@ -205,7 +209,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
errdefer out.deinit();
try out.appendSlice(tag_name);
if (acc.findFieldIndex(ctx.env.idents.payload)) |pidx| {
const payload = try acc.getFieldByIndex(pidx);
const field_rt = try ctx.runtime_types.fresh();
const payload = try acc.getFieldByIndex(pidx, field_rt);
const args_range = tags.items(.args)[tag_index];
const arg_vars = ctx.runtime_types.sliceVars(toVarRange(args_range));
if (arg_vars.len > 0) {
@@ -218,6 +223,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = arg_layout,
.ptr = payload.ptr,
.is_initialized = payload.is_initialized,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
@@ -237,6 +243,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = tuple_layout,
.ptr = payload.ptr,
.is_initialized = payload.is_initialized,
.rt_var = undefined, // not needed - type known from layout
};
if (tuple_size == 0 or payload.ptr == null) {
var j: usize = 0;
@@ -247,6 +254,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = elem_layouts[j],
.ptr = null,
.is_initialized = true,
.rt_var = arg_vars[j],
},
arg_vars[j],
);
@@ -259,7 +267,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch;
const elem_value = try tup_acc.getElement(sorted_idx);
const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -308,6 +316,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = arg_layout,
.ptr = payload_ptr,
.is_initialized = true,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
@@ -333,6 +342,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = elem_layouts[j],
.ptr = null,
.is_initialized = true,
.rt_var = arg_vars[j],
},
arg_vars[j],
);
@@ -345,12 +355,13 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = tuple_layout,
.ptr = payload_ptr,
.is_initialized = true,
.rt_var = undefined, // not needed - type known from layout
};
var tup_acc = try tuple_value.asTuple(ctx.layout_store);
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch;
const elem_value = try tup_acc.getElement(sorted_idx);
const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -383,6 +394,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = payload_layout,
.ptr = null,
.is_initialized = true,
.rt_var = payload_var,
};
switch (value.layout.tag) {
@@ -464,7 +476,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
const idx = acc.findFieldIndex(f.name) orelse {
std.debug.panic("Record field not found in layout: type says field '{s}' exists but layout doesn't have it", .{name_text});
};
const field_val = try acc.getFieldByIndex(idx);
const field_rt = try ctx.runtime_types.fresh();
const field_val = try acc.getFieldByIndex(idx, field_rt);
const rendered = try renderValueRocWithType(ctx, field_val, f.var_);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -537,7 +550,8 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
const count = acc.getElementCount();
var i: usize = 0;
while (i < count) : (i += 1) {
const elem = try acc.getElement(i);
// rt_var undefined (no type info available in this context)
const elem = try acc.getElement(i, undefined);
const rendered = try renderValueRoc(ctx, elem);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -560,7 +574,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
while (i < len) : (i += 1) {
if (roc_list.bytes) |bytes| {
const elem_ptr: *anyopaque = @ptrCast(bytes + i * elem_size);
const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true };
const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true, .rt_var = undefined };
const rendered = try renderValueRoc(ctx, elem_val);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@@ -601,7 +615,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
const field_layout = ctx.layout_store.getLayout(fld.layout);
const base_ptr: [*]u8 = @ptrCast(@alignCast(value.ptr.?));
const field_ptr: *anyopaque = @ptrCast(base_ptr + offset);
const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true };
const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true, .rt_var = undefined };
const rendered = try renderValueRoc(ctx, field_val);
defer gpa.free(rendered);
try out.appendSlice(rendered);

View file

@@ -1763,3 +1763,66 @@ test "comptime eval - to_str on unbound number literal" {
// Flex var defaults to Dec; Dec.to_str is provided by builtins
try testing.expectEqual(@as(usize, 0), result.problems.len());
}
// --- Division by zero tests ---
test "comptime eval - division by zero produces error" {
const src =
\\x = 5 // 0
;
var result = try parseCheckAndEvalModule(src);
defer cleanupEvalModule(&result);
const summary = try result.evaluator.evalAll();
// Should evaluate 1 declaration with no crashes (it's an error, not a crash)
try testing.expectEqual(@as(u32, 1), summary.evaluated);
try testing.expectEqual(@as(u32, 0), summary.crashed);
// Should have 1 problem reported (division by zero)
try testing.expect(result.problems.len() >= 1);
try testing.expect(errorContains(result.problems, "Division by zero"));
}
test "comptime eval - division by zero in expression" {
const src =
\\a = 10
\\b = 0
\\c = a // b
;
var result = try parseCheckAndEvalModule(src);
defer cleanupEvalModule(&result);
const summary = try result.evaluator.evalAll();
// Should evaluate 3 declarations; `c` will cause an error
try testing.expectEqual(@as(u32, 3), summary.evaluated);
// Should have 1 problem reported (division by zero)
try testing.expect(result.problems.len() >= 1);
try testing.expect(errorContains(result.problems, "Division by zero"));
}
test "comptime eval - modulo by zero produces error" {
const src =
\\x = 10 % 0
;
var result = try parseCheckAndEvalModule(src);
defer cleanupEvalModule(&result);
const summary = try result.evaluator.evalAll();
// Should evaluate 1 declaration
try testing.expectEqual(@as(u32, 1), summary.evaluated);
// Should have 1 problem reported (division by zero for modulo)
try testing.expect(result.problems.len() >= 1);
try testing.expect(errorContains(result.problems, "Division by zero"));
}
// Note: "division by zero does not halt other defs" test is skipped because
// the interpreter state after an eval error may not allow continuing evaluation
// of subsequent definitions that share the same evaluation context.

View file

@@ -1387,3 +1387,25 @@ test "if block with local bindings - regression" {
\\else 99
, 0, .no_trace);
}
test "List.len returns proper U64 nominal type for method calls - regression" {
// Regression test for InvalidMethodReceiver when calling methods on List.len result
// Bug report: `n = List.len([]); _str = n.to_str()` crashed with InvalidMethodReceiver
// The issue was that List.len created a fresh runtime type variable instead of using
// the return_rt_var parameter, which prevented method resolution from finding the
// U64 nominal type information needed to look up .to_str()
try runExpectStr(
\\{
\\ n = List.len([])
\\ n.to_str()
\\}
, "0", .no_trace);
// Also test with non-empty list
try runExpectStr(
\\{
\\ n = List.len([1, 2, 3])
\\ n.to_str()
\\}
, "3", .no_trace);
}

View file

@@ -331,7 +331,8 @@ pub fn runExpectTuple(src: []const u8, expected_elements: []const ExpectedElemen
for (expected_elements) |expected_element| {
// Get the element at the specified index
const element = try tuple_accessor.getElement(@intCast(expected_element.index));
// Use the result's rt_var since we're accessing elements of the evaluated expression
const element = try tuple_accessor.getElement(@intCast(expected_element.index), result.rt_var);
// Check if this is an integer or Dec
try std.testing.expect(element.layout.tag == .scalar);
@@ -397,6 +398,7 @@ pub fn runExpectRecord(src: []const u8, expected_fields: []const ExpectedField,
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
.rt_var = result.rt_var, // use result's rt_var for field access
};
// Check if this is an integer or Dec
const int_val = if (field_layout.data.scalar.tag == .int) blk: {
@@ -453,7 +455,8 @@ pub fn runExpectListI64(src: []const u8, expected_elements: []const i64, should_
try std.testing.expectEqual(expected_elements.len, list_accessor.len());
for (expected_elements, 0..) |expected_val, i| {
const element = try list_accessor.getElement(i);
// Use the result's rt_var since we're accessing elements of the evaluated expression
const element = try list_accessor.getElement(i, result.rt_var);
// Check if this is an integer
try std.testing.expect(element.layout.tag == .scalar);

View file

@@ -17,10 +17,10 @@ test "Stack.alloca basic allocation" {
var stack = try Stack.initCapacity(std.testing.allocator, 1024);
defer stack.deinit();
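// .@"1" is the alignment enum's named 1-byte value, an explicit replacement
// for the banned @enumFromInt(0) (see CheckEnumFromIntZeroStep in build.zig).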
const ptr1 = try stack.alloca(10, @enumFromInt(0));
const ptr1 = try stack.alloca(10, .@"1");
try std.testing.expectEqual(@as(u32, 10), stack.used);
const ptr2 = try stack.alloca(20, @enumFromInt(0));
const ptr2 = try stack.alloca(20, .@"1");
try std.testing.expectEqual(@as(u32, 30), stack.used);
// The pointers should be different
@@ -42,7 +42,7 @@ test "Stack.alloca with alignment" {
// Create initial misalignment
if (misalign > 0) {
_ = try stack.alloca(@intCast(misalign), @enumFromInt(0));
_ = try stack.alloca(@intCast(misalign), .@"1");
}
// Test each alignment with the current misalignment
@@ -70,7 +70,7 @@ test "Stack.alloca with alignment" {
stack.used = 0;
for (alignments) |alignment| {
// Create some misalignment
_ = try stack.alloca(3, @enumFromInt(0));
_ = try stack.alloca(3, .@"1");
const before_used = stack.used;
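// The alignment enum's backing value is log2 of the byte alignment here, so a
// computed @enumFromInt is still fine; the build check only bans the literal 0.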
const ptr = try stack.alloca(alignment * 2, @enumFromInt(std.math.log2_int(u32, alignment)));
@@ -88,10 +88,10 @@ test "Stack.alloca overflow" {
defer stack.deinit();
// This should succeed
_ = try stack.alloca(50, @enumFromInt(0));
_ = try stack.alloca(50, .@"1");
// This should fail (would total 150 bytes)
try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, @enumFromInt(0)));
try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, .@"1"));
// Stack should still be in valid state
try std.testing.expectEqual(@as(u32, 50), stack.used);
@@ -102,14 +102,14 @@ test "Stack.restore" {
defer stack.deinit();
const checkpoint = stack.next();
_ = try stack.alloca(100, @enumFromInt(0));
_ = try stack.alloca(100, .@"1");
try std.testing.expectEqual(@as(u32, 100), stack.used);
stack.restore(checkpoint);
try std.testing.expectEqual(@as(u32, 0), stack.used);
// Allocate again after restore
const ptr1 = try stack.alloca(50, @enumFromInt(0));
const ptr1 = try stack.alloca(50, .@"1");
try std.testing.expectEqual(@intFromPtr(checkpoint), @intFromPtr(ptr1));
}
@@ -120,7 +120,7 @@ test "Stack.isEmpty" {
try std.testing.expect(stack.isEmpty());
try std.testing.expectEqual(@as(u32, 100), stack.available());
_ = try stack.alloca(30, @enumFromInt(0));
_ = try stack.alloca(30, .@"1");
try std.testing.expect(!stack.isEmpty());
try std.testing.expectEqual(@as(u32, 70), stack.available());
}
@@ -129,8 +129,8 @@ test "Stack zero-size allocation" {
var stack = try Stack.initCapacity(std.testing.allocator, 100);
defer stack.deinit();
const ptr1 = try stack.alloca(0, @enumFromInt(0));
const ptr2 = try stack.alloca(0, @enumFromInt(0));
const ptr1 = try stack.alloca(0, .@"1");
const ptr2 = try stack.alloca(0, .@"1");
// Zero-size allocations should return the same pointer
try std.testing.expectEqual(@intFromPtr(ptr1), @intFromPtr(ptr2));
@@ -147,8 +147,8 @@ test "Stack memory is aligned to max_roc_alignment" {
try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value);
// Also verify after some allocations
_ = try stack.alloca(100, @enumFromInt(0));
_ = try stack.alloca(200, @enumFromInt(0));
_ = try stack.alloca(100, .@"1");
_ = try stack.alloca(200, .@"1");
// The start pointer should still be aligned
try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value);

View file

@@ -34,7 +34,7 @@
testcmd() {
zig build snapshot && zig build test
}
export -f testscmd
export -f testcmd
fmtcmd() {
zig build fmt

View file

@@ -1353,6 +1353,11 @@ const Formatter = struct {
region = i.region;
try fmt.formatIdent(i.ident_tok, null);
},
.var_ident => |i| {
region = i.region;
try fmt.pushAll("var ");
try fmt.formatIdent(i.ident_tok, null);
},
.tag => |t| {
region = t.region;

View file

@@ -100,9 +100,9 @@ fn readFdInfoFromFile(allocator: std.mem.Allocator) CoordinationError!FdInfo {
};
const dir_basename = std.fs.path.basename(exe_dir);
// Verify it has the expected prefix
if (!std.mem.startsWith(u8, dir_basename, "roc-tmp-")) {
std.log.err("Unexpected directory name: expected 'roc-tmp-*', got '{s}'", .{dir_basename});
// Verify it has the expected prefix (roc-{pid} or roc-{pid}-{suffix})
if (!std.mem.startsWith(u8, dir_basename, "roc-")) {
std.log.err("Unexpected directory name: expected 'roc-*', got '{s}'", .{dir_basename});
return error.FdInfoReadFailed;
}

View file

@@ -310,11 +310,21 @@ pub fn mapMemory(handle: Handle, size: usize, base_addr: ?*anyopaque) SharedMemo
posix.MAP_SHARED,
handle,
0,
) orelse {
std.log.err("POSIX: Failed to map shared memory (size: {})", .{size});
);
// mmap returns MAP_FAILED (which is (void *)-1) on error, not null
// Need to check for both null and MAP_FAILED
if (ptr == null) {
std.log.err("POSIX: Failed to map shared memory - null returned (size: {})", .{size});
return error.MmapFailed;
};
return ptr;
}
const ptr_value = @intFromPtr(ptr.?);
if (ptr_value == std.math.maxInt(usize)) {
// This is MAP_FAILED (-1 cast to pointer)
const errno = std.c._errno().*;
std.log.err("POSIX: Failed to map shared memory - MAP_FAILED (size: {}, fd: {}, errno: {})", .{ size, handle, errno });
return error.MmapFailed;
}
return ptr.?;
},
else => return error.UnsupportedPlatform,
}

View file

@@ -1242,6 +1242,11 @@ pub const Pattern = union(enum) {
ident_tok: Token.Idx,
region: TokenizedRegion,
},
/// A mutable variable binding in a pattern, e.g., `var $x` in `|var $x, y|`
var_ident: struct {
ident_tok: Token.Idx,
region: TokenizedRegion,
},
tag: struct {
tag_tok: Token.Idx,
args: Pattern.Span,
@@ -1305,6 +1310,7 @@ pub const Pattern = union(enum) {
pub fn to_tokenized_region(self: @This()) TokenizedRegion {
return switch (self) {
.ident => |p| p.region,
.var_ident => |p| p.region,
.tag => |p| p.region,
.int => |p| p.region,
.frac => |p| p.region,
@@ -1339,6 +1345,21 @@ pub const Pattern = union(enum) {
try tree.endNode(begin, attrs);
},
.var_ident => |ident| {
const begin = tree.beginNode();
try tree.pushStaticAtom("p-var-ident");
try ast.appendRegionInfoToSexprTree(env, tree, ident.region);
// Add raw attribute
const raw_begin = tree.beginNode();
try tree.pushStaticAtom("raw");
try tree.pushString(ast.resolve(ident.ident_tok));
const attrs2 = tree.beginNode();
try tree.endNode(raw_begin, attrs2);
const attrs = tree.beginNode();
try tree.endNode(begin, attrs);
},
.tag => |tag| {
const begin = tree.beginNode();
try tree.pushStaticAtom("p-tag");

View file

@@ -250,6 +250,10 @@ pub const Tag = enum {
/// * lhs - LHS DESCRIPTION
/// * rhs - RHS DESCRIPTION
ident_patt,
/// Mutable variable binding in pattern
/// Example: `var $x` in `|var $x, y|`
/// * main_token - the identifier token
var_ident_patt,
/// DESCRIPTION
/// Example: EXAMPLE
/// * lhs - LHS DESCRIPTION

View file

@@ -21,6 +21,9 @@ const sexpr = base.sexpr;
/// packing optional data into u32 fields where 0 would otherwise be ambiguous.
const OPTIONAL_VALUE_OFFSET: u32 = 1;
/// The root node is always stored at index 0 in the node list.
pub const root_node_idx: Node.List.Idx = .first;
const NodeStore = @This();
gpa: std.mem.Allocator,
@@ -46,7 +49,7 @@ pub const AST_HEADER_NODE_COUNT = 6;
/// Count of the statement nodes in the AST
pub const AST_STATEMENT_NODE_COUNT = 13;
/// Count of the pattern nodes in the AST
pub const AST_PATTERN_NODE_COUNT = 14;
pub const AST_PATTERN_NODE_COUNT = 15;
/// Count of the type annotation nodes in the AST
pub const AST_TYPE_ANNO_NODE_COUNT = 10;
/// Count of the expression nodes in the AST
@@ -166,7 +169,7 @@ pub fn addMalformed(store: *NodeStore, comptime T: type, reason: Diagnostic.Tag,
/// Adds a file node to the store.
pub fn addFile(store: *NodeStore, file: AST.File) std.mem.Allocator.Error!void {
try store.extra_data.append(store.gpa, @intFromEnum(file.header));
store.nodes.set(@enumFromInt(0), .{
store.nodes.set(root_node_idx, .{
.tag = .root,
.main_token = 0,
.data = .{ .lhs = file.statements.span.start, .rhs = file.statements.span.len },
@@ -478,6 +481,11 @@ pub fn addPattern(store: *NodeStore, pattern: AST.Pattern) std.mem.Allocator.Err
node.region = i.region;
node.main_token = i.ident_tok;
},
.var_ident => |i| {
node.tag = .var_ident_patt;
node.region = i.region;
node.main_token = i.ident_tok;
},
.tag => |t| {
const data_start = @as(u32, @intCast(store.extra_data.items.len));
try store.extra_data.append(store.gpa, t.args.span.len);
@@ -1014,7 +1022,7 @@ pub fn addTypeAnno(store: *NodeStore, anno: AST.TypeAnno) std.mem.Allocator.Erro
/// TODO
pub fn getFile(store: *const NodeStore) AST.File {
const node = store.nodes.get(@enumFromInt(0));
const node = store.nodes.get(root_node_idx);
const header_ed_idx = @as(usize, @intCast(node.data.lhs + node.data.rhs));
const header = store.extra_data.items[header_ed_idx];
return .{
@@ -1387,6 +1395,12 @@ pub fn getPattern(store: *const NodeStore, pattern_idx: AST.Pattern.Idx) AST.Pat
.region = node.region,
} };
},
.var_ident_patt => {
return .{ .var_ident = .{
.ident_tok = node.main_token,
.region = node.region,
} };
},
.tag_patt => {
const args_start = node.data.lhs;

View file

@@ -197,7 +197,7 @@ pub fn parseFile(self: *Parser) Error!void {
self.store.emptyScratch();
try self.store.addFile(.{
.header = @as(AST.Header.Idx, @enumFromInt(0)),
.header = undefined, // overwritten below after parseHeader()
.statements = AST.Statement.Span{ .span = base.DataSpan.empty() },
.region = AST.TokenizedRegion.empty(),
});
@@ -1452,6 +1452,19 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) Error!AST.Pattern
.region = .{ .start = start, .end = self.pos },
} });
},
.KwVar => {
// Mutable variable binding in pattern, e.g., `var $x`
self.advance();
if (self.peek() != .LowerIdent) {
return try self.pushMalformed(AST.Pattern.Idx, .var_must_have_ident, self.pos);
}
const ident_tok = self.pos;
self.advance();
pattern = try self.store.addPattern(.{ .var_ident = .{
.ident_tok = ident_tok,
.region = .{ .start = start, .end = self.pos },
} });
},
.NamedUnderscore => {
self.advance();
pattern = try self.store.addPattern(.{ .ident = .{
@@ -2069,9 +2082,6 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) Error!AST.Expr.Idx {
},
}
lookahead_pos += 1;
// Limit lookahead to prevent infinite loops
if (lookahead_pos > saved_pos + 100) break;
}
}

View file

@@ -281,6 +281,12 @@ test "NodeStore round trip - Pattern" {
.region = rand_region(),
},
});
try patterns.append(gpa, AST.Pattern{
.var_ident = .{
.ident_tok = rand_token_idx(),
.region = rand_region(),
},
});
try patterns.append(gpa, AST.Pattern{
.tag = .{
.args = AST.Pattern.Span{ .span = rand_span() },

View file

@@ -855,16 +855,7 @@ pub const Repl = struct {
try self.generateAndStoreDebugHtml(module_env, final_expr_idx);
}
const output = blk: {
if (result.rt_var) |rt_var| {
break :blk try interpreter.renderValueRocWithType(result, rt_var, self.roc_ops);
}
const expr_ct_var = can.ModuleEnv.varFrom(final_expr_idx);
const expr_rt_var = interpreter.translateTypeVar(module_env, expr_ct_var) catch {
break :blk try interpreter.renderValueRoc(result);
};
break :blk try interpreter.renderValueRocWithType(result, expr_rt_var, self.roc_ops);
};
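// StackValue now always carries its rt_var, so the old fallback path is unnecessary.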
const output = try interpreter.renderValueRocWithType(result, result.rt_var, self.roc_ops);
result.decref(&interpreter.runtime_layout_store, self.roc_ops);
return .{ .expression = output };

View file

@@ -9,6 +9,7 @@ const std = @import("std");
const base = @import("base");
const types_mod = @import("types.zig");
const import_mapping_mod = @import("import_mapping.zig");
const debug = @import("debug.zig");
const TypesStore = @import("store.zig").Store;
const Allocator = std.mem.Allocator;
@@ -610,7 +611,9 @@ fn gatherRecordFields(self: *TypeWriter, fields: RecordField.SafeMultiList.Range
}
var ext = initial_ext;
var guard = debug.IterationGuard.init("TypeWriter.gatherRecordFields");
while (true) {
guard.tick();
const resolved = self.types.resolveVar(ext);
switch (resolved.desc.content) {
.flex => |flex| {

70
src/types/debug.zig Normal file
View file

@@ -0,0 +1,70 @@
//! Debug utilities for type checking
//!
//! These utilities are only active in debug builds and help catch infinite loops
//! in type-checking code by limiting the number of iterations.
const std = @import("std");
const builtin = @import("builtin");
/// Maximum number of iterations before panicking in debug builds.
/// This is set high enough to handle legitimate complex types but low enough
/// to catch infinite loops quickly during development.
pub const MAX_ITERATIONS: u32 = 100_000;
/// A debug-only iteration guard that panics if a loop exceeds MAX_ITERATIONS.
/// In release builds, this is a no-op.
///
/// Usage:
/// ```
/// var guard = IterationGuard.init("myFunction");
/// while (condition) {
/// guard.tick();
/// // ... loop body
/// }
/// ```
pub const IterationGuard = struct {
count: u32,
location: []const u8,
const Self = @This();
pub fn init(location: []const u8) Self {
return .{
.count = 0,
.location = location,
};
}
/// Call this at the start of each loop iteration.
/// In debug builds, panics if MAX_ITERATIONS is exceeded.
/// In release builds, this is a no-op that should be optimized away.
pub inline fn tick(self: *Self) void {
if (builtin.mode == .Debug) {
self.count += 1;
if (self.count > MAX_ITERATIONS) {
std.debug.panic(
"Infinite loop detected in type-checking at '{s}' after {d} iterations. " ++
"This usually indicates a cyclic type or bug in the type checker.",
.{ self.location, self.count },
);
}
}
}
/// Returns the current iteration count (useful for debugging).
pub fn getCount(self: *const Self) u32 {
return self.count;
}
};
test "IterationGuard does not panic for normal iteration counts" {
var guard = IterationGuard.init("test");
var i: u32 = 0;
while (i < 1000) : (i += 1) {
guard.tick();
}
// In release builds, tick() is a no-op so count stays at 0.
// In debug builds, count should be 1000.
const expected: u32 = if (builtin.mode == .Debug) 1000 else 0;
try std.testing.expectEqual(expected, guard.getCount());
}

View file

@@ -205,12 +205,18 @@ pub const Generalizer = struct {
if (@intFromEnum(resolved.desc.rank) < rank_to_generalize_int) {
// Rank was lowered during adjustment - variable escaped
try var_pool.addVarToRank(resolved.var_, resolved.desc.rank);
} else if (self.hasNumeralConstraint(resolved.desc.content)) {
// Flex var with numeric constraint - don't generalize.
} else if (rank_to_generalize_int == @intFromEnum(Rank.top_level) and self.hasNumeralConstraint(resolved.desc.content)) {
// Flex var with numeric constraint at TOP LEVEL - don't generalize.
// This ensures numeric literals like `x = 15` stay monomorphic so that
// later usage like `I64.to_str(x)` can constrain x to I64.
// Without this, let-generalization would create a fresh copy at each use,
// leaving the original as an unconstrained flex var that defaults to Dec.
//
// However, at rank > top_level (inside lambdas OR inside nested blocks),
// we DO generalize numeric literals. This allows:
// - Polymorphic functions like `|a| a + 1` to work correctly
// - Numeric literals in blocks like `{ n = 42; use_as_i64(n); use_as_dec(n) }`
// to be used polymorphically within that block's scope.
try var_pool.addVarToRank(resolved.var_, resolved.desc.rank);
} else {
// Rank unchanged - safe to generalize

View file

@@ -12,6 +12,7 @@ pub const store = @import("store.zig");
pub const instantiate = @import("instantiate.zig");
pub const generalize = @import("generalize.zig");
pub const import_mapping = @import("import_mapping.zig");
pub const debug = @import("debug.zig");
pub const TypeWriter = @import("TypeWriter.zig");

View file

@@ -7,6 +7,7 @@ const collections = @import("collections");
const serialization = @import("serialization");
const types = @import("types.zig");
const debug = @import("debug.zig");
const Allocator = std.mem.Allocator;
const Desc = types.Descriptor;
@@ -588,7 +589,9 @@ pub const Store = struct {
if (initial_var != redirected_root_var) {
var compressed_slot_idx = Self.varToSlotIdx(initial_var);
var compressed_slot: Slot = self.slots.get(compressed_slot_idx);
var guard = debug.IterationGuard.init("resolveVarAndCompressPath");
while (true) {
guard.tick();
switch (compressed_slot) {
.redirect => |next_redirect_var| {
self.slots.set(compressed_slot_idx, Slot{ .redirect = redirected_root_var });
@@ -610,8 +613,10 @@ pub const Store = struct {
var redirected_slot: Slot = self.slots.get(redirected_slot_idx);
var is_root = true;
var guard = debug.IterationGuard.init("resolveVar");
while (true) {
guard.tick();
switch (redirected_slot) {
.redirect => |next_redirect_var| {
redirected_slot_idx = Self.varToSlotIdx(next_redirect_var);
@@ -1006,7 +1011,10 @@ const SlotStore = struct {
}
/// A type-safe index into the store
const Idx = enum(u32) { _ };
const Idx = enum(u32) {
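// Naming index 0 lets call sites write .first instead of the banned @enumFromInt(0).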
first = 0,
_,
};
};
/// Represents a store of descriptors
@@ -1109,7 +1117,10 @@ const DescStore = struct {
/// A type-safe index into the store
/// This type is made public below
const Idx = enum(u32) { _ };
const Idx = enum(u32) {
first = 0,
_,
};
};
/// An index into the desc store
@@ -1385,13 +1396,27 @@ test "SlotStore.Serialized roundtrip" {
const gpa = std.testing.allocator;
const CompactWriter = collections.CompactWriter;
// Use a real Store to get real Var and DescStore.Idx values
var store = try Store.init(gpa);
defer store.deinit();
// Create real type variables - fresh() creates a flex var with a root slot
const var_a = try store.fresh();
const var_b = try store.fresh();
const var_c = try store.fresh();
// Get the DescStore.Idx from the root slots
const desc_idx_a = store.getSlot(var_a).root;
const desc_idx_c = store.getSlot(var_c).root;
// Create a separate SlotStore for serialization testing
var slot_store = try SlotStore.init(gpa, 4);
defer slot_store.deinit(gpa);
// Add some slots
_ = try slot_store.insert(gpa, .{ .root = @enumFromInt(100) });
_ = try slot_store.insert(gpa, .{ .redirect = @enumFromInt(0) });
_ = try slot_store.insert(gpa, .{ .root = @enumFromInt(200) });
// Add slots and capture returned indices
const slot_a = try slot_store.insert(gpa, .{ .root = desc_idx_a });
const slot_b = try slot_store.insert(gpa, .{ .redirect = var_b });
const slot_c = try slot_store.insert(gpa, .{ .root = desc_idx_c });
// Create temp file
var tmp_dir = std.testing.tmpDir(.{});
@@ -1424,11 +1449,11 @@ test "SlotStore.Serialized roundtrip" {
const deser_ptr = @as(*SlotStore.Serialized, @ptrCast(@alignCast(buffer.ptr)));
const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Verify
// Verify using captured indices
try std.testing.expectEqual(@as(u64, 3), deserialized.backing.len());
try std.testing.expectEqual(Slot{ .root = @enumFromInt(100) }, deserialized.get(@enumFromInt(0)));
try std.testing.expectEqual(Slot{ .redirect = @enumFromInt(0) }, deserialized.get(@enumFromInt(1)));
try std.testing.expectEqual(Slot{ .root = @enumFromInt(200) }, deserialized.get(@enumFromInt(2)));
try std.testing.expectEqual(Slot{ .root = desc_idx_a }, deserialized.get(slot_a));
try std.testing.expectEqual(Slot{ .redirect = var_b }, deserialized.get(slot_b));
try std.testing.expectEqual(Slot{ .root = desc_idx_c }, deserialized.get(slot_c));
}
test "DescStore.Serialized roundtrip" {
@@ -1438,7 +1463,7 @@ test "DescStore.Serialized roundtrip" {
var desc_store = try DescStore.init(gpa, 4);
defer desc_store.deinit(gpa);
// Add some descriptors
// Add some descriptors and capture returned indices
const desc1 = Descriptor{
.content = Content{ .flex = Flex.init() },
.rank = Rank.generalized,
@@ -1450,8 +1475,8 @@ test "DescStore.Serialized roundtrip" {
.mark = Mark.visited,
};
_ = try desc_store.insert(gpa, desc1);
_ = try desc_store.insert(gpa, desc2);
const desc_idx_1 = try desc_store.insert(gpa, desc1);
const desc_idx_2 = try desc_store.insert(gpa, desc2);
// Create temp file
var tmp_dir = std.testing.tmpDir(.{});
@@ -1489,10 +1514,10 @@ test "DescStore.Serialized roundtrip" {
const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Note: deserialize already handles relocation, don't call relocate again
// Verify
// Verify using captured indices
try std.testing.expectEqual(@as(usize, 2), deserialized.backing.items.len);
try std.testing.expectEqual(desc1, deserialized.get(@enumFromInt(0)));
try std.testing.expectEqual(desc2, deserialized.get(@enumFromInt(1)));
try std.testing.expectEqual(desc1, deserialized.get(desc_idx_1));
try std.testing.expectEqual(desc2, deserialized.get(desc_idx_2));
}
test "Store.Serialized roundtrip" {

View file

@@ -5,9 +5,9 @@ import pf.Stdout
# Test: both Exit and CustomError in different branches
# This triggers the type error
main! = |args| {
if List.is_empty(args) {
Err(Exit(42))
} else {
Err(CustomError)
}
if List.is_empty(args) {
Err(Exit(42))
} else {
Err(CustomError)
}
}

View file

@@ -13,5 +13,8 @@ main_for_host! = |args|
match main!(args) {
Ok({}) => 0
Err(Exit(code)) => code
_ => 1
Err(other) => {
Stderr.line!("exited with other error: ${inspect other})
1
}
}

View file

@@ -0,0 +1,14 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
# Use a mutable variable to prevent compile-time evaluation
main! = || {
# The var keyword creates a runtime variable that can't be constant-folded
var $divisor = 0
# This will trigger a division by zero error at runtime
result = 42 / $divisor
Stdout.line!("Result: ${U64.to_str(result)}")
}

View file

@@ -1,13 +1,233 @@
///! Platform host that tests effectful functions writing to stdout and stderr.
//! Platform host for testing effectful Roc applications.
//!
//! This host provides stdin/stdout/stderr effects and includes a test mode for
//! verifying IO behavior without performing actual syscalls.
//!
//! ## Test Mode
//!
//! Run with `--test <spec>` to simulate IO and verify behavior:
//! ```
//! ./zig-out/bin/roc app.roc -- --test "1>Hello, world!"
//! ```
//!
//! Spec format uses pipe-separated operations:
//! - `0<input` - provide "input" as stdin
//! - `1>output` - expect "output" on stdout
//! - `2>output` - expect "output" on stderr
//!
//! Example with multiple operations:
//! ```
//! --test "0<user input|1>Before stdin|1>After stdin"
//! ```
//!
//! Use `--test-verbose <spec>` for detailed output during test execution.
//!
//! Exit codes:
//! - 0: All expectations matched in order
//! - 1: Test failed (mismatch, missing output, extra output, or invalid spec)
const std = @import("std");
const builtin = @import("builtin");
const builtins = @import("builtins");
const build_options = @import("build_options");
const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined;
const trace_refcount = build_options.trace_refcount;
/// Error message to display on stack overflow in a Roc program
const STACK_OVERFLOW_MESSAGE = "\nThis Roc application overflowed its stack memory and crashed.\n\n";
/// Callback for stack overflow in a Roc program
fn handleRocStackOverflow() noreturn {
if (comptime builtin.os.tag == .windows) {
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null);
kernel32.ExitProcess(134);
} else if (comptime builtin.os.tag != .wasi) {
_ = posix.write(posix.STDERR_FILENO, STACK_OVERFLOW_MESSAGE) catch {};
posix.exit(134);
} else {
std.process.exit(134);
}
}
/// Callback for access violation in a Roc program
fn handleRocAccessViolation(fault_addr: usize) noreturn {
if (comptime builtin.os.tag == .windows) {
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
var addr_buf: [18]u8 = undefined;
const addr_str = builtins.handlers.formatHex(fault_addr, &addr_buf);
const msg1 = "\nSegmentation fault (SIGSEGV) in this Roc program.\nFault address: ";
const msg2 = "\n\n";
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, msg1.ptr, msg1.len, &bytes_written, null);
_ = kernel32.WriteFile(stderr_handle, addr_str.ptr, @intCast(addr_str.len), &bytes_written, null);
_ = kernel32.WriteFile(stderr_handle, msg2.ptr, msg2.len, &bytes_written, null);
kernel32.ExitProcess(139);
} else {
// POSIX (and WASI fallback)
const msg = "\nSegmentation fault (SIGSEGV) in this Roc program.\nFault address: ";
_ = posix.write(posix.STDERR_FILENO, msg) catch {};
var addr_buf: [18]u8 = undefined;
const addr_str = builtins.handlers.formatHex(fault_addr, &addr_buf);
_ = posix.write(posix.STDERR_FILENO, addr_str) catch {};
_ = posix.write(posix.STDERR_FILENO, "\n\n") catch {};
posix.exit(139);
}
}
/// Error message to display on division by zero in a Roc program
const DIVISION_BY_ZERO_MESSAGE = "\nThis Roc application divided by zero and crashed.\n\n";
/// Callback for arithmetic errors (division by zero) in a Roc program
fn handleRocArithmeticError() noreturn {
if (comptime builtin.os.tag == .windows) {
const DWORD = u32;
const HANDLE = ?*anyopaque;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const kernel32 = struct {
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
};
const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE);
var bytes_written: DWORD = 0;
_ = kernel32.WriteFile(stderr_handle, DIVISION_BY_ZERO_MESSAGE.ptr, DIVISION_BY_ZERO_MESSAGE.len, &bytes_written, null);
kernel32.ExitProcess(136);
} else if (comptime builtin.os.tag != .wasi) {
_ = posix.write(posix.STDERR_FILENO, DIVISION_BY_ZERO_MESSAGE) catch {};
posix.exit(136); // 128 + 8 (SIGFPE)
} else {
std.process.exit(136);
}
}
/// Type of IO operation in test spec
const EffectType = enum(u8) {
stdin_input, // 0<
stdout_expect, // 1>
stderr_expect, // 2>
};
/// A single entry in the test spec
const SpecEntry = struct {
effect_type: EffectType,
value: []const u8,
spec_line: usize, // For error reporting
};
/// Test state for simulated IO mode
const TestState = struct {
enabled: bool,
verbose: bool,
entries: []const SpecEntry,
current_index: usize,
failed: bool,
failure_info: ?FailureInfo,
const FailureInfo = struct {
expected_type: EffectType,
expected_value: []const u8,
actual_type: EffectType,
actual_value: []const u8,
spec_line: usize,
};
fn init() TestState {
return .{
.enabled = false,
.verbose = false,
.entries = &.{},
.current_index = 0,
.failed = false,
.failure_info = null,
};
}
};
/// Parse error for invalid spec format
const ParseError = error{
InvalidSpecFormat,
OutOfMemory,
};
/// Parse test spec string into array of SpecEntry
/// Format: "0<input|1>output|2>error" (pipe-separated)
/// Returns error if any segment doesn't start with a valid pattern (0<, 1>, 2>)
fn parseTestSpec(allocator: std.mem.Allocator, spec: []const u8) ParseError![]SpecEntry {
var entries = std.ArrayList(SpecEntry).initCapacity(allocator, 8) catch return ParseError.OutOfMemory;
errdefer entries.deinit(allocator);
var line_num: usize = 1;
// Split on pipe character
var iter = std.mem.splitScalar(u8, spec, '|');
while (iter.next()) |segment| {
defer line_num += 1;
// Skip empty segments (e.g., trailing pipe)
if (segment.len == 0) continue;
// Check for valid pattern prefix
if (segment.len < 2) {
const stderr_file: std.fs.File = .stderr();
stderr_file.writeAll("Error: Invalid spec segment '") catch {};
stderr_file.writeAll(segment) catch {};
stderr_file.writeAll("' - must start with 0<, 1>, or 2>\n") catch {};
return ParseError.InvalidSpecFormat;
}
const effect_type: EffectType = blk: {
if (segment[0] == '0' and segment[1] == '<') break :blk .stdin_input;
if (segment[0] == '1' and segment[1] == '>') break :blk .stdout_expect;
if (segment[0] == '2' and segment[1] == '>') break :blk .stderr_expect;
// Invalid pattern - report error
const stderr_file: std.fs.File = .stderr();
stderr_file.writeAll("Error: Invalid spec segment '") catch {};
stderr_file.writeAll(segment) catch {};
stderr_file.writeAll("' - must start with 0<, 1>, or 2>\n") catch {};
return ParseError.InvalidSpecFormat;
};
entries.append(allocator, .{
.effect_type = effect_type,
.value = segment[2..],
.spec_line = line_num,
}) catch return ParseError.OutOfMemory;
}
return entries.toOwnedSlice(allocator) catch ParseError.OutOfMemory;
}
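// A minimal illustrative test of the spec parser above; a sketch that assumes
// this host file is also compiled under `zig test` so std.testing is available.
test "parseTestSpec splits pipe-separated segments" {
    const gpa = std.testing.allocator;
    const entries = try parseTestSpec(gpa, "0<hello|1>Hi there|2>oops");
    defer gpa.free(entries);
    try std.testing.expectEqual(@as(usize, 3), entries.len);
    try std.testing.expectEqual(EffectType.stdin_input, entries[0].effect_type);
    try std.testing.expectEqualStrings("hello", entries[0].value);
    try std.testing.expectEqual(EffectType.stdout_expect, entries[1].effect_type);
    try std.testing.expectEqual(@as(usize, 2), entries[1].spec_line);
    try std.testing.expectEqual(EffectType.stderr_expect, entries[2].effect_type);
    try std.testing.expectEqualStrings("oops", entries[2].value);
}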
/// Host environment - contains GeneralPurposeAllocator for leak detection
const HostEnv = struct {
gpa: std.heap.GeneralPurposeAllocator(.{}),
test_state: TestState,
};
/// Roc allocation function with size-tracking metadata
@@ -163,16 +383,47 @@ fn __main() callconv(.c) void {}
// C compatible main for runtime
fn main(argc: c_int, argv: [*][*:0]u8) callconv(.c) c_int {
_ = argc;
_ = argv;
platform_main() catch |err| {
const stderr: std.fs.File = .stderr();
stderr.writeAll("HOST ERROR: ") catch {};
stderr.writeAll(@errorName(err)) catch {};
stderr.writeAll("\n") catch {};
// Parse --test or --test-verbose argument
var test_spec: ?[]const u8 = null;
var test_verbose: bool = false;
var i: usize = 1;
const arg_count: usize = @intCast(argc);
const stderr_file: std.fs.File = .stderr();
while (i < arg_count) : (i += 1) {
const arg = std.mem.span(argv[i]);
if (std.mem.eql(u8, arg, "--test-verbose")) {
if (i + 1 < arg_count) {
i += 1;
test_spec = std.mem.span(argv[i]);
test_verbose = true;
} else {
stderr_file.writeAll("Error: --test-verbose requires a spec argument\n") catch {};
return 1;
}
} else if (std.mem.eql(u8, arg, "--test")) {
if (i + 1 < arg_count) {
i += 1;
test_spec = std.mem.span(argv[i]);
} else {
stderr_file.writeAll("Error: --test requires a spec argument\n") catch {};
return 1;
}
} else if (arg.len >= 2 and arg[0] == '-' and arg[1] == '-') {
stderr_file.writeAll("Error: unknown flag '") catch {};
stderr_file.writeAll(arg) catch {};
stderr_file.writeAll("'\n") catch {};
stderr_file.writeAll("Usage: <app> [--test <spec>] [--test-verbose <spec>]\n") catch {};
return 1;
}
}
const exit_code = platform_main(test_spec, test_verbose) catch |err| {
stderr_file.writeAll("HOST ERROR: ") catch {};
stderr_file.writeAll(@errorName(err)) catch {};
stderr_file.writeAll("\n") catch {};
return 1;
};
return 0;
return exit_code;
}
// Use the actual RocStr from builtins instead of defining our own
@@ -182,14 +433,69 @@ const RocStr = builtins.str.RocStr;
/// Follows RocCall ABI: (ops, ret_ptr, args_ptr)
/// Returns {} and takes Str as argument
fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr: *anyopaque) callconv(.c) void {
_ = ops;
_ = ret_ptr; // Return value is {} which is zero-sized
// Arguments struct for single Str parameter
const Args = extern struct { str: RocStr };
const args: *Args = @ptrCast(@alignCast(args_ptr));
const message = args.str.asSlice();
const host: *HostEnv = @ptrCast(@alignCast(ops.env));
// Test mode: verify output matches expected
if (host.test_state.enabled) {
const stderr_file: std.fs.File = .stderr();
if (host.test_state.current_index < host.test_state.entries.len) {
const entry = host.test_state.entries[host.test_state.current_index];
if (entry.effect_type == .stderr_expect and std.mem.eql(u8, entry.value, message)) {
host.test_state.current_index += 1;
if (host.test_state.verbose) {
stderr_file.writeAll("[OK] stderr: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\"\n") catch {};
}
return; // Match!
}
// Mismatch - must allocate a copy of the message since the RocStr may be freed
const actual_copy = host.gpa.allocator().dupe(u8, message) catch "";
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = entry.effect_type,
.expected_value = entry.value,
.actual_type = .stderr_expect,
.actual_value = actual_copy,
.spec_line = entry.spec_line,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stderr: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\" (expected ") catch {};
stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {};
stderr_file.writeAll(": \"") catch {};
stderr_file.writeAll(entry.value) catch {};
stderr_file.writeAll("\")\n") catch {};
}
} else {
// Extra output not in spec - must allocate a copy of the message
const actual_copy = host.gpa.allocator().dupe(u8, message) catch "";
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = .stderr_expect, // We expected nothing
.expected_value = "",
.actual_type = .stderr_expect,
.actual_value = actual_copy,
.spec_line = 0,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stderr: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\" (unexpected - no more expected operations)\n") catch {};
}
}
return;
}
// Normal mode: write to stderr
const stderr: std.fs.File = .stderr();
stderr.writeAll(message) catch {};
stderr.writeAll("\n") catch {};
@@ -201,19 +507,69 @@ fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt
fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr: *anyopaque) callconv(.c) void {
_ = args_ptr; // Argument is {} which is zero-sized
// Read a line from stdin
const host: *HostEnv = @ptrCast(@alignCast(ops.env));
const result: *RocStr = @ptrCast(@alignCast(ret_ptr));
// Test mode: consume next stdin_input entry from spec
if (host.test_state.enabled) {
const stderr_file: std.fs.File = .stderr();
if (host.test_state.current_index < host.test_state.entries.len) {
const entry = host.test_state.entries[host.test_state.current_index];
if (entry.effect_type == .stdin_input) {
host.test_state.current_index += 1;
result.* = RocStr.fromSlice(entry.value, ops);
if (host.test_state.verbose) {
stderr_file.writeAll("[OK] stdin: \"") catch {};
stderr_file.writeAll(entry.value) catch {};
stderr_file.writeAll("\"\n") catch {};
}
return;
}
// Wrong type - expected stdin but spec has output
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = entry.effect_type,
.expected_value = entry.value,
.actual_type = .stdin_input,
.actual_value = "(stdin read)",
.spec_line = entry.spec_line,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stdin read (expected ") catch {};
stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {};
stderr_file.writeAll(": \"") catch {};
stderr_file.writeAll(entry.value) catch {};
stderr_file.writeAll("\")\n") catch {};
}
} else {
// Ran out of entries - app tried to read more stdin than provided
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = .stdin_input,
.expected_value = "",
.actual_type = .stdin_input,
.actual_value = "(stdin read)",
.spec_line = 0,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stdin read (unexpected - no more expected operations)\n") catch {};
}
}
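// On any test-mode failure, hand the app an empty string so it can keep running.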
result.* = RocStr.empty();
return;
}
// Normal mode: Read a line from stdin
var buffer: [4096]u8 = undefined;
const stdin_file: std.fs.File = .stdin();
const bytes_read = stdin_file.read(&buffer) catch {
// Return empty string on error
const result: *RocStr = @ptrCast(@alignCast(ret_ptr));
result.* = RocStr.empty();
return;
};
// Handle EOF (no bytes read)
if (bytes_read == 0) {
const result: *RocStr = @ptrCast(@alignCast(ret_ptr));
result.* = RocStr.empty();
return;
}
@@ -233,7 +589,6 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr
// Create RocStr from the read line and return it
// RocStr.fromSlice handles allocation internally (either inline for small strings
// or via roc_alloc for big strings with proper refcount tracking)
const result: *RocStr = @ptrCast(@alignCast(ret_ptr));
result.* = RocStr.fromSlice(line, ops);
}
@@ -241,14 +596,69 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr
/// Follows RocCall ABI: (ops, ret_ptr, args_ptr)
/// Returns {} and takes Str as argument
fn hostedStdoutLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr: *anyopaque) callconv(.c) void {
_ = ops;
_ = ret_ptr; // Return value is {} which is zero-sized
// Arguments struct for single Str parameter
const Args = extern struct { str: RocStr };
const args: *Args = @ptrCast(@alignCast(args_ptr));
const message = args.str.asSlice();
const host: *HostEnv = @ptrCast(@alignCast(ops.env));
// Test mode: verify output matches expected
if (host.test_state.enabled) {
const stderr_file: std.fs.File = .stderr();
if (host.test_state.current_index < host.test_state.entries.len) {
const entry = host.test_state.entries[host.test_state.current_index];
if (entry.effect_type == .stdout_expect and std.mem.eql(u8, entry.value, message)) {
host.test_state.current_index += 1;
if (host.test_state.verbose) {
stderr_file.writeAll("[OK] stdout: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\"\n") catch {};
}
return; // Match!
}
// Mismatch - must allocate a copy of the message since the RocStr may be freed
const actual_copy = host.gpa.allocator().dupe(u8, message) catch "";
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = entry.effect_type,
.expected_value = entry.value,
.actual_type = .stdout_expect,
.actual_value = actual_copy,
.spec_line = entry.spec_line,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stdout: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\" (expected ") catch {};
stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {};
stderr_file.writeAll(": \"") catch {};
stderr_file.writeAll(entry.value) catch {};
stderr_file.writeAll("\")\n") catch {};
}
} else {
// Extra output not in spec - must allocate a copy of the message
const actual_copy = host.gpa.allocator().dupe(u8, message) catch "";
host.test_state.failed = true;
host.test_state.failure_info = .{
.expected_type = .stdout_expect, // We expected nothing
.expected_value = "",
.actual_type = .stdout_expect,
.actual_value = actual_copy,
.spec_line = 0,
};
if (host.test_state.verbose) {
stderr_file.writeAll("[FAIL] stdout: \"") catch {};
stderr_file.writeAll(message) catch {};
stderr_file.writeAll("\" (unexpected - no more expected operations)\n") catch {};
}
}
return;
}
// Normal mode: write to stdout
const stdout: std.fs.File = .stdout();
stdout.writeAll(message) catch {};
stdout.writeAll("\n") catch {};
@@ -263,11 +673,36 @@ const hosted_function_ptrs = [_]builtins.host_abi.HostedFn{
};
/// Platform host entrypoint
fn platform_main() !void {
fn platform_main(test_spec: ?[]const u8, test_verbose: bool) !c_int {
// Install signal handlers for stack overflow, access violations, and division by zero
// This allows us to display helpful error messages instead of crashing
_ = builtins.handlers.install(handleRocStackOverflow, handleRocAccessViolation, handleRocArithmeticError);
var host_env = HostEnv{
.gpa = std.heap.GeneralPurposeAllocator(.{}){},
.test_state = TestState.init(),
};
// Parse test spec if provided
if (test_spec) |spec| {
host_env.test_state.entries = try parseTestSpec(host_env.gpa.allocator(), spec);
host_env.test_state.enabled = true;
host_env.test_state.verbose = test_verbose;
}
defer {
// Free duplicated actual_value if allocated (on test failure)
if (host_env.test_state.failure_info) |info| {
if (info.actual_value.len > 0) {
host_env.gpa.allocator().free(info.actual_value);
}
}
// Free test entries if allocated
if (host_env.test_state.entries.len > 0) {
host_env.gpa.allocator().free(host_env.test_state.entries);
}
const leaked = host_env.gpa.deinit();
if (leaked == .leak) {
std.log.err("\x1b[33mMemory leak detected!\x1b[0m", .{});
@@ -298,4 +733,70 @@ fn platform_main() !void {
// causing a segfault if you pass null. This should be changed! Dereferencing
// garbage memory is obviously pointless, and there's no reason we should do it.
roc__main(&roc_ops, @as(*anyopaque, @ptrCast(&ret)), @as(*anyopaque, @ptrCast(&args)));
// Check test results if in test mode
if (host_env.test_state.enabled) {
// Check if test failed or not all entries were consumed
if (host_env.test_state.failed or host_env.test_state.current_index != host_env.test_state.entries.len) {
const stderr_file: std.fs.File = .stderr();
// Print failure info
if (host_env.test_state.failure_info) |info| {
if (info.spec_line == 0) {
// Extra/unexpected output
stderr_file.writeAll("TEST FAILED: Unexpected ") catch {};
stderr_file.writeAll(effectTypeName(info.actual_type)) catch {};
stderr_file.writeAll(" output: \"") catch {};
stderr_file.writeAll(info.actual_value) catch {};
stderr_file.writeAll("\"\n") catch {};
} else {
var buf: [512]u8 = undefined;
const msg = std.fmt.bufPrint(&buf, "TEST FAILED at spec line {d}:\n Expected: {s} \"{s}\"\n Got: {s} \"{s}\"\n", .{
info.spec_line,
effectTypeName(info.expected_type),
info.expected_value,
effectTypeName(info.actual_type),
info.actual_value,
}) catch "TEST FAILED\n";
stderr_file.writeAll(msg) catch {};
}
} else if (host_env.test_state.current_index < host_env.test_state.entries.len) {
// Not all entries were consumed - list what's remaining
const remaining = host_env.test_state.entries.len - host_env.test_state.current_index;
var buf: [256]u8 = undefined;
const msg = std.fmt.bufPrint(&buf, "TEST FAILED: {d} expected IO operation(s) not performed:\n", .{remaining}) catch "TEST FAILED: expected IO operations not performed\n";
stderr_file.writeAll(msg) catch {};
// List up to 5 unconsumed entries
const max_to_show: usize = 5;
var shown: usize = 0;
for (host_env.test_state.entries[host_env.test_state.current_index..]) |entry| {
if (shown >= max_to_show) {
stderr_file.writeAll(" ...\n") catch {};
break;
}
stderr_file.writeAll(" - ") catch {};
stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {};
stderr_file.writeAll(": \"") catch {};
stderr_file.writeAll(entry.value) catch {};
stderr_file.writeAll("\"\n") catch {};
shown += 1;
}
} else {
stderr_file.writeAll("TEST FAILED\n") catch {};
}
return 1;
}
}
return 0;
}
fn effectTypeName(effect_type: EffectType) []const u8 {
return switch (effect_type) {
.stdin_input => "stdin",
.stdout_expect => "stdout",
.stderr_expect => "stderr",
};
}

View file

@@ -0,0 +1,16 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
# This function causes infinite recursion, leading to stack overflow at runtime.
# It cannot be tail-call optimized because there's work after the recursive call.
overflow : I64 -> I64
overflow = |n|
# Prevent tail-call optimization by adding to the result after recursion
overflow(n + 1) + 1
main! = || {
# This will overflow the stack at runtime
result = overflow(0)
Stdout.line!("Result: ${I64.to_str(result)}")
}

View file

@@ -1,13 +1,10 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
import pf.Stdin
str : Str -> Str
str = |s| s
import pf.Stdout
main! = || {
Stdout.line!(str("Before stdin"))
temp = Stdin.line!()
Stdout.line!(str("After stdin"))
Stdout.line!("Before stdin")
_line = Stdin.line!()
Stdout.line!("After stdin")
}

View file

@@ -0,0 +1,15 @@
app [main!] { pf: platform "./platform/main.roc" }
# Regression test: Calling .sublist() method on a List(U8) from "".to_utf8()
# causes a segfault when the variable doesn't have an explicit type annotation.
# Error was: "Roc crashed: Error evaluating from shared memory: InvalidMethodReceiver"
# The bug was that translateTypeVar was using the wrong module (closure's source module)
# instead of the caller's module when translating the return type.
main! = || {
# Test case 1: Method call without type annotation (original bug)
s = "".to_utf8()
_slice = s.sublist({ start: 0, len: 0 })
# Test case 2: Comparing empty list with method result
_ignore = "".to_utf8() == []
}

View file

@@ -0,0 +1,29 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
print! : Str => {}
print! = |msg| msg.split_on("\n").for_each!(Stdout.line!)
fnA! : Str => Try(I64, _)
fnA! = |_input| {
var $x = 1
Ok($x)
}
fnB! : Str => Try(I64, _)
fnB! = |_input| {
var $y = 2
Ok($y)
}
run! = || {
print!("A1: ${fnA!("test")?.to_str()}")
print!("A2: ${fnA!("test")?.to_str()}")
print!("A3: ${fnA!("test")?.to_str()}")
Ok({})
}
main! = || {
_ignore = run!()
}

View file

@@ -0,0 +1,91 @@
# META
~~~ini
description=Numeric let-generalization inside nested block (rank > top_level)
type=expr
~~~
# SOURCE
~~~roc
{
n = 42
a = I64.to_str(n)
b = Dec.to_str(n)
Str.concat(a, b)
}
~~~
# EXPECTED
NIL
# PROBLEMS
NIL
# TOKENS
~~~zig
OpenCurly,
LowerIdent,OpAssign,Int,
LowerIdent,OpAssign,UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,CloseRound,
LowerIdent,OpAssign,UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,CloseRound,
UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,Comma,LowerIdent,CloseRound,
CloseCurly,
EndOfFile,
~~~
# PARSE
~~~clojure
(e-block
(statements
(s-decl
(p-ident (raw "n"))
(e-int (raw "42")))
(s-decl
(p-ident (raw "a"))
(e-apply
(e-ident (raw "I64.to_str"))
(e-ident (raw "n"))))
(s-decl
(p-ident (raw "b"))
(e-apply
(e-ident (raw "Dec.to_str"))
(e-ident (raw "n"))))
(e-apply
(e-ident (raw "Str.concat"))
(e-ident (raw "a"))
(e-ident (raw "b")))))
~~~
# FORMATTED
~~~roc
{
n = 42
a = I64.to_str(n)
b = Dec.to_str(n)
Str.concat(a, b)
}
~~~
# CANONICALIZE
~~~clojure
(e-block
(s-let
(p-assign (ident "n"))
(e-num (value "42")))
(s-let
(p-assign (ident "a"))
(e-call
(e-lookup-external
(builtin))
(e-lookup-local
(p-assign (ident "n")))))
(s-let
(p-assign (ident "b"))
(e-call
(e-lookup-external
(builtin))
(e-lookup-local
(p-assign (ident "n")))))
(e-call
(e-lookup-external
(builtin))
(e-lookup-local
(p-assign (ident "a")))
(e-lookup-local
(p-assign (ident "b")))))
~~~
# TYPES
~~~clojure
(expr (type "Str"))
~~~

View file

@@ -0,0 +1,13 @@
# META
~~~ini
description=List.count_if counts elements where predicate returns true
type=repl
~~~
# SOURCE
~~~roc
» List.count_if([1, 2, 3, 4, 5], |x| x > 2)
~~~
# OUTPUT
3
# PROBLEMS
NIL

View file

@@ -0,0 +1,13 @@
# META
~~~ini
description=List.count_if returns list length when all elements match
type=repl
~~~
# SOURCE
~~~roc
» List.count_if([1, 2, 3, 4, 5], |x| x > 0)
~~~
# OUTPUT
5
# PROBLEMS
NIL

View file

@@ -0,0 +1,13 @@
# META
~~~ini
description=List.count_if on empty list returns 0
type=repl
~~~
# SOURCE
~~~roc
» List.count_if([], |x| x > 2)
~~~
# OUTPUT
0
# PROBLEMS
NIL

View file

@@ -0,0 +1,13 @@
# META
~~~ini
description=List.count_if returns 0 when no elements match
type=repl
~~~
# SOURCE
~~~roc
» List.count_if([1, 2, 3], |x| x > 10)
~~~
# OUTPUT
0
# PROBLEMS
NIL

View file

@@ -1,6 +1,6 @@
# META
~~~ini
description=Numeric without annotation, multiple uses with different types (produces type error)
description=Numeric without annotation, multiple uses with different types (each use gets fresh type)
type=repl
~~~
# SOURCE
@@ -17,6 +17,6 @@ assigned `a`
---
assigned `b`
---
TYPE MISMATCH
"4242.0"
# PROBLEMS
NIL

View file

@@ -0,0 +1,22 @@
# META
~~~ini
description=Numeric sum then convert to I16 string
type=repl
~~~
# SOURCE
~~~roc
» a = 4
» b = 5
» sum = a + b
» I16.to_str(sum)
~~~
# OUTPUT
assigned `a`
---
assigned `b`
---
assigned `sum`
---
"9"
# PROBLEMS
NIL

View file

@@ -0,0 +1,19 @@
# META
~~~ini
description=U8.to - creates a list of integers from start to end (inclusive)
type=repl
~~~
# SOURCE
~~~roc
» 1u8.to(5u8)
» 0u8.to(0u8)
» 5u8.to(3u8)
~~~
# OUTPUT
[1, 2, 3, 4, 5]
---
[0]
---
[]
# PROBLEMS
NIL

View file

@@ -0,0 +1,19 @@
# META
~~~ini
description=U8.until - creates a list of integers from start to end (exclusive)
type=repl
~~~
# SOURCE
~~~roc
» 0u8.until(3u8)
» 1u8.until(1u8)
» 5u8.until(3u8)
~~~
# OUTPUT
[0, 1, 2]
---
[]
---
[]
# PROBLEMS
NIL

View file

@@ -0,0 +1,17 @@
# META
~~~ini
description=Test var in lambda parameters
type=repl
~~~
# SOURCE
~~~roc
» f = |var $x, y| { $x = $x + y
$x }
» f(1, 2)
~~~
# OUTPUT
assigned `f`
---
3
# PROBLEMS
NIL

View file

@@ -22,3 +22,4 @@ HSA = "HSA"
typ = "typ"
ba = "ba"
Trys = "Trys"
sigfault = "sigfault"