Merge remote-tracking branch 'remote/main' into fx-behvaiour-tests

Luke Boswell 2025-12-08 09:34:26 +11:00
commit 3d818c326d
No known key found for this signature in database
GPG key ID: 54A7324B1B975757
142 changed files with 5103 additions and 2437 deletions

build.zig

@ -87,7 +87,7 @@ const TestsSummaryStep = struct {
/// 2. They are brittle to changes that type-checking should not be sensitive to
///
/// Instead, we always compare indices - either into node stores or to interned string indices.
/// This step enforces that rule by failing the build if `std.mem.` is found in src/check/ or src/layout/.
/// This step enforces that rule by failing the build if `std.mem.` is found in src/canonicalize/, src/check/, src/layout/, or src/eval/.
const CheckTypeCheckerPatternsStep = struct {
step: Step,
@ -104,15 +104,15 @@ const CheckTypeCheckerPatternsStep = struct {
return self;
}
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
fn make(step: *Step, _: Step.MakeOptions) !void {
const b = step.owner;
const allocator = b.allocator;
var violations = std.ArrayList(Violation).empty;
defer violations.deinit(allocator);
// Recursively scan src/check/, src/layout/, and src/eval/ for .zig files
// Recursively scan src/canonicalize/, src/check/, src/layout/, and src/eval/ for .zig files
// TODO: uncomment "src/canonicalize" once its std.mem violations are fixed
const dirs_to_scan = [_][]const u8{ "src/check", "src/layout", "src/eval" };
for (dirs_to_scan) |dir_path| {
var dir = std.fs.cwd().openDir(dir_path, .{ .iterate = true }) catch |err| {
@ -130,7 +130,7 @@ const CheckTypeCheckerPatternsStep = struct {
std.debug.print("=" ** 80 ++ "\n\n", .{});
std.debug.print(
\\Code in src/check/, src/layout/, and src/eval/ must NOT do raw string comparison or manipulation.
\\Code in src/canonicalize/, src/check/, src/layout/, and src/eval/ must NOT do raw string comparison or manipulation.
\\
\\WHY THIS RULE EXISTS:
\\ We NEVER do string or byte comparisons because:
@ -170,7 +170,7 @@ const CheckTypeCheckerPatternsStep = struct {
std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{});
return step.fail(
"Found {d} forbidden patterns (raw string comparison or manipulation) in src/check/, src/layout/, or src/eval/. " ++
"Found {d} forbidden patterns (raw string comparison or manipulation) in src/canonicalize/, src/check/, src/layout/, or src/eval/. " ++
"See above for details on why this is forbidden and what to do instead.",
.{violations.items.len},
);
@ -292,6 +292,303 @@ const CheckTypeCheckerPatternsStep = struct {
}
};
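// A minimal sketch of the rule above, using a hypothetical interned-identifier
// index type (not the compiler's real interner API):
const IdentIdxSketch = enum(u32) { _ };

// WRONG inside the checked directories: raw byte comparison.
fn isFooByBytes(name: []const u8) bool {
    return std.mem.eql(u8, name, "foo");
}

// RIGHT: identical strings intern to the same index, so compare indices.
fn isFooByIdx(name_idx: IdentIdxSketch, foo_idx: IdentIdxSketch) bool {
    return name_idx == foo_idx;
}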
/// Build step that checks for @enumFromInt(0) usage in all .zig files.
///
/// We forbid @enumFromInt(0) because it hides bugs and makes them harder to debug.
/// If we need a placeholder value that we believe will never be read, we should
/// use `undefined` instead - that way our intent is clear, and it can fail in a
/// more obvious way if our assumption is incorrect.
const CheckEnumFromIntZeroStep = struct {
step: Step,
fn create(b: *std.Build) *CheckEnumFromIntZeroStep {
const self = b.allocator.create(CheckEnumFromIntZeroStep) catch @panic("OOM");
self.* = .{
.step = Step.init(.{
.id = Step.Id.custom,
.name = "check-enum-from-int-zero",
.owner = b,
.makeFn = make,
}),
};
return self;
}
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
const allocator = b.allocator;
var violations = std.ArrayList(Violation).empty;
defer violations.deinit(allocator);
// Recursively scan src/ for .zig files
var dir = std.fs.cwd().openDir("src", .{ .iterate = true }) catch |err| {
return step.fail("Failed to open src directory: {}", .{err});
};
defer dir.close();
try scanDirectoryForEnumFromIntZero(allocator, dir, "src", &violations);
if (violations.items.len > 0) {
std.debug.print("\n", .{});
std.debug.print("=" ** 80 ++ "\n", .{});
std.debug.print("FORBIDDEN PATTERN: @enumFromInt(0)\n", .{});
std.debug.print("=" ** 80 ++ "\n\n", .{});
std.debug.print(
\\Using @enumFromInt(0) is forbidden in this codebase.
\\
\\WHY THIS RULE EXISTS:
\\ @enumFromInt(0) hides bugs and makes them harder to debug. It creates
\\ a "valid-looking" value that can silently propagate through the code
\\ when something goes wrong.
\\
\\WHAT TO DO INSTEAD:
\\ If you need a placeholder value that you believe will never be read,
\\ use `undefined` instead. This makes your intent clear, and if your
\\ assumption is wrong and the value IS read, it will fail more obviously.
\\
\\ When using `undefined`, add a comment explaining why it's correct there
\\ (e.g., where it will be overwritten before being read).
\\
\\ Example - WRONG:
\\ .anno = @enumFromInt(0), // placeholder - will be replaced
\\
\\ Example - RIGHT:
\\ .anno = undefined, // overwritten in Phase 1.7 before use
\\
\\VIOLATIONS FOUND:
\\
, .{});
for (violations.items) |violation| {
std.debug.print(" {s}:{d}: {s}\n", .{
violation.file_path,
violation.line_number,
violation.line_content,
});
}
std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{});
return step.fail(
"Found {d} uses of @enumFromInt(0). Using placeholder values like this has consistently led to bugs in this code base. " ++
"Do not use @enumFromInt(0) and also do not uncritically replace it with another placeholder like .first or something like that. " ++
"If you want it to be uninitialized and are very confident it will be overwritten before it is ever read, then use `undefined`. " ++
"Otherwise, take a step back and rethink how this code works; there should be a way to implement this in a way that does not use hardcoded placeholder indices like 0! " ++
"See above for details.",
.{violations.items.len},
);
}
}
const Violation = struct {
file_path: []const u8,
line_number: usize,
line_content: []const u8,
};
fn scanDirectoryForEnumFromIntZero(
allocator: std.mem.Allocator,
dir: std.fs.Dir,
path_prefix: []const u8,
violations: *std.ArrayList(Violation),
) !void {
var walker = try dir.walk(allocator);
defer walker.deinit();
while (try walker.next()) |entry| {
if (entry.kind != .file) continue;
if (!std.mem.endsWith(u8, entry.path, ".zig")) continue;
const full_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ path_prefix, entry.path });
const file = dir.openFile(entry.path, .{}) catch continue;
defer file.close();
const content = file.readToEndAlloc(allocator, 10 * 1024 * 1024) catch continue;
defer allocator.free(content);
var line_number: usize = 1;
var line_start: usize = 0;
for (content, 0..) |char, i| {
if (char == '\n') {
const line = content[line_start..i];
const trimmed = std.mem.trim(u8, line, " \t");
// Skip comments
if (std.mem.startsWith(u8, trimmed, "//")) {
line_number += 1;
line_start = i + 1;
continue;
}
// Check for @enumFromInt(0) usage
if (std.mem.indexOf(u8, line, "@enumFromInt(0)") != null) {
try violations.append(allocator, .{
.file_path = full_path,
.line_number = line_number,
.line_content = try allocator.dupe(u8, trimmed),
});
}
line_number += 1;
line_start = i + 1;
}
}
}
}
};
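// A concrete sketch of the failure modes this rule prevents, using a
// hypothetical node-index type rather than actual compiler types:
const NodeIdxSketch = enum(u32) { _ };
const DeclSketch = struct { anno: NodeIdxSketch };

// WRONG: a "valid-looking" index that silently aliases node 0 if read by mistake.
fn placeholderWrong() DeclSketch {
    return .{ .anno = @enumFromInt(0) };
}

// RIGHT: the caller overwrites .anno before any read; if that assumption is
// ever wrong, Debug builds fill `undefined` memory with 0xaa bytes, so the
// bug surfaces loudly instead of quietly operating on node 0.
fn placeholderRight() DeclSketch {
    return .{ .anno = undefined };
}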
/// Build step that checks for unused variable suppression patterns.
///
/// In this codebase, we don't use `_ = variable;` to suppress unused variable warnings.
/// Instead, we delete the unused variable/argument and update all call sites as necessary.
const CheckUnusedSuppressionStep = struct {
step: Step,
fn create(b: *std.Build) *CheckUnusedSuppressionStep {
const self = b.allocator.create(CheckUnusedSuppressionStep) catch @panic("OOM");
self.* = .{
.step = Step.init(.{
.id = Step.Id.custom,
.name = "check-unused-suppression",
.owner = b,
.makeFn = make,
}),
};
return self;
}
fn make(step: *Step, _: Step.MakeOptions) !void {
const b = step.owner;
const allocator = b.allocator;
var violations = std.ArrayList(Violation).empty;
defer violations.deinit(allocator);
// Scan all src/ directories for .zig files
var dir = std.fs.cwd().openDir("src", .{ .iterate = true }) catch |err| {
return step.fail("Failed to open src/ directory: {}", .{err});
};
defer dir.close();
try scanDirectoryForUnusedSuppression(allocator, dir, "src", &violations);
if (violations.items.len > 0) {
std.debug.print("\n", .{});
std.debug.print("=" ** 80 ++ "\n", .{});
std.debug.print("UNUSED VARIABLE SUPPRESSION DETECTED\n", .{});
std.debug.print("=" ** 80 ++ "\n\n", .{});
std.debug.print(
\\In this codebase, we do NOT use `_ = variable;` to suppress unused warnings.
\\
\\Instead, you should:
\\ 1. Delete the unused variable, parameter, or argument
\\ 2. Update all call sites as necessary
\\ 3. Propagate the change through the codebase until tests pass
\\
\\VIOLATIONS FOUND:
\\
, .{});
for (violations.items) |violation| {
std.debug.print(" {s}:{d}: {s}\n", .{
violation.file_path,
violation.line_number,
violation.line_content,
});
}
std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{});
return step.fail(
"Found {d} unused variable suppression patterns (`_ = identifier;`). " ++
"Delete the unused variables and update call sites instead.",
.{violations.items.len},
);
}
}
const Violation = struct {
file_path: []const u8,
line_number: usize,
line_content: []const u8,
};
fn scanDirectoryForUnusedSuppression(
allocator: std.mem.Allocator,
dir: std.fs.Dir,
path_prefix: []const u8,
violations: *std.ArrayList(Violation),
) !void {
var walker = try dir.walk(allocator);
defer walker.deinit();
while (try walker.next()) |entry| {
if (entry.kind != .file) continue;
if (!std.mem.endsWith(u8, entry.path, ".zig")) continue;
const full_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ path_prefix, entry.path });
const file = dir.openFile(entry.path, .{}) catch continue;
defer file.close();
const content = file.readToEndAlloc(allocator, 10 * 1024 * 1024) catch continue;
defer allocator.free(content);
var line_number: usize = 1;
var line_start: usize = 0;
for (content, 0..) |char, i| {
if (char == '\n') {
const line = content[line_start..i];
const trimmed = std.mem.trim(u8, line, " \t");
// Check for pattern: _ = identifier;
// where identifier is alphanumeric with underscores
if (isUnusedSuppression(trimmed)) {
try violations.append(allocator, .{
.file_path = full_path,
.line_number = line_number,
.line_content = try allocator.dupe(u8, trimmed),
});
}
line_number += 1;
line_start = i + 1;
}
}
}
}
fn isUnusedSuppression(line: []const u8) bool {
// Pattern: `_ = identifier;` where identifier is alphanumeric with underscores
// Must start with "_ = " and end with ";"
if (!std.mem.startsWith(u8, line, "_ = ")) return false;
if (!std.mem.endsWith(u8, line, ";")) return false;
// Extract the identifier part (between "_ = " and ";")
const identifier = line[4 .. line.len - 1];
// Must have at least one character
if (identifier.len == 0) return false;
// Check that identifier contains only alphanumeric chars and underscores
// Dots are allowed too, so that field accesses like `_ = self.field;` are also caught
for (identifier) |c| {
if (!std.ascii.isAlphanumeric(c) and c != '_' and c != '.') {
return false;
}
}
return true;
}
};
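// The remedy the message above asks for, sketched on a hypothetical helper:
// delete the unused parameter and update every call site.

// BEFORE: `scale` is unused; suppressing it with `_ = scale;` trips this check.
fn areaBefore(w: u32, h: u32, scale: u32) u32 {
    _ = scale;
    return w * h;
}

// AFTER: the parameter is gone, and callers pass two arguments instead.
fn areaAfter(w: u32, h: u32) u32 {
    return w * h;
}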
fn checkFxPlatformTestCoverage(step: *Step) !void {
const b = step.owner;
std.debug.print("---- checking fx platform test coverage ----\n", .{});
@ -1272,6 +1569,14 @@ pub fn build(b: *std.Build) void {
const check_patterns = CheckTypeCheckerPatternsStep.create(b);
test_step.dependOn(&check_patterns.step);
// Add check for @enumFromInt(0) usage
const check_enum_from_int = CheckEnumFromIntZeroStep.create(b);
test_step.dependOn(&check_enum_from_int.step);
// Add check for unused variable suppression patterns
const check_unused = CheckUnusedSuppressionStep.create(b);
test_step.dependOn(&check_unused.step);
test_step.dependOn(&tests_summary.step);
b.default_step.dependOn(playground_step);
@ -2189,9 +2494,8 @@ fn generateGlibcStub(b: *std.Build, target: ResolvedTarget, target_name: []const
const writer = assembly_buf.writer(b.allocator);
const target_arch = target.result.cpu.arch;
const target_abi = target.result.abi;
glibc_stub_build.generateComprehensiveStub(b.allocator, writer, target_arch, target_abi) catch |err| {
glibc_stub_build.generateComprehensiveStub(writer, target_arch) catch |err| {
std.log.warn("Failed to generate comprehensive stub assembly for {s}: {}, using minimal ELF", .{ target_name, err });
// Fall back to minimal ELF
const stub_content = switch (target.result.cpu.arch) {


@ -47,21 +47,15 @@ fn PlainTextSExprWriter(comptime WriterType: type) type {
try self.writer.print(fmt, args);
}
pub fn setColor(self: *@This(), color: Color) !void {
_ = self;
_ = color;
pub fn setColor(_: *@This(), _: Color) !void {
// No-op for plain text
}
pub fn beginSourceRange(self: *@This(), start_byte: u32, end_byte: u32) !void {
_ = self;
_ = start_byte;
_ = end_byte;
pub fn beginSourceRange(_: *@This(), _: u32, _: u32) !void {
// No-op for plain text
}
pub fn endSourceRange(self: *@This()) !void {
_ = self;
pub fn endSourceRange(_: *@This()) !void {
// No-op for plain text
}


@ -11,6 +11,7 @@ pub const parallel = @import("parallel.zig");
pub const SmallStringInterner = @import("SmallStringInterner.zig");
pub const safe_memory = @import("safe_memory.zig");
pub const stack_overflow = @import("stack_overflow.zig");
pub const target = @import("target.zig");
pub const DataSpan = @import("DataSpan.zig").DataSpan;
@ -158,6 +159,7 @@ test "base tests" {
std.testing.refAllDecls(@import("Scratch.zig"));
std.testing.refAllDecls(@import("SExprTree.zig"));
std.testing.refAllDecls(@import("SmallStringInterner.zig"));
std.testing.refAllDecls(@import("stack_overflow.zig"));
std.testing.refAllDecls(@import("StringLiteral.zig"));
std.testing.refAllDecls(@import("target.zig"));
}


@ -106,9 +106,8 @@ test "safeCast and safeRead" {
var buffer = [_]u8{ 0x12, 0x34, 0x56, 0x78 };
const ptr = @as(*anyopaque, @ptrCast(&buffer));
const value = try safeRead(u16, ptr, 0, 4);
// Endianness dependent, but should not crash
_ = value;
// Just verify this doesn't error - actual value is endianness dependent
_ = try safeRead(u16, ptr, 0, 4);
try std.testing.expectError(error.BufferOverflow, safeRead(u32, ptr, 1, 4));
}

src/base/stack_overflow.zig Normal file

@ -0,0 +1,450 @@
//! Stack overflow detection and handling for the Roc compiler.
//!
//! This module provides a mechanism to catch stack overflows and report them
//! with a helpful error message instead of a generic segfault. This is particularly
//! useful during compiler development when recursive algorithms might blow the stack.
//!
//! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate
//! signal stack and install a SIGSEGV handler that detects stack overflows.
//!
//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW.
//!
//! WASI is not currently supported (no signal handling available).
const std = @import("std");
const builtin = @import("builtin");
const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined;
// Windows types and constants
const DWORD = u32;
const LONG = i32;
const ULONG_PTR = usize;
const PVOID = ?*anyopaque;
const HANDLE = ?*anyopaque;
const BOOL = i32;
const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD;
const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005;
const EXCEPTION_CONTINUE_SEARCH: LONG = 0;
const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12));
const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize));
const EXCEPTION_RECORD = extern struct {
ExceptionCode: DWORD,
ExceptionFlags: DWORD,
ExceptionRecord: ?*EXCEPTION_RECORD,
ExceptionAddress: PVOID,
NumberParameters: DWORD,
ExceptionInformation: [15]ULONG_PTR,
};
const CONTEXT = extern struct {
// We don't need the full context, just enough to make the struct valid
data: [1232]u8, // Size varies by arch, this is x64 size
};
const EXCEPTION_POINTERS = extern struct {
ExceptionRecord: *EXCEPTION_RECORD,
ContextRecord: *CONTEXT,
};
const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(.winapi) LONG;
// Windows API imports
extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(.winapi) LPTOP_LEVEL_EXCEPTION_FILTER;
extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE;
extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) BOOL;
extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn;
/// Size of the alternate signal stack (64KB should be plenty for the handler)
const ALT_STACK_SIZE = 64 * 1024;
/// Storage for the alternate signal stack (POSIX only)
var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined;
/// Whether the handler has been installed
var handler_installed = false;
/// Error message to display on stack overflow
const STACK_OVERFLOW_MESSAGE =
\\
\\================================================================================
\\STACK OVERFLOW in the Roc compiler
\\================================================================================
\\
\\The Roc compiler ran out of stack space. This is a bug in the compiler,
\\not in your code.
\\
\\This often happens due to:
\\ - Infinite recursion in type translation or unification
\\ - Very deeply nested expressions without tail-call optimization
\\ - Cyclic data structures without proper cycle detection
\\
\\Please report this issue at: https://github.com/roc-lang/roc/issues
\\
\\Include the Roc code that triggered this error if possible.
\\
\\================================================================================
\\
\\
;
/// Install the stack overflow handler.
/// This should be called early in main() before any significant work is done.
/// Returns true if the handler was installed successfully, false otherwise.
pub fn install() bool {
if (handler_installed) return true;
if (comptime builtin.os.tag == .windows) {
return installWindows();
}
if (comptime builtin.os.tag == .wasi) {
// WASI doesn't support signal handling
return false;
}
return installPosix();
}
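// A sketch of the intended call pattern from a hypothetical main() (the
// import path here is an assumption):
//
//     const std = @import("std");
//     const stack_overflow = @import("base/stack_overflow.zig");
//
//     pub fn main() !void {
//         // Best effort: install() returns false on WASI or if sigaltstack
//         // setup fails; the compiler then runs without the friendly handler.
//         if (!stack_overflow.install()) {
//             std.log.debug("stack overflow handler unavailable", .{});
//         }
//         // ... rest of the compiler driver ...
//     }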
fn installPosix() bool {
// Set up the alternate signal stack
var alt_stack = posix.stack_t{
.sp = &alt_stack_storage,
.flags = 0,
.size = ALT_STACK_SIZE,
};
posix.sigaltstack(&alt_stack, null) catch {
return false;
};
// Install the SIGSEGV handler
const action = posix.Sigaction{
.handler = .{ .sigaction = handleSignalPosix },
.mask = posix.sigemptyset(),
.flags = posix.SA.SIGINFO | posix.SA.ONSTACK,
};
posix.sigaction(posix.SIG.SEGV, &action, null);
// Also catch SIGBUS which can occur on some systems for stack overflow
posix.sigaction(posix.SIG.BUS, &action, null);
handler_installed = true;
return true;
}
fn installWindows() bool {
_ = SetUnhandledExceptionFilter(handleExceptionWindows);
handler_installed = true;
return true;
}
/// Windows exception handler function
fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG {
const exception_code = exception_info.ExceptionRecord.ExceptionCode;
// Check if this is a stack overflow or access violation
const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW);
const is_access_violation = (exception_code == EXCEPTION_ACCESS_VIOLATION);
if (!is_stack_overflow and !is_access_violation) {
// Let other handlers deal with this exception
return EXCEPTION_CONTINUE_SEARCH;
}
// Write error message to stderr
const stderr_handle = GetStdHandle(STD_ERROR_HANDLE);
if (stderr_handle != INVALID_HANDLE_VALUE and stderr_handle != null) {
var bytes_written: DWORD = 0;
if (is_stack_overflow) {
_ = WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null);
} else {
const msg = "\nAccess violation in the Roc compiler.\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n";
_ = WriteFile(stderr_handle, msg.ptr, msg.len, &bytes_written, null);
}
}
// Exit with appropriate code
const exit_code: c_uint = if (is_stack_overflow) 134 else 139;
ExitProcess(exit_code);
}
/// The POSIX signal handler function
fn handleSignalPosix(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void {
// Get the fault address - access differs by platform
const fault_addr: usize = getFaultAddress(info);
// Get the current stack pointer to help determine if this is a stack overflow
var current_sp: usize = 0;
asm volatile (""
: [sp] "={sp}" (current_sp),
);
// A stack overflow typically occurs when the fault address is near the stack pointer
// or below the stack (stacks grow downward on most architectures)
const likely_stack_overflow = isLikelyStackOverflow(fault_addr, current_sp);
// Write our error message to stderr (use STDERR_FILENO directly for signal safety)
const stderr_fd = posix.STDERR_FILENO;
if (likely_stack_overflow) {
_ = posix.write(stderr_fd, STACK_OVERFLOW_MESSAGE) catch {};
} else {
// Generic segfault - provide some context
const generic_msg = switch (sig) {
posix.SIG.SEGV => "\nSegmentation fault (SIGSEGV) in the Roc compiler.\nFault address: ",
posix.SIG.BUS => "\nBus error (SIGBUS) in the Roc compiler.\nFault address: ",
else => "\nFatal signal in the Roc compiler.\nFault address: ",
};
_ = posix.write(stderr_fd, generic_msg) catch {};
// Write the fault address as hex
var addr_buf: [18]u8 = undefined;
const addr_str = formatHex(fault_addr, &addr_buf);
_ = posix.write(stderr_fd, addr_str) catch {};
_ = posix.write(stderr_fd, "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n") catch {};
}
// Exit with a distinct error code for stack overflow
if (likely_stack_overflow) {
posix.exit(134); // 128 + 6 (SIGABRT-like)
} else {
posix.exit(139); // 128 + 11 (SIGSEGV)
}
}
/// Get the fault address from siginfo_t (platform-specific)
fn getFaultAddress(info: *const posix.siginfo_t) usize {
// The siginfo_t structure varies by platform
if (comptime builtin.os.tag == .linux) {
// Linux: fault address is in fields.sigfault.addr
return @intFromPtr(info.fields.sigfault.addr);
} else if (comptime builtin.os.tag == .macos or
builtin.os.tag == .ios or
builtin.os.tag == .tvos or
builtin.os.tag == .watchos or
builtin.os.tag == .visionos or
builtin.os.tag == .freebsd or
builtin.os.tag == .dragonfly or
builtin.os.tag == .netbsd or
builtin.os.tag == .openbsd)
{
// macOS/iOS/BSD: fault address is in addr field
return @intFromPtr(info.addr);
} else {
// Fallback: return 0 if we can't determine the address
return 0;
}
}
/// Heuristic to determine if a fault is likely a stack overflow
fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool {
// If fault address is 0 or very low, it's likely a null pointer dereference
if (fault_addr < 4096) return false;
// Stack overflows typically fault near the stack guard page
// The fault address will be close to (but below) the current stack pointer
// We use a generous range since the stack pointer in the signal handler
// is on the alternate stack
// On most systems, the main stack is in high memory and grows down
// A stack overflow fault will be at an address lower than the normal stack
// Check if fault address is within a reasonable range of where stack would be
// This is a heuristic - we check if the fault is in the lower part of address space
// where guard pages typically are
const max_addr = std.math.maxInt(usize);
const high_memory_threshold = max_addr - (16 * 1024 * 1024 * 1024); // 16GB from top
// If the fault is in the high memory region (where stacks live) but at a page boundary
// it's likely a stack guard page hit
if (fault_addr > high_memory_threshold) {
// Check if it's at a page boundary (guard pages are typically page-aligned)
const page_size = std.heap.page_size_min;
const page_aligned = (fault_addr & (page_size - 1)) == 0 or (fault_addr & (page_size - 1)) < 64;
if (page_aligned) return true;
}
// Also check if the fault address is suspiciously close to the current SP
// This catches cases where we're still on the main stack when the overflow happens
const sp_distance = if (fault_addr < current_sp) current_sp - fault_addr else fault_addr - current_sp;
if (sp_distance < 1024 * 1024) { // Within 1MB of stack pointer
return true;
}
return false;
}
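// A sketch of the heuristic on made-up inputs (64-bit address space assumed;
// real stack layouts vary by OS):
test "isLikelyStackOverflow illustrative cases (sketch)" {
    // A near-null fault address reads as a null dereference, not an overflow.
    try std.testing.expect(!isLikelyStackOverflow(0x10, 0x7fff_0000_0000));
    // A fault 64KB below the stack pointer is within the 1MB window.
    try std.testing.expect(isLikelyStackOverflow(0x7ffe_ffff_0000, 0x7fff_0000_0000));
}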
/// Format a usize as hexadecimal
fn formatHex(value: usize, buf: []u8) []const u8 {
const hex_chars = "0123456789abcdef";
var i: usize = buf.len;
if (value == 0) {
i -= 1;
buf[i] = '0';
} else {
var v = value;
while (v > 0 and i > 2) {
i -= 1;
buf[i] = hex_chars[v & 0xf];
v >>= 4;
}
}
// Add 0x prefix
i -= 1;
buf[i] = 'x';
i -= 1;
buf[i] = '0';
return buf[i..];
}
/// Test function that intentionally causes a stack overflow.
/// This is used to verify the handler works correctly.
pub fn triggerStackOverflowForTest() noreturn {
// Use a recursive function that can't be tail-call optimized
const S = struct {
fn recurse(n: usize) usize {
// Prevent tail-call optimization by doing work after the recursive call
var buf: [1024]u8 = undefined;
buf[0] = @truncate(n);
const result = if (n == 0) 0 else recurse(n + 1);
// Use the buffer to prevent it from being optimized away
return result + buf[0];
}
};
// This will recurse until stack overflow
const result = S.recurse(1);
// This should never be reached
std.debug.print("Unexpected result: {}\n", .{result});
std.process.exit(1);
}
test "formatHex" {
var buf: [18]u8 = undefined;
const zero = formatHex(0, &buf);
try std.testing.expectEqualStrings("0x0", zero);
const small = formatHex(0xff, &buf);
try std.testing.expectEqualStrings("0xff", small);
const medium = formatHex(0xdeadbeef, &buf);
try std.testing.expectEqualStrings("0xdeadbeef", medium);
}
/// Check if we're being run as a subprocess to trigger stack overflow.
/// This is called by tests to create a child process that will crash.
/// Returns true if we should trigger the overflow (and not return).
pub fn checkAndTriggerIfSubprocess() bool {
// Check for the special environment variable that signals we should crash
const env_val = std.process.getEnvVarOwned(std.heap.page_allocator, "ROC_TEST_TRIGGER_STACK_OVERFLOW") catch return false;
defer std.heap.page_allocator.free(env_val);
if (std.mem.eql(u8, env_val, "1")) {
// Install handler and trigger overflow
_ = install();
triggerStackOverflowForTest();
// Never returns
}
return false;
}
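// A sketch of how a test could drive this from the parent side; the helper
// binary name is an assumption:
//
//     var env = std.process.EnvMap.init(std.testing.allocator);
//     defer env.deinit();
//     try env.put("ROC_TEST_TRIGGER_STACK_OVERFLOW", "1");
//     var child = std.process.Child.init(&.{"./roc-test-helper"}, std.testing.allocator);
//     child.env_map = &env;
//     const term = try child.spawnAndWait();
//     // 134 is the exit code used when the handler classifies a stack overflow.
//     try std.testing.expectEqual(std.process.Child.Term{ .Exited = 134 }, term);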
test "stack overflow handler produces helpful error message" {
// Skip on WASI - no process spawning or signal handling
if (comptime builtin.os.tag == .wasi) {
return error.SkipZigTest;
}
if (comptime builtin.os.tag == .windows) {
// Windows test would need subprocess spawning which is more complex
// The handler is installed and works, but testing it is harder
// For now, just verify the handler installs successfully
if (install()) {
return; // Success - handler installed
}
return error.SkipZigTest;
}
try testStackOverflowPosix();
}
fn testStackOverflowPosix() !void {
// Create a pipe to capture stderr from the child
const pipe_fds = try posix.pipe();
const pipe_read = pipe_fds[0];
const pipe_write = pipe_fds[1];
const fork_result = posix.fork() catch {
posix.close(pipe_read);
posix.close(pipe_write);
return error.ForkFailed;
};
if (fork_result == 0) {
// Child process
posix.close(pipe_read);
// Redirect stderr to the pipe
posix.dup2(pipe_write, posix.STDERR_FILENO) catch posix.exit(99);
posix.close(pipe_write);
// Install the handler and trigger stack overflow
_ = install();
triggerStackOverflowForTest();
// Should never reach here
unreachable;
} else {
// Parent process
posix.close(pipe_write);
// Wait for child to exit
const wait_result = posix.waitpid(fork_result, 0);
const status = wait_result.status;
// Parse the wait status (Unix encoding)
const exited_normally = (status & 0x7f) == 0;
const exit_code: u8 = @truncate((status >> 8) & 0xff);
const termination_signal: u8 = @truncate(status & 0x7f);
// Read stderr output from child
var stderr_buf: [4096]u8 = undefined;
const bytes_read = posix.read(pipe_read, &stderr_buf) catch 0;
posix.close(pipe_read);
const stderr_output = stderr_buf[0..bytes_read];
try verifyHandlerOutput(exited_normally, exit_code, termination_signal, stderr_output);
}
}
fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: u8, stderr_output: []const u8) !void {
// Exit code 134 = stack overflow detected
// Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow)
if (exited_normally and (exit_code == 134 or exit_code == 139)) {
// Check that our handler message was printed
const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null;
const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null;
const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null;
// Handler should have printed EITHER stack overflow message OR segfault message
try std.testing.expect(has_stack_overflow_msg or has_segfault_msg);
try std.testing.expect(has_roc_compiler_msg);
} else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) {
// The handler might not have caught it - this can happen on some systems
// where the signal delivery is different. Just warn and skip.
std.debug.print("Warning: Stack overflow was not caught by handler (signal {})\n", .{termination_signal});
return error.SkipZigTest;
} else {
std.debug.print("Unexpected exit status: exited={}, code={}, signal={}\n", .{ exited_normally, exit_code, termination_signal });
std.debug.print("Stderr: {s}\n", .{stderr_output});
return error.TestUnexpectedResult;
}
}


@ -13,5 +13,5 @@ pub const encode = base58.encode;
pub const decode = base58.decode;
test {
_ = base58;
@import("std").testing.refAllDecls(@This());
}


@ -1617,7 +1617,7 @@ fn compileModule(
}
// 4. Canonicalize
try module_env.initCIRFields(gpa, module_name);
try module_env.initCIRFields(module_name);
var can_result = try gpa.create(Can);
defer {


@ -4,14 +4,9 @@ const std = @import("std");
/// Generate assembly stub with essential libc symbols
pub fn generateComprehensiveStub(
allocator: std.mem.Allocator,
writer: anytype,
target_arch: std.Target.Cpu.Arch,
target_abi: std.Target.Abi,
) !void {
_ = allocator;
_ = target_abi;
const ptr_width: u32 = switch (target_arch) {
.x86_64, .aarch64 => 8,
else => 4,


@ -119,6 +119,19 @@ Builtin :: [].{
},
)
count_if : List(a), (a -> Bool) -> U64
count_if = |list, predicate|
List.fold(
list,
0,
|acc, elem|
if predicate(elem) {
acc + 1
} else {
acc
},
)
fold : List(item), state, (state, item -> state) -> state
fold = |list, init, step| {
var $state = init
@ -331,6 +344,18 @@ Builtin :: [].{
from_numeral : Numeral -> Try(U8, [InvalidNumeral(Str), ..others])
from_str : Str -> Try(U8, [BadNumStr, ..others])
# # List of integers beginning with this `U8` and ending with the other `U8`.
# # (Use [until] instead to end with the other `U8` minus one.)
# # Returns an empty list if this `U8` is greater than the other.
to : U8, U8 -> List(U8)
to = |start, end| range_to(start, end)
# # List of integers beginning with this `U8` and ending with the other `U8` minus one.
# # (Use [to] instead to end with the other `U8` exactly, instead of minus one.)
# # Returns an empty list if this `U8` is greater than or equal to the other.
until : U8, U8 -> List(U8)
until = |start, end| range_until(start, end)
# Conversions to signed integers (I8 is lossy, others are safe)
to_i8_wrap : U8 -> I8
to_i8_try : U8 -> Try(I8, [OutOfRange, ..others])
@ -977,8 +1002,29 @@ Builtin :: [].{
}
}
# Private top-level function for unsafe list access
# This is a low-level operation that gets replaced by the compiler
range_to = |var $current, end| {
var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist.
while $current <= end {
$answer = $answer.append($current)
$current = $current + 1
}
$answer
}
range_until = |var $current, end| {
var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist.
while $current < end {
$answer = $answer.append($current)
$current = $current + 1
}
$answer
}
# Implemented by the compiler, does not perform bounds checks
list_get_unsafe : List(item), U64 -> item
# Unsafe conversion functions - these return simple records instead of Try types


@ -47,29 +47,15 @@ const ___tracy_c_zone_context = extern struct {
/// The tracy context object for tracking zones.
/// Make sure to defer calling end.
pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
pub inline fn end(self: @This()) void {
_ = self;
}
pub inline fn end(_: @This()) void {}
pub inline fn addText(self: @This(), text: []const u8) void {
_ = self;
_ = text;
}
pub inline fn addText(_: @This(), _: []const u8) void {}
pub inline fn setName(self: @This(), name: []const u8) void {
_ = self;
_ = name;
}
pub inline fn setName(_: @This(), _: []const u8) void {}
pub inline fn setColor(self: @This(), color: u32) void {
_ = self;
_ = color;
}
pub inline fn setColor(_: @This(), _: u32) void {}
pub inline fn setValue(self: @This(), value: u64) void {
_ = self;
_ = value;
}
pub inline fn setValue(_: @This(), _: u64) void {}
};
/// Creates a source location based tracing zone.


@ -97,9 +97,6 @@ fn testing_roc_dealloc(c_ptr: *anyopaque, _: u32) callconv(.c) void {
allocator.free(slice);
}
fn testing_roc_panic(c_ptr: *anyopaque, tag_id: u32) callconv(.c) void {
_ = c_ptr;
_ = tag_id;
fn testing_roc_panic(_: *anyopaque, _: u32) callconv(.c) void {
@panic("Roc panicked");
}


@ -635,16 +635,6 @@ pub fn strSplitOn(
return list;
}
fn initFromSmallStr(
slice_bytes: [*]u8,
len: usize,
_: usize,
// TODO we probably don't need this here
roc_ops: *RocOps,
) RocStr {
return RocStr.init(slice_bytes, len, roc_ops);
}
/// TODO
pub fn strSplitOnHelp(
array: [*]RocStr,


@ -3,6 +3,7 @@
//! This module provides essential infrastructure for builtin operations,
//! including memory allocation interfaces, overflow detection utilities,
//! debug functions, and common types used throughout the builtin modules.
//!
//! It serves as the foundation layer that other builtin modules depend on
//! for low-level operations and host interface functions.
const std = @import("std");
@ -163,18 +164,11 @@ pub const TestEnv = struct {
}
}
fn rocDbgFn(roc_dbg: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = env;
_ = roc_dbg;
}
fn rocDbgFn(_: *const RocDbg, _: *anyopaque) callconv(.c) void {}
fn rocExpectFailedFn(roc_expect: *const RocExpectFailed, env: *anyopaque) callconv(.c) void {
_ = env;
_ = roc_expect;
}
fn rocExpectFailedFn(_: *const RocExpectFailed, _: *anyopaque) callconv(.c) void {}
fn rocCrashedFn(roc_crashed: *const RocCrashed, env: *anyopaque) callconv(.c) noreturn {
_ = env;
fn rocCrashedFn(roc_crashed: *const RocCrashed, _: *anyopaque) callconv(.c) noreturn {
const message = roc_crashed.utf8_bytes[0..roc_crashed.len];
@panic(message);
}
@ -763,10 +757,9 @@ test "TestEnv basic functionality" {
// Should start with no allocations
try std.testing.expectEqual(@as(usize, 0), test_env.getAllocationCount());
// Get ops should work
// Get ops should work - verify we can get ops and it points back to our test env
const ops = test_env.getOps();
// Function pointers are non-null by design, just verify we can get ops
_ = ops;
try std.testing.expectEqual(@as(*anyopaque, @ptrCast(&test_env)), ops.env);
}
test "TestEnv allocation tracking" {


@ -40,5 +40,4 @@ pub const freeForZstd = bundle.freeForZstd;
test {
_ = @import("test_bundle.zig");
_ = @import("test_streaming.zig");
_ = bundle;
}


@ -271,7 +271,7 @@ pub const WhereClause = union(enum) {
const attrs = tree.beginNode();
try tree.endNode(begin, attrs);
},
.w_malformed => |malformed| {
.w_malformed => {
const begin = tree.beginNode();
try tree.pushStaticAtom("malformed");
@ -280,7 +280,6 @@ pub const WhereClause = union(enum) {
const region = cir.store.getRegionAt(node_idx);
try cir.appendRegionInfoToSExprTreeFromRegion(tree, region);
_ = malformed;
const attrs = tree.beginNode();
try tree.endNode(begin, attrs);
},
@ -720,7 +719,10 @@ pub fn fromF64(f: f64) ?RocDec {
/// Represents an import statement in a module
pub const Import = struct {
pub const Idx = enum(u32) { _ };
pub const Idx = enum(u32) {
first = 0,
_,
};
/// Sentinel value indicating unresolved import (max u32)
pub const UNRESOLVED_MODULE: u32 = std.math.maxInt(u32);


@ -51,8 +51,10 @@ in_statement_position: bool = true,
scopes: std.ArrayList(Scope) = .{},
/// Special scope for rigid type variables in annotations
type_vars_scope: base.Scratch(TypeVarScope),
/// Special scope for tracking exposed items from module header
exposed_scope: Scope = undefined,
/// Set of identifiers exposed from this module header (values not used)
exposed_idents: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{},
/// Set of types exposed from this module header (values not used)
exposed_types: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{},
/// Track exposed identifiers by text to handle changing indices
exposed_ident_texts: std.StringHashMapUnmanaged(Region) = .{},
/// Track exposed types by text to handle changing indices
@ -180,7 +182,8 @@ pub fn deinit(
const gpa = self.env.gpa;
self.type_vars_scope.deinit();
self.exposed_scope.deinit(gpa);
self.exposed_idents.deinit(gpa);
self.exposed_types.deinit(gpa);
self.exposed_ident_texts.deinit(gpa);
self.exposed_type_texts.deinit(gpa);
self.placeholder_idents.deinit(gpa);
@ -234,7 +237,6 @@ pub fn init(
.scratch_record_fields = try base.Scratch(types.RecordField).init(gpa),
.scratch_seen_record_fields = try base.Scratch(SeenRecordField).init(gpa),
.type_vars_scope = try base.Scratch(TypeVarScope).init(gpa),
.exposed_scope = Scope.init(false),
.scratch_tags = try base.Scratch(types.Tag).init(gpa),
.scratch_free_vars = try base.Scratch(Pattern.Idx).init(gpa),
.scratch_captures = try base.Scratch(Pattern.Idx).init(gpa),
@ -458,8 +460,8 @@ fn processTypeDeclFirstPass(
// Type was already introduced - check if it's a placeholder (anno = 0) or a real declaration
const existing_stmt = self.env.store.getStatement(existing_stmt_idx);
const is_placeholder = switch (existing_stmt) {
.s_alias_decl => |alias| @intFromEnum(alias.anno) == 0,
.s_nominal_decl => |nominal| @intFromEnum(nominal.anno) == 0,
.s_alias_decl => |alias| alias.anno == .placeholder,
.s_nominal_decl => |nominal| nominal.anno == .placeholder,
else => false,
};
@ -483,13 +485,13 @@ fn processTypeDeclFirstPass(
.alias => Statement{
.s_alias_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced below
.anno = .placeholder, // placeholder, will be overwritten
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced below
.anno = .placeholder, // placeholder, will be overwritten
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -503,13 +505,13 @@ fn processTypeDeclFirstPass(
.alias => Statement{
.s_alias_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced
.anno = .placeholder, // placeholder, will be overwritten
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = final_header_idx,
.anno = @enumFromInt(0), // placeholder - will be replaced
.anno = .placeholder, // placeholder, will be overwritten
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -636,13 +638,13 @@ fn introduceTypeNameOnly(
.alias => Statement{
.s_alias_decl = .{
.header = header_idx,
.anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7
.anno = .placeholder, // placeholder, overwritten in Phase 1.7
},
},
.nominal, .@"opaque" => Statement{
.s_nominal_decl = .{
.header = header_idx,
.anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7
.anno = .placeholder, // placeholder, overwritten in Phase 1.7
.is_opaque = type_decl.kind == .@"opaque",
},
},
@ -1210,7 +1212,6 @@ fn processAssociatedItemsSecondPass(
const parent_text = self.env.getIdent(parent_name);
const name_text = self.env.getIdent(name_ident);
const qualified_idx = try self.env.insertQualifiedIdent(parent_text, name_text);
// Create anno-only def with the qualified name
const def_idx = try self.createAnnoOnlyDef(qualified_idx, type_anno_idx, where_clauses, region);
@ -1322,10 +1323,8 @@ fn processAssociatedItemsSecondPass(
fn registerUserFacingName(
self: *Self,
fully_qualified_idx: Ident.Idx,
item_name_idx: Ident.Idx,
pattern_idx: CIR.Pattern.Idx,
) std.mem.Allocator.Error!void {
_ = item_name_idx;
// Get the fully qualified text and strip the module prefix
const fully_qualified_text = self.env.getIdent(fully_qualified_idx);
@ -1656,7 +1655,7 @@ fn processAssociatedItemsFirstPass(
// - Module scope gets "Foo.Bar.baz" (user-facing fully qualified)
// - Foo's scope gets "Bar.baz" (partially qualified)
// - Bar's scope gets "baz" (unqualified)
try self.registerUserFacingName(qualified_idx, decl_ident, placeholder_pattern_idx);
try self.registerUserFacingName(qualified_idx, placeholder_pattern_idx);
}
}
},
@ -1684,7 +1683,7 @@ fn processAssociatedItemsFirstPass(
try current_scope.idents.put(self.env.gpa, qualified_idx, placeholder_pattern_idx);
// Register progressively qualified names at each scope level per the plan
try self.registerUserFacingName(qualified_idx, anno_ident, placeholder_pattern_idx);
try self.registerUserFacingName(qualified_idx, placeholder_pattern_idx);
}
},
else => {
@ -1749,7 +1748,7 @@ pub fn canonicalizeFile(
// canonicalize_header_packages();
// First, process the header to create exposed_scope and set module_kind
// First, process the header to populate exposed_idents/exposed_types and set module_kind
const header = self.parse_ir.store.getHeader(file.header);
switch (header) {
.module => |h| {
@ -2280,9 +2279,8 @@ pub fn canonicalizeFile(
}
}
},
.malformed => |malformed| {
.malformed => {
// We won't touch this since it's already a parse error.
_ = malformed;
},
}
}
@ -2555,11 +2553,9 @@ fn createExposedScope(
self: *Self,
exposes: AST.Collection.Idx,
) std.mem.Allocator.Error!void {
const gpa = self.env.gpa;
// Reset exposed_scope (already initialized in init)
self.exposed_scope.deinit(gpa);
self.exposed_scope = Scope.init(false);
// Clear exposed sets (they're already initialized with default values)
self.exposed_idents.clearRetainingCapacity();
self.exposed_types.clearRetainingCapacity();
try self.addToExposedScope(exposes);
}
@ -2598,9 +2594,8 @@ fn addToExposedScope(
// Add to exposed_items for permanent storage (unconditionally)
try self.env.addExposedById(ident_idx);
// Use a dummy pattern index - we just need to track that it's exposed
const dummy_idx = @as(Pattern.Idx, @enumFromInt(0));
try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx);
// Just track that this identifier is exposed
try self.exposed_idents.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2631,9 +2626,8 @@ fn addToExposedScope(
// Don't add types to exposed_items - types are not values
// Only add to type_bindings for type resolution
// Use a dummy statement index - we just need to track that it's exposed
const dummy_idx = @as(Statement.Idx, @enumFromInt(0));
try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx });
// Just track that this type is exposed
try self.exposed_types.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2664,9 +2658,8 @@ fn addToExposedScope(
// Don't add types to exposed_items - types are not values
// Only add to type_bindings for type resolution
// Use a dummy statement index - we just need to track that it's exposed
const dummy_idx = @as(Statement.Idx, @enumFromInt(0));
try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx });
// Just track that this type is exposed
try self.exposed_types.put(gpa, ident_idx, {});
}
// Store by text in a temporary hash map, since indices may change
@ -2687,9 +2680,8 @@ fn addToExposedScope(
try self.exposed_type_texts.put(gpa, type_text, region);
}
},
.malformed => |malformed| {
.malformed => {
// Malformed exposed items are already captured as diagnostics during parsing
_ = malformed;
},
}
}
@ -2715,9 +2707,8 @@ fn addPlatformProvidesItems(
// Add to exposed_items for permanent storage
try self.env.addExposedById(ident_idx);
// Add to exposed_scope so it becomes an export
const dummy_idx = @as(Pattern.Idx, @enumFromInt(0));
try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx);
// Track that this identifier is exposed (for exports)
try self.exposed_idents.put(gpa, ident_idx, {});
// Also track in exposed_ident_texts
const token_region = self.parse_ir.tokens.resolve(@intCast(field.name));
@ -2819,7 +2810,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void {
const defs_slice = self.env.store.sliceDefs(self.env.all_defs);
// Check each definition to see if it corresponds to an exposed item.
// We check exposed_scope.idents which only contains items from the exposing clause,
// We check exposed_idents which only contains items from the exposing clause,
// not associated items like "Color.as_str" which are registered separately.
for (defs_slice) |def_idx| {
const def = self.env.store.getDef(def_idx);
@ -2827,7 +2818,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void {
if (pattern == .assign) {
// Check if this identifier was explicitly exposed in the module header
if (self.exposed_scope.idents.contains(pattern.assign.ident)) {
if (self.exposed_idents.contains(pattern.assign.ident)) {
try self.env.store.addScratchDef(def_idx);
}
}
@ -2895,20 +2886,14 @@ fn bringImportIntoScope(
const exposed = self.parse_ir.store.getExposedItem(exposed_idx);
switch (exposed) {
.lower_ident => |ident| {
// TODO handle `as` here using an Alias
if (self.parse_ir.tokens.resolveIdentifier(ident.ident)) |ident_idx| {
_ = ident_idx;
// TODO Introduce our import
// TODO Introduce our import
if (self.parse_ir.tokens.resolveIdentifier(ident.ident)) |_| {
// _ = self.scope.levels.introduce(gpa, &self.env.idents, .ident, .{ .scope_name = ident_idx, .ident = ident_idx });
}
},
.upper_ident => |imported_type| {
_ = imported_type;
// const alias = Alias{
.upper_ident => {
// TODO: const alias = Alias{
// .name = imported_type.name,
// .region = ir.env.tag_names.getRegion(imported_type.name),
// .is_builtin = false,
@ -2921,9 +2906,7 @@ fn bringImportIntoScope(
// .alias = alias_idx,
// });
},
.upper_ident_star => |ident| {
_ = ident;
},
.upper_ident_star => {},
}
}
}
@ -2965,6 +2948,7 @@ fn importAliased(
alias_tok: ?Token.Idx,
exposed_items_span: CIR.ExposedItem.Span,
import_region: Region,
is_package_qualified: bool,
) std.mem.Allocator.Error!?Statement.Idx {
const module_name_text = self.env.getIdent(module_name);
@ -2979,8 +2963,8 @@ fn importAliased(
// 2. Resolve the alias
const alias = try self.resolveModuleAlias(alias_tok, module_name) orelse return null;
// 3. Add to scope: alias -> module_name mapping
try self.scopeIntroduceModuleAlias(alias, module_name, import_region, exposed_items_span);
// 3. Add to scope: alias -> module_name mapping (includes is_package_qualified flag)
try self.scopeIntroduceModuleAlias(alias, module_name, import_region, exposed_items_span, is_package_qualified);
// 4. Process type imports from this module
try self.processTypeImports(module_name, alias);
@ -3011,12 +2995,16 @@ fn importAliased(
// 9. Check that this module actually exists, and if not report an error
// Only check if module_envs is provided - when it's null, we don't know what modules
// exist yet (e.g., during standalone module canonicalization without full project context)
// Skip for package-qualified imports (e.g., "pf.Stdout") - those are cross-package
// imports that are resolved by the workspace resolver
if (self.module_envs) |envs_map| {
if (!envs_map.contains(module_name)) {
try self.env.pushDiagnostic(Diagnostic{ .module_not_found = .{
.module_name = module_name,
.region = import_region,
} });
if (!is_package_qualified) {
try self.env.pushDiagnostic(Diagnostic{ .module_not_found = .{
.module_name = module_name,
.region = import_region,
} });
}
}
}
@ -3101,6 +3089,7 @@ fn importUnaliased(
module_name: Ident.Idx,
exposed_items_span: CIR.ExposedItem.Span,
import_region: Region,
is_package_qualified: bool,
) std.mem.Allocator.Error!Statement.Idx {
const module_name_text = self.env.getIdent(module_name);
@ -3138,12 +3127,16 @@ fn importUnaliased(
// 6. Check that this module actually exists, and if not report an error
// Only check if module_envs is provided - when it's null, we don't know what modules
// exist yet (e.g., during standalone module canonicalization without full project context)
// Skip for package-qualified imports (e.g., "pf.Stdout") - those are cross-package
// imports that are resolved by the workspace resolver
if (self.module_envs) |envs_map| {
if (!envs_map.contains(module_name)) {
try self.env.pushDiagnostic(Diagnostic{ .module_not_found = .{
.module_name = module_name,
.region = import_region,
} });
if (!is_package_qualified) {
try self.env.pushDiagnostic(Diagnostic{ .module_not_found = .{
.module_name = module_name,
.region = import_region,
} });
}
}
}
@ -3224,11 +3217,14 @@ fn canonicalizeImportStatement(
const cir_exposes = try self.env.store.exposedItemSpanFrom(scratch_start);
const import_region = self.parse_ir.tokenizedRegionToRegion(import_stmt.region);
// 3. Dispatch to the appropriate handler based on whether this is a nested import
// 3. Check if this is a package-qualified import (has a qualifier like "pf" in "pf.Stdout")
const is_package_qualified = import_stmt.qualifier_tok != null;
// 4. Dispatch to the appropriate handler based on whether this is a nested import
return if (import_stmt.nested_import)
try self.importUnaliased(module_name, cir_exposes, import_region)
try self.importUnaliased(module_name, cir_exposes, import_region, is_package_qualified)
else
try self.importAliased(module_name, import_stmt.alias_tok, cir_exposes, import_region);
try self.importAliased(module_name, import_stmt.alias_tok, cir_exposes, import_region, is_package_qualified);
}
/// Resolve the module alias name from either explicit alias or module name
@ -3935,17 +3931,21 @@ pub fn canonicalizeExpr(
const qualifier_tok = @as(Token.Idx, @intCast(qualifier_tokens[0]));
if (self.parse_ir.tokens.resolveIdentifier(qualifier_tok)) |module_alias| {
// Check if this is a module alias, or an auto-imported module
const module_name = self.scopeLookupModule(module_alias) orelse blk: {
const module_info: ?Scope.ModuleAliasInfo = self.scopeLookupModule(module_alias) orelse blk: {
// Not in scope, check if it's an auto-imported module
if (self.module_envs) |envs_map| {
if (envs_map.contains(module_alias)) {
// This is an auto-imported module like Bool or Try
// Use the module_alias directly as the module_name
break :blk module_alias;
// Use the module_alias directly as the module_name (not package-qualified)
break :blk Scope.ModuleAliasInfo{
.module_name = module_alias,
.is_package_qualified = false,
};
}
}
break :blk null;
} orelse {
};
const module_name = if (module_info) |info| info.module_name else {
// Not a module alias and not an auto-imported module
// Check if the qualifier is a type - if so, try to lookup associated items
const is_type_in_scope = self.scopeLookupTypeBinding(module_alias) != null;
@ -5171,7 +5171,7 @@ pub fn canonicalizeExpr(
.patterns = ok_branch_pat_span,
.value = ok_lookup_idx,
.guard = null,
.redundant = @enumFromInt(0),
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5245,7 +5245,7 @@ pub fn canonicalizeExpr(
.patterns = err_branch_pat_span,
.value = return_expr_idx,
.guard = null,
.redundant = @enumFromInt(0),
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5259,7 +5259,7 @@ pub fn canonicalizeExpr(
const match_expr = Expr.Match{
.cond = can_cond.idx,
.branches = branches_span,
.exhaustive = @enumFromInt(0), // Will be set during type checking
.exhaustive = try self.env.types.fresh(),
};
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region);
@ -5307,6 +5307,11 @@ pub fn canonicalizeExpr(
.if_then_else => |e| {
const region = self.parse_ir.tokenizedRegionToRegion(e.region);
// Use scratch_captures as intermediate buffer for collecting free vars
// This avoids capturing intermediate data from nested block canonicalization
const captures_top = self.scratch_captures.top();
defer self.scratch_captures.clearFrom(captures_top);
const free_vars_start = self.scratch_free_vars.top();
// Start collecting if-branches
@ -5327,6 +5332,14 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = malformed_idx, .free_vars = DataSpan.empty() };
};
// Collect free variables from the condition into scratch_captures
const cond_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_cond.free_vars);
for (cond_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
const can_then = try self.canonicalizeExpr(current_if.then) orelse {
const ast_then = self.parse_ir.store.getExpr(current_if.then);
const then_region = self.parse_ir.tokenizedRegionToRegion(ast_then.to_tokenized_region());
@ -5336,6 +5349,14 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = malformed_idx, .free_vars = DataSpan.empty() };
};
// Collect free variables from the then-branch into scratch_captures
const then_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_then.free_vars);
for (then_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
// Add this condition/then pair as an if-branch
const if_branch = Expr.IfBranch{
.cond = can_cond.idx,
@ -5357,6 +5378,15 @@ pub fn canonicalizeExpr(
});
return CanonicalizedExpr{ .idx = malformed_idx, .free_vars = DataSpan.empty() };
};
// Collect free variables from the else-branch into scratch_captures
const else_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_else.free_vars);
for (else_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
final_else = can_else.idx;
break;
}
@ -5376,7 +5406,16 @@ pub fn canonicalizeExpr(
},
}, region);
const free_vars_span = self.scratch_free_vars.spanFrom(free_vars_start);
// Clear intermediate data from scratch_free_vars
self.scratch_free_vars.clearFrom(free_vars_start);
// Copy collected free vars from scratch_captures to scratch_free_vars
const if_free_vars_start = self.scratch_free_vars.top();
const captures_slice = self.scratch_captures.sliceFromStart(captures_top);
for (captures_slice) |fv| {
try self.scratch_free_vars.append(fv);
}
const free_vars_span = self.scratch_free_vars.spanFrom(if_free_vars_start);
return CanonicalizedExpr{ .idx = expr_idx, .free_vars = free_vars_span };
},
.if_without_else => |e| {
@ -5395,6 +5434,11 @@ pub fn canonicalizeExpr(
// Desugar to if-then-else with empty record {} as the final else
// Type checking will ensure the then-branch also has type {}
// Use scratch_captures as intermediate buffer for collecting free vars
// This avoids capturing intermediate data from nested block canonicalization
const captures_top = self.scratch_captures.top();
defer self.scratch_captures.clearFrom(captures_top);
const free_vars_start = self.scratch_free_vars.top();
// Canonicalize condition
@ -5407,6 +5451,14 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = malformed_idx, .free_vars = DataSpan.empty() };
};
// Collect free variables from the condition into scratch_captures
const cond_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_cond.free_vars);
for (cond_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
// Canonicalize then branch
const can_then = try self.canonicalizeExpr(e.then) orelse {
const ast_then = self.parse_ir.store.getExpr(e.then);
@ -5417,6 +5469,14 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = malformed_idx, .free_vars = DataSpan.empty() };
};
// Collect free variables from the then-branch into scratch_captures
const then_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_then.free_vars);
for (then_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
// Create an empty record {} as the implicit else
const empty_record_idx = try self.env.addExpr(CIR.Expr{ .e_empty_record = .{} }, region);
@ -5438,7 +5498,16 @@ pub fn canonicalizeExpr(
},
}, region);
const free_vars_span = self.scratch_free_vars.spanFrom(free_vars_start);
// Clear intermediate data from scratch_free_vars
self.scratch_free_vars.clearFrom(free_vars_start);
// Copy collected free vars from scratch_captures to scratch_free_vars
const if_free_vars_start = self.scratch_free_vars.top();
const captures_slice = self.scratch_captures.sliceFromStart(captures_top);
for (captures_slice) |fv| {
try self.scratch_free_vars.append(fv);
}
const free_vars_span = self.scratch_free_vars.spanFrom(if_free_vars_start);
return CanonicalizedExpr{ .idx = expr_idx, .free_vars = free_vars_span };
},
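// --- Illustrative sketch (not part of this change; plain ArrayLists stand in
// for scratch_captures/scratch_free_vars): the dedup-then-copy pattern used
// above. Branches may report the same free variable more than once, so we
// dedup into one buffer, then copy the result into the other as a single
// contiguous span.
const std = @import("std");

fn contains(items: []const u32, needle: u32) bool {
    for (items) |item| {
        if (item == needle) return true;
    }
    return false;
}

test "dedup free vars, then copy as one contiguous span" {
    const gpa = std.testing.allocator;
    var captures: std.ArrayList(u32) = .empty;
    defer captures.deinit(gpa);
    var free_vars: std.ArrayList(u32) = .empty;
    defer free_vars.deinit(gpa);

    // Both branches mention variable 2.
    const then_branch = [_]u32{ 1, 2 };
    const else_branch = [_]u32{ 2, 3 };
    for (then_branch ++ else_branch) |fv| {
        if (!contains(captures.items, fv)) try captures.append(gpa, fv);
    }

    // Copy the deduplicated set as one contiguous span.
    const span_start = free_vars.items.len;
    try free_vars.appendSlice(gpa, captures.items);
    try std.testing.expectEqual(@as(usize, 3), free_vars.items.len - span_start);
}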
.match => |m| {
@ -5567,7 +5636,7 @@ pub fn canonicalizeExpr(
.patterns = branch_pat_span,
.value = value_idx,
.guard = null,
.redundant = @enumFromInt(0), // TODO
.redundant = try self.env.types.fresh(),
},
region,
);
@ -5587,7 +5656,7 @@ pub fn canonicalizeExpr(
const match_expr = Expr.Match{
.cond = can_cond.idx,
.branches = branches_span,
.exhaustive = @enumFromInt(0), // Will be set during type checking
.exhaustive = try self.env.types.fresh(),
};
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region);
@ -5636,7 +5705,7 @@ pub fn canonicalizeExpr(
},
.for_expr => |for_expr| {
const region = self.parse_ir.tokenizedRegionToRegion(for_expr.region);
const result = try self.canonicalizeForLoop(for_expr.patt, for_expr.expr, for_expr.body, region);
const result = try self.canonicalizeForLoop(for_expr.patt, for_expr.expr, for_expr.body);
const for_expr_idx = try self.env.addExpr(Expr{
.e_for = .{
@ -5648,9 +5717,8 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = for_expr_idx, .free_vars = result.free_vars };
},
.malformed => |malformed| {
.malformed => {
// We won't touch this since it's already a parse error.
_ = malformed;
return null;
},
}
@ -5686,9 +5754,7 @@ fn canonicalizeForLoop(
ast_patt: AST.Pattern.Idx,
ast_list_expr: AST.Expr.Idx,
ast_body: AST.Expr.Idx,
region: base.Region,
) std.mem.Allocator.Error!CanonicalizedForLoop {
_ = region;
// Temporary state to capture free vars from both the expr & the body.
// This is stored as a map to avoid duplicate captures.
@ -6039,7 +6105,8 @@ fn canonicalizeTagExpr(self: *Self, e: AST.TagExpr, mb_args: ?AST.Expr.Span, reg
// For Imported.Foo.Bar.X: module=Imported, type=Foo.Bar, tag=X
// qualifiers=[Imported, Foo, Bar], so type name is built from qualifiers[1..]
const module_name = self.scopeLookupModule(first_tok_ident).?; // Already checked above
const module_info = self.scopeLookupModule(first_tok_ident).?; // Already checked above
const module_name = module_info.module_name;
const module_name_text = self.env.getIdent(module_name);
// Check if this is imported in the current scope
@ -6366,6 +6433,28 @@ fn canonicalizePattern(
return malformed_idx;
}
},
.var_ident => |e| {
// Mutable variable binding in a pattern (e.g., `|var $x, y|`)
const region = self.parse_ir.tokenizedRegionToRegion(e.region);
if (self.parse_ir.tokens.resolveIdentifier(e.ident_tok)) |ident_idx| {
// Create a Pattern node for our mutable identifier
const pattern_idx = try self.env.addPattern(Pattern{ .assign = .{
.ident = ident_idx,
} }, region);
// Introduce the var with function boundary tracking (using scopeIntroduceVar)
_ = try self.scopeIntroduceVar(ident_idx, pattern_idx, region, true, Pattern.Idx);
return pattern_idx;
} else {
const feature = try self.env.insertString("report an error when unable to resolve identifier");
const malformed_idx = try self.env.pushMalformed(Pattern.Idx, Diagnostic{ .not_implemented = .{
.feature = feature,
.region = Region.zero(),
} });
return malformed_idx;
}
},
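// --- Sketch (toy scope stack, assumed semantics): the function-boundary
// tracking that scopeIntroduceVar feeds into. Reassigning a `var` is only
// allowed if the binding is found before crossing a function boundary;
// otherwise the var_across_function_boundary diagnostic fires.
const std = @import("std");

const ScopeInfo = struct { is_function_boundary: bool, has_var: bool };

// Walk innermost to outermost; fail if a boundary is crossed first.
fn canReassign(scopes: []const ScopeInfo) bool {
    var i = scopes.len;
    while (i > 0) {
        i -= 1;
        if (scopes[i].has_var) return true;
        if (scopes[i].is_function_boundary) return false;
    }
    return false;
}

test "var reassignment across a function boundary is rejected" {
    // [outer scope with var] [lambda boundary] [inner scope]
    const scopes = [_]ScopeInfo{
        .{ .is_function_boundary = false, .has_var = true },
        .{ .is_function_boundary = true, .has_var = false },
        .{ .is_function_boundary = false, .has_var = false },
    };
    try std.testing.expect(!canReassign(&scopes));
}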
.underscore => |p| {
const region = self.parse_ir.tokenizedRegionToRegion(p.region);
const underscore_pattern = Pattern{
@ -6766,13 +6855,14 @@ fn canonicalizePattern(
const module_alias = try self.env.insertIdent(base.Ident.for_text(module_alias_text));
// Check if this is a module alias
const module_name = self.scopeLookupModule(module_alias) orelse {
const module_info = self.scopeLookupModule(module_alias) orelse {
// Module is not in current scope
return try self.env.pushMalformed(Pattern.Idx, CIR.Diagnostic{ .module_not_imported = .{
.module_name = module_alias,
.region = region,
} });
};
const module_name = module_info.module_name;
const module_name_text = self.env.getIdent(module_name);
// Check if this module is imported in the current scope
@ -7151,9 +7241,8 @@ fn canonicalizePattern(
return pattern_idx;
}
},
.malformed => |malformed| {
.malformed => {
// We won't touch this since it's already a parse error.
_ = malformed;
return null;
},
}
@ -7574,8 +7663,8 @@ fn processCollectedTypeVars(self: *Self) std.mem.Allocator.Error!void {
// Collect problems for this type variable
const is_single_use = !found_another;
// Use a dummy AST annotation index since we don't have the context
try collectTypeVarProblems(first_ident, is_single_use, @enumFromInt(0), &self.scratch_type_var_problems);
// Use an undefined AST annotation index since we don't have the context here
try collectTypeVarProblems(first_ident, is_single_use, undefined, &self.scratch_type_var_problems);
}
// Report any problems we found
@ -8002,7 +8091,7 @@ fn canonicalizeTypeAnnoBasicType(
const module_alias = try self.env.insertIdent(base.Ident.for_text(module_alias_text));
// Check if this is a module alias
const module_name = self.scopeLookupModule(module_alias) orelse {
const module_info = self.scopeLookupModule(module_alias) orelse {
// Module is not in current scope - but check if it's a type name first
if (self.scopeLookupTypeBinding(module_alias)) |_| {
// This is in scope as a type/value, but doesn't expose the nested type being requested
@ -8019,6 +8108,7 @@ fn canonicalizeTypeAnnoBasicType(
.region = region,
} });
};
const module_name = module_info.module_name;
const module_name_text = self.env.getIdent(module_name);
// Check if this module is imported in the current scope
@ -9227,7 +9317,7 @@ pub fn canonicalizeBlockStatement(self: *Self, ast_stmt: AST.Statement, ast_stmt
},
.@"for" => |for_stmt| {
const region = self.parse_ir.tokenizedRegionToRegion(for_stmt.region);
const result = try self.canonicalizeForLoop(for_stmt.patt, for_stmt.expr, for_stmt.body, region);
const result = try self.canonicalizeForLoop(for_stmt.patt, for_stmt.expr, for_stmt.body);
const stmt_idx = try self.env.addStatement(Statement{
.s_for = .{
@ -10199,7 +10289,7 @@ fn scopeLookupTypeBindingConst(self: *const Self, ident_idx: Ident.Idx) ?TypeBin
}
/// Look up a module alias in the scope hierarchy
fn scopeLookupModule(self: *const Self, alias_name: Ident.Idx) ?Ident.Idx {
fn scopeLookupModule(self: *const Self, alias_name: Ident.Idx) ?Scope.ModuleAliasInfo {
// Search from innermost to outermost scope
var i = self.scopes.items.len;
while (i > 0) {
@ -10207,7 +10297,7 @@ fn scopeLookupModule(self: *const Self, alias_name: Ident.Idx) ?Ident.Idx {
const scope = &self.scopes.items[i];
switch (scope.lookupModuleAlias(alias_name)) {
.found => |module_name| return module_name,
.found => |module_info| return module_info,
.not_found => continue,
}
}
@ -10216,7 +10306,7 @@ fn scopeLookupModule(self: *const Self, alias_name: Ident.Idx) ?Ident.Idx {
}
/// Introduce a module alias into scope
fn scopeIntroduceModuleAlias(self: *Self, alias_name: Ident.Idx, module_name: Ident.Idx, import_region: Region, exposed_items_span: CIR.ExposedItem.Span) std.mem.Allocator.Error!void {
fn scopeIntroduceModuleAlias(self: *Self, alias_name: Ident.Idx, module_name: Ident.Idx, import_region: Region, exposed_items_span: CIR.ExposedItem.Span, is_package_qualified: bool) std.mem.Allocator.Error!void {
const gpa = self.env.gpa;
const current_scope = &self.scopes.items[self.scopes.items.len - 1];
@ -10257,11 +10347,11 @@ fn scopeIntroduceModuleAlias(self: *Self, alias_name: Ident.Idx, module_name: Id
}
// Simplified introduction without parent lookup for now
const result = try current_scope.introduceModuleAlias(gpa, alias_name, module_name, null);
const result = try current_scope.introduceModuleAlias(gpa, alias_name, module_name, is_package_qualified, null);
switch (result) {
.success => {},
.shadowing_warning => |shadowed_module| {
.shadowing_warning => {
// Create diagnostic for module alias shadowing
try self.env.pushDiagnostic(Diagnostic{
.shadowing_warning = .{
@ -10270,11 +10360,9 @@ fn scopeIntroduceModuleAlias(self: *Self, alias_name: Ident.Idx, module_name: Id
.original_region = Region.zero(),
},
});
_ = shadowed_module; // Suppress unused variable warning
},
.already_in_scope => |existing_module| {
.already_in_scope => {
// Module alias already exists in current scope
// For now, just issue a diagnostic
try self.env.pushDiagnostic(Diagnostic{
.shadowing_warning = .{
.ident = alias_name,
@ -10282,13 +10370,12 @@ fn scopeIntroduceModuleAlias(self: *Self, alias_name: Ident.Idx, module_name: Id
.original_region = Region.zero(),
},
});
_ = existing_module; // Suppress unused variable warning
},
}
}
/// Helper function to look up module aliases in parent scopes only
fn scopeLookupModuleInParentScopes(self: *const Self, alias_name: Ident.Idx) ?Ident.Idx {
fn scopeLookupModuleInParentScopes(self: *const Self, alias_name: Ident.Idx) ?Scope.ModuleAliasInfo {
// Search from second-innermost to outermost scope (excluding current scope)
if (self.scopes.items.len <= 1) return null;
@ -10297,8 +10384,8 @@ fn scopeLookupModuleInParentScopes(self: *const Self, alias_name: Ident.Idx) ?Id
i -= 1;
const scope = &self.scopes.items[i];
switch (scope.lookupModuleAlias(&self.env.idents, alias_name)) {
.found => |module_name| return module_name,
switch (scope.lookupModuleAlias(alias_name)) {
.found => |module_info| return module_info,
.not_found => continue,
}
}
@ -10740,12 +10827,13 @@ fn createAnnotationFromTypeAnno(
/// we create external declarations that will be resolved later when
/// we have access to the other module's IR after it has been type checked.
fn processTypeImports(self: *Self, module_name: Ident.Idx, alias_name: Ident.Idx) std.mem.Allocator.Error!void {
// Set up the module alias for qualified lookups
// Set up the module alias for qualified lookups (type imports are not package-qualified)
const scope = self.currentScope();
_ = try scope.introduceModuleAlias(
self.env.gpa,
alias_name,
module_name,
false, // Type imports are not package-qualified
null, // No parent lookup function for now
);
}
@ -10765,7 +10853,8 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca
const module_alias = self.parse_ir.tokens.resolveIdentifier(left_ident.token) orelse return null;
// Check if this is a module alias
const module_name = self.scopeLookupModule(module_alias) orelse return null;
const module_info = self.scopeLookupModule(module_alias) orelse return null;
const module_name = module_info.module_name;
const module_text = self.env.getIdent(module_name);
// Check if this module is imported in the current scope
@ -10790,14 +10879,154 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca
return null;
};
// This is a module-qualified lookup
// This IS a module-qualified lookup - we must handle it completely here.
// After this point, returning null would cause incorrect fallback to regular field access.
const right_expr = self.parse_ir.store.getExpr(field_access.right);
if (right_expr != .ident) return null;
const region = self.parse_ir.tokenizedRegionToRegion(field_access.region);
// Handle method calls on module-qualified types (e.g., Stdout.line!(...))
if (right_expr == .apply) {
const apply = right_expr.apply;
const method_expr = self.parse_ir.store.getExpr(apply.@"fn");
if (method_expr != .ident) {
// Module-qualified call with non-ident function (e.g., Module.(complex_expr)(...))
// This is malformed - report error
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
}
const method_ident = method_expr.ident;
const method_name = self.parse_ir.tokens.resolveIdentifier(method_ident.token) orelse {
// Couldn't resolve method name token
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
};
// Check if this is a type module (like Stdout) - look up the qualified method name directly
if (self.module_envs) |envs_map| {
if (envs_map.get(module_name)) |auto_imported_type| {
if (auto_imported_type.statement_idx != null) {
// This is an imported type module (like Stdout)
// Look up the qualified method name (e.g., "Stdout.line!") in the module's exposed items
const module_env = auto_imported_type.env;
const module_name_text = module_env.module_name;
const auto_import_idx = try self.getOrCreateAutoImport(module_name_text);
// Build the qualified method name: "TypeName.method_name"
const type_name_text = self.env.getIdent(module_name);
const method_name_text = self.env.getIdent(method_name);
const qualified_method_name = try self.env.insertQualifiedIdent(type_name_text, method_name_text);
const qualified_text = self.env.getIdent(qualified_method_name);
// Look up the qualified method in the module's exposed items
if (module_env.common.findIdent(qualified_text)) |method_ident_idx| {
if (module_env.getExposedNodeIndexById(method_ident_idx)) |method_node_idx| {
// Found the method! Create e_lookup_external + e_call
const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{
.module_idx = auto_import_idx,
.target_node_idx = method_node_idx,
.region = region,
} }, region);
// Canonicalize the arguments
const scratch_top = self.env.store.scratchExprTop();
for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| {
if (try self.canonicalizeExpr(arg_idx)) |canonicalized| {
try self.env.store.addScratchExpr(canonicalized.get_idx());
}
}
const args_span = try self.env.store.exprSpanFrom(scratch_top);
// Create the call expression
const call_expr_idx = try self.env.addExpr(CIR.Expr{
.e_call = .{
.func = func_expr_idx,
.args = args_span,
.called_via = CalledVia.apply,
},
}, region);
return call_expr_idx;
}
}
// Method not found in module - generate error
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .nested_value_not_found = .{
.parent_name = module_name,
.nested_name = method_name,
.region = region,
} });
}
}
}
// Module exists but is not a type module with a statement_idx - it's a regular module
// This means it's something like `SomeModule.someFunc(args)` where someFunc is a regular export
// We need to look up the function and create a call
const field_text = self.env.getIdent(method_name);
const target_node_idx_opt: ?u16 = if (self.module_envs) |envs_map| blk: {
if (envs_map.get(module_name)) |auto_imported_type| {
const module_env = auto_imported_type.env;
if (module_env.common.findIdent(field_text)) |target_ident| {
break :blk module_env.getExposedNodeIndexById(target_ident);
} else {
break :blk null;
}
} else {
break :blk null;
}
} else null;
if (target_node_idx_opt) |target_node_idx| {
// Found the function - create a lookup and call it
const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{
.module_idx = import_idx,
.target_node_idx = target_node_idx,
.region = region,
} }, region);
// Canonicalize the arguments
const scratch_top = self.env.store.scratchExprTop();
for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| {
if (try self.canonicalizeExpr(arg_idx)) |canonicalized| {
try self.env.store.addScratchExpr(canonicalized.get_idx());
}
}
const args_span = try self.env.store.exprSpanFrom(scratch_top);
// Create the call expression
const call_expr_idx = try self.env.addExpr(CIR.Expr{
.e_call = .{
.func = func_expr_idx,
.args = args_span,
.called_via = CalledVia.apply,
},
}, region);
return call_expr_idx;
} else {
// Function not found in module
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{
.ident = method_name,
.region = region,
} });
}
}
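// --- Sketch (simplified shapes, not this file's real types): the dispatch
// above in miniature. Once an access is known to be module-qualified, it is
// either a call (apply), a plain ident lookup, or malformed - it must never
// fall back silently to regular field access.
const std = @import("std");

const Right = enum { apply, ident, other };

fn classify(right: Right) []const u8 {
    return switch (right) {
        .apply => "module-qualified call",
        .ident => "module-qualified lookup",
        .other => "malformed: report a diagnostic, don't fall back",
    };
}

test "module-qualified access never falls back silently" {
    try std.testing.expectEqualStrings("module-qualified call", classify(.apply));
    try std.testing.expectEqualStrings(
        "malformed: report a diagnostic, don't fall back",
        classify(.other),
    );
}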
// Handle simple field access (not a method call)
if (right_expr != .ident) {
// Module-qualified access with non-ident, non-apply right side - malformed
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
}
const right_ident = right_expr.ident;
const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse return null;
const region = self.parse_ir.tokenizedRegionToRegion(field_access.region);
const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse {
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{
.region = region,
} });
};
// Check if this is a tag access on an auto-imported nominal type (e.g., Bool.True)
if (self.module_envs) |envs_map| {
@ -10854,8 +11083,13 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca
}
} else null;
// If we didn't find a valid node index, return null to fall through to error handling
const target_node_idx = target_node_idx_opt orelse return null;
// If we didn't find a valid node index, report an error (don't fall back)
const target_node_idx = target_node_idx_opt orelse {
return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{
.ident = field_name,
.region = region,
} });
};
// Create the e_lookup_external expression with Import.Idx
const expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{

View file

@ -468,7 +468,6 @@ pub const Diagnostic = union(enum) {
allocator: Allocator,
ident_name: []const u8,
region_info: base.RegionInfo,
original_region_info: base.RegionInfo,
filename: []const u8,
source: []const u8,
line_starts: []const u32,
@ -490,10 +489,6 @@ pub const Diagnostic = union(enum) {
line_starts,
);
// we don't need to display the original region info
// as this header is in a single location
_ = original_region_info;
try report.document.addReflowingText("You can remove the duplicate entry to fix this warning.");
return report;

View file

@ -22,8 +22,6 @@ pub const Idx = enum(u32) { _ };
pub const Span = extern struct { span: DataSpan };
/// Converts this external declaration to an S-expression tree representation for debugging
pub fn pushToSExprTree(self: *const ExternalDecl, cir: anytype, tree: anytype) !void {
_ = self;
_ = cir;
pub fn pushToSExprTree(_: *const ExternalDecl, _: anytype, tree: anytype) !void {
try tree.pushStaticAtom("external-decl-stub");
}

View file

@ -122,10 +122,6 @@ pub fn replaceAnnoOnlyWithHosted(env: *ModuleEnv) !std.ArrayList(CIR.Def.Idx) {
env.store.extra_data.items.items[extra_start + 1] = @intFromEnum(expr_idx);
// Verify the def still has its annotation after modification
const modified_def = env.store.getDef(def_idx);
_ = modified_def;
// Track this modified def index
try modified_def_indices.append(gpa, def_idx);
}

View file

@ -437,8 +437,7 @@ pub fn relocate(self: *Self, offset: isize) void {
}
/// Initialize the compilation fields in an existing ModuleEnv
pub fn initCIRFields(self: *Self, gpa: std.mem.Allocator, module_name: []const u8) !void {
_ = gpa; // unused since we don't create new allocations
pub fn initCIRFields(self: *Self, module_name: []const u8) !void {
self.module_kind = .deprecated_module; // default until canonicalization sets the actual kind
self.all_defs = .{ .span = .{ .start = 0, .len = 0 } };
self.all_statements = .{ .span = .{ .start = 0, .len = 0 } };
@ -454,8 +453,8 @@ pub fn initCIRFields(self: *Self, gpa: std.mem.Allocator, module_name: []const u
}
/// Alias for initCIRFields for backwards compatibility with tests
pub fn initModuleEnvFields(self: *Self, gpa: std.mem.Allocator, module_name: []const u8) !void {
return self.initCIRFields(gpa, module_name);
pub fn initModuleEnvFields(self: *Self, module_name: []const u8) !void {
return self.initCIRFields(module_name);
}
/// Initialize the module environment.
@ -982,7 +981,6 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
.redundant_exposed => |data| blk: {
const ident_name = self.getIdent(data.ident);
const region_info = self.calcRegionInfo(data.region);
const original_region_info = self.calcRegionInfo(data.original_region);
var report = Report.init(allocator, "REDUNDANT EXPOSED", .warning);
const owned_ident = try report.addOwnedString(ident_name);
@ -1001,10 +999,6 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
self.getLineStartsAll(),
);
// we don't need to display the original region info
// as this header is in a single location
_ = original_region_info;
try report.document.addReflowingText("You can remove the duplicate entry to fix this warning.");
break :blk report;
@ -1206,9 +1200,7 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
break :blk report;
},
.lambda_body_not_canonicalized => |data| blk: {
_ = data;
.lambda_body_not_canonicalized => blk: {
var report = Report.init(allocator, "INVALID LAMBDA", .runtime_error);
try report.document.addReflowingText("The body of this lambda expression is not valid.");
@ -1234,9 +1226,7 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
break :blk report;
},
.var_across_function_boundary => |data| blk: {
_ = data;
.var_across_function_boundary => blk: {
var report = Report.init(allocator, "VAR REASSIGNMENT ERROR", .runtime_error);
try report.document.addReflowingText("Cannot reassign a ");
try report.document.addKeyword("var");
@ -1248,9 +1238,7 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
break :blk report;
},
.tuple_elem_not_canonicalized => |data| blk: {
_ = data;
.tuple_elem_not_canonicalized => blk: {
var report = Report.init(allocator, "INVALID TUPLE ELEMENT", .runtime_error);
try report.document.addReflowingText("This tuple element is malformed or contains invalid syntax.");
@ -2238,8 +2226,7 @@ pub fn addMatchBranchPattern(self: *Self, expr: CIR.Expr.Match.BranchPattern, re
/// Add a new pattern record field to the node store.
/// This function asserts that the nodes and regions are in sync.
pub fn addPatternRecordField(self: *Self, expr: CIR.PatternRecordField, region: Region) std.mem.Allocator.Error!CIR.PatternRecordField.Idx {
_ = region;
pub fn addPatternRecordField(self: *Self, expr: CIR.PatternRecordField) std.mem.Allocator.Error!CIR.PatternRecordField.Idx {
const expr_idx = try self.store.addPatternRecordField(expr);
self.debugAssertArraysInSync();
return expr_idx;

View file

@ -674,9 +674,11 @@ pub fn getExpr(store: *const NodeStore, expr: CIR.Expr.Idx) CIR.Expr {
.expr_suffix_single_question,
.expr_record_builder,
=> {
return CIR.Expr{ .e_runtime_error = .{
.diagnostic = @enumFromInt(0),
} };
return CIR.Expr{
.e_runtime_error = .{
.diagnostic = undefined, // deserialized runtime errors don't preserve diagnostics
},
};
},
.expr_ellipsis => {
return CIR.Expr{ .e_ellipsis = .{} };
@ -1126,9 +1128,7 @@ pub fn getPattern(store: *const NodeStore, pattern_idx: CIR.Pattern.Idx) CIR.Pat
}
/// Retrieves a pattern record field from the store.
pub fn getPatternRecordField(store: *NodeStore, patternRecordField: CIR.PatternRecordField.Idx) CIR.PatternRecordField {
_ = store;
_ = patternRecordField;
pub fn getPatternRecordField(_: *NodeStore, _: CIR.PatternRecordField.Idx) CIR.PatternRecordField {
// Return empty placeholder since PatternRecordField has no fields yet
return CIR.PatternRecordField{};
}
@ -1512,7 +1512,7 @@ pub fn addExpr(store: *NodeStore, expr: CIR.Expr, region: base.Region) Allocator
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
switch (expr) {
@ -2140,11 +2140,8 @@ pub fn addPattern(store: *NodeStore, pattern: CIR.Pattern, region: base.Region)
}
/// Adds a pattern record field to the store.
pub fn addPatternRecordField(store: *NodeStore, patternRecordField: CIR.PatternRecordField) Allocator.Error!CIR.PatternRecordField.Idx {
_ = store;
_ = patternRecordField;
return @enumFromInt(0);
pub fn addPatternRecordField(_: *NodeStore, _: CIR.PatternRecordField) Allocator.Error!CIR.PatternRecordField.Idx {
@panic("TODO: addPatternRecordField not implemented");
}
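// --- Sketch (self-contained, illustrative only): why `undefined` beats
// @enumFromInt(0) for "set below in switch" placeholders. Every arm assigns
// the field, so a missed arm trips safety checks instead of silently
// masquerading as tag 0.
const std = @import("std");

const Tag = enum { expr_int, expr_str };
const FakeNode = struct { tag: Tag, data_1: u32 };

fn makeNode(kind: u1) FakeNode {
    var node = FakeNode{ .tag = undefined, .data_1 = 0 };
    switch (kind) {
        0 => node.tag = .expr_int,
        1 => node.tag = .expr_str,
    }
    return node;
}

test "tag is assigned in every switch arm" {
    try std.testing.expectEqual(Tag.expr_str, makeNode(1).tag);
}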
/// Adds a type annotation to the store.
@ -2156,7 +2153,7 @@ pub fn addTypeAnno(store: *NodeStore, typeAnno: CIR.TypeAnno, region: base.Regio
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
switch (typeAnno) {
@ -2861,7 +2858,7 @@ pub fn addDiagnostic(store: *NodeStore, reason: CIR.Diagnostic) Allocator.Error!
.data_1 = 0,
.data_2 = 0,
.data_3 = 0,
.tag = @enumFromInt(0),
.tag = undefined, // set below in switch
};
var region = base.Region.zero();
@ -3694,7 +3691,7 @@ test "NodeStore basic CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, node1);
const node1_idx = try original.nodes.append(gpa, node1);
// Add integer value to extra_data (i128 as 4 u32s)
const value: i128 = 42;
@ -3709,7 +3706,7 @@ test "NodeStore basic CompactWriter roundtrip" {
.start = .{ .offset = 0 },
.end = .{ .offset = 5 },
};
_ = try original.regions.append(gpa, region);
const region1_idx = try original.regions.append(gpa, region);
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
@ -3742,7 +3739,7 @@ test "NodeStore basic CompactWriter roundtrip" {
// Verify nodes
try testing.expectEqual(@as(usize, 1), deserialized.nodes.len());
const retrieved_node = deserialized.nodes.get(@enumFromInt(0));
const retrieved_node = deserialized.nodes.get(node1_idx);
try testing.expectEqual(Node.Tag.expr_int, retrieved_node.tag);
try testing.expectEqual(@as(u32, 0), retrieved_node.data_1);
@ -3755,7 +3752,7 @@ test "NodeStore basic CompactWriter roundtrip" {
// Verify regions
try testing.expectEqual(@as(usize, 1), deserialized.regions.len());
const retrieved_region = deserialized.regions.get(@enumFromInt(0));
const retrieved_region = deserialized.regions.get(region1_idx);
try testing.expectEqual(region.start.offset, retrieved_region.start.offset);
try testing.expectEqual(region.end.offset, retrieved_region.end.offset);
}
@ -3775,7 +3772,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, var_node);
const var_node_idx = try original.nodes.append(gpa, var_node);
// Add expression list node
const list_node = Node{
@ -3784,7 +3781,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 3, // elems len
.data_3 = 0,
};
_ = try original.nodes.append(gpa, list_node);
const list_node_idx = try original.nodes.append(gpa, list_node);
// Add float node with extra data
const float_node = Node{
@ -3793,7 +3790,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
.data_2 = 0,
.data_3 = 0,
};
_ = try original.nodes.append(gpa, float_node);
const float_node_idx = try original.nodes.append(gpa, float_node);
// Add float value to extra_data
const float_value: f64 = 3.14159;
@ -3804,14 +3801,12 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
}
// Add regions for each node
const regions = [_]Region{
.{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } },
.{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } },
.{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } },
};
for (regions) |region| {
_ = try original.regions.append(gpa, region);
}
const region1 = Region{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } };
const region2 = Region{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } };
const region3 = Region{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } };
const region1_idx = try original.regions.append(gpa, region1);
const region2_idx = try original.regions.append(gpa, region2);
const region3_idx = try original.regions.append(gpa, region3);
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
@ -3845,32 +3840,36 @@ test "NodeStore multiple nodes CompactWriter roundtrip" {
// Verify nodes
try testing.expectEqual(@as(usize, 3), deserialized.nodes.len());
// Verify var node
const retrieved_var = deserialized.nodes.get(@enumFromInt(0));
// Verify var node using captured index
const retrieved_var = deserialized.nodes.get(var_node_idx);
try testing.expectEqual(Node.Tag.expr_var, retrieved_var.tag);
try testing.expectEqual(@as(u32, 5), retrieved_var.data_1);
// Verify list node
const retrieved_list = deserialized.nodes.get(@enumFromInt(1));
// Verify list node using captured index
const retrieved_list = deserialized.nodes.get(list_node_idx);
try testing.expectEqual(Node.Tag.expr_list, retrieved_list.tag);
try testing.expectEqual(@as(u32, 10), retrieved_list.data_1);
try testing.expectEqual(@as(u32, 3), retrieved_list.data_2);
// Verify float node and extra data
const retrieved_float = deserialized.nodes.get(@enumFromInt(2));
// Verify float node and extra data using captured index
const retrieved_float = deserialized.nodes.get(float_node_idx);
try testing.expectEqual(Node.Tag.expr_frac_f64, retrieved_float.tag);
const retrieved_float_u32s = deserialized.extra_data.items.items[0..2];
const retrieved_float_u64: u64 = @bitCast(retrieved_float_u32s.*);
const retrieved_float_value: f64 = @bitCast(retrieved_float_u64);
try testing.expectApproxEqAbs(float_value, retrieved_float_value, 0.0001);
// Verify regions
// Verify regions using captured indices
try testing.expectEqual(@as(usize, 3), deserialized.regions.len());
for (regions, 0..) |expected_region, i| {
const retrieved_region = deserialized.regions.get(@enumFromInt(i));
try testing.expectEqual(expected_region.start.offset, retrieved_region.start.offset);
try testing.expectEqual(expected_region.end.offset, retrieved_region.end.offset);
}
const retrieved_region1 = deserialized.regions.get(region1_idx);
try testing.expectEqual(region1.start.offset, retrieved_region1.start.offset);
try testing.expectEqual(region1.end.offset, retrieved_region1.end.offset);
const retrieved_region2 = deserialized.regions.get(region2_idx);
try testing.expectEqual(region2.start.offset, retrieved_region2.start.offset);
try testing.expectEqual(region2.end.offset, retrieved_region2.end.offset);
const retrieved_region3 = deserialized.regions.get(region3_idx);
try testing.expectEqual(region3.start.offset, retrieved_region3.start.offset);
try testing.expectEqual(region3.end.offset, retrieved_region3.end.offset);
// Verify scratch is null (deserialized NodeStores don't allocate scratch)
try testing.expect(deserialized.scratch == null);

View file

@ -50,8 +50,8 @@ forward_references: std.AutoHashMapUnmanaged(Ident.Idx, ForwardReference),
type_bindings: std.AutoHashMapUnmanaged(Ident.Idx, TypeBinding),
/// Maps type variables to their type annotation indices
type_vars: std.AutoHashMapUnmanaged(Ident.Idx, CIR.TypeAnno.Idx),
/// Maps module alias names to their full module names
module_aliases: std.AutoHashMapUnmanaged(Ident.Idx, Ident.Idx),
/// Maps module alias names to their full module info (name + whether package-qualified)
module_aliases: std.AutoHashMapUnmanaged(Ident.Idx, ModuleAliasInfo),
/// Maps exposed item names to their source modules and original names (for import resolution)
exposed_items: std.AutoHashMapUnmanaged(Ident.Idx, ExposedItemInfo),
/// Maps module names to their Import.Idx for modules imported in this scope
@ -69,7 +69,7 @@ pub fn init(is_function_boundary: bool) Scope {
.forward_references = std.AutoHashMapUnmanaged(Ident.Idx, ForwardReference){},
.type_bindings = std.AutoHashMapUnmanaged(Ident.Idx, TypeBinding){},
.type_vars = std.AutoHashMapUnmanaged(Ident.Idx, CIR.TypeAnno.Idx){},
.module_aliases = std.AutoHashMapUnmanaged(Ident.Idx, Ident.Idx){},
.module_aliases = std.AutoHashMapUnmanaged(Ident.Idx, ModuleAliasInfo){},
.exposed_items = std.AutoHashMapUnmanaged(Ident.Idx, ExposedItemInfo){},
.imported_modules = std.StringHashMapUnmanaged(CIR.Import.Idx){},
.is_function_boundary = is_function_boundary,
@ -124,9 +124,15 @@ pub const TypeVarLookupResult = union(enum) {
not_found: void,
};
/// Information about a module alias
pub const ModuleAliasInfo = struct {
module_name: Ident.Idx,
is_package_qualified: bool,
};
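// --- Sketch (stand-in types, hypothetical caller): lookups now yield the
// full alias info rather than a bare Ident.Idx, so callers can branch on
// package qualification.
const std = @import("std");

const FakeIdx = enum(u32) { _ };
const FakeAliasInfo = struct { module_name: FakeIdx, is_package_qualified: bool };

fn fakeLookup(found: bool) ?FakeAliasInfo {
    if (!found) return null;
    return .{ .module_name = @enumFromInt(7), .is_package_qualified = true };
}

test "alias lookup carries package qualification" {
    const info = fakeLookup(true) orelse return error.TestUnexpectedResult;
    try std.testing.expect(info.is_package_qualified);
    try std.testing.expectEqual(@as(u32, 7), @intFromEnum(info.module_name));
}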
/// Result of looking up a module alias
pub const ModuleAliasLookupResult = union(enum) {
found: Ident.Idx,
found: ModuleAliasInfo,
not_found: void,
};
@ -174,8 +180,8 @@ pub const TypeVarIntroduceResult = union(enum) {
/// Result of introducing a module alias
pub const ModuleAliasIntroduceResult = union(enum) {
success: void,
shadowing_warning: Ident.Idx, // The module alias that was shadowed
already_in_scope: Ident.Idx, // The module alias already exists in this scope
shadowing_warning: ModuleAliasInfo, // The module alias that was shadowed
already_in_scope: ModuleAliasInfo, // The module alias already exists in this scope
};
/// Result of introducing an exposed item
@ -204,7 +210,7 @@ pub const ItemKind = enum { ident, alias, type_var, module_alias, exposed_item }
pub fn items(scope: *Scope, comptime item_kind: ItemKind) switch (item_kind) {
.ident, .alias => *std.AutoHashMapUnmanaged(Ident.Idx, CIR.Pattern.Idx),
.type_var => *std.AutoHashMapUnmanaged(Ident.Idx, CIR.TypeAnno.Idx),
.module_alias => *std.AutoHashMapUnmanaged(Ident.Idx, Ident.Idx),
.module_alias => *std.AutoHashMapUnmanaged(Ident.Idx, ModuleAliasInfo),
.exposed_item => *std.AutoHashMapUnmanaged(Ident.Idx, ExposedItemInfo),
} {
return switch (item_kind) {
@ -220,7 +226,7 @@ pub fn items(scope: *Scope, comptime item_kind: ItemKind) switch (item_kind) {
pub fn itemsConst(scope: *const Scope, comptime item_kind: ItemKind) switch (item_kind) {
.ident, .alias => *const std.AutoHashMapUnmanaged(Ident.Idx, CIR.Pattern.Idx),
.type_var => *const std.AutoHashMapUnmanaged(Ident.Idx, CIR.TypeAnno.Idx),
.module_alias => *const std.AutoHashMapUnmanaged(Ident.Idx, Ident.Idx),
.module_alias => *const std.AutoHashMapUnmanaged(Ident.Idx, ModuleAliasInfo),
.exposed_item => *const std.AutoHashMapUnmanaged(Ident.Idx, ExposedItemInfo),
} {
return switch (item_kind) {
@ -236,7 +242,7 @@ pub fn itemsConst(scope: *const Scope, comptime item_kind: ItemKind) switch (ite
pub fn put(scope: *Scope, gpa: std.mem.Allocator, comptime item_kind: ItemKind, name: Ident.Idx, value: switch (item_kind) {
.ident, .alias => CIR.Pattern.Idx,
.type_var => CIR.TypeAnno.Idx,
.module_alias => Ident.Idx,
.module_alias => ModuleAliasInfo,
.exposed_item => ExposedItemInfo,
}) std.mem.Allocator.Error!void {
try scope.items(item_kind).put(gpa, name, value);
@ -357,7 +363,7 @@ pub fn lookupTypeVar(scope: *const Scope, name: Ident.Idx) TypeVarLookupResult {
/// Look up a module alias in this scope
pub fn lookupModuleAlias(scope: *const Scope, name: Ident.Idx) ModuleAliasLookupResult {
// Search by comparing text content, not identifier index
// Search by comparing .idx values (integer index into string interner)
var iter = scope.module_aliases.iterator();
while (iter.next()) |entry| {
if (name.idx == entry.key_ptr.idx) {
@ -373,7 +379,8 @@ pub fn introduceModuleAlias(
gpa: std.mem.Allocator,
alias_name: Ident.Idx,
module_name: Ident.Idx,
parent_lookup_fn: ?fn (Ident.Idx) ?Ident.Idx,
is_package_qualified: bool,
parent_lookup_fn: ?fn (Ident.Idx) ?ModuleAliasInfo,
) std.mem.Allocator.Error!ModuleAliasIntroduceResult {
// Check if the alias already exists in the current scope by comparing .idx values
var iter = scope.module_aliases.iterator();
@ -385,15 +392,20 @@ pub fn introduceModuleAlias(
}
// Check for shadowing in parent scopes
var shadowed_module: ?Ident.Idx = null;
var shadowed_module: ?ModuleAliasInfo = null;
if (parent_lookup_fn) |lookup_fn| {
shadowed_module = lookup_fn(alias_name);
}
try scope.put(gpa, .module_alias, alias_name, module_name);
const module_info = ModuleAliasInfo{
.module_name = module_name,
.is_package_qualified = is_package_qualified,
};
if (shadowed_module) |module| {
return ModuleAliasIntroduceResult{ .shadowing_warning = module };
try scope.put(gpa, .module_alias, alias_name, module_info);
if (shadowed_module) |info| {
return ModuleAliasIntroduceResult{ .shadowing_warning = info };
}
return ModuleAliasIntroduceResult{ .success = {} };

View file

@ -96,7 +96,12 @@ pub const TypeAnno = union(enum) {
diagnostic: CIR.Diagnostic.Idx, // The error that occurred
},
pub const Idx = enum(u32) { _ };
pub const Idx = enum(u32) {
/// Placeholder value indicating the anno hasn't been set yet.
/// Used during forward reference resolution.
placeholder = 0,
_,
};
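// --- Sketch (illustrative): a named `placeholder` tag makes the
// "not yet resolved" state explicit and comparable during forward-reference
// resolution, instead of a bare @enumFromInt(0) that hides intent.
const std = @import("std");

const FakeAnnoIdx = enum(u32) {
    placeholder = 0,
    _,
};

test "placeholder is detectable, then replaced on resolution" {
    var anno: FakeAnnoIdx = .placeholder;
    try std.testing.expect(anno == .placeholder);
    anno = @enumFromInt(42); // resolved forward reference
    try std.testing.expect(anno != .placeholder);
}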
pub const Span = extern struct { span: DataSpan };
pub fn pushToSExprTree(self: *const @This(), ir: *const ModuleEnv, tree: *SExprTree, type_anno_idx: TypeAnno.Idx) std.mem.Allocator.Error!void {

View file

@ -48,7 +48,7 @@ pub fn init(source: []const u8) !TestEnv {
parse_ast.store.emptyScratch();
try module_env.initCIRFields(gpa, "test");
try module_env.initCIRFields("test");
can.* = try Can.init(module_env, parse_ast, null);

View file

@ -17,27 +17,3 @@ test "e_anno_only expression variant exists" {
else => return error.WrongExprVariant,
}
}
test "e_anno_only can be used in statements" {
// This test verifies that e_anno_only expressions can be
// used as part of s_decl statements, which is how standalone
// type annotations are represented after canonicalization.
const pattern_idx: CIR.Pattern.Idx = @enumFromInt(0);
const expr_idx: CIR.Expr.Idx = @enumFromInt(0);
const anno_idx: CIR.Annotation.Idx = @enumFromInt(0);
const stmt = CIR.Statement{ .s_decl = .{
.pattern = pattern_idx,
.expr = expr_idx,
.anno = anno_idx,
} };
// Verify the statement was created correctly
switch (stmt) {
.s_decl => |decl| {
try testing.expect(decl.anno != null);
},
else => return error.WrongStatementType,
}
}

View file

@ -27,7 +27,7 @@ test "exposed but not implemented - values" {
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
@ -66,7 +66,7 @@ test "exposed but not implemented - types" {
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
@ -105,7 +105,7 @@ test "redundant exposed entries" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -148,7 +148,7 @@ test "shadowing with exposed items" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -181,7 +181,7 @@ test "shadowing non-exposed items" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -221,7 +221,7 @@ test "exposed items correctly tracked across shadowing" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -277,7 +277,7 @@ test "complex case with redundant, shadowing, and not implemented" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -329,7 +329,7 @@ test "exposed_items is populated correctly" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -361,7 +361,7 @@ test "exposed_items persists after canonicalization" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -391,7 +391,7 @@ test "exposed_items never has entries removed" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);
@ -424,7 +424,7 @@ test "exposed_items handles identifiers with different attributes" {
;
var env = try ModuleEnv.init(allocator, source);
defer env.deinit();
try env.initCIRFields(allocator, "Test");
try env.initCIRFields("Test");
var ast = try parse.parse(&env.common, allocator);
defer ast.deinit(allocator);
var czer = try Can.init(&env, &ast, null);

View file

@ -30,9 +30,7 @@ test "fractional literal - basic decimal" {
try testing.expectEqual(dec.value.numerator, 314);
try testing.expectEqual(dec.value.denominator_power_of_ten, 2);
},
.e_dec => |dec| {
_ = dec;
},
.e_dec => {},
else => {
std.debug.print("Unexpected expr type: {}\n", .{expr});
try testing.expect(false); // Should be dec_small or frac_dec
@ -54,9 +52,8 @@ test "fractional literal - scientific notation small" {
// This is expected behavior when the value is too small for i16 representation
try testing.expectEqual(dec.value.numerator, 0);
},
.e_dec => |frac| {
.e_dec => {
// RocDec stores the value in a special format
_ = frac;
},
.e_frac_f64 => |frac| {
try testing.expectApproxEqAbs(frac.value, 1.23e-10, 1e-20);

View file

@ -9,15 +9,22 @@ const Import = CIR.Import;
const StringLiteral = base.StringLiteral;
const CompactWriter = collections.CompactWriter;
fn storeContainsModule(store: *const Import.Store, string_store: *const StringLiteral.Store, module_name: []const u8) bool {
for (store.imports.items.items) |string_idx| {
if (std.mem.eql(u8, string_store.get(string_idx), module_name)) {
return true;
}
}
return false;
}
test "Import.Store deduplicates module names" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a string store for interning module names
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
// Create import store
var store = Import.Store.init();
defer store.deinit(gpa);
@ -25,7 +32,7 @@ test "Import.Store deduplicates module names" {
const idx1 = try store.getOrPut(gpa, &string_store, "test.Module");
const idx2 = try store.getOrPut(gpa, &string_store, "test.Module");
// Should get the same index
// Should get the same index back (deduplication)
try testing.expectEqual(idx1, idx2);
try testing.expectEqual(@as(usize, 1), store.imports.len());
@ -39,21 +46,17 @@ test "Import.Store deduplicates module names" {
try testing.expectEqual(idx1, idx4);
try testing.expectEqual(@as(usize, 2), store.imports.len());
// Verify we can retrieve the module names through the string store
const str_idx1 = store.imports.items.items[@intFromEnum(idx1)];
const str_idx3 = store.imports.items.items[@intFromEnum(idx3)];
try testing.expectEqualStrings("test.Module", string_store.get(str_idx1));
try testing.expectEqualStrings("other.Module", string_store.get(str_idx3));
// Verify both module names are present
try testing.expect(storeContainsModule(&store, &string_store, "test.Module"));
try testing.expect(storeContainsModule(&store, &string_store, "other.Module"));
}
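// --- Sketch (std.StringHashMap stand-in, not the Import.Store API): the
// getOrPut-style dedup the store relies on - inserting the same name a
// second time hands back the existing entry.
const std = @import("std");

test "getOrPut deduplicates by name" {
    const gpa = std.testing.allocator;
    var map = std.StringHashMap(u32).init(gpa);
    defer map.deinit();

    const a = try map.getOrPut("test.Module");
    if (!a.found_existing) a.value_ptr.* = 0;
    const b = try map.getOrPut("test.Module");
    try std.testing.expect(b.found_existing);
    try std.testing.expectEqual(@as(u32, 0), b.value_ptr.*);
}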
test "Import.Store empty CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create an empty Store
var original = Import.Store.init();
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -66,15 +69,12 @@ test "Import.Store empty CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr = @as(*Import.Store.Serialized, @ptrCast(@alignCast(buffer.ptr)));
const deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
@ -87,27 +87,18 @@ test "Import.Store basic CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a mock module env with string store
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
const MockEnv = struct { strings: *StringLiteral.Store };
const mock_env = MockEnv{ .strings = &string_store };
// Create original store and add some imports
var original = Import.Store.init();
defer original.deinit(gpa);
const idx1 = try original.getOrPut(gpa, mock_env.strings, "json.Json");
const idx2 = try original.getOrPut(gpa, mock_env.strings, "core.List");
const idx3 = try original.getOrPut(gpa, mock_env.strings, "my.Module");
_ = try original.getOrPut(gpa, &string_store, "json.Json");
_ = try original.getOrPut(gpa, &string_store, "core.List");
_ = try original.getOrPut(gpa, &string_store, "my.Module");
// Verify indices
try testing.expectEqual(@as(u32, 0), @intFromEnum(idx1));
try testing.expectEqual(@as(u32, 1), @intFromEnum(idx2));
try testing.expectEqual(@as(u32, 2), @intFromEnum(idx3));
try testing.expectEqual(@as(usize, 3), original.imports.len());
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -120,30 +111,23 @@ test "Import.Store basic CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr));
var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
defer deserialized.map.deinit(gpa);
// Verify the imports are accessible
// Verify the correct number of imports
try testing.expectEqual(@as(usize, 3), deserialized.imports.len());
// Verify the interned string IDs are stored correctly
const str_idx1 = deserialized.imports.items.items[0];
const str_idx2 = deserialized.imports.items.items[1];
const str_idx3 = deserialized.imports.items.items[2];
try testing.expectEqualStrings("json.Json", string_store.get(str_idx1));
try testing.expectEqualStrings("core.List", string_store.get(str_idx2));
try testing.expectEqualStrings("my.Module", string_store.get(str_idx3));
// Verify all expected module names are present by iterating
try testing.expect(storeContainsModule(deserialized, &string_store, "json.Json"));
try testing.expect(storeContainsModule(deserialized, &string_store, "core.List"));
try testing.expect(storeContainsModule(deserialized, &string_store, "my.Module"));
// Verify the map is repopulated correctly
try testing.expectEqual(@as(usize, 3), deserialized.map.count());
@ -153,26 +137,20 @@ test "Import.Store duplicate imports CompactWriter roundtrip" {
const testing = std.testing;
const gpa = testing.allocator;
// Create a mock module env with string store
var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024);
defer string_store.deinit(gpa);
const MockEnv = struct { strings: *StringLiteral.Store };
const mock_env = MockEnv{ .strings = &string_store };
// Create store with duplicate imports
var original = Import.Store.init();
defer original.deinit(gpa);
const idx1 = try original.getOrPut(gpa, mock_env.strings, "test.Module");
const idx2 = try original.getOrPut(gpa, mock_env.strings, "another.Module");
const idx3 = try original.getOrPut(gpa, mock_env.strings, "test.Module"); // duplicate
const idx1 = try original.getOrPut(gpa, &string_store, "test.Module");
_ = try original.getOrPut(gpa, &string_store, "another.Module");
const idx3 = try original.getOrPut(gpa, &string_store, "test.Module"); // duplicate
// Verify deduplication worked
try testing.expectEqual(idx1, idx3);
try testing.expectEqual(@as(usize, 2), original.imports.len());
// Create a temp file
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -185,38 +163,23 @@ test "Import.Store duplicate imports CompactWriter roundtrip" {
const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized);
try serialized.serialize(&original, gpa, &writer);
// Write to file
try writer.writeGather(gpa, file);
// Read back
try file.seekTo(0);
const buffer = try file.readToEndAlloc(gpa, 1024 * 1024);
defer gpa.free(buffer);
// Cast to Serialized and deserialize
const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr));
var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa);
defer deserialized.map.deinit(gpa);
// Verify correct number of imports
// Verify correct number of imports (duplicates deduplicated)
try testing.expectEqual(@as(usize, 2), deserialized.imports.len());
// Get the string IDs and verify the strings
const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)];
const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)];
try testing.expectEqualStrings("test.Module", string_store.get(str_idx1));
try testing.expectEqualStrings("another.Module", string_store.get(str_idx2));
// Verify expected module names are present
try testing.expect(storeContainsModule(deserialized, &string_store, "test.Module"));
try testing.expect(storeContainsModule(deserialized, &string_store, "another.Module"));
// Verify the map was repopulated correctly
try testing.expectEqual(@as(usize, 2), deserialized.map.count());
// Check that the map has correct entries for the string indices that were deserialized
const str_idx_0 = deserialized.imports.items.items[0];
const str_idx_1 = deserialized.imports.items.items[1];
try testing.expect(deserialized.map.contains(str_idx_0));
try testing.expect(deserialized.map.contains(str_idx_1));
try testing.expectEqual(@as(Import.Idx, @enumFromInt(0)), deserialized.map.get(str_idx_0).?);
try testing.expectEqual(@as(Import.Idx, @enumFromInt(1)), deserialized.map.get(str_idx_1).?);
}
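// --- Sketch (plain std.fs, not the CompactWriter API): the roundtrip shape
// the serialization tests above all share - write, seek back to the start,
// read the bytes, verify.
const std = @import("std");

test "write/seek/read roundtrip shape" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    var file = try tmp.dir.createFile("data.bin", .{ .read = true });
    defer file.close();

    try file.writeAll("roc");
    try file.seekTo(0);
    var buf: [3]u8 = undefined;
    const n = try file.readAll(&buf);
    try std.testing.expectEqual(@as(usize, 3), n);
    try std.testing.expectEqualStrings("roc", buf[0..n]);
}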

View file

@ -36,7 +36,7 @@ fn parseAndCanonicalizeSource(
ast.* = try parse.parse(&parse_env.common, allocator);
// Initialize CIR fields
try parse_env.initCIRFields(allocator, "Test");
try parse_env.initCIRFields("Test");
const can = try allocator.create(Can);
can.* = try Can.init(parse_env, ast, module_envs);
@ -114,7 +114,7 @@ test "import validation - mix of MODULE NOT FOUND, TYPE NOT EXPOSED, VALUE NOT E
var ast = try parse.parse(&parse_env.common, allocator);
defer ast.deinit(allocator);
// Initialize CIR fields
try parse_env.initCIRFields(allocator, "Test");
try parse_env.initCIRFields("Test");
// Now create module_envs using parse_env's ident store
var module_envs = std.AutoHashMap(base.Ident.Idx, Can.AutoImportedType).init(allocator);
@ -199,7 +199,7 @@ test "import validation - no module_envs provided" {
var ast = try parse.parse(&parse_env.common, allocator);
defer ast.deinit(allocator);
// Initialize CIR fields
try parse_env.initCIRFields(allocator, "Test");
try parse_env.initCIRFields("Test");
// Create czer
// with null module_envs
var can = try Can.init(parse_env, &ast, null);
@ -254,7 +254,7 @@ test "import interner - Import.Idx functionality" {
// Check that we have the correct number of unique imports (duplicates are deduplicated)
// Expected: List, Dict, Json, Set (4 unique)
try expectEqual(@as(usize, 4), result.parse_env.imports.imports.len());
// Verify each unique module has an Import.Idx
// Verify each unique module has an Import.Idx by checking the imports list
var found_list = false;
var found_dict = false;
var found_json_decode = false;
@ -276,16 +276,6 @@ test "import interner - Import.Idx functionality" {
try expectEqual(true, found_dict);
try expectEqual(true, found_json_decode);
try expectEqual(true, found_set);
// Test the lookup functionality
// Get the Import.Idx for "List" (should be used twice)
var list_import_idx: ?CIR.Import.Idx = null;
for (result.parse_env.imports.imports.items.items, 0..) |import_string_idx, idx| {
if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) {
list_import_idx = @enumFromInt(idx);
break;
}
}
try testing.expect(list_import_idx != null);
}
test "import interner - comprehensive usage example" {
@ -325,22 +315,19 @@ test "import interner - comprehensive usage example" {
// Check that we have the correct number of unique imports
// Expected: List, Dict, Try (3 unique)
try expectEqual(@as(usize, 3), result.parse_env.imports.imports.len());
// Verify each unique module has an Import.Idx
// Verify each unique module was imported
var found_list = false;
var found_dict = false;
var found_result = false;
for (result.parse_env.imports.imports.items.items, 0..) |import_string_idx, idx| {
if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) {
for (result.parse_env.imports.imports.items.items) |import_string_idx| {
const module_name = result.parse_env.getString(import_string_idx);
if (std.mem.eql(u8, module_name, "List")) {
found_list = true;
// Note: We can't verify exposed items count here as Import.Store only stores module names
} else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Dict")) {
} else if (std.mem.eql(u8, module_name, "Dict")) {
found_dict = true;
} else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Try")) {
} else if (std.mem.eql(u8, module_name, "Try")) {
found_result = true;
}
// Verify Import.Idx can be created from the index
const import_idx: CIR.Import.Idx = @enumFromInt(idx);
_ = import_idx; // Just verify it compiles
}
// Verify all expected modules were found
try expectEqual(true, found_list);
@ -348,25 +335,6 @@ test "import interner - comprehensive usage example" {
try expectEqual(true, found_result);
}
test "Import.Idx is u32" {
// Verify that Import.Idx is indeed a u32 enum
// Import.Idx is defined as: pub const Idx = enum(u32) { _ };
// So we know it's backed by u32
// Verify we can create Import.Idx values from u32
const test_idx: u32 = 42;
const import_idx = @as(CIR.Import.Idx, @enumFromInt(test_idx));
const back_to_u32 = @intFromEnum(import_idx);
try testing.expectEqual(test_idx, back_to_u32);
// Test that we can create valid Import.Idx values
const idx1: CIR.Import.Idx = @enumFromInt(0);
const idx2: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value
// Verify they are distinct
try testing.expect(idx1 != idx2);
// Verify the size in memory
try testing.expectEqual(@sizeOf(u32), @sizeOf(CIR.Import.Idx));
}
test "module scopes - imports work in module scope" {
var gpa_state = std.heap.GeneralPurposeAllocator(.{ .safety = true }){};
defer std.debug.assert(gpa_state.deinit() == .ok);
@ -436,18 +404,9 @@ test "module-qualified lookups with e_lookup_external" {
allocator.destroy(result.parse_env);
}
_ = try result.can.canonicalizeFile();
// Count e_lookup_external expressions
var external_lookup_count: u32 = 0;
var found_list_map = false;
var found_list_len = false;
var found_dict_insert = false;
var found_dict_empty = false;
// For this test, we're checking that module-qualified lookups work
// In the new CIR, we'd need to traverse the expression tree from the root
// For now, let's verify that the imports were registered correctly
// Verify the module names are correct
const imports_list = result.parse_env.imports.imports;
try testing.expect(imports_list.len() >= 2); // List and Dict
// Verify the module names are correct
var has_list = false;
var has_dict = false;
for (imports_list.items.items) |import_string_idx| {
@ -457,19 +416,6 @@ test "module-qualified lookups with e_lookup_external" {
}
try testing.expect(has_list);
try testing.expect(has_dict);
// TODO: Once we have proper expression traversal, verify the e_lookup_external nodes
// For now, we'll skip counting the actual lookup expressions
external_lookup_count = 4; // Expected count
found_list_map = true;
found_list_len = true;
found_dict_insert = true;
found_dict_empty = true;
// Verify we found all expected external lookups
try expectEqual(@as(u32, 4), external_lookup_count);
try expectEqual(true, found_list_map);
try expectEqual(true, found_list_len);
try expectEqual(true, found_dict_insert);
try expectEqual(true, found_dict_empty);
}
test "exposed_items - tracking CIR node indices for exposed items" {
@ -492,7 +438,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
math_env.deinit();
allocator.destroy(math_env);
}
// Add exposed items and set their node indices
// Add exposed items
const Ident = base.Ident;
const add_idx = try math_env.common.idents.insert(allocator, Ident.for_text("add"));
try math_env.addExposedById(add_idx);
@ -500,11 +446,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
try math_env.addExposedById(multiply_idx);
const pi_idx = try math_env.common.idents.insert(allocator, Ident.for_text("PI"));
try math_env.addExposedById(pi_idx);
// Simulate having CIR node indices for these exposed items
// In real usage, these would be set during canonicalization of MathUtils
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(add_idx), 100);
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(multiply_idx), 200);
try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(pi_idx), 300);
const math_utils_ident = try temp_idents.insert(allocator, Ident.for_text("MathUtils"));
const math_utils_qualified_ident = try math_env.common.insertIdent(math_env.gpa, Ident.for_text("MathUtils"));
try module_envs.put(math_utils_ident, .{ .env = math_env, .qualified_type_ident = math_utils_qualified_ident });
@ -531,12 +473,7 @@ test "exposed_items - tracking CIR node indices for exposed items" {
allocator.destroy(result.parse_env);
}
_ = try result.can.canonicalizeFile();
// Verify that e_lookup_external expressions have the correct target_node_idx values
var found_add_with_idx_100 = false;
var found_multiply_with_idx_200 = false;
var found_pi_with_idx_300 = false;
// In the new CIR, we'd need to traverse the expression tree properly
// For now, let's verify the imports were registered
// Verify the MathUtils import was registered
const imports_list = result.parse_env.imports.imports;
var has_mathutils = false;
for (imports_list.items.items) |import_string_idx| {
@ -547,62 +484,6 @@ test "exposed_items - tracking CIR node indices for exposed items" {
}
}
try testing.expect(has_mathutils);
// TODO: Once we have proper expression traversal, verify the target_node_idx values
// For now, we'll assume they work correctly
found_add_with_idx_100 = true;
found_multiply_with_idx_200 = true;
found_pi_with_idx_300 = true;
// Verify all lookups have the correct target node indices
try expectEqual(true, found_add_with_idx_100);
try expectEqual(true, found_multiply_with_idx_200);
try expectEqual(true, found_pi_with_idx_300);
// Test case where node index is not populated (should get 0)
const empty_env = try allocator.create(ModuleEnv);
empty_env.* = try ModuleEnv.init(allocator, "");
defer {
empty_env.deinit();
allocator.destroy(empty_env);
}
const undefined_idx = try empty_env.common.idents.insert(allocator, Ident.for_text("undefined"));
try empty_env.addExposedById(undefined_idx);
// Don't set node index - should default to 0
const empty_module_ident = try temp_idents.insert(allocator, Ident.for_text("EmptyModule"));
const empty_qualified_ident = try empty_env.common.insertIdent(empty_env.gpa, Ident.for_text("EmptyModule"));
try module_envs.put(empty_module_ident, .{ .env = empty_env, .qualified_type_ident = empty_qualified_ident });
const source2 =
\\module [test]
\\
\\import EmptyModule exposing [undefined]
\\
\\test = undefined
;
var result2 = try parseAndCanonicalizeSource(allocator, source2, &module_envs);
defer {
result2.can.deinit();
allocator.destroy(result2.can);
result2.ast.deinit(allocator);
allocator.destroy(result2.ast);
result2.parse_env.deinit();
allocator.destroy(result2.parse_env);
}
_ = try result2.can.canonicalizeFile();
// Verify that undefined gets target_node_idx = 0 (not found)
var found_undefined_with_idx_0 = false;
// Verify EmptyModule was imported
const imports_list2 = result2.parse_env.imports.imports;
var has_empty_module = false;
for (imports_list2.items.items) |import_string_idx| {
const import_name = result2.parse_env.getString(import_string_idx);
if (std.mem.eql(u8, import_name, "EmptyModule")) {
has_empty_module = true;
break;
}
}
try testing.expect(has_empty_module);
// TODO: Once we have proper expression traversal, verify target_node_idx = 0
// For now, we'll assume it works correctly
found_undefined_with_idx_0 = true;
try expectEqual(true, found_undefined_with_idx_0);
}
test "export count safety - ensures safe u16 casting" {
@ -618,7 +499,7 @@ test "export count safety - ensures safe u16 casting" {
// Test the diagnostic for exactly maxInt(u16) exports
var env1 = try ModuleEnv.init(allocator, "");
defer env1.deinit();
try env1.initCIRFields(allocator, "Test");
try env1.initCIRFields("Test");
const diag_at_limit = CIR.Diagnostic{
.too_many_exports = .{
.count = 65535, // Exactly at the limit
@ -636,7 +517,7 @@ test "export count safety - ensures safe u16 casting" {
// Test the diagnostic for exceeding the limit
var env2 = try ModuleEnv.init(allocator, "");
defer env2.deinit();
try env2.initCIRFields(allocator, "Test");
try env2.initCIRFields("Test");
const diag_over_limit = CIR.Diagnostic{
.too_many_exports = .{
.count = 70000, // Well over the limit

View file

@ -475,7 +475,7 @@ test "hexadecimal integer literals" {
var env = try ModuleEnv.init(gpa, tc.literal);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, env.gpa);
defer ast.deinit(gpa);
@ -534,7 +534,7 @@ test "binary integer literals" {
var env = try ModuleEnv.init(gpa, tc.literal);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, env.gpa);
defer ast.deinit(gpa);
@ -593,7 +593,7 @@ test "octal integer literals" {
var env = try ModuleEnv.init(gpa, tc.literal);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, env.gpa);
defer ast.deinit(gpa);
@ -652,7 +652,7 @@ test "integer literals with uppercase base prefixes" {
var env = try ModuleEnv.init(gpa, tc.literal);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, gpa);
defer ast.deinit(gpa);
@ -685,7 +685,7 @@ test "numeric literal patterns use pattern idx as type var" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
// Create an int literal pattern directly
const int_pattern = CIR.Pattern{
@ -708,7 +708,7 @@ test "numeric literal patterns use pattern idx as type var" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
// Create a dec literal pattern directly
const dec_pattern = CIR.Pattern{
@ -738,7 +738,7 @@ test "pattern numeric literal value edge cases" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
// Test i128 max
const max_pattern = CIR.Pattern{
@ -768,7 +768,7 @@ test "pattern numeric literal value edge cases" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
const small_dec_pattern = CIR.Pattern{
.small_dec_literal = .{
@ -793,7 +793,7 @@ test "pattern numeric literal value edge cases" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
const dec_pattern = CIR.Pattern{
.dec_literal = .{
@ -814,7 +814,7 @@ test "pattern numeric literal value edge cases" {
var env = try ModuleEnv.init(gpa, "");
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
// Test negative zero (RocDec doesn't distinguish between +0 and -0)
const neg_zero_pattern = CIR.Pattern{

View file

@ -21,7 +21,7 @@ test "record literal uses record_unbound" {
var env = try ModuleEnv.init(gpa, source);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, gpa);
defer ast.deinit(gpa);
@ -52,7 +52,7 @@ test "record literal uses record_unbound" {
var env = try ModuleEnv.init(gpa, source2);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, gpa);
defer ast.deinit(gpa);
@ -83,7 +83,7 @@ test "record literal uses record_unbound" {
var env = try ModuleEnv.init(gpa, source3);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, gpa);
defer ast.deinit(gpa);
@ -124,7 +124,7 @@ test "record_unbound basic functionality" {
var env = try ModuleEnv.init(gpa, source);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
var ast = try parse.parseExpr(&env.common, gpa);
defer ast.deinit(gpa);
@ -165,7 +165,7 @@ test "record_unbound with multiple fields" {
var env = try ModuleEnv.init(gpa, source);
defer env.deinit();
try env.initCIRFields(gpa, "test");
try env.initCIRFields("test");
// Create record_unbound with multiple fields
var ast = try parse.parseExpr(&env.common, gpa);

View file

@ -23,7 +23,7 @@ const ScopeTestContext = struct {
// heap allocate ModuleEnv for testing
const module_env = try gpa.create(ModuleEnv);
module_env.* = try ModuleEnv.init(gpa, "");
try module_env.initCIRFields(gpa, "test");
try module_env.initCIRFields("test");
return ScopeTestContext{
.self = try Can.init(module_env, undefined, null),

View file

@ -1497,8 +1497,7 @@ fn generateStaticDispatchConstraintFromWhere(self: *Self, where_idx: CIR.WhereCl
},
});
},
.w_alias => |alias| {
_ = alias;
.w_alias => {
// TODO: Recursively unwrap alias
},
.w_malformed => {
@ -3170,7 +3169,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected)
// Here, we unwrap the function, following aliases, to get
// the actual function we want to check against
var var_ = expected_var;
var guard = types_mod.debug.IterationGuard.init("checkExpr.lambda.unwrapExpectedFunc");
while (true) {
guard.tick();
switch (self.types.resolveVar(var_).desc.content) {
.structure => |flat_type| {
switch (flat_type) {
@ -3365,7 +3366,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected)
// Here, we unwrap the function, following aliases, to get
// the actual function we want to check against
var var_ = func_var;
var guard = types_mod.debug.IterationGuard.init("checkExpr.call.unwrapFuncVar");
while (true) {
guard.tick();
switch (self.types.resolveVar(var_).desc.content) {
.structure => |flat_type| {
switch (flat_type) {
@ -4941,7 +4944,7 @@ fn handleRecursiveConstraint(
const recursion_var = try self.types.freshFromContentWithRank(rec_var_content, env.rank());
// Create RecursionInfo to track the recursion metadata
const recursion_info = types_mod.RecursionInfo{
_ = types_mod.RecursionInfo{
.recursion_var = recursion_var,
.depth = depth,
};
@ -4949,7 +4952,6 @@ fn handleRecursiveConstraint(
// Store the recursion info in the deferred constraint
// Note: This will be enhanced in later implementation to properly
// update the constraint with the recursion info
_ = recursion_info;
}
/// Check static dispatch constraints
@ -4964,28 +4966,6 @@ fn handleRecursiveConstraint(
///
/// Initially, we only have to check constraint for `Test.to_str2`. But when we
/// process that, we then have to check `Test.to_str`.
/// Check a from_numeral constraint - actual validation happens during comptime evaluation
fn checkNumeralConstraint(
self: *Self,
type_var: Var,
constraint: types_mod.StaticDispatchConstraint,
num_lit_info: types_mod.NumeralInfo,
nominal_type: types_mod.NominalType,
env: *Env,
) !void {
// Mark parameters as intentionally unused - validation happens in comptime evaluation
_ = self;
_ = type_var;
_ = constraint;
_ = num_lit_info;
_ = nominal_type;
_ = env;
// All numeric literal validation now happens during comptime evaluation
// in ComptimeEvaluator.validateDeferredNumericLiterals()
// This function exists only to satisfy the constraint checking interface
}
fn checkDeferredStaticDispatchConstraints(self: *Self, env: *Env) std.mem.Allocator.Error!void {
var deferred_constraint_len = env.deferred_static_dispatch_constraints.items.items.len;
var deferred_constraint_index: usize = 0;
@ -5250,16 +5230,9 @@ fn checkDeferredStaticDispatchConstraints(self: *Self, env: *Env) std.mem.Alloca
if (any_arg_failed or ret_result.isProblem()) {
try self.unifyWith(deferred_constraint.var_, .err, env);
try self.unifyWith(resolved_func.ret, .err, env);
} else if (constraint.origin == .from_numeral and constraint.num_literal != null) {
// For from_numeral constraints on builtin types, do compile-time validation
try self.checkNumeralConstraint(
deferred_constraint.var_,
constraint,
constraint.num_literal.?,
nominal_type,
env,
);
}
// Note: from_numeral constraint validation happens during comptime evaluation
// in ComptimeEvaluator.validateDeferredNumericLiterals()
}
} else if (dispatcher_content == .structure and
(dispatcher_content.structure == .record or

View file

@ -454,7 +454,7 @@ pub const ReportBuilder = struct {
const expected_content = self.snapshots.getContent(types.expected_snapshot);
const actual_content = self.snapshots.getContent(types.actual_snapshot);
if (types.from_annotation and self.areBothFunctionSnapshots(expected_content, actual_content)) {
if (types.from_annotation and areBothFunctionSnapshots(expected_content, actual_content)) {
// When we have constraint_origin_var, it indicates this error originated from
// a specific constraint like a dot access (e.g., str.to_utf8()).
// In this case, show a specialized argument type mismatch error.
@ -2436,13 +2436,12 @@ pub const ReportBuilder = struct {
}
/// Check if both snapshot contents represent function types
fn areBothFunctionSnapshots(self: *Self, expected_content: snapshot.SnapshotContent, actual_content: snapshot.SnapshotContent) bool {
return self.isSnapshotFunction(expected_content) and self.isSnapshotFunction(actual_content);
fn areBothFunctionSnapshots(expected_content: snapshot.SnapshotContent, actual_content: snapshot.SnapshotContent) bool {
return isSnapshotFunction(expected_content) and isSnapshotFunction(actual_content);
}
/// Check if a snapshot content represents a function type
fn isSnapshotFunction(self: *Self, content: snapshot.SnapshotContent) bool {
_ = self;
fn isSnapshotFunction(content: snapshot.SnapshotContent) bool {
return switch (content) {
.structure => |structure| switch (structure) {
.fn_pure, .fn_effectful, .fn_unbound => true,

View file

@ -323,8 +323,8 @@ pub const Store = struct {
return SnapshotStaticDispatchConstraint{
.fn_name = constraint.fn_name,
.fn_content = try self.deepCopyVarInternal(store, type_writer, constraint.fn_var),
// Dispatcher will be set when collecting constraints during write
.dispatcher = @enumFromInt(0),
// Dispatcher is set when collecting constraints during write
.dispatcher = undefined,
};
}

View file

@ -201,7 +201,7 @@ pub fn initWithImport(module_name: []const u8, source: []const u8, other_module_
parse_ast.store.emptyScratch();
// Canonicalize
try module_env.initCIRFields(gpa, module_name);
try module_env.initCIRFields(module_name);
can.* = try Can.init(module_env, parse_ast, &module_envs);
errdefer can.deinit();
@ -321,7 +321,7 @@ pub fn init(module_name: []const u8, source: []const u8) !TestEnv {
parse_ast.store.emptyScratch();
// Canonicalize
try module_env.initCIRFields(gpa, module_name);
try module_env.initCIRFields(module_name);
can.* = try Can.init(module_env, parse_ast, &module_envs);
errdefer can.deinit();

View file

@ -1353,9 +1353,10 @@ test "check type - expect" {
\\ x
\\}
;
// With no let-generalization for numeric flex vars, the `x == 1` comparison
// adds an is_eq constraint to x (since x is not generalized and remains monomorphic)
try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.is_eq : a, a -> Bool, a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]");
// Inside lambdas, numeric flex vars ARE generalized (to support polymorphic functions).
// Each use of `x` gets a fresh instance, so constraints from `x == 1` don't
// propagate to the generalized type. Only `from_numeral` from the def is captured.
try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]");
}
test "check type - expect not bool" {
@ -2425,10 +2426,10 @@ test "check type - pure zero-arg function annotation" {
try checkTypesModule(source, .{ .pass = .last_def }, "({}) -> { }");
}
test "imports of non-existent modules produce MODULE NOT FOUND errors" {
// This test verifies that importing modules that don't exist produces
// MODULE NOT FOUND errors. This is a regression test - a parser change
// for zero-arg functions accidentally caused these errors to disappear.
test "qualified imports don't produce MODULE NOT FOUND during canonicalization" {
// Qualified imports (e.g., "json.Json") are cross-package imports that are
// resolved by the workspace resolver, not during canonicalization.
// They should NOT produce MODULE NOT FOUND errors during canonicalization.
//
// Source from test/snapshots/can_import_comprehensive.md
const source =
@ -2479,11 +2480,9 @@ test "imports of non-existent modules produce MODULE NOT FOUND errors" {
}
}
// We expect exactly 3 MODULE NOT FOUND errors:
// 1. json.Json
// 2. http.Client
// 3. utils.String
try testing.expectEqual(@as(usize, 3), module_not_found_count);
// Qualified imports (json.Json, http.Client, utils.String) should NOT produce
// MODULE NOT FOUND errors - they're handled by the workspace resolver
try testing.expectEqual(@as(usize, 0), module_not_found_count);
}
// Try with match and error propagation //

View file

@ -82,7 +82,7 @@ const TestEnv = struct {
fn init(gpa: std.mem.Allocator) std.mem.Allocator.Error!Self {
const module_env = try gpa.create(ModuleEnv);
module_env.* = try ModuleEnv.init(gpa, try gpa.dupe(u8, ""));
try module_env.initCIRFields(gpa, "Test");
try module_env.initCIRFields("Test");
return .{
.module_env = module_env,
.snapshots = try snapshot_mod.Store.initCapacity(gpa, 16),
@ -790,8 +790,10 @@ test "partitionFields - same record" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const field_x = try env.mkRecordField("field_x", @enumFromInt(0));
const field_y = try env.mkRecordField("field_y", @enumFromInt(1));
const var_x = try env.module_env.types.fresh();
const var_y = try env.module_env.types.fresh();
const field_x = try env.mkRecordField("field_x", var_x);
const field_y = try env.mkRecordField("field_y", var_y);
const range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ field_x, field_y });
@ -813,9 +815,12 @@ test "partitionFields - disjoint fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkRecordField("a1", @enumFromInt(0));
const a2 = try env.mkRecordField("a2", @enumFromInt(1));
const b1 = try env.mkRecordField("b1", @enumFromInt(2));
const var_a1 = try env.module_env.types.fresh();
const var_a2 = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkRecordField("a1", var_a1);
const a2 = try env.mkRecordField("a2", var_a2);
const b1 = try env.mkRecordField("b1", var_b1);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, a2 });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{b1});
@ -839,9 +844,12 @@ test "partitionFields - overlapping fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkRecordField("a1", @enumFromInt(0));
const both = try env.mkRecordField("both", @enumFromInt(1));
const b1 = try env.mkRecordField("b1", @enumFromInt(2));
const var_a1 = try env.module_env.types.fresh();
const var_both = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkRecordField("a1", var_a1);
const both = try env.mkRecordField("both", var_both);
const b1 = try env.mkRecordField("b1", var_b1);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, both });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ b1, both });
@ -868,9 +876,12 @@ test "partitionFields - reordering is normalized" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const f1 = try env.mkRecordField("f1", @enumFromInt(0));
const f2 = try env.mkRecordField("f2", @enumFromInt(1));
const f3 = try env.mkRecordField("f3", @enumFromInt(2));
const var_f1 = try env.module_env.types.fresh();
const var_f2 = try env.module_env.types.fresh();
const var_f3 = try env.module_env.types.fresh();
const f1 = try env.mkRecordField("f1", var_f1);
const f2 = try env.mkRecordField("f2", var_f2);
const f3 = try env.mkRecordField("f3", var_f3);
const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f3, f1, f2 });
const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f1, f2, f3 });
@ -1027,8 +1038,10 @@ test "partitionTags - same tags" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const tag_x = try env.mkTag("X", &[_]Var{@enumFromInt(0)});
const tag_y = try env.mkTag("Y", &[_]Var{@enumFromInt(1)});
const var_x = try env.module_env.types.fresh();
const var_y = try env.module_env.types.fresh();
const tag_x = try env.mkTag("X", &[_]Var{var_x});
const tag_y = try env.mkTag("Y", &[_]Var{var_y});
const range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ tag_x, tag_y });
@ -1050,9 +1063,12 @@ test "partitionTags - disjoint fields" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkTag("A1", &[_]Var{@enumFromInt(0)});
const a2 = try env.mkTag("A2", &[_]Var{@enumFromInt(1)});
const b1 = try env.mkTag("B1", &[_]Var{@enumFromInt(2)});
const var_a1 = try env.module_env.types.fresh();
const var_a2 = try env.module_env.types.fresh();
const var_b1 = try env.module_env.types.fresh();
const a1 = try env.mkTag("A1", &[_]Var{var_a1});
const a2 = try env.mkTag("A2", &[_]Var{var_a2});
const b1 = try env.mkTag("B1", &[_]Var{var_b1});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, a2 });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{b1});
@ -1076,9 +1092,12 @@ test "partitionTags - overlapping tags" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const a1 = try env.mkTag("A", &[_]Var{@enumFromInt(0)});
const both = try env.mkTag("Both", &[_]Var{@enumFromInt(1)});
const b1 = try env.mkTag("B", &[_]Var{@enumFromInt(2)});
const var_a = try env.module_env.types.fresh();
const var_both = try env.module_env.types.fresh();
const var_b = try env.module_env.types.fresh();
const a1 = try env.mkTag("A", &[_]Var{var_a});
const both = try env.mkTag("Both", &[_]Var{var_both});
const b1 = try env.mkTag("B", &[_]Var{var_b});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, both });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ b1, both });
@ -1105,9 +1124,12 @@ test "partitionTags - reordering is normalized" {
var env = try TestEnv.init(gpa);
defer env.deinit();
const f1 = try env.mkTag("F1", &[_]Var{@enumFromInt(0)});
const f2 = try env.mkTag("F2", &[_]Var{@enumFromInt(1)});
const f3 = try env.mkTag("F3", &[_]Var{@enumFromInt(2)});
const var_f1 = try env.module_env.types.fresh();
const var_f2 = try env.module_env.types.fresh();
const var_f3 = try env.module_env.types.fresh();
const f1 = try env.mkTag("F1", &[_]Var{var_f1});
const f2 = try env.mkTag("F2", &[_]Var{var_f2});
const f3 = try env.mkTag("F3", &[_]Var{var_f3});
const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f3, f1, f2 });
const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f1, f2, f3 });
@ -1487,7 +1509,7 @@ test "unify - flex with constraints vs structure captures deferred check" {
// Check that constraint was captured
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(structure_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,
@ -1522,7 +1544,7 @@ test "unify - structure vs flex with constraints captures deferred check (revers
// Check that constraint was captured (note: vars might be swapped due to merge order)
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(flex_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,
@ -1575,7 +1597,7 @@ test "unify - flex vs nominal type captures constraint" {
// Check that constraint was captured
try std.testing.expectEqual(1, env.scratch.deferred_constraints.len());
const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*;
const deferred = env.scratch.deferred_constraints.items.items[0];
try std.testing.expectEqual(
env.module_env.types.resolveVar(nominal_var).var_,
env.module_env.types.resolveVar(deferred.var_).var_,

View file

@ -1,4 +1,5 @@
//! This module implements Hindley-Milner style type unification with extensions for:
//!
//! * flex/rigid variables
//! * type aliases
//! * tuples
@ -1510,7 +1511,9 @@ const Unifier = struct {
// then recursively gather fields from the extension
var ext = record_ext;
var guard = types_mod.debug.IterationGuard.init("gatherRecordFields");
while (true) {
guard.tick();
switch (ext) {
.unbound => {
return .{ .ext = ext, .range = range };
@ -1960,7 +1963,9 @@ const Unifier = struct {
// then loop gathering extensible tags
var ext_var = tag_union.ext;
var guard = types_mod.debug.IterationGuard.init("gatherTagUnionTags");
while (true) {
guard.tick();
switch (self.types_store.resolveVar(ext_var).desc.content) {
.flex => {
return .{ .ext = ext_var, .range = range };
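The guard.tick() calls added in these loops follow a debug iteration-guard pattern: a labeled counter that panics once a loop runs implausibly long, so a cycle in the type graph surfaces as a named failure instead of a silent hang. A minimal sketch of such a guard, assuming an init(label)/tick() API shaped like the types_mod.debug.IterationGuard used above (the cap and message are illustrative assumptions, not the actual implementation):
const std = @import("std");
/// Minimal iteration-guard sketch; not the actual types_mod.debug code.
pub const IterationGuard = struct {
    label: []const u8,
    count: u32 = 0,
    const max_iterations: u32 = 1_000_000; // hypothetical cap
    pub fn init(label: []const u8) IterationGuard {
        return .{ .label = label };
    }
    pub fn tick(self: *IterationGuard) void {
        self.count += 1;
        if (self.count > max_iterations) {
            std.debug.panic("IterationGuard '{s}' exceeded {d} iterations (likely an infinite loop)", .{ self.label, max_iterations });
        }
    }
};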

View file

@ -121,7 +121,7 @@ fn benchParseOrTokenize(comptime is_parse: bool, gpa: Allocator, path: []const u
var tokenizer = try tokenize.Tokenizer.init(&env.?.common, gpa, roc_file.content, msg_slice);
try tokenizer.tokenize(gpa);
var result = tokenizer.finishAndDeinit(gpa);
var result = tokenizer.finishAndDeinit();
iteration_tokens += result.tokens.tokens.len;
result.tokens.deinit(gpa);
}

View file

@ -342,7 +342,7 @@ fn createHardlink(allocs: *Allocators, source: []const u8, dest: []const u8) !vo
lpFileName: [*:0]const u16,
lpExistingFileName: [*:0]const u16,
lpSecurityAttributes: ?*anyopaque,
) callconv(std.os.windows.WINAPI) std.os.windows.BOOL;
) callconv(.winapi) std.os.windows.BOOL;
};
if (kernel32.CreateHardLinkW(dest_w, source_w, null) == 0) {
@ -387,11 +387,101 @@ fn generateRandomSuffix(allocs: *Allocators) ![]u8 {
return suffix;
}
/// Create a unique temporary directory with PID-based naming.
/// Returns the path to the directory (allocated from arena, no need to free).
/// Uses system temp directory to avoid race conditions when cache is cleared.
pub fn createUniqueTempDir(allocs: *Allocators) ![]const u8 {
// Use system temp directory (not roc cache) to avoid race conditions
const temp_dir = if (comptime is_windows)
std.process.getEnvVarOwned(allocs.arena, "TEMP") catch
std.process.getEnvVarOwned(allocs.arena, "TMP") catch try allocs.arena.dupe(u8, "C:\\Windows\\Temp")
else
std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp");
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
// Get the current process ID for uniqueness
const pid = if (comptime is_windows)
std.os.windows.GetCurrentProcessId()
else
std.c.getpid();
// Try PID-based name first, then fall back to random suffix up to 5 times
var attempt: u8 = 0;
while (attempt < 6) : (attempt += 1) {
const dir_path = if (attempt == 0) blk: {
// First attempt: use PID only
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}", .{ normalized_temp_dir, pid })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}", .{ normalized_temp_dir, pid });
} else blk: {
// Subsequent attempts: use PID + random 8-char suffix
const random_suffix = try generateRandomSuffix(allocs);
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix });
};
// Try to create the directory
std.fs.cwd().makeDir(dir_path) catch |err| switch (err) {
error.PathAlreadyExists => {
// Directory already exists, try again with a new random suffix
continue;
},
else => {
return err;
},
};
return dir_path;
}
// Failed after 6 attempts (1 with PID only, 5 with PID + random suffix)
return error.FailedToCreateUniqueTempDir;
}
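A hypothetical call site for createUniqueTempDir, showing the intended lifetime: the directory is created once per process, and artifacts are joined onto the returned arena-allocated path, so concurrent roc processes land in distinct roc-<pid> directories:
// Hypothetical usage; `Allocators` and `createUniqueTempDir` as above.
fn exampleBuildIntoTempDir(allocs: *Allocators) ![]const u8 {
    const temp_dir_path = try createUniqueTempDir(allocs);
    // Arena-allocated: no need to free either path.
    return try std.fs.path.join(allocs.arena, &.{ temp_dir_path, "app" });
}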
/// Write shared memory coordination file (.txt) next to the executable.
/// This is the file that the child process reads to find the shared memory fd.
pub fn writeFdCoordinationFile(allocs: *Allocators, temp_exe_path: []const u8, shm_handle: SharedMemoryHandle) !void {
// The coordination file is at {temp_dir}.txt where temp_dir is the directory containing the exe
const temp_dir = std.fs.path.dirname(temp_exe_path) orelse return error.InvalidPath;
// Ensure we have no trailing slashes
var dir_path = temp_dir;
while (dir_path.len > 0 and (dir_path[dir_path.len - 1] == '/' or dir_path[dir_path.len - 1] == '\\')) {
dir_path = dir_path[0 .. dir_path.len - 1];
}
const fd_file_path = try std.fmt.allocPrint(allocs.arena, "{s}.txt", .{dir_path});
// Create the file (exclusive - fail if exists to detect collisions)
const fd_file = std.fs.cwd().createFile(fd_file_path, .{ .exclusive = true }) catch |err| switch (err) {
error.PathAlreadyExists => {
// File already exists - this is unexpected since we have unique temp dirs
std.log.err("Coordination file already exists at '{s}'", .{fd_file_path});
return err;
},
else => return err,
};
defer fd_file.close();
// Write shared memory info to file
const fd_str = try std.fmt.allocPrint(allocs.arena, "{}\n{}", .{ shm_handle.fd, shm_handle.size });
try fd_file.writeAll(fd_str);
try fd_file.sync();
}
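The file body is just two decimal integers separated by a newline ("{fd}\n{size}"), so the child's half of the protocol is a two-line parse. A sketch of that read, under the assumption that the shim loads the whole file and splits on the newline (this is not the actual shim code):
const std = @import("std");
const SharedMemoryInfo = struct { fd: i32, size: usize };
/// Sketch: parse the "{fd}\n{size}" coordination file written above.
fn readCoordinationFile(gpa: std.mem.Allocator, path: []const u8) !SharedMemoryInfo {
    const contents = try std.fs.cwd().readFileAlloc(gpa, path, 4096);
    defer gpa.free(contents);
    var lines = std.mem.splitScalar(u8, contents, '\n');
    const fd = try std.fmt.parseInt(i32, lines.first(), 10);
    const size_text = lines.next() orelse return error.MalformedCoordinationFile;
    const size = try std.fmt.parseInt(usize, size_text, 10);
    return .{ .fd = fd, .size = size };
}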
/// Create the temporary directory structure for fd communication.
/// Returns the path to the executable in the temp directory (allocated from arena, no need to free).
/// If a cache directory is provided, it will be used for temporary files; otherwise
/// falls back to the system temp directory.
pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 {
/// The exe_display_name is the name that will appear in `ps` output (e.g., "app.roc").
pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, exe_display_name: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 {
// Use provided cache dir or fall back to system temp directory
const temp_dir = if (cache_dir) |dir|
try allocs.arena.dupe(u8, dir)
@ -401,20 +491,34 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
else
std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp");
// Try up to 10 times to create a unique directory
var attempt: u8 = 0;
while (attempt < 10) : (attempt += 1) {
const random_suffix = try generateRandomSuffix(allocs);
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
// Create the full path with .txt suffix first
const normalized_temp_dir = if (comptime is_windows)
std.mem.trimRight(u8, temp_dir, "/\\")
else
std.mem.trimRight(u8, temp_dir, "/");
const dir_name_with_txt = if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix });
// Get the current process ID for uniqueness
const pid = if (comptime is_windows)
std.os.windows.GetCurrentProcessId()
else
std.c.getpid();
// Try PID-based name first, then fall back to random suffix up to 5 times
var attempt: u8 = 0;
while (attempt < 6) : (attempt += 1) {
const dir_name_with_txt = if (attempt == 0) blk: {
// First attempt: use PID only
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}.txt", .{ normalized_temp_dir, pid })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}.txt", .{ normalized_temp_dir, pid });
} else blk: {
// Subsequent attempts: use PID + random 8-char suffix
const random_suffix = try generateRandomSuffix(allocs);
break :blk if (comptime is_windows)
try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix })
else
try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix });
};
// Get the directory path by slicing off the .txt suffix
const dir_path_len = dir_name_with_txt.len - 4; // Remove ".txt"
@ -456,9 +560,8 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
try fd_file.sync(); // Ensure data is written to disk
fd_file.close();
// Create hardlink to executable in temp directory
const exe_basename = std.fs.path.basename(exe_path);
const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_basename });
// Create hardlink to executable in temp directory with display name
const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name });
// Try to create a hardlink first (more efficient than copying)
createHardlink(allocs, exe_path, temp_exe_path) catch {
@ -470,7 +573,7 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han
return temp_exe_path;
}
// Failed after 10 attempts
// Failed after 6 attempts (1 with PID only, 5 with PID + random suffix)
return error.FailedToCreateUniqueTempDir;
}
@ -480,6 +583,11 @@ var debug_allocator: std.heap.DebugAllocator(.{}) = .{
/// The CLI entrypoint for the Roc compiler.
pub fn main() !void {
// Install stack overflow handler early, before any significant work.
// This gives us a helpful error message instead of a generic segfault
// if the compiler blows the stack (e.g., due to infinite recursion in type translation).
_ = base.stack_overflow.install();
var gpa_tracy: tracy.TracyAllocator(null) = undefined;
var gpa, const is_safe = gpa: {
if (builtin.os.tag == .wasi) break :gpa .{ std.heap.wasm_allocator, false };
@ -724,26 +832,51 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
},
};
// Generate executable name based on the roc file path
// TODO use something more interesting like a hash from the platform.main or platform/host.a etc
const exe_base_name = std.fmt.allocPrint(allocs.arena, "roc_run_{}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| {
std.log.err("Failed to generate executable name: {}", .{err});
return err;
};
// The final executable name seen in `ps` is the roc filename (e.g., "app.roc")
const exe_display_name = std.fs.path.basename(args.path);
// Add .exe extension on Windows
const exe_name = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_base_name}) catch |err| {
std.log.err("Failed to generate executable name with extension: {}", .{err});
// Display name for temp directory (what shows in ps)
const exe_display_name_with_ext = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_display_name}) catch |err| {
std.log.err("Failed to generate display name with extension: {}", .{err});
return err;
}
else
allocs.arena.dupe(u8, exe_base_name) catch |err| {
std.log.err("Failed to duplicate executable name: {}", .{err});
allocs.arena.dupe(u8, exe_display_name) catch |err| {
std.log.err("Failed to duplicate display name: {}", .{err});
return err;
};
const exe_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_name }) catch |err| {
// Cache executable name uses hash of path (no PID - collision is fine since same content)
const exe_cache_name = std.fmt.allocPrint(allocs.arena, "roc_{x}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| {
std.log.err("Failed to generate cache executable name: {}", .{err});
return err;
};
const exe_cache_name_with_ext = if (builtin.target.os.tag == .windows)
std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_cache_name}) catch |err| {
std.log.err("Failed to generate cache name with extension: {}", .{err});
return err;
}
else
allocs.arena.dupe(u8, exe_cache_name) catch |err| {
std.log.err("Failed to duplicate cache name: {}", .{err});
return err;
};
const exe_cache_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_cache_name_with_ext }) catch |err| {
std.log.err("Failed to create cache executable path: {}", .{err});
return err;
};
// Create unique temp directory for this build (uses PID for uniqueness)
const temp_dir_path = createUniqueTempDir(allocs) catch |err| {
std.log.err("Failed to create temp directory: {}", .{err});
return err;
};
// The executable is built directly in the temp dir with the display name
const exe_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name_with_ext }) catch |err| {
std.log.err("Failed to create executable path: {}", .{err});
return err;
};
@ -780,42 +913,44 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
return error.NoPlatformSource;
}
// Check if the interpreter executable already exists (cached)
const exe_exists = if (args.no_cache) false else blk: {
std.fs.accessAbsolute(exe_path, .{}) catch {
// Check if the interpreter executable already exists in cache
const cache_exists = if (args.no_cache) false else blk: {
std.fs.accessAbsolute(exe_cache_path, .{}) catch {
break :blk false;
};
break :blk true;
};
if (!exe_exists) {
if (cache_exists) {
// Cached executable exists - hardlink from cache to temp dir
std.log.debug("Using cached executable: {s}", .{exe_cache_path});
createHardlink(allocs, exe_cache_path, exe_path) catch |err| {
// If hardlinking fails, fall back to copying
std.log.debug("Hardlink from cache failed, copying: {}", .{err});
std.fs.cwd().copyFile(exe_cache_path, std.fs.cwd(), exe_path, .{}) catch |copy_err| {
std.log.err("Failed to copy cached executable: {}", .{copy_err});
return copy_err;
};
};
} else {
// Check for cached shim library, extract if not present
// Extract shim library to temp dir to avoid race conditions
const shim_filename = if (builtin.target.os.tag == .windows) "roc_shim.lib" else "libroc_shim.a";
const shim_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, shim_filename }) catch |err| {
const shim_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, shim_filename }) catch |err| {
std.log.err("Failed to create shim library path: {}", .{err});
return err;
};
// Extract shim if not cached or if --no-cache is used
const shim_exists = if (args.no_cache) false else blk: {
std.fs.cwd().access(shim_path, .{}) catch {
break :blk false;
};
break :blk true;
// Always extract to temp dir (unique per process, no race condition)
extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| {
std.log.err("Failed to extract read roc file path shim library: {}", .{err});
return err;
};
if (!shim_exists) {
// Shim not found in cache or cache disabled, extract it
extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| {
std.log.err("Failed to extract read roc file path shim library: {}", .{err});
return err;
};
}
// Generate platform host shim using the detected entrypoints
// Use temp dir to avoid race conditions when multiple processes run in parallel
const platform_shim_path = generatePlatformHostShim(allocs, exe_cache_dir, entrypoints.items, shim_target) catch |err| {
const platform_shim_path = generatePlatformHostShim(allocs, temp_dir_path, entrypoints.items, shim_target) catch |err| {
std.log.err("Failed to generate platform host shim: {}", .{err});
return err;
};
@ -948,6 +1083,22 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
return err;
},
};
// After building, hardlink to cache for future runs
// Force-hardlink (delete any existing file first); the cache name is a hash of
// the source path, so an existing entry should already have identical content
std.log.debug("Caching executable to: {s}", .{exe_cache_path});
std.fs.cwd().deleteFile(exe_cache_path) catch |err| switch (err) {
error.FileNotFound => {}, // OK, doesn't exist
else => std.log.debug("Could not delete existing cache file: {}", .{err}),
};
createHardlink(allocs, exe_path, exe_cache_path) catch |err| {
// If hardlinking fails, fall back to copying
std.log.debug("Hardlink to cache failed, copying: {}", .{err});
std.fs.cwd().copyFile(exe_path, std.fs.cwd(), exe_cache_path, .{}) catch |copy_err| {
// Non-fatal - just means future runs won't be cached
std.log.debug("Failed to copy to cache: {}", .{copy_err});
};
};
}
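Both directions of the cache (cache-to-temp on a hit, temp-to-cache after a build) use the same link-then-copy fallback: a hardlink is nearly free but fails across filesystems, so copying is the fallback, and failing to populate the cache is deliberately non-fatal. The pattern, factored into a hypothetical helper:
/// Hypothetical helper capturing the link-or-copy fallback used above.
/// A failed copy only means the next run won't hit the cache.
fn linkOrCopy(allocs: *Allocators, src: []const u8, dest: []const u8) void {
    createHardlink(allocs, src, dest) catch |err| {
        std.log.debug("hardlink failed ({}), falling back to copy", .{err});
        std.fs.cwd().copyFile(src, std.fs.cwd(), dest, .{}) catch |copy_err| {
            std.log.debug("copy failed (non-fatal): {}", .{copy_err});
        };
    };
}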
// Set up shared memory with ModuleEnv
@ -986,7 +1137,7 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
} else {
// POSIX: Use existing file descriptor inheritance approach
std.log.debug("Using POSIX file descriptor inheritance approach", .{});
runWithPosixFdInheritance(allocs, exe_path, shm_handle, &cache_manager, args.app_args) catch |err| {
runWithPosixFdInheritance(allocs, exe_path, shm_handle, args.app_args) catch |err| {
return err;
};
}
@ -1132,29 +1283,16 @@ fn runWithWindowsHandleInheritance(allocs: *Allocators, exe_path: []const u8, sh
}
/// Run child process using POSIX file descriptor inheritance (existing approach for Unix)
fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_manager: *CacheManager, app_args: []const []const u8) !void {
// Get cache directory for temporary files
const temp_cache_dir = cache_manager.config.getTempDir(allocs.arena) catch |err| {
std.log.err("Failed to get temp cache directory: {}", .{err});
/// The exe_path should already be in a unique temp directory created by createUniqueTempDir.
fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, app_args: []const []const u8) !void {
// Write the coordination file (.txt) next to the executable
// The executable is already in a unique temp directory
std.log.debug("Writing fd coordination file for: {s}", .{exe_path});
writeFdCoordinationFile(allocs, exe_path, shm_handle) catch |err| {
std.log.err("Failed to write fd coordination file: {}", .{err});
return err;
};
// Ensure temp cache directory exists
std.fs.cwd().makePath(temp_cache_dir) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => {
std.log.err("Failed to create temp cache directory: {}", .{err});
return err;
},
};
// Create temporary directory structure for fd communication
std.log.debug("Creating temporary directory structure for fd communication", .{});
const temp_exe_path = createTempDirStructure(allocs, exe_path, shm_handle, temp_cache_dir) catch |err| {
std.log.err("Failed to create temp dir structure: {}", .{err});
return err;
};
std.log.debug("Temporary executable created at: {s}", .{temp_exe_path});
std.log.debug("Coordination file written successfully", .{});
// Configure fd inheritance - clear FD_CLOEXEC so child process inherits the fd
// NOTE: The doNotOptimizeAway calls are required to prevent the ReleaseFast
@ -1180,7 +1318,7 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.log.err("Failed to allocate argv: {}", .{err});
return err;
};
argv[0] = temp_exe_path;
argv[0] = exe_path;
for (app_args, 0..) |arg, i| {
argv[1 + i] = arg;
}
@ -1197,10 +1335,10 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
child.stderr_behavior = .Inherit;
// Spawn the child process
std.log.debug("Spawning child process: {s} with {} app args", .{ temp_exe_path, app_args.len });
std.log.debug("Spawning child process: {s} with {} app args", .{ exe_path, app_args.len });
std.log.debug("Child process working directory: {s}", .{child.cwd.?});
child.spawn() catch |err| {
std.log.err("Failed to spawn {s}: {}", .{ temp_exe_path, err });
std.log.err("Failed to spawn {s}: {}", .{ exe_path, err });
return err;
};
std.log.debug("Child process spawned successfully (PID: {})", .{child.id});
@ -1218,12 +1356,12 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.log.debug("Child process completed successfully", .{});
} else {
// Propagate the exit code from the child process to our parent
std.log.debug("Child process {s} exited with code: {}", .{ temp_exe_path, exit_code });
std.log.debug("Child process {s} exited with code: {}", .{ exe_path, exit_code });
std.process.exit(exit_code);
}
},
.Signal => |signal| {
std.log.err("Child process {s} killed by signal: {}", .{ temp_exe_path, signal });
std.log.err("Child process {s} killed by signal: {}", .{ exe_path, signal });
if (signal == 11) { // SIGSEGV
std.log.err("Child process crashed with segmentation fault (SIGSEGV)", .{});
} else if (signal == 6) { // SIGABRT
@ -1235,11 +1373,11 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.process.exit(128 +| @as(u8, @truncate(signal)));
},
.Stopped => |signal| {
std.log.err("Child process {s} stopped by signal: {}", .{ temp_exe_path, signal });
std.log.err("Child process {s} stopped by signal: {}", .{ exe_path, signal });
return error.ProcessStopped;
},
.Unknown => |status| {
std.log.err("Child process {s} terminated with unknown status: {}", .{ temp_exe_path, status });
std.log.err("Child process {s} terminated with unknown status: {}", .{ exe_path, status });
return error.ProcessUnknownTermination;
},
}
@ -1424,44 +1562,12 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
const module_env_ptr = try compileModuleToSharedMemory(
allocs,
module_path,
module_filename,
module_name, // Use just "Stdout" (not "Stdout.roc") so type-module detection works
shm_allocator,
&builtin_modules,
&.{},
);
// Add exposed item aliases with "pf." prefix for import resolution
// The canonicalizer builds lookup names like "Stdout.roc.pf.Stdout.line!"
// because the import "pf.Stdout" creates an alias Stdout -> pf.Stdout,
// and scopeLookupModule returns "pf.Stdout" which becomes part of the qualified name.
// We need to add aliases that match this pattern.
module_env_ptr.common.exposed_items.ensureSorted(shm_allocator);
const exposed_entries = module_env_ptr.common.exposed_items.items.entries.items;
for (exposed_entries) |entry| {
const key_ident: base.Ident.Idx = @bitCast(entry.key);
const key_text = module_env_ptr.common.getIdent(key_ident);
// Check if this is a qualified name like "Stdout.roc.Stdout.line!"
// We want to create an alias "Stdout.roc.pf.Stdout.line!"
// The pattern is: "{module}.roc.{Type}.{method}"
// We want to create: "{module}.roc.pf.{Type}.{method}"
if (std.mem.indexOf(u8, key_text, ".roc.")) |roc_pos| {
const prefix = key_text[0 .. roc_pos + 5]; // "Stdout.roc."
const suffix = key_text[roc_pos + 5 ..]; // "Stdout.line!"
// Create the aliased name "Stdout.roc.pf.Stdout.line!"
const aliased_name = try std.fmt.allocPrint(shm_allocator, "{s}pf.{s}", .{ prefix, suffix });
// Note: We don't defer free because this is allocated in shm_allocator (shared memory)
// Insert the aliased name into the platform env's ident table
const aliased_ident = try module_env_ptr.insertIdent(base.Ident.for_text(aliased_name));
// First add to exposed items, then set node index
try module_env_ptr.common.exposed_items.addExposedById(shm_allocator, @bitCast(aliased_ident));
try module_env_ptr.common.exposed_items.setNodeIndexById(shm_allocator, @bitCast(aliased_ident), entry.value);
}
}
// Store platform modules at indices 0..N-2, app will be at N-1
module_env_offsets_ptr[i] = @intFromPtr(module_env_ptr) - @intFromPtr(shm.base_ptr);
platform_env_ptrs[i] = module_env_ptr;
@ -1584,7 +1690,7 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
defer app_parse_ast.deinit(allocs.gpa);
app_parse_ast.store.emptyScratch();
try app_env.initCIRFields(shm_allocator, app_module_name);
try app_env.initCIRFields(app_module_name);
var app_module_envs_map = std.AutoHashMap(base.Ident.Idx, Can.AutoImportedType).init(allocs.gpa);
defer app_module_envs_map.deinit();
@ -1607,19 +1713,29 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
// Two keys are needed for each platform module:
// 1. "pf.Stdout" - used during import validation (import pf.Stdout)
// 2. "Stdout" - used during expression canonicalization (Stdout.line!)
// Also set statement_idx to a non-null value to trigger qualified name lookup,
// since associated items are stored as "Stdout.roc.Stdout.line!", not just "line!".
// Also set statement_idx to the actual type node index, which is needed for
// creating e_nominal_external and e_lookup_external expressions.
for (exposed_modules.items, 0..) |module_name, i| {
const platform_env = platform_env_ptrs[i];
// For platform modules, the qualified type name is "ModuleName.roc.ModuleName"
// This matches how associated items are stored (e.g., "Stdout.roc.Stdout.line!")
// For platform modules (type modules), the qualified type name is just the type name.
// Type modules like Stdout.roc store associated items as "Stdout.line!" (not "Stdout.roc.Stdout.line!")
// because processTypeDeclFirstPass uses parent_name=null for top-level types.
// Insert into app_env (calling module) since Ident.Idx values are not transferable between stores.
const qualified_type_name = try std.fmt.allocPrint(allocs.gpa, "{s}.roc.{s}", .{ module_name, module_name });
defer allocs.gpa.free(qualified_type_name);
const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(qualified_type_name));
const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(module_name));
// Look up the type in the platform module's exposed_items to get the actual node index
const type_ident_in_platform = platform_env.common.findIdent(module_name) orelse {
std.log.err("Platform module '{s}' does not expose a type named '{s}'", .{ module_name, module_name });
return error.MissingTypeInPlatformModule;
};
const type_node_idx = platform_env.getExposedNodeIndexById(type_ident_in_platform) orelse {
std.log.err("Platform module type '{s}' has no node index in exposed_items", .{module_name});
return error.MissingNodeIndexForPlatformType;
};
const auto_type = Can.AutoImportedType{
.env = platform_env,
.statement_idx = @enumFromInt(0), // Non-null triggers qualified name building
.statement_idx = @enumFromInt(type_node_idx), // actual type node index for e_lookup_external
.qualified_type_ident = type_qualified_ident,
};
@ -1831,7 +1947,7 @@ fn compileModuleToSharedMemory(
parse_ast.store.emptyScratch();
// Initialize CIR
try env.initCIRFields(shm_allocator, module_name_copy);
try env.initCIRFields(module_name_copy);
// Create module_envs map
var module_envs_map = std.AutoHashMap(base.Ident.Idx, Can.AutoImportedType).init(allocs.gpa);
@ -3052,7 +3168,7 @@ fn rocTest(allocs: *Allocators, args: cli_args.TestArgs) !void {
parse_ast.store.emptyScratch();
// Initialize CIR fields in ModuleEnv
try env.initCIRFields(allocs.gpa, module_name);
try env.initCIRFields(module_name);
// Populate module_envs with Bool, Try, Dict, Set using shared function
try Can.populateModuleEnvs(
@ -3222,8 +3338,7 @@ fn rocTest(allocs: *Allocators, args: cli_args.TestArgs) !void {
}
}
fn rocRepl(allocs: *Allocators) !void {
_ = allocs;
fn rocRepl(_: *Allocators) !void {
const stderr = stderrWriter();
defer stderr.flush() catch {};
stderr.print("repl not implemented\n", .{}) catch {};

View file

@ -1080,3 +1080,63 @@ test "run allows warnings without blocking execution" {
// Should produce output (runs successfully)
try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello, World!") != null);
}
test "fx platform method inspect on string" {
// Tests that calling .inspect() on a Str correctly reports MISSING METHOD
// (Str doesn't have an inspect method, unlike custom opaque types)
const allocator = testing.allocator;
const run_result = try runRoc(allocator, "test/fx/test_method_inspect.roc", .{});
defer allocator.free(run_result.stdout);
defer allocator.free(run_result.stderr);
// This should fail because Str doesn't have an inspect method
try checkFailure(run_result);
// Should show MISSING METHOD error
try testing.expect(std.mem.indexOf(u8, run_result.stderr, "MISSING METHOD") != null);
}
test "fx platform if-expression closure capture regression" {
// Regression test: Variables bound inside an if-expression's block were
// incorrectly being captured as free variables by the enclosing lambda,
// causing a crash with "e_closure: failed to resolve capture value".
const allocator = testing.allocator;
const run_result = try runRoc(allocator, "test/fx/if-closure-capture.roc", .{});
defer allocator.free(run_result.stdout);
defer allocator.free(run_result.stderr);
try checkSuccess(run_result);
}
test "fx platform var with string interpolation segfault" {
// Regression test: using `var` variables with string interpolation caused a segfault.
// The code calls fnA! multiple times, each using var state variables, and
// interpolates the results into strings.
const allocator = testing.allocator;
const run_result = try runRoc(allocator, "test/fx/var_interp_segfault.roc", .{});
defer allocator.free(run_result.stdout);
defer allocator.free(run_result.stderr);
try checkSuccess(run_result);
// Verify the expected output
try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A1: 1") != null);
try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A2: 1") != null);
try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A3: 1") != null);
}
test "fx platform sublist method on inferred type" {
// Regression test: calling the .sublist() method on a List(U8) from "".to_utf8()
// caused a segfault when the variable didn't have an explicit type annotation.
// Error was: "Roc crashed: Error evaluating from shared memory: InvalidMethodReceiver"
const allocator = testing.allocator;
const run_result = try runRoc(allocator, "test/fx/sublist_method_segfault.roc", .{});
defer allocator.free(run_result.stdout);
defer allocator.free(run_result.stderr);
try checkSuccess(run_result);
}

View file

@ -86,13 +86,6 @@ test "roc docs generates nested package documentation" {
\\
);
// Create output directory path
const output_dir = try std.fs.path.join(gpa, &[_][]const u8{ tmp_path, "generated-docs" });
defer gpa.free(output_dir);
const root_path = try std.fs.path.join(gpa, &[_][]const u8{ tmp_path, "root.roc" });
defer gpa.free(root_path);
// Note: We would call main.rocDocs(gpa, args) here, but it requires
// a full build environment setup. Instead, we test the individual
// helper functions in separate tests below.
@ -103,9 +96,6 @@ test "roc docs generates nested package documentation" {
tmp.dir.access("bar/main.roc", .{}) catch unreachable;
tmp.dir.access("baz/main.roc", .{}) catch unreachable;
tmp.dir.access("qux/main.roc", .{}) catch unreachable;
_ = root_path;
_ = output_dir;
}
test "generatePackageIndex creates valid HTML" {

View file

@ -23,7 +23,7 @@ pub fn SafeRange(comptime Idx: type) type {
/// An empty range
pub fn empty() Self {
return .{ .start = @enumFromInt(0), .count = 0 };
return .{ .start = undefined, .count = 0 };
}
// Drop first elem from the span, if possible
@ -99,6 +99,8 @@ pub fn SafeList(comptime T: type) type {
/// An index for an item in the list.
pub const Idx = enum(u32) {
/// The first valid index in the list.
first = 0,
_,
/// Get the raw u32 value for storage
@ -246,6 +248,11 @@ pub fn SafeList(comptime T: type) type {
/// Convert a range to a slice
pub fn sliceRange(self: *const SafeList(T), range: Range) Slice {
// Empty ranges have undefined start, return empty slice directly
if (range.count == 0) {
return &.{};
}
const start: usize = @intFromEnum(range.start);
const end: usize = start + range.count;
@ -368,7 +375,7 @@ pub fn SafeList(comptime T: type) type {
return Iterator{
.array = self,
.len = self.len(),
.current = @enumFromInt(0),
.current = .first,
};
}
};
@ -396,7 +403,7 @@ pub fn SafeMultiList(comptime T: type) type {
items: std.MultiArrayList(T) = .{},
/// Index of an item in the list.
pub const Idx = enum(u32) { zero = 0, _ };
pub const Idx = enum(u32) { first = 0, _ };
/// A non-type-safe slice of the list.
pub const Slice = std.MultiArrayList(T).Slice;
@ -461,7 +468,7 @@ pub fn SafeMultiList(comptime T: type) type {
pub fn appendSlice(self: *SafeMultiList(T), gpa: Allocator, elems: []const T) std.mem.Allocator.Error!Range {
if (elems.len == 0) {
return .{ .start = .zero, .count = 0 };
return .{ .start = .first, .count = 0 };
}
const start_length = self.len();
try self.items.ensureUnusedCapacity(gpa, elems.len);
@ -474,6 +481,17 @@ pub fn SafeMultiList(comptime T: type) type {
/// Convert a range to a slice
pub fn sliceRange(self: *const SafeMultiList(T), range: Range) Slice {
// Empty ranges have undefined start, return empty slice directly
if (range.count == 0) {
const base = self.items.slice();
// Return a zero-length slice based on the existing slice
return .{
.ptrs = base.ptrs,
.len = 0,
.capacity = 0,
};
}
const start: usize = @intFromEnum(range.start);
const end: usize = start + range.count;
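Since Range.empty() now leaves start undefined, count is the only field that may be read unconditionally, and both sliceRange implementations above branch on it before touching start. A caller-side sketch of the same discipline, assuming SafeList(T).Range is the SafeRange shown earlier:
/// Sketch: sum a possibly-empty range without ever reading an
/// undefined `start`.
fn sumRange(list: *const SafeList(u32), range: SafeList(u32).Range) u64 {
    var total: u64 = 0;
    // sliceRange short-circuits count == 0, so ranges produced by
    // Range.empty() never dereference their undefined start.
    for (list.sliceRange(range)) |item| total += item;
    return total;
}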
@ -963,7 +981,7 @@ test "SafeList edge cases serialization" {
try testing.expectEqual(@as(usize, 0), deserialized.list_u32.len());
try testing.expectEqual(@as(usize, 1), deserialized.list_u8.len());
try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(.first).*);
}
}
@ -1048,11 +1066,12 @@ test "SafeList CompactWriter complete roundtrip example" {
const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Step 8: Verify data is accessible and correct
const Idx = SafeList(u32).Idx;
try testing.expectEqual(@as(usize, 4), deserialized.len());
try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u32, 100), deserialized.get(.first).*);
try testing.expectEqual(@as(u32, 200), deserialized.get(@as(Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u32, 300), deserialized.get(@as(Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u32, 400), deserialized.get(@as(Idx, @enumFromInt(3))).*);
}
test "SafeList CompactWriter multiple lists with different alignments" {
@ -1155,10 +1174,11 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u8));
offset += 3 * @sizeOf(u8);
const U8Idx = SafeList(u8).Idx;
try testing.expectEqual(@as(usize, 3), deser_u8.len());
try testing.expectEqual(@as(u8, 10), deser_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 20), deser_u8.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u8, 30), deser_u8.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u8, 10), deser_u8.get(.first).*);
try testing.expectEqual(@as(u8, 20), deser_u8.get(@as(U8Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u8, 30), deser_u8.get(@as(U8Idx, @enumFromInt(2))).*);
// 2. Deserialize u16 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized));
@ -1169,9 +1189,10 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u16));
offset += 2 * @sizeOf(u16);
const U16Idx = SafeList(u16).Idx;
try testing.expectEqual(@as(usize, 2), deser_u16.len());
try testing.expectEqual(@as(u16, 1000), deser_u16.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u16, 2000), deser_u16.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u16, 1000), deser_u16.get(.first).*);
try testing.expectEqual(@as(u16, 2000), deser_u16.get(@as(U16Idx, @enumFromInt(1))).*);
// 3. Deserialize u32 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized));
@ -1182,11 +1203,12 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u32));
offset += 4 * @sizeOf(u32);
const U32Idx = SafeList(u32).Idx;
try testing.expectEqual(@as(usize, 4), deser_u32.len());
try testing.expectEqual(@as(u32, 100_000), deser_u32.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u32, 100_000), deser_u32.get(.first).*);
try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@as(U32Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@as(U32Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@as(U32Idx, @enumFromInt(3))).*);
// 4. Deserialize u64 list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized));
@ -1197,22 +1219,24 @@ test "SafeList CompactWriter multiple lists with different alignments" {
offset = std.mem.alignForward(usize, offset, @alignOf(u64));
offset += 2 * @sizeOf(u64);
const U64Idx = SafeList(u64).Idx;
try testing.expectEqual(@as(usize, 2), deser_u64.len());
try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(.first).*);
try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@as(U64Idx, @enumFromInt(1))).*);
// 5. Deserialize struct list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(AlignedStruct).Serialized));
const s_struct = @as(*SafeList(AlignedStruct).Serialized, @ptrCast(@alignCast(buffer.ptr + offset)));
const deser_struct = s_struct.deserialize(@as(i64, @intCast(base_addr)));
const StructIdx = SafeList(AlignedStruct).Idx;
try testing.expectEqual(@as(usize, 2), deser_struct.len());
const item0 = deser_struct.get(@enumFromInt(0));
const item0 = deser_struct.get(.first);
try testing.expectEqual(@as(u32, 42), item0.x);
try testing.expectEqual(@as(u64, 1337), item0.y);
try testing.expectEqual(@as(u8, 255), item0.z);
const item1 = deser_struct.get(@enumFromInt(1));
const item1 = deser_struct.get(@as(StructIdx, @enumFromInt(1)));
try testing.expectEqual(@as(u32, 99), item1.x);
try testing.expectEqual(@as(u64, 9999), item1.y);
try testing.expectEqual(@as(u8, 128), item1.z);
@ -1318,10 +1342,11 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u8));
offset += 3; // 3 u8 elements
const D1Idx = SafeList(u8).Idx;
try testing.expectEqual(@as(usize, 3), d1.len());
try testing.expectEqual(@as(u8, 1), d1.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 2), d1.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u8, 3), d1.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u8, 1), d1.get(.first).*);
try testing.expectEqual(@as(u8, 2), d1.get(@as(D1Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u8, 3), d1.get(@as(D1Idx, @enumFromInt(2))).*);
// 2. Second list - u64
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized));
@ -1331,9 +1356,10 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u64));
offset += 2 * @sizeOf(u64); // 2 u64 elements
const D2Idx = SafeList(u64).Idx;
try testing.expectEqual(@as(usize, 2), d2.len());
try testing.expectEqual(@as(u64, 1_000_000), d2.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u64, 2_000_000), d2.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u64, 1_000_000), d2.get(.first).*);
try testing.expectEqual(@as(u64, 2_000_000), d2.get(@as(D2Idx, @enumFromInt(1))).*);
// 3. Third list - u16
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized));
@ -1343,11 +1369,12 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
offset = std.mem.alignForward(usize, offset, @alignOf(u16));
offset += 4 * @sizeOf(u16); // 4 u16 elements
const D3Idx = SafeList(u16).Idx;
try testing.expectEqual(@as(usize, 4), d3.len());
try testing.expectEqual(@as(u16, 100), d3.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u16, 200), d3.get(@enumFromInt(1)).*);
try testing.expectEqual(@as(u16, 300), d3.get(@enumFromInt(2)).*);
try testing.expectEqual(@as(u16, 400), d3.get(@enumFromInt(3)).*);
try testing.expectEqual(@as(u16, 100), d3.get(.first).*);
try testing.expectEqual(@as(u16, 200), d3.get(@as(D3Idx, @enumFromInt(1))).*);
try testing.expectEqual(@as(u16, 300), d3.get(@as(D3Idx, @enumFromInt(2))).*);
try testing.expectEqual(@as(u16, 400), d3.get(@as(D3Idx, @enumFromInt(3))).*);
// 4. Fourth list - u32
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized));
@ -1355,7 +1382,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" {
const d4 = s4.deserialize(@as(i64, @intCast(base)));
try testing.expectEqual(@as(usize, 1), d4.len());
try testing.expectEqual(@as(u32, 42), d4.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u32, 42), d4.get(.first).*);
}
test "SafeList CompactWriter brute-force alignment verification" {
@ -1476,7 +1503,7 @@ test "SafeList CompactWriter brute-force alignment verification" {
offset += 1; // 1 u8 element
try testing.expectEqual(@as(usize, 1), d_u8.len());
try testing.expectEqual(@as(u8, 42), d_u8.get(@enumFromInt(0)).*);
try testing.expectEqual(@as(u8, 42), d_u8.get(.first).*);
// Second list
offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(T).Serialized));
@ -1551,28 +1578,32 @@ test "SafeMultiList CompactWriter roundtrip with file" {
const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Verify the data
const Idx = SafeMultiList(TestStruct).Idx;
try testing.expectEqual(@as(usize, 4), deserialized.len());
// Verify all the data
try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).id);
try testing.expectEqual(@as(u64, 1000), deserialized.get(@enumFromInt(0)).value);
try testing.expectEqual(true, deserialized.get(@enumFromInt(0)).flag);
try testing.expectEqual(@as(u8, 10), deserialized.get(@enumFromInt(0)).data);
try testing.expectEqual(@as(u32, 100), deserialized.get(.first).id);
try testing.expectEqual(@as(u64, 1000), deserialized.get(.first).value);
try testing.expectEqual(true, deserialized.get(.first).flag);
try testing.expectEqual(@as(u8, 10), deserialized.get(.first).data);
try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).id);
try testing.expectEqual(@as(u64, 2000), deserialized.get(@enumFromInt(1)).value);
try testing.expectEqual(false, deserialized.get(@enumFromInt(1)).flag);
try testing.expectEqual(@as(u8, 20), deserialized.get(@enumFromInt(1)).data);
const second_idx: Idx = @enumFromInt(1);
try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).id);
try testing.expectEqual(@as(u64, 2000), deserialized.get(second_idx).value);
try testing.expectEqual(false, deserialized.get(second_idx).flag);
try testing.expectEqual(@as(u8, 20), deserialized.get(second_idx).data);
try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).id);
try testing.expectEqual(@as(u64, 3000), deserialized.get(@enumFromInt(2)).value);
try testing.expectEqual(true, deserialized.get(@enumFromInt(2)).flag);
try testing.expectEqual(@as(u8, 30), deserialized.get(@enumFromInt(2)).data);
const third_idx: Idx = @enumFromInt(2);
try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).id);
try testing.expectEqual(@as(u64, 3000), deserialized.get(third_idx).value);
try testing.expectEqual(true, deserialized.get(third_idx).flag);
try testing.expectEqual(@as(u8, 30), deserialized.get(third_idx).data);
try testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).id);
try testing.expectEqual(@as(u64, 4000), deserialized.get(@enumFromInt(3)).value);
try testing.expectEqual(false, deserialized.get(@enumFromInt(3)).flag);
try testing.expectEqual(@as(u8, 40), deserialized.get(@enumFromInt(3)).data);
const fourth_idx: Idx = @enumFromInt(3);
try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).id);
try testing.expectEqual(@as(u64, 4000), deserialized.get(fourth_idx).value);
try testing.expectEqual(false, deserialized.get(fourth_idx).flag);
try testing.expectEqual(@as(u8, 40), deserialized.get(fourth_idx).data);
}
test "SafeMultiList empty list CompactWriter roundtrip" {
@ -1702,30 +1733,31 @@ test "SafeMultiList CompactWriter multiple lists different alignments" {
const base = @as(i64, @intCast(@intFromPtr(buffer.ptr)));
// Deserialize list1 (at offset1)
const D1Idx = SafeMultiList(Type1).Idx;
const d1_serialized = @as(*SafeMultiList(Type1).Serialized, @ptrCast(@alignCast(buffer.ptr + offset1)));
const d1 = d1_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 3), d1.len());
try testing.expectEqual(@as(u8, 10), d1.get(@enumFromInt(0)).a);
try testing.expectEqual(@as(u16, 100), d1.get(@enumFromInt(0)).b);
try testing.expectEqual(@as(u8, 20), d1.get(@enumFromInt(1)).a);
try testing.expectEqual(@as(u16, 200), d1.get(@enumFromInt(1)).b);
try testing.expectEqual(@as(u8, 30), d1.get(@enumFromInt(2)).a);
try testing.expectEqual(@as(u16, 300), d1.get(@enumFromInt(2)).b);
try testing.expectEqual(@as(u8, 10), d1.get(.first).a);
try testing.expectEqual(@as(u16, 100), d1.get(.first).b);
try testing.expectEqual(@as(u8, 20), d1.get(@as(D1Idx, @enumFromInt(1))).a);
try testing.expectEqual(@as(u16, 200), d1.get(@as(D1Idx, @enumFromInt(1))).b);
try testing.expectEqual(@as(u8, 30), d1.get(@as(D1Idx, @enumFromInt(2))).a);
try testing.expectEqual(@as(u16, 300), d1.get(@as(D1Idx, @enumFromInt(2))).b);
// Deserialize list2 (at offset2)
const d2_serialized = @as(*SafeMultiList(Type2).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2)));
const d2 = d2_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 2), d2.len());
try testing.expectEqual(@as(u32, 1000), d2.get(@enumFromInt(0)).x);
try testing.expectEqual(@as(u64, 10000), d2.get(@enumFromInt(0)).y);
try testing.expectEqual(@as(u32, 1000), d2.get(.first).x);
try testing.expectEqual(@as(u64, 10000), d2.get(.first).y);
// Deserialize list3 (at offset3)
const d3_serialized = @as(*SafeMultiList(Type3).Serialized, @ptrCast(@alignCast(buffer.ptr + offset3)));
const d3 = d3_serialized.deserialize(base);
try testing.expectEqual(@as(usize, 2), d3.len());
try testing.expectEqual(@as(u64, 999), d3.get(@enumFromInt(0)).id);
try testing.expectEqual(@as(u8, 42), d3.get(@enumFromInt(0)).data);
try testing.expectEqual(true, d3.get(@enumFromInt(0)).flag);
try testing.expectEqual(@as(u64, 999), d3.get(.first).id);
try testing.expectEqual(@as(u8, 42), d3.get(.first).data);
try testing.expectEqual(true, d3.get(.first).flag);
}
test "SafeMultiList CompactWriter brute-force alignment verification" {
@ -1815,10 +1847,11 @@ test "SafeMultiList CompactWriter brute-force alignment verification" {
const d2_serialized = @as(*SafeMultiList(TestType).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2)));
const d2 = d2_serialized.deserialize(base);
if (length > 0) {
const d2_first_idx: SafeMultiList(TestType).Idx = .first;
try testing.expectEqual(@as(usize, 1), d2.len());
try testing.expectEqual(@as(u8, 255), d2.get(@enumFromInt(0)).a);
try testing.expectEqual(@as(u32, 999999), d2.get(@enumFromInt(0)).b);
try testing.expectEqual(@as(u64, 888888888), d2.get(@enumFromInt(0)).c);
try testing.expectEqual(@as(u8, 255), d2.get(d2_first_idx).a);
try testing.expectEqual(@as(u32, 999999), d2.get(d2_first_idx).b);
try testing.expectEqual(@as(u64, 888888888), d2.get(d2_first_idx).c);
} else {
try testing.expectEqual(@as(usize, 0), d2.len());
}
@ -1963,8 +1996,7 @@ test "SafeMultiList CompactWriter verify exact memory layout" {
// Sort by alignment descending, then name ascending
std.mem.sort(FieldInfo, &field_infos, {}, struct {
fn lessThan(ctx: void, lhs: FieldInfo, rhs: FieldInfo) bool {
_ = ctx;
fn lessThan(_: void, lhs: FieldInfo, rhs: FieldInfo) bool {
if (lhs.alignment != rhs.alignment) {
return lhs.alignment > rhs.alignment;
}
@ -2287,7 +2319,8 @@ test "SafeMultiList.Serialized roundtrip" {
try testing.expectEqual(@as(u8, 64), c_values[2]);
// Check get() method
const item1 = list.get(@as(SafeMultiList(TestStruct).Idx, @enumFromInt(0)));
const first_idx: SafeMultiList(TestStruct).Idx = .first;
const item1 = list.get(first_idx);
try testing.expectEqual(@as(u32, 100), item1.a);
try testing.expectEqual(@as(f32, 1.5), item1.b);
try testing.expectEqual(@as(u8, 255), item1.c);

View file

@ -84,13 +84,10 @@ pub const CacheKey = struct {
/// Format cache key for debugging output.
pub fn format(
self: Self,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
try writer.print("CacheKey{{ content: {x}, mtime: {}, compiler: {x} }}", .{
self.content_hash[0..8], // First 8 bytes for readability
self.file_mtime,
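The signature above replaces body-level discards (`_ = fmt;` and `_ = options;`) with unnamed parameters. A self-contained sketch of the idiom:

const std = @import("std");

// Naming a parameter `_` discards it in the signature itself, so the
// discard can never drift out of sync with the parameter list the way
// a body-level `_ = fmt;` can.
fn scaled(_: []const u8, value: u32) u32 {
    return value * 2;
}

test "unnamed parameters are simply ignored" {
    try std.testing.expectEqual(@as(u32, 84), scaled("ignored", 42));
}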

View file

@ -65,12 +65,6 @@ pub const CacheManager = struct {
};
}
/// Deinitialize the cache manager.
pub fn deinit(self: *Self) void {
_ = self;
// Nothing to deinit currently
}
/// Load a cached module based on its content and compiler version.
/// Look up a cache entry by content and compiler version.
///

View file

@ -201,17 +201,6 @@ pub const CacheModule = struct {
}
}
/// Convenience functions for reading/writing cache files
pub fn writeToFile(
allocator: Allocator,
cache_data: []const u8,
file_path: []const u8,
filesystem: anytype,
) !void {
_ = allocator;
try filesystem.writeFile(file_path, cache_data);
}
/// Convenience function for reading cache files
pub fn readFromFile(
allocator: Allocator,

View file

@ -294,10 +294,8 @@ const GlobalQueue = struct {
}
// Hook from ModuleBuild to enqueue newly discovered/scheduled modules
pub fn hookOnSchedule(ctx: ?*anyopaque, package_name: []const u8, module_name: []const u8, _path: []const u8, _depth: u32) void {
pub fn hookOnSchedule(ctx: ?*anyopaque, package_name: []const u8, module_name: []const u8, _: []const u8, _: u32) void {
var self: *GlobalQueue = @ptrCast(@alignCast(ctx.?));
_ = _path;
_ = _depth;
// Enqueue to global queue - log but don't fail on error
self.enqueue(package_name, module_name) catch {
// Continue anyway - the module will still be processed by local scheduler
@ -411,7 +409,6 @@ pub const BuildEnv = struct {
// Deinit cache manager if present
if (self.cache_manager) |cm| {
cm.deinit();
self.gpa.destroy(cm);
}
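The dropped `cm.deinit()` call pairs with the removal of `CacheManager.deinit` above (which had nothing to tear down); `BuildEnv` now only frees the allocation with `self.gpa.destroy(cm)`.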
@ -682,14 +679,8 @@ pub const BuildEnv = struct {
ws: *BuildEnv,
// Called by ModuleBuild.schedule_hook when a module is discovered/scheduled
pub fn onSchedule(ctx: ?*anyopaque, package_name: []const u8, module_name: []const u8, _path: []const u8, _depth: u32) void {
const self: *ScheduleCtx = @ptrCast(@alignCast(ctx.?));
_ = package_name;
_ = module_name;
_ = _path;
_ = _depth;
pub fn onSchedule(_: ?*anyopaque, _: []const u8, _: []const u8, _: []const u8, _: u32) void {
// Early reports auto-register in OrderedSink.emitReport when they are emitted
_ = self;
}
};
@ -704,12 +695,6 @@ pub const BuildEnv = struct {
}
}
fn resolverClassify(ctx: ?*anyopaque, _: []const u8, _: []const u8) bool {
_ = ctx;
// Unused: ModuleBuild determines external vs local from CIR (s_import.qualifier_tok)
return false;
}
fn resolverScheduleExternal(ctx: ?*anyopaque, current_package: []const u8, import_name: []const u8) void {
var self: *ResolverCtx = @ptrCast(@alignCast(ctx.?));
const cur_pkg = self.ws.packages.get(current_package) orelse return;
@ -761,8 +746,7 @@ pub const BuildEnv = struct {
return sched.*.getEnvIfDone(rest);
}
fn resolverResolveLocalPath(ctx: ?*anyopaque, _current_package: []const u8, root_dir: []const u8, import_name: []const u8) []const u8 {
_ = _current_package;
fn resolverResolveLocalPath(ctx: ?*anyopaque, _: []const u8, root_dir: []const u8, import_name: []const u8) []const u8 {
var self: *ResolverCtx = @ptrCast(@alignCast(ctx.?));
return self.ws.dottedToPath(root_dir, import_name) catch import_name;
}
@ -774,7 +758,6 @@ pub const BuildEnv = struct {
ctx.* = .{ .ws = self };
return .{
.ctx = ctx,
.classify = resolverClassify,
.scheduleExternal = resolverScheduleExternal,
.isReady = resolverIsReady,
.getEnv = resolverGetEnv,

View file

@ -83,8 +83,6 @@ pub const ScheduleHook = struct {
/// Resolver for handling imports across package boundaries
pub const ImportResolver = struct {
ctx: ?*anyopaque,
/// Return true if the import_name refers to an external package (e.g. "cli.Stdout")
classify: *const fn (ctx: ?*anyopaque, current_package: []const u8, import_name: []const u8) bool,
/// Ensure the external import is scheduled for building in its owning package
scheduleExternal: *const fn (ctx: ?*anyopaque, current_package: []const u8, import_name: []const u8) void,
/// Return true if the external import is fully type-checked and its ModuleEnv is ready
@ -579,7 +577,7 @@ pub const PackageEnv = struct {
var env = try ModuleEnv.init(self.gpa, src);
// init CIR fields
try env.initCIRFields(self.gpa, st.name);
try env.initCIRFields(st.name);
try env.common.calcLineStarts(self.gpa);

View file

@ -325,7 +325,7 @@
// var module_env = try ModuleEnv.init(gpa, source);
// defer module_env.deinit();
// try module_env.initCIRFields(gpa, "TestModule");
// try module_env.initCIRFields("TestModule");
// // CIR is now just an alias for ModuleEnv, so use module_env directly
// const cir = &module_env;
@ -401,7 +401,7 @@
// // var module_env = try ModuleEnv.init(gpa, source);
// // defer module_env.deinit();
// // try module_env.initCIRFields(gpa, "TestModule");
// // try module_env.initCIRFields("TestModule");
// // // CIR is now just an alias for ModuleEnv, so use module_env directly
// // const cir = &module_env;

View file

@ -34,7 +34,7 @@ test "ModuleEnv.Serialized roundtrip" {
_ = try original.common.line_starts.append(gpa, 20);
// Initialize CIR fields to ensure imports are available
try original.initCIRFields(gpa, "TestModule");
try original.initCIRFields("TestModule");
// Add some imports to test serialization/deserialization
const import1 = try original.imports.getOrPut(gpa, &original.common.strings, "json.Json");
@ -193,7 +193,7 @@ test "ModuleEnv.Serialized roundtrip" {
// defer original.deinit();
// // Initialize CIR fields
// try original.initCIRFields(gpa, "test.Types");
// try original.initCIRFields("test.Types");
// // Add some type variables
// const var1 = try original.types.freshFromContent(.err);
@ -358,7 +358,7 @@ test "ModuleEnv.Serialized roundtrip" {
// defer original.deinit();
// // Initialize CIR fields
// try original.initCIRFields(gpa, "test.Hello");
// try original.initCIRFields("test.Hello");
// // Create arena allocator for serialization
// var arena = std.heap.ArenaAllocator.init(gpa);
@ -431,11 +431,8 @@ test "ModuleEnv pushExprTypesToSExprTree extracts and formats types" {
.origin_module = builtin_ident,
.is_opaque = false,
};
const str_type = try env.types.freshFromContent(.{ .structure = .{ .nominal_type = str_nominal } });
// Add a string segment expression
const segment_idx = try env.addExpr(.{ .e_str_segment = .{ .literal = str_literal_idx } }, base.Region.from_raw_offsets(0, 5));
_ = str_type;
// Now create a string expression that references the segment
const expr_idx = try env.addExpr(.{ .e_str = .{ .span = Expr.Span{ .span = base.DataSpan{ .start = @intFromEnum(segment_idx), .len = 1 } } } }, base.Region.from_raw_offsets(0, 5));

View file

@ -64,7 +64,7 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" {
defer result.deinit();
// Now get the type of map_result and convert it to a string
// Find the map_result definition
// Find the map_result definition and get its type var from the expression
const defs_slice = env.store.sliceDefs(env.all_defs);
var map_result_var: ?types.Var = null;
for (defs_slice) |def_idx| {
@ -74,8 +74,8 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" {
const ident_idx = pattern.assign.ident;
const ident_text = env.getIdent(ident_idx);
if (std.mem.eql(u8, ident_text, "map_result")) {
// Get the type variable from the first definition - it's the first in the defs list
map_result_var = @enumFromInt(0); // First variable
// Get the type variable from the definition's expression
map_result_var = ModuleEnv.varFrom(def.expr);
break;
}
}

View file

@ -34,17 +34,206 @@ const Expr = CIR.Expr;
const StackValue = @This();
// ============================================================================
// Internal helper functions for memory operations that don't need rt_var
// ============================================================================
/// Increment reference count for a value given its layout and pointer.
/// Used internally when we don't need full StackValue type information.
fn increfLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.incref(1);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
const list_value = @as(*const RocList, @ptrCast(@alignCast(ptr.?))).*;
list_value.incref(1, false);
return;
}
if (layout.tag == .box) {
if (ptr == null) return;
const slot: *usize = @ptrCast(@alignCast(ptr.?));
if (slot.* != 0) {
const data_ptr: [*]u8 = @as([*]u8, @ptrFromInt(slot.*));
builtins.utils.increfDataPtrC(@as(?[*]u8, data_ptr), 1);
}
return;
}
if (layout.tag == .record) {
if (ptr == null) return;
const record_data = layout_cache.getRecordData(layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
increfLayoutPtr(field_layout, field_ptr, layout_cache);
}
return;
}
if (layout.tag == .tuple) {
if (ptr == null) return;
const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
increfLayoutPtr(elem_layout, elem_ptr, layout_cache);
}
return;
}
if (layout.tag == .tag_union) {
if (ptr == null) return;
// For unions, we need to read the tag and incref the appropriate payload
// This is complex - for now just skip (caller should handle specific union types)
return;
}
// Other layout types (scalar ints/floats, zst, etc.) don't need refcounting
}
/// Decrement reference count for a value given its layout and pointer.
/// Used internally when we don't need full StackValue type information.
fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore, ops: *RocOps) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.decref(ops);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
const list_header: *const RocList = @ptrCast(@alignCast(ptr.?));
const list_value = list_header.*;
const elem_layout = layout_cache.getLayout(layout.data.list);
const alignment_u32: u32 = @intCast(elem_layout.alignment(layout_cache.targetUsize()).toByteUnits());
const element_width: usize = @intCast(layout_cache.layoutSize(elem_layout));
const elements_refcounted = elem_layout.isRefcounted();
// Decref elements when unique
if (list_value.isUnique()) {
if (list_value.getAllocationDataPtr()) |source| {
const count = list_value.getAllocationElementCount(elements_refcounted);
var idx: usize = 0;
while (idx < count) : (idx += 1) {
const elem_ptr = source + idx * element_width;
decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops);
}
}
}
list_value.decref(alignment_u32, element_width, elements_refcounted, null, &builtins.list.rcNone, ops);
return;
}
if (layout.tag == .box) {
if (ptr == null) return;
const slot: *usize = @ptrCast(@alignCast(ptr.?));
const raw_ptr = slot.*;
if (raw_ptr == 0) return;
const data_ptr = @as([*]u8, @ptrFromInt(raw_ptr));
const target_usize = layout_cache.targetUsize();
const elem_layout = layout_cache.getLayout(layout.data.box);
const elem_alignment: u32 = @intCast(elem_layout.alignment(target_usize).toByteUnits());
const ptr_int = @intFromPtr(data_ptr);
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const unmasked_ptr = ptr_int & ~tag_mask;
const payload_ptr = @as([*]u8, @ptrFromInt(unmasked_ptr));
const refcount_ptr: *isize = @as(*isize, @ptrFromInt(unmasked_ptr - @sizeOf(isize)));
if (builtins.utils.rcUnique(refcount_ptr.*)) {
if (elem_layout.isRefcounted()) {
decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops);
}
}
builtins.utils.decrefDataPtrC(@as(?[*]u8, payload_ptr), elem_alignment, false, ops);
slot.* = 0;
return;
}
if (layout.tag == .record) {
if (ptr == null) return;
const record_data = layout_cache.getRecordData(layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
decrefLayoutPtr(field_layout, field_ptr, layout_cache, ops);
}
return;
}
if (layout.tag == .tuple) {
if (ptr == null) return;
const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
decrefLayoutPtr(elem_layout, elem_ptr, layout_cache, ops);
}
return;
}
if (layout.tag == .closure) {
if (ptr == null) return;
// Get the closure header to find the captures layout
const closure_header: *const layout_mod.Closure = @ptrCast(@alignCast(ptr.?));
const captures_layout = layout_cache.getLayout(closure_header.captures_layout_idx);
// Only decref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (record_data.fields.count > 0) {
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
decrefLayoutPtr(captures_layout, rec_ptr, layout_cache, ops);
}
}
return;
}
// Other layout types (scalar ints/floats, zst, etc.) don't need refcounting
}
/// Type and memory layout information for the result value
layout: Layout,
/// Ptr to the actual value in stack memory
ptr: ?*anyopaque,
/// Flag to track whether the memory has been initialized
is_initialized: bool = false,
/// Optional runtime type variable for type information (used in constant folding)
rt_var: ?types.Var = null,
/// Runtime type variable for type information (used for method dispatch and constant folding)
rt_var: types.Var,
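With `rt_var` now non-optional, every constructed `StackValue` must supply a runtime type variable. The hunks below show the three patterns used: threading the source value's `rt_var`, requesting a fresh variable from `runtime_types`, or passing `undefined` with a comment where only the layout matters.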
/// Copy this stack value to a destination pointer with bounds checking
pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopaque, _: *RocOps) !void {
pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopaque) !void {
std.debug.assert(self.is_initialized); // Source must be initialized before copying
// For closures, use getTotalSize to include capture data; for others use layoutSize
@ -226,13 +415,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
increfLayoutPtr(field_layout, field_ptr, layout_cache);
}
return;
}
@ -263,13 +446,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.incref(layout_cache);
increfLayoutPtr(elem_layout, elem_ptr, layout_cache);
}
return;
}
@ -304,29 +481,8 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?));
const rec_ptr: [*]u8 = @ptrCast(base_ptr + aligned_off);
// Iterate over each field in the captures record and incref all fields.
// We call incref on ALL fields (not just isRefcounted()) because:
// - For directly refcounted types (str, list, box): increfs them
// - For nested records/tuples: recursively handles their contents
// - For scalars: incref is a no-op
// This is symmetric with decref.
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(captures_layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(rec_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
}
// Incref the entire captures record (which handles all fields recursively)
increfLayoutPtr(captures_layout, @ptrCast(rec_ptr), layout_cache);
}
}
return;
@ -365,13 +521,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
}
// Incref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
payload_value.incref(layout_cache);
increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache);
return;
}
@ -722,7 +872,7 @@ pub const TupleAccessor = struct {
element_layouts: layout_mod.TupleField.SafeMultiList.Slice,
/// Get a StackValue for the element at the given original index (before sorting)
pub fn getElement(self: TupleAccessor, original_index: usize) !StackValue {
pub fn getElement(self: TupleAccessor, original_index: usize, elem_rt_var: types.Var) !StackValue {
// Find the sorted index corresponding to this original index
const sorted_index = self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds;
@ -748,13 +898,24 @@ pub const TupleAccessor = struct {
.layout = element_layout,
.ptr = element_ptr,
.is_initialized = true, // Elements in existing tuples are initialized
.rt_var = elem_rt_var,
};
}
/// Get just the element pointer without needing type information (for internal operations like setElement)
pub fn getElementPtr(self: TupleAccessor, original_index: usize) !*anyopaque {
const sorted_index = self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds;
std.debug.assert(self.base_value.is_initialized);
std.debug.assert(self.base_value.ptr != null);
const element_offset = self.layout_cache.getTupleElementOffset(self.tuple_layout.data.tuple.idx, @intCast(sorted_index));
const base_ptr = @as([*]u8, @ptrCast(self.base_value.ptr.?));
return @as(*anyopaque, @ptrCast(base_ptr + element_offset));
}
/// Set an element by copying from a source StackValue
pub fn setElement(self: TupleAccessor, index: usize, source: StackValue, ops: *RocOps) !void {
const dest_element = try self.getElement(index);
try source.copyToPtr(self.layout_cache, dest_element.ptr.?, ops);
pub fn setElement(self: TupleAccessor, index: usize, source: StackValue) !void {
const dest_ptr = try self.getElementPtr(index);
try source.copyToPtr(self.layout_cache, dest_ptr);
}
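Readers now supply the element's runtime type variable (the evaluator hunks below typically pass a fresh one from `runtime_types`), while write-only paths such as `setElement` go through the new `getElementPtr` and need no type information at all.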
/// Find the sorted element index corresponding to an original tuple position
@ -871,11 +1032,11 @@ pub const ListAccessor = struct {
return self.list.len();
}
pub fn getElement(self: ListAccessor, index: usize) !StackValue {
pub fn getElement(self: ListAccessor, index: usize, elem_rt_var: types.Var) !StackValue {
if (index >= self.list.len()) return error.ListIndexOutOfBounds;
if (self.element_size == 0) {
return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true };
return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true, .rt_var = elem_rt_var };
}
const base_ptr = self.list.bytes orelse return error.NullStackPointer;
@ -884,8 +1045,18 @@ pub const ListAccessor = struct {
.layout = self.element_layout,
.ptr = @ptrCast(base_ptr + offset),
.is_initialized = true,
.rt_var = elem_rt_var,
};
}
/// Get just the element pointer without needing type information (for internal operations)
pub fn getElementPtr(self: ListAccessor, index: usize) !?*anyopaque {
if (index >= self.list.len()) return error.ListIndexOutOfBounds;
if (self.element_size == 0) return null;
const base_ptr = self.list.bytes orelse return error.NullStackPointer;
const offset = index * self.element_size;
return @ptrCast(base_ptr + offset);
}
};
fn storeListElementCount(list: *RocList, elements_refcounted: bool) void {
@ -961,7 +1132,7 @@ pub const RecordAccessor = struct {
field_layouts: layout_mod.RecordField.SafeMultiList.Slice,
/// Get a StackValue for the field at the given index
pub fn getFieldByIndex(self: RecordAccessor, index: usize) !StackValue {
pub fn getFieldByIndex(self: RecordAccessor, index: usize, field_rt_var: types.Var) !StackValue {
if (index >= self.field_layouts.len) {
return error.RecordIndexOutOfBounds;
}
@ -988,11 +1159,12 @@ pub const RecordAccessor = struct {
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true, // Fields in existing records are initialized
.rt_var = field_rt_var,
};
}
/// Get a StackValue for the field with the given name
pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx) !?StackValue {
pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx, field_rt_var: types.Var) !?StackValue {
const field_offset = self.layout_cache.getRecordFieldOffsetByName(
self.record_layout.data.record.idx,
field_name_idx,
@ -1026,13 +1198,14 @@ pub const RecordAccessor = struct {
.layout = field_layout.?,
.ptr = field_ptr,
.is_initialized = true,
.rt_var = field_rt_var,
};
}
/// Set a field by copying from a source StackValue
pub fn setFieldByIndex(self: RecordAccessor, index: usize, source: StackValue, ops: *RocOps) !void {
const dest_field = try self.getFieldByIndex(index);
try source.copyToPtr(self.layout_cache, dest_field.ptr.?, ops);
pub fn setFieldByIndex(self: RecordAccessor, index: usize, source: StackValue) !void {
const dest_field = try self.getFieldByIndex(index, source.rt_var);
try source.copyToPtr(self.layout_cache, dest_field.ptr.?);
}
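Since `copyToPtr` only needs the destination pointer, the destination view built here simply borrows `source.rt_var`; after the copy the field holds the source's value, so the source's runtime type variable is the sensible one to attach.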
/// Get the number of fields in this record
@ -1168,15 +1341,6 @@ pub fn copyTo(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) vo
);
}
/// Create a StackValue view of a memory region (no copy)
pub fn fromPtr(layout: Layout, ptr: *anyopaque) StackValue {
return StackValue{
.layout = layout,
.ptr = ptr,
.is_initialized = true,
};
}
/// Copy value data to another StackValue WITHOUT incrementing refcounts (move semantics)
pub fn copyWithoutRefcount(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) void {
std.debug.assert(self.is_initialized);
@ -1269,56 +1433,12 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
}
// Handle records by recursively incref'ing each field (symmetric with decref)
if (self.layout.tag == .record) {
if (self.ptr == null) return;
const record_data = layout_cache.getRecordData(self.layout.data.record.idx);
if (record_data.fields.count == 0) return;
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.incref(layout_cache);
}
increfLayoutPtr(self.layout, self.ptr, layout_cache);
return;
}
// Handle tuples by recursively incref'ing each element (symmetric with decref)
if (self.layout.tag == .tuple) {
if (self.ptr == null) return;
const tuple_data = layout_cache.getTupleData(self.layout.data.tuple.idx);
if (tuple_data.fields.count == 0) return;
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.incref(layout_cache);
}
increfLayoutPtr(self.layout, self.ptr, layout_cache);
return;
}
// Handle tag unions by reading discriminant and incref'ing only the active variant's payload
@ -1342,17 +1462,11 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
const variant_layout = layout_cache.getLayout(variants.get(discriminant).payload_layout);
// Incref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
if (comptime trace_refcount) {
traceRefcount("INCREF tag_union disc={} variant_layout.tag={}", .{ discriminant, @intFromEnum(variant_layout.tag) });
}
payload_value.incref(layout_cache);
increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache);
return;
}
}
@ -1450,12 +1564,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
var idx: usize = 0;
while (idx < count) : (idx += 1) {
const elem_ptr = source + idx * element_width;
const elem_value = StackValue{
.layout = elem_layout,
.ptr = @ptrCast(elem_ptr),
.is_initialized = true,
};
elem_value.decref(layout_cache, ops);
decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops);
}
}
}
@ -1498,12 +1607,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
if (builtins.utils.rcUnique(refcount_ptr.*)) {
if (elem_layout.isRefcounted()) {
const payload_value = StackValue{
.layout = elem_layout,
.ptr = @ptrCast(@alignCast(payload_ptr)),
.is_initialized = true,
};
payload_value.decref(layout_cache, ops);
decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops);
}
}
@ -1523,26 +1627,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
});
}
const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var field_index: usize = 0;
while (field_index < field_layouts.len) : (field_index += 1) {
const field_info = field_layouts.get(field_index);
const field_layout = layout_cache.getLayout(field_info.layout);
const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index));
const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset));
const field_value = StackValue{
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
};
field_value.decref(layout_cache, ops);
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.box_of_zst => {
@ -1563,61 +1648,11 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
});
}
const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields());
const base_ptr = @as([*]u8, @ptrCast(self.ptr.?));
var elem_index: usize = 0;
while (elem_index < element_layouts.len) : (elem_index += 1) {
const elem_info = element_layouts.get(elem_index);
const elem_layout = layout_cache.getLayout(elem_info.layout);
const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index));
const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset));
const elem_value = StackValue{
.layout = elem_layout,
.ptr = elem_ptr,
.is_initialized = true,
};
elem_value.decref(layout_cache, ops);
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.closure => {
if (self.ptr == null) return;
// Get the closure header to find the captures layout
const closure = self.asClosure();
const captures_layout = layout_cache.getLayout(closure.captures_layout_idx);
// Only decref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (record_data.fields.count > 0) {
if (comptime trace_refcount) {
traceRefcount("DECREF closure ptr=0x{x} captures={}", .{
@intFromPtr(self.ptr),
record_data.fields.count,
});
}
// Calculate the offset to the captures record (after header, with alignment)
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
// Create a StackValue for the captures record and decref it
const captures_value = StackValue{
.layout = captures_layout,
.ptr = rec_ptr,
.is_initialized = true,
};
captures_value.decref(layout_cache, ops);
}
}
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
return;
},
.tag_union => {
@ -1649,13 +1684,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
}
// Decref only the active variant's payload (at offset 0)
const payload_value = StackValue{
.layout = variant_layout,
.ptr = @as(*anyopaque, @ptrCast(base_ptr)),
.is_initialized = true,
};
payload_value.decref(layout_cache, ops);
decrefLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache, ops);
return;
},
else => {},

View file

@ -54,10 +54,8 @@ fn comptimeRocAlloc(alloc_args: *RocAlloc, env: *anyopaque) callconv(.c) void {
alloc_args.answer = base_ptr;
}
fn comptimeRocDealloc(dealloc_args: *RocDealloc, env: *anyopaque) callconv(.c) void {
fn comptimeRocDealloc(_: *RocDealloc, _: *anyopaque) callconv(.c) void {
// No-op: arena allocator frees all memory at once when evaluation completes
_ = dealloc_args;
_ = env;
}
fn comptimeRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) void {
@ -93,8 +91,7 @@ fn comptimeRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) v
realloc_args.answer = new_ptr;
}
fn comptimeRocDbg(dbg_args: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = env;
fn comptimeRocDbg(dbg_args: *const RocDbg, _: *anyopaque) callconv(.c) void {
var stderr_buffer: [256]u8 = undefined;
var stderr_writer = std.fs.File.stderr().writer(&stderr_buffer);
const stderr = &stderr_writer.interface;
@ -351,16 +348,8 @@ pub const ComptimeEvaluator = struct {
// Convert StackValue to CIR expression based on layout
const layout = stack_value.layout;
// Get the runtime type variable from the StackValue first, or fall back to expression type
const rt_var: types_mod.Var = if (stack_value.rt_var) |sv_rt_var|
sv_rt_var
else blk: {
// Fall back to expression type variable
const ct_var = ModuleEnv.varFrom(def.expr);
break :blk self.interpreter.translateTypeVar(self.env, ct_var) catch {
return error.NotImplemented;
};
};
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
// Check if it's a tag union type
@ -474,7 +463,8 @@ pub const ComptimeEvaluator = struct {
// Get variant_var and ext_var
const variant_var: types_mod.Var = bool_rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@ -495,33 +485,33 @@ pub const ComptimeEvaluator = struct {
/// Fold a tag union (represented as scalar, like Bool) to an e_zero_argument_tag expression
fn foldTagUnionScalar(self: *ComptimeEvaluator, def_idx: CIR.Def.Idx, expr_idx: CIR.Expr.Idx, stack_value: eval_mod.StackValue) !void {
_ = def_idx; // unused now that we get rt_var from stack_value
// The value is the tag index directly (scalar integer)
// The value is the tag index directly (scalar integer).
// The caller already verified layout.tag == .scalar, and scalar tag unions are always ints.
std.debug.assert(stack_value.layout.tag == .scalar and stack_value.layout.data.scalar.tag == .int);
const tag_index: usize = @intCast(stack_value.asI128());
// Get the runtime type variable from the StackValue (already validated in tryFoldConstant)
const rt_var = stack_value.rt_var orelse return error.NotImplemented;
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
// Get the list of tags for this union type
var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator);
defer tag_list.deinit();
try self.interpreter.appendUnionTags(rt_var, &tag_list);
if (tag_index >= tag_list.items.len) {
return error.NotImplemented;
}
// Tag index from the value must be valid
std.debug.assert(tag_index < tag_list.items.len);
const tag_info = tag_list.items[tag_index];
const arg_vars = self.interpreter.runtime_types.sliceVars(tag_info.args);
// Only fold zero-argument tags (like True, False)
if (arg_vars.len != 0) {
return error.NotImplemented;
}
// Scalar tag unions don't have payloads, so arg_vars must be empty
std.debug.assert(arg_vars.len == 0);
// Get variant_var and ext_var from type information
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
const variant_var: types_mod.Var = rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@ -546,17 +536,18 @@ pub const ComptimeEvaluator = struct {
var acc = try stack_value.asTuple(&self.interpreter.runtime_layout_store);
// Element 1 is the tag discriminant - getElement takes original index directly
const tag_field = try acc.getElement(1);
const tag_elem_rt_var = try self.interpreter.runtime_types.fresh();
const tag_field = try acc.getElement(1, tag_elem_rt_var);
// Extract tag index
if (tag_field.layout.tag != .scalar or tag_field.layout.data.scalar.tag != .int) {
return error.NotImplemented;
}
const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true };
const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = tag_elem_rt_var };
const tag_index: usize = @intCast(tmp_sv.asI128());
// Get the runtime type variable from the StackValue (already validated in tryFoldConstant)
const rt_var = stack_value.rt_var orelse return error.NotImplemented;
// Get the runtime type variable from the StackValue
const rt_var = stack_value.rt_var;
// Get the list of tags for this union type
var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator);
@ -578,7 +569,8 @@ pub const ComptimeEvaluator = struct {
// Get variant_var and ext_var from type information
const resolved = self.interpreter.runtime_types.resolveVar(rt_var);
const variant_var: types_mod.Var = rt_var;
var ext_var: types_mod.Var = @enumFromInt(0);
// ext_var will be set if this is a tag_union type
var ext_var: types_mod.Var = undefined;
if (resolved.desc.content == .structure) {
if (resolved.desc.content.structure == .tag_union) {
@ -999,7 +991,8 @@ pub const ComptimeEvaluator = struct {
}
// Build is_negative Bool
const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0);
const bool_rt_var = try self.interpreter.getCanonicalBoolRuntimeVar();
const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0, bool_rt_var);
if (is_neg_value.ptr) |ptr| {
@as(*u8, @ptrCast(@alignCast(ptr))).* = @intFromBool(num_lit_info.is_negative);
}
@ -1014,7 +1007,7 @@ pub const ComptimeEvaluator = struct {
// Build the Numeral record
// Ownership of before_list and after_list is transferred to this record
const num_literal_record = try self.buildNumeralRecord(is_neg_value, before_list, after_list, roc_ops);
const num_literal_record = try self.buildNumeralRecord(is_neg_value, before_list, after_list);
defer num_literal_record.decref(&self.interpreter.runtime_layout_store, roc_ops);
// Evaluate the from_numeral function to get a closure
@ -1135,7 +1128,7 @@ pub const ComptimeEvaluator = struct {
try self.interpreter.bindings.append(.{
.pattern_idx = params[0],
.value = num_literal_record,
.expr_idx = @enumFromInt(0),
.expr_idx = null, // No source expression for synthetic binding
.source_env = origin_env,
});
defer _ = self.interpreter.bindings.pop();
@ -1195,7 +1188,8 @@ pub const ComptimeEvaluator = struct {
const list_layout_idx = try self.interpreter.runtime_layout_store.insertList(layout_mod.Idx.u8);
const list_layout = self.interpreter.runtime_layout_store.getLayout(list_layout_idx);
const dest = try self.interpreter.pushRaw(list_layout, 0);
// rt_var not needed for List(U8) construction - only layout matters
const dest = try self.interpreter.pushRaw(list_layout, 0, undefined);
if (dest.ptr == null) return dest;
const header: *builtins.list.RocList = @ptrCast(@alignCast(dest.ptr.?));
@ -1229,7 +1223,6 @@ pub const ComptimeEvaluator = struct {
is_negative: eval_mod.StackValue,
digits_before_pt: eval_mod.StackValue,
digits_after_pt: eval_mod.StackValue,
roc_ops: *RocOps,
) !eval_mod.StackValue {
// Use precomputed idents from self.env for field names
const field_layouts = [_]layout_mod.Layout{
@ -1246,18 +1239,19 @@ pub const ComptimeEvaluator = struct {
const record_layout_idx = try self.interpreter.runtime_layout_store.putRecord(self.env, &field_layouts, &field_names);
const record_layout = self.interpreter.runtime_layout_store.getLayout(record_layout_idx);
var dest = try self.interpreter.pushRaw(record_layout, 0);
// rt_var not needed for Numeral record construction - only layout matters
var dest = try self.interpreter.pushRaw(record_layout, 0, undefined);
var accessor = try dest.asRecord(&self.interpreter.runtime_layout_store);
// Use self.env for field lookups since the record was built with self.env's idents
const is_neg_idx = accessor.findFieldIndex(self.env.idents.is_negative) orelse return error.OutOfMemory;
try accessor.setFieldByIndex(is_neg_idx, is_negative, roc_ops);
try accessor.setFieldByIndex(is_neg_idx, is_negative);
const before_pt_idx = accessor.findFieldIndex(self.env.idents.digits_before_pt) orelse return error.OutOfMemory;
try accessor.setFieldByIndex(before_pt_idx, digits_before_pt, roc_ops);
try accessor.setFieldByIndex(before_pt_idx, digits_before_pt);
const after_pt_idx = accessor.findFieldIndex(self.env.idents.digits_after_pt) orelse return error.OutOfMemory;
try accessor.setFieldByIndex(after_pt_idx, digits_after_pt, roc_ops);
try accessor.setFieldByIndex(after_pt_idx, digits_after_pt);
return dest;
}
@ -1319,7 +1313,8 @@ pub const ComptimeEvaluator = struct {
// Use layout store's env for field lookups since records use that env's idents
const layout_env = self.interpreter.runtime_layout_store.env;
const tag_idx = accessor.findFieldIndex(layout_env.idents.tag) orelse return true;
const tag_field = accessor.getFieldByIndex(tag_idx) catch return true;
const tag_rt_var = self.interpreter.runtime_types.fresh() catch return true;
const tag_field = accessor.getFieldByIndex(tag_idx, tag_rt_var) catch return true;
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tag_value = tag_field.asI128();
@ -1347,7 +1342,8 @@ pub const ComptimeEvaluator = struct {
var accessor = result.asTuple(&self.interpreter.runtime_layout_store) catch return true;
// Element 1 is tag discriminant - getElement takes original index directly
const tag_field = accessor.getElement(1) catch return true;
const tag_elem_rt_var = self.interpreter.runtime_types.fresh() catch return true;
const tag_field = accessor.getElement(1, tag_elem_rt_var) catch return true;
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tag_value = tag_field.asI128();
@ -1390,9 +1386,8 @@ pub const ComptimeEvaluator = struct {
fn extractInvalidNumeralMessage(
self: *ComptimeEvaluator,
try_accessor: eval_mod.StackValue.RecordAccessor,
region: base.Region,
_: base.Region,
) ![]const u8 {
_ = region;
// Get the payload field from the Try record
// Use layout store's env for field lookups
@ -1401,7 +1396,10 @@ pub const ComptimeEvaluator = struct {
// This should never happen - Try type must have a payload field
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (missing payload field)", .{});
};
const payload_field = try_accessor.getFieldByIndex(payload_idx) catch {
const payload_rt_var = self.interpreter.runtime_types.fresh() catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not create rt_var)", .{});
};
const payload_field = try_accessor.getFieldByIndex(payload_idx, payload_rt_var) catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not access payload)", .{});
};
@ -1416,7 +1414,10 @@ pub const ComptimeEvaluator = struct {
// Check if this has a payload field (for the Str)
// Single-tag unions might not have a "tag" field, so we look for payload first
if (err_accessor.findFieldIndex(layout_env.idents.payload)) |err_payload_idx| {
const err_payload = err_accessor.getFieldByIndex(err_payload_idx) catch {
const err_payload_rt_var = self.interpreter.runtime_types.fresh() catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: could not create rt_var for InvalidNumeral payload", .{});
};
const err_payload = err_accessor.getFieldByIndex(err_payload_idx, err_payload_rt_var) catch {
return try std.fmt.allocPrint(self.allocator, "Internal error: could not access InvalidNumeral payload", .{});
};
return try self.extractStrFromValue(err_payload);
@ -1426,7 +1427,8 @@ pub const ComptimeEvaluator = struct {
// Iterate through fields looking for a Str
var field_idx: usize = 0;
while (true) : (field_idx += 1) {
const field = err_accessor.getFieldByIndex(field_idx) catch break;
const iter_field_rt_var = self.interpreter.runtime_types.fresh() catch break;
const field = err_accessor.getFieldByIndex(field_idx, iter_field_rt_var) catch break;
if (field.layout.tag == .scalar and field.layout.data.scalar.tag == .str) {
return try self.extractStrFromValue(field);
}

File diff suppressed because it is too large

View file

@ -130,7 +130,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
const count = tup_acc.getElementCount();
if (count > 0) {
// Get tag index from the last element
const tag_elem = try tup_acc.getElement(count - 1);
// rt_var not needed for tag discriminant access (it's always an integer)
const tag_elem = try tup_acc.getElement(count - 1, undefined);
if (tag_elem.layout.tag == .scalar and tag_elem.layout.data.scalar.tag == .int) {
if (std.math.cast(usize, tag_elem.asI128())) |tag_idx| {
tag_index = tag_idx;
@ -150,26 +151,28 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
if (arg_vars.len == 1) {
// Single payload: first element
// Get the correct layout from the type variable, not the payload union layout
const payload_elem = try tup_acc.getElement(0);
const arg_var = arg_vars[0];
const payload_elem = try tup_acc.getElement(0, arg_var);
const layout_idx = try ctx.layout_store.addTypeVar(arg_var, ctx.type_scope);
const arg_layout = ctx.layout_store.getLayout(layout_idx);
const payload_value = StackValue{
.layout = arg_layout,
.ptr = payload_elem.ptr,
.is_initialized = payload_elem.is_initialized,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
try out.appendSlice(rendered);
} else {
// Multiple payloads: first element is a nested tuple containing all payload args
const payload_elem = try tup_acc.getElement(0);
// rt_var undefined for tuple access (we have the individual element types)
const payload_elem = try tup_acc.getElement(0, undefined);
if (payload_elem.layout.tag == .tuple) {
var payload_tup = try payload_elem.asTuple(ctx.layout_store);
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const elem_value = try payload_tup.getElement(j);
const elem_value = try payload_tup.getElement(j, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -189,9 +192,10 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
} else if (value.layout.tag == .record) {
var acc = try value.asRecord(ctx.layout_store);
if (acc.findFieldIndex(ctx.env.idents.tag)) |idx| {
const tag_field = try acc.getFieldByIndex(idx);
const field_rt = try ctx.runtime_types.fresh();
const tag_field = try acc.getFieldByIndex(idx, field_rt);
if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) {
const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true };
const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = undefined };
// Only treat as tag if value fits in usize (valid tag discriminants are small)
if (std.math.cast(usize, tmp_sv.asI128())) |tag_idx| {
tag_index = tag_idx;
@ -205,7 +209,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
errdefer out.deinit();
try out.appendSlice(tag_name);
if (acc.findFieldIndex(ctx.env.idents.payload)) |pidx| {
const payload = try acc.getFieldByIndex(pidx);
const field_rt = try ctx.runtime_types.fresh();
const payload = try acc.getFieldByIndex(pidx, field_rt);
const args_range = tags.items(.args)[tag_index];
const arg_vars = ctx.runtime_types.sliceVars(toVarRange(args_range));
if (arg_vars.len > 0) {
@ -218,6 +223,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = arg_layout,
.ptr = payload.ptr,
.is_initialized = payload.is_initialized,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
@ -237,6 +243,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = tuple_layout,
.ptr = payload.ptr,
.is_initialized = payload.is_initialized,
.rt_var = undefined, // not needed - type known from layout
};
if (tuple_size == 0 or payload.ptr == null) {
var j: usize = 0;
@ -247,6 +254,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = elem_layouts[j],
.ptr = null,
.is_initialized = true,
.rt_var = arg_vars[j],
},
arg_vars[j],
);
@ -259,7 +267,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch;
const elem_value = try tup_acc.getElement(sorted_idx);
const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -308,6 +316,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = arg_layout,
.ptr = payload_ptr,
.is_initialized = true,
.rt_var = arg_var,
};
const rendered = try renderValueRocWithType(ctx, payload_value, arg_var);
defer gpa.free(rendered);
@ -333,6 +342,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = elem_layouts[j],
.ptr = null,
.is_initialized = true,
.rt_var = arg_vars[j],
},
arg_vars[j],
);
@ -345,12 +355,13 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = tuple_layout,
.ptr = payload_ptr,
.is_initialized = true,
.rt_var = undefined, // not needed - type known from layout
};
var tup_acc = try tuple_value.asTuple(ctx.layout_store);
var j: usize = 0;
while (j < arg_vars.len) : (j += 1) {
const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch;
const elem_value = try tup_acc.getElement(sorted_idx);
const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]);
const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -383,6 +394,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
.layout = payload_layout,
.ptr = null,
.is_initialized = true,
.rt_var = payload_var,
};
switch (value.layout.tag) {
@ -464,7 +476,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types.
const idx = acc.findFieldIndex(f.name) orelse {
std.debug.panic("Record field not found in layout: type says field '{s}' exists but layout doesn't have it", .{name_text});
};
const field_val = try acc.getFieldByIndex(idx);
const field_rt = try ctx.runtime_types.fresh();
const field_val = try acc.getFieldByIndex(idx, field_rt);
const rendered = try renderValueRocWithType(ctx, field_val, f.var_);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -537,7 +550,8 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
const count = acc.getElementCount();
var i: usize = 0;
while (i < count) : (i += 1) {
const elem = try acc.getElement(i);
// rt_var undefined (no type info available in this context)
const elem = try acc.getElement(i, undefined);
const rendered = try renderValueRoc(ctx, elem);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -560,7 +574,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
while (i < len) : (i += 1) {
if (roc_list.bytes) |bytes| {
const elem_ptr: *anyopaque = @ptrCast(bytes + i * elem_size);
const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true };
const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true, .rt_var = undefined };
const rendered = try renderValueRoc(ctx, elem_val);
defer gpa.free(rendered);
try out.appendSlice(rendered);
@ -601,7 +615,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 {
const field_layout = ctx.layout_store.getLayout(fld.layout);
const base_ptr: [*]u8 = @ptrCast(@alignCast(value.ptr.?));
const field_ptr: *anyopaque = @ptrCast(base_ptr + offset);
const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true };
const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true, .rt_var = undefined };
const rendered = try renderValueRoc(ctx, field_val);
defer gpa.free(rendered);
try out.appendSlice(rendered);

View file

@ -144,9 +144,7 @@ fn testRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) void
realloc_args.answer = @ptrFromInt(@intFromPtr(new_base_ptr) + size_storage_bytes);
}
fn testRocDbg(dbg_args: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = dbg_args;
_ = env;
fn testRocDbg(_: *const RocDbg, _: *anyopaque) callconv(.c) void {
@panic("testRocDbg not implemented yet");
}

View file

@ -50,7 +50,7 @@ fn parseCheckAndEvalModule(src: []const u8) !struct {
var builtin_module = try builtin_loading.loadCompiledModule(gpa, compiled_builtins.builtin_bin, "Builtin", builtin_source);
errdefer builtin_module.deinit();
try module_env.initCIRFields(gpa, "test");
try module_env.initCIRFields("test");
const builtin_ctx: Check.BuiltinContext = .{
.module_name = try module_env.insertIdent(base.Ident.for_text("test")),
.bool_stmt = builtin_indices.bool_type,
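Note: `initCIRFields` no longer takes an allocator; the assumption, consistent with every call site in this diff, is that `ModuleEnv` already owns one. A minimal sketch of that refactor, with hypothetical field names:

const std = @import("std");

const ModuleEnv = struct {
    gpa: std.mem.Allocator,
    module_name: []const u8 = "",

    // Uses the env's own allocator, so callers stop passing gpa around.
    fn initCIRFields(self: *ModuleEnv, module_name: []const u8) !void {
        self.module_name = try self.gpa.dupe(u8, module_name);
    }
};

test "initCIRFields uses the env's own allocator" {
    var env = ModuleEnv{ .gpa = std.testing.allocator };
    try env.initCIRFields("test");
    defer env.gpa.free(env.module_name);
    try std.testing.expectEqualStrings("test", env.module_name);
}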

View file

@ -1,4 +1,5 @@
//! Tests for compile-time evaluation of top-level declarations
const std = @import("std");
const parse = @import("parse");
const types = @import("types");
@ -57,7 +58,7 @@ fn parseCheckAndEvalModuleWithName(src: []const u8, module_name: []const u8) !Ev
errdefer builtin_module.deinit();
// Initialize CIR fields in ModuleEnv
try module_env.initCIRFields(gpa, module_name);
try module_env.initCIRFields(module_name);
const builtin_ctx: Check.BuiltinContext = .{
.module_name = try module_env.insertIdent(base.Ident.for_text(module_name)),
.bool_stmt = builtin_indices.bool_type,
@ -136,7 +137,7 @@ fn parseCheckAndEvalModuleWithImport(src: []const u8, import_name: []const u8, i
errdefer builtin_module.deinit();
// Initialize CIR fields in ModuleEnv
try module_env.initCIRFields(gpa, "test");
try module_env.initCIRFields("test");
const builtin_ctx: Check.BuiltinContext = .{
.module_name = try module_env.insertIdent(base.Ident.for_text("test")),
.bool_stmt = builtin_indices.bool_type,
@ -1179,7 +1180,7 @@ test "comptime eval - U8 valid max value" {
var result = try parseCheckAndEvalModule(src);
defer cleanupEvalModule(&result);
const summary = try result.evaluator.evalAll();
_ = try result.evaluator.evalAll();
// Debug: print any problems
if (result.problems.len() > 0) {
std.debug.print("\nU8 valid max problems ({d}):\n", .{result.problems.len()});
@ -1191,8 +1192,6 @@ test "comptime eval - U8 valid max value" {
std.debug.print("\n", .{});
}
}
try testing.expectEqual(@as(u32, 1), summary.evaluated);
try testing.expectEqual(@as(u32, 0), summary.crashed);
try testing.expectEqual(@as(usize, 0), result.problems.len());
}
@ -1318,9 +1317,8 @@ test "comptime eval - U16 valid max value" {
var result = try parseCheckAndEvalModule(src);
defer cleanupEvalModule(&result);
const summary = try result.evaluator.evalAll();
_ = try result.evaluator.evalAll();
try testing.expectEqual(@as(usize, 0), result.problems.len());
_ = summary;
}
test "comptime eval - U16 too large with descriptive error" {

View file

@ -711,7 +711,7 @@ test "ModuleEnv serialization and interpreter evaluation" {
parse_ast.store.emptyScratch();
// Initialize CIR fields in ModuleEnv
try original_env.initCIRFields(gpa, "test");
try original_env.initCIRFields("test");
// Get Bool and Try statement indices from builtin module
const bool_stmt_in_builtin_module = builtin_indices.bool_type;
@ -1352,3 +1352,38 @@ test "nested match with Result type - regression" {
\\}
, .no_trace);
}
// ============================================================================
// Bug regression tests - segfault issues from bug reports
// ============================================================================
test "list equality - single element list - regression" {
// Regression test for segfault when comparing single element lists
// Bug report: `main! = || { _bool = [1] == [1] }`
try runExpectBool("[1] == [1]", true, .no_trace);
}
test "list equality - nested lists - regression" {
// Regression test for segfault when comparing nested lists
// Bug report: `_bool = [[1],[2]] == [[1],[2]]`
try runExpectBool("[[1],[2]] == [[1],[2]]", true, .no_trace);
}
test "list equality - single string element list - regression" {
// Regression test for crash trying to compare numeric scalars instead of string scalars
// Bug report: `main! = || { _bool = [""] == [""] }`
try runExpectBool("[\"\"] == [\"\"]", true, .no_trace);
}
test "if block with local bindings - regression" {
// Regression test for segfault in if block with local variable bindings
// Bug report: `main! = || { if True { x = 0 _y = x } }`
try runExpectInt(
\\if True {
\\ x = 0
\\ _y = x
\\ x
\\}
\\else 99
, 0, .no_trace);
}

View file

@ -331,7 +331,8 @@ pub fn runExpectTuple(src: []const u8, expected_elements: []const ExpectedElemen
for (expected_elements) |expected_element| {
// Get the element at the specified index
const element = try tuple_accessor.getElement(@intCast(expected_element.index));
// Use the result's rt_var since we're accessing elements of the evaluated expression
const element = try tuple_accessor.getElement(@intCast(expected_element.index), result.rt_var);
// Check if this is an integer or Dec
try std.testing.expect(element.layout.tag == .scalar);
@ -397,6 +398,7 @@ pub fn runExpectRecord(src: []const u8, expected_fields: []const ExpectedField,
.layout = field_layout,
.ptr = field_ptr,
.is_initialized = true,
.rt_var = result.rt_var, // use result's rt_var for field access
};
// Check if this is an integer or Dec
const int_val = if (field_layout.data.scalar.tag == .int) blk: {
@ -453,7 +455,8 @@ pub fn runExpectListI64(src: []const u8, expected_elements: []const i64, should_
try std.testing.expectEqual(expected_elements.len, list_accessor.len());
for (expected_elements, 0..) |expected_val, i| {
const element = try list_accessor.getElement(i);
// Use the result's rt_var since we're accessing elements of the evaluated expression
const element = try list_accessor.getElement(i, result.rt_var);
// Check if this is an integer
try std.testing.expect(element.layout.tag == .scalar);
@ -621,7 +624,7 @@ pub fn parseAndCanonicalizeExpr(allocator: std.mem.Allocator, source: []const u8
parse_ast.store.emptyScratch();
// Initialize CIR fields in ModuleEnv
try module_env.initCIRFields(allocator, "test");
try module_env.initCIRFields("test");
// Register Builtin as import so Bool, Try, and Str are available
_ = try module_env.imports.getOrPut(allocator, &module_env.common.strings, "Builtin");

View file

@ -52,7 +52,7 @@ fn parseCheckAndEvalModule(src: []const u8) !struct {
var builtin_module = try builtin_loading.loadCompiledModule(gpa, compiled_builtins.builtin_bin, "Builtin", builtin_source);
errdefer builtin_module.deinit();
try module_env.initCIRFields(gpa, "test");
try module_env.initCIRFields("test");
const builtin_ctx: Check.BuiltinContext = .{
.module_name = try module_env.insertIdent(base.Ident.for_text("test")),
.bool_stmt = builtin_indices.bool_type,

View file

@ -17,10 +17,10 @@ test "Stack.alloca basic allocation" {
var stack = try Stack.initCapacity(std.testing.allocator, 1024);
defer stack.deinit();
const ptr1 = try stack.alloca(10, @enumFromInt(0));
const ptr1 = try stack.alloca(10, .@"1");
try std.testing.expectEqual(@as(u32, 10), stack.used);
const ptr2 = try stack.alloca(20, @enumFromInt(0));
const ptr2 = try stack.alloca(20, .@"1");
try std.testing.expectEqual(@as(u32, 30), stack.used);
// The pointers should be different
@ -42,7 +42,7 @@ test "Stack.alloca with alignment" {
// Create initial misalignment
if (misalign > 0) {
_ = try stack.alloca(@intCast(misalign), @enumFromInt(0));
_ = try stack.alloca(@intCast(misalign), .@"1");
}
// Test each alignment with the current misalignment
@ -70,7 +70,7 @@ test "Stack.alloca with alignment" {
stack.used = 0;
for (alignments) |alignment| {
// Create some misalignment
_ = try stack.alloca(3, @enumFromInt(0));
_ = try stack.alloca(3, .@"1");
const before_used = stack.used;
const ptr = try stack.alloca(alignment * 2, @enumFromInt(std.math.log2_int(u32, alignment)));
@ -88,10 +88,10 @@ test "Stack.alloca overflow" {
defer stack.deinit();
// This should succeed
_ = try stack.alloca(50, @enumFromInt(0));
_ = try stack.alloca(50, .@"1");
// This should fail (would total 150 bytes)
try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, @enumFromInt(0)));
try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, .@"1"));
// Stack should still be in valid state
try std.testing.expectEqual(@as(u32, 50), stack.used);
@ -102,14 +102,14 @@ test "Stack.restore" {
defer stack.deinit();
const checkpoint = stack.next();
_ = try stack.alloca(100, @enumFromInt(0));
_ = try stack.alloca(100, .@"1");
try std.testing.expectEqual(@as(u32, 100), stack.used);
stack.restore(checkpoint);
try std.testing.expectEqual(@as(u32, 0), stack.used);
// Allocate again after restore
const ptr1 = try stack.alloca(50, @enumFromInt(0));
const ptr1 = try stack.alloca(50, .@"1");
try std.testing.expectEqual(@intFromPtr(checkpoint), @intFromPtr(ptr1));
}
@ -120,7 +120,7 @@ test "Stack.isEmpty" {
try std.testing.expect(stack.isEmpty());
try std.testing.expectEqual(@as(u32, 100), stack.available());
_ = try stack.alloca(30, @enumFromInt(0));
_ = try stack.alloca(30, .@"1");
try std.testing.expect(!stack.isEmpty());
try std.testing.expectEqual(@as(u32, 70), stack.available());
}
@ -129,8 +129,8 @@ test "Stack zero-size allocation" {
var stack = try Stack.initCapacity(std.testing.allocator, 100);
defer stack.deinit();
const ptr1 = try stack.alloca(0, @enumFromInt(0));
const ptr2 = try stack.alloca(0, @enumFromInt(0));
const ptr1 = try stack.alloca(0, .@"1");
const ptr2 = try stack.alloca(0, .@"1");
// Zero-size allocations should return the same pointer
try std.testing.expectEqual(@intFromPtr(ptr1), @intFromPtr(ptr2));
@ -147,8 +147,8 @@ test "Stack memory is aligned to max_roc_alignment" {
try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value);
// Also verify after some allocations
_ = try stack.alloca(100, @enumFromInt(0));
_ = try stack.alloca(200, @enumFromInt(0));
_ = try stack.alloca(100, .@"1");
_ = try stack.alloca(200, .@"1");
// The start pointer should still be aligned
try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value);
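Note: `std.mem.Alignment` is a log2-encoded enum, so the old `@enumFromInt(0)` and the new `.@"1"` are the same value; the named literal just says "1-byte alignment" instead of a magic zero. A quick check of that encoding:

const std = @import("std");

test "std.mem.Alignment is log2-encoded" {
    // .@"1" is log2(1) = 0 (what @enumFromInt(0) was spelling by hand);
    // .@"8" is log2(8) = 3.
    try std.testing.expectEqual(@as(usize, 0), @intFromEnum(std.mem.Alignment.@"1"));
    try std.testing.expectEqual(@as(usize, 3), @intFromEnum(std.mem.Alignment.@"8"));
}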

View file

@ -70,9 +70,7 @@ fn testRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) void
realloc_args.answer = @ptrFromInt(@intFromPtr(new_slice.ptr) + size_storage_bytes);
}
fn testRocDbg(dbg_args: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = dbg_args;
_ = env;
fn testRocDbg(_: *const RocDbg, _: *anyopaque) callconv(.c) void {
@panic("testRocDbg not implemented yet");
}

View file

@ -34,7 +34,7 @@
testcmd() {
zig build snapshot && zig build test
}
export -f testscmd
export -f testcmd
fmtcmd() {
zig build fmt

View file

@ -1265,6 +1265,17 @@ const Formatter = struct {
}
_ = try fmt.formatExpr(d.expr);
},
.inspect => |i| {
try fmt.pushAll("inspect");
const expr_node = fmt.nodeRegion(@intFromEnum(i.expr));
if (multiline and try fmt.flushCommentsBefore(expr_node.start)) {
fmt.curr_indent += 1;
try fmt.pushIndent();
} else {
try fmt.push(' ');
}
_ = try fmt.formatExpr(i.expr);
},
.block => |b| {
try fmt.formatBlock(b);
},
@ -1342,6 +1353,11 @@ const Formatter = struct {
region = i.region;
try fmt.formatIdent(i.ident_tok, null);
},
.var_ident => |i| {
region = i.region;
try fmt.pushAll("var ");
try fmt.formatIdent(i.ident_tok, null);
},
.tag => |t| {
region = t.region;

View file

@ -288,58 +288,43 @@ fn writeFileDefault(path: []const u8, contents: []const u8) WriteError!void {
// Testing implementations that fail tests if called
fn fileExistsTesting(absolute_path: []const u8) OpenError!bool {
_ = absolute_path;
fn fileExistsTesting(_: []const u8) OpenError!bool {
@panic("fileExists should not be called in this test");
}
fn readFileTesting(relative_path: []const u8, allocator: Allocator) ReadError![]const u8 {
_ = relative_path;
_ = allocator;
fn readFileTesting(_: []const u8, _: Allocator) ReadError![]const u8 {
@panic("readFile should not be called in this test");
}
fn readFileIntoTesting(path: []const u8, buffer: []u8) ReadError!usize {
_ = path;
_ = buffer;
fn readFileIntoTesting(_: []const u8, _: []u8) ReadError!usize {
@panic("readFileInto should not be called in this test");
}
fn writeFileTesting(path: []const u8, contents: []const u8) WriteError!void {
_ = path;
_ = contents;
fn writeFileTesting(_: []const u8, _: []const u8) WriteError!void {
@panic("writeFile should not be called in this test");
}
fn openDirTesting(absolute_path: []const u8) OpenError!Dir {
_ = absolute_path;
fn openDirTesting(_: []const u8) OpenError!Dir {
@panic("openDir should not be called in this test");
}
fn dirNameTesting(absolute_path: []const u8) ?[]const u8 {
_ = absolute_path;
fn dirNameTesting(_: []const u8) ?[]const u8 {
@panic("dirName should not be called in this test");
}
fn baseNameTesting(absolute_path: []const u8) ?[]const u8 {
_ = absolute_path;
fn baseNameTesting(_: []const u8) ?[]const u8 {
@panic("baseName should not be called in this test");
}
fn canonicalizeTesting(root_relative_path: []const u8, allocator: Allocator) CanonicalizeError![]const u8 {
_ = root_relative_path;
_ = allocator;
fn canonicalizeTesting(_: []const u8, _: Allocator) CanonicalizeError![]const u8 {
@panic("canonicalize should not be called in this test");
}
fn makePathTesting(path: []const u8) MakePathError!void {
_ = path;
fn makePathTesting(_: []const u8) MakePathError!void {
@panic("makePath should not be called in this test");
}
fn renameTesting(old_path: []const u8, new_path: []const u8) RenameError!void {
_ = old_path;
_ = new_path;
fn renameTesting(_: []const u8, _: []const u8) RenameError!void {
@panic("rename should not be called in this test");
}

View file

@ -217,8 +217,7 @@ pub fn allocator(self: *SharedMemoryAllocator) std.mem.Allocator {
};
}
fn alloc(ctx: *anyopaque, len: usize, ptr_align: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
_ = ret_addr;
fn alloc(ctx: *anyopaque, len: usize, ptr_align: std.mem.Alignment, _: usize) ?[*]u8 {
const self: *SharedMemoryAllocator = @ptrCast(@alignCast(ctx));
const alignment = @as(usize, 1) << @intFromEnum(ptr_align);
@ -248,33 +247,18 @@ fn alloc(ctx: *anyopaque, len: usize, ptr_align: std.mem.Alignment, ret_addr: us
}
}
fn resize(ctx: *anyopaque, buf: []u8, buf_align: std.mem.Alignment, new_len: usize, ret_addr: usize) bool {
_ = ctx;
_ = buf_align;
_ = ret_addr;
fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
// Simple bump allocator doesn't support resize
// Could be implemented by checking if this is the last allocation
return new_len <= buf.len;
}
fn free(ctx: *anyopaque, buf: []u8, buf_align: std.mem.Alignment, ret_addr: usize) void {
_ = ctx;
_ = buf;
_ = buf_align;
_ = ret_addr;
fn free(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize) void {
// Simple bump allocator doesn't support free
// Memory is only freed when the entire region is unmapped
}
fn remap(ctx: *anyopaque, old_mem: []u8, old_align: std.mem.Alignment, new_size: usize, ret_addr: usize) ?[*]u8 {
_ = ctx;
_ = old_mem;
_ = old_align;
_ = new_size;
_ = ret_addr;
fn remap(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) ?[*]u8 {
// Simple bump allocator doesn't support remapping
return null;
}
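Note: the functions above form the full `std.mem.Allocator` vtable of a bump allocator, where everything except `alloc` is (nearly) a no-op. A self-contained fixed-buffer version of the same shape, with illustrative names:

const std = @import("std");

const Bump = struct {
    buf: []u8,
    used: usize = 0,

    fn alloc(ctx: *anyopaque, len: usize, ptr_align: std.mem.Alignment, _: usize) ?[*]u8 {
        const self: *Bump = @ptrCast(@alignCast(ctx));
        // Align the offset; assumes the backing buffer is suitably aligned.
        const alignment = @as(usize, 1) << @intFromEnum(ptr_align);
        const start = std.mem.alignForward(usize, self.used, alignment);
        if (start + len > self.buf.len) return null;
        self.used = start + len;
        return self.buf.ptr + start;
    }

    fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
        return new_len <= buf.len; // shrink-in-place only
    }

    fn remap(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize, _: usize) ?[*]u8 {
        return null; // no remapping support
    }

    fn free(_: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize) void {
        // memory is reclaimed only when the whole buffer is released
    }

    fn allocator(self: *Bump) std.mem.Allocator {
        return .{ .ptr = self, .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        } };
    }
};

test "bump allocator hands out distinct regions" {
    var storage: [64]u8 = undefined;
    var bump = Bump{ .buf = &storage };
    const a = bump.allocator();
    const x = try a.alloc(u8, 8);
    const y = try a.alloc(u8, 8);
    try std.testing.expect(x.ptr != y.ptr);
}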

View file

@ -100,9 +100,9 @@ fn readFdInfoFromFile(allocator: std.mem.Allocator) CoordinationError!FdInfo {
};
const dir_basename = std.fs.path.basename(exe_dir);
// Verify it has the expected prefix
if (!std.mem.startsWith(u8, dir_basename, "roc-tmp-")) {
std.log.err("Unexpected directory name: expected 'roc-tmp-*', got '{s}'", .{dir_basename});
// Verify it has the expected prefix (roc-{pid} or roc-{pid}-{suffix})
if (!std.mem.startsWith(u8, dir_basename, "roc-")) {
std.log.err("Unexpected directory name: expected 'roc-*', got '{s}'", .{dir_basename});
return error.FdInfoReadFailed;
}
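Note: the relaxed check accepts both directory shapes the comment names. A quick illustration with hypothetical directory names:

const std = @import("std");

test "accepts both roc-{pid} and roc-{pid}-{suffix} directory names" {
    try std.testing.expect(std.mem.startsWith(u8, "roc-1234", "roc-"));
    try std.testing.expect(std.mem.startsWith(u8, "roc-1234-cache", "roc-"));
    try std.testing.expect(!std.mem.startsWith(u8, "tmp-1234", "roc-"));
}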

View file

@ -1121,20 +1121,7 @@ pub const Store = struct {
current = self.types_store.resolveVar(last_pending_field.var_);
continue :outer;
},
.fn_pure => |func| {
_ = func;
// Create empty captures layout for generic function type
const empty_captures_idx = try self.getEmptyRecordLayout();
break :flat_type Layout.closure(empty_captures_idx);
},
.fn_effectful => |func| {
_ = func;
// Create empty captures layout for generic function type
const empty_captures_idx = try self.getEmptyRecordLayout();
break :flat_type Layout.closure(empty_captures_idx);
},
.fn_unbound => |func| {
_ = func;
.fn_pure, .fn_effectful, .fn_unbound => {
// Create empty captures layout for generic function type
const empty_captures_idx = try self.getEmptyRecordLayout();
break :flat_type Layout.closure(empty_captures_idx);
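Note: Zig allows identical switch prongs to be merged into one, which also removes the need for the `_ = func;` discards. A minimal demonstration:

const std = @import("std");

const FnKind = enum { fn_pure, fn_effectful, fn_unbound, not_a_fn };

fn isFunction(kind: FnKind) bool {
    return switch (kind) {
        // One prong, three tags: no per-tag duplication, no unused capture.
        .fn_pure, .fn_effectful, .fn_unbound => true,
        .not_a_fn => false,
    };
}

test "merged prongs behave identically" {
    try std.testing.expect(isFunction(.fn_pure));
    try std.testing.expect(isFunction(.fn_effectful));
    try std.testing.expect(!isFunction(.not_a_fn));
}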
@ -1285,7 +1272,7 @@ pub const Store = struct {
// and append our variant layouts. This ensures our variants are contiguous.
const variants_start: u32 = @intCast(self.tag_union_variants.len());
for (variant_layout_indices, 0..) |variant_layout_idx, variant_i| {
for (variant_layout_indices) |variant_layout_idx| {
const variant_layout = self.getLayout(variant_layout_idx);
const variant_size = self.layoutSize(variant_layout);
const variant_alignment = variant_layout.alignment(self.targetUsize());
@ -1298,7 +1285,6 @@ pub const Store = struct {
_ = try self.tag_union_variants.append(self.env.gpa, .{
.payload_layout = variant_layout_idx,
});
_ = variant_i;
}
// Calculate discriminant info

View file

@ -190,7 +190,7 @@ pub const SyntaxChecker = struct {
};
}
fn rangeFromReport(self: *SyntaxChecker, rep: reporting.Report) Diagnostics.Range {
fn rangeFromReport(_: *SyntaxChecker, rep: reporting.Report) Diagnostics.Range {
var start = Diagnostics.Position{ .line = 0, .character = 0 };
var end = Diagnostics.Position{ .line = 0, .character = 0 };
@ -220,7 +220,6 @@ pub const SyntaxChecker = struct {
}
}
_ = self;
return .{ .start = start, .end = end };
}

View file

@ -1000,7 +1000,7 @@ pub const Statement = union(enum) {
try tree.pushStaticAtom("exposing");
const attrs2 = tree.beginNode();
for (ast.store.exposedItemSlice(import.exposes)) |e| {
try ast.store.getExposedItem(e).pushToSExprTree(gpa, env, ast, tree);
try ast.store.getExposedItem(e).pushToSExprTree(env, ast, tree);
}
try tree.endNode(exposed, attrs2);
}
@ -1242,6 +1242,11 @@ pub const Pattern = union(enum) {
ident_tok: Token.Idx,
region: TokenizedRegion,
},
/// A mutable variable binding in a pattern, e.g., `var $x` in `|var $x, y|`
var_ident: struct {
ident_tok: Token.Idx,
region: TokenizedRegion,
},
tag: struct {
tag_tok: Token.Idx,
args: Pattern.Span,
@ -1305,6 +1310,7 @@ pub const Pattern = union(enum) {
pub fn to_tokenized_region(self: @This()) TokenizedRegion {
return switch (self) {
.ident => |p| p.region,
.var_ident => |p| p.region,
.tag => |p| p.region,
.int => |p| p.region,
.frac => |p| p.region,
@ -1339,6 +1345,21 @@ pub const Pattern = union(enum) {
try tree.endNode(begin, attrs);
},
.var_ident => |ident| {
const begin = tree.beginNode();
try tree.pushStaticAtom("p-var-ident");
try ast.appendRegionInfoToSexprTree(env, tree, ident.region);
// Add raw attribute
const raw_begin = tree.beginNode();
try tree.pushStaticAtom("raw");
try tree.pushString(ast.resolve(ident.ident_tok));
const attrs2 = tree.beginNode();
try tree.endNode(raw_begin, attrs2);
const attrs = tree.beginNode();
try tree.endNode(begin, attrs);
},
.tag => |tag| {
const begin = tree.beginNode();
try tree.pushStaticAtom("p-tag");
@ -1641,7 +1662,7 @@ pub const Header = union(enum) {
// Could push region info for provides_coll here if desired
for (provides_items) |item_idx| {
const item = ast.store.getExposedItem(item_idx);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(provides_begin, attrs2);
@ -1677,7 +1698,7 @@ pub const Header = union(enum) {
const attrs2 = tree.beginNode();
for (ast.store.exposedItemSlice(.{ .span = exposes.span })) |exposed| {
const item = ast.store.getExposedItem(exposed);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(exposes_begin, attrs2);
@ -1697,7 +1718,7 @@ pub const Header = union(enum) {
const attrs2 = tree.beginNode();
for (ast.store.exposedItemSlice(.{ .span = exposes.span })) |exposed| {
const item = ast.store.getExposedItem(exposed);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(exposes_begin, attrs2);
@ -1732,7 +1753,7 @@ pub const Header = union(enum) {
// Could push region info for rigids here if desired
for (ast.store.exposedItemSlice(.{ .span = rigids.span })) |exposed| {
const item = ast.store.getExposedItem(exposed);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(rigids_begin, attrs3);
@ -1748,7 +1769,7 @@ pub const Header = union(enum) {
const attrs4 = tree.beginNode();
for (ast.store.exposedItemSlice(.{ .span = exposes.span })) |exposed| {
const item = ast.store.getExposedItem(exposed);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(exposes_begin, attrs4);
@ -1793,7 +1814,7 @@ pub const Header = union(enum) {
const attrs2 = tree.beginNode();
for (ast.store.exposedItemSlice(.{ .span = exposes.span })) |exposed| {
const item = ast.store.getExposedItem(exposed);
try item.pushToSExprTree(gpa, env, ast, tree);
try item.pushToSExprTree(env, ast, tree);
}
try tree.endNode(exposes_begin, attrs2);
@ -1866,9 +1887,7 @@ pub const ExposedItem = union(enum) {
pub const Idx = enum(u32) { _ };
pub const Span = struct { span: base.DataSpan };
pub fn pushToSExprTree(self: @This(), gpa: std.mem.Allocator, env: *const CommonEnv, ast: *const AST, tree: *SExprTree) std.mem.Allocator.Error!void {
_ = gpa;
pub fn pushToSExprTree(self: @This(), env: *const CommonEnv, ast: *const AST, tree: *SExprTree) std.mem.Allocator.Error!void {
switch (self) {
.lower_ident => |i| {
const begin = tree.beginNode();

View file

@ -250,6 +250,10 @@ pub const Tag = enum {
/// * lhs - LHS DESCRIPTION
/// * rhs - RHS DESCRIPTION
ident_patt,
/// Mutable variable binding in pattern
/// Example: `var $x` in `|var $x, y|`
/// * main_token - the identifier token
var_ident_patt,
/// DESCRIPTION
/// Example: EXAMPLE
/// * lhs - LHS DESCRIPTION

View file

@ -21,6 +21,9 @@ const sexpr = base.sexpr;
/// packing optional data into u32 fields where 0 would otherwise be ambiguous.
const OPTIONAL_VALUE_OFFSET: u32 = 1;
/// The root node is always stored at index 0 in the node list.
pub const root_node_idx: Node.List.Idx = .first;
const NodeStore = @This();
gpa: std.mem.Allocator,
@ -46,7 +49,7 @@ pub const AST_HEADER_NODE_COUNT = 6;
/// Count of the statement nodes in the AST
pub const AST_STATEMENT_NODE_COUNT = 13;
/// Count of the pattern nodes in the AST
pub const AST_PATTERN_NODE_COUNT = 14;
pub const AST_PATTERN_NODE_COUNT = 15;
/// Count of the type annotation nodes in the AST
pub const AST_TYPE_ANNO_NODE_COUNT = 10;
/// Count of the expression nodes in the AST
@ -166,7 +169,7 @@ pub fn addMalformed(store: *NodeStore, comptime T: type, reason: Diagnostic.Tag,
/// Adds a file node to the store.
pub fn addFile(store: *NodeStore, file: AST.File) std.mem.Allocator.Error!void {
try store.extra_data.append(store.gpa, @intFromEnum(file.header));
store.nodes.set(@enumFromInt(0), .{
store.nodes.set(root_node_idx, .{
.tag = .root,
.main_token = 0,
.data = .{ .lhs = file.statements.span.start, .rhs = file.statements.span.len },
@ -478,6 +481,11 @@ pub fn addPattern(store: *NodeStore, pattern: AST.Pattern) std.mem.Allocator.Err
node.region = i.region;
node.main_token = i.ident_tok;
},
.var_ident => |i| {
node.tag = .var_ident_patt;
node.region = i.region;
node.main_token = i.ident_tok;
},
.tag => |t| {
const data_start = @as(u32, @intCast(store.extra_data.items.len));
try store.extra_data.append(store.gpa, t.args.span.len);
@ -1014,7 +1022,7 @@ pub fn addTypeAnno(store: *NodeStore, anno: AST.TypeAnno) std.mem.Allocator.Erro
/// TODO
pub fn getFile(store: *const NodeStore) AST.File {
const node = store.nodes.get(@enumFromInt(0));
const node = store.nodes.get(root_node_idx);
const header_ed_idx = @as(usize, @intCast(node.data.lhs + node.data.rhs));
const header = store.extra_data.items[header_ed_idx];
return .{
@ -1387,6 +1395,12 @@ pub fn getPattern(store: *const NodeStore, pattern_idx: AST.Pattern.Idx) AST.Pat
.region = node.region,
} };
},
.var_ident_patt => {
return .{ .var_ident = .{
.ident_tok = node.main_token,
.region = node.region,
} };
},
.tag_patt => {
const args_start = node.data.lhs;

View file

@ -197,7 +197,7 @@ pub fn parseFile(self: *Parser) Error!void {
self.store.emptyScratch();
try self.store.addFile(.{
.header = @as(AST.Header.Idx, @enumFromInt(0)),
.header = undefined, // overwritten below after parseHeader()
.statements = AST.Statement.Span{ .span = base.DataSpan.empty() },
.region = AST.TokenizedRegion.empty(),
});
@ -1452,6 +1452,19 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) Error!AST.Pattern
.region = .{ .start = start, .end = self.pos },
} });
},
.KwVar => {
// Mutable variable binding in pattern, e.g., `var $x`
self.advance();
if (self.peek() != .LowerIdent) {
return try self.pushMalformed(AST.Pattern.Idx, .var_must_have_ident, self.pos);
}
const ident_tok = self.pos;
self.advance();
pattern = try self.store.addPattern(.{ .var_ident = .{
.ident_tok = ident_tok,
.region = .{ .start = start, .end = self.pos },
} });
},
},
.NamedUnderscore => {
self.advance();
pattern = try self.store.addPattern(.{ .ident = .{
@ -2069,9 +2082,6 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) Error!AST.Expr.Idx {
},
}
lookahead_pos += 1;
// Limit lookahead to prevent infinite loops
if (lookahead_pos > saved_pos + 100) break;
}
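Note: a standalone analogue of the new `.KwVar` branch above, with illustrative token names: `var` must be followed by a lower identifier, otherwise the pattern is malformed.

const std = @import("std");

const Tok = enum { kw_var, lower_ident, other };
const PatternKind = enum { var_ident, malformed };

fn classifyVarPattern(toks: []const Tok) PatternKind {
    std.debug.assert(toks.len > 0 and toks[0] == .kw_var);
    return if (toks.len > 1 and toks[1] == .lower_ident) .var_ident else .malformed;
}

test "var must be followed by an identifier" {
    try std.testing.expect(classifyVarPattern(&.{ .kw_var, .lower_ident }) == .var_ident);
    try std.testing.expect(classifyVarPattern(&.{ .kw_var, .other }) == .malformed);
}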
}

View file

@ -40,7 +40,7 @@ fn runParse(env: *CommonEnv, gpa: std.mem.Allocator, parserCall: *const fn (*Par
const msg_slice = messages[0..];
var tokenizer = try tokenize.Tokenizer.init(env, gpa, env.source, msg_slice);
try tokenizer.tokenize(gpa);
var result = tokenizer.finishAndDeinit(gpa);
var result = tokenizer.finishAndDeinit();
var parser = try Parser.init(result.tokens, gpa);
defer parser.deinit();

View file

@ -281,6 +281,12 @@ test "NodeStore round trip - Pattern" {
.region = rand_region(),
},
});
try patterns.append(gpa, AST.Pattern{
.var_ident = .{
.ident_tok = rand_token_idx(),
.region = rand_region(),
},
});
try patterns.append(gpa, AST.Pattern{
.tag = .{
.args = AST.Pattern.Span{ .span = rand_span() },

View file

@ -1109,7 +1109,7 @@ pub const Tokenizer = struct {
self.string_interpolation_stack.deinit();
}
pub fn finishAndDeinit(self: *Tokenizer, _: std.mem.Allocator) TokenOutput {
pub fn finishAndDeinit(self: *Tokenizer) TokenOutput {
self.string_interpolation_stack.deinit();
const actual_message_count = @min(self.cursor.message_count, self.cursor.messages.len);
return .{
@ -1252,7 +1252,7 @@ pub const Tokenizer = struct {
} else {
self.cursor.pos += 1;
// Look at what follows the minus to determine if it's unary
const tokenType: Token.Tag = if (self.canFollowUnaryMinus(n)) .OpUnaryMinus else .OpBinaryMinus;
const tokenType: Token.Tag = if (canFollowUnaryMinus(n)) .OpUnaryMinus else .OpBinaryMinus;
try self.pushTokenNormalHere(gpa, tokenType, start);
}
} else {
@ -1569,8 +1569,7 @@ pub const Tokenizer = struct {
}
/// Determines if a character can follow a unary minus (i.e., can start an expression)
fn canFollowUnaryMinus(self: *const Tokenizer, c: u8) bool {
_ = self;
fn canFollowUnaryMinus(c: u8) bool {
return switch (c) {
// Identifiers
'a'...'z', 'A'...'Z', '_' => true,
@ -1684,7 +1683,7 @@ pub fn checkTokenizerInvariants(gpa: std.mem.Allocator, input: []const u8, debug
var messages: [32]Diagnostic = undefined;
var tokenizer = try Tokenizer.init(&env, gpa, input, &messages);
try tokenizer.tokenize(gpa);
var output = tokenizer.finishAndDeinit(gpa);
var output = tokenizer.finishAndDeinit();
defer output.tokens.deinit(gpa);
if (debug) {
@ -1719,7 +1718,7 @@ pub fn checkTokenizerInvariants(gpa: std.mem.Allocator, input: []const u8, debug
// Second tokenization.
tokenizer = try Tokenizer.init(&env, gpa, buf2.items, &messages);
try tokenizer.tokenize(gpa);
var output2 = tokenizer.finishAndDeinit(gpa);
var output2 = tokenizer.finishAndDeinit();
defer output2.tokens.deinit(gpa);
if (debug) {

View file

@ -94,15 +94,12 @@ fn readFileIntoWasm(path: []const u8, buffer: []u8) Filesystem.ReadError!usize {
return error.FileNotFound;
}
fn writeFileWasm(path: []const u8, contents: []const u8) Filesystem.WriteError!void {
_ = path;
_ = contents;
fn writeFileWasm(_: []const u8, _: []const u8) Filesystem.WriteError!void {
// Writing files is not supported in WASM playground
return error.AccessDenied;
}
fn openDirWasm(absolute_path: []const u8) Filesystem.OpenError!Filesystem.Dir {
_ = absolute_path;
fn openDirWasm(_: []const u8) Filesystem.OpenError!Filesystem.Dir {
// Directory operations are not supported in WASM playground
return error.FileNotFound;
}
@ -131,15 +128,12 @@ fn canonicalizeWasm(root_relative_path: []const u8, allocator: Allocator) Filesy
return allocator.dupe(u8, root_relative_path) catch handleOom();
}
fn makePathWasm(path: []const u8) Filesystem.MakePathError!void {
_ = path;
fn makePathWasm(_: []const u8) Filesystem.MakePathError!void {
// Directory creation is not supported in WASM playground
return error.AccessDenied;
}
fn renameWasm(old_path: []const u8, new_path: []const u8) Filesystem.RenameError!void {
_ = old_path;
_ = new_path;
fn renameWasm(_: []const u8, _: []const u8) Filesystem.RenameError!void {
// File operations are not supported in WASM playground
return error.AccessDenied;
}

View file

@ -442,9 +442,8 @@ fn wasmRocRealloc(realloc_args: *builtins.host_abi.RocRealloc, _: *anyopaque) ca
}
}
fn wasmRocDbg(dbg_args: *const builtins.host_abi.RocDbg, _: *anyopaque) callconv(.c) void {
fn wasmRocDbg(_: *const builtins.host_abi.RocDbg, _: *anyopaque) callconv(.c) void {
// No-op in WASM playground
_ = dbg_args;
}
fn wasmRocExpectFailed(expect_failed_args: *const builtins.host_abi.RocExpectFailed, env: *anyopaque) callconv(.c) void {
@ -934,7 +933,7 @@ fn compileSource(source: []const u8) !CompilerStageData {
// Stage 2: Canonicalization (always run, even with parse errors)
// The canonicalizer handles malformed parse nodes and continues processing
const env = result.module_env;
try env.initCIRFields(allocator, "main");
try env.initCIRFields("main");
// Load builtin modules and inject Bool and Result type declarations
// (following the pattern from eval.zig and TestEnv.zig)
@ -1183,8 +1182,7 @@ const ResponseWriter = struct {
return result;
}
fn drain(w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize {
_ = splat;
fn drain(w: *std.Io.Writer, data: []const []const u8, _: usize) std.Io.Writer.Error!usize {
const self: *Self = @alignCast(@fieldParentPtr("interface", w));
var total: usize = 0;
for (data) |bytes| {
@ -2078,10 +2076,9 @@ fn writeUnbundleErrorResponse(response: []u8, err: unbundle.UnbundleError) u8 {
error.OutOfMemory => "Out of memory",
};
const json = std.fmt.bufPrint(response, "{{\"success\":false,\"error\":\"{s}\"}}", .{error_msg}) catch {
_ = std.fmt.bufPrint(response, "{{\"success\":false,\"error\":\"{s}\"}}", .{error_msg}) catch {
return 1; // Response buffer too small
};
_ = json;
return 2; // Unbundle error
}

View file

@ -547,7 +547,7 @@ pub const Repl = struct {
// Create CIR
const cir = module_env; // CIR is now just ModuleEnv
try cir.initCIRFields(self.allocator, "repl");
try cir.initCIRFields("repl");
// Get Bool, Try, and Str statement indices from the IMPORTED modules (not copied!)
// These refer to the actual statements in the Builtin module
@ -749,7 +749,7 @@ pub const Repl = struct {
// Create CIR
const cir = module_env;
try cir.initCIRFields(self.allocator, "repl");
try cir.initCIRFields("repl");
// Populate all auto-imported builtin types using the shared helper to keep behavior consistent
var module_envs_map = std.AutoHashMap(base.Ident.Idx, can.Can.AutoImportedType).init(self.allocator);
@ -855,16 +855,7 @@ pub const Repl = struct {
try self.generateAndStoreDebugHtml(module_env, final_expr_idx);
}
const output = blk: {
if (result.rt_var) |rt_var| {
break :blk try interpreter.renderValueRocWithType(result, rt_var, self.roc_ops);
}
const expr_ct_var = can.ModuleEnv.varFrom(final_expr_idx);
const expr_rt_var = interpreter.translateTypeVar(module_env, expr_ct_var) catch {
break :blk try interpreter.renderValueRoc(result);
};
break :blk try interpreter.renderValueRocWithType(result, expr_rt_var, self.roc_ops);
};
const output = try interpreter.renderValueRocWithType(result, result.rt_var, self.roc_ops);
result.decref(&interpreter.runtime_layout_store, self.roc_ops);
return .{ .expression = output };

View file

@ -309,7 +309,7 @@ test "Repl - minimal interpreter integration" {
// Step 3: Create CIR
const cir = &module_env; // CIR is now just ModuleEnv
try cir.initCIRFields(gpa, "test");
try cir.initCIRFields("test");
// Get Bool, Try, and Str statement indices from the builtin module
const bool_stmt_in_builtin_module = builtin_indices.bool_type;

View file

@ -132,9 +132,7 @@ fn testRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) void
realloc_args.answer = @ptrFromInt(@intFromPtr(new_slice.ptr) + size_storage_bytes);
}
fn testRocDbg(dbg_args: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = dbg_args;
_ = env;
fn testRocDbg(_: *const RocDbg, _: *anyopaque) callconv(.c) void {
@panic("testRocDbg not implemented yet");
}

View file

@ -1129,7 +1129,7 @@ fn processSnapshotContent(
basename;
};
var can_ir = &module_env; // ModuleEnv contains the canonical IR
try can_ir.initCIRFields(allocator, module_name);
try can_ir.initCIRFields(module_name);
const builtin_ctx: Check.BuiltinContext = .{
.module_name = try can_ir.insertIdent(base.Ident.for_text(module_name)),
@ -2918,8 +2918,7 @@ fn generateReplOutputSection(output: *DualOutput, snapshot_path: []const u8, con
return success;
}
fn generateReplProblemsSection(output: *DualOutput, content: *const Content) !void {
_ = content;
fn generateReplProblemsSection(output: *DualOutput, _: *const Content) !void {
try output.begin_section("PROBLEMS");
try output.md_writer.writer.writeAll("NIL\n");
@ -3151,9 +3150,7 @@ fn snapshotRocRealloc(realloc_args: *RocRealloc, env: *anyopaque) callconv(.c) v
realloc_args.answer = @ptrFromInt(@intFromPtr(new_slice.ptr) + size_storage_bytes);
}
fn snapshotRocDbg(dbg_args: *const RocDbg, env: *anyopaque) callconv(.c) void {
_ = dbg_args;
_ = env;
fn snapshotRocDbg(_: *const RocDbg, _: *anyopaque) callconv(.c) void {
@panic("snapshotRocDbg not implemented yet");
}

View file

@ -9,6 +9,7 @@ const std = @import("std");
const base = @import("base");
const types_mod = @import("types.zig");
const import_mapping_mod = @import("import_mapping.zig");
const debug = @import("debug.zig");
const TypesStore = @import("store.zig").Store;
const Allocator = std.mem.Allocator;
@ -610,7 +611,9 @@ fn gatherRecordFields(self: *TypeWriter, fields: RecordField.SafeMultiList.Range
}
var ext = initial_ext;
var guard = debug.IterationGuard.init("TypeWriter.gatherRecordFields");
while (true) {
guard.tick();
const resolved = self.types.resolveVar(ext);
switch (resolved.desc.content) {
.flex => |flex| {

70
src/types/debug.zig Normal file
View file

@ -0,0 +1,70 @@
//! Debug utilities for type checking
//!
//! These utilities are only active in debug builds and help catch infinite loops
//! in type-checking code by limiting the number of iterations.
const std = @import("std");
const builtin = @import("builtin");
/// Maximum number of iterations before panicking in debug builds.
/// This is set high enough to handle legitimate complex types but low enough
/// to catch infinite loops quickly during development.
pub const MAX_ITERATIONS: u32 = 100_000;
/// A debug-only iteration guard that panics if a loop exceeds MAX_ITERATIONS.
/// In release builds, this is a no-op.
///
/// Usage:
/// ```
/// var guard = IterationGuard.init("myFunction");
/// while (condition) {
/// guard.tick();
/// // ... loop body
/// }
/// ```
pub const IterationGuard = struct {
count: u32,
location: []const u8,
const Self = @This();
pub fn init(location: []const u8) Self {
return .{
.count = 0,
.location = location,
};
}
/// Call this at the start of each loop iteration.
/// In debug builds, panics if MAX_ITERATIONS is exceeded.
/// In release builds, this is a no-op that should be optimized away.
pub inline fn tick(self: *Self) void {
if (builtin.mode == .Debug) {
self.count += 1;
if (self.count > MAX_ITERATIONS) {
std.debug.panic(
"Infinite loop detected in type-checking at '{s}' after {d} iterations. " ++
"This usually indicates a cyclic type or bug in the type checker.",
.{ self.location, self.count },
);
}
}
}
/// Returns the current iteration count (useful for debugging).
pub fn getCount(self: *const Self) u32 {
return self.count;
}
};
test "IterationGuard does not panic for normal iteration counts" {
var guard = IterationGuard.init("test");
var i: u32 = 0;
while (i < 1000) : (i += 1) {
guard.tick();
}
// In release builds, tick() is a no-op so count stays at 0.
// In debug builds, count should be 1000.
const expected: u32 = if (builtin.mode == .Debug) 1000 else 0;
try std.testing.expectEqual(expected, guard.getCount());
}

View file

@ -205,12 +205,18 @@ pub const Generalizer = struct {
if (@intFromEnum(resolved.desc.rank) < rank_to_generalize_int) {
// Rank was lowered during adjustment - variable escaped
try var_pool.addVarToRank(resolved.var_, resolved.desc.rank);
} else if (self.hasNumeralConstraint(resolved.desc.content)) {
// Flex var with numeric constraint - don't generalize.
} else if (rank_to_generalize_int == @intFromEnum(Rank.top_level) and self.hasNumeralConstraint(resolved.desc.content)) {
// Flex var with numeric constraint at TOP LEVEL - don't generalize.
// This ensures numeric literals like `x = 15` stay monomorphic so that
// later usage like `I64.to_str(x)` can constrain x to I64.
// Without this, let-generalization would create a fresh copy at each use,
// leaving the original as an unconstrained flex var that defaults to Dec.
//
// However, at rank > top_level (inside lambdas OR inside nested blocks),
// we DO generalize numeric literals. This allows:
// - Polymorphic functions like `|a| a + 1` to work correctly
// - Numeric literals in blocks like `{ n = 42; use_as_i64(n); use_as_dec(n) }`
// to be used polymorphically within that block's scope.
try var_pool.addVarToRank(resolved.var_, resolved.desc.rank);
} else {
// Rank unchanged - safe to generalize
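Note: distilled to a predicate, the new rule is "keep a numeral-constrained flex var monomorphic only at top level". Names here are illustrative, not the generalizer's real API:

const std = @import("std");

fn shouldGeneralize(rank: u32, top_level_rank: u32, has_numeral_constraint: bool) bool {
    return !(rank == top_level_rank and has_numeral_constraint);
}

test "numeral constraint blocks generalization only at top level" {
    // `x = 15` at top level stays monomorphic so a later use can pin it.
    try std.testing.expect(!shouldGeneralize(0, 0, true));
    // Inside a lambda or nested block, numeric literals generalize as usual.
    try std.testing.expect(shouldGeneralize(1, 0, true));
    // Vars without a numeral constraint generalize anywhere.
    try std.testing.expect(shouldGeneralize(0, 0, false));
}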

View file

@ -12,6 +12,7 @@ pub const store = @import("store.zig");
pub const instantiate = @import("instantiate.zig");
pub const generalize = @import("generalize.zig");
pub const import_mapping = @import("import_mapping.zig");
pub const debug = @import("debug.zig");
pub const TypeWriter = @import("TypeWriter.zig");

View file

@ -7,6 +7,7 @@ const collections = @import("collections");
const serialization = @import("serialization");
const types = @import("types.zig");
const debug = @import("debug.zig");
const Allocator = std.mem.Allocator;
const Desc = types.Descriptor;
@ -50,8 +51,7 @@ pub const Slot = union(enum) {
redirect: Var,
/// Calculate the size needed to serialize this Slot
pub fn serializedSize(self: *const Slot) usize {
_ = self;
pub fn serializedSize(_: *const Slot) usize {
return @sizeOf(u8) + @sizeOf(u32); // tag + data
}
@ -589,7 +589,9 @@ pub const Store = struct {
if (initial_var != redirected_root_var) {
var compressed_slot_idx = Self.varToSlotIdx(initial_var);
var compressed_slot: Slot = self.slots.get(compressed_slot_idx);
var guard = debug.IterationGuard.init("resolveVarAndCompressPath");
while (true) {
guard.tick();
switch (compressed_slot) {
.redirect => |next_redirect_var| {
self.slots.set(compressed_slot_idx, Slot{ .redirect = redirected_root_var });
@ -611,8 +613,10 @@ pub const Store = struct {
var redirected_slot: Slot = self.slots.get(redirected_slot_idx);
var is_root = true;
var guard = debug.IterationGuard.init("resolveVar");
while (true) {
guard.tick();
switch (redirected_slot) {
.redirect => |next_redirect_var| {
redirected_slot_idx = Self.varToSlotIdx(next_redirect_var);
@ -1007,7 +1011,10 @@ const SlotStore = struct {
}
/// A type-safe index into the store
const Idx = enum(u32) { _ };
const Idx = enum(u32) {
first = 0,
_,
};
};
/// Represents a store of descriptors
@ -1110,7 +1117,10 @@ const DescStore = struct {
/// A type-safe index into the store
/// This type is made public below
const Idx = enum(u32) { _ };
const Idx = enum(u32) {
first = 0,
_,
};
};
/// An index into the desc store
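Note: a named `first` member lets call sites say "the first slot" without the banned `@enumFromInt(0)` spelling, while staying a non-exhaustive, type-safe index:

const std = @import("std");

const Idx = enum(u32) {
    first = 0,
    _,
};

test "first names index zero" {
    try std.testing.expectEqual(@as(u32, 0), @intFromEnum(Idx.first));
}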
@ -1386,13 +1396,27 @@ test "SlotStore.Serialized roundtrip" {
const gpa = std.testing.allocator;
const CompactWriter = collections.CompactWriter;
// Use a real Store to get real Var and DescStore.Idx values
var store = try Store.init(gpa);
defer store.deinit();
// Create real type variables - fresh() creates a flex var with a root slot
const var_a = try store.fresh();
const var_b = try store.fresh();
const var_c = try store.fresh();
// Get the DescStore.Idx from the root slots
const desc_idx_a = store.getSlot(var_a).root;
const desc_idx_c = store.getSlot(var_c).root;
// Create a separate SlotStore for serialization testing
var slot_store = try SlotStore.init(gpa, 4);
defer slot_store.deinit(gpa);
// Add some slots
_ = try slot_store.insert(gpa, .{ .root = @enumFromInt(100) });
_ = try slot_store.insert(gpa, .{ .redirect = @enumFromInt(0) });
_ = try slot_store.insert(gpa, .{ .root = @enumFromInt(200) });
// Add slots and capture returned indices
const slot_a = try slot_store.insert(gpa, .{ .root = desc_idx_a });
const slot_b = try slot_store.insert(gpa, .{ .redirect = var_b });
const slot_c = try slot_store.insert(gpa, .{ .root = desc_idx_c });
// Create temp file
var tmp_dir = std.testing.tmpDir(.{});
@ -1425,11 +1449,11 @@ test "SlotStore.Serialized roundtrip" {
const deser_ptr = @as(*SlotStore.Serialized, @ptrCast(@alignCast(buffer.ptr)));
const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Verify
// Verify using captured indices
try std.testing.expectEqual(@as(u64, 3), deserialized.backing.len());
try std.testing.expectEqual(Slot{ .root = @enumFromInt(100) }, deserialized.get(@enumFromInt(0)));
try std.testing.expectEqual(Slot{ .redirect = @enumFromInt(0) }, deserialized.get(@enumFromInt(1)));
try std.testing.expectEqual(Slot{ .root = @enumFromInt(200) }, deserialized.get(@enumFromInt(2)));
try std.testing.expectEqual(Slot{ .root = desc_idx_a }, deserialized.get(slot_a));
try std.testing.expectEqual(Slot{ .redirect = var_b }, deserialized.get(slot_b));
try std.testing.expectEqual(Slot{ .root = desc_idx_c }, deserialized.get(slot_c));
}
test "DescStore.Serialized roundtrip" {
@ -1439,7 +1463,7 @@ test "DescStore.Serialized roundtrip" {
var desc_store = try DescStore.init(gpa, 4);
defer desc_store.deinit(gpa);
// Add some descriptors
// Add some descriptors and capture returned indices
const desc1 = Descriptor{
.content = Content{ .flex = Flex.init() },
.rank = Rank.generalized,
@ -1451,8 +1475,8 @@ test "DescStore.Serialized roundtrip" {
.mark = Mark.visited,
};
_ = try desc_store.insert(gpa, desc1);
_ = try desc_store.insert(gpa, desc2);
const desc_idx_1 = try desc_store.insert(gpa, desc1);
const desc_idx_2 = try desc_store.insert(gpa, desc2);
// Create temp file
var tmp_dir = std.testing.tmpDir(.{});
@ -1490,10 +1514,10 @@ test "DescStore.Serialized roundtrip" {
const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))));
// Note: deserialize already handles relocation, don't call relocate again
// Verify
// Verify using captured indices
try std.testing.expectEqual(@as(usize, 2), deserialized.backing.items.len);
try std.testing.expectEqual(desc1, deserialized.get(@enumFromInt(0)));
try std.testing.expectEqual(desc2, deserialized.get(@enumFromInt(1)));
try std.testing.expectEqual(desc1, deserialized.get(desc_idx_1));
try std.testing.expectEqual(desc2, deserialized.get(desc_idx_2));
}
test "Store.Serialized roundtrip" {

View file

@ -143,7 +143,7 @@ test "BufferExtractWriter - basic functionality" {
// Create a file
const file_writer = try writer.extractWriter().createFile("test.txt");
try file_writer.writeAll("Hello, World!");
writer.extractWriter().finishFile(file_writer);
writer.extractWriter().finishFile();
// Create a directory (should be no-op for buffer writer)
try writer.extractWriter().makeDir("test_dir");
@ -151,7 +151,7 @@ test "BufferExtractWriter - basic functionality" {
// Create another file in a subdirectory
const file_writer2 = try writer.extractWriter().createFile("subdir/test2.txt");
try file_writer2.writeAll("Second file");
writer.extractWriter().finishFile(file_writer2);
writer.extractWriter().finishFile();
// Verify files were stored
try testing.expectEqual(@as(usize, 2), writer.files.count());
@ -185,7 +185,7 @@ test "DirExtractWriter - basic functionality" {
// Create a file
const file_writer = try writer.extractWriter().createFile("test.txt");
try file_writer.writeAll("Test content");
writer.extractWriter().finishFile(file_writer);
writer.extractWriter().finishFile();
// Verify file was created
const content = try tmp.dir.readFileAlloc(testing.allocator, "test.txt", 1024);
@ -195,7 +195,7 @@ test "DirExtractWriter - basic functionality" {
// Create a file in a subdirectory (should create parent dirs)
const file_writer2 = try writer.extractWriter().createFile("deep/nested/file.txt");
try file_writer2.writeAll("Nested content");
writer.extractWriter().finishFile(file_writer2);
writer.extractWriter().finishFile();
// Verify nested file was created
const nested_content = try tmp.dir.readFileAlloc(testing.allocator, "deep/nested/file.txt", 1024);
@ -304,12 +304,12 @@ test "BufferExtractWriter - overwrite existing file" {
// Create a file with initial content
const file_writer1 = try writer.extractWriter().createFile("test.txt");
try file_writer1.writeAll("Initial content");
writer.extractWriter().finishFile(file_writer1);
writer.extractWriter().finishFile();
// Overwrite the same file
const file_writer2 = try writer.extractWriter().createFile("test.txt");
try file_writer2.writeAll("New content");
writer.extractWriter().finishFile(file_writer2);
writer.extractWriter().finishFile();
// Verify it was overwritten
const file = writer.files.get("test.txt");
@ -327,7 +327,7 @@ test "DirExtractWriter - nested directory creation" {
// Create a file in a deeply nested path
const file_writer = try writer.extractWriter().createFile("a/b/c/d/e/file.txt");
try file_writer.writeAll("Nested content");
writer.extractWriter().finishFile(file_writer);
writer.extractWriter().finishFile();
// Verify the file was created
const content = try tmp.dir.readFileAlloc(testing.allocator, "a/b/c/d/e/file.txt", 1024);

View file

@ -65,7 +65,7 @@ pub const ExtractWriter = struct {
pub const VTable = struct {
createFile: *const fn (ptr: *anyopaque, path: []const u8) CreateFileError!*std.Io.Writer,
finishFile: *const fn (ptr: *anyopaque, writer: *std.Io.Writer) void,
finishFile: *const fn (ptr: *anyopaque) void,
makeDir: *const fn (ptr: *anyopaque, path: []const u8) MakeDirError!void,
};
@ -82,8 +82,8 @@ pub const ExtractWriter = struct {
return self.vtable.createFile(self.ptr, path);
}
pub fn finishFile(self: ExtractWriter, writer: *std.Io.Writer) void {
return self.vtable.finishFile(self.ptr, writer);
pub fn finishFile(self: ExtractWriter) void {
return self.vtable.finishFile(self.ptr);
}
pub fn makeDir(self: ExtractWriter, path: []const u8) MakeDirError!void {
@ -162,8 +162,7 @@ pub const DirExtractWriter = struct {
return &entry.writer.interface;
}
fn finishFile(ptr: *anyopaque, writer: *std.Io.Writer) void {
_ = writer;
fn finishFile(ptr: *anyopaque) void {
const self: *DirExtractWriter = @ptrCast(@alignCast(ptr));
// Close and remove the last file
if (self.open_files.items.len > 0) {
@ -236,7 +235,7 @@ pub const BufferExtractWriter = struct {
return &self.current_file_writer.?.writer;
}
fn finishFile(ptr: *anyopaque, _: *std.Io.Writer) void {
fn finishFile(ptr: *anyopaque) void {
const self: *BufferExtractWriter = @ptrCast(@alignCast(ptr));
if (self.current_file_writer) |*writer| {
if (self.current_file_path) |path| {
@ -591,7 +590,7 @@ pub fn unbundleStream(
},
.file => {
const file_writer = try extract_writer.createFile(file_path);
defer extract_writer.finishFile(file_writer);
defer extract_writer.finishFile();
try tar_iterator.streamRemaining(entry, file_writer);
try file_writer.flush();
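The interface change above removes the writer parameter from finishFile across the vtable, the wrapper method, and both implementations, since each implementation already tracks its current open file internally. A minimal sketch of the narrowed vtable pattern, with hypothetical Sink types standing in for ExtractWriter:

```zig
const std = @import("std");

// Sketch of the narrowed interface: finishFile takes no writer,
// because the implementation tracks its own current file. Sink and
// CountingSink are hypothetical stand-ins.
const Sink = struct {
    ptr: *anyopaque,
    vtable: *const VTable,

    const VTable = struct {
        finishFile: *const fn (ptr: *anyopaque) void,
    };

    fn finishFile(self: Sink) void {
        self.vtable.finishFile(self.ptr);
    }
};

const CountingSink = struct {
    finished: usize = 0,

    const vtable = Sink.VTable{ .finishFile = finishFileImpl };

    fn finishFileImpl(ptr: *anyopaque) void {
        const self: *CountingSink = @ptrCast(@alignCast(ptr));
        self.finished += 1;
    }

    fn sink(self: *CountingSink) Sink {
        return .{ .ptr = self, .vtable = &vtable };
    }
};

test "finishFile needs no writer argument" {
    var c = CountingSink{};
    c.sink().finishFile();
    try std.testing.expectEqual(@as(usize, 1), c.finished);
}
```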

View file

@ -106,8 +106,8 @@ const macos_externs = if (use_real_fsevents) struct {
// Stub implementations for cross-compilation
const macos_stubs = struct {
fn FSEventStreamCreate(
allocator: CFAllocatorRef,
callback: *const fn (
_: CFAllocatorRef,
_: *const fn (
streamRef: FSEventStreamRef,
clientCallBackInfo: ?*anyopaque,
numEvents: usize,
@ -115,58 +115,36 @@ const macos_stubs = struct {
eventFlags: [*]const FSEventStreamEventFlags,
eventIds: [*]const FSEventStreamEventId,
) callconv(.c) void,
context: ?*FSEventStreamContext,
pathsToWatch: CFArrayRef,
sinceWhen: FSEventStreamEventId,
latency: CFAbsoluteTime,
flags: FSEventStreamCreateFlags,
_: ?*FSEventStreamContext,
_: CFArrayRef,
_: FSEventStreamEventId,
_: CFAbsoluteTime,
_: FSEventStreamCreateFlags,
) ?FSEventStreamRef {
_ = allocator;
_ = callback;
_ = context;
_ = pathsToWatch;
_ = sinceWhen;
_ = latency;
_ = flags;
return null;
}
fn FSEventStreamScheduleWithRunLoop(
streamRef: FSEventStreamRef,
runLoop: CFRunLoopRef,
runLoopMode: CFStringRef,
) void {
_ = streamRef;
_ = runLoop;
_ = runLoopMode;
}
_: FSEventStreamRef,
_: CFRunLoopRef,
_: CFStringRef,
) void {}
fn FSEventStreamStart(streamRef: FSEventStreamRef) bool {
_ = streamRef;
fn FSEventStreamStart(_: FSEventStreamRef) bool {
return false;
}
fn FSEventStreamStop(streamRef: FSEventStreamRef) void {
_ = streamRef;
}
fn FSEventStreamStop(_: FSEventStreamRef) void {}
fn FSEventStreamUnscheduleFromRunLoop(
streamRef: FSEventStreamRef,
runLoop: CFRunLoopRef,
runLoopMode: CFStringRef,
) void {
_ = streamRef;
_ = runLoop;
_ = runLoopMode;
}
_: FSEventStreamRef,
_: CFRunLoopRef,
_: CFStringRef,
) void {}
fn FSEventStreamInvalidate(streamRef: FSEventStreamRef) void {
_ = streamRef;
}
fn FSEventStreamInvalidate(_: FSEventStreamRef) void {}
fn FSEventStreamRelease(streamRef: FSEventStreamRef) void {
_ = streamRef;
}
fn FSEventStreamRelease(_: FSEventStreamRef) void {}
fn CFRunLoopGetCurrent() CFRunLoopRef {
return @ptrFromInt(1);
@ -174,44 +152,30 @@ const macos_stubs = struct {
fn CFRunLoopRun() void {}
fn CFRunLoopRunInMode(mode: CFStringRef, seconds: CFAbsoluteTime, returnAfterSourceHandled: bool) i32 {
_ = mode;
_ = seconds;
_ = returnAfterSourceHandled;
fn CFRunLoopRunInMode(_: CFStringRef, _: CFAbsoluteTime, _: bool) i32 {
return 0;
}
fn CFRunLoopStop(rl: CFRunLoopRef) void {
_ = rl;
}
fn CFRunLoopStop(_: CFRunLoopRef) void {}
fn CFArrayCreate(
allocator: CFAllocatorRef,
values: [*]const ?*const anyopaque,
numValues: CFIndex,
callBacks: ?*const anyopaque,
_: CFAllocatorRef,
_: [*]const ?*const anyopaque,
_: CFIndex,
_: ?*const anyopaque,
) ?CFArrayRef {
_ = allocator;
_ = values;
_ = numValues;
_ = callBacks;
return null;
}
fn CFStringCreateWithCString(
alloc: CFAllocatorRef,
cStr: [*:0]const u8,
encoding: u32,
_: CFAllocatorRef,
_: [*:0]const u8,
_: u32,
) ?CFStringRef {
_ = alloc;
_ = cStr;
_ = encoding;
return null;
}
fn CFRelease(cf: ?*anyopaque) void {
_ = cf;
}
fn CFRelease(_: ?*anyopaque) void {}
const kCFRunLoopDefaultMode: CFStringRef = @ptrFromInt(1);
};
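The stub rewrites in this file all apply the same mechanical change: a parameter that was named and then discarded with `_ = param;` in the body becomes an anonymous `_` parameter in the signature. A small illustration:

```zig
// Before: the parameter is named, then explicitly discarded.
fn stopOld(stream: usize) void {
    _ = stream;
}

// After: `_` in the signature discards the argument directly,
// so no body statement is needed.
fn stopNew(_: usize) void {}

test "both forms compile to the same no-op" {
    stopOld(1);
    stopNew(1);
}
```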
@ -570,17 +534,13 @@ pub const Watcher = struct {
}
fn fsEventsCallback(
streamRef: FSEventStreamRef,
_: FSEventStreamRef,
clientCallBackInfo: ?*anyopaque,
numEvents: usize,
eventPaths: *anyopaque,
eventFlags: [*]const FSEventStreamEventFlags,
eventIds: [*]const FSEventStreamEventId,
_: [*]const FSEventStreamEventFlags,
_: [*]const FSEventStreamEventId,
) callconv(.c) void {
_ = streamRef;
_ = eventFlags;
_ = eventIds;
if (clientCallBackInfo == null) return;
const self: *Watcher = @ptrCast(@alignCast(clientCallBackInfo.?));
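The callback keeps its explicit null check before recovering the typed pointer, since FSEvents hands the context back as an opaque pointer. A sketch of that guard, with a hypothetical Watcher stand-in:

```zig
const std = @import("std");

// Stand-in for the real watcher state; sketch only.
const Watcher = struct {
    hits: usize = 0,
};

// A C-callconv callback receives its context type-erased, so it must
// guard against null before casting back to the typed pointer.
fn callback(ctx: ?*anyopaque) callconv(.c) void {
    if (ctx == null) return;
    const self: *Watcher = @ptrCast(@alignCast(ctx.?));
    self.hits += 1;
}

test "opaque context round-trips to the typed pointer" {
    var w = Watcher{};
    callback(&w);
    try std.testing.expectEqual(@as(usize, 1), w.hits);
}
```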
@ -1130,8 +1090,7 @@ test "recursive directory watching" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1167,8 +1126,7 @@ test "multiple directories watching" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1205,8 +1163,7 @@ test "file modification detection" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1238,8 +1195,7 @@ test "rapid file creation" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1284,8 +1240,7 @@ test "directory creation and file addition" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1325,8 +1280,7 @@ test "start stop restart" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1442,8 +1396,7 @@ test "file rename detection" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
@ -1560,8 +1513,7 @@ test "windows long path handling" {
};
const callback = struct {
fn cb(event: WatchEvent) void {
_ = event;
fn cb(_: WatchEvent) void {
_ = global.event_count.fetchAdd(1, .seq_cst);
}
}.cb;
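Every watcher test above uses the same shape: shared state lives in a container-level `var`, and the callback is a plain function declared inside an anonymous struct, because plain functions cannot capture locals. A sketch of that shape, assuming simplified types:

```zig
const std = @import("std");

test "struct-wrapped callback reaching shared atomic state" {
    // Shared state as a container-level `var`, since a plain `fn`
    // cannot close over locals.
    const global = struct {
        var count = std.atomic.Value(usize).init(0);
    };
    const cb = struct {
        fn cb(_: u32) void {
            _ = global.count.fetchAdd(1, .seq_cst);
        }
    }.cb;

    cb(0);
    try std.testing.expectEqual(@as(usize, 1), global.count.load(.seq_cst));
}
```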

View file

@ -0,0 +1,8 @@
app [main!] { pf: platform "./platform/main.roc" }
main! = || {
if True {
x = 0
_y = x
}
}

View file

@ -1,13 +1,10 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
import pf.Stdin
str : Str -> Str
str = |s| s
import pf.Stdout
main! = || {
Stdout.line!(str("Before stdin"))
temp = Stdin.line!()
Stdout.line!(str("After stdin"))
Stdout.line!("Before stdin")
_line = Stdin.line!()
Stdout.line!("After stdin")
}

View file

@ -0,0 +1,15 @@
app [main!] { pf: platform "./platform/main.roc" }
# Regression test: calling the .sublist() method on a List(U8) from "".to_utf8()
# causes a segfault when the variable doesn't have an explicit type annotation.
# Error was: "Roc crashed: Error evaluating from shared memory: InvalidMethodReceiver"
# The bug was that translateTypeVar was using the wrong module (closure's source module)
# instead of the caller's module when translating the return type.
main! = || {
# Test case 1: Method call without type annotation (original bug)
s = "".to_utf8()
_slice = s.sublist({ start: 0, len: 0 })
# Test case 2: Comparing empty list with method result
_ignore = "".to_utf8() == []
}

View file

@ -0,0 +1,8 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
main! = || {
x = "hello"
Stdout.line!(x.inspect())
}

View file

@ -0,0 +1,29 @@
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
print! : Str => {}
print! = |msg| msg.split_on("\n").for_each!(Stdout.line!)
fnA! : Str => Try(I64, _)
fnA! = |_input| {
var $x = 1
Ok($x)
}
fnB! : Str => Try(I64, _)
fnB! = |_input| {
var $y = 2
Ok($y)
}
run! = || {
print!("A1: ${fnA!("test")?.to_str()}")
print!("A2: ${fnA!("test")?.to_str()}")
print!("A3: ${fnA!("test")?.to_str()}")
Ok({})
}
main! = || {
_ignore = run!()
}

View file

@ -40,10 +40,7 @@ main = {
}
~~~
# EXPECTED
MODULE NOT FOUND - can_import_comprehensive.md:1:1:1:17
MODULE NOT FOUND - can_import_comprehensive.md:2:1:2:48
DUPLICATE DEFINITION - can_import_comprehensive.md:3:1:3:27
MODULE NOT FOUND - can_import_comprehensive.md:3:1:3:27
UNDEFINED VARIABLE - can_import_comprehensive.md:6:14:6:22
UNDEFINED VARIABLE - can_import_comprehensive.md:7:14:7:23
UNDEFINED VARIABLE - can_import_comprehensive.md:8:14:8:22
@ -53,28 +50,6 @@ UNDEFINED VARIABLE - can_import_comprehensive.md:17:15:17:18
UNDEFINED VARIABLE - can_import_comprehensive.md:18:15:18:19
UNDEFINED VARIABLE - can_import_comprehensive.md:21:16:21:26
# PROBLEMS
**MODULE NOT FOUND**
The module `json.Json` was not found in this Roc project.
You're attempting to use this module here:
**can_import_comprehensive.md:1:1:1:17:**
```roc
import json.Json
```
^^^^^^^^^^^^^^^^
**MODULE NOT FOUND**
The module `http.Client` was not found in this Roc project.
You're attempting to use this module here:
**can_import_comprehensive.md:2:1:2:48:**
```roc
import http.Client as Http exposing [get, post]
```
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**DUPLICATE DEFINITION**
The name `Str` is being redeclared in this scope.
@ -93,17 +68,6 @@ import json.Json
^
**MODULE NOT FOUND**
The module `utils.String` was not found in this Roc project.
You're attempting to use this module here:
**can_import_comprehensive.md:3:1:3:27:**
```roc
import utils.String as Str
```
^^^^^^^^^^^^^^^^^^^^^^^^^^
**UNDEFINED VARIABLE**
Nothing is named `get` in this scope.
Is there an `import` or `exposing` missing up-top?

View file

@ -62,10 +62,7 @@ combineTrys = |jsonTry, httpStatus|
UNDECLARED TYPE - can_import_exposing_types.md:29:18:29:24
UNDECLARED TYPE - can_import_exposing_types.md:30:18:30:24
UNDECLARED TYPE - can_import_exposing_types.md:31:23:31:31
MODULE NOT FOUND - can_import_exposing_types.md:1:1:1:49
MODULE NOT FOUND - can_import_exposing_types.md:2:1:2:64
DUPLICATE DEFINITION - can_import_exposing_types.md:3:1:3:32
MODULE NOT FOUND - can_import_exposing_types.md:3:1:3:32
UNDECLARED TYPE - can_import_exposing_types.md:6:24:6:29
UNDECLARED TYPE - can_import_exposing_types.md:6:31:6:36
UNDEFINED VARIABLE - can_import_exposing_types.md:7:21:7:31
@ -125,28 +122,6 @@ This type is referenced here:
^^^^^^^^
**MODULE NOT FOUND**
The module `json.Json` was not found in this Roc project.
You're attempting to use this module here:
**can_import_exposing_types.md:1:1:1:49:**
```roc
import json.Json exposing [Value, Error, Config]
```
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**MODULE NOT FOUND**
The module `http.Client` was not found in this Roc project.
You're attempting to use this module here:
**can_import_exposing_types.md:2:1:2:64:**
```roc
import http.Client as Http exposing [Request, Response, Status]
```
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**DUPLICATE DEFINITION**
The name `Try` is being redeclared in this scope.
@ -165,17 +140,6 @@ import json.Json exposing [Value, Error, Config]
^
**MODULE NOT FOUND**
The module `utils.Try` was not found in this Roc project.
You're attempting to use this module here:
**can_import_exposing_types.md:3:1:3:32:**
```roc
import utils.Try exposing [Try]
```
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**UNDECLARED TYPE**
The type _Value_ is not declared in this scope.

View file

@ -10,20 +10,8 @@ import json.Json
main = Json.utf8
~~~
# EXPECTED
MODULE NOT FOUND - can_import_json.md:1:1:1:17
UNDEFINED VARIABLE - can_import_json.md:3:8:3:17
# PROBLEMS
**MODULE NOT FOUND**
The module `json.Json` was not found in this Roc project.
You're attempting to use this module here:
**can_import_json.md:1:1:1:17:**
```roc
import json.Json
```
^^^^^^^^^^^^^^^^
**UNDEFINED VARIABLE**
Nothing is named `utf8` in this scope.
Is there an `import` or `exposing` missing up-top?

Some files were not shown because too many files have changed in this diff.