Merge branch 'roc-lang:main' into lsp-syntax-parsing

This commit is contained in:
Etienne Latendresse-Tremblay 2025-12-09 20:28:46 -05:00 committed by GitHub
commit 3652023738
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
88 changed files with 7461 additions and 2623 deletions

View file

@ -7,8 +7,8 @@ name: Cross Compilation Test
permissions: {}
jobs:
# Step 1: Cross-compile musl and glibc targets from different host platforms
cross-compile:
# Cross-compile all fx platform tests from all host platforms
cross-compile-fx-tests:
runs-on: ${{ matrix.host }}
strategy:
fail-fast: false
@ -20,10 +20,8 @@ jobs:
windows-2022, # Windows x64 host
]
target: [
x64musl, # Linux x86_64 musl
arm64musl, # Linux ARM64 musl
x64glibc, # Linux x86_64 glibc
arm64glibc, # Linux ARM64 glibc
x64musl, # Linux x86_64 musl (static linking)
arm64musl, # Linux ARM64 musl (static linking)
]
steps:
- name: Checkout
@ -40,26 +38,38 @@ jobs:
with:
arch: x64
- name: Build roc compiler and other necessary libs
- name: Build roc compiler and test_runner
uses: ./.github/actions/flaky-retry
with:
command: 'zig build'
error_string_contains: 'EndOfStream|503'
retry_count: 3
- name: Cross-compile int platform (Unix)
- name: Run fx platform cross-compilation tests (Unix)
if: runner.os != 'Windows'
run: |
echo "Cross-compiling from ${{ matrix.host }} to ${{ matrix.target }}"
./zig-out/bin/roc build --target=${{ matrix.target }} --output=int_app_${{ matrix.target }}_${{ matrix.host }} test/int/app.roc
echo "Cross-compiling fx tests from ${{ matrix.host }} to ${{ matrix.target }}"
./zig-out/bin/test_runner ./zig-out/bin/roc fx --target=${{ matrix.target }} --mode=cross
- name: Cross-compile int platform (Windows)
- name: Run fx platform cross-compilation tests (Windows)
if: runner.os == 'Windows'
run: |
echo "Cross-compiling from ${{ matrix.host }} to ${{ matrix.target }}"
echo "Cross-compiling fx tests from ${{ matrix.host }} to ${{ matrix.target }}"
zig-out\bin\test_runner.exe zig-out\bin\roc.exe fx --target=${{ matrix.target }} --mode=cross
- name: Cross-compile int platform test app (Unix)
if: runner.os != 'Windows'
run: |
echo "Cross-compiling int platform from ${{ matrix.host }} to ${{ matrix.target }}"
./zig-out/bin/roc build --target=${{ matrix.target }} --output=int_app_${{ matrix.target }}_${{ matrix.host }} test/int/app.roc
- name: Cross-compile int platform test app (Windows)
if: runner.os == 'Windows'
run: |
echo "Cross-compiling int platform from ${{ matrix.host }} to ${{ matrix.target }}"
zig-out\bin\roc.exe build --target=${{ matrix.target }} --output=int_app_${{ matrix.target }}_${{ matrix.host }} test/int/app.roc
- name: Upload cross-compiled executables
- name: Upload cross-compiled int app executables
uses: actions/upload-artifact@v4 # ratchet:actions/upload-artifact@v4
with:
name: cross-compiled-${{ matrix.host }}-${{ matrix.target }}
@ -67,9 +77,9 @@ jobs:
int_app_${{ matrix.target }}_*
retention-days: 1
# Step 2: Test cross-compiled executables on actual target platforms
test-cross-compiled:
needs: cross-compile
# Test cross-compiled int platform executables on actual target platforms
test-int-on-target:
needs: [cross-compile-fx-tests]
runs-on: ${{ matrix.target_os }}
strategy:
fail-fast: false
@ -83,14 +93,6 @@ jobs:
- target: arm64musl
target_os: ubuntu-24.04-arm
arch: arm64
# Test x64glibc executables on Linux x64
- target: x64glibc
target_os: ubuntu-22.04
arch: x64
# Test arm64glibc executables on Linux ARM64
- target: arm64glibc
target_os: ubuntu-24.04-arm
arch: arm64
steps:
- name: Download all cross-compiled artifacts
uses: actions/download-artifact@v4

View file

@ -159,7 +159,7 @@ jobs:
- name: Build Test Platforms (cross-compile)
if: runner.os != 'Windows'
run: |
./ci/test_int_platform.sh
./zig-out/bin/test_runner ./zig-out/bin/roc int --mode=cross
- name: roc executable minimal check (Unix)
if: runner.os != 'Windows'

1
.gitignore vendored
View file

@ -23,6 +23,7 @@
# Ignore the following directories and file extensions
target
!src/target
generated-docs
zig-out

452
build.zig
View file

@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const modules = @import("src/build/modules.zig");
const glibc_stub_build = @import("src/build/glibc_stub.zig");
const roc_target = @import("src/target/mod.zig");
const Dependency = std.Build.Dependency;
const Import = std.Build.Module.Import;
const InstallDir = std.Build.InstallDir;
@ -10,6 +11,34 @@ const OptimizeMode = std.builtin.OptimizeMode;
const ResolvedTarget = std.Build.ResolvedTarget;
const Step = std.Build.Step;
// =============================================================================
// Cross-compile target definitions
// =============================================================================
/// A named cross-compilation target: pairs the directory/artifact name used
/// throughout the build with the Zig target query used to resolve it.
const CrossTarget = struct {
    /// Target name used in CI matrix entries and `platform/targets/<name>` paths (e.g. "x64musl").
    name: []const u8,
    /// Query passed to `b.resolveTargetQuery` when building for this target.
    query: std.Target.Query,
};
/// Musl-only cross-compile targets (static linking).
const musl_cross_targets = [_]CrossTarget{
    .{ .name = "x64musl", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .musl } },
    .{ .name = "arm64musl", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .musl } },
};

/// Glibc cross-compile targets (dynamic linking; these targets also get stub
/// libc files generated via generateGlibcStub).
const glibc_cross_targets = [_]CrossTarget{
    .{ .name = "x64glibc", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .gnu } },
    .{ .name = "arm64glibc", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .gnu } },
};

/// All Linux cross-compile targets (musl + glibc).
const linux_cross_targets = musl_cross_targets ++ glibc_cross_targets;

/// Test platform directories under `test/` that need host libraries built.
const all_test_platform_dirs = [_][]const u8{ "str", "int", "fx", "fx-open" };
/// Whether the LLVM backend is required for the given target.
/// x86_64 macOS is the one host/target combination that must go through LLVM.
fn mustUseLlvm(target: ResolvedTarget) bool {
    const resolved = target.result;
    const is_macos = resolved.os.tag == .macos;
    const is_x86_64 = resolved.cpu.arch == .x86_64;
    return is_macos and is_x86_64;
}
@ -622,32 +651,49 @@ fn checkFxPlatformTestCoverage(step: *Step) !void {
}
}.lessThan);
// Read fx_platform_test.zig to extract tested files
const test_file_path = "src/cli/test/fx_platform_test.zig";
const test_file_contents = try std.fs.cwd().readFileAlloc(allocator, test_file_path, 1024 * 1024);
defer allocator.free(test_file_contents);
// Find all references to test/fx/*.roc files in the test file
// Find all references to test/fx/*.roc files in test source files
var tested_files = std.StringHashMap(void).init(allocator);
defer tested_files.deinit();
defer {
var key_iter = tested_files.keyIterator();
while (key_iter.next()) |key| {
allocator.free(key.*);
}
tested_files.deinit();
}
var line_iter = std.mem.splitScalar(u8, test_file_contents, '\n');
while (line_iter.next()) |line| {
// Look for patterns like "test/fx/filename.roc"
var search_start: usize = 0;
while (std.mem.indexOfPos(u8, line, search_start, "test/fx/")) |idx| {
const rest_of_line = line[idx..];
// Find the end of the filename
if (std.mem.indexOf(u8, rest_of_line, ".roc")) |roc_pos| {
const full_path = rest_of_line[0 .. roc_pos + 4]; // Include ".roc"
// Extract just the filename (after "test/fx/")
const filename = full_path["test/fx/".len..];
// Only count files in test/fx (not subdirectories like test/fx/subdir/)
if (std.mem.indexOf(u8, filename, "/") == null) {
try tested_files.put(filename, {});
// Scan both the test file and the shared specs file
const test_files_to_scan = [_][]const u8{
"src/cli/test/fx_platform_test.zig",
"src/cli/test/fx_test_specs.zig",
};
for (test_files_to_scan) |test_file_path| {
const test_file_contents = std.fs.cwd().readFileAlloc(allocator, test_file_path, 1024 * 1024) catch |err| {
std.debug.print("Warning: Could not read {s}: {}\n", .{ test_file_path, err });
continue;
};
defer allocator.free(test_file_contents);
var line_iter = std.mem.splitScalar(u8, test_file_contents, '\n');
while (line_iter.next()) |line| {
// Look for patterns like "test/fx/filename.roc"
var search_start: usize = 0;
while (std.mem.indexOfPos(u8, line, search_start, "test/fx/")) |idx| {
const rest_of_line = line[idx..];
// Find the end of the filename
if (std.mem.indexOf(u8, rest_of_line, ".roc")) |roc_pos| {
const full_path = rest_of_line[0 .. roc_pos + 4]; // Include ".roc"
// Extract just the filename (after "test/fx/")
const filename = full_path["test/fx/".len..];
// Only count files in test/fx (not subdirectories like test/fx/subdir/)
if (std.mem.indexOf(u8, filename, "/") == null) {
// Dupe the filename since the source buffer will be freed
const duped_filename = try allocator.dupe(u8, filename);
try tested_files.put(duped_filename, {});
}
}
search_start = idx + 1;
}
search_start = idx + 1;
}
}
@ -663,11 +709,11 @@ fn checkFxPlatformTestCoverage(step: *Step) !void {
// Report results
if (missing_tests.items.len > 0) {
std.debug.print("\nERROR: The following .roc files in test/fx/ do not have tests in {s}:\n", .{test_file_path});
std.debug.print("\nERROR: The following .roc files in test/fx/ do not have tests:\n", .{});
for (missing_tests.items) |missing_file| {
std.debug.print(" - {s}\n", .{missing_file});
}
std.debug.print("\nPlease add tests for these files or remove them from test/fx/.\n", .{});
std.debug.print("\nPlease add tests in fx_platform_test.zig or fx_test_specs.zig, or remove these files from test/fx/.\n", .{});
return step.fail("{d} .roc file(s) in test/fx/ are missing tests", .{missing_tests.items.len});
}
@ -718,6 +764,7 @@ const MiniCiStep = struct {
// Run the sequence of `zig build` commands that make up the
// mini CI pipeline.
try runSubBuild(step, "fmt", "zig build fmt");
try checkTestWiring(step);
try runSubBuild(step, null, "zig build");
try checkBuiltinRocFormatting(step);
try runSubBuild(step, "snapshot", "zig build snapshot");
@ -836,6 +883,39 @@ const MiniCiStep = struct {
},
}
}
/// Run the standalone test-wiring checker (`ci/check_test_wiring.zig`) as a
/// child process and fail the step if it reports problems or dies abnormally.
fn checkTestWiring(step: *Step) !void {
    const b = step.owner;
    std.debug.print("---- minici: checking test wiring ----\n", .{});

    // Invoke the checker via `zig run` using the same zig executable that is
    // driving this build.
    const argv = [_][]const u8{ b.graph.zig_exe, "run", "ci/check_test_wiring.zig" };

    var child = std.process.Child.init(&argv, b.allocator);
    // Forward all standard streams so the checker's report reaches the user.
    child.stdin_behavior = .Inherit;
    child.stdout_behavior = .Inherit;
    child.stderr_behavior = .Inherit;

    const term = try child.spawnAndWait();
    if (term != .Exited) {
        return step.fail("zig run ci/check_test_wiring.zig terminated abnormally", .{});
    }
    if (term.Exited != 0) {
        return step.fail(
            "Test wiring check failed. Run 'zig run ci/check_test_wiring.zig' to see details.",
            .{},
        );
    }
}
};
fn createAndRunBuiltinCompiler(
@ -908,6 +988,36 @@ fn createTestPlatformHostLib(
return lib;
}
/// Builds a test platform host library and sets up a step to copy it to the target-specific directory.
/// Returns the copy step for dependency wiring.
/// Builds a test platform host library and sets up a step that copies the
/// resulting archive into `test/<platform>/platform/targets/<target_name>/`.
/// Returns the copy step so callers can wire dependencies onto it.
fn buildAndCopyTestPlatformHostLib(
    b: *std.Build,
    platform_dir: []const u8,
    target: ResolvedTarget,
    target_name: []const u8,
    optimize: OptimizeMode,
    roc_modules: modules.RocModules,
) *Step.UpdateSourceFiles {
    const host_lib = createTestPlatformHostLib(
        b,
        b.fmt("test_platform_{s}_host_{s}", .{ platform_dir, target_name }),
        b.pathJoin(&.{ "test", platform_dir, "platform/host.zig" }),
        target,
        optimize,
        roc_modules,
    );

    // Windows static archives use the MSVC naming convention; everything else
    // uses the Unix `lib*.a` convention.
    const lib_filename: []const u8 =
        if (target.result.os.tag == .windows) "host.lib" else "libhost.a";
    const dest_path = b.pathJoin(&.{ "test", platform_dir, "platform/targets", target_name, lib_filename });

    const copy = b.addUpdateSourceFiles();
    copy.addCopyFileToSource(host_lib.getEmittedBin(), dest_path);
    return copy;
}
/// Custom build step that clears the Roc cache directory.
/// Uses Zig's native filesystem APIs for cross-platform support.
const ClearRocCacheStep = struct {
@ -987,6 +1097,29 @@ const ClearRocCacheStep = struct {
}
};
/// Custom build step that prints a success message once the steps it depends
/// on have completed. Used so `zig build` visibly confirms a finished build.
const PrintBuildSuccessStep = struct {
    step: Step,

    /// Allocate and initialize the step; panics on OOM like other build-graph
    /// allocations in this file.
    fn create(b: *std.Build) *PrintBuildSuccessStep {
        const self = b.allocator.create(PrintBuildSuccessStep) catch @panic("OOM");
        self.* = .{
            .step = Step.init(.{
                .id = .custom,
                .name = "print-build-success",
                .owner = b,
                .makeFn = make,
            }),
        };
        return self;
    }

    /// Step make function: ignores its inputs and prints the banner.
    fn make(step: *Step, options: Step.MakeOptions) !void {
        _ = step;
        _ = options;
        std.debug.print("Build succeeded!\n", .{});
    }
};
/// Create a step that clears the Roc cache directory.
/// This is useful when rebuilding test platforms to ensure stale cached hosts aren't used.
fn createClearCacheStep(b: *std.Build) *Step {
@ -1003,80 +1136,40 @@ fn setupTestPlatforms(
) void {
// Clear the Roc cache when test platforms are rebuilt to ensure stale cached hosts aren't used
const clear_cache_step = createClearCacheStep(b);
const native_target_name = roc_target.RocTarget.fromStdTarget(target.result).toName();
// Create test platform host static library (str)
const test_platform_host_lib = createTestPlatformHostLib(
b,
"test_platform_str_host",
"test/str/platform/host.zig",
target,
optimize,
roc_modules,
);
// Copy the test platform host library to the source directory
const copy_test_host = b.addUpdateSourceFiles();
const test_host_filename = if (target.result.os.tag == .windows) "host.lib" else "libhost.a";
copy_test_host.addCopyFileToSource(test_platform_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/str/platform", test_host_filename }));
// Clear cache after copying new host library
clear_cache_step.dependOn(&copy_test_host.step);
b.getInstallStep().dependOn(clear_cache_step);
test_platforms_step.dependOn(clear_cache_step);
// Create test platform host static libraries for int, fx, and fx-open - native target
const test_platform_dirs = [_][]const u8{ "int", "fx", "fx-open" };
for (test_platform_dirs) |platform_dir| {
const host_lib = createTestPlatformHostLib(
// Build all test platforms for native target
for (all_test_platform_dirs) |platform_dir| {
const copy_step = buildAndCopyTestPlatformHostLib(
b,
b.fmt("test_platform_{s}_host", .{platform_dir}),
b.pathJoin(&.{ "test", platform_dir, "platform/host.zig" }),
platform_dir,
target,
native_target_name,
optimize,
roc_modules,
);
const copy_host = b.addUpdateSourceFiles();
copy_host.addCopyFileToSource(host_lib.getEmittedBin(), b.pathJoin(&.{ "test", platform_dir, "platform", test_host_filename }));
clear_cache_step.dependOn(&copy_host.step);
clear_cache_step.dependOn(&copy_step.step);
}
// Cross-compile test platform host libraries for musl and glibc targets
const cross_compile_targets = [_]struct { name: []const u8, query: std.Target.Query }{
.{ .name = "x64musl", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .musl } },
.{ .name = "arm64musl", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .musl } },
.{ .name = "x64glibc", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .gnu } },
.{ .name = "arm64glibc", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .gnu } },
};
for (cross_compile_targets) |cross_target| {
// Cross-compile for musl targets (glibc not needed for test-platforms step)
for (musl_cross_targets) |cross_target| {
const cross_resolved_target = b.resolveTargetQuery(cross_target.query);
// Create cross-compiled host libraries for all test platforms
for (test_platform_dirs) |platform_dir| {
const cross_host_lib = createTestPlatformHostLib(
for (all_test_platform_dirs) |platform_dir| {
const copy_step = buildAndCopyTestPlatformHostLib(
b,
b.fmt("test_platform_{s}_host_{s}", .{ platform_dir, cross_target.name }),
b.pathJoin(&.{ "test", platform_dir, "platform/host.zig" }),
platform_dir,
cross_resolved_target,
cross_target.name,
optimize,
roc_modules,
);
const copy_cross_host = b.addUpdateSourceFiles();
copy_cross_host.addCopyFileToSource(cross_host_lib.getEmittedBin(), b.pathJoin(&.{ "test", platform_dir, "platform/targets", cross_target.name, "libhost.a" }));
clear_cache_step.dependOn(&copy_cross_host.step);
}
// Generate glibc stubs for gnu targets
if (cross_target.query.abi == .gnu) {
const glibc_stub = generateGlibcStub(b, cross_resolved_target, cross_target.name);
if (glibc_stub) |stub| {
b.getInstallStep().dependOn(&stub.step);
test_platforms_step.dependOn(&stub.step);
}
clear_cache_step.dependOn(&copy_step.step);
}
}
b.getInstallStep().dependOn(clear_cache_step);
test_platforms_step.dependOn(clear_cache_step);
}
pub fn build(b: *std.Build) void {
@ -1333,6 +1426,17 @@ pub fn build(b: *std.Build) void {
add_tracy(b, roc_modules.build_options, snapshot_exe, target, false, flag_enable_tracy);
install_and_run(b, no_bin, snapshot_exe, snapshot_step, snapshot_step, run_args);
// Unified test platform runner (replaces fx_cross_runner and int_cross_runner)
const test_runner_exe = b.addExecutable(.{
.name = "test_runner",
.root_module = b.createModule(.{
.root_source_file = b.path("src/cli/test/test_runner.zig"),
.target = target,
.optimize = optimize,
}),
});
b.installArtifact(test_runner_exe);
const playground_exe = b.addExecutable(.{
.name = "playground",
.root_module = b.createModule(.{
@ -1607,6 +1711,11 @@ pub fn build(b: *std.Build) void {
.aarch64 => .{ b.resolveTargetQuery(.{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .musl }), "arm64musl" },
else => .{ target, null },
},
.windows => switch (target.result.cpu.arch) {
.x86_64 => .{ target, "x64win" },
.aarch64 => .{ target, "arm64win" },
else => .{ target, null },
},
else => .{ target, null },
};
@ -1626,11 +1735,11 @@ pub fn build(b: *std.Build) void {
copy_test_fx_host.addCopyFileToSource(test_platform_fx_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/fx/platform", test_fx_host_filename }));
b.getInstallStep().dependOn(&copy_test_fx_host.step);
// On Linux, also copy to the target-specific directory so findHostLibrary finds it
// Also copy to the target-specific directory so findHostLibrary finds it
if (fx_host_target_dir) |target_dir| {
copy_test_fx_host.addCopyFileToSource(
test_platform_fx_host_lib.getEmittedBin(),
b.pathJoin(&.{ "test/fx/platform/targets", target_dir, "libhost.a" }),
b.pathJoin(&.{ "test/fx/platform/targets", target_dir, test_fx_host_filename }),
);
}
@ -1834,78 +1943,38 @@ fn addMainExe(
});
configureBackend(exe, target);
// Create test platform host static library (str)
const test_platform_host_lib = createTestPlatformHostLib(
b,
"test_platform_str_host",
"test/str/platform/host.zig",
target,
optimize,
roc_modules,
);
// Build str and int test platform host libraries for native target
// (fx and fx-open are only built via test-platforms step)
const main_build_platforms = [_][]const u8{ "str", "int" };
const native_target_name = roc_target.RocTarget.fromStdTarget(target.result).toName();
// Copy the test platform host library to the source directory
const copy_test_host = b.addUpdateSourceFiles();
const test_host_filename = if (target.result.os.tag == .windows) "host.lib" else "libhost.a";
copy_test_host.addCopyFileToSource(test_platform_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/str/platform", test_host_filename }));
b.getInstallStep().dependOn(&copy_test_host.step);
for (main_build_platforms) |platform_dir| {
const copy_step = buildAndCopyTestPlatformHostLib(
b,
platform_dir,
target,
native_target_name,
optimize,
roc_modules,
);
b.getInstallStep().dependOn(&copy_step.step);
}
// Create test platform host static library (int) - native target
const test_platform_int_host_lib = createTestPlatformHostLib(
b,
"test_platform_int_host",
"test/int/platform/host.zig",
target,
optimize,
roc_modules,
);
// Copy the int test platform host library to the source directory
const copy_test_int_host = b.addUpdateSourceFiles();
const test_int_host_filename = if (target.result.os.tag == .windows) "host.lib" else "libhost.a";
copy_test_int_host.addCopyFileToSource(test_platform_int_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/int/platform", test_int_host_filename }));
b.getInstallStep().dependOn(&copy_test_int_host.step);
// Cross-compile int platform host libraries for musl and glibc targets
const cross_compile_targets = [_]struct { name: []const u8, query: std.Target.Query }{
.{ .name = "x64musl", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .musl } },
.{ .name = "arm64musl", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .musl } },
.{ .name = "x64glibc", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .gnu } },
.{ .name = "arm64glibc", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .gnu } },
};
for (cross_compile_targets) |cross_target| {
// Cross-compile for all Linux targets (musl + glibc)
for (linux_cross_targets) |cross_target| {
const cross_resolved_target = b.resolveTargetQuery(cross_target.query);
// Create cross-compiled int host library
const cross_int_host_lib = createTestPlatformHostLib(
b,
b.fmt("test_platform_int_host_{s}", .{cross_target.name}),
"test/int/platform/host.zig",
cross_resolved_target,
optimize,
roc_modules,
);
// Copy to target-specific directory
const copy_cross_int_host = b.addUpdateSourceFiles();
copy_cross_int_host.addCopyFileToSource(cross_int_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/int/platform/targets", cross_target.name, "libhost.a" }));
b.getInstallStep().dependOn(&copy_cross_int_host.step);
// Create cross-compiled str host library
const cross_str_host_lib = createTestPlatformHostLib(
b,
b.fmt("test_platform_str_host_{s}", .{cross_target.name}),
"test/str/platform/host.zig",
cross_resolved_target,
optimize,
roc_modules,
);
// Copy to target-specific directory
const copy_cross_str_host = b.addUpdateSourceFiles();
copy_cross_str_host.addCopyFileToSource(cross_str_host_lib.getEmittedBin(), b.pathJoin(&.{ "test/str/platform/targets", cross_target.name, "libhost.a" }));
b.getInstallStep().dependOn(&copy_cross_str_host.step);
for (main_build_platforms) |platform_dir| {
const copy_step = buildAndCopyTestPlatformHostLib(
b,
platform_dir,
cross_resolved_target,
cross_target.name,
optimize,
roc_modules,
);
b.getInstallStep().dependOn(&copy_step.step);
}
// Generate glibc stubs for gnu targets
if (cross_target.query.abi == .gnu) {
@ -1964,6 +2033,59 @@ fn addMainExe(
copy_shim.addCopyFileToSource(shim_lib.getEmittedBin(), b.pathJoin(&.{ "src/cli", interpreter_shim_filename }));
exe.step.dependOn(&copy_shim.step);
// Cross-compile interpreter shim for all supported targets
// This allows `roc build --target=X` to work for cross-compilation
const cross_compile_shim_targets = [_]struct { name: []const u8, query: std.Target.Query }{
.{ .name = "x64musl", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .musl } },
.{ .name = "arm64musl", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .musl } },
.{ .name = "x64glibc", .query = .{ .cpu_arch = .x86_64, .os_tag = .linux, .abi = .gnu } },
.{ .name = "arm64glibc", .query = .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .gnu } },
};
for (cross_compile_shim_targets) |cross_target| {
const cross_resolved_target = b.resolveTargetQuery(cross_target.query);
// Build builtins object for this target
const cross_builtins_obj = b.addObject(.{
.name = b.fmt("roc_builtins_{s}", .{cross_target.name}),
.root_module = b.createModule(.{
.root_source_file = b.path("src/builtins/static_lib.zig"),
.target = cross_resolved_target,
.optimize = optimize,
.strip = optimize != .Debug,
.pic = true,
}),
});
configureBackend(cross_builtins_obj, cross_resolved_target);
// Build interpreter shim library for this target
const cross_shim_lib = b.addLibrary(.{
.name = b.fmt("roc_interpreter_shim_{s}", .{cross_target.name}),
.root_module = b.createModule(.{
.root_source_file = b.path("src/interpreter_shim/main.zig"),
.target = cross_resolved_target,
.optimize = optimize,
.strip = optimize != .Debug,
.pic = true,
}),
.linkage = .static,
});
configureBackend(cross_shim_lib, cross_resolved_target);
roc_modules.addAll(cross_shim_lib);
cross_shim_lib.root_module.addImport("compiled_builtins", compiled_builtins_module);
cross_shim_lib.step.dependOn(&write_compiled_builtins.step);
cross_shim_lib.addObject(cross_builtins_obj);
cross_shim_lib.bundle_compiler_rt = true;
// Copy to target-specific directory for embedding
const copy_cross_shim = b.addUpdateSourceFiles();
copy_cross_shim.addCopyFileToSource(
cross_shim_lib.getEmittedBin(),
b.pathJoin(&.{ "src/cli/targets", cross_target.name, "libroc_interpreter_shim.a" }),
);
exe.step.dependOn(&copy_cross_shim.step);
}
const config = b.addOptions();
config.addOption(bool, "llvm", enable_llvm);
exe.root_module.addOptions("config", config);
@ -2001,7 +2123,12 @@ fn install_and_run(
b.getInstallStep().dependOn(&exe.step);
} else {
const install = b.addInstallArtifact(exe, .{});
build_step.dependOn(&install.step);
// Add a step to print success message after build completes
const success_step = PrintBuildSuccessStep.create(b);
success_step.step.dependOn(&install.step);
build_step.dependOn(&success_step.step);
b.getInstallStep().dependOn(&install.step);
const run = b.addRunArtifact(exe);
@ -2508,8 +2635,12 @@ fn generateGlibcStub(b: *std.Build, target: ResolvedTarget, target_name: []const
const libc_so = write_stub.add("libc.so", stub_content);
const copy_stubs = b.addUpdateSourceFiles();
copy_stubs.addCopyFileToSource(libc_so_6, b.pathJoin(&.{ "test/int/platform/targets", target_name, "libc.so.6" }));
copy_stubs.addCopyFileToSource(libc_so, b.pathJoin(&.{ "test/int/platform/targets", target_name, "libc.so" }));
// Platforms that need glibc stubs
const glibc_platforms = [_][]const u8{ "int", "str" };
for (glibc_platforms) |platform| {
copy_stubs.addCopyFileToSource(libc_so_6, b.pathJoin(&.{ "test", platform, "platform/targets", target_name, "libc.so.6" }));
copy_stubs.addCopyFileToSource(libc_so, b.pathJoin(&.{ "test", platform, "platform/targets", target_name, "libc.so" }));
}
copy_stubs.step.dependOn(&write_stub.step);
return copy_stubs;
@ -2522,11 +2653,16 @@ fn generateGlibcStub(b: *std.Build, target: ResolvedTarget, target_name: []const
// Compile the assembly into a proper shared library using Zig's build system
const libc_stub = glibc_stub_build.compileAssemblyStub(b, asm_file, target, .ReleaseSmall);
// Copy the generated files to the target directory
// Copy the generated files to all platforms that use glibc targets
const copy_stubs = b.addUpdateSourceFiles();
copy_stubs.addCopyFileToSource(libc_stub.getEmittedBin(), b.pathJoin(&.{ "test/int/platform/targets", target_name, "libc.so.6" }));
copy_stubs.addCopyFileToSource(libc_stub.getEmittedBin(), b.pathJoin(&.{ "test/int/platform/targets", target_name, "libc.so" }));
copy_stubs.addCopyFileToSource(asm_file, b.pathJoin(&.{ "test/int/platform/targets", target_name, "libc_stub.s" }));
// Platforms that need glibc stubs (have glibc targets defined in their .roc files)
const glibc_platforms = [_][]const u8{ "int", "str" };
for (glibc_platforms) |platform| {
copy_stubs.addCopyFileToSource(libc_stub.getEmittedBin(), b.pathJoin(&.{ "test", platform, "platform/targets", target_name, "libc.so.6" }));
copy_stubs.addCopyFileToSource(libc_stub.getEmittedBin(), b.pathJoin(&.{ "test", platform, "platform/targets", target_name, "libc.so" }));
copy_stubs.addCopyFileToSource(asm_file, b.pathJoin(&.{ "test", platform, "platform/targets", target_name, "libc_stub.s" }));
}
copy_stubs.step.dependOn(&libc_stub.step);
copy_stubs.step.dependOn(&write_stub.step);

View file

@ -46,9 +46,17 @@ pub fn main() !void {
// To avoid false positives, we:
// - Treat src/cli/main.zig as an additional aggregator when scanning @import()
// statements for wired test files.
// - Treat src/cli/test/fx_platform_test.zig as an aggregator since it imports
// fx_test_specs.zig which contains shared test specifications.
if (fileExists("src/cli/main.zig")) {
try mod_files.append(gpa, try gpa.dupe(u8, "src/cli/main.zig"));
}
if (fileExists("src/cli/test/fx_platform_test.zig")) {
try mod_files.append(gpa, try gpa.dupe(u8, "src/cli/test/fx_platform_test.zig"));
}
if (fileExists("src/cli/test/test_runner.zig")) {
try mod_files.append(gpa, try gpa.dupe(u8, "src/cli/test/test_runner.zig"));
}
if (test_files.items.len == 0) {
try stdout.print("{s}[OK]{s} No test files found to check\n", .{ TermColor.green, TermColor.reset });

View file

@ -1,399 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Colors for output (minimal usage)
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
# Test configuration
# ROC_CLI: compiler binary produced by `zig build`.
ROC_CLI="./zig-out/bin/roc"
# INT_APP: the Roc application that every build test below compiles.
INT_APP="test/int/app.roc"
# TEST_OUTPUT_DIR: scratch dir for built binaries and per-test logs;
# removed by cleanup() on script exit.
TEST_OUTPUT_DIR="tmp_test_outputs"
# Supported targets for cross-compilation
CROSS_TARGETS=(
"x64musl"
"arm64musl"
"x64glibc"
"arm64glibc"
)
# Test results tracking
# Mutated by run_test() and the test_* suites; read by print_summary().
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0
FAILED_TESTS=()
# Output helpers. PASS/FAIL markers are colorized via GREEN/RED/NC;
# printf interprets the octal escapes in those variables the same way
# `echo -e` does, and `%b` preserves escape handling for the message.
print_header() {
    # Banner printed once at the start of the run.
    printf '%s\n' \
        "================================" \
        " Roc Int Platform Test Suite " \
        "================================" \
        ""
}
print_section() {
    # Section divider: ">>> <title>".
    printf '>>> %s\n' "$1"
}
print_success() {
    # Green PASS marker followed by the test description.
    printf "${GREEN}PASS${NC} %b\n" "$1"
}
print_error() {
    # Red FAIL marker followed by the test description.
    printf "${RED}FAIL${NC} %b\n" "$1"
}
print_info() {
    # Neutral informational line.
    printf 'INFO %s\n' "$1"
}
# Portable timeout wrapper:
# - Uses GNU coreutils 'timeout' if available
# - Falls back to 'gtimeout' (Homebrew coreutils on macOS)
# - Otherwise uses a shell-based timer that sends SIGTERM after N seconds
# Usage: run_with_timeout <seconds> <command> [args...]
# Returns the command's exit status. On the shell fallback path a timed-out
# command is killed with SIGTERM, so callers see 143 (128+15) rather than
# coreutils' 124 — callers here only distinguish zero from non-zero.
run_with_timeout() {
local seconds="$1"; shift
if command -v timeout >/dev/null 2>&1; then
timeout "${seconds}s" "$@"
return $?
elif command -v gtimeout >/dev/null 2>&1; then
gtimeout "${seconds}s" "$@"
return $?
else
# Fallback: run the command in a background subshell and arm a watchdog
# that SIGTERMs it if it is still alive after $seconds.
( "$@" ) &
local cmd_pid=$!
# kill -0 probes liveness first so we do not TERM an unrelated process
# that reused the PID after the command already exited.
( sleep "$seconds"; kill -0 "$cmd_pid" 2>/dev/null && kill -TERM "$cmd_pid" 2>/dev/null ) &
local timer_pid=$!
wait "$cmd_pid"
local exit_code=$?
# Best-effort teardown of the watchdog; its sleep child may linger
# briefly until the sleep expires, which is harmless.
kill -TERM "$timer_pid" 2>/dev/null || true
return "$exit_code"
fi
}
cleanup() {
    # Remove the scratch output directory, if this run created one.
    # Registered as an EXIT trap by main(), so it must always return 0.
    if [ -d "$TEST_OUTPUT_DIR" ]; then
        rm -rf -- "$TEST_OUTPUT_DIR"
    fi
}
# Prepare the scratch directory and verify prerequisites exist.
# Exits the whole script (status 1) if the compiler binary or the int
# test app is missing.
setup() {
# Create output directory
mkdir -p "$TEST_OUTPUT_DIR"
# Check if roc CLI exists
if [ ! -f "$ROC_CLI" ]; then
print_error "Roc CLI not found at $ROC_CLI"
print_info "Please run 'zig build' first to build the Roc compiler"
exit 1
fi
# Check if int app exists
if [ ! -f "$INT_APP" ]; then
print_error "Int test app not found at $INT_APP"
exit 1
fi
}
# run_test <name> <command> <expected-substring>
# Runs <command> via eval, capturing stdout+stderr to a numbered log file
# under $TEST_OUTPUT_DIR. Pass criteria: the command exits 0 AND, when
# <expected-substring> is non-empty, grep finds it in the log.
# Mutates TESTS_RUN/TESTS_PASSED/TESTS_FAILED/FAILED_TESTS.
# Returns 0 on pass, 1 on fail.
run_test() {
local test_name="$1"
local test_cmd="$2"
local expected_output="$3"
TESTS_RUN=$((TESTS_RUN + 1))
print_info "Running: $test_name"
echo " Command: $test_cmd"
# eval is needed because test_cmd arrives as one string with embedded
# flags; callers fully control its contents (no untrusted input).
if eval "$test_cmd" > "$TEST_OUTPUT_DIR/test_$TESTS_RUN.out" 2>&1; then
if [ -n "$expected_output" ]; then
# Check if expected output is present
if grep -q "$expected_output" "$TEST_OUTPUT_DIR/test_$TESTS_RUN.out"; then
print_success "$test_name"
TESTS_PASSED=$((TESTS_PASSED + 1))
return 0
else
print_error "$test_name - Expected output not found"
echo " Expected: $expected_output"
echo " Got (first 5 lines):"
cat "$TEST_OUTPUT_DIR/test_$TESTS_RUN.out" | head -5
echo " NOTE: For complete output, run: cat $TEST_OUTPUT_DIR/test_$TESTS_RUN.out"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$test_name")
return 1
fi
else
# No expected substring: exit status 0 alone counts as a pass.
print_success "$test_name"
TESTS_PASSED=$((TESTS_PASSED + 1))
return 0
fi
else
print_error "$test_name - Command failed"
# Show more complete output for arm64glibc debugging
if [[ "$test_name" == *"arm64glibc"* ]]; then
echo " Complete error output for arm64glibc debugging:"
cat "$TEST_OUTPUT_DIR/test_$TESTS_RUN.out"
else
echo " Error output (first 10 lines):"
cat "$TEST_OUTPUT_DIR/test_$TESTS_RUN.out" | head -10
echo " NOTE: This is a summary of the error output."
echo " For complete output, run: cat $TEST_OUTPUT_DIR/test_$TESTS_RUN.out"
fi
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$test_name")
return 1
fi
}
# Build the int app for the host platform and run the produced binary.
# Mutates the global test counters; failures are recorded in FAILED_TESTS.
test_native_execution() {
    print_section "Testing Native Build and Execution"
    local native_output="$TEST_OUTPUT_DIR/int_app_native"
    # Test native build (should work on current platform)
    run_test "Native build" \
        "$ROC_CLI build --output=$native_output $INT_APP" \
        ""
    # Verify the executable was created
    if [ ! -f "$native_output" ]; then
        print_error "Native executable not created"
        TESTS_FAILED=$((TESTS_FAILED + 1))
        FAILED_TESTS+=("native executable creation")
        return 1
    fi
    print_success "Native executable created"
    # Show executable info
    if command -v file >/dev/null 2>&1; then
        echo " File type: $(file "$native_output")"
    fi
    # Make sure it's executable
    chmod +x "$native_output"
    # Test execution - the int platform should run the host which calls the app functions
    print_info "Testing native execution..."
    local exec_output="$TEST_OUTPUT_DIR/native_exec.out"
    # BUG FIX: previously `local exit_code=$?` was read inside the `then`
    # branch of `if run_with_timeout ...`, where $? is always 0, which made
    # the "exited with code N" branch unreachable. Capture the real status
    # with `|| exit_code=$?`, which is also safe under `set -e`.
    local exit_code=0
    run_with_timeout 10 "$native_output" > "$exec_output" 2>&1 || exit_code=$?
    if [ "$exit_code" -eq 0 ]; then
        print_success "Native executable runs and exits successfully"
        # Show what the executable outputs (useful for debugging)
        if [ -s "$exec_output" ]; then
            echo " Output:"
            head -5 "$exec_output" | sed 's/^/ /'
        fi
        TESTS_PASSED=$((TESTS_PASSED + 1))
    elif [ "$exit_code" -eq 124 ] || [ "$exit_code" -ge 128 ]; then
        # 124 = coreutils timeout; >=128 = killed by a signal (including the
        # shell fallback's SIGTERM, 143) or crashed.
        print_error "Native executable timed out or crashed"
        echo " Output (first 10 lines):"
        head -10 "$exec_output" | sed 's/^/ /'
        echo " NOTE: For complete output, run: cat $exec_output"
        TESTS_FAILED=$((TESTS_FAILED + 1))
        FAILED_TESTS+=("native execution timeout")
    else
        # Ordinary non-zero exit from the program itself.
        print_error "Native executable exited with code $exit_code"
        echo " Output (first 10 lines):"
        head -10 "$exec_output" | sed 's/^/ /'
        echo " NOTE: For complete output, run: cat $exec_output"
        TESTS_FAILED=$((TESTS_FAILED + 1))
        FAILED_TESTS+=("native execution exit code")
    fi
    TESTS_RUN=$((TESTS_RUN + 1))
}
# Cross-compile the int app to every target in CROSS_TARGETS and confirm
# an executable is produced for each. Mutates the global counters (via
# run_test, and directly for the file-existence checks).
test_cross_compilation() {
print_section "Testing Cross-Compilation"
for target in "${CROSS_TARGETS[@]}"; do
local output_name="$TEST_OUTPUT_DIR/int_app_$target"
# Test cross-compilation build
run_test "Cross-compile to $target" \
"$ROC_CLI build --target=$target --output=$output_name $INT_APP" \
""
# Check if the executable was created
if [ -f "$output_name" ]; then
print_success "Executable created for $target"
# Show some info about the generated executable
if command -v file >/dev/null 2>&1; then
echo " File info: $(file "$output_name")"
fi
# NOTE(review): uname -m prints e.g. x86_64/aarch64, which never
# substring-matches target names like "x64musl"/"arm64glibc", so this
# ldd branch appears unreachable — confirm the intended match.
if command -v ldd >/dev/null 2>&1 && [[ "$target" == *"$(uname -m)"* ]]; then
echo " Dependencies:"
ldd "$output_name" 2>/dev/null | head -5 || echo " (static or incompatible)"
fi
else
print_error "Executable not created for $target"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$target executable creation")
fi
done
}
# Run `zig build` and verify each cross target directory contains the
# expected artifacts (libhost.a always; libc stub files for glibc targets).
# Mutates the global counters on failure.
test_platform_build() {
print_section "Testing Platform Build System"
# Test that platform libraries are built
run_test "Build platform libraries" \
"zig build" \
""
# Check that target directories exist with expected files
for target in "${CROSS_TARGETS[@]}"; do
local target_dir="test/int/platform/targets/$target"
if [ -d "$target_dir" ]; then
print_success "Target directory exists: $target"
# Check for expected files
local expected_files=("libhost.a")
if [[ "$target" == *"glibc"* ]]; then
# glibc targets additionally ship the generated libc stubs.
expected_files+=("libc.so.6" "libc.so" "libc_stub.s")
fi
for file in "${expected_files[@]}"; do
if [ -f "$target_dir/$file" ]; then
echo " $file: present"
else
print_error " $file missing in $target"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$target/$file")
fi
done
else
print_error "Target directory missing: $target"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$target directory")
fi
done
}
# Validate the generated glibc stub assembly for each glibc target: the
# stub file must exist, export the essential symbols, and contain the
# architecture-appropriate instructions.
# Mutates TESTS_FAILED/FAILED_TESTS on any failure.
test_glibc_stubs() {
    print_section "Testing Glibc Stub Generation"
    for target in "x64glibc" "arm64glibc"; do
        local stub_file="test/int/platform/targets/$target/libc_stub.s"
        if [ -f "$stub_file" ]; then
            print_success "Glibc stub exists: $target"
            # Check that essential symbols are present
            local essential_symbols=("__libc_start_main" "abort" "getauxval" "_IO_stdin_used")
            local missing_symbols=0
            for symbol in "${essential_symbols[@]}"; do
                if grep -q "$symbol" "$stub_file"; then
                    echo " $symbol: present"
                else
                    print_error " Symbol $symbol missing from $target"
                    TESTS_FAILED=$((TESTS_FAILED + 1))
                    FAILED_TESTS+=("$target $symbol")
                    missing_symbols=$((missing_symbols + 1))
                fi
            done
            if [ $missing_symbols -eq 0 ]; then
                echo " All essential symbols present"
            fi
            # Check architecture-specific instructions.
            # BUG FIX: these failures used to be printed but never counted,
            # so print_summary could report success despite a FAIL line here.
            if [[ "$target" == "x64glibc" ]]; then
                if grep -q "xor %rax" "$stub_file"; then
                    echo " x86_64 assembly: correct"
                else
                    print_error " x86_64 assembly instructions missing from $target"
                    TESTS_FAILED=$((TESTS_FAILED + 1))
                    FAILED_TESTS+=("$target assembly")
                fi
            elif [[ "$target" == "arm64glibc" ]]; then
                if grep -q "mov x0" "$stub_file"; then
                    echo " ARM64 assembly: correct"
                else
                    print_error " ARM64 assembly instructions missing from $target"
                    TESTS_FAILED=$((TESTS_FAILED + 1))
                    FAILED_TESTS+=("$target assembly")
                fi
            fi
        else
            print_error "Glibc stub missing: $target"
            TESTS_FAILED=$((TESTS_FAILED + 1))
            FAILED_TESTS+=("$target stub")
        fi
    done
}
# Print the final pass/fail counts and list every failed test.
# Returns 0 when everything passed, 1 otherwise (used by main for the
# script's exit code).
print_summary() {
    echo
    print_section "Test Summary"
    printf 'Total tests: %s\n' "$TESTS_RUN"
    echo -e "${GREEN}Passed: $TESTS_PASSED${NC}"
    echo -e "${RED}Failed: $TESTS_FAILED${NC}"
    # Guard clause: nothing failed, report success and stop here.
    if [ "$TESTS_FAILED" -eq 0 ]; then
        echo
        print_success "All tests passed"
        return 0
    fi
    echo
    echo "Failed tests:"
    local failed_test
    for failed_test in "${FAILED_TESTS[@]}"; do
        echo " - $failed_test"
    done
    echo
    print_error "Some tests failed"
    return 1
}
# Entry point: build checks first, then stub validation, then
# cross-compilation, and finally a native run of the produced binary.
# Exit status mirrors print_summary (0 = all passed, 1 = any failure).
main() {
print_header
# Setup
setup
# From here on, the scratch directory is removed on any exit path.
trap cleanup EXIT
# Run test suites
test_platform_build
test_glibc_stubs
test_cross_compilation
test_native_execution
# Print summary and exit with appropriate code
if print_summary; then
exit 0
else
exit 1
fi
}
# Handle command line arguments
# Only --help/-h is recognized; any other invocation (including no
# arguments) runs the full suite via main.
case "${1:-}" in
--help|-h)
echo "Usage: $0 [--help]"
echo
echo "Test script for Roc's int platform cross-compilation."
echo "This script tests:"
echo " - Platform build system"
echo " - Glibc stub generation"
echo " - Native execution"
echo " - Cross-compilation to all supported targets"
echo
echo "Make sure to run 'zig build' first to build the Roc compiler."
exit 0
;;
*)
main "$@"
;;
esac

View file

@ -46,3 +46,12 @@
fun:__libc_malloc_impl
fun:*SmallVector*
}
{
musl-mallocng-enframe-llvm-mcregisterinfo
Memcheck:Cond
fun:enframe
fun:__libc_malloc_impl
...
fun:*llvm*MCRegisterInfo*
}

View file

@ -227,7 +227,13 @@ fn getNewZigFiles(allocator: Allocator) !PathList {
}
fn fileHasTopLevelComment(allocator: Allocator, file_path: []const u8) !bool {
const source = try readSourceFile(allocator, file_path);
const source = readSourceFile(allocator, file_path) catch |err| {
if (err == error.FileNotFound) {
// File was deleted but still shows in git diff - skip it
return true;
}
return err;
};
defer allocator.free(source);
return std.mem.indexOf(u8, source, "//!") != null;

View file

@ -288,6 +288,19 @@ pub const Store = struct {
// We deserialize by overwriting the Serialized memory with the runtime struct.
const store = @as(*Store, @ptrFromInt(@intFromPtr(self)));
// Check struct sizes - if Store > Serialized, we'd write past the end!
comptime {
const store_size = @sizeOf(Store);
const serialized_size = @sizeOf(Serialized);
if (store_size > serialized_size) {
@compileError(std.fmt.comptimePrint(
"STRUCT SIZE MISMATCH: Store ({d} bytes) > Serialized ({d} bytes). " ++
"Writing Store to Serialized memory will corrupt adjacent data!",
.{ store_size, serialized_size },
));
}
}
store.* = Store{
.interner = self.interner.deserialize(offset).*,
.attributes = self.attributes.deserialize(offset).*,

View file

@ -38,6 +38,69 @@ pub fn Scratch(comptime T: type) type {
return false;
}
/// Check if a value is in the array starting from a given position.
/// Note: If checking multiple values against the same range, use `setViewFrom()`
/// to build a SetView once and call `contains()` on it multiple times.
pub fn containsFrom(self: *const Self, start: u32, val: T) bool {
    // Linear scan of the tail starting at `start`, delegated to the
    // standard library (same `==` comparison as a hand-written loop).
    const tail = self.items.items[@intCast(start)..];
    return std.mem.indexOfScalar(T, tail, val) != null;
}
/// A view into a range of the scratch buffer optimized for membership queries.
/// For small ranges, uses linear scan. For larger ranges, uses a hash set.
pub const SetView = struct {
range: []const T,
set: ?std.AutoHashMapUnmanaged(T, void),
// Ranges at or below this length are scanned linearly; building a
// hash set only pays off for larger ranges.
const hash_threshold = 16;
pub fn init(items: []const T) SetView {
if (items.len <= hash_threshold) {
return .{ .range = items, .set = null };
}
// NOTE(review): allocates from std.heap.page_allocator so the view
// needs no caller-supplied allocator; confirm that is acceptable on
// the call sites that use this outside of tests.
var set = std.AutoHashMapUnmanaged(T, void){};
set.ensureTotalCapacity(std.heap.page_allocator, @intCast(items.len)) catch {
// Fall back to linear scan on allocation failure
return .{ .range = items, .set = null };
};
for (items) |item| {
set.putAssumeCapacity(item, {});
}
return .{ .range = items, .set = set };
}
// Frees the hash set if one was built; must pass the same allocator
// that init() used (page_allocator).
pub fn deinit(self: *SetView) void {
if (self.set) |*set| {
set.deinit(std.heap.page_allocator);
}
}
// Membership test: hash-set lookup when a set was built, otherwise a
// linear scan over the captured range.
pub fn contains(self: *const SetView, val: T) bool {
if (self.set) |set| {
return set.contains(val);
}
for (self.range) |item| {
if (item == val) {
return true;
}
}
return false;
}
};
/// Create a SetView for efficient repeated membership queries on a range.
/// For small ranges, the SetView uses linear scan.
/// For larger ranges, it builds a hash set for O(1) lookups.
/// Remember to call deinit() on the returned SetView when done.
pub fn setViewFrom(self: *const Self, start: u32) SetView {
return SetView.init(self.items.items[@intCast(start)..]);
}
/// Places a new index of type `T` in the scratch
pub fn append(self: *Self, idx: T) std.mem.Allocator.Error!void {
try self.items.append(idx);

View file

@ -22,8 +22,55 @@ pub fn generateComprehensiveStub(
else => try writer.writeAll(" ret\n\n"),
}
// Essential libc symbols that must be present
const essential_symbols = [_][]const u8{ "__libc_start_main", "abort", "getauxval" };
// Essential libc symbols that must be present for linking
// These are resolved at runtime from real glibc
const essential_symbols = [_][]const u8{
// Core libc
"__libc_start_main",
"abort",
"getauxval",
"__tls_get_addr", // Thread-local storage
"__errno_location", // Thread-safe errno access
// Memory operations
"memcpy",
"memmove",
"mmap",
"mmap64",
"munmap",
"mremap",
"msync",
// File I/O
"close",
"read",
"write",
"readv",
"writev",
"openat64",
"lseek64",
"pread64",
"pwritev64",
"flock",
"copy_file_range",
"sendfile64",
// Path operations
"realpath",
"readlink",
// Environment
"getenv",
"isatty",
"sysconf", // System configuration (page size, etc.)
// Signal handling
"sigaction",
"sigemptyset",
// Dynamic linker
"dl_iterate_phdr",
"getcontext",
// Math functions
"fmod",
"fmodf",
"trunc",
"truncf",
};
for (essential_symbols) |symbol| {
try writer.print(".balign 8\n.globl {s}\n.type {s}, %function\n{s}:\n", .{ symbol, symbol, symbol });
@ -36,7 +83,7 @@ pub fn generateComprehensiveStub(
else => try writer.writeAll(" ret\n\n"),
}
} else {
// Other symbols return 0
// Other symbols return 0 or are no-ops (resolved at runtime)
switch (target_arch) {
.x86_64 => try writer.writeAll(" xor %rax, %rax\n ret\n\n"),
.aarch64 => try writer.writeAll(" mov x0, #0\n ret\n\n"),
@ -53,6 +100,14 @@ pub fn generateComprehensiveStub(
} else {
try writer.writeAll(".long 1\n");
}
// environ is a global variable (char **environ)
try writer.writeAll(".globl environ\n.type environ, %object\nenviron: ");
if (ptr_width == 8) {
try writer.writeAll(".quad 0\n");
} else {
try writer.writeAll(".long 0\n");
}
}
/// Compile assembly stub to shared library using Zig's build system

View file

@ -354,6 +354,7 @@ pub const RocModules = struct {
unbundle: *Module,
base58: *Module,
lsp: *Module,
roc_target: *Module,
pub fn create(b: *Build, build_options_step: *Step.Options, zstd: ?*Dependency) RocModules {
const self = RocModules{
@ -386,6 +387,7 @@ pub const RocModules = struct {
.unbundle = b.addModule("unbundle", .{ .root_source_file = b.path("src/unbundle/mod.zig") }),
.base58 = b.addModule("base58", .{ .root_source_file = b.path("src/base58/mod.zig") }),
.lsp = b.addModule("lsp", .{ .root_source_file = b.path("src/lsp/mod.zig") }),
.roc_target = b.addModule("roc_target", .{ .root_source_file = b.path("src/target/mod.zig") }),
};
// Link zstd to bundle module if available (it's unsupported on wasm32, so don't link it)
@ -466,6 +468,7 @@ pub const RocModules = struct {
step.root_module.addImport("unbundle", self.unbundle);
step.root_module.addImport("base58", self.base58);
step.root_module.addImport("roc_target", self.roc_target);
}
pub fn addAllToTest(self: RocModules, step: *Step.Compile) void {

View file

@ -15,6 +15,7 @@
//!
//! Each function documents its ownership semantics in its doc comment.
const std = @import("std");
const builtin = @import("builtin");
const utils = @import("utils.zig");
const UpdateMode = utils.UpdateMode;
@ -142,6 +143,17 @@ pub const RocList = extern struct {
const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
const slice_mask = self.seamlessSliceMask();
const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
// Verify the computed allocation pointer is properly aligned
if (comptime builtin.mode == .Debug) {
if (alloc_ptr != 0 and alloc_ptr % @alignOf(usize) != 0) {
std.debug.panic(
"getAllocationDataPtr: misaligned ptr=0x{x} (bytes=0x{x}, cap_or_alloc=0x{x}, is_slice={})",
.{ alloc_ptr, list_alloc_ptr, self.capacity_or_alloc_ptr, self.isSeamlessSlice() },
);
}
}
return @as(?[*]u8, @ptrFromInt(alloc_ptr));
}
@ -154,6 +166,13 @@ pub const RocList = extern struct {
if (self.isSeamlessSlice() and elements_refcounted) {
// Seamless slices always refer to an underlying allocation.
const alloc_ptr = self.getAllocationDataPtr() orelse unreachable;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(alloc_ptr);
if (ptr_int % @sizeOf(usize) != 0) {
@panic("RocList.getAllocationElementCount: alloc_ptr is not properly aligned");
}
}
// - 1 is refcount.
// - 2 is size on heap.
const ptr = @as([*]usize, @ptrCast(@alignCast(alloc_ptr))) - 2;
@ -167,9 +186,17 @@ pub const RocList = extern struct {
// It will put the allocation size on the heap to enable the seamless slice to free the underlying allocation.
fn setAllocationElementCount(self: RocList, elements_refcounted: bool) void {
if (elements_refcounted and !self.isSeamlessSlice()) {
const alloc_ptr = self.getAllocationDataPtr();
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(alloc_ptr);
if (ptr_int % @sizeOf(usize) != 0) {
@panic("RocList.setAllocationElementCount: alloc_ptr is not properly aligned");
}
}
// - 1 is refcount.
// - 2 is size on heap.
const ptr = @as([*]usize, @ptrCast(@alignCast(self.getAllocationDataPtr()))) - 2;
const ptr = @as([*]usize, @ptrCast(@alignCast(alloc_ptr))) - 2;
ptr[0] = self.length;
}
}
@ -178,6 +205,13 @@ pub const RocList = extern struct {
// If the list is unique and not a seamless slice, the length needs to be store on the heap if the elements are refcounted.
if (elements_refcounted and self.isUnique() and !self.isSeamlessSlice()) {
if (self.getAllocationDataPtr()) |source| {
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(source);
if (ptr_int % @sizeOf(usize) != 0) {
@panic("RocList.incref: source is not properly aligned");
}
}
// - 1 is refcount.
// - 2 is size on heap.
const ptr = @as([*]usize, @ptrCast(@alignCast(source))) - 2;
@ -228,13 +262,32 @@ pub const RocList = extern struct {
}
fn refcount(self: RocList) usize {
// Reduced debug output - only print on potential issues
if (self.getCapacity() == 0 and !self.isSeamlessSlice()) {
// the zero-capacity is Clone, copying it will not leak memory
return 1;
}
const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(self.getAllocationDataPtr())));
return (ptr - 1)[0];
const alloc_ptr = self.getAllocationDataPtr();
// Verify alignment before @alignCast
if (alloc_ptr) |non_null_ptr| {
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(non_null_ptr);
if (ptr_int % @sizeOf(usize) != 0) {
std.debug.panic("RocList.refcount: alloc_ptr=0x{x} is not {}-byte aligned (bytes=0x{x}, cap=0x{x})", .{
ptr_int,
@sizeOf(usize),
@intFromPtr(self.bytes),
self.capacity_or_alloc_ptr,
});
}
}
const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(non_null_ptr)));
const refcount_val = (ptr - 1)[0];
return refcount_val;
} else {
@panic("RocList.refcount: getAllocationDataPtr returned null");
}
}
pub fn makeUnique(
@ -893,12 +946,33 @@ pub fn listSublist(
return output;
} else {
if (list.isUnique()) {
// Store original element count for proper cleanup when the slice is freed.
// When the seamless slice is later decreffed, it will decref ALL elements
// starting from the original allocation pointer, not just the slice elements.
list.setAllocationElementCount(elements_refcounted);
}
const list_alloc_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT;
const slice_alloc_ptr = list.capacity_or_alloc_ptr;
const slice_mask = list.seamlessSliceMask();
const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
// Verify the encoded pointer will decode correctly
if (comptime builtin.mode == .Debug) {
const test_decode = alloc_ptr << 1;
const original_ptr = if (list.isSeamlessSlice())
slice_alloc_ptr << 1
else
@intFromPtr(source_ptr);
if (test_decode != (original_ptr & ~@as(usize, 1))) {
@panic("listSublist: encoding error");
}
// Verify alignment of the original allocation pointer
if (original_ptr % @alignOf(usize) != 0) {
@panic("listSublist: misaligned original ptr");
}
}
return RocList{
.bytes = source_ptr + start * element_width,
.length = keep_len,
@ -1156,27 +1230,37 @@ pub fn listConcat(
) callconv(.c) RocList {
// Early return for empty lists - avoid unnecessary allocations
if (list_a.isEmpty()) {
if (list_b.getCapacity() == 0) {
// b could be a seamless slice, so we still need to decref.
if (list_b.isEmpty()) {
// Both are empty, return list_a and clean up list_b
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
return list_a;
} else {
// list_b has capacity, return it and consume list_a
// list_a is empty, list_b has elements - return list_b
// list_a might still need decref if it has capacity
list_a.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
return list_b;
}
} else if (list_b.isEmpty()) {
if (list_a.getCapacity() == 0) {
// a could be a seamless slice, so we still need to decref.
list_a.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
return list_b;
} else {
// we must consume this list. Even though it has no elements, it could still have capacity
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
// list_b is empty, list_a has elements - return list_a
// list_b might still need decref if it has capacity
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
return list_a;
}
return list_a;
}
} else if (list_a.isUnique()) {
// Check if both lists share the same underlying allocation.
// This can happen when the same list is passed as both arguments (e.g., in repeat_helper).
const same_allocation = blk: {
const alloc_a = list_a.getAllocationDataPtr();
const alloc_b = list_b.getAllocationDataPtr();
break :blk (alloc_a != null and alloc_a == alloc_b);
};
// If they share the same allocation, we must:
// 1. NOT use the unique paths (reallocate might free/move the allocation)
// 2. Only decref once at the end (to avoid double-free)
// Instead, fall through to the general path that allocates a new list.
if (!same_allocation and list_a.isUnique()) {
const total_length: usize = list_a.len() + list_b.len();
const resized_list_a = list_a.reallocate(
@ -1211,7 +1295,7 @@ pub fn listConcat(
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
return resized_list_a;
} else if (list_b.isUnique()) {
} else if (!same_allocation and list_b.isUnique()) {
const total_length: usize = list_a.len() + list_b.len();
const resized_list_b = list_b.reallocate(
@ -1277,8 +1361,11 @@ pub fn listConcat(
}
// decrement list a and b.
// If they share the same allocation, only decref once to avoid double-free.
list_a.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
if (!same_allocation) {
list_b.decref(alignment, element_width, elements_refcounted, dec_context, dec, roc_ops);
}
return output;
}
@ -1474,6 +1561,12 @@ pub fn copy_i64(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to U128 and copies from src to dest.
pub fn copy_u128(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_val = @intFromPtr(dest.?);
const src_val = @intFromPtr(src.?);
if (dest_val % @alignOf(u128) != 0) std.debug.panic("[copy_u128] dest alignment error: ptr=0x{x}", .{dest_val});
if (src_val % @alignOf(u128) != 0) std.debug.panic("[copy_u128] src alignment error: ptr=0x{x}", .{src_val});
}
const dest_ptr = @as(*u128, @ptrCast(@alignCast(dest.?)));
const src_ptr = @as(*u128, @ptrCast(@alignCast(src.?)));
dest_ptr.* = src_ptr.*;
@ -1481,6 +1574,12 @@ pub fn copy_u128(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to I128 and copies from src to dest.
pub fn copy_i128(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_val = @intFromPtr(dest.?);
const src_val = @intFromPtr(src.?);
if (dest_val % @alignOf(i128) != 0) std.debug.panic("[copy_i128] dest alignment error: ptr=0x{x}", .{dest_val});
if (src_val % @alignOf(i128) != 0) std.debug.panic("[copy_i128] src alignment error: ptr=0x{x}", .{src_val});
}
const dest_ptr = @as(*i128, @ptrCast(@alignCast(dest.?)));
const src_ptr = @as(*i128, @ptrCast(@alignCast(src.?)));
dest_ptr.* = src_ptr.*;
@ -1488,6 +1587,16 @@ pub fn copy_i128(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to Boxes and copies from src to dest.
pub fn copy_box(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_addr = @intFromPtr(dest);
const src_addr = @intFromPtr(src);
if (dest_addr % @alignOf(usize) != 0) {
std.debug.panic("[copy_box] dest=0x{x} not aligned to {} bytes", .{ dest_addr, @alignOf(usize) });
}
if (src_addr % @alignOf(usize) != 0) {
std.debug.panic("[copy_box] src=0x{x} not aligned to {} bytes", .{ src_addr, @alignOf(usize) });
}
}
const dest_ptr = @as(*usize, @ptrCast(@alignCast(dest)));
const src_ptr = @as(*usize, @ptrCast(@alignCast(src)));
dest_ptr.* = src_ptr.*;
@ -1495,12 +1604,28 @@ pub fn copy_box(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to ZST Boxes and copies from src to dest.
/// src and the size argument are ignored; dest's pointer-sized slot is
/// written with 0 (presumably the null representation of a zero-sized
/// box — confirm against the box layout). In Debug builds, panics if
/// dest is not pointer-aligned before the @alignCast.
pub fn copy_box_zst(dest: Opaque, _: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_addr = @intFromPtr(dest.?);
if (dest_addr % @alignOf(usize) != 0) {
std.debug.panic("[copy_box_zst] dest=0x{x} not aligned to {} bytes", .{ dest_addr, @alignOf(usize) });
}
}
const dest_ptr = @as(*usize, @ptrCast(@alignCast(dest.?)));
dest_ptr.* = 0;
}
/// Specialized copy fn which takes pointers as pointers to Lists and copies from src to dest.
pub fn copy_list(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_addr = @intFromPtr(dest.?);
const src_addr = @intFromPtr(src.?);
if (dest_addr % @alignOf(RocList) != 0) {
@panic("copy_list: dest is not properly aligned for RocList");
}
if (src_addr % @alignOf(RocList) != 0) {
@panic("copy_list: src is not properly aligned for RocList");
}
}
const dest_ptr = @as(*RocList, @ptrCast(@alignCast(dest.?)));
const src_ptr = @as(*RocList, @ptrCast(@alignCast(src.?)));
dest_ptr.* = src_ptr.*;
@ -1508,6 +1633,17 @@ pub fn copy_list(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to ZST Lists and copies from src to dest.
pub fn copy_list_zst(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_addr = @intFromPtr(dest.?);
const src_addr = @intFromPtr(src.?);
const required_alignment = @alignOf(RocList);
if (dest_addr % required_alignment != 0) {
@panic("copy_list_zst: dest is not properly aligned for RocList");
}
if (src_addr % required_alignment != 0) {
@panic("copy_list_zst: src is not properly aligned for RocList");
}
}
const dest_ptr = @as(*RocList, @ptrCast(@alignCast(dest.?)));
const src_ptr = @as(*RocList, @ptrCast(@alignCast(src.?)));
dest_ptr.* = src_ptr.*;
@ -1515,6 +1651,16 @@ pub fn copy_list_zst(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
/// Specialized copy fn which takes pointers as pointers to a RocStr and copies from src to dest.
pub fn copy_str(dest: Opaque, src: Opaque, _: usize) callconv(.c) void {
if (comptime builtin.mode == .Debug) {
const dest_addr = @intFromPtr(dest.?);
const src_addr = @intFromPtr(src.?);
if (dest_addr % @alignOf(RocStr) != 0) {
@panic("copy_str: dest is not properly aligned for RocStr");
}
if (src_addr % @alignOf(RocStr) != 0) {
@panic("copy_str: src is not properly aligned for RocStr");
}
}
const dest_ptr = @as(*RocStr, @ptrCast(@alignCast(dest.?)));
const src_ptr = @as(*RocStr, @ptrCast(@alignCast(src.?)));
dest_ptr.* = src_ptr.*;

View file

@ -17,6 +17,7 @@
//!
//! Each function documents its ownership semantics in its doc comment.
const std = @import("std");
const builtin = @import("builtin");
const RocList = @import("list.zig").RocList;
const RocOps = @import("host_abi.zig").RocOps;
@ -35,9 +36,26 @@ const rcNone = @import("utils.zig").rcNone;
/// The context parameter is expected to be a *RocOps.
fn strDecref(context: ?*anyopaque, element: ?[*]u8) callconv(.c) void {
if (element) |elem_ptr| {
if (comptime builtin.mode == .Debug) {
const elem_addr = @intFromPtr(elem_ptr);
const required_align = @alignOf(RocStr);
if (elem_addr % required_align != 0) {
@panic("strDecref: elem_ptr is not properly aligned for RocStr");
}
}
const str_ptr: *RocStr = @ptrCast(@alignCast(elem_ptr));
const roc_ops: *RocOps = @ptrCast(@alignCast(context.?));
str_ptr.decref(roc_ops);
if (context) |ctx| {
if (comptime builtin.mode == .Debug) {
const ctx_addr = @intFromPtr(ctx);
if (ctx_addr % @alignOf(RocOps) != 0) {
@panic("strDecref: context is not properly aligned for RocOps");
}
}
const roc_ops: *RocOps = @ptrCast(@alignCast(ctx));
str_ptr.decref(roc_ops);
} else {
@panic("strDecref: context is null");
}
}
}
@ -231,6 +249,17 @@ pub const RocStr = extern struct {
const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
const slice_mask = self.seamlessSliceMask();
const alloc_ptr = (str_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
// Verify the computed allocation pointer is properly aligned
if (comptime builtin.mode == .Debug) {
if (alloc_ptr != 0 and alloc_ptr % @alignOf(usize) != 0) {
std.debug.panic(
"RocStr.getAllocationPtr: misaligned ptr=0x{x} (bytes=0x{x}, cap_or_alloc=0x{x}, is_slice={})",
.{ alloc_ptr, str_alloc_ptr, self.capacity_or_alloc_ptr, self.isSeamlessSlice() },
);
}
}
return @as(?[*]u8, @ptrFromInt(alloc_ptr));
}
@ -238,6 +267,13 @@ pub const RocStr = extern struct {
if (!self.isSmallStr()) {
const alloc_ptr = self.getAllocationPtr();
if (alloc_ptr != null) {
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(alloc_ptr);
if (ptr_int % @sizeOf(isize) != 0) {
@panic("RocStr.incref: alloc_ptr is not properly aligned");
}
}
const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(alloc_ptr)));
@import("utils.zig").increfRcPtrC(@as(*isize, @ptrCast(isizes - 1)), @as(isize, @intCast(n)));
}
@ -485,6 +521,14 @@ pub const RocStr = extern struct {
else
self.bytes;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(data_ptr);
if (ptr_int % @sizeOf(usize) != 0) {
@panic("RocStr.refcount: data_ptr is not properly aligned");
}
}
const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(data_ptr)));
return (ptr - 1)[0];
}

View file

@ -292,13 +292,26 @@ pub fn decrefRcPtrC(
}
/// Safely decrements reference count for a potentially null pointer
/// WARNING: This function assumes `bytes` points to 8-byte aligned data.
/// It should NOT be used for seamless slices with non-zero start offsets,
/// as those have misaligned bytes pointers. Use RocList.decref instead.
pub fn decrefCheckNullC(
bytes_or_null: ?[*]u8,
alignment: u32,
elements_refcounted: bool,
roc_ops: *RocOps,
) callconv(.c) void {
// MARKER: This function has been updated with alignment checks 2024-12-06
if (bytes_or_null) |bytes| {
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(bytes);
const expected_align = @sizeOf(isize);
if (ptr_int % expected_align != 0) {
std.debug.panic("DECREF_CHECK_NULL: ptr=0x{x} is not 8-byte aligned!", .{ptr_int});
}
}
const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(bytes)));
return @call(
.always_inline,
@ -320,9 +333,24 @@ pub fn decrefDataPtrC(
const bytes = bytes_or_null orelse return;
const data_ptr = @intFromPtr(bytes);
// Verify original pointer is properly aligned
if (comptime builtin.mode == .Debug) {
if (data_ptr % @alignOf(usize) != 0) {
std.debug.panic("decrefDataPtrC: ORIGINAL data_ptr=0x{x} is not {}-byte aligned!", .{ data_ptr, @alignOf(usize) });
}
}
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const unmasked_ptr = data_ptr & ~tag_mask;
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (unmasked_ptr % @alignOf(isize) != 0) {
std.debug.panic("decrefDataPtrC: unmasked_ptr=0x{x} (data_ptr=0x{x}) is not {}-byte aligned", .{ unmasked_ptr, data_ptr, @alignOf(isize) });
}
}
const isizes: [*]isize = @as([*]isize, @ptrFromInt(unmasked_ptr));
const rc_ptr = isizes - 1;
@ -339,10 +367,26 @@ pub fn increfDataPtrC(
const bytes = bytes_or_null orelse return;
const ptr = @intFromPtr(bytes);
// Verify original pointer is properly aligned (can fail if seamless slice encoding produces bad pointer)
if (comptime builtin.mode == .Debug) {
if (ptr % @alignOf(usize) != 0) {
std.debug.panic("increfDataPtrC: ORIGINAL ptr=0x{x} is not {}-byte aligned!", .{ ptr, @alignOf(usize) });
}
}
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const masked_ptr = ptr & ~tag_mask;
const rc_addr = masked_ptr - @sizeOf(usize);
const isizes: *isize = @as(*isize, @ptrFromInt(masked_ptr - @sizeOf(usize)));
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (rc_addr % @alignOf(isize) != 0) {
std.debug.panic("increfDataPtrC: rc_addr=0x{x} (ptr=0x{x}, masked=0x{x}) is not {}-byte aligned", .{ rc_addr, ptr, masked_ptr, @alignOf(isize) });
}
}
const isizes: *isize = @as(*isize, @ptrFromInt(rc_addr));
return increfRcPtrC(isizes, inc_amount);
}
@ -362,6 +406,13 @@ pub fn freeDataPtrC(
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const masked_ptr = ptr & ~tag_mask;
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (masked_ptr % @alignOf(isize) != 0) {
std.debug.panic("freeDataPtrC: masked_ptr=0x{x} (ptr=0x{x}) is not {}-byte aligned", .{ masked_ptr, ptr, @alignOf(isize) });
}
}
const isizes: [*]isize = @as([*]isize, @ptrFromInt(masked_ptr));
// we always store the refcount right before the data
@ -395,6 +446,15 @@ pub fn decref(
const bytes = bytes_or_null orelse return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(bytes);
const expected_align = @sizeOf(isize);
if (ptr_int % expected_align != 0) {
@panic("decref: bytes pointer is not properly aligned");
}
}
const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(bytes)));
decref_ptr_to_refcount(isizes - 1, alignment, elements_refcounted, roc_ops);
@ -488,6 +548,13 @@ pub fn isUnique(
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const masked_ptr = ptr & ~tag_mask;
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (masked_ptr % @alignOf(isize) != 0) {
std.debug.panic("isUnique: masked_ptr=0x{x} (ptr=0x{x}) is not {}-byte aligned", .{ masked_ptr, ptr, @alignOf(isize) });
}
}
const isizes: [*]isize = @as([*]isize, @ptrFromInt(masked_ptr));
const refcount = (isizes - 1)[0];
@ -536,6 +603,11 @@ pub inline fn rcConstant(refcount: isize) bool {
pub inline fn assertValidRefcount(data_ptr: ?[*]u8) void {
if (builtin.mode != .Debug) return;
if (data_ptr) |ptr| {
// Debug alignment check
const ptr_addr = @intFromPtr(ptr) - @sizeOf(usize);
if (ptr_addr % @sizeOf(usize) != 0) {
std.debug.panic("[assertValidRefcount] ptr=0x{x} is not aligned", .{ptr_addr});
}
const rc_ptr: [*]isize = @ptrCast(@alignCast(ptr - @sizeOf(usize)));
const rc = rc_ptr[0];
if (rc == POISON_VALUE) {
@ -638,6 +710,15 @@ pub fn allocateWithRefcount(
const new_bytes = @as([*]u8, @ptrCast(roc_alloc_args.answer));
const data_ptr = new_bytes + extra_bytes;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(data_ptr);
if (ptr_int % ptr_width != 0) {
@panic("allocateWithRefcount: data_ptr is not properly aligned");
}
}
const refcount_ptr = @as([*]usize, @ptrCast(@as([*]align(ptr_width) u8, @alignCast(data_ptr)) - ptr_width));
refcount_ptr[0] = if (RC_TYPE == .none) REFCOUNT_STATIC_DATA else 1;

View file

@ -353,19 +353,12 @@ pub const ExposedItem = struct {
}
};
/// Represents a field in a record pattern for pattern matching
pub const PatternRecordField = struct {
// Index type for referencing a single PatternRecordField in the node store.
// The `_` makes this a non-exhaustive enum, so any u32 node index is valid.
pub const Idx = enum(u32) { _ };
// A contiguous range of PatternRecordField entries: starting offset plus count.
// `extern` so the layout is stable for serialization — TODO confirm against
// how sibling Span types in this file are used.
pub const Span = extern struct { start: u32, len: u32 };
};
/// Represents an arbitrary precision smallish decimal value
pub const SmallDecValue = struct {
numerator: i16,
denominator_power_of_ten: u8,
/// Convert a small dec to f64 (use for size comparisons)
/// TODO: Review, claude generated
pub fn toF64(self: @This()) f64 {
const numerator_f64 = @as(f64, @floatFromInt(self.numerator));
const divisor = std.math.pow(f64, 10, @as(f64, @floatFromInt(self.denominator_power_of_ten)));
@ -485,7 +478,6 @@ pub const IntValue = struct {
}
/// Calculate the int requirements of an IntValue
/// TODO: Review, claude generated
pub fn toIntRequirements(self: IntValue) types_mod.IntRequirements {
var is_negated = false;
var u128_val: u128 = undefined;
@ -528,8 +520,6 @@ pub const IntValue = struct {
};
}
/// Calculate the frac requirements of an IntValue
/// TODO: Review, claude generated
/// Calculate the frac requirements of an IntValue
pub fn toFracRequirements(self: IntValue) types_mod.FracRequirements {
// Convert to f64 for checking
@ -1030,7 +1020,6 @@ pub fn isCastable(comptime T: type) bool {
TypeAnno.RecordField.Idx,
ExposedItem.Idx,
Expr.Match.BranchPattern.Idx,
PatternRecordField.Idx,
Node.Idx,
TypeVar,
=> true,

View file

@ -91,8 +91,12 @@ scratch_seen_record_fields: base.Scratch(SeenRecordField),
scratch_tags: base.Scratch(types.Tag),
/// Scratch free variables
scratch_free_vars: base.Scratch(Pattern.Idx),
/// Scratch free variables
/// Scratch captures (free variables being collected)
scratch_captures: base.Scratch(Pattern.Idx),
/// Scratch bound variables (for filtering out locally-bound vars from captures)
scratch_bound_vars: base.Scratch(Pattern.Idx),
/// Counter for generating unique malformed import placeholder names
malformed_import_count: u32 = 0,
const Ident = base.Ident;
const Region = base.Region;
@ -209,6 +213,7 @@ pub fn deinit(
self.scratch_tags.deinit();
self.scratch_free_vars.deinit();
self.scratch_captures.deinit();
self.scratch_bound_vars.deinit();
}
/// Options for initializing the canonicalizer.
@ -240,6 +245,7 @@ pub fn init(
.scratch_tags = try base.Scratch(types.Tag).init(gpa),
.scratch_free_vars = try base.Scratch(Pattern.Idx).init(gpa),
.scratch_captures = try base.Scratch(Pattern.Idx).init(gpa),
.scratch_bound_vars = try base.Scratch(Pattern.Idx).init(gpa),
};
// Top-level scope is not a function boundary
@ -2379,8 +2385,10 @@ fn createAnnoOnlyDef(
break :placeholder_check existing_pattern;
},
.not_found => {
// Placeholder is tracked but not found in any scope - this shouldn't happen
// Create a new pattern as fallback
// Placeholder is tracked but not found in current scope chain.
// This can happen if the placeholder was created in a scope that's
// not an ancestor of the current scope. Create a new pattern as fallback;
// any actual errors will be caught later during definition checking.
const pattern = Pattern{
.assign = .{
.ident = ident,
@ -2496,42 +2504,42 @@ const TypeAnnoIdent = struct {
where: ?WhereClause.Span,
};
fn collectBoundVars(self: *Self, pattern_idx: Pattern.Idx, bound_vars: *std.AutoHashMapUnmanaged(Pattern.Idx, void)) !void {
fn collectBoundVarsToScratch(self: *Self, pattern_idx: Pattern.Idx) !void {
const pattern = self.env.store.getPattern(pattern_idx);
switch (pattern) {
.assign => {
try bound_vars.put(self.env.gpa, pattern_idx, {});
try self.scratch_bound_vars.append(pattern_idx);
},
.record_destructure => |destructure| {
for (self.env.store.sliceRecordDestructs(destructure.destructs)) |destruct_idx| {
const destruct = self.env.store.getRecordDestruct(destruct_idx);
switch (destruct.kind) {
.Required => |sub_pattern_idx| try self.collectBoundVars(sub_pattern_idx, bound_vars),
.SubPattern => |sub_pattern_idx| try self.collectBoundVars(sub_pattern_idx, bound_vars),
.Required => |sub_pattern_idx| try self.collectBoundVarsToScratch(sub_pattern_idx),
.SubPattern => |sub_pattern_idx| try self.collectBoundVarsToScratch(sub_pattern_idx),
}
}
},
.tuple => |tuple| {
for (self.env.store.slicePatterns(tuple.patterns)) |elem_pattern_idx| {
try self.collectBoundVars(elem_pattern_idx, bound_vars);
try self.collectBoundVarsToScratch(elem_pattern_idx);
}
},
.applied_tag => |tag| {
for (self.env.store.slicePatterns(tag.args)) |arg_pattern_idx| {
try self.collectBoundVars(arg_pattern_idx, bound_vars);
try self.collectBoundVarsToScratch(arg_pattern_idx);
}
},
.as => |as_pat| {
try bound_vars.put(self.env.gpa, pattern_idx, {});
try self.collectBoundVars(as_pat.pattern, bound_vars);
try self.scratch_bound_vars.append(pattern_idx);
try self.collectBoundVarsToScratch(as_pat.pattern);
},
.list => |list| {
for (self.env.store.slicePatterns(list.patterns)) |elem_idx| {
try self.collectBoundVars(elem_idx, bound_vars);
try self.collectBoundVarsToScratch(elem_idx);
}
if (list.rest_info) |rest| {
if (rest.pattern) |rest_pat_idx| {
try self.collectBoundVars(rest_pat_idx, bound_vars);
try self.collectBoundVarsToScratch(rest_pat_idx);
}
}
},
@ -2861,56 +2869,6 @@ fn checkExposedButNotImplemented(self: *Self) std.mem.Allocator.Error!void {
}
}
fn bringImportIntoScope(
self: *Self,
import: *const AST.Statement,
) void {
// const gpa = self.env.gpa;
// const import_name: []u8 = &.{}; // import.module_name_tok;
// const shorthand: []u8 = &.{}; // import.qualifier_tok;
// const region = Region{
// .start = Region.Position.zero(),
// .end = Region.Position.zero(),
// };
// const res = self.env.imports.getOrInsert(gpa, import_name, shorthand);
// if (res.was_present) {
// _ = self.env.problems.append(Problem.Canonicalize.make(.{ .DuplicateImport = .{
// .duplicate_import_region = region,
// } }));
// }
const exposesSlice = self.parse_ir.store.exposedItemSlice(import.exposes);
for (exposesSlice) |exposed_idx| {
const exposed = self.parse_ir.store.getExposedItem(exposed_idx);
switch (exposed) {
.lower_ident => |ident| {
// TODO handle `as` here using an Alias
// TODO Introduce our import
if (self.parse_ir.tokens.resolveIdentifier(ident.ident)) |_| {
// _ = self.scope.levels.introduce(gpa, &self.env.idents, .ident, .{ .scope_name = ident_idx, .ident = ident_idx });
}
},
.upper_ident => {
// TODO: const alias = Alias{
// .name = imported_type.name,
// .region = ir.env.tag_names.getRegion(imported_type.name),
// .is_builtin = false,
// .kind = .ImportedUnknown,
// };
// const alias_idx = ir.aliases.append(alias);
//
// _ = scope.levels.introduce(.alias, .{
// .scope_name = imported_type.name,
// .alias = alias_idx,
// });
},
.upper_ident_star => {},
}
}
}
fn bringIngestedFileIntoScope(
self: *Self,
import: *const parse.AST.Stmt.Import,
@ -3201,8 +3159,11 @@ fn canonicalizeImportStatement(
.region = region,
} });
// Use a placeholder identifier instead
const placeholder_text = "MALFORMED_IMPORT";
// Use a unique placeholder identifier that starts with '#' to ensure it can't
// collide with user-defined identifiers (# starts a comment in Roc)
var buf: [32]u8 = undefined;
const placeholder_text = std.fmt.bufPrint(&buf, "#malformed_import_{d}", .{self.malformed_import_count}) catch unreachable;
self.malformed_import_count += 1;
break :blk try self.env.insertIdent(base.Ident.for_text(placeholder_text));
}
} else {
@ -4826,16 +4787,18 @@ pub fn canonicalizeExpr(
};
// Determine captures: free variables in body minus variables bound by args
var bound_vars = std.AutoHashMapUnmanaged(Pattern.Idx, void){};
defer bound_vars.deinit(self.env.gpa);
const bound_vars_top = self.scratch_bound_vars.top();
defer self.scratch_bound_vars.clearFrom(bound_vars_top);
for (self.env.store.slicePatterns(args_span)) |arg_pat_idx| {
try self.collectBoundVars(arg_pat_idx, &bound_vars);
try self.collectBoundVarsToScratch(arg_pat_idx);
}
const body_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_body.free_vars);
var bound_vars_view = self.scratch_bound_vars.setViewFrom(bound_vars_top);
defer bound_vars_view.deinit();
for (body_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv) and !bound_vars.contains(fv)) {
if (!self.scratch_captures.contains(fv) and !bound_vars_view.contains(fv)) {
try self.scratch_captures.append(fv);
}
}
@ -4913,15 +4876,21 @@ pub fn canonicalizeExpr(
return CanonicalizedExpr{ .idx = expr_idx, .free_vars = DataSpan.empty() };
},
.field_access => |field_access| {
// Track free vars from receiver and arguments
const free_vars_start = self.scratch_free_vars.top();
// Try module-qualified lookup first (e.g., Json.utf8)
if (try self.tryModuleQualifiedLookup(field_access)) |expr_idx| {
// Module-qualified lookups don't have free vars (they reference external definitions)
return CanonicalizedExpr{ .idx = expr_idx, .free_vars = DataSpan.empty() };
}
// Regular field access canonicalization
const expr_idx = (try self.canonicalizeRegularFieldAccess(field_access)) orelse return null;
const free_vars_span = self.scratch_free_vars.spanFrom(free_vars_start);
return CanonicalizedExpr{
.idx = (try self.canonicalizeRegularFieldAccess(field_access)) orelse return null,
.free_vars = DataSpan.empty(),
.idx = expr_idx,
.free_vars = free_vars_span,
};
},
.local_dispatch => |local_dispatch| {
@ -5590,11 +5559,11 @@ pub fn canonicalizeExpr(
const branch_pat_span = try self.env.store.matchBranchPatternSpanFrom(branch_pat_scratch_top);
// Collect variables bound by the branch pattern(s)
var branch_bound_vars = std.AutoHashMapUnmanaged(Pattern.Idx, void){};
defer branch_bound_vars.deinit(self.env.gpa);
const branch_bound_vars_top = self.scratch_bound_vars.top();
defer self.scratch_bound_vars.clearFrom(branch_bound_vars_top);
for (self.env.store.sliceMatchBranchPatterns(branch_pat_span)) |branch_pat_idx| {
const branch_pat = self.env.store.getMatchBranchPattern(branch_pat_idx);
try self.collectBoundVars(branch_pat.pattern, &branch_bound_vars);
try self.collectBoundVarsToScratch(branch_pat.pattern);
}
// Save position before canonicalizing body so we can filter pattern-bound vars
@ -5617,17 +5586,15 @@ pub fn canonicalizeExpr(
if (can_body.free_vars.len > 0) {
// Copy the free vars we need to filter
const body_free_vars_slice = self.scratch_free_vars.sliceFromSpan(can_body.free_vars);
var filtered_free_vars = std.ArrayListUnmanaged(Pattern.Idx){};
defer filtered_free_vars.deinit(self.env.gpa);
for (body_free_vars_slice) |fv| {
if (!branch_bound_vars.contains(fv)) {
try filtered_free_vars.append(self.env.gpa, fv);
}
}
// Clear back to before body canonicalization and re-add only filtered vars
// Clear back to before body canonicalization
self.scratch_free_vars.clearFrom(body_free_vars_start);
for (filtered_free_vars.items) |fv| {
try self.scratch_free_vars.append(fv);
// Re-add only filtered vars (not bound by branch patterns)
var bound_vars_view = self.scratch_bound_vars.setViewFrom(branch_bound_vars_top);
defer bound_vars_view.deinit();
for (body_free_vars_slice) |fv| {
if (!bound_vars_view.contains(fv)) {
try self.scratch_free_vars.append(fv);
}
}
}
@ -5781,9 +5748,9 @@ fn canonicalizeForLoop(
const ptrn = try self.canonicalizePatternOrMalformed(ast_patt);
// Collect bound vars from pattern
var for_bound_vars = std.AutoHashMapUnmanaged(Pattern.Idx, void){};
defer for_bound_vars.deinit(self.env.gpa);
try self.collectBoundVars(ptrn, &for_bound_vars);
const for_bound_vars_top = self.scratch_bound_vars.top();
defer self.scratch_bound_vars.clearFrom(for_bound_vars_top);
try self.collectBoundVarsToScratch(ptrn);
// Canonicalize the body
const body = blk: {
@ -5794,8 +5761,10 @@ fn canonicalizeForLoop(
// Copy free vars into captures, excluding pattern-bound vars
const body_free_vars_slice = self.scratch_free_vars.sliceFromSpan(body_expr.free_vars);
var bound_vars_view = self.scratch_bound_vars.setViewFrom(for_bound_vars_top);
defer bound_vars_view.deinit();
for (body_free_vars_slice) |fv| {
if (!for_bound_vars.contains(fv)) {
if (!bound_vars_view.contains(fv)) {
try captures.put(self.env.gpa, fv, {});
}
}
@ -6370,7 +6339,8 @@ fn canonicalizePatternOrMalformed(
}
}
fn canonicalizePattern(
/// Converts an AST pattern into a canonical pattern, introducing identifiers into scope.
pub fn canonicalizePattern(
self: *Self,
ast_pattern_idx: AST.Pattern.Idx,
) std.mem.Allocator.Error!?Pattern.Idx {
@ -8654,12 +8624,9 @@ fn canonicalizeBlock(self: *Self, e: AST.Block) std.mem.Allocator.Error!Canonica
// Keep track of the start position for statements
const stmt_start = self.env.store.scratch.?.statements.top();
// TODO Use a temporary scratch space for the block's free variables
//
// I apologize for leaving these AutoHashMapUnmanaged's here ... but it's a workaround
// to land a working closure capture implementation, and we can optimize this later. Forgive me.
var bound_vars = std.AutoHashMapUnmanaged(Pattern.Idx, void){};
defer bound_vars.deinit(self.env.gpa);
// Track bound variables using scratch space (for filtering out locally-bound vars from captures)
const bound_vars_top = self.scratch_bound_vars.top();
defer self.scratch_bound_vars.clearFrom(bound_vars_top);
const captures_top = self.scratch_captures.top();
defer self.scratch_captures.clearFrom(captures_top);
@ -8765,19 +8732,19 @@ fn canonicalizeBlock(self: *Self, e: AST.Block) std.mem.Allocator.Error!Canonica
if (stmt_result.canonicalized_stmt) |canonicailzed_stmt| {
try self.env.store.addScratchStatement(canonicailzed_stmt.idx);
// Collect bound variables for the
// Collect bound variables for the block
const cir_stmt = self.env.store.getStatement(canonicailzed_stmt.idx);
switch (cir_stmt) {
.s_decl => |decl| try self.collectBoundVars(decl.pattern, &bound_vars),
.s_decl_gen => |decl| try self.collectBoundVars(decl.pattern, &bound_vars),
.s_var => |var_stmt| try self.collectBoundVars(var_stmt.pattern_idx, &bound_vars),
.s_decl => |decl| try self.collectBoundVarsToScratch(decl.pattern),
.s_decl_gen => |decl| try self.collectBoundVarsToScratch(decl.pattern),
.s_var => |var_stmt| try self.collectBoundVarsToScratch(var_stmt.pattern_idx),
else => {},
}
// Collect free vars from the statement into the block's scratch space
const stmt_free_vars_slice = self.scratch_free_vars.sliceFromSpan(canonicailzed_stmt.free_vars);
for (stmt_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv) and !bound_vars.contains(fv)) {
if (!self.scratch_captures.contains(fv) and !self.scratch_bound_vars.containsFrom(bound_vars_top, fv)) {
try self.scratch_captures.append(fv);
}
}
@ -8807,7 +8774,7 @@ fn canonicalizeBlock(self: *Self, e: AST.Block) std.mem.Allocator.Error!Canonica
// Add free vars from the final expression to the block's scratch space
const final_expr_free_vars_slice = self.scratch_free_vars.sliceFromSpan(final_expr.free_vars);
for (final_expr_free_vars_slice) |fv| {
if (!self.scratch_captures.contains(fv) and !bound_vars.contains(fv)) {
if (!self.scratch_captures.contains(fv) and !self.scratch_bound_vars.containsFrom(bound_vars_top, fv)) {
try self.scratch_captures.append(fv);
}
}

View file

@ -438,7 +438,7 @@ pub fn relocate(self: *Self, offset: isize) void {
/// Initialize the compilation fields in an existing ModuleEnv
pub fn initCIRFields(self: *Self, module_name: []const u8) !void {
self.module_kind = .deprecated_module; // default until canonicalization sets the actual kind
self.module_kind = .deprecated_module; // Placeholder - set to actual kind during header canonicalization
self.all_defs = .{ .span = .{ .start = 0, .len = 0 } };
self.all_statements = .{ .span = .{ .start = 0, .len = 0 } };
self.exports = .{ .span = .{ .start = 0, .len = 0 } };
@ -468,7 +468,7 @@ pub fn init(gpa: std.mem.Allocator, source: []const u8) std.mem.Allocator.Error!
.gpa = gpa,
.common = common,
.types = try TypeStore.initCapacity(gpa, 2048, 512),
.module_kind = .deprecated_module, // Set during canonicalization
.module_kind = .deprecated_module, // Placeholder - set to actual kind during header canonicalization
.all_defs = .{ .span = .{ .start = 0, .len = 0 } },
.all_statements = .{ .span = .{ .start = 0, .len = 0 } },
.exports = .{ .span = .{ .start = 0, .len = 0 } },
@ -1246,7 +1246,7 @@ pub fn diagnosticToReport(self: *Self, diagnostic: CIR.Diagnostic, allocator: st
},
.f64_pattern_literal => |data| blk: {
// Extract the literal text from the source
const literal_text = self.getSourceAll()[data.region.start.offset..data.region.end.offset];
const literal_text = self.getSource(data.region);
var report = Report.init(allocator, "F64 NOT ALLOWED IN PATTERN", .runtime_error);
@ -2224,14 +2224,6 @@ pub fn addMatchBranchPattern(self: *Self, expr: CIR.Expr.Match.BranchPattern, re
return expr_idx;
}
/// Add a new pattern record field to the node store.
/// This function asserts that the nodes and regions are in sync.
pub fn addPatternRecordField(self: *Self, expr: CIR.PatternRecordField) std.mem.Allocator.Error!CIR.PatternRecordField.Idx {
const expr_idx = try self.store.addPatternRecordField(expr);
self.debugAssertArraysInSync();
return expr_idx;
}
/// Add a new type variable to the node store.
/// This function asserts that the nodes and regions are in sync.
pub fn addTypeSlot(
@ -2583,14 +2575,18 @@ pub fn getSource(self: *const Self, region: Region) []const u8 {
return self.common.getSource(region);
}
/// TODO this is a code smell... we should track down the places using this
/// and replace with something more sensible -- need to refactor diagnostics a little.
/// Get the entire source text. This is primarily needed for diagnostic output
/// where `addSourceRegion` requires access to the full source and line starts
/// to render error messages with context lines.
///
/// For extracting source text for a specific region, prefer `getSource(region)` instead.
pub fn getSourceAll(self: *const Self) []const u8 {
return self.common.getSourceAll();
}
/// TODO this is a code smell... we should track down the places using this
/// and replace with something more sensible -- need to refactor diagnostics a little.
/// Get all line start offsets. This is primarily needed for diagnostic output
/// where `addSourceRegion` requires access to the full source and line starts
/// to render error messages with context lines.
pub fn getLineStartsAll(self: *const Self) []const u32 {
return self.common.getLineStartsAll();
}

View file

@ -46,7 +46,6 @@ const Scratch = struct {
if_branches: base.Scratch(CIR.Expr.IfBranch.Idx),
where_clauses: base.Scratch(CIR.WhereClause.Idx),
patterns: base.Scratch(CIR.Pattern.Idx),
pattern_record_fields: base.Scratch(CIR.PatternRecordField.Idx),
record_destructs: base.Scratch(CIR.Pattern.RecordDestruct.Idx),
type_annos: base.Scratch(CIR.TypeAnno.Idx),
anno_record_fields: base.Scratch(CIR.TypeAnno.RecordField.Idx),
@ -67,7 +66,6 @@ const Scratch = struct {
.if_branches = try base.Scratch(CIR.Expr.IfBranch.Idx).init(gpa),
.where_clauses = try base.Scratch(CIR.WhereClause.Idx).init(gpa),
.patterns = try base.Scratch(CIR.Pattern.Idx).init(gpa),
.pattern_record_fields = try base.Scratch(CIR.PatternRecordField.Idx).init(gpa),
.record_destructs = try base.Scratch(CIR.Pattern.RecordDestruct.Idx).init(gpa),
.type_annos = try base.Scratch(CIR.TypeAnno.Idx).init(gpa),
.anno_record_fields = try base.Scratch(CIR.TypeAnno.RecordField.Idx).init(gpa),
@ -89,7 +87,6 @@ const Scratch = struct {
self.if_branches.deinit();
self.where_clauses.deinit();
self.patterns.deinit();
self.pattern_record_fields.deinit();
self.record_destructs.deinit();
self.type_annos.deinit();
self.anno_record_fields.deinit();
@ -1127,12 +1124,6 @@ pub fn getPattern(store: *const NodeStore, pattern_idx: CIR.Pattern.Idx) CIR.Pat
}
}
/// Retrieves a pattern record field from the store.
///
/// PatternRecordField currently carries no payload, so there is nothing to
/// read back from the store; every lookup yields the empty value. The index
/// parameter is accepted (and ignored) to keep the accessor's shape consistent
/// with the other get* functions in this file.
pub fn getPatternRecordField(_: *NodeStore, _: CIR.PatternRecordField.Idx) CIR.PatternRecordField {
    return .{};
}
/// Retrieves a type annotation from the store.
pub fn getTypeAnno(store: *const NodeStore, typeAnno: CIR.TypeAnno.Idx) CIR.TypeAnno {
const node_idx: Node.Idx = @enumFromInt(@intFromEnum(typeAnno));
@ -2139,11 +2130,6 @@ pub fn addPattern(store: *NodeStore, pattern: CIR.Pattern, region: base.Region)
return @enumFromInt(@intFromEnum(node_idx));
}
/// Adds a pattern record field to the store.
///
/// NOTE: Not yet implemented — this always panics at runtime. Do not call it
/// until PatternRecordField storage is actually designed (the type currently
/// has no payload, so there is nothing meaningful to persist). The signature
/// mirrors the other add* functions so call sites can be written ahead of time.
pub fn addPatternRecordField(_: *NodeStore, _: CIR.PatternRecordField) Allocator.Error!CIR.PatternRecordField.Idx {
@panic("TODO: addPatternRecordField not implemented");
}
/// Adds a type annotation to the store.
///
/// IMPORTANT: You should not use this function directly! Instead, use it's
@ -3609,7 +3595,7 @@ pub const Serialized = extern struct {
/// Deserialize this Serialized struct into a NodeStore
pub fn deserialize(self: *Serialized, offset: i64, gpa: Allocator) *NodeStore {
// Note: Serialized may be smaller than the runtime struct.
// CRITICAL: On 32-bit platforms, deserializing nodes in-place corrupts the adjacent
// On 32-bit platforms, deserializing nodes in-place corrupts the adjacent
// regions and extra_data fields. We must deserialize in REVERSE order (last to first)
// so that each deserialization doesn't corrupt fields that haven't been deserialized yet.

View file

@ -1019,7 +1019,6 @@ test "hex literal parsing logic integration" {
}
// number req tests //
// TODO: Review, claude generated
test "IntValue.toIntRequirements - boundary values for each type" {
// u8 boundary: 255/256

View file

@ -200,3 +200,159 @@ test "record_unbound with multiple fields" {
else => return error.ExpectedRecord,
}
}
const CIR = @import("../CIR.zig");
const Pattern = CIR.Pattern;
// End-to-end canonicalization test: parses a record-destructure declaration,
// canonicalizes its pattern, and verifies both the resulting CIR shape
// (a record_destructure with two labeled destructs) and the scope side effect
// (both bound names become resolvable via scopeLookup).
test "record pattern destructuring" {
const gpa = std.testing.allocator;
// Test simple record destructuring: { x, y } = { x: 1, y: 2 }
const source = "{ x, y } = { x: 1, y: 2 }";
var env = try ModuleEnv.init(gpa, source);
defer env.deinit();
try env.initCIRFields("test");
var ast = try parse.parseStatement(&env.common, gpa);
defer ast.deinit(gpa);
var can = try Can.init(&env, &ast, null);
defer can.deinit();
// Enter a function scope so we can have local bindings
try can.scopeEnter(gpa, true);
// parseStatement leaves the parsed statement as the AST root node.
const stmt_idx: parse.AST.Statement.Idx = @enumFromInt(ast.root_node_idx);
const stmt = ast.store.getStatement(stmt_idx);
// The statement should be a declaration
switch (stmt) {
.decl => |decl| {
// Get the pattern from the declaration
const pattern_idx = decl.pattern;
const canonical_pattern_idx = try can.canonicalizePattern(pattern_idx) orelse {
return error.CanonicalizePatternError;
};
const canonical_pattern = env.store.getPattern(canonical_pattern_idx);
// Check that it's a record_destructure pattern
switch (canonical_pattern) {
.record_destructure => |rd| {
// Get the destructs
const destructs = env.store.sliceRecordDestructs(rd.destructs);
try std.testing.expect(destructs.len == 2);
// Check the first destruct (x) — destruct order is assumed to match
// source order; TODO confirm sliceRecordDestructs guarantees this.
const destruct_x = env.store.getRecordDestruct(destructs[0]);
try std.testing.expectEqualStrings("x", env.getIdent(destruct_x.label));
// Check the second destruct (y)
const destruct_y = env.store.getRecordDestruct(destructs[1]);
try std.testing.expectEqualStrings("y", env.getIdent(destruct_y.label));
// Verify that x and y are now in scope. insertIdent is interning:
// re-inserting an existing name yields the same Ident index.
const x_ident = try env.insertIdent(Ident.for_text("x"));
const y_ident = try env.insertIdent(Ident.for_text("y"));
const x_lookup = can.scopeLookup(.ident, x_ident);
const y_lookup = can.scopeLookup(.ident, y_ident);
// Both should be found in scope
switch (x_lookup) {
.found => {},
else => return error.XNotInScope,
}
switch (y_lookup) {
.found => {},
else => return error.YNotInScope,
}
},
else => return error.ExpectedRecordDestructure,
}
},
else => return error.ExpectedDecl,
}
}
// Canonicalization test for record destructs with explicit sub-patterns
// (`label: binding` form). Verifies the destruct kind is SubPattern, that the
// labels round-trip, and that the *bound* names (n, a) — not the labels —
// are what end up in scope.
test "record pattern with sub-patterns" {
const gpa = std.testing.allocator;
// Test record destructuring with sub-patterns: { name: n, age: a } = person
const source = "{ name: n, age: a } = person";
var env = try ModuleEnv.init(gpa, source);
defer env.deinit();
try env.initCIRFields("test");
var ast = try parse.parseStatement(&env.common, gpa);
defer ast.deinit(gpa);
var can = try Can.init(&env, &ast, null);
defer can.deinit();
// Enter a function scope so we can have local bindings
try can.scopeEnter(gpa, true);
// parseStatement leaves the parsed statement as the AST root node.
const stmt_idx: parse.AST.Statement.Idx = @enumFromInt(ast.root_node_idx);
const stmt = ast.store.getStatement(stmt_idx);
// The statement should be a declaration
switch (stmt) {
.decl => |decl| {
// Get the pattern from the declaration
const pattern_idx = decl.pattern;
const canonical_pattern_idx = try can.canonicalizePattern(pattern_idx) orelse {
return error.CanonicalizePatternError;
};
const canonical_pattern = env.store.getPattern(canonical_pattern_idx);
// Check that it's a record_destructure pattern
switch (canonical_pattern) {
.record_destructure => |rd| {
// Get the destructs
const destructs = env.store.sliceRecordDestructs(rd.destructs);
try std.testing.expect(destructs.len == 2);
// Check the first destruct (name: n)
const destruct_name = env.store.getRecordDestruct(destructs[0]);
try std.testing.expectEqualStrings("name", env.getIdent(destruct_name.label));
// NOTE(review): for `name: n` the sub-pattern variable is "n", yet this
// asserts `ident` is "name". Presumably `ident` stores the field label
// (with the binding held inside `kind.SubPattern`) — confirm against
// RecordDestruct's definition; if `ident` is meant to be the binding,
// this assertion is wrong.
try std.testing.expectEqualStrings("name", env.getIdent(destruct_name.ident));
// Should have a SubPattern kind
switch (destruct_name.kind) {
.SubPattern => {},
else => return error.ExpectedSubPattern,
}
// Check the second destruct (age: a)
const destruct_age = env.store.getRecordDestruct(destructs[1]);
try std.testing.expectEqualStrings("age", env.getIdent(destruct_age.label));
// Verify that n and a are now in scope (the sub-pattern bindings).
// insertIdent interns, so these resolve to the already-inserted idents.
const n_ident = try env.insertIdent(Ident.for_text("n"));
const a_ident = try env.insertIdent(Ident.for_text("a"));
const n_lookup = can.scopeLookup(.ident, n_ident);
const a_lookup = can.scopeLookup(.ident, a_ident);
// Both should be found in scope
switch (n_lookup) {
.found => {},
else => return error.NNotInScope,
}
switch (a_lookup) {
.found => {},
else => return error.ANotInScope,
}
},
else => return error.ExpectedRecordDestructure,
}
},
else => return error.ExpectedDecl,
}
}

View file

@ -399,18 +399,17 @@ fn unifyWithCtx(self: *Self, a: Var, b: Var, env: *Env, ctx: unifier.Conf.Ctx) s
}
}
// Set regions and add to the current rank all variables created during unification
// Set regions and add to the current rank all variables created during unification.
//
// TODO: Setting all fresh var regions to be the same as the root var region
// is fine if this unification doesn't go very deep (ie doesn't recurse
// that much).
// We assign all fresh variables the region of `b` (the "actual" type), since `a` is
// typically the "expected" type from an annotation. This heuristic works well for
// most cases but can be imprecise for deeply nested unifications where fresh variables
// are created for sub-components (e.g., record fields, tag payloads). In those cases,
// error messages may point to the outer expression rather than the specific field.
//
// But if it does, this region may be imprecise. We can explore
// ways around this (like maybe capurting the origin var for each of unify's
// fresh var) and setting region that way
//
// Note that we choose `b`s region here, since `b` is the "actual" type
// (whereas `a` is the "expected" type, like from an annotation)
// A more precise solution would track the origin of each fresh variable during
// unification and propagate that back, but the current approach is sufficient for
// typical error reporting scenarios.
const region = self.cir.store.getNodeRegion(ModuleEnv.nodeIdxFrom(b));
for (self.unify_scratch.fresh_vars.items.items) |fresh_var| {
// Set the rank
@ -2264,148 +2263,35 @@ fn checkPatternHelp(
try self.unifyWith(pattern_var, tag_union_content, env);
},
// nominal //
.nominal => |nominal| blk: {
// TODO: Merge this with e_nominal_external
// First, check the type inside the expr
.nominal => |nominal| {
// Check the backing pattern first
const actual_backing_var = try self.checkPatternHelp(nominal.backing_pattern, env, .no_expectation, out_var);
// Then, we need an instance of the nominal type being referenced
// E.g. ConList.Cons(...)
// ^^^^^^^
const nominal_var = try self.instantiateVar(ModuleEnv.varFrom(nominal.nominal_type_decl), env, .{ .explicit = pattern_region });
const nominal_resolved = self.types.resolveVar(nominal_var).desc.content;
if (nominal_resolved == .structure and nominal_resolved.structure == .nominal_type) {
const nominal_type = nominal_resolved.structure.nominal_type;
// If this nominal type is opaque and we're not in the defining module
// then report an error
if (!nominal_type.canLiftInner(self.cir.module_name_idx)) {
_ = try self.problems.appendProblem(self.cir.gpa, .{ .cannot_access_opaque_nominal = .{
.var_ = pattern_var,
.nominal_type_name = nominal_type.ident.ident_idx,
} });
// Mark the entire expression as having a type error
try self.unifyWith(pattern_var, .err, env);
break :blk;
}
// Then, we extract the variable of the nominal type
// E.g. ConList(a) := [Cons(a, ConstList), Nil]
// ^^^^^^^^^^^^^^^^^^^^^^^^^
const nominal_backing_var = self.types.getNominalBackingVar(nominal_type);
// Now we unify what the user wrote with the backing type of the nominal was
// E.g. ConList.Cons(...) <-> [Cons(a, ConsList(a)), Nil]
// ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^
const result = try self.unify(nominal_backing_var, actual_backing_var, env);
// Then, we handle the result of unification
switch (result) {
.ok => {
// If that unify call succeeded, then we this is a valid instance
// of this nominal type. So we set the expr's type to be the
// nominal type
_ = try self.unify(pattern_var, nominal_var, env);
},
.problem => |problem_idx| {
// Unification failed - the constructor is incompatible with the nominal type
// Set a specific error message based on the backing type kind
switch (nominal.backing_type) {
.tag => {
// Constructor doesn't exist or has wrong arity/types
self.setProblemTypeMismatchDetail(problem_idx, .invalid_nominal_tag);
},
else => {
// TODO: Add specific error messages for records, tuples, etc.
},
}
// Mark the entire expression as having a type error
try self.unifyWith(pattern_var, .err, env);
},
}
} else {
// If the nominal type is actually something else, then set the
// whole expression to be an error.
//
// TODO: Report a nice problem here
try self.unifyWith(pattern_var, .err, env);
}
// Use shared nominal type checking logic
_ = try self.checkNominalTypeUsage(
pattern_var,
actual_backing_var,
ModuleEnv.varFrom(nominal.nominal_type_decl),
nominal.backing_type,
pattern_region,
env,
);
},
.nominal_external => |nominal| blk: {
// TODO: Merge this with e_nominal
// First, check the type inside the expr
.nominal_external => |nominal| {
// Check the backing pattern first
const actual_backing_var = try self.checkPatternHelp(nominal.backing_pattern, env, .no_expectation, out_var);
// Resolve the external type declaration
if (try self.resolveVarFromExternal(nominal.module_idx, nominal.target_node_idx)) |ext_ref| {
// Then, we need an instance of the nominal type being referenced
// E.g. ConList.Cons(...)
// ^^^^^^^
const nominal_var = try self.instantiateVar(ext_ref.local_var, env, .{ .explicit = pattern_region });
const nominal_resolved = self.types.resolveVar(nominal_var).desc.content;
if (nominal_resolved == .structure and nominal_resolved.structure == .nominal_type) {
const nominal_type = nominal_resolved.structure.nominal_type;
// If this nominal type is opaque and we're not in the defining module
// then report an error
if (!nominal_type.canLiftInner(self.cir.module_name_idx)) {
_ = try self.problems.appendProblem(self.cir.gpa, .{ .cannot_access_opaque_nominal = .{
.var_ = pattern_var,
.nominal_type_name = nominal_type.ident.ident_idx,
} });
// Mark the entire expression as having a type error
try self.unifyWith(pattern_var, .err, env);
break :blk;
}
// Then, we extract the variable of the nominal type
// E.g. ConList(a) := [Cons(a, ConstList), Nil]
// ^^^^^^^^^^^^^^^^^^^^^^^^^
const nominal_backing_var = self.types.getNominalBackingVar(nominal_type);
// Now we unify what the user wrote with the backing type of the nominal was
// E.g. ConList.Cons(...) <-> [Cons(a, ConsList(a)), Nil]
// ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^
const result = try self.unify(nominal_backing_var, actual_backing_var, env);
// Then, we handle the result of unification
switch (result) {
.ok => {
// If that unify call succeeded, then we this is a valid instance
// of this nominal type. So we set the expr's type to be the
// nominal type
_ = try self.unify(pattern_var, nominal_var, env);
},
.problem => |problem_idx| {
// Unification failed - the constructor is incompatible with the nominal type
// Set a specific error message based on the backing type kind
switch (nominal.backing_type) {
.tag => {
// Constructor doesn't exist or has wrong arity/types
self.setProblemTypeMismatchDetail(problem_idx, .invalid_nominal_tag);
},
else => {
// TODO: Add specific error messages for records, tuples, etc.
},
}
// Mark the entire expression as having a type error
try self.unifyWith(pattern_var, .err, env);
},
}
} else {
// If the nominal type is actually something else, then set the
// whole expression to be an error.
//
// TODO: Report a nice problem here
try self.unifyWith(pattern_var, .err, env);
}
// Use shared nominal type checking logic
_ = try self.checkNominalTypeUsage(
pattern_var,
actual_backing_var,
ext_ref.local_var,
nominal.backing_type,
pattern_region,
env,
);
} else {
try self.unifyWith(pattern_var, .err, env);
}
@ -2920,150 +2806,37 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected)
try self.unifyWith(expr_var, tag_union_content, env);
},
// nominal //
.e_nominal => |nominal| blk: {
// TODO: Merge this with e_nominal_external
// First, check the type inside the expr
.e_nominal => |nominal| {
// Check the backing expression first
does_fx = try self.checkExpr(nominal.backing_expr, env, .no_expectation) or does_fx;
const actual_backing_var = ModuleEnv.varFrom(nominal.backing_expr);
// Then, we need an instance of the nominal type being referenced
// E.g. ConList.Cons(...)
// ^^^^^^^
const nominal_var = try self.instantiateVar(ModuleEnv.varFrom(nominal.nominal_type_decl), env, .{ .explicit = expr_region });
const nominal_resolved = self.types.resolveVar(nominal_var).desc.content;
if (nominal_resolved == .structure and nominal_resolved.structure == .nominal_type) {
const nominal_type = nominal_resolved.structure.nominal_type;
// If this nominal type is opaque and we're not in the defining module
// then report an error
if (!nominal_type.canLiftInner(self.cir.module_name_idx)) {
_ = try self.problems.appendProblem(self.cir.gpa, .{ .cannot_access_opaque_nominal = .{
.var_ = expr_var,
.nominal_type_name = nominal_type.ident.ident_idx,
} });
// Mark the entire expression as having a type error
try self.unifyWith(expr_var, .err, env);
break :blk;
}
// Then, we extract the variable of the nominal type
// E.g. ConList(a) := [Cons(a, ConstList), Nil]
// ^^^^^^^^^^^^^^^^^^^^^^^^^
const nominal_backing_var = self.types.getNominalBackingVar(nominal_type);
// Now we unify what the user wrote with the backing type of the nominal was
// E.g. ConList.Cons(...) <-> [Cons(a, ConsList(a)), Nil]
// ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^
const result = try self.unify(nominal_backing_var, actual_backing_var, env);
// Then, we handle the result of unification
switch (result) {
.ok => {
// If that unify call succeeded, then we this is a valid instance
// of this nominal type. So we set the expr's type to be the
// nominal type
_ = try self.unify(expr_var, nominal_var, env);
},
.problem => |problem_idx| {
// Unification failed - the constructor is incompatible with the nominal type
// Set a specific error message based on the backing type kind
switch (nominal.backing_type) {
.tag => {
// Constructor doesn't exist or has wrong arity/types
self.setProblemTypeMismatchDetail(problem_idx, .invalid_nominal_tag);
},
else => {
// TODO: Add specific error messages for records, tuples, etc.
},
}
// Mark the entire expression as having a type error
try self.unifyWith(expr_var, .err, env);
},
}
} else {
// If the nominal type is actually something else, then set the
// whole expression to be an error.
//
// TODO: Report a nice problem here
try self.unifyWith(expr_var, .err, env);
}
// Use shared nominal type checking logic
_ = try self.checkNominalTypeUsage(
expr_var,
actual_backing_var,
ModuleEnv.varFrom(nominal.nominal_type_decl),
nominal.backing_type,
expr_region,
env,
);
},
.e_nominal_external => |nominal| blk: {
// TODO: Merge this with e_nominal
// First, check the type inside the expr
.e_nominal_external => |nominal| {
// Check the backing expression first
does_fx = try self.checkExpr(nominal.backing_expr, env, .no_expectation) or does_fx;
const actual_backing_var = ModuleEnv.varFrom(nominal.backing_expr);
// Resolve the external type declaration
if (try self.resolveVarFromExternal(nominal.module_idx, nominal.target_node_idx)) |ext_ref| {
// Then, we need an instance of the nominal type being referenced
// E.g. ConList.Cons(...)
// ^^^^^^^
const nominal_var = try self.instantiateVar(ext_ref.local_var, env, .{ .explicit = expr_region });
const nominal_resolved = self.types.resolveVar(nominal_var).desc.content;
if (nominal_resolved == .structure and nominal_resolved.structure == .nominal_type) {
const nominal_type = nominal_resolved.structure.nominal_type;
// If this nominal type is opaque and we're not in the defining module
// then report an error
if (!nominal_type.canLiftInner(self.cir.module_name_idx)) {
_ = try self.problems.appendProblem(self.cir.gpa, .{ .cannot_access_opaque_nominal = .{
.var_ = expr_var,
.nominal_type_name = nominal_type.ident.ident_idx,
} });
// Mark the entire expression as having a type error
try self.unifyWith(expr_var, .err, env);
break :blk;
}
// Then, we extract the variable of the nominal type
// E.g. ConList(a) := [Cons(a, ConstList), Nil]
// ^^^^^^^^^^^^^^^^^^^^^^^^^
const nominal_backing_var = self.types.getNominalBackingVar(nominal_type);
// Now we unify what the user wrote with the backing type of the nominal was
// E.g. ConList.Cons(...) <-> [Cons(a, ConsList(a)), Nil]
// ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^
const result = try self.unify(nominal_backing_var, actual_backing_var, env);
// Then, we handle the result of unification
switch (result) {
.ok => {
// If that unify call succeeded, then we this is a valid instance
// of this nominal type. So we set the expr's type to be the
// nominal type
_ = try self.unify(expr_var, nominal_var, env);
},
.problem => |problem_idx| {
// Unification failed - the constructor is incompatible with the nominal type
// Set a specific error message based on the backing type kind
switch (nominal.backing_type) {
.tag => {
// Constructor doesn't exist or has wrong arity/types
self.setProblemTypeMismatchDetail(problem_idx, .invalid_nominal_tag);
},
else => {
// TODO: Add specific error messages for records, tuples, etc.
},
}
// Mark the entire expression as having a type error
try self.unifyWith(expr_var, .err, env);
},
}
} else {
// If the nominal type is actually something else, then set the
// whole expression to be an error.
//
// TODO: Report a nice problem here
try self.unifyWith(expr_var, .err, env);
}
// Use shared nominal type checking logic
_ = try self.checkNominalTypeUsage(
expr_var,
actual_backing_var,
ext_ref.local_var,
nominal.backing_type,
expr_region,
env,
);
} else {
try self.unifyWith(expr_var, .err, env);
}
@ -4915,6 +4688,99 @@ fn copyVar(self: *Self, other_module_var: Var, other_module_env: *const ModuleEn
return copied_var;
}
// nominal type checking helpers //
/// Outcome of `checkNominalTypeUsage` for a single nominal type usage site.
const NominalCheckResult = enum {
    /// The usage unified cleanly with the nominal type declaration.
    ok,
    /// A problem was found; it has already been reported (where applicable)
    /// and the target variable has already been unified with `.err`.
    err,
};
/// Shared type-checking logic for a nominal type usage, covering the pattern
/// variants (`.nominal`, `.nominal_external`) as well as the expression
/// variants (`.e_nominal`, `.e_nominal_external`).
///
/// Parameters:
/// - target_var: The type variable to unify with (pattern_var or expr_var)
/// - actual_backing_var: The type variable of the backing expression/pattern
/// - nominal_type_decl_var: The type variable from the nominal type declaration
/// - backing_type: The kind of backing type (tag, record, tuple, value)
/// - region: The source region for instantiation
/// - env: The type checking environment
fn checkNominalTypeUsage(
    self: *Self,
    target_var: Var,
    actual_backing_var: Var,
    nominal_type_decl_var: Var,
    backing_type: CIR.Expr.NominalBackingType,
    region: Region,
    env: *Env,
) std.mem.Allocator.Error!NominalCheckResult {
    // Get a fresh instance of the declared nominal type for this usage site.
    const nominal_var = try self.instantiateVar(nominal_type_decl_var, env, .{ .explicit = region });
    const resolved = self.types.resolveVar(nominal_var).desc.content;

    // Guard: the declaration must actually resolve to a nominal type structure.
    if (resolved != .structure or resolved.structure != .nominal_type) {
        // Something other than a nominal type — flag the whole usage as an error.
        try self.unifyWith(target_var, .err, env);
        return .err;
    }
    const nominal_type = resolved.structure.nominal_type;

    // Guard: an opaque nominal type may only be lifted inside its defining module.
    if (!nominal_type.canLiftInner(self.cir.module_name_idx)) {
        _ = try self.problems.appendProblem(self.cir.gpa, .{ .cannot_access_opaque_nominal = .{
            .var_ = target_var,
            .nominal_type_name = nominal_type.ident.ident_idx,
        } });
        // Mark the entire expression as having a type error
        try self.unifyWith(target_var, .err, env);
        return .err;
    }

    // Pull out the declared backing type, e.g. for
    //   ConList(a) := [Cons(a, ConstList), Nil]
    // this is the tag union on the right-hand side.
    const declared_backing_var = self.types.getNominalBackingVar(nominal_type);

    // Unify what the user wrote against the declared backing type, e.g.
    //   ConList.Cons(...) <-> [Cons(a, ConsList(a)), Nil]
    switch (try self.unify(declared_backing_var, actual_backing_var, env)) {
        .ok => {
            // Valid instance: the usage site gets the nominal type itself.
            _ = try self.unify(target_var, nominal_var, env);
            return .ok;
        },
        .problem => |problem_idx| {
            // The written value is incompatible with the nominal's backing type.
            if (backing_type == .tag) {
                // Constructor doesn't exist or has wrong arity/types.
                self.setProblemTypeMismatchDetail(problem_idx, .invalid_nominal_tag);
            }
            // TODO: specific error details for records, tuples, and values.
            // Mark the entire expression as having a type error
            try self.unifyWith(target_var, .err, env);
            return .err;
        },
    }
}
// validate static dispatch constraints //
/// Handle a recursive static dispatch constraint by creating a RecursionVar

View file

@ -41,4 +41,5 @@ test "check tests" {
std.testing.refAllDecls(@import("test/builtin_scope_test.zig"));
std.testing.refAllDecls(@import("test/num_type_inference_test.zig"));
std.testing.refAllDecls(@import("test/unify_test.zig"));
std.testing.refAllDecls(@import("test/instantiate_tag_union_test.zig"));
}

View file

@ -48,11 +48,11 @@ pub const Problem = union(enum) {
negative_unsigned_int: NegativeUnsignedInt,
invalid_numeric_literal: InvalidNumericLiteral,
unused_value: UnusedValue,
infinite_recursion: struct { var_: Var },
anonymous_recursion: struct { var_: Var },
invalid_number_type: VarProblem1,
invalid_record_ext: VarProblem1,
invalid_tag_union_ext: VarProblem1,
infinite_recursion: VarWithSnapshot,
anonymous_recursion: VarWithSnapshot,
invalid_number_type: VarWithSnapshot,
invalid_record_ext: VarWithSnapshot,
invalid_tag_union_ext: VarWithSnapshot,
bug: Bug,
comptime_crash: ComptimeCrash,
comptime_expect_failed: ComptimeExpectFailed,
@ -80,8 +80,9 @@ pub const ComptimeEvalError = struct {
region: base.Region,
};
/// A single var problem
pub const VarProblem1 = struct {
/// A problem involving a single type variable, with a snapshot for error reporting.
/// Used for recursion errors, invalid extension types, etc.
pub const VarWithSnapshot = struct {
var_: Var,
snapshot: SnapshotContentIdx,
};
@ -331,12 +332,11 @@ pub const ReportBuilder = struct {
self.bytes_buf.deinit();
}
/// Get the formatted string for a snapshot, asserting it exists
/// Get the formatted string for a snapshot.
/// Returns a placeholder if the formatted string is missing, allowing error reporting
/// to continue gracefully even if snapshots are incomplete.
fn getFormattedString(self: *const Self, idx: SnapshotContentIdx) []const u8 {
return self.snapshots.getFormattedString(idx) orelse {
std.debug.assert(false); // Missing formatted string for snapshot
unreachable;
};
return self.snapshots.getFormattedString(idx) orelse "<unknown type>";
}
/// Build a report for a problem

View file

@ -594,13 +594,70 @@ pub const Store = struct {
const args = self.content_indexes.sliceRange(tag.args);
for (args) |arg_idx| {
try result.append(' ');
const formatted = self.getFormattedString(arg_idx) orelse {
std.debug.assert(false); // Missing formatted string for tag argument - snapshotVarForError must be called for all nested types
unreachable;
};
const formatted = self.getFormattedString(arg_idx) orelse "<unknown type>";
try result.appendSlice(formatted);
}
return result.toOwnedSlice();
}
};
// Tests
test "formatTagString - gracefully handles missing formatted strings" {
    const gpa = std.testing.allocator;

    var store = try Store.initCapacity(gpa, 16);
    defer store.deinit();

    // The tag gets one argument whose content has no formatted string stored;
    // formatting should fall back to "<unknown type>" instead of crashing.
    const missing_idx = try store.contents.append(gpa, .err);
    const tag_args = try store.content_indexes.appendSlice(gpa, &[_]SnapshotContentIdx{missing_idx});

    // The tag name lives in its own ident store.
    var idents = try Ident.Store.initCapacity(gpa, 64);
    defer idents.deinit(gpa);

    const tag = SnapshotTag{
        .name = try idents.insert(gpa, Ident.for_text("MyTag")),
        .args = tag_args,
    };

    // Formatting succeeds and the fallback placeholder appears in the output.
    const rendered = try store.formatTagString(gpa, tag, &idents);
    defer gpa.free(rendered);
    try std.testing.expectEqualStrings("MyTag <unknown type>", rendered);
}
test "formatTagString - uses stored formatted strings when available" {
    const gpa = std.testing.allocator;

    var store = try Store.initCapacity(gpa, 16);
    defer store.deinit();

    // Register a content index together with a pre-rendered string for it.
    const content_idx = try store.contents.append(gpa, .err);
    try store.formatted_strings.put(gpa, content_idx, try gpa.dupe(u8, "U64"));
    const tag_args = try store.content_indexes.appendSlice(gpa, &[_]SnapshotContentIdx{content_idx});

    // The tag name lives in its own ident store.
    var idents = try Ident.Store.initCapacity(gpa, 64);
    defer idents.deinit(gpa);

    const tag = SnapshotTag{
        .name = try idents.insert(gpa, Ident.for_text("Some")),
        .args = tag_args,
    };

    // The stored formatted string should be used for the tag argument.
    const rendered = try store.formatTagString(gpa, tag, &idents);
    defer gpa.free(rendered);
    try std.testing.expectEqualStrings("Some U64", rendered);
}

View file

@ -0,0 +1,122 @@
//! Test for instantiating tag unions with tag payloads
//! This test is a regression test for a bug where tag union args were uninitialized.
const std = @import("std");
const TestEnv = @import("./TestEnv.zig");
test "instantiate polymorphic function with nested recursive tag unions" {
    // This tests instantiation of polymorphic functions with nested recursive calls
    // that return tag unions with payloads. The code pattern is:
    // 1. Multiple mutually-recursive functions return tuples containing Try types
    // 2. Pattern matching destructures tuples to extract Try values
    // 3. Functions are called multiple times triggering instantiation
    // 4. Deep nesting of match expressions
    //
    // This is a regression test (tag union args were previously left
    // uninitialized during instantiation) that ensures complex nested tag
    // union patterns type-check without panicking.
    const source =
        \\tokenize_identifier = |file, index, acc, start_index| {
        \\ ret = || {
        \\ match Str.from_utf8(acc) {
        \\ Ok(str) => (Ok(str), start_index, index)
        \\ Err(_) => (Err("bad utf8"), start_index, index)
        \\ }
        \\ }
        \\ match List.get(file, index) {
        \\ Ok(c) =>
        \\ if (c >= 97) and (c <= 122) {
        \\ tokenize_identifier(file, index + 1, List.append(acc, c), start_index)
        \\ } else {
        \\ ret()
        \\ }
        \\ _ => ret()
        \\ }
        \\}
        \\
        \\get_next_token = |file, index| {
        \\ match List.get(file, index) {
        \\ Ok(c) =>
        \\ if (c >= 97) and (c <= 122) {
        \\ tokenize_identifier(file, index + 1, [c], index)
        \\ } else {
        \\ (Err("unexpected"), index, index + 1)
        \\ }
        \\ Err(_) =>
        \\ (Ok("eof"), index, index)
        \\ }
        \\}
        \\
        \\parse_pattern_match_starting = |file, ident, tokenizer_result| {
        \\ (token, _, index) = tokenizer_result
        \\ match token {
        \\ Ok(s) => Ok((ident, Some(s), token, index))
        \\ _ => Ok((ident, None, token, index))
        \\ }
        \\}
        \\
        \\parse_pattern_match = |file, tokenizer_result| {
        \\ (token, _, index) = tokenizer_result
        \\ match token {
        \\ Ok(ident) =>
        \\ parse_pattern_match_starting(file, ident, get_next_token(file, index))
        \\ _ => Err("error")
        \\ }
        \\}
        \\
        \\parse_value = |file, tokenizer_result, possibilities| {
        \\ (token, token_pos, index) = tokenizer_result
        \\ match token {
        \\ Ok(n) => {
        \\ match get_next_token(file, index) {
        \\ (Ok(_), _, new_index) => Ok((n, new_index))
        \\ _ => Ok((n, index))
        \\ }
        \\ }
        \\ _ => Err("failed")
        \\ }
        \\}
        \\
        \\parse_block = |file, index, acc| {
        \\ match get_next_token(file, index) {
        \\ (Ok(n), _, index2) => {
        \\ (token, token_pos, index3) = get_next_token(file, index2)
        \\ match token {
        \\ Ok(_) => {
        \\ (args, index4) = parse_function_call_args(file, index3, [])?
        \\ parse_block(file, index4, acc)
        \\ }
        \\ _ => {
        \\ (pattern, token4, index4, possibilities) = parse_pattern_match_starting(file, n, (token, token_pos, index3))?
        \\ parse_block(file, index4, acc)
        \\ }
        \\ }
        \\ }
        \\ got => Err("error")
        \\ }
        \\}
        \\
        \\parse_function_call_args = |file, index, acc| {
        \\ (token, token_pos, index2) = get_next_token(file, index)
        \\ match token {
        \\ Ok(_) => Ok((acc, index2))
        \\ _ => {
        \\ (value, index3) = parse_value(file, (token, token_pos, index2), [])?
        \\ (token2, token2_pos, index4) = get_next_token(file, index3)
        \\ match token2 {
        \\ Ok(_) => parse_function_call_args(file, index4, List.append(acc, value))
        \\ _ => Err("error")
        \\ }
        \\ }
        \\ }
        \\}
        \\
        \\main = {
        \\ file = [104, 101, 108, 108, 111]
        \\ parse_block(file, 0, [])
        \\}
    ;
    var test_env = try TestEnv.init("Test", source);
    defer test_env.deinit();
    // No assertions: we only care that type checking completes without panicking.
}

View file

@ -1324,6 +1324,16 @@ test "unify - fails on infinite type" {
.problem => |problem_idx| {
const problem = env.problems.get(problem_idx);
try std.testing.expectEqual(.infinite_recursion, @as(Problem.Tag, problem));
// Verify that a snapshot was created for the recursion error
const snapshot_idx = problem.infinite_recursion.snapshot;
const snapshot_content = env.snapshots.getContent(snapshot_idx);
// The snapshot should be some valid content (not just err)
try std.testing.expect(snapshot_content != .err);
// Verify a formatted string was created
const formatted = env.snapshots.getFormattedString(snapshot_idx);
try std.testing.expect(formatted != null);
},
}
}
@ -1352,6 +1362,16 @@ test "unify - fails on anonymous recursion" {
.problem => |problem_idx| {
const problem = env.problems.get(problem_idx);
try std.testing.expectEqual(.anonymous_recursion, @as(Problem.Tag, problem));
// Verify that a snapshot was created for the recursion error
const snapshot_idx = problem.anonymous_recursion.snapshot;
const snapshot_content = env.snapshots.getContent(snapshot_idx);
// The snapshot should be some valid content (not just err)
try std.testing.expect(snapshot_content != .err);
// Verify a formatted string was created
const formatted = env.snapshots.getFormattedString(snapshot_idx);
try std.testing.expect(formatted != null);
},
}
}

View file

@ -280,17 +280,17 @@ pub fn unifyWithConf(
if (unify_scratch.err) |unify_err| {
switch (unify_err) {
.recursion_anonymous => |var_| {
// TODO: Snapshot infinite recursion
// const snapshot = snapshots.deepCopyVar(types, var_);
const snapshot = try snapshots.snapshotVarForError(types, type_writer, var_);
break :blk .{ .anonymous_recursion = .{
.var_ = var_,
.snapshot = snapshot,
} };
},
.recursion_infinite => |var_| {
// TODO: Snapshot infinite recursion
// const snapshot = snapshots.deepCopyVar(types, var_);
const snapshot = try snapshots.snapshotVarForError(types, type_writer, var_);
break :blk .{ .infinite_recursion = .{
.var_ = var_,
.snapshot = snapshot,
} };
},
.invalid_number_type => |var_| {

View file

@ -1,273 +0,0 @@
//! Generates app stub libraries for cross-compilation
//! These stubs provide the Roc app entrypoints that the platform host expects to call
const std = @import("std");
const builtin = @import("builtin");
const target_mod = @import("target.zig");
const builder = @import("builder.zig");
const RocTarget = target_mod.RocTarget;
const Allocator = std.mem.Allocator;
// Check if LLVM is available at compile time
const llvm_available = if (@import("builtin").is_test) false else @import("config").llvm;
/// A single app entrypoint that the platform host expects the Roc app to export.
pub const PlatformEntrypoint = struct {
    // Bare function name like "add_ints"; the "roc__" symbol prefix is added
    // when the stub is emitted (see addRocCallAbiStub).
    name: []const u8, // Function name like "addInts", "processString"
};
/// Generate an app stub object file containing implementations for
/// platform-expected entrypoints.
///
/// Returns the path of the generated object file; the caller owns the
/// returned slice and must free it with `allocator`.
///
/// Errors:
/// - error.LLVMNotAvailable when the compiler was built without LLVM support
/// - error.CompilationFailed when bitcode-to-object compilation reports failure
pub fn generateAppStubObject(
    allocator: Allocator,
    output_dir: []const u8,
    entrypoints: []const PlatformEntrypoint,
    target: RocTarget,
) ![]const u8 {
    // Check if LLVM is available
    if (!llvm_available) {
        return error.LLVMNotAvailable;
    }

    const std_zig_llvm = @import("std").zig.llvm;
    const Builder = std_zig_llvm.Builder;

    // Create LLVM Builder
    var llvm_builder = try Builder.init(.{
        .allocator = allocator,
        .name = "roc_app_stub",
    });
    defer llvm_builder.deinit();

    // Generate the app stub functions
    try createAppStubs(&llvm_builder, entrypoints, target);

    // Generate paths for temporary files
    const bitcode_path = try std.fs.path.join(allocator, &.{ output_dir, "app_stub.bc" });
    defer allocator.free(bitcode_path);
    const object_filename = try std.fmt.allocPrint(allocator, "app_stub_{s}.o", .{@tagName(target)});
    // BUGFIX: object_filename was never freed (path.join copies it into
    // object_path, so the intermediate buffer must be released).
    defer allocator.free(object_filename);
    const object_path = try std.fs.path.join(allocator, &.{ output_dir, object_filename });
    // Don't defer free object_path since we return it

    // Generate bitcode
    const producer = Builder.Producer{
        .name = "Roc App Stub Generator",
        .version = .{ .major = 1, .minor = 0, .patch = 0 },
    };
    const bitcode = try llvm_builder.toBitcode(allocator, producer);
    defer allocator.free(bitcode);

    // Write bitcode to file. The handle is scoped so it is closed *before*
    // the compile step below reopens the same path for reading; a deferred
    // close at function exit would keep the writer handle open across the
    // compile, which can fail on platforms with exclusive file sharing
    // semantics (e.g. Windows).
    {
        const bc_file = try std.fs.cwd().createFile(bitcode_path, .{});
        defer bc_file.close();
        // Convert u32 array to bytes for writing
        const bytes = std.mem.sliceAsBytes(bitcode);
        try bc_file.writeAll(bytes);
        std.log.debug("Wrote bitcode file: {s} ({} bytes)", .{ bitcode_path, bytes.len });
    }

    // Compile bitcode to object file using LLVM
    // For native compilation, use empty CPU to let LLVM choose the default
    // For cross-compilation, use "generic" for maximum compatibility
    const detected_native = target_mod.RocTarget.detectNative();
    const is_native = target == detected_native;
    const cpu_name = if (is_native) "" else "generic";
    std.log.debug("Native target: {}, Request target: {}, Is native: {}", .{ detected_native, target, is_native });
    std.log.debug("Using CPU: '{s}'", .{cpu_name});
    const compile_config = builder.CompileConfig{
        .input_path = bitcode_path,
        .output_path = object_path,
        .optimization = .size,
        .target = target,
        .cpu = cpu_name,
        .features = "",
    };
    std.log.debug("About to call compileBitcodeToObject...", .{});
    const success = builder.compileBitcodeToObject(allocator, compile_config) catch |err| {
        std.log.err("Failed to compile bitcode to object: {}", .{err});
        // On failure the caller never receives object_path, so free it here.
        allocator.free(object_path);
        return err;
    };
    std.log.debug("compileBitcodeToObject returned: {}", .{success});
    if (!success) {
        std.log.err("Bitcode compilation returned false without error", .{});
        allocator.free(object_path);
        return error.CompilationFailed;
    }
    std.log.debug("Generated app stub object: {s}", .{object_path});
    return object_path;
}
/// Emit one RocCall-ABI stub function into the LLVM module for every
/// entrypoint the platform expects.
fn createAppStubs(llvm_builder: *std.zig.llvm.Builder, entrypoints: []const PlatformEntrypoint, target: RocTarget) !void {
    // All stubs take opaque pointers, so one shared pointer type suffices.
    const opaque_ptr = try llvm_builder.ptrType(.default);
    for (entrypoints) |ep| {
        try addRocCallAbiStub(llvm_builder, opaque_ptr, ep.name, target);
    }
}
/// Add an app entrypoint stub that follows the RocCall ABI:
///   void roc__<name>(ops: *RocOps, ret_ptr: *anyopaque, arg_ptr: ?*anyopaque) callconv(.c) void;
fn addRocCallAbiStub(
    llvm_builder: *std.zig.llvm.Builder,
    ptr_type: std.zig.llvm.Builder.Type,
    name: []const u8,
    target: RocTarget,
) !void {
    const Builder = std.zig.llvm.Builder;
    const WipFunction = Builder.WipFunction;

    // Every RocCall stub shares the same three-pointer signature:
    //   (ops, ret_ptr, arg_ptr) -> void
    const param_types = [_]Builder.Type{ ptr_type, ptr_type, ptr_type };
    const fn_type = try llvm_builder.fnType(.void, &param_types, .normal);

    // Symbol name is "roc__<name>", with a leading underscore on macOS targets.
    const sym_prefix: []const u8 = if (target.isMacOS()) "_" else "";
    const full_name = try std.fmt.allocPrint(llvm_builder.gpa, "{s}roc__{s}", .{ sym_prefix, name });
    defer llvm_builder.gpa.free(full_name);

    const fn_name = try llvm_builder.strtabString(full_name);
    const func = try llvm_builder.addFunction(fn_type, fn_name, .default);
    // External linkage keeps the symbol visible to the linker.
    func.setLinkage(.external, llvm_builder);

    var wip = try WipFunction.init(llvm_builder, .{
        .function = func,
        .strip = false,
    });
    defer wip.deinit();

    const entry = try wip.block(0, "entry");
    wip.cursor = .{ .block = entry };

    // Dispatch to a real body for the entrypoints we know about; everything
    // else (including "process_string", which is unsupported in
    // cross-compilation stubs) simply returns void.
    if (std.mem.eql(u8, name, "add_ints")) {
        try addIntsImplementation(&wip, llvm_builder);
    } else if (std.mem.eql(u8, name, "multiply_ints")) {
        try multiplyIntsImplementation(&wip, llvm_builder);
    } else {
        _ = try wip.retVoid();
    }

    try wip.finish();
}
/// Get the expected app entrypoints for known test platforms, mirroring the
/// `extern fn roc__*` declarations in each platform's host.zig.
/// Caller owns the returned slice.
pub fn getTestPlatformEntrypoints(allocator: Allocator, platform_type: []const u8) ![]PlatformEntrypoint {
    // Only the int platform is supported for cross-compilation.
    if (!std.mem.eql(u8, platform_type, "int")) {
        return error.PlatformNotSupported;
    }
    // Based on test/int/platform/host.zig:
    //   extern fn roc__add_ints(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, arg_ptr: ?*anyopaque) callconv(.c) void;
    //   extern fn roc__multiply_ints(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, arg_ptr: ?*anyopaque) callconv(.c) void;
    return try allocator.dupe(PlatformEntrypoint, &[_]PlatformEntrypoint{
        .{ .name = "add_ints" },
        .{ .name = "multiply_ints" },
    });
}
/// Detect platform type from file path
pub fn detectPlatformType(platform_path: []const u8) []const u8 {
    // Walk path components in a cross-platform way; any component named
    // "int" or "str" identifies the test platform the path belongs to.
    var components = std.fs.path.componentIterator(platform_path) catch return "unknown";
    while (components.next()) |part| {
        const segment = part.name;
        if (std.mem.eql(u8, segment, "int")) return "int";
        if (std.mem.eql(u8, segment, "str")) return "str";
    }
    return "unknown";
}
/// Generate implementation for addInts: loads two i64s from arg_ptr, adds them, stores result to ret_ptr
fn addIntsImplementation(wip: *std.zig.llvm.Builder.WipFunction, llvm_builder: *std.zig.llvm.Builder) !void {
    // RocCall ABI: arg 0 is *RocOps (unused here), arg 1 is the return
    // slot, arg 2 points at a { a: i64, b: i64 } argument struct.
    const ret_slot = wip.arg(1);
    const raw_args = wip.arg(2);

    // One opaque pointer type serves both the argument-struct cast and the
    // return-slot cast; the struct layout is a pair of i64 fields.
    const opaque_ptr = try llvm_builder.ptrType(.default);
    const pair_type = try llvm_builder.structType(.normal, &[_]std.zig.llvm.Builder.Type{ .i64, .i64 });
    const typed_args = try wip.cast(.bitcast, raw_args, opaque_ptr, "args_ptr");

    // GEP into field 0 and field 1 of the pair, then load both operands.
    const idx0 = try llvm_builder.intConst(.i32, 0);
    const idx1 = try llvm_builder.intConst(.i32, 1);
    const lhs_ptr = try wip.gep(.inbounds, pair_type, typed_args, &[_]std.zig.llvm.Builder.Value{ idx0.toValue(), idx0.toValue() }, "a_ptr");
    const rhs_ptr = try wip.gep(.inbounds, pair_type, typed_args, &[_]std.zig.llvm.Builder.Value{ idx0.toValue(), idx1.toValue() }, "b_ptr");
    const lhs = try wip.load(.normal, .i64, lhs_ptr, .default, "a");
    const rhs = try wip.load(.normal, .i64, rhs_ptr, .default, "b");

    // result = a + b, written through the (cast) return slot; the shim
    // itself returns void per the RocCall ABI.
    const sum = try wip.bin(.add, lhs, rhs, "result");
    const typed_ret = try wip.cast(.bitcast, ret_slot, opaque_ptr, "ret_i64_ptr");
    _ = try wip.store(.normal, sum, typed_ret, .default);
    _ = try wip.retVoid();
}
/// Generate implementation for multiplyInts: loads two i64s from arg_ptr, multiplies them, stores result to ret_ptr
fn multiplyIntsImplementation(wip: *std.zig.llvm.Builder.WipFunction, llvm_builder: *std.zig.llvm.Builder) !void {
    // RocCall ABI: arg 0 is *RocOps (unused here), arg 1 is the return
    // slot, arg 2 points at a { a: i64, b: i64 } argument struct.
    const ret_slot = wip.arg(1);
    const raw_args = wip.arg(2);

    // One opaque pointer type serves both the argument-struct cast and the
    // return-slot cast; the struct layout is a pair of i64 fields.
    const opaque_ptr = try llvm_builder.ptrType(.default);
    const pair_type = try llvm_builder.structType(.normal, &[_]std.zig.llvm.Builder.Type{ .i64, .i64 });
    const typed_args = try wip.cast(.bitcast, raw_args, opaque_ptr, "args_ptr");

    // GEP into field 0 and field 1 of the pair, then load both operands.
    const idx0 = try llvm_builder.intConst(.i32, 0);
    const idx1 = try llvm_builder.intConst(.i32, 1);
    const lhs_ptr = try wip.gep(.inbounds, pair_type, typed_args, &[_]std.zig.llvm.Builder.Value{ idx0.toValue(), idx0.toValue() }, "a_ptr");
    const rhs_ptr = try wip.gep(.inbounds, pair_type, typed_args, &[_]std.zig.llvm.Builder.Value{ idx0.toValue(), idx1.toValue() }, "b_ptr");
    const lhs = try wip.load(.normal, .i64, lhs_ptr, .default, "a");
    const rhs = try wip.load(.normal, .i64, rhs_ptr, .default, "b");

    // result = a * b, written through the (cast) return slot; the shim
    // itself returns void per the RocCall ABI.
    const product = try wip.bin(.mul, lhs, rhs, "result");
    const typed_ret = try wip.cast(.bitcast, ret_slot, opaque_ptr, "ret_i64_ptr");
    _ = try wip.store(.normal, product, typed_ret, .default);
    _ = try wip.retVoid();
}

View file

@ -3,9 +3,23 @@
const std = @import("std");
const builtin = @import("builtin");
const target = @import("target.zig");
const reporting = @import("reporting");
const Allocator = std.mem.Allocator;
const is_windows = builtin.target.os.tag == .windows;
// Process-wide buffered writer over stderr (unbuffered: empty buffer slice).
// On Windows the file handle is left `undefined` at global-init time and is
// filled in lazily by `stderrWriter()` — presumably because
// std.fs.File.stderr() is not usable during global initialization there;
// TODO confirm against std.fs.File docs.
var stderr_file_writer: std.fs.File.Writer = .{
    .interface = std.fs.File.Writer.initInterface(&.{}),
    .file = if (is_windows) undefined else std.fs.File.stderr(),
    .mode = .streaming,
};

// Returns the shared stderr writer interface. On Windows this (re)assigns the
// stderr handle on every call; the assignment is idempotent, so repeated
// calls are safe. Not thread-safe: callers share one global writer.
fn stderrWriter() *std.Io.Writer {
    if (is_windows) stderr_file_writer.file = std.fs.File.stderr();
    return &stderr_file_writer.interface;
}
// Re-export RocTarget from target.zig for backward compatibility
pub const RocTarget = target.RocTarget;
@ -33,6 +47,7 @@ pub const CompileConfig = struct {
target: RocTarget,
cpu: []const u8 = "",
features: []const u8 = "",
debug: bool = false, // Enable debug info generation in output
/// Check if compiling for the current machine
pub fn isNative(self: CompileConfig) bool {
@ -175,7 +190,7 @@ pub fn initializeLLVM() void {
/// Compile LLVM bitcode file to object file
pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
if (comptime !llvm_available) {
std.log.err("LLVM is not available at compile time", .{});
renderLLVMNotAvailableError(gpa);
return error.LLVMNotAvailable;
}
@ -189,7 +204,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
// Verify input file exists
std.fs.cwd().access(config.input_path, .{}) catch |err| {
std.log.err("Input bitcode file does not exist or is not accessible: {s}, error: {}", .{ config.input_path, err });
renderFileNotAccessibleError(gpa, config.input_path, err);
return false;
};
@ -207,7 +222,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
defer gpa.free(bitcode_path_z);
if (externs.LLVMCreateMemoryBufferWithContentsOfFile(bitcode_path_z.ptr, &mem_buf, &error_message) != 0) {
std.log.err("Failed to load bitcode file: {s}", .{error_message});
renderLLVMError(gpa, "BITCODE LOAD ERROR", "Failed to load bitcode file", std.mem.span(error_message));
externs.LLVMDisposeMessage(error_message);
return false;
}
@ -218,7 +233,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
std.log.debug("Parsing bitcode into LLVM module...", .{});
var module: ?*anyopaque = null;
if (externs.LLVMParseBitcode(mem_buf, &module, &error_message) != 0) {
std.log.err("Failed to parse bitcode: {s}", .{error_message});
renderLLVMError(gpa, "BITCODE PARSE ERROR", "Failed to parse bitcode", std.mem.span(error_message));
externs.LLVMDisposeMessage(error_message);
return false;
}
@ -238,7 +253,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
std.log.debug("Getting LLVM target for triple: {s}", .{target_triple});
var llvm_target: ?*anyopaque = null;
if (externs.LLVMGetTargetFromTriple(target_triple_z.ptr, &llvm_target, &error_message) != 0) {
std.log.err("Failed to get target from triple: {s}", .{error_message});
renderTargetError(gpa, target_triple, std.mem.span(error_message));
externs.LLVMDisposeMessage(error_message);
return false;
}
@ -266,7 +281,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
false, // emulated_tls
);
if (target_machine == null) {
std.log.err("Failed to create target machine for triple='{s}', cpu='{s}', features='{s}'", .{ target_triple, config.cpu, config.features });
renderTargetMachineError(gpa, target_triple, config.cpu, config.features);
return false;
}
defer externs.LLVMDisposeTargetMachine(target_machine);
@ -284,7 +299,8 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
coverage_options.CoverageType = .ZigLLVMCoverageType_None;
const emit_options = ZigLLVMEmitOptions{
.is_debug = false,
// Auto-enable debug when roc is built in debug mode, OR when explicitly requested via --debug
.is_debug = (builtin.mode == .Debug) or config.debug,
.is_small = config.optimization == .size,
.time_report_out = null,
.tsan = false,
@ -307,7 +323,7 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
);
if (emit_result) {
std.log.err("Failed to emit object file to '{s}': {s}", .{ config.output_path, emit_error_message });
renderEmitError(gpa, config.output_path, std.mem.span(emit_error_message));
externs.LLVMDisposeMessage(emit_error_message);
return false;
}
@ -320,3 +336,149 @@ pub fn compileBitcodeToObject(gpa: Allocator, config: CompileConfig) !bool {
pub fn isLLVMAvailable() bool {
return llvm_available;
}
// --- Error Reporting Helpers ---
// Render a fatal report explaining that this binary was built without LLVM.
// Rendering failures are ignored: this is best-effort diagnostics on the way
// to returning an error.
fn renderLLVMNotAvailableError(allocator: Allocator) void {
    var report = reporting.Report.init(allocator, "LLVM NOT AVAILABLE", .fatal);
    defer report.deinit();
    // Each entry becomes a text line followed by a line break; an empty
    // string contributes only the blank separator line.
    const message_lines = [_][]const u8{
        "LLVM is not available at compile time.",
        "",
        "This binary was built without LLVM support.",
        "To use this feature, rebuild roc with LLVM enabled.",
    };
    for (message_lines) |line| {
        if (line.len != 0) report.document.addText(line) catch return;
        report.document.addLineBreak() catch return;
    }
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
// Render a fatal report for an input bitcode file that could not be opened,
// naming both the offending path and the underlying error. Best-effort:
// failures while building or rendering the report are swallowed.
fn renderFileNotAccessibleError(allocator: Allocator, path: []const u8, err: anyerror) void {
    var report = reporting.Report.init(allocator, "FILE NOT ACCESSIBLE", .fatal);
    defer report.deinit();
    const doc = &report.document;
    doc.addText("Input bitcode file does not exist or is not accessible:") catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("    ") catch return;
    doc.addAnnotated(path, .path) catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("Error: ") catch return;
    doc.addAnnotated(@errorName(err), .error_highlight) catch return;
    doc.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
// Generic fatal report for an LLVM C-API failure: a caller-supplied title and
// summary line, followed by the message LLVM handed back. Best-effort:
// failures while building or rendering the report are swallowed.
fn renderLLVMError(allocator: Allocator, title: []const u8, message: []const u8, llvm_message: []const u8) void {
    var report = reporting.Report.init(allocator, title, .fatal);
    defer report.deinit();
    const doc = &report.document;
    doc.addText(message) catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("LLVM error: ") catch return;
    doc.addAnnotated(llvm_message, .error_highlight) catch return;
    doc.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
// Render a fatal report for a target triple LLVM did not recognize, showing
// the triple and LLVM's own error text. Best-effort: failures while building
// or rendering the report are swallowed.
fn renderTargetError(allocator: Allocator, triple: []const u8, llvm_message: []const u8) void {
    var report = reporting.Report.init(allocator, "INVALID TARGET", .fatal);
    defer report.deinit();
    const doc = &report.document;
    doc.addText("Failed to get LLVM target for triple:") catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("    ") catch return;
    doc.addAnnotated(triple, .emphasized) catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("LLVM error: ") catch return;
    doc.addAnnotated(llvm_message, .error_highlight) catch return;
    doc.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
// Render a fatal report for LLVMCreateTargetMachine returning null, echoing
// the full triple/CPU/features configuration so the user can see what was
// rejected. Empty CPU or feature strings are shown as "(default)".
// Best-effort: failures while building or rendering the report are swallowed.
fn renderTargetMachineError(allocator: Allocator, triple: []const u8, cpu: []const u8, features: []const u8) void {
    var report = reporting.Report.init(allocator, "TARGET MACHINE ERROR", .fatal);
    defer report.deinit();
    const doc = &report.document;
    doc.addText("Failed to create LLVM target machine with configuration:") catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("  Triple:   ") catch return;
    doc.addAnnotated(triple, .emphasized) catch return;
    doc.addLineBreak() catch return;
    doc.addText("  CPU:      ") catch return;
    if (cpu.len == 0) {
        doc.addText("(default)") catch return;
    } else {
        doc.addAnnotated(cpu, .emphasized) catch return;
    }
    doc.addLineBreak() catch return;
    doc.addText("  Features: ") catch return;
    if (features.len == 0) {
        doc.addText("(default)") catch return;
    } else {
        doc.addAnnotated(features, .emphasized) catch return;
    }
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("This may indicate an unsupported target configuration.") catch return;
    doc.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
// Render a fatal report for a failed object-file emission, naming the output
// path and LLVM's error text. Best-effort: failures while building or
// rendering the report are swallowed.
fn renderEmitError(allocator: Allocator, output_path: []const u8, llvm_message: []const u8) void {
    var report = reporting.Report.init(allocator, "OBJECT FILE EMIT ERROR", .fatal);
    defer report.deinit();
    const doc = &report.document;
    doc.addText("Failed to emit object file:") catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("  Output:  ") catch return;
    doc.addAnnotated(output_path, .path) catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("LLVM error: ") catch return;
    doc.addAnnotated(llvm_message, .error_highlight) catch return;
    doc.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}

View file

@ -86,6 +86,8 @@ pub const BuildArgs = struct {
opt: OptLevel, // the optimization level
target: ?[]const u8 = null, // the target to compile for (e.g., x64musl, x64glibc)
output: ?[]const u8 = null, // the path where the output binary should be created
debug: bool = false, // include debug information in the output binary
allow_errors: bool = false, // allow building even if there are type errors
z_bench_tokenize: ?[]const u8 = null, // benchmark tokenizer on a file or directory
z_bench_parse: ?[]const u8 = null, // benchmark parser on a file or directory
};
@ -243,6 +245,8 @@ fn parseBuild(args: []const []const u8) CliArgs {
var opt: OptLevel = .dev;
var target: ?[]const u8 = null;
var output: ?[]const u8 = null;
var debug: bool = false;
var allow_errors: bool = false;
var z_bench_tokenize: ?[]const u8 = null;
var z_bench_parse: ?[]const u8 = null;
for (args) |arg| {
@ -259,6 +263,8 @@ fn parseBuild(args: []const []const u8) CliArgs {
\\ --output=<output> The full path to the output binary, including filename. To specify directory only, specify a path that ends in a directory separator (e.g. a slash)
\\ --opt=<size|speed|dev> Optimize the build process for binary size, execution speed, or compilation speed. Defaults to compilation speed (dev)
\\ --target=<target> Target to compile for (e.g., x64musl, x64glibc, arm64musl). Defaults to native target with musl for static linking
\\ --debug Include debug information in the output binary
\\ --allow-errors Allow building even if there are type errors (warnings are always allowed)
\\ --z-bench-tokenize=<path> Benchmark tokenizer on a file or directory
\\ --z-bench-parse=<path> Benchmark parser on a file or directory
\\ -h, --help Print help
@ -298,6 +304,10 @@ fn parseBuild(args: []const []const u8) CliArgs {
} else {
return CliArgs{ .problem = CliProblem{ .missing_flag_value = .{ .flag = "--z-bench-parse" } } };
}
} else if (mem.eql(u8, arg, "--debug")) {
debug = true;
} else if (mem.eql(u8, arg, "--allow-errors")) {
allow_errors = true;
} else {
if (path != null) {
return CliArgs{ .problem = CliProblem{ .unexpected_argument = .{ .cmd = "build", .arg = arg } } };
@ -305,7 +315,7 @@ fn parseBuild(args: []const []const u8) CliArgs {
path = arg;
}
}
return CliArgs{ .build = BuildArgs{ .path = path orelse "main.roc", .opt = opt, .target = target, .output = output, .z_bench_tokenize = z_bench_tokenize, .z_bench_parse = z_bench_parse } };
return CliArgs{ .build = BuildArgs{ .path = path orelse "main.roc", .opt = opt, .target = target, .output = output, .debug = debug, .allow_errors = allow_errors, .z_bench_tokenize = z_bench_tokenize, .z_bench_parse = z_bench_parse } };
}
fn parseBundle(alloc: mem.Allocator, args: []const []const u8) std.mem.Allocator.Error!CliArgs {
@ -918,6 +928,19 @@ test "roc build" {
defer result.deinit(gpa);
try testing.expectEqualStrings("bar.roc", result.problem.unexpected_argument.arg);
}
{
// Test --debug flag
const result = try parse(gpa, &[_][]const u8{ "build", "--debug", "foo.roc" });
defer result.deinit(gpa);
try testing.expectEqualStrings("foo.roc", result.build.path);
try testing.expect(result.build.debug);
}
{
// Test that debug defaults to false
const result = try parse(gpa, &[_][]const u8{ "build", "foo.roc" });
defer result.deinit(gpa);
try testing.expect(!result.build.debug);
}
{
const result = try parse(gpa, &[_][]const u8{ "build", "-h" });
defer result.deinit(gpa);

View file

@ -1,8 +1,7 @@
//! Cross-compilation support and validation for Roc CLI
//! Handles host detection, target validation, and capability matrix
//! Handles target validation and capability matrix
const std = @import("std");
const builtin = @import("builtin");
const target_mod = @import("target.zig");
const RocTarget = target_mod.RocTarget;
@ -32,60 +31,10 @@ pub const CrossCompilationMatrix = struct {
pub const musl_targets = [_]RocTarget{
.x64musl,
.arm64musl,
};
/// Targets that require dynamic linking (glibc) - more complex cross-compilation
pub const glibc_targets = [_]RocTarget{
.x64glibc,
.arm64glibc,
};
/// Windows targets - require MinGW or similar toolchain
pub const windows_targets = [_]RocTarget{
// Future: .x64windows, .arm64windows
};
/// macOS targets - require OSXCross or similar toolchain
pub const macos_targets = [_]RocTarget{
// Future: .x64macos, .arm64macos
.arm32musl,
};
};
/// Detect the host target platform
pub fn detectHostTarget() RocTarget {
return switch (builtin.target.cpu.arch) {
.x86_64 => switch (builtin.target.os.tag) {
.linux => .x64glibc, // Default to glibc on Linux hosts
.windows => .x64win,
.macos => .x64mac,
else => .x64glibc,
},
.aarch64 => switch (builtin.target.os.tag) {
.linux => .arm64glibc,
.windows => .arm64win,
.macos => .arm64mac,
else => .arm64glibc,
},
else => .x64glibc, // Fallback
};
}
/// Check if a target is supported for static linking (musl)
pub fn isMuslTarget(target: RocTarget) bool {
return switch (target) {
.x64musl, .arm64musl => true,
else => false,
};
}
/// Check if a target requires dynamic linking (glibc)
pub fn isGlibcTarget(target: RocTarget) bool {
return switch (target) {
.x64glibc, .arm64glibc => true,
else => false,
};
}
/// Validate cross-compilation from host to target
pub fn validateCrossCompilation(host: RocTarget, target: RocTarget) CrossCompilationResult {
// Native compilation (host == target) is always supported
@ -93,17 +42,17 @@ pub fn validateCrossCompilation(host: RocTarget, target: RocTarget) CrossCompila
return CrossCompilationResult{ .supported = {} };
}
// Support both musl and glibc targets for cross-compilation
if (isMuslTarget(target) or isGlibcTarget(target)) {
// Support musl targets for cross-compilation (statically linked)
if (target.isStatic()) {
return CrossCompilationResult{ .supported = {} };
}
// Windows and macOS cross-compilation not yet supported
// glibc, Windows and macOS cross-compilation not yet supported
return CrossCompilationResult{
.unsupported_cross_compilation = .{
.host = host,
.target = target,
.reason = "Windows and macOS cross-compilation not yet implemented. Please use Linux targets (x64musl, arm64musl, x64glibc, arm64glibc) or log an issue at https://github.com/roc-lang/roc/issues",
.reason = "Only Linux musl targets (x64musl, arm64musl, arm32musl) are currently supported for cross-compilation. glibc, Windows and macOS support coming in a future release. Log an issue at https://github.com/roc-lang/roc/issues",
},
};
}
@ -112,9 +61,8 @@ pub fn validateCrossCompilation(host: RocTarget, target: RocTarget) CrossCompila
pub fn getHostCapabilities(host: RocTarget) []const RocTarget {
_ = host; // For now, all hosts have the same capabilities
// Support both musl and glibc targets from any host
const all_targets = CrossCompilationMatrix.musl_targets ++ CrossCompilationMatrix.glibc_targets;
return &all_targets;
// Only musl targets are supported for cross-compilation
return &CrossCompilationMatrix.musl_targets;
}
/// Print supported targets for the current host
@ -128,6 +76,7 @@ pub fn printSupportedTargets(writer: anytype, host: RocTarget) !void {
try writer.print("\nUnsupported targets (not yet implemented):\n", .{});
const unsupported = [_][]const u8{
"x64glibc, arm64glibc (Linux glibc cross-compilation)",
"x64windows, arm64windows (Windows cross-compilation)",
"x64macos, arm64macos (macOS cross-compilation)",
};

View file

@ -8,6 +8,7 @@ const Allocator = std.mem.Allocator;
const base = @import("base");
const Allocators = base.Allocators;
const libc_finder = @import("libc_finder.zig");
const RocTarget = @import("roc_target").RocTarget;
/// External C functions from zig_llvm.cpp - only available when LLVM is enabled
const llvm_available = if (@import("builtin").is_test) false else @import("config").llvm;
@ -54,14 +55,8 @@ pub const TargetAbi = enum {
gnu,
/// Convert from RocTarget to TargetAbi
pub fn fromRocTarget(roc_target: anytype) TargetAbi {
// Use string matching to avoid circular imports
const target_str = @tagName(roc_target);
if (std.mem.endsWith(u8, target_str, "musl")) {
return .musl;
} else {
return .gnu;
}
pub fn fromRocTarget(roc_target: RocTarget) TargetAbi {
return if (roc_target.isStatic()) .musl else .gnu;
}
};

View file

@ -34,10 +34,14 @@ const BuiltinTypes = eval.BuiltinTypes;
const cli_args = @import("cli_args.zig");
const roc_target = @import("target.zig");
pub const targets_validator = @import("targets_validator.zig");
const platform_validation = @import("platform_validation.zig");
comptime {
if (builtin.is_test) {
std.testing.refAllDecls(cli_args);
std.testing.refAllDecls(targets_validator);
std.testing.refAllDecls(platform_validation);
}
}
const bench = @import("bench.zig");
@ -69,7 +73,40 @@ const RocCrashed = builtins.host_abi.RocCrashed;
const TestOpsEnv = eval.TestOpsEnv;
const Allocators = base.Allocators;
const roc_interpreter_shim_lib = if (builtin.is_test) &[_]u8{} else if (builtin.target.os.tag == .windows) @embedFile("roc_interpreter_shim.lib") else @embedFile("libroc_interpreter_shim.a");
/// Embedded interpreter shim libraries for different targets.
/// The native shim is used for roc run and native builds.
/// Cross-compilation shims are used for roc build --target=<target>.
const ShimLibraries = struct {
    /// Native shim (for host platform builds and roc run).
    /// Empty under `zig test` so test builds do not require the embedded
    /// artifacts to exist.
    const native = if (builtin.is_test)
        &[_]u8{}
    else if (builtin.target.os.tag == .windows)
        @embedFile("roc_interpreter_shim.lib")
    else
        @embedFile("libroc_interpreter_shim.a");

    /// Cross-compilation target shims (Linux musl targets)
    const x64musl = if (builtin.is_test) &[_]u8{} else @embedFile("targets/x64musl/libroc_interpreter_shim.a");
    const arm64musl = if (builtin.is_test) &[_]u8{} else @embedFile("targets/arm64musl/libroc_interpreter_shim.a");

    /// Cross-compilation target shims (Linux glibc targets)
    const x64glibc = if (builtin.is_test) &[_]u8{} else @embedFile("targets/x64glibc/libroc_interpreter_shim.a");
    const arm64glibc = if (builtin.is_test) &[_]u8{} else @embedFile("targets/arm64glibc/libroc_interpreter_shim.a");

    /// Get the appropriate shim library bytes for the given target.
    /// Linux musl/glibc targets get their dedicated cross-compiled shim;
    /// everything else falls back to the native shim.
    pub fn forTarget(target: roc_target.RocTarget) []const u8 {
        return switch (target) {
            .x64musl => x64musl,
            .arm64musl => arm64musl,
            .x64glibc => x64glibc,
            .arm64glibc => arm64glibc,
            // Native/host targets use the native shim
            .x64mac, .arm64mac, .x64win, .arm64win => native,
            // Fallback for other targets (will use native, may not work for cross-compilation)
            else => native,
        };
    }
};
test "main cli tests" {
_ = @import("libc_finder.zig");
@ -172,14 +209,11 @@ fn stderrWriter() *std.Io.Writer {
const posix = if (!is_windows) struct {
extern "c" fn shm_open(name: [*:0]const u8, oflag: c_int, mode: std.c.mode_t) c_int;
extern "c" fn shm_unlink(name: [*:0]const u8) c_int;
extern "c" fn mmap(addr: ?*anyopaque, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: std.c.off_t) ?*anyopaque;
extern "c" fn mmap(addr: ?*anyopaque, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: std.c.off_t) *anyopaque;
extern "c" fn munmap(addr: *anyopaque, len: usize) c_int;
extern "c" fn fcntl(fd: c_int, cmd: c_int, arg: c_int) c_int;
// fcntl constants
const F_GETFD = 1;
const F_SETFD = 2;
const FD_CLOEXEC = 1;
// MAP_FAILED is (void*)-1, not NULL
const MAP_FAILED: *anyopaque = @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))));
} else struct {};
// Windows shared memory functions
@ -677,7 +711,7 @@ fn mainArgs(allocs: *Allocators, args: []const []const u8) !void {
try rocRun(allocs, run_args);
},
.check => |check_args| rocCheck(allocs, check_args),
.build => |build_args| rocBuild(allocs, build_args),
.build => |build_args| try rocBuild(allocs, build_args),
.bundle => |bundle_args| rocBundle(allocs, bundle_args),
.unbundle => |unbundle_args| rocUnbundle(allocs, unbundle_args),
.fmt => |format_args| rocFormat(allocs, format_args),
@ -710,7 +744,10 @@ fn mainArgs(allocs: *Allocators, args: []const []const u8) !void {
/// Generate platform host shim object file using LLVM.
/// Returns the path to the generated object file (allocated from arena, no need to free), or null if LLVM unavailable.
fn generatePlatformHostShim(allocs: *Allocators, cache_dir: []const u8, entrypoint_names: []const []const u8, target: builder.RocTarget) !?[]const u8 {
/// If serialized_module is provided, it will be embedded in the binary (for roc build).
/// If serialized_module is null, the binary will use IPC to get module data (for roc run).
/// If debug is true, include debug information in the generated object file.
fn generatePlatformHostShim(allocs: *Allocators, cache_dir: []const u8, entrypoint_names: []const []const u8, target: builder.RocTarget, serialized_module: ?[]const u8, debug: bool) !?[]const u8 {
// Check if LLVM is available (this is a compile-time check)
if (!llvm_available) {
std.log.debug("LLVM not available, skipping platform host shim generation", .{});
@ -739,7 +776,8 @@ fn generatePlatformHostShim(allocs: *Allocators, cache_dir: []const u8, entrypoi
// Create the complete platform shim
// Note: Symbol names include platform-specific prefixes (underscore for macOS)
platform_host_shim.createInterpreterShim(&llvm_builder, entrypoints.items, target) catch |err| {
// serialized_module is null for roc run (IPC mode) or contains data for roc build (embedded mode)
platform_host_shim.createInterpreterShim(&llvm_builder, entrypoints.items, target, serialized_module) catch |err| {
std.log.err("Failed to create interpreter shim: {}", .{err});
return err;
};
@ -786,6 +824,7 @@ fn generatePlatformHostShim(allocs: *Allocators, cache_dir: []const u8, entrypoi
.output_path = object_path,
.optimization = .speed,
.target = target,
.debug = debug, // Use the debug flag passed from caller
};
if (builder.compileBitcodeToObject(allocs.gpa, compile_config)) |success| {
@ -894,9 +933,43 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
return err;
};
// Use native detection (typically musl) for shim generation to match embedded shim library
// Use native detection for shim generation to match embedded shim library
const shim_target = builder.RocTarget.detectNative();
// Validate platform header has targets section and supports native target
if (platform_paths.platform_source_path) |platform_source| {
if (platform_validation.validatePlatformHeader(allocs.arena, platform_source)) |validation| {
// Validate that the native target is supported
platform_validation.validateTargetSupported(validation.config, shim_target, .exe) catch |err| {
switch (err) {
error.UnsupportedTarget => {
// Create a nice formatted error report
const result = platform_validation.createUnsupportedTargetResult(
platform_source,
shim_target,
.exe,
validation.config,
);
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.UnsupportedTarget;
},
else => {},
}
};
} else |err| {
switch (err) {
error.MissingTargetsSection => {
// Warning only - platform may still work for native builds
// Don't block execution, just inform the user
std.log.debug("Platform at '{s}' has no targets section", .{platform_source});
},
else => {
std.log.debug("Could not validate platform header: {}", .{err});
},
}
}
}
// Extract entrypoints from platform source file
var entrypoints = std.array_list.Managed([]const u8).initCapacity(allocs.arena, 32) catch |err| {
std.log.err("Failed to allocate entrypoints list: {}", .{err});
@ -942,15 +1015,17 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
};
// Always extract to temp dir (unique per process, no race condition)
extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| {
// For roc run, we always use the native shim (null target)
extractReadRocFilePathShimLibrary(allocs, shim_path, null) catch |err| {
std.log.err("Failed to extract read roc file path shim library: {}", .{err});
return err;
};
// Generate platform host shim using the detected entrypoints
// Use temp dir to avoid race conditions when multiple processes run in parallel
const platform_shim_path = generatePlatformHostShim(allocs, temp_dir_path, entrypoints.items, shim_target) catch |err| {
// Pass null for serialized_module since roc run uses IPC mode
// Auto-enable debug when roc is built in debug mode (no explicit --debug flag for roc run)
const platform_shim_path = generatePlatformHostShim(allocs, temp_dir_path, entrypoints.items, shim_target, null, builtin.mode == .Debug) catch |err| {
std.log.err("Failed to generate platform host shim: {}", .{err});
return err;
};
@ -1071,15 +1146,22 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
linker.link(allocs, link_config) catch |err| switch (err) {
linker.LinkError.LLVMNotAvailable => {
std.log.err("LLD linker not available -- this is likely a test executable that was built without LLVM", .{});
const result = platform_validation.targets_validator.ValidationResult{ .linker_not_available = {} };
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return err;
},
linker.LinkError.LinkFailed => {
std.log.err("LLD linker failed to create executable", .{});
const result = platform_validation.targets_validator.ValidationResult{
.linker_failed = .{ .reason = "LLD linker failed" },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return err;
},
else => {
std.log.err("Failed to link executable: {}", .{err});
const result = platform_validation.targets_validator.ValidationResult{
.linker_failed = .{ .reason = @errorName(err) },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return err;
},
};
@ -1103,7 +1185,7 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void {
// Set up shared memory with ModuleEnv
std.log.debug("Setting up shared memory for Roc file: {s}", .{args.path});
const shm_result = setupSharedMemoryWithModuleEnv(allocs, args.path) catch |err| {
const shm_result = setupSharedMemoryWithModuleEnv(allocs, args.path, args.allow_errors) catch |err| {
std.log.err("Failed to set up shared memory with ModuleEnv: {}", .{err});
return err;
};
@ -1271,9 +1353,15 @@ fn runWithWindowsHandleInheritance(allocs: *Allocators, exe_path: []const u8, sh
if (exit_code != 0) {
std.log.debug("Child process {s} exited with code: {}", .{ exe_path, exit_code });
if (exit_code == 0xC0000005) { // STATUS_ACCESS_VIOLATION
std.log.err("Child process crashed with access violation (segfault)", .{});
const result = platform_validation.targets_validator.ValidationResult{
.process_crashed = .{ .exit_code = exit_code, .is_access_violation = true },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
} else if (exit_code >= 0xC0000000) { // NT status codes for exceptions
std.log.err("Child process crashed with exception code: 0x{X}", .{exit_code});
const result = platform_validation.targets_validator.ValidationResult{
.process_crashed = .{ .exit_code = exit_code, .is_access_violation = false },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
}
// Propagate the exit code (truncated to u8 for compatibility)
std.process.exit(@truncate(exit_code));
@ -1295,22 +1383,30 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
std.log.debug("Coordination file written successfully", .{});
// Configure fd inheritance - clear FD_CLOEXEC so child process inherits the fd
// NOTE: The doNotOptimizeAway calls are required to prevent the ReleaseFast
// optimizer from incorrectly optimizing away or reordering the fcntl calls.
const getfd_result = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0);
std.mem.doNotOptimizeAway(&getfd_result);
if (getfd_result < 0) {
std.log.err("Failed to get fd flags: {}", .{c._errno().*});
// Use std.posix.fcntl which properly handles the variadic C function.
const current_flags = std.posix.fcntl(shm_handle.fd, std.posix.F.GETFD, 0) catch |err| {
std.log.err("Failed to get fd flags: {}", .{err});
return error.FdConfigFailed;
}
};
const new_flags = getfd_result & ~@as(c_int, posix.FD_CLOEXEC);
std.mem.doNotOptimizeAway(&new_flags);
const setfd_result = posix.fcntl(shm_handle.fd, posix.F_SETFD, new_flags);
std.mem.doNotOptimizeAway(&setfd_result);
if (setfd_result < 0) {
std.log.err("Failed to set fd flags: {}", .{c._errno().*});
// Clear FD_CLOEXEC - the flag value is 1
const new_flags = current_flags & ~@as(usize, 1);
_ = std.posix.fcntl(shm_handle.fd, std.posix.F.SETFD, new_flags) catch |err| {
std.log.err("Failed to set fd flags: {}", .{err});
return error.FdConfigFailed;
};
// Debug-only verification that fd flags were actually cleared
if (comptime builtin.mode == .Debug) {
const verify_flags = std.posix.fcntl(shm_handle.fd, std.posix.F.GETFD, 0) catch |err| {
std.log.err("Failed to verify fd flags: {}", .{err});
return error.FdConfigFailed;
};
if ((verify_flags & 1) != 0) {
std.log.err("FD_CLOEXEC still set after clearing! fd={} flags={}", .{ shm_handle.fd, verify_flags });
return error.FdConfigFailed;
}
std.log.debug("fd={} FD_CLOEXEC cleared successfully", .{shm_handle.fd});
}
// Build argv slice using arena allocator (memory lives until arena is freed)
@ -1361,14 +1457,11 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand
}
},
.Signal => |signal| {
std.log.err("Child process {s} killed by signal: {}", .{ exe_path, signal });
if (signal == 11) { // SIGSEGV
std.log.err("Child process crashed with segmentation fault (SIGSEGV)", .{});
} else if (signal == 6) { // SIGABRT
std.log.err("Child process aborted (SIGABRT)", .{});
} else if (signal == 9) { // SIGKILL
std.log.err("Child process was killed (SIGKILL)", .{});
}
std.log.debug("Child process {s} killed by signal: {}", .{ exe_path, signal });
const result = platform_validation.targets_validator.ValidationResult{
.process_signaled = .{ .signal = signal },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
// Standard POSIX convention: exit with 128 + signal number
std.process.exit(128 +| @as(u8, @truncate(signal)));
},
@ -1458,7 +1551,7 @@ fn writeToWindowsSharedMemory(data: []const u8, total_size: usize) !SharedMemory
/// This parses, canonicalizes, and type-checks all modules, with the resulting ModuleEnvs
/// ending up in shared memory because all allocations were done into shared memory.
/// Platform type modules have their e_anno_only expressions converted to e_hosted_lambda.
pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []const u8) !SharedMemoryResult {
pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []const u8, allow_errors: bool) !SharedMemoryResult {
// Create shared memory with SharedMemoryAllocator
const page_size = try SharedMemoryAllocator.getSystemPageSize();
var shm = try SharedMemoryAllocator.create(SHARED_MEMORY_SIZE, page_size);
@ -1689,10 +1782,39 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
app_env.module_name = app_module_name;
try app_env.common.calcLineStarts(shm_allocator);
var error_count: usize = 0;
var app_parse_ast = try parse.parse(&app_env.common, allocs.gpa);
defer app_parse_ast.deinit(allocs.gpa);
app_parse_ast.store.emptyScratch();
if (app_parse_ast.hasErrors()) {
const stderr = stderrWriter();
defer stderr.flush() catch {};
for (app_parse_ast.tokenize_diagnostics.items) |diagnostic| {
error_count += 1;
var report = app_parse_ast.tokenizeDiagnosticToReport(diagnostic, allocs.gpa, roc_file_path) catch continue;
defer report.deinit();
reporting.renderReportToTerminal(&report, stderr, ColorPalette.ANSI, reporting.ReportingConfig.initColorTerminal()) catch continue;
}
for (app_parse_ast.parse_diagnostics.items) |diagnostic| {
error_count += 1;
var report = app_parse_ast.parseDiagnosticToReport(&app_env.common, diagnostic, allocs.gpa, roc_file_path) catch continue;
defer report.deinit();
reporting.renderReportToTerminal(&report, stderr, ColorPalette.ANSI, reporting.ReportingConfig.initColorTerminal()) catch continue;
}
        // If errors are not allowed, we must not proceed past parsing.
        // Return early and let the caller handle the error/exit.
if (!allow_errors) {
return SharedMemoryResult{
.handle = SharedMemoryHandle{
.fd = shm.handle,
.ptr = shm.base_ptr,
.size = shm.getUsedSize(),
},
.error_count = error_count,
};
}
}
app_parse_ast.store.emptyScratch();
try app_env.initCIRFields(app_module_name);
var app_module_envs_map = std.AutoHashMap(base.Ident.Idx, Can.AutoImportedType).init(allocs.gpa);
@ -1782,7 +1904,10 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
// The platform wraps app-provided functions (from `requires`) and exports them for the host.
// For example: `provides { main_for_host!: "main" }` where `main_for_host! = main!`
const platform_env = platform_main_env orelse {
std.log.err("No platform found. Every Roc app requires a platform.", .{});
const result = platform_validation.targets_validator.ValidationResult{
.no_platform_found = .{ .app_path = roc_file_path },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.NoPlatformFound;
};
const exports_slice = platform_env.store.sliceDefs(platform_env.exports);
@ -1848,7 +1973,7 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons
// Render all type problems (errors and warnings) exactly as roc check would
// Count errors so the caller can decide whether to proceed with execution
// Skip rendering in test mode to avoid polluting test output
const error_count = if (!builtin.is_test)
error_count += if (!builtin.is_test)
renderTypeProblems(allocs.gpa, &app_checker, &app_env, roc_file_path)
else
0;
@ -1896,6 +2021,10 @@ fn extractExposedModulesFromPlatform(allocs: *Allocators, roc_file_path: []const
// Check if this is a platform file with a platform header
switch (header) {
.platform => |platform_header| {
// Validate platform header has targets section (non-blocking warning)
// This helps platform authors know they need to add targets
_ = validatePlatformHeader(allocs, &parse_ast, roc_file_path);
// Get the exposes collection
const exposes_coll = parse_ast.store.getCollection(platform_header.exposes);
const exposes_items = parse_ast.store.exposedItemSlice(.{ .span = exposes_coll.span });
@ -1919,6 +2048,32 @@ fn extractExposedModulesFromPlatform(allocs: *Allocators, roc_file_path: []const
}
}
/// Check a parsed platform header for a `targets` section and surface any
/// validation problems to the user.
///
/// Returns `true` when the header validates cleanly; `false` otherwise.
/// A `false` result is advisory only — missing targets currently produce a
/// rendered warning report (or a plain log line if report creation fails)
/// without blocking compilation.
fn validatePlatformHeader(allocs: *Allocators, parse_ast: *const parse.AST, platform_path: []const u8) bool {
    const outcome = targets_validator.validatePlatformHasTargets(parse_ast.*, platform_path);
    if (outcome == .valid) return true;

    // Build a nicely formatted report for the validation failure; if that
    // itself fails, fall back to a bare log warning.
    var report = targets_validator.createValidationReport(allocs.gpa, outcome) catch {
        std.log.warn("Platform at {s} is missing targets section", .{platform_path});
        return false;
    };
    defer report.deinit();

    // Suppress terminal output under `zig build test` to keep test logs clean.
    if (!builtin.is_test) {
        reporting.renderReportToTerminal(&report, stderrWriter(), .ANSI, reporting.ReportingConfig.initColorTerminal()) catch {};
    }
    return false;
}
/// Compile a single module to shared memory (for platform modules)
fn compileModuleToSharedMemory(
allocs: *Allocators,
@ -2046,10 +2201,12 @@ fn writeToPosixSharedMemory(data: []const u8, total_size: usize) !SharedMemoryHa
0x0001, // MAP_SHARED
shm_fd,
0,
) orelse {
);
// mmap returns MAP_FAILED ((void*)-1) on error, not NULL
if (mapped_ptr == posix.MAP_FAILED) {
_ = c.close(shm_fd);
return error.SharedMemoryMapFailed;
};
}
const mapped_memory = @as([*]u8, @ptrCast(mapped_ptr))[0..total_size];
// Write length at the beginning
@ -2522,9 +2679,11 @@ fn extractEntrypointsFromPlatform(allocs: *Allocators, roc_file_path: []const u8
}
}
/// Extract the embedded roc_shim library to the specified path
/// This library contains the shim code that runs in child processes to read ModuleEnv from shared memory
pub fn extractReadRocFilePathShimLibrary(allocs: *Allocators, output_path: []const u8) !void {
/// Extract the embedded roc_shim library to the specified path for the given target.
/// This library contains the shim code that runs in child processes to read ModuleEnv from shared memory.
/// For native builds and roc run, use the native shim (pass null or native target).
/// For cross-compilation, pass the target to get the appropriate shim.
pub fn extractReadRocFilePathShimLibrary(allocs: *Allocators, output_path: []const u8, target: ?roc_target.RocTarget) !void {
_ = allocs; // unused but kept for consistency
if (builtin.is_test) {
@ -2534,11 +2693,17 @@ pub fn extractReadRocFilePathShimLibrary(allocs: *Allocators, output_path: []con
return;
}
// Get the appropriate shim for the target (or native if not specified)
const shim_data = if (target) |t|
ShimLibraries.forTarget(t)
else
ShimLibraries.native;
// Write the embedded shim library to the output path
const shim_file = try std.fs.cwd().createFile(output_path, .{});
defer shim_file.close();
try shim_file.writeAll(roc_interpreter_shim_lib);
try shim_file.writeAll(shim_data);
}
/// Format a bundle path validation reason into a user-friendly error message
@ -2684,6 +2849,43 @@ pub fn rocBundle(allocs: *Allocators, args: cli_args.BundleArgs) !void {
}
}
// Validate platform header if the first file looks like a platform
// This ensures bundles have proper targets sections
const main_file = file_paths.items[0];
if (std.mem.endsWith(u8, main_file, ".roc")) {
if (platform_validation.validatePlatformHeader(allocs.arena, main_file)) |validation| {
// Platform validation succeeded - validate all target files exist
if (platform_validation.validateAllTargetFilesExist(
allocs.arena,
validation.config,
validation.platform_dir,
)) |result| {
// Render the validation error with nice formatting
_ = platform_validation.renderValidationError(allocs.gpa, result, stderr);
return switch (result) {
.missing_target_file => error.MissingTargetFile,
.missing_files_directory => error.MissingFilesDirectory,
else => error.MissingTargetFile,
};
}
std.log.debug("Platform validation passed for: {s}", .{main_file});
} else |err| {
switch (err) {
error.MissingTargetsSection => {
// Only warn - file might be an app, not a platform
std.log.debug("File {s} has no targets section (may be an app)", .{main_file});
},
error.ParseError, error.FileReadError => {
// Parsing failed - could be invalid syntax or not a Roc file
std.log.debug("Could not parse {s} as platform: {}", .{ main_file, err });
},
else => {
std.log.warn("Platform validation warning: {}", .{err});
},
}
}
}
// Create temporary output file
const temp_filename = "temp_bundle.tar.zst";
const temp_file = try tmp_dir.createFile(temp_filename, .{
@ -2862,211 +3064,69 @@ fn rocBuild(allocs: *Allocators, args: cli_args.BuildArgs) !void {
return;
}
// Import needed modules
// Use embedded interpreter build approach
// This compiles the Roc app, serializes the ModuleEnv, and embeds it in the binary
try rocBuildEmbedded(allocs, args);
}
/// Build a standalone executable with the interpreter and embedded module data.
/// This is the primary build path that creates executables without requiring IPC.
fn rocBuildEmbedded(allocs: *Allocators, args: cli_args.BuildArgs) !void {
const target_mod = @import("target.zig");
const app_stub = @import("app_stub.zig");
const cross_compilation = @import("cross_compilation.zig");
std.log.info("Building {s} for cross-compilation", .{args.path});
std.log.info("Building {s} with embedded interpreter", .{args.path});
// Detect host target
const host_target = cross_compilation.detectHostTarget();
std.log.info("Host: {} ({s})", .{ host_target, host_target.toTriple() });
// Parse target if provided, otherwise use native with musl preference
// Parse target if provided, otherwise use native
const target = if (args.target) |target_str| blk: {
break :blk target_mod.RocTarget.fromString(target_str) orelse {
std.log.err("Invalid target: {s}", .{target_str});
std.log.err("Valid targets: x64musl, x64glibc, arm64musl, arm64glibc, etc.", .{});
const result = platform_validation.targets_validator.ValidationResult{
.invalid_target = .{ .target_str = target_str },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.InvalidTarget;
};
} else target_mod.RocTarget.detectNative();
std.log.info("Target: {} ({s})", .{ target, target.toTriple() });
std.log.debug("Target: {} ({s})", .{ target, target.toTriple() });
// Validate cross-compilation support
const cross_validation = cross_compilation.validateCrossCompilation(host_target, target);
switch (cross_validation) {
.supported => {
std.log.info("Cross-compilation from {s} to {s} is supported", .{ @tagName(host_target), @tagName(target) });
},
.unsupported_host_target, .unsupported_cross_compilation, .missing_toolchain => {
const stderr = stderrWriter();
try cross_compilation.printCrossCompilationError(stderr, cross_validation);
return error.UnsupportedCrossCompilation;
},
}
// Only support int test platform for cross-compilation
// Check if path contains "int" directory using cross-platform path handling
const path_contains_int = blk: {
var iter = std.fs.path.componentIterator(args.path) catch break :blk false;
while (iter.next()) |component| {
if (std.mem.eql(u8, component.name, "int")) {
break :blk true;
}
}
break :blk false;
};
const platform_type = if (path_contains_int)
"int"
else {
std.log.err("roc build cross-compilation currently only supports the int test platform", .{});
std.log.err("Your app path: {s}", .{args.path});
std.log.err("For str platform and other platforms, please use regular 'roc' command", .{});
return error.UnsupportedPlatform;
};
std.log.info("Detected platform type: {s}", .{platform_type});
// Get platform directory path
const platform_dir = if (std.mem.eql(u8, platform_type, "int"))
try std.fs.path.join(allocs.arena, &.{ "test", "int", "platform" })
else
try std.fs.path.join(allocs.arena, &.{ "test", "str", "platform" });
// Check that platform exists
std.fs.cwd().access(platform_dir, .{}) catch |err| {
std.log.err("Platform directory not found: {s} ({})", .{ platform_dir, err });
return err;
};
// Get target-specific host library path
// Use target OS to determine library filename, not host OS
const host_lib_filename = if (target.toOsTag() == .windows) "host.lib" else "libhost.a";
const host_lib_path = blk: {
// Try target-specific host library first
const target_specific_path = try std.fs.path.join(allocs.arena, &.{ platform_dir, "targets", @tagName(target), host_lib_filename });
std.fs.cwd().access(target_specific_path, .{}) catch {
// Fallback to generic host library
std.log.warn("Target-specific host library not found, falling back to generic: {s}", .{target_specific_path});
break :blk try std.fs.path.join(allocs.arena, &.{ platform_dir, host_lib_filename });
// Check for unsupported cross-compilation scenarios
// glibc targets require a full libc for linking, which is only available on Linux hosts
const host_os = builtin.target.os.tag;
if (target.isDynamic() and host_os != .linux) {
const result = platform_validation.targets_validator.ValidationResult{
.unsupported_glibc_cross = .{
.target = target,
.host_os = @tagName(host_os),
},
};
break :blk target_specific_path;
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.UnsupportedCrossCompilation;
}
std.fs.cwd().access(host_lib_path, .{}) catch |err| {
std.log.err("Host library not found: {s} ({})", .{ host_lib_path, err });
// Set up shared memory with ModuleEnv (same as roc run)
std.log.debug("Compiling Roc file: {s}", .{args.path});
const shm_handle = setupSharedMemoryWithModuleEnv(allocs, args.path, args.allow_errors) catch |err| {
std.log.err("Failed to compile Roc file: {}", .{err});
return err;
};
std.log.debug("Compilation complete, serialized size: {} bytes", .{shm_handle.handle.size});
// Get expected entrypoints for this platform
const entrypoints = try app_stub.getTestPlatformEntrypoints(allocs.gpa, platform_type);
defer allocs.gpa.free(entrypoints);
std.log.info("Expected entrypoints: {}", .{entrypoints.len});
for (entrypoints, 0..) |ep, i| {
std.log.info(" {}: roc__{s}", .{ i, ep.name });
}
// Create temp directory for build artifacts using Roc's cache system
const cache_config = CacheConfig{
.enabled = true,
.verbose = false,
};
var cache_manager = CacheManager.init(allocs.gpa, cache_config, Filesystem.default());
const cache_dir = try cache_manager.config.getCacheEntriesDir(allocs.arena);
const temp_dir = try std.fs.path.join(allocs.arena, &.{ cache_dir, "roc_build" });
std.fs.cwd().makePath(temp_dir) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return err,
};
// Generate app stub object file
const app_stub_obj = try app_stub.generateAppStubObject(allocs.arena, temp_dir, entrypoints, target);
// Get CRT files for the target
const crt_files = try target_mod.getVendoredCRTFiles(allocs.arena, target, platform_dir);
// Create object files list for linking
var object_files = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 16);
// Add our app stub and host library
try object_files.append(app_stub_obj);
try object_files.append(host_lib_path);
// Setup platform files based on target
var platform_files_pre = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 16);
var platform_files_post = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 16);
var extra_args = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 32);
// Add CRT files in correct order
if (crt_files.crt1_o) |crt1| try platform_files_pre.append(crt1);
if (crt_files.crti_o) |crti| try platform_files_pre.append(crti);
if (crt_files.crtn_o) |crtn| try platform_files_post.append(crtn);
// For static linking with musl, add libc.a
if (crt_files.libc_a) |libc| {
try platform_files_post.append(libc);
} else if (target.isDynamic()) {
// For dynamic linking with glibc, generate stub library for cross-compilation
// Check if we're doing actual cross-compilation
const is_cross_compiling = host_target != target;
if (is_cross_compiling) {
// For cross-compilation, use pre-built vendored stubs from the platform targets folder
const target_name = switch (target) {
.x64glibc => "x64glibc",
.arm64glibc => "arm64glibc",
else => {
std.log.err("Cross-compilation target {} not supported for glibc", .{target});
return error.UnsupportedTarget;
},
};
// Check if vendored stubs exist in the platform targets folder
const stub_dir = try std.fmt.allocPrint(allocs.arena, "test/int/platform/targets/{s}", .{target_name});
const stub_so_path = try std.fmt.allocPrint(allocs.arena, "{s}/libc.so.6", .{stub_dir});
// Verify the vendored stub exists
std.fs.cwd().access(stub_so_path, .{}) catch |err| {
std.log.err("Pre-built glibc stub not found: {s}", .{stub_so_path});
std.log.err("Error: {}", .{err});
std.log.err("This suggests the build system didn't generate the required stubs.", .{});
std.log.err("Try running 'zig build' first to generate platform target files.", .{});
return err;
};
// Use the vendored stub library
const stub_dir_arg = try std.fmt.allocPrint(allocs.arena, "-L{s}", .{stub_dir});
try extra_args.append(stub_dir_arg);
try extra_args.append("-lc");
std.log.info("Using pre-built glibc stub from platform targets: {s}", .{stub_dir});
// Clean up shared memory when done (we'll copy the data)
defer {
if (comptime is_windows) {
_ = ipc.platform.windows.UnmapViewOfFile(shm_handle.handle.ptr);
_ = ipc.platform.windows.CloseHandle(@ptrCast(shm_handle.handle.fd));
} else {
// For native compilation, use system libraries
const common_lib_paths = [_][]const u8{
"/lib/x86_64-linux-gnu",
"/usr/lib/x86_64-linux-gnu",
"/lib/aarch64-linux-gnu",
"/usr/lib/aarch64-linux-gnu",
"/lib64",
"/usr/lib64",
"/lib",
"/usr/lib",
};
for (common_lib_paths) |lib_path| {
// Check if the directory exists before adding it
std.fs.cwd().access(lib_path, .{}) catch continue;
const search_arg = try std.fmt.allocPrint(allocs.arena, "-L{s}", .{lib_path});
try extra_args.append(search_arg);
}
try extra_args.append("-lc");
_ = posix.munmap(shm_handle.handle.ptr, shm_handle.handle.size);
_ = c.close(shm_handle.handle.fd);
}
// Add dynamic linker path
if (target.getDynamicLinkerPath()) |dl_path| {
const dl_arg = try std.fmt.allocPrint(allocs.arena, "--dynamic-linker={s}", .{dl_path});
try extra_args.append(dl_arg);
} else |_| {}
}
// Extract serialized module data for embedding
const serialized_module = @as([*]u8, @ptrCast(shm_handle.handle.ptr))[0..shm_handle.handle.size];
// Determine output path
const base_output_path = if (args.output) |output|
const output_path = if (args.output) |output|
try allocs.arena.dupe(u8, output)
else blk: {
const basename = std.fs.path.basename(args.path);
@ -3077,13 +3137,188 @@ fn rocBuild(allocs: *Allocators, args: cli_args.BuildArgs) !void {
break :blk try allocs.arena.dupe(u8, name_without_ext);
};
// Add .exe extension on Windows if not already present
const output_path = if (target.toOsTag() == .windows and !std.mem.endsWith(u8, base_output_path, ".exe"))
try std.fmt.allocPrint(allocs.arena, "{s}.exe", .{base_output_path})
else
try allocs.arena.dupe(u8, base_output_path);
// Set up cache directory for build artifacts
const cache_config = CacheConfig{
.enabled = true,
.verbose = false,
};
var cache_manager = CacheManager.init(allocs.gpa, cache_config, Filesystem.default());
const cache_dir = try cache_manager.config.getCacheEntriesDir(allocs.arena);
const build_cache_dir = try std.fs.path.join(allocs.arena, &.{ cache_dir, "roc_build" });
std.fs.cwd().makePath(build_cache_dir) catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return err,
};
// Get platform directory and host library (do this first to get platform source)
const app_dir = std.fs.path.dirname(args.path) orelse ".";
const platform_spec = extractPlatformSpecFromApp(allocs, args.path) catch |err| {
std.log.err("Failed to extract platform spec: {}", .{err});
return err;
};
std.log.debug("Platform spec: {s}", .{platform_spec});
// Resolve platform path
const platform_paths: ?PlatformPaths = if (std.mem.startsWith(u8, platform_spec, "./") or std.mem.startsWith(u8, platform_spec, "../"))
resolvePlatformSpecToPaths(allocs, platform_spec, app_dir) catch |err| blk: {
std.log.err("Failed to resolve platform paths: {}", .{err});
break :blk null;
}
else if (std.mem.startsWith(u8, platform_spec, "http://") or std.mem.startsWith(u8, platform_spec, "https://"))
resolveUrlPlatform(allocs, platform_spec) catch null
else
null;
// Validate platform header has targets section and get link configuration
// The targets section is REQUIRED - it defines exactly what to link
const platform_source = if (platform_paths) |pp| pp.platform_source_path else null;
const validation = if (platform_source) |ps|
platform_validation.validatePlatformHeader(allocs.arena, ps) catch |err| {
switch (err) {
error.MissingTargetsSection => {
const result = platform_validation.ValidationResult{
.missing_targets_section = .{ .platform_path = ps },
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.MissingTargetsSection;
},
else => {
std.log.err("Failed to validate platform header: {}", .{err});
return err;
},
}
}
else {
std.log.err("Platform source not found - cannot determine link configuration", .{});
return error.NoPlatformSource;
};
const targets_config = validation.config;
const platform_dir = validation.platform_dir;
// Validate that the requested target is supported and get its link spec
platform_validation.validateTargetSupported(targets_config, target, .exe) catch |err| {
switch (err) {
error.UnsupportedTarget => {
const result = platform_validation.createUnsupportedTargetResult(
platform_source.?,
target,
.exe,
targets_config,
);
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.UnsupportedTarget;
},
else => return err,
}
};
// Get the link spec for this target - tells us exactly what files to link
const link_spec = targets_config.getLinkSpec(target, .exe) orelse {
std.log.err("No link spec for target {s} - this shouldn't happen after validation", .{@tagName(target)});
return error.UnsupportedTarget;
};
// Build link file lists from the link spec
// Files before 'app' go in pre, files after 'app' go in post
const target_name = @tagName(target);
const files_dir = targets_config.files_dir orelse "targets";
var platform_files_pre = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 8);
var platform_files_post = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 8);
var hit_app = false;
for (link_spec.items) |item| {
switch (item) {
.file_path => |path| {
// Build full path: platform_dir/files_dir/target_name/path
const full_path = try std.fs.path.join(allocs.arena, &.{ platform_dir, files_dir, target_name, path });
// Validate the file exists
std.fs.cwd().access(full_path, .{}) catch {
const result = platform_validation.targets_validator.ValidationResult{
.missing_target_file = .{
.target = target,
.link_type = .exe,
.file_path = path,
.expected_full_path = full_path,
},
};
_ = platform_validation.renderValidationError(allocs.gpa, result, stderrWriter());
return error.MissingTargetFile;
};
if (!hit_app) {
try platform_files_pre.append(full_path);
} else {
try platform_files_post.append(full_path);
}
},
.app => {
hit_app = true;
},
.win_gui => {
// Windows subsystem flag - will be handled by linker
},
}
}
std.log.debug("Link spec: {} files before app, {} files after app", .{ platform_files_pre.items.len, platform_files_post.items.len });
// Extract entrypoints from the platform source file
std.log.debug("Extracting entrypoints from platform...", .{});
var entrypoints = std.array_list.Managed([]const u8).initCapacity(allocs.arena, 16) catch {
std.log.err("Failed to allocate entrypoints list", .{});
return error.OutOfMemory;
};
extractEntrypointsFromPlatform(allocs, platform_source.?, &entrypoints) catch |err| {
std.log.err("Failed to extract entrypoints: {}", .{err});
return err;
};
std.log.debug("Found {} entrypoints", .{entrypoints.items.len});
// Extract shim library (interpreter shim)
// Include target name in filename to support different targets in the same cache
const shim_filename = try std.fmt.allocPrint(allocs.arena, "libroc_shim_{s}.a", .{target_name});
const shim_path = try std.fs.path.join(allocs.arena, &.{ build_cache_dir, shim_filename });
std.fs.cwd().access(shim_path, .{}) catch {
// Shim not found, extract it
// For roc build, use the target-specific shim for cross-compilation support
std.log.debug("Extracting shim library for target {s} to {s}...", .{ target_name, shim_path });
extractReadRocFilePathShimLibrary(allocs, shim_path, target) catch |err| {
std.log.err("Failed to extract shim library: {}", .{err});
return err;
};
};
// Generate platform host shim with embedded module data
// Enable debug if explicitly requested via --debug OR if roc is built in debug mode
const enable_debug = args.debug or (builtin.mode == .Debug);
std.log.debug("Generating platform host shim with {} bytes of embedded data (debug={})...", .{ serialized_module.len, enable_debug });
const platform_shim_path = generatePlatformHostShim(allocs, build_cache_dir, entrypoints.items, target, serialized_module, enable_debug) catch |err| {
std.log.err("Failed to generate platform host shim: {}", .{err});
return err;
};
std.log.debug("Platform shim generated: {?s}", .{platform_shim_path});
// Link everything together
// object_files = the Roc application (interpreter shim + platform shim with embedded module)
// platform_files_pre/post = files declared in link spec before/after 'app'
var object_files = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 4);
try object_files.append(shim_path);
if (platform_shim_path) |psp| {
try object_files.append(psp);
}
// Extra linker args for system libraries (not platform-provided)
var extra_args = try std.array_list.Managed([]const u8).initCapacity(allocs.arena, 8);
if (target.isMacOS()) {
// macOS requires linking with system libraries
try extra_args.append("-lSystem");
}
// Use LLD for linking
const linker_mod = @import("linker.zig");
const target_abi = if (target.isStatic()) linker_mod.TargetAbi.musl else linker_mod.TargetAbi.gnu;
const link_config = linker_mod.LinkConfig{
@ -3100,7 +3335,27 @@ fn rocBuild(allocs: *Allocators, args: cli_args.BuildArgs) !void {
try linker_mod.link(allocs, link_config);
std.log.info("Successfully built executable: {s}", .{output_path});
std.log.info("Successfully built standalone executable: {s}", .{output_path});
}
/// Derive the base platform directory from a host library path.
///
/// If the path contains a "targets/<target>/" component, return the directory
/// that precedes "targets". This handles the case where the native host lib is
/// in targets/x64musl/libhost.a and we need to find targets/arm64musl/libhost.a
/// for cross-compilation. If no "targets" component is present, the library's
/// own directory is returned unchanged.
fn deriveBasePlatformDir(host_lib_path: []const u8) []const u8 {
    // `const` (not `var`): the binding is never mutated, and modern Zig
    // rejects an unmutated `var` at compile time.
    const platform_dir = std.fs.path.dirname(host_lib_path) orelse ".";

    // Look for an embedded "targets" path component. Check the forward-slash
    // form first, then the backslash form so Windows-style paths (used
    // elsewhere in this file) are also recognized.
    const targets_idx = std.mem.indexOf(u8, platform_dir, "/targets/") orelse
        std.mem.indexOf(u8, platform_dir, "\\targets\\");
    if (targets_idx) |idx| {
        // Truncate to the directory that contains "targets".
        return platform_dir[0..idx];
    }

    // Path starts directly with a "targets" component: the base is the
    // current directory.
    if (std.mem.startsWith(u8, platform_dir, "targets/") or
        std.mem.startsWith(u8, platform_dir, "targets\\"))
    {
        return ".";
    }

    return platform_dir;
}
/// Information about a test (expect statement) to be evaluated

View file

@ -157,7 +157,7 @@ fn addRocExportedFunction(builder: *Builder, entrypoint_fn: Builder.Function.Ind
///
/// The generated library is then compiled using LLVM to an object file and linked with
/// both the host and the Roc interpreter to create a dev build executable.
pub fn createInterpreterShim(builder: *Builder, entrypoints: []const EntryPoint, target: RocTarget) !void {
pub fn createInterpreterShim(builder: *Builder, entrypoints: []const EntryPoint, target: RocTarget, serialized_module: ?[]const u8) !void {
// Add the extern roc_entrypoint declaration
const entrypoint_fn = try addRocEntrypoint(builder, target);
@ -165,4 +165,78 @@ pub fn createInterpreterShim(builder: *Builder, entrypoints: []const EntryPoint,
for (entrypoints) |entry| {
_ = try addRocExportedFunction(builder, entrypoint_fn, entry.name, entry.idx, target);
}
try addRocSerializedModule(builder, target, serialized_module);
}
/// Adds exported globals for serialized module data.
///
/// This creates two exported globals:
/// - roc__serialized_base_ptr: pointer to the serialized data (or null)
/// - roc__serialized_size: size of the serialized data in bytes (or 0)
///
/// When data is provided, an internal constant array is created and the base_ptr
/// points to it. When data is null, both values are set to null/zero.
fn addRocSerializedModule(builder: *Builder, target: RocTarget, serialized_module: ?[]const u8) !void {
    const ptr_type = try builder.ptrType(.default);

    // usize-equivalent integer type for the target's pointer width.
    const usize_type: Builder.Type = switch (target.ptrBitWidth()) {
        32 => .i32,
        64 => .i64,
        else => unreachable, // RocTarget only describes 32- and 64-bit targets
    };

    // MachO requires a leading underscore on exported symbol names, so the
    // macOS spelling differs. Selecting between two string literals needs no
    // heap allocation (replaces the previous allocPrint/dupe + free pairs).
    const base_ptr_name_str: []const u8 = if (target.isMacOS())
        "_roc__serialized_base_ptr"
    else
        "roc__serialized_base_ptr";
    const base_ptr_name = try builder.strtabString(base_ptr_name_str);

    const size_name_str: []const u8 = if (target.isMacOS())
        "_roc__serialized_size"
    else
        "roc__serialized_size";
    const size_name = try builder.strtabString(size_name_str);

    if (serialized_module) |bytes| {
        // Internal constant array holding the raw serialized bytes.
        const str = try builder.string(bytes);
        const str_const = try builder.stringConst(str);
        const internal_name = try builder.strtabString(".roc_serialized_data");
        const array_var = try builder.addVariable(internal_name, str_const.typeOf(builder), .default);
        try array_var.setInitializer(str_const, builder);
        array_var.setLinkage(.internal, builder);
        array_var.setMutability(.global, builder);

        // Exported pointer to the internal array.
        const base_ptr_var = try builder.addVariable(base_ptr_name, ptr_type, .default);
        try base_ptr_var.setInitializer(array_var.toConst(builder), builder);
        base_ptr_var.setLinkage(.external, builder);

        // Exported size in bytes.
        const size_const = try builder.intConst(usize_type, bytes.len);
        const size_var = try builder.addVariable(size_name, usize_type, .default);
        try size_var.setInitializer(size_const, builder);
        size_var.setLinkage(.external, builder);
    } else {
        // No serialized module: export a null pointer and a zero size.
        const null_ptr = try builder.nullConst(ptr_type);
        const base_ptr_var = try builder.addVariable(base_ptr_name, ptr_type, .default);
        try base_ptr_var.setInitializer(null_ptr, builder);
        base_ptr_var.setLinkage(.external, builder);

        const zero_size = try builder.intConst(usize_type, 0);
        const size_var = try builder.addVariable(size_name, usize_type, .default);
        try size_var.setInitializer(zero_size, builder);
        size_var.setLinkage(.external, builder);
    }
}

View file

@ -0,0 +1,306 @@
//! Platform header validation utilities.
//!
//! Provides shared validation logic for platform headers, including:
//! - Parsing platform headers to extract TargetsConfig
//! - Validating targets section exists
//! - Validating target files exist on disk
//! - Validating a specific target is supported
//!
//! This module is used by both `roc build` and `roc bundle` commands.
const std = @import("std");
const builtin = @import("builtin");
const parse = @import("parse");
const base = @import("base");
const reporting = @import("reporting");
const target_mod = @import("target.zig");
pub const targets_validator = @import("targets_validator.zig");
const TargetsConfig = target_mod.TargetsConfig;
const RocTarget = target_mod.RocTarget;
const LinkType = target_mod.LinkType;
const LinkItem = target_mod.LinkItem;
const TargetLinkSpec = target_mod.TargetLinkSpec;
const is_windows = builtin.target.os.tag == .windows;

// Shared stderr writer used by the render* helpers below. On Windows the
// file handle is left undefined here and filled in on first use by
// stderrWriter() — presumably because the handle cannot be obtained in a
// global initializer on that platform; confirm if this init pattern changes.
var stderr_file_writer: std.fs.File.Writer = .{
    .interface = std.fs.File.Writer.initInterface(&.{}),
    .file = if (is_windows) undefined else std.fs.File.stderr(),
    .mode = .streaming,
};

/// Returns the shared stderr writer, lazily initializing the file handle
/// on Windows (see note on `stderr_file_writer`).
fn stderrWriter() *std.Io.Writer {
    if (is_windows) stderr_file_writer.file = std.fs.File.stderr();
    return &stderr_file_writer.interface;
}
/// Re-export ValidationResult for callers that need to create reports
pub const ValidationResult = targets_validator.ValidationResult;
/// Errors that can occur during platform validation.
/// Returned by validatePlatformHeader and validateTargetSupported; the
/// functions that return these render their own diagnostics to stderr
/// where noted, so callers generally only need to propagate the error.
pub const ValidationError = error{
    /// Platform header is missing required targets section
    MissingTargetsSection,
    /// Requested target is not declared in platform's targets section
    UnsupportedTarget,
    /// A file declared in targets section doesn't exist
    MissingTargetFile,
    /// Files directory specified in targets section doesn't exist
    MissingFilesDirectory,
    /// Failed to parse platform header
    ParseError,
    /// Failed to read platform source file
    FileReadError,
    /// Out of memory
    OutOfMemory,
};
/// Result of platform validation with parsed config.
pub const PlatformValidation = struct {
    /// Parsed targets configuration
    config: TargetsConfig,
    /// Directory containing the platform (dirname of platform source).
    /// NOTE(review): this is a subslice of the caller-supplied source path
    /// (see validatePlatformHeader), not a heap copy — the original path
    /// must outlive this struct.
    platform_dir: []const u8,
};
/// Parse and validate a platform header.
/// Returns the TargetsConfig if valid, or an error with details.
/// On failure, a diagnostic report is rendered to stderr before returning.
///
/// NOTE(review): `source`, `env`, and `ast` are not explicitly freed or
/// deinitialized here — presumably `allocator` is an arena or the parsed
/// structures take ownership; confirm before calling with a
/// general-purpose allocator.
pub fn validatePlatformHeader(
    allocator: std.mem.Allocator,
    platform_source_path: []const u8,
) ValidationError!PlatformValidation {
    // Read platform source (entire file, no size limit).
    const source = std.fs.cwd().readFileAlloc(allocator, platform_source_path, std.math.maxInt(usize)) catch {
        renderFileReadError(allocator, platform_source_path);
        return error.FileReadError;
    };

    // Parse platform header
    var env = base.CommonEnv.init(allocator, source) catch {
        std.log.err("Failed to initialize parse environment for: {s}", .{platform_source_path});
        return error.ParseError;
    };
    const ast = parse.parse(&env, allocator) catch {
        renderParseError(allocator, platform_source_path);
        return error.ParseError;
    };

    // Extract TargetsConfig; fromAST returns null when the header has no
    // `targets:` section, which is an error for platforms.
    const config = TargetsConfig.fromAST(allocator, ast) catch {
        return error.ParseError;
    } orelse {
        renderMissingTargetsError(allocator, platform_source_path);
        return error.MissingTargetsSection;
    };

    return .{
        .config = config,
        // Subslice of platform_source_path (borrowed, not copied).
        .platform_dir = std.fs.path.dirname(platform_source_path) orelse ".",
    };
}
/// Render a file read error report to stderr.
/// Best-effort: any failure while building the report aborts rendering
/// (`catch return`), and terminal-rendering errors are ignored, so error
/// reporting itself can never fail the caller.
fn renderFileReadError(allocator: std.mem.Allocator, path: []const u8) void {
    var report = reporting.Report.init(allocator, "FILE READ ERROR", .fatal);
    defer report.deinit();
    report.document.addText("Failed to read platform source file:") catch return;
    report.document.addLineBreak() catch return;
    report.document.addLineBreak() catch return;
    report.document.addText(" ") catch return;
    report.document.addAnnotated(path, .path) catch return;
    report.document.addLineBreak() catch return;
    report.document.addLineBreak() catch return;
    report.document.addText("Check that the file exists and you have read permissions.") catch return;
    report.document.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
/// Render a parse error report to stderr.
/// Best-effort: failures while assembling the report abort rendering
/// silently, so error reporting never fails the caller.
fn renderParseError(allocator: std.mem.Allocator, path: []const u8) void {
    var report = reporting.Report.init(allocator, "PARSE ERROR", .fatal);
    defer report.deinit();

    const doc = &report.document;
    doc.addText("Failed to parse platform header:") catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText(" ") catch return;
    doc.addAnnotated(path, .path) catch return;
    doc.addLineBreak() catch return;
    doc.addLineBreak() catch return;
    doc.addText("Check that the file contains valid Roc syntax.") catch return;
    doc.addLineBreak() catch return;

    const render_config = reporting.ReportingConfig.initColorTerminal();
    reporting.renderReportToTerminal(&report, stderrWriter(), .ANSI, render_config) catch {};
}
/// Render a missing targets section error report to stderr, including an
/// example `targets:` block the user can copy.
/// Best-effort: report-building failures abort rendering (`catch return`)
/// and terminal errors are ignored, so this never fails the caller.
fn renderMissingTargetsError(allocator: std.mem.Allocator, path: []const u8) void {
    var report = reporting.Report.init(allocator, "MISSING TARGETS SECTION", .fatal);
    defer report.deinit();
    report.document.addText("Platform at ") catch return;
    report.document.addAnnotated(path, .path) catch return;
    report.document.addText(" does not have a 'targets:' section.") catch return;
    report.document.addLineBreak() catch return;
    report.document.addLineBreak() catch return;
    report.document.addText("Platform headers must declare supported targets. Example:") catch return;
    report.document.addLineBreak() catch return;
    report.document.addLineBreak() catch return;
    report.document.addCodeBlock(
        \\ targets: {
        \\ files: "targets/",
        \\ exe: {
        \\ x64linux: ["host.o", app],
        \\ arm64linux: ["host.o", app],
        \\ }
        \\ }
    ) catch return;
    report.document.addLineBreak() catch return;
    reporting.renderReportToTerminal(
        &report,
        stderrWriter(),
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
}
/// Validate that a specific target is supported by the platform.
/// Returns error.UnsupportedTarget if the target is not in the config.
/// Does not log - caller should handle error reporting.
pub fn validateTargetSupported(
    config: TargetsConfig,
    target: RocTarget,
    link_type: LinkType,
) ValidationError!void {
    // Guard clause: a supported target validates trivially.
    if (config.supportsTarget(target, link_type)) return;
    return error.UnsupportedTarget;
}
/// Create a ValidationResult for an unsupported target error.
/// This can be passed to targets_validator.createValidationReport for nice error formatting.
///
/// NOTE(review): `supported_targets` borrows the spec slice owned by
/// `config` (getSupportedTargets returns it directly), so `config` must
/// outlive the returned result.
pub fn createUnsupportedTargetResult(
    platform_path: []const u8,
    requested_target: RocTarget,
    link_type: LinkType,
    config: TargetsConfig,
) ValidationResult {
    return .{
        .unsupported_target = .{
            .platform_path = platform_path,
            .requested_target = requested_target,
            .link_type = link_type,
            .supported_targets = config.getSupportedTargets(link_type),
        },
    };
}
/// Render a validation error to stderr using the reporting infrastructure.
/// Returns true if a report was rendered, false if no report was needed.
pub fn renderValidationError(
    allocator: std.mem.Allocator,
    result: ValidationResult,
    stderr: anytype,
) bool {
    // A passing validation needs no report.
    if (result == .valid) return false;

    var report = targets_validator.createValidationReport(allocator, result) catch {
        // Fall back to plain logging if the report itself cannot be built.
        std.log.err("Platform validation failed", .{});
        return true;
    };
    defer report.deinit();

    reporting.renderReportToTerminal(
        &report,
        stderr,
        .ANSI,
        reporting.ReportingConfig.initColorTerminal(),
    ) catch {};
    return true;
}
/// Validate all files declared in targets section exist on disk.
/// Uses existing targets_validator infrastructure.
/// Returns the ValidationResult for nice error reporting, or null if validation passed.
///
/// NOTE(review): both catch arms below report `missing_files_directory`, so
/// unrelated failures from validateTargetFilesExist (e.g. OutOfMemory) are
/// also presented as a missing directory — confirm this is intentional.
pub fn validateAllTargetFilesExist(
    allocator: std.mem.Allocator,
    config: TargetsConfig,
    platform_dir_path: []const u8,
) ?ValidationResult {
    // Open the platform directory; if it can't be opened nothing under it
    // can be validated, so report the files directory as missing.
    var platform_dir = std.fs.cwd().openDir(platform_dir_path, .{}) catch {
        return .{
            .missing_files_directory = .{
                .platform_path = platform_dir_path,
                .files_dir = config.files_dir orelse "targets",
            },
        };
    };
    defer platform_dir.close();
    const result = targets_validator.validateTargetFilesExist(allocator, config, platform_dir) catch {
        return .{
            .missing_files_directory = .{
                .platform_path = platform_dir_path,
                .files_dir = config.files_dir orelse "targets",
            },
        };
    };
    switch (result) {
        .valid => return null,
        else => return result,
    }
}
// Tests
const testing = std.testing;
test "validateTargetSupported returns error for unsupported target" {
    // Config declaring only the two macOS exe targets.
    const config = TargetsConfig{
        .files_dir = "targets",
        .exe = &.{
            .{ .target = .x64mac, .items = &.{.app} },
            .{ .target = .arm64mac, .items = &.{.app} },
        },
        .static_lib = &.{},
        .shared_lib = &.{},
    };
    // x64musl is not in the config, should error
    const result = validateTargetSupported(config, .x64musl, .exe);
    try testing.expectError(error.UnsupportedTarget, result);
}
test "validateTargetSupported succeeds for supported target" {
    // Config declaring only the two macOS exe targets.
    const config = TargetsConfig{
        .files_dir = "targets",
        .exe = &.{
            .{ .target = .x64mac, .items = &.{.app} },
            .{ .target = .arm64mac, .items = &.{.app} },
        },
        .static_lib = &.{},
        .shared_lib = &.{},
    };
    // x64mac is in the config, should succeed
    try validateTargetSupported(config, .x64mac, .exe);
}

View file

@ -1,288 +1,202 @@
//! Roc target definitions and system library path resolution
//! Roc target definitions and link configuration
//!
//! Re-exports RocTarget and adds link configuration types that depend on the parse module.
const std = @import("std");
const builtin = @import("builtin");
const parse = @import("parse");
const Allocator = std.mem.Allocator;
/// Roc's simplified targets
pub const RocTarget = enum {
// x64 (x86_64) targets
x64mac,
x64win,
x64freebsd,
x64openbsd,
x64netbsd,
x64musl,
x64glibc,
x64linux,
x64elf,
// Re-export RocTarget from the shared build module
pub const RocTarget = @import("roc_target").RocTarget;
// arm64 (aarch64) targets
arm64mac,
arm64win,
arm64linux,
arm64musl,
arm64glibc,
/// Individual link item from a targets section
/// Can be a file path (relative to files/ directory) or a special identifier
pub const LinkItem = union(enum) {
/// A file path (string literal in the source)
/// Path is relative to the targets/<target>/ directory
file_path: []const u8,
// arm32 targets
arm32linux,
arm32musl,
/// The compiled Roc application
app,
// WebAssembly
wasm32,
/// Windows GUI subsystem flag (/subsystem:windows)
win_gui,
};
/// Parse target from string
pub fn fromString(str: []const u8) ?RocTarget {
const enum_info = @typeInfo(RocTarget);
inline for (enum_info.@"enum".fields) |field| {
if (std.mem.eql(u8, str, field.name)) {
return @enumFromInt(field.value);
/// Link specification for a single target
/// Contains the ordered list of items to link for this target
pub const TargetLinkSpec = struct {
target: RocTarget,
items: []const LinkItem,
};
/// Type of output binary
pub const LinkType = enum {
/// Executable binary
exe,
/// Static library (.a, .lib)
static_lib,
/// Shared/dynamic library (.so, .dylib, .dll)
shared_lib,
};
/// Complete targets configuration from a platform header
pub const TargetsConfig = struct {
/// Base directory for target-specific files (e.g., "targets/")
files_dir: ?[]const u8,
/// Executable target specifications (in priority order)
exe: []const TargetLinkSpec,
/// Static library target specifications (in priority order)
static_lib: []const TargetLinkSpec,
/// Shared library target specifications (in priority order)
shared_lib: []const TargetLinkSpec,
/// Get the link spec for a specific target and link type
pub fn getLinkSpec(self: TargetsConfig, target: RocTarget, link_type: LinkType) ?TargetLinkSpec {
const specs = switch (link_type) {
.exe => self.exe,
.static_lib => self.static_lib,
.shared_lib => self.shared_lib,
};
for (specs) |spec| {
if (spec.target == target) {
return spec;
}
}
return null;
}
/// Get the OS tag for this RocTarget
pub fn toOsTag(self: RocTarget) std.Target.Os.Tag {
return switch (self) {
// x64 targets
.x64mac, .arm64mac => .macos,
.x64win, .arm64win => .windows,
.x64freebsd => .freebsd,
.x64openbsd => .openbsd,
.x64netbsd => .netbsd,
.x64musl, .x64glibc, .x64linux, .x64elf, .arm64musl, .arm64glibc, .arm64linux, .arm32musl, .arm32linux => .linux,
.wasm32 => .wasi,
/// Get the default target for a given link type based on the current system
/// Returns the first target in the list that's compatible with the current OS
pub fn getDefaultTarget(self: TargetsConfig, link_type: LinkType) ?RocTarget {
const specs = switch (link_type) {
.exe => self.exe,
.static_lib => self.static_lib,
.shared_lib => self.shared_lib,
};
}
/// Get the CPU architecture for this RocTarget
pub fn toCpuArch(self: RocTarget) std.Target.Cpu.Arch {
return switch (self) {
// x64 targets
.x64mac, .x64win, .x64freebsd, .x64openbsd, .x64netbsd, .x64musl, .x64glibc, .x64linux, .x64elf => .x86_64,
const native = RocTarget.detectNative();
const native_os = native.toOsTag();
// arm64 targets
.arm64mac, .arm64win, .arm64linux, .arm64musl, .arm64glibc => .aarch64,
// arm32 targets
.arm32linux, .arm32musl => .arm,
// WebAssembly
.wasm32 => .wasm32,
};
}
/// Convert Roc target to LLVM target triple
pub fn toTriple(self: RocTarget) []const u8 {
return switch (self) {
// x64 targets
.x64mac => "x86_64-apple-darwin",
.x64win => "x86_64-pc-windows-msvc",
.x64freebsd => "x86_64-unknown-freebsd",
.x64openbsd => "x86_64-unknown-openbsd",
.x64netbsd => "x86_64-unknown-netbsd",
.x64musl => "x86_64-unknown-linux-musl",
.x64glibc => "x86_64-unknown-linux-gnu",
.x64linux => "x86_64-unknown-linux-gnu",
.x64elf => "x86_64-unknown-none-elf",
// arm64 targets
.arm64mac => "aarch64-apple-darwin",
.arm64win => "aarch64-pc-windows-msvc",
.arm64linux => "aarch64-unknown-linux-gnu",
.arm64musl => "aarch64-unknown-linux-musl",
.arm64glibc => "aarch64-unknown-linux-gnu",
// arm32 targets
.arm32linux => "arm-unknown-linux-gnueabihf",
.arm32musl => "arm-unknown-linux-musleabihf",
// WebAssembly
.wasm32 => "wasm32-unknown-unknown",
};
}
/// Detect the current system's Roc target
pub fn detectNative() RocTarget {
const os = builtin.target.os.tag;
const arch = builtin.target.cpu.arch;
const abi = builtin.target.abi;
// Handle architecture first
switch (arch) {
.x86_64 => {
switch (os) {
.macos => return .x64mac,
.windows => return .x64win,
.freebsd => return .x64freebsd,
.openbsd => return .x64openbsd,
.netbsd => return .x64netbsd,
.linux => {
// Check ABI to determine musl vs glibc
return switch (abi) {
.musl, .musleabi, .musleabihf => .x64musl,
.gnu, .gnueabi, .gnueabihf, .gnux32 => .x64glibc,
else => .x64musl, // Default to musl for static linking
};
},
else => return .x64elf, // Generic fallback
}
},
.aarch64, .aarch64_be => {
switch (os) {
.macos => return .arm64mac,
.windows => return .arm64win,
.linux => {
// Check ABI to determine musl vs glibc
return switch (abi) {
.musl, .musleabi, .musleabihf => .arm64musl,
.gnu, .gnueabi, .gnueabihf => .arm64glibc,
else => .arm64musl, // Default to musl for static linking
};
},
else => return .arm64linux, // Generic ARM64 Linux
}
},
.arm => {
switch (os) {
.linux => {
// Default to musl for static linking
return .arm32musl;
},
else => return .arm32linux, // Generic ARM32 Linux
}
},
.wasm32 => return .wasm32,
else => {
// Default fallback based on OS
switch (os) {
.macos => return .x64mac,
.windows => return .x64win,
.linux => return .x64musl, // Default to musl
else => return .x64elf,
}
},
// First pass: look for exact OS match
for (specs) |spec| {
if (spec.target.toOsTag() == native_os) {
return spec.target;
}
}
// wasm32 is considered compatible with all OSes as a fallback
for (specs) |spec| {
if (spec.target == .wasm32) {
return spec.target;
}
}
return null;
}
/// Check if target uses dynamic linking (glibc targets)
pub fn isDynamic(self: RocTarget) bool {
return switch (self) {
.x64glibc, .arm64glibc, .x64linux, .arm64linux, .arm32linux => true,
else => false,
/// Check if a specific target is supported
pub fn supportsTarget(self: TargetsConfig, target: RocTarget, link_type: LinkType) bool {
return self.getLinkSpec(target, link_type) != null;
}
/// Get all supported targets for a link type
pub fn getSupportedTargets(self: TargetsConfig, link_type: LinkType) []const TargetLinkSpec {
return switch (link_type) {
.exe => self.exe,
.static_lib => self.static_lib,
.shared_lib => self.shared_lib,
};
}
/// Check if target uses static linking (musl targets)
pub fn isStatic(self: RocTarget) bool {
return switch (self) {
.x64musl, .arm64musl, .arm32musl => true,
else => false,
/// Create a TargetsConfig from a parsed AST
/// Returns null if the platform header has no targets section
pub fn fromAST(allocator: Allocator, ast: anytype) !?TargetsConfig {
const NodeStore = parse.NodeStore;
const store: *const NodeStore = &ast.store;
// Get the file node first, then get the header from it
const file = store.getFile();
const header = store.getHeader(file.header);
// Only platform headers have targets
const platform = switch (header) {
.platform => |p| p,
else => return null,
};
}
/// Check if target is macOS
pub fn isMacOS(self: RocTarget) bool {
return switch (self) {
.x64mac, .arm64mac => true,
else => false,
};
}
// If no targets section, return null
const targets_section_idx = platform.targets orelse return null;
const targets_section = store.getTargetsSection(targets_section_idx);
/// Check if target is Windows
pub fn isWindows(self: RocTarget) bool {
return switch (self) {
.x64win, .arm64win => true,
else => false,
};
}
// Extract files_dir from string literal token (StringPart token)
const files_dir: ?[]const u8 = if (targets_section.files_path) |tok_idx|
ast.resolve(tok_idx)
else
null;
/// Check if target is Linux-based
pub fn isLinux(self: RocTarget) bool {
return switch (self) {
.x64musl, .x64glibc, .x64linux, .arm64musl, .arm64glibc, .arm64linux, .arm32musl, .arm32linux => true,
else => false,
};
}
// Convert exe link type
var exe_specs = std.array_list.Managed(TargetLinkSpec).init(allocator);
errdefer exe_specs.deinit();
/// Get the dynamic linker path for this target
pub fn getDynamicLinkerPath(self: RocTarget) ![]const u8 {
return switch (self) {
// x64 glibc targets
.x64glibc, .x64linux => "/lib64/ld-linux-x86-64.so.2",
if (targets_section.exe) |exe_idx| {
const link_type = store.getTargetLinkType(exe_idx);
const entry_indices = store.targetEntrySlice(link_type.entries);
// arm64 glibc targets
.arm64glibc, .arm64linux => "/lib/ld-linux-aarch64.so.1",
for (entry_indices) |entry_idx| {
const entry = store.getTargetEntry(entry_idx);
// arm32 glibc targets
.arm32linux => "/lib/ld-linux-armhf.so.3",
// Parse target name from token
const target_name = ast.resolve(entry.target);
const target = RocTarget.fromString(target_name) orelse continue; // Skip unknown targets
// Static linking targets don't need dynamic linker
.x64musl, .arm64musl, .arm32musl => return error.StaticLinkingTarget,
// Convert files
var link_items = std.array_list.Managed(LinkItem).init(allocator);
errdefer link_items.deinit();
// macOS uses dyld
.x64mac, .arm64mac => "/usr/lib/dyld",
const file_indices = store.targetFileSlice(entry.files);
for (file_indices) |file_idx| {
const target_file = store.getTargetFile(file_idx);
// Windows doesn't use ELF-style dynamic linker
.x64win, .arm64win => return error.WindowsTarget,
switch (target_file) {
.string_literal => |tok| {
// The tok points to StringPart token containing the path
const path = ast.resolve(tok);
try link_items.append(.{ .file_path = path });
},
.special_ident => |tok| {
const ident = ast.resolve(tok);
if (std.mem.eql(u8, ident, "app")) {
try link_items.append(.app);
} else if (std.mem.eql(u8, ident, "win_gui")) {
try link_items.append(.win_gui);
}
// Skip unknown special identifiers
},
.malformed => continue, // Skip malformed entries
}
}
// BSD variants
.x64freebsd => "/libexec/ld-elf.so.1",
.x64openbsd => "/usr/libexec/ld.so",
.x64netbsd => "/usr/libexec/ld.elf_so",
try exe_specs.append(.{
.target = target,
.items = try link_items.toOwnedSlice(),
});
}
}
// Generic ELF doesn't have a specific linker
.x64elf => return error.NoKnownLinkerPath,
// static_lib and shared_lib to be added later
const empty_specs: []const TargetLinkSpec = &.{};
// WebAssembly doesn't use dynamic linker
.wasm32 => return error.WebAssemblyTarget,
return TargetsConfig{
.files_dir = files_dir,
.exe = try exe_specs.toOwnedSlice(),
.static_lib = empty_specs,
.shared_lib = empty_specs,
};
}
};
/// CRT (C runtime) file paths for linking
pub const CRTFiles = struct {
crt1_o: ?[]const u8 = null, // crt1.o or Scrt1.o (for PIE)
crti_o: ?[]const u8 = null, // crti.o
crtn_o: ?[]const u8 = null, // crtn.o
libc_a: ?[]const u8 = null, // libc.a (for static linking)
};
/// Get vendored CRT object files for a platform target
/// All CRT files must be provided by the platform in its targets/ directory
pub fn getVendoredCRTFiles(allocator: Allocator, target: RocTarget, platform_dir: []const u8) !CRTFiles {
// macOS and Windows targets don't need vendored CRT files - they use system libraries
if (target.isMacOS() or target.isWindows()) {
return CRTFiles{}; // Return empty CRTFiles struct
}
// Build path to the vendored CRT files
const target_subdir = switch (target) {
.x64musl => "x64musl",
.x64glibc => "x64glibc",
.arm64musl => "arm64musl",
.arm64glibc => "arm64glibc",
.arm32musl => "arm32musl",
.arm32linux => "arm32glibc",
else => return error.UnsupportedTargetForPlatform,
};
const targets_dir = try std.fs.path.join(allocator, &[_][]const u8{ platform_dir, "targets", target_subdir });
var result = CRTFiles{};
if (target.isStatic()) {
// For musl static linking
result.crt1_o = try std.fs.path.join(allocator, &[_][]const u8{ targets_dir, "crt1.o" });
result.libc_a = try std.fs.path.join(allocator, &[_][]const u8{ targets_dir, "libc.a" });
} else {
// For glibc dynamic linking
result.crt1_o = try std.fs.path.join(allocator, &[_][]const u8{ targets_dir, "Scrt1.o" });
result.crti_o = try std.fs.path.join(allocator, &[_][]const u8{ targets_dir, "crti.o" });
result.crtn_o = try std.fs.path.join(allocator, &[_][]const u8{ targets_dir, "crtn.o" });
}
return result;
}

View file

@ -0,0 +1,940 @@
//! Validation for platform targets section
//!
//! Validates that:
//! - Platform headers have a targets section (required)
//! - Files declared in the targets section exist in the filesystem
//! - Files in the targets directory match what's declared in the targets section
//!
//! This module is shared between bundle and unbundle operations.
const std = @import("std");
const Allocator = std.mem.Allocator;
const parse = @import("parse");
const target_mod = @import("target.zig");
const reporting = @import("reporting");
const RocTarget = target_mod.RocTarget;
const TargetsConfig = target_mod.TargetsConfig;
const LinkItem = target_mod.LinkItem;
const TargetLinkSpec = target_mod.TargetLinkSpec;
const LinkType = target_mod.LinkType;
const Report = reporting.Report;
const Severity = reporting.Severity;
/// Errors that can occur during targets validation.
/// NOTE(review): most validation failures are surfaced as ValidationResult
/// variants rather than errors; this set names the failure categories plus
/// the fallible plumbing (OutOfMemory). Confirm which members are actually
/// raised by callers before extending.
pub const ValidationError = error{
    MissingTargetsSection,
    MissingFilesDirectory,
    MissingTargetFile,
    ExtraFileInTargetsDir,
    InvalidTargetName,
    EmptyTargetsSection,
    OutOfMemory,
};
/// Result of validating a targets section.
///
/// NOTE(review): besides targets-section validation, this union also carries
/// build/run-pipeline outcomes (linker_failed, process_crashed, …) so a
/// single reporting path can format every failure kind.
pub const ValidationResult = union(enum) {
    /// Validation passed
    valid: void,
    /// Platform header is missing the required targets section
    missing_targets_section: struct {
        platform_path: []const u8,
    },
    /// Files directory specified but doesn't exist
    missing_files_directory: struct {
        platform_path: []const u8,
        files_dir: []const u8,
    },
    /// A file declared in targets doesn't exist.
    /// `expected_full_path` is heap-allocated by the validator —
    /// TODO(review): confirm the receiver is responsible for freeing it.
    missing_target_file: struct {
        target: RocTarget,
        link_type: LinkType,
        file_path: []const u8,
        expected_full_path: []const u8,
    },
    /// Extra file found in targets directory that isn't declared
    extra_file: struct {
        target: RocTarget,
        file_path: []const u8,
    },
    /// Targets section exists but has no target entries
    empty_targets: struct {
        platform_path: []const u8,
    },
    /// Requested target is not supported by this platform.
    /// `supported_targets` borrows the platform config's spec slice.
    unsupported_target: struct {
        platform_path: []const u8,
        requested_target: RocTarget,
        link_type: LinkType,
        supported_targets: []const TargetLinkSpec,
    },
    /// Cross-compilation requested but platform doesn't have host library for target
    missing_cross_compile_host: struct {
        platform_path: []const u8,
        target: RocTarget,
        expected_path: []const u8,
        files_dir: []const u8,
    },
    /// glibc cross-compilation is not supported on non-Linux hosts
    unsupported_glibc_cross: struct {
        target: RocTarget,
        host_os: []const u8,
    },
    /// App file doesn't have a platform
    no_platform_found: struct {
        app_path: []const u8,
    },
    /// Invalid target string provided
    invalid_target: struct {
        target_str: []const u8,
    },
    /// Linker failed to create executable
    linker_failed: struct {
        reason: []const u8,
    },
    /// Linker not available (LLVM not built)
    linker_not_available: void,
    /// Process crashed during execution (Windows)
    process_crashed: struct {
        exit_code: u32,
        is_access_violation: bool,
    },
    /// Process killed by signal (Unix)
    process_signaled: struct {
        signal: u32,
    },
};
/// Validate that a platform has a targets section.
/// Headers that are not platform headers pass trivially, since only
/// platform headers carry a targets section.
pub fn validatePlatformHasTargets(
    ast: anytype,
    platform_path: []const u8,
) ValidationResult {
    const file = ast.store.getFile();
    switch (ast.store.getHeader(file.header)) {
        .platform => |platform| {
            // A platform header must declare its targets.
            if (platform.targets != null) return .{ .valid = {} };
            return .{ .missing_targets_section = .{
                .platform_path = platform_path,
            } };
        },
        // Non-platform headers don't need targets.
        else => return .{ .valid = {} },
    }
}
/// Validate that files declared in targets section exist on disk.
/// Checks every spec of every link kind and returns the first failure,
/// or `.valid` when everything is present.
pub fn validateTargetFilesExist(
    allocator: Allocator,
    targets_config: TargetsConfig,
    platform_dir: std.fs.Dir,
) !ValidationResult {
    // No declared files directory means there is nothing to check.
    const files_dir_path = targets_config.files_dir orelse return .{ .valid = {} };

    var files_dir = platform_dir.openDir(files_dir_path, .{}) catch {
        return .{ .missing_files_directory = .{
            .platform_path = "platform",
            .files_dir = files_dir_path,
        } };
    };
    defer files_dir.close();

    // Walk exe, static_lib, and shared_lib specs with one table-driven loop.
    const groups = [_]struct { specs: []const TargetLinkSpec, link_type: LinkType }{
        .{ .specs = targets_config.exe, .link_type = .exe },
        .{ .specs = targets_config.static_lib, .link_type = .static_lib },
        .{ .specs = targets_config.shared_lib, .link_type = .shared_lib },
    };
    for (groups) |group| {
        for (group.specs) |spec| {
            if (try validateTargetSpec(allocator, spec, group.link_type, files_dir)) |failure| {
                return failure;
            }
        }
    }

    return .{ .valid = {} };
}
/// Validate that every file item in one target spec exists under files_dir.
/// Returns null when the spec validates; otherwise a `missing_target_file`
/// result describing the first missing file.
///
/// Ownership: on a failure result, `expected_full_path` is allocated with
/// `allocator` and owned by the caller.
fn validateTargetSpec(
    allocator: Allocator,
    spec: TargetLinkSpec,
    link_type: LinkType,
    files_dir: std.fs.Dir,
) !?ValidationResult {
    // Target files live in a subdirectory named after the target enum tag.
    const target_subdir = @tagName(spec.target);

    var target_dir = files_dir.openDir(target_subdir, .{}) catch {
        // A missing target directory is fine when the spec links no real
        // files (only `app` / `win_gui` markers).
        const has_files = for (spec.items) |item| {
            if (item == .file_path) break true;
        } else false;

        if (!has_files) return null;

        const expected_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ "targets", target_subdir });
        // BUG FIX: the original `defer allocator.free(expected_path)` here
        // freed the string before the caller could read it (use-after-free).
        // Ownership transfers to the caller via the result, matching the
        // missing-file branch below.
        return .{ .missing_target_file = .{
            .target = spec.target,
            .link_type = link_type,
            .file_path = target_subdir,
            .expected_full_path = expected_path,
        } };
    };
    defer target_dir.close();

    // Check each declared file item exists on disk.
    for (spec.items) |item| {
        switch (item) {
            .file_path => |path| {
                target_dir.access(path, .{}) catch {
                    const expected_path = try std.fmt.allocPrint(allocator, "{s}/{s}/{s}", .{ "targets", target_subdir, path });
                    return .{ .missing_target_file = .{
                        .target = spec.target,
                        .link_type = link_type,
                        .file_path = path,
                        .expected_full_path = expected_path,
                    } };
                };
            },
            // Special identifiers are not files on disk.
            .app, .win_gui => {},
        }
    }
    return null;
}
/// Create an error report for a validation failure.
///
/// `result` must be a failure variant; passing `.valid` is a programmer
/// error (hits `unreachable`). Each variant maps to exactly one titled
/// report. Every report uses severity `.runtime_error` except `.extra_file`,
/// which is only a `.warning` (the build still proceeds; the file is simply
/// not bundled).
pub fn createValidationReport(
    allocator: Allocator,
    result: ValidationResult,
) !Report {
    switch (result) {
        .valid => unreachable, // Should not create report for valid result
        // Platform header lacks a `targets` section entirely; show a template.
        .missing_targets_section => |info| {
            var report = Report.init(allocator, "MISSING TARGETS SECTION", .runtime_error);
            try report.document.addText("Platform headers must include a `targets` section that specifies");
            try report.document.addLineBreak();
            try report.document.addText("which targets this platform supports and what files to link.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("In ");
            try report.document.addAnnotated(info.platform_path, .emphasized);
            try report.document.addText(", add a targets section like:");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addCodeBlock(
                \\ targets: {
                \\ files: "targets/",
                \\ exe: {
                \\ x64linux: ["host.o", app],
                \\ arm64linux: ["host.o", app],
                \\ x64mac: ["host.o", app],
                \\ arm64mac: ["host.o", app],
                \\ }
                \\ }
            );
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("The targets section declares:");
            try report.document.addLineBreak();
            try report.document.addText("  - `files`: Directory containing target-specific files");
            try report.document.addLineBreak();
            try report.document.addText("  - `exe`: Targets that build executables");
            try report.document.addLineBreak();
            try report.document.addText("  - Each target lists files to link in order, with `app` for the Roc application");
            try report.document.addLineBreak();
            return report;
        },
        // The `files:` directory named in the targets section doesn't exist.
        .missing_files_directory => |info| {
            var report = Report.init(allocator, "MISSING FILES DIRECTORY", .runtime_error);
            try report.document.addText("The targets section specifies files directory ");
            try report.document.addAnnotated(info.files_dir, .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("but this directory doesn't exist.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Create the directory structure:");
            try report.document.addLineBreak();
            try report.document.addCodeBlock(
                \\ targets/
                \\ x64linux/
                \\ host.o
                \\ arm64linux/
                \\ host.o
                \\ ...
            );
            try report.document.addLineBreak();
            return report;
        },
        // A file declared for a target is absent on disk.
        .missing_target_file => |info| {
            var report = Report.init(allocator, "MISSING TARGET FILE", .runtime_error);
            try report.document.addText("The targets section declares file ");
            try report.document.addAnnotated(info.file_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("for target ");
            try report.document.addAnnotated(@tagName(info.target), .emphasized);
            try report.document.addText(" but this file doesn't exist.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Expected file at: ");
            try report.document.addAnnotated(info.expected_full_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Either add the missing file or remove it from the targets section.");
            try report.document.addLineBreak();
            return report;
        },
        // A file exists on disk but isn't declared — warning only, not fatal.
        .extra_file => |info| {
            var report = Report.init(allocator, "EXTRA FILE IN TARGETS", .warning);
            try report.document.addText("Found file ");
            try report.document.addAnnotated(info.file_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("in target directory for ");
            try report.document.addAnnotated(@tagName(info.target), .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("but this file isn't declared in the targets section.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("This file will not be included in the bundle.");
            try report.document.addLineBreak();
            try report.document.addText("Either add it to the targets section or delete it.");
            try report.document.addLineBreak();
            return report;
        },
        // Targets section is present but declares nothing.
        .empty_targets => |info| {
            var report = Report.init(allocator, "EMPTY TARGETS SECTION", .runtime_error);
            try report.document.addText("The targets section in ");
            try report.document.addAnnotated(info.platform_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("doesn't declare any targets.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Add at least one target to the exe, static_lib, or shared_lib section.");
            try report.document.addLineBreak();
            return report;
        },
        // Requested target isn't in the platform's list; enumerate what is.
        .unsupported_target => |info| {
            var report = Report.init(allocator, "UNSUPPORTED TARGET", .runtime_error);
            try report.document.addText("The platform at ");
            try report.document.addAnnotated(info.platform_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addText("does not support the ");
            try report.document.addAnnotated(@tagName(info.requested_target), .emphasized);
            try report.document.addText(" target for ");
            try report.document.addAnnotated(@tagName(info.link_type), .emphasized);
            try report.document.addText(" builds.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            // Only list alternatives when the platform has any for this link type.
            if (info.supported_targets.len > 0) {
                try report.document.addText("Supported targets for ");
                try report.document.addAnnotated(@tagName(info.link_type), .emphasized);
                try report.document.addText(":");
                try report.document.addLineBreak();
                for (info.supported_targets) |spec| {
                    try report.document.addText("  - ");
                    try report.document.addAnnotated(@tagName(spec.target), .emphasized);
                    try report.document.addLineBreak();
                }
                try report.document.addLineBreak();
            } else {
                try report.document.addText("This platform has no targets configured for ");
                try report.document.addAnnotated(@tagName(info.link_type), .emphasized);
                try report.document.addText(" builds.");
                try report.document.addLineBreak();
                try report.document.addLineBreak();
            }
            try report.document.addText("To add support, update the targets section in the platform header.");
            try report.document.addLineBreak();
            return report;
        },
        // Cross-compiling but the platform ships no pre-built host for the target.
        .missing_cross_compile_host => |info| {
            var report = Report.init(allocator, "MISSING HOST LIBRARY FOR CROSS-COMPILATION", .runtime_error);
            try report.document.addText("Cannot cross-compile for ");
            try report.document.addAnnotated(@tagName(info.target), .emphasized);
            try report.document.addText(": the platform doesn't provide");
            try report.document.addLineBreak();
            try report.document.addText("a pre-built host library for this target.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Expected host library at:");
            try report.document.addLineBreak();
            try report.document.addText("  ");
            try report.document.addAnnotated(info.expected_path, .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Platform authors: build your host for this target and place it at:");
            try report.document.addLineBreak();
            try report.document.addText("  <platform>/");
            // Trim trailing slash from files_dir for cleaner display
            const trimmed_files_dir = std.mem.trimRight(u8, info.files_dir, "/");
            try report.document.addAnnotated(trimmed_files_dir, .emphasized);
            try report.document.addText("/");
            try report.document.addAnnotated(@tagName(info.target), .emphasized);
            try report.document.addText("/libhost.a");
            try report.document.addLineBreak();
            return report;
        },
        // glibc cross-compilation is Linux-only; steer users toward musl.
        .unsupported_glibc_cross => |info| {
            var report = Report.init(allocator, "GLIBC CROSS-COMPILATION NOT SUPPORTED", .runtime_error);
            try report.document.addText("Cross-compilation to glibc targets (");
            try report.document.addAnnotated(@tagName(info.target), .emphasized);
            try report.document.addText(") is not supported on ");
            try report.document.addAnnotated(info.host_os, .emphasized);
            try report.document.addText(".");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("glibc targets require dynamic linking with libc symbols that");
            try report.document.addLineBreak();
            try report.document.addText("are only available on Linux.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Use a statically-linked musl target instead:");
            try report.document.addLineBreak();
            try report.document.addText("  ");
            try report.document.addAnnotated("x64musl", .emphasized);
            try report.document.addText(" or ");
            try report.document.addAnnotated("arm64musl", .emphasized);
            try report.document.addLineBreak();
            return report;
        },
        // App file has no platform declaration; show the required header form.
        .no_platform_found => |info| {
            var report = Report.init(allocator, "NO PLATFORM FOUND", .runtime_error);
            try report.document.addText("The file ");
            try report.document.addAnnotated(info.app_path, .emphasized);
            try report.document.addText(" doesn't have a platform.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Every Roc application needs a platform. Add a platform declaration:");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addCodeBlock(
                \\app [main!] { pf: platform "../path/to/platform/main.roc" }
            );
            try report.document.addLineBreak();
            return report;
        },
        // Unrecognized --target string; list the valid spellings.
        .invalid_target => |info| {
            var report = Report.init(allocator, "INVALID TARGET", .runtime_error);
            try report.document.addText("The target ");
            try report.document.addAnnotated(info.target_str, .emphasized);
            try report.document.addText(" is not a valid build target.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Valid targets are:");
            try report.document.addLineBreak();
            try report.document.addText("  x64musl, arm64musl - Linux (static, portable)");
            try report.document.addLineBreak();
            try report.document.addText("  x64glibc, arm64glibc - Linux (dynamic, faster)");
            try report.document.addLineBreak();
            try report.document.addText("  x64mac, arm64mac - macOS");
            try report.document.addLineBreak();
            try report.document.addText("  x64win, arm64win - Windows");
            try report.document.addLineBreak();
            return report;
        },
        // Link step failed; surface the linker's reason and likely causes.
        .linker_failed => |info| {
            var report = Report.init(allocator, "LINKER FAILED", .runtime_error);
            try report.document.addText("Failed to create executable: ");
            try report.document.addAnnotated(info.reason, .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("This may indicate:");
            try report.document.addLineBreak();
            try report.document.addText("  - Missing platform host library (libhost.a)");
            try report.document.addLineBreak();
            try report.document.addText("  - Incompatible object files for the target");
            try report.document.addLineBreak();
            try report.document.addText("  - Missing system libraries");
            try report.document.addLineBreak();
            return report;
        },
        // Binary was built without LLVM, so LLD isn't linked in.
        .linker_not_available => {
            var report = Report.init(allocator, "LINKER NOT AVAILABLE", .runtime_error);
            try report.document.addText("The LLD linker is not available.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("This typically occurs when running a test executable");
            try report.document.addLineBreak();
            try report.document.addText("that was built without LLVM support.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("To fix this, rebuild with LLVM enabled.");
            try report.document.addLineBreak();
            return report;
        },
        // Child process crashed (Windows-style exception codes); ask for a bug report.
        .process_crashed => |info| {
            var report = Report.init(allocator, "PROCESS CRASHED", .runtime_error);
            if (info.is_access_violation) {
                try report.document.addText("The program crashed with an access violation (segmentation fault).");
            } else {
                // Format the raw exception code as hex; buffer is ample for u32/u64.
                var buf: [32]u8 = undefined;
                const code_str = std.fmt.bufPrint(&buf, "0x{X}", .{info.exit_code}) catch "unknown";
                try report.document.addText("The program crashed with exception code: ");
                try report.document.addAnnotated(code_str, .emphasized);
            }
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("This is likely a bug in the Roc compiler.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Please report this issue at:");
            try report.document.addLineBreak();
            try report.document.addText("  ");
            try report.document.addAnnotated("https://github.com/roc-lang/roc/issues", .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Include a small reproduction of the code that causes this crash.");
            try report.document.addLineBreak();
            return report;
        },
        // Child process killed by a POSIX signal; name the common ones.
        .process_signaled => |info| {
            var report = Report.init(allocator, "PROCESS KILLED BY SIGNAL", .runtime_error);
            const signal_name: []const u8 = switch (info.signal) {
                11 => "SIGSEGV (Segmentation fault)",
                6 => "SIGABRT (Aborted)",
                9 => "SIGKILL (Killed)",
                8 => "SIGFPE (Floating point exception)",
                4 => "SIGILL (Illegal instruction)",
                7 => "SIGBUS (Bus error)",
                else => "Unknown signal",
            };
            try report.document.addText("The program was killed by signal ");
            var buf: [8]u8 = undefined;
            const sig_str = std.fmt.bufPrint(&buf, "{d}", .{info.signal}) catch "?";
            try report.document.addAnnotated(sig_str, .emphasized);
            try report.document.addText(": ");
            try report.document.addAnnotated(signal_name, .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("This is likely a bug in the Roc compiler.");
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Please report this issue at:");
            try report.document.addLineBreak();
            try report.document.addText("  ");
            try report.document.addAnnotated("https://github.com/roc-lang/roc/issues", .emphasized);
            try report.document.addLineBreak();
            try report.document.addLineBreak();
            try report.document.addText("Include a small reproduction of the code that causes this crash.");
            try report.document.addLineBreak();
            return report;
        },
    }
}
test "createValidationReport generates correct report for missing_targets_section" {
    const gpa = std.testing.allocator;
    // A missing targets section should produce a runtime error report
    // with the documented title.
    var rpt = try createValidationReport(gpa, .{
        .missing_targets_section = .{ .platform_path = "test/platform/main.roc" },
    });
    defer rpt.deinit();
    try std.testing.expectEqual(Severity.runtime_error, rpt.severity);
    try std.testing.expectEqualStrings("MISSING TARGETS SECTION", rpt.title);
}
test "createValidationReport generates correct report for missing_files_directory" {
    const gpa = std.testing.allocator;
    // A missing files directory should produce a runtime error report
    // with the documented title.
    var rpt = try createValidationReport(gpa, .{
        .missing_files_directory = .{
            .platform_path = "test/platform/main.roc",
            .files_dir = "targets/",
        },
    });
    defer rpt.deinit();
    try std.testing.expectEqual(Severity.runtime_error, rpt.severity);
    try std.testing.expectEqualStrings("MISSING FILES DIRECTORY", rpt.title);
}
test "createValidationReport generates correct report for missing_target_file" {
    const gpa = std.testing.allocator;
    // A declared-but-absent target file should produce a runtime error
    // report with the documented title.
    var rpt = try createValidationReport(gpa, .{
        .missing_target_file = .{
            .target = .x64linux,
            .link_type = .exe,
            .file_path = "host.o",
            .expected_full_path = "targets/x64linux/host.o",
        },
    });
    defer rpt.deinit();
    try std.testing.expectEqual(Severity.runtime_error, rpt.severity);
    try std.testing.expectEqualStrings("MISSING TARGET FILE", rpt.title);
}
test "createValidationReport generates correct report for extra_file" {
    const gpa = std.testing.allocator;
    // An undeclared extra file is only a warning, not a hard error.
    var rpt = try createValidationReport(gpa, .{
        .extra_file = .{
            .target = .x64linux,
            .file_path = "unused.o",
        },
    });
    defer rpt.deinit();
    try std.testing.expectEqual(Severity.warning, rpt.severity);
    try std.testing.expectEqualStrings("EXTRA FILE IN TARGETS", rpt.title);
}
test "createValidationReport generates correct report for empty_targets" {
    const gpa = std.testing.allocator;
    // An empty targets section should produce a runtime error report
    // with the documented title.
    var rpt = try createValidationReport(gpa, .{
        .empty_targets = .{ .platform_path = "test/platform/main.roc" },
    });
    defer rpt.deinit();
    try std.testing.expectEqual(Severity.runtime_error, rpt.severity);
    try std.testing.expectEqualStrings("EMPTY TARGETS SECTION", rpt.title);
}
test "validateTargetFilesExist returns valid when no files_dir specified" {
    const gpa = std.testing.allocator;
    // A config with no files directory and no declared targets has nothing
    // on disk to verify, so validation trivially succeeds.
    const empty_config: TargetsConfig = .{
        .files_dir = null,
        .exe = &.{},
        .static_lib = &.{},
        .shared_lib = &.{},
    };
    try std.testing.expectEqual(
        ValidationResult{ .valid = {} },
        try validateTargetFilesExist(gpa, empty_config, std.fs.cwd()),
    );
}
test "validatePlatformHasTargets detects missing targets section" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // Platform header with no `targets` section at all.
    const source =
        \\platform ""
        \\ requires {} { main : {} }
        \\ exposes []
        \\ packages {}
        \\ provides { main_for_host: "main" }
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    const result = validatePlatformHasTargets(parsed, "test/platform/main.roc");
    // The validator must flag the missing section and echo the platform path.
    if (result == .missing_targets_section) {
        try std.testing.expectEqualStrings("test/platform/main.roc", result.missing_targets_section.platform_path);
    } else {
        std.debug.print("Expected missing_targets_section but got {}\n", .{result});
        return error.UnexpectedResult;
    }
}
test "validatePlatformHasTargets accepts platform with targets section" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // Platform header that does declare a targets section with exe targets.
    const source =
        \\platform ""
        \\ requires {} { main : {} }
        \\ exposes []
        \\ packages {}
        \\ provides { main_for_host: "main" }
        \\ targets: {
        \\ exe: {
        \\ x64linux: [app],
        \\ arm64linux: [app],
        \\ }
        \\ }
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    // Presence of a populated targets section means validation succeeds.
    try std.testing.expectEqual(
        ValidationResult{ .valid = {} },
        validatePlatformHasTargets(parsed, "test/platform/main.roc"),
    );
}
test "validatePlatformHasTargets skips non-platform headers" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // An app module, not a platform header.
    const source =
        \\app [main] { pf: platform "some-platform" }
        \\
        \\main = {}
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    // Non-platform headers should return valid (they don't need targets).
    try std.testing.expectEqual(
        ValidationResult{ .valid = {} },
        validatePlatformHasTargets(parsed, "app/main.roc"),
    );
}
test "validatePlatformHasTargets accepts platform with multiple target types" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // Platform declaring both exe and static_lib target groups.
    const source =
        \\platform ""
        \\ requires {} { main : {} }
        \\ exposes []
        \\ packages {}
        \\ provides { main_for_host: "main" }
        \\ targets: {
        \\ files: "targets/",
        \\ exe: {
        \\ x64linux: ["host.o", app],
        \\ arm64mac: [app],
        \\ },
        \\ static_lib: {
        \\ x64mac: ["libhost.a"],
        \\ }
        \\ }
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    // Any non-empty combination of target groups should validate.
    try std.testing.expectEqual(
        ValidationResult{ .valid = {} },
        validatePlatformHasTargets(parsed, "test/platform/main.roc"),
    );
}
test "validatePlatformHasTargets accepts platform with win_gui target" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // Platform whose only target item is the `win_gui` special identifier.
    const source =
        \\platform ""
        \\ requires {} { main : {} }
        \\ exposes []
        \\ packages {}
        \\ provides { main_for_host: "main" }
        \\ targets: {
        \\ exe: {
        \\ x64win: [win_gui],
        \\ }
        \\ }
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    // Special identifiers need no on-disk files, so this validates.
    try std.testing.expectEqual(
        ValidationResult{ .valid = {} },
        validatePlatformHasTargets(parsed, "test/platform/main.roc"),
    );
}
test "TargetsConfig.fromAST extracts targets configuration" {
    const gpa = std.testing.allocator;
    const base = @import("base");
    // Platform declaring a files directory plus two exe targets.
    const source =
        \\platform ""
        \\ requires {} { main : {} }
        \\ exposes []
        \\ packages {}
        \\ provides { main_for_host: "main" }
        \\ targets: {
        \\ files: "targets/",
        \\ exe: {
        \\ x64linux: ["host.o", app],
        \\ arm64linux: [app],
        \\ }
        \\ }
        \\
    ;
    const owned_source = try gpa.dupe(u8, source);
    defer gpa.free(owned_source);
    var common_env = try base.CommonEnv.init(gpa, owned_source);
    defer common_env.deinit(gpa);
    var parsed = try parse.parse(&common_env, gpa);
    defer parsed.deinit(gpa);
    // Extraction should succeed and hand back an owned config.
    const maybe_config = try TargetsConfig.fromAST(gpa, parsed);
    try std.testing.expect(maybe_config != null);
    const config = maybe_config.?;
    defer {
        // Free the per-spec item slices, then the exe slice itself.
        for (config.exe) |spec| gpa.free(spec.items);
        gpa.free(config.exe);
    }
    // The files directory must round-trip through parsing.
    try std.testing.expect(config.files_dir != null);
    try std.testing.expectEqualStrings("targets/", config.files_dir.?);
    // Both exe targets (x64linux, arm64linux) should be present.
    try std.testing.expectEqual(@as(usize, 2), config.exe.len);
}

View file

@ -9,10 +9,19 @@
//! IMPORTANT: Do NOT use --no-cache when running roc. The interpreted host doesn't change between
//! tests (we're testing app behaviour, not the platform), so using --no-cache would force unnecessary
//! re-linking on every test, making the test run much slower than is necessary.
//!
//! Test specs for IO-based tests are defined in fx_test_specs.zig and shared with
//! the cross-compilation test runner.
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const fx_test_specs = @import("fx_test_specs.zig");
// Wire up tests from fx_test_specs module
comptime {
std.testing.refAllDecls(fx_test_specs);
}
const roc_binary_path = if (builtin.os.tag == .windows) ".\\zig-out\\bin\\roc.exe" else "./zig-out/bin/roc";
@ -132,74 +141,47 @@ fn checkTestSuccess(result: std.process.Child.RunResult) !void {
}
}
test "fx platform effectful functions" {
// ============================================================================
// IO Spec Tests (using shared specs from fx_test_specs.zig)
// ============================================================================
// These tests use the --test mode with IO specifications to verify that
// roc applications produce the expected stdout/stderr output for given stdin.
// The specs are defined in fx_test_specs.zig and shared with the cross-compile
// test runner.
test "fx platform IO spec tests" {
const allocator = testing.allocator;
const result = try runRocTest(
allocator,
"test/fx/app.roc",
"1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!",
);
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
var passed: usize = 0;
var failed: usize = 0;
try checkTestSuccess(result);
}
for (fx_test_specs.io_spec_tests) |spec| {
const result = runRocTest(allocator, spec.roc_file, spec.io_spec) catch |err| {
std.debug.print("\n[FAIL] {s}: failed to run: {}\n", .{ spec.roc_file, err });
failed += 1;
continue;
};
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
test "fx platform with dotdot starting path" {
// Tests that relative paths starting with .. are handled correctly
const allocator = testing.allocator;
checkTestSuccess(result) catch |err| {
std.debug.print("\n[FAIL] {s}: {}\n", .{ spec.roc_file, err });
if (spec.description.len > 0) {
std.debug.print(" Description: {s}\n", .{spec.description});
}
failed += 1;
continue;
};
const result = try runRocTest(
allocator,
"test/fx/subdir/app.roc",
"1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!",
);
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
passed += 1;
}
try checkTestSuccess(result);
}
test "fx platform stdin to stdout" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/stdin_to_stdout.roc", "0<test input|1>test input");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform stdin echo" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/stdin_echo.roc", "0<hello world|1>hello world");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform stdin test with output" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/stdin_test.roc", "1>Before stdin|0<user input|1>After stdin");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform stdin simple" {
// stdin_simple reads from stdin and prints to stderr
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/stdin_simple.roc", "0<simple test|2>simple test");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
// Print summary
const total = passed + failed;
if (failed > 0) {
std.debug.print("\n{}/{} IO spec tests passed ({} failed)\n", .{ passed, total, failed });
return error.SomeTestsFailed;
}
}
test "fx platform expect with main" {
@ -395,17 +377,6 @@ test "fx platform checked directly finds sibling modules" {
}
}
test "fx platform opaque type with method" {
// Regression test: An opaque type with a method attached causes a segfault
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/opaque_with_method.roc", "1>My favourite color is Red");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform string interpolation type mismatch" {
const allocator = testing.allocator;
@ -490,50 +461,6 @@ test "fx platform run from different cwd" {
try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello from stdout!") != null);
}
test "question mark operator" {
// Tests the `?` operator for error propagation.
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/question_mark_operator.roc", "1>hello");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "numeric fold" {
// Tests List.fold with numeric accumulators.
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/numeric_fold.roc", "1>Sum: 15.0");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "List.for_each! with effectful callback" {
// Tests List.for_each! which iterates over a list and calls an effectful callback
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/list_for_each.roc", "1>Item: apple|1>Item: banana|1>Item: cherry");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "string literal pattern matching" {
// Tests pattern matching on string literals in match expressions.
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/string_pattern_matching.roc", "1>Hello Alice!|1>Hey Bob!");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "drop_prefix segfault regression" {
// Regression test: Calling drop_prefix on a string literal and assigning
// the result to an unused variable causes a segfault.
@ -608,66 +535,6 @@ test "big string equality regression" {
}
}
test "fx platform hello world" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/hello_world.roc", "1>Hello, world!");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform function wrapper stdout" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/function_wrapper_stdout.roc", "1>Hello from stdout!");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform function wrapper multiline" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/function_wrapper_multiline.roc", "1>Hello from stdout!|1>Line 2");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform multiline stdout" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/multiline_stdout.roc", "1>Hello|1>World");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform empty_list_get" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/empty_list_get.roc", "1>is err");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform str_interp_valid" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/str_interp_valid.roc", "1>Hello, World!");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform expect with toplevel numeric" {
const allocator = testing.allocator;
@ -833,46 +700,6 @@ test "fx platform expect with toplevel numeric" {
// try testing.expect(std.mem.indexOf(u8, run_result.stdout, "done") != null);
// }
test "fx platform numeric_lookup_test" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/numeric_lookup_test.roc", "1>done");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform string_lookup_test" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/string_lookup_test.roc", "1>hello");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform test_direct_string" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/test_direct_string.roc", "1>Hello");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform test_one_call" {
const allocator = testing.allocator;
const result = try runRocTest(allocator, "test/fx/test_one_call.roc", "1>Hello");
defer allocator.free(result.stdout);
defer allocator.free(result.stderr);
try checkTestSuccess(result);
}
test "fx platform test_type_mismatch" {
const allocator = testing.allocator;
@ -907,84 +734,6 @@ test "fx platform test_type_mismatch" {
}
}
test "fx platform test_with_wrapper" {
    // Effect invoked through a wrapping function.
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/test_with_wrapper.roc", "1>Hello");
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_compare_test" {
    // Inspect output with and without a custom to_inspect implementation,
    // plus a primitive for baseline formatting.
    const expected = "1>With to_inspect: Custom::Red|1>Without to_inspect: ColorWithoutInspect.Red|1>Primitive: 42";
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_compare_test.roc", expected);
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_custom_test" {
    // Custom to_inspect implementation on an opaque type.
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_custom_test.roc", "1>Color::Red|1>Expected: Color::Red");
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_nested_test" {
    // Nested record containing a custom-inspected value.
    const expected = "1>{ color: Color::Red, count: 42, name: \"test\" }|1>Expected: { color: Color::Red, count: 42, name: \"test\" }";
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_nested_test.roc", expected);
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_no_method_test" {
    // Default rendering kicks in when no to_inspect method exists.
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_no_method_test.roc", "1>Result: Color.Red|1>(Default rendering)");
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_record_test" {
    // Plain record rendering through Inspect.
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_record_test.roc", "1>{ count: 42, name: \"test\" }");
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform inspect_wrong_sig_test" {
    // to_inspect with a non-conforming signature falls back gracefully.
    const allocator = testing.allocator;
    const result = try runRocTest(allocator, "test/fx/inspect_wrong_sig_test.roc", "1>Result: 1");
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }
    try checkTestSuccess(result);
}
test "fx platform issue8433" {
const allocator = testing.allocator;
@ -1018,7 +767,7 @@ test "fx platform issue8433" {
}
}
test "run aborts on errors by default" {
test "run aborts on type errors by default" {
// Tests that roc run aborts when there are type errors (without --allow-errors)
const allocator = testing.allocator;
@ -1039,7 +788,28 @@ test "run aborts on errors by default" {
try testing.expect(std.mem.indexOf(u8, run_result.stderr, "UNDEFINED VARIABLE") != null);
}
test "run with --allow-errors attempts execution despite errors" {
test "run aborts on parse errors by default" {
    // Tests that roc run aborts when there are parse errors (without --allow-errors)
    const allocator = testing.allocator;
    const run_result = try std.process.Child.run(.{
        .allocator = allocator,
        .argv = &[_][]const u8{
            "./zig-out/bin/roc",
            "test/fx/parse_error.roc",
        },
    });
    defer allocator.free(run_result.stdout);
    defer allocator.free(run_result.stderr);
    // Should fail with parse errors
    try checkFailure(run_result);
    // Should show the errors
    try testing.expect(std.mem.indexOf(u8, run_result.stderr, "PARSE ERROR") != null);
}
test "run with --allow-errors attempts execution despite type errors" {
// Tests that roc run --allow-errors attempts to execute even with type errors
const allocator = testing.allocator;
@ -1138,6 +908,21 @@ test "fx platform sublist method on inferred type" {
try checkSuccess(run_result);
}
test "fx platform repeating pattern segfault" {
    // Regression test for a refcounting bug: a variable consumed in two
    // positions of one expression -- e.g. `list` in
    // `repeat_helper(acc.concat(list), list, n-1)` is passed both to concat
    // (consuming) and to the recursive call (consuming) -- requires the
    // compiler to insert a copy/incref for the second use, or the program
    // reads freed memory.
    const allocator = testing.allocator;
    const run_result = try runRoc(allocator, "test/fx/repeating_pattern_segfault.roc", .{});
    defer {
        allocator.free(run_result.stdout);
        allocator.free(run_result.stderr);
    }
    try checkSuccess(run_result);
}
test "fx platform runtime stack overflow" {
// Tests that stack overflow in a running Roc program is caught and reported
// with a helpful error message instead of crashing with a raw signal.
@ -1151,22 +936,26 @@ test "fx platform runtime stack overflow" {
defer allocator.free(run_result.stdout);
defer allocator.free(run_result.stderr);
// After stack overflow handling is implemented, we expect:
// 1. The process exits with code 134 (indicating stack overflow was caught)
// 2. Stderr contains a helpful message about stack overflow
// Stack overflow can be caught by either:
// 1. The Roc interpreter (exit code 1, "overflowed its stack memory" message) - most common
// 2. The SIGABRT signal handler (exit code 134) - if native stack overflow handling is used
switch (run_result.term) {
.Exited => |code| {
if (code == 134) {
// Stack overflow was caught and handled properly
// Stack overflow was caught by native signal handler
// Verify the helpful error message was printed
try testing.expect(std.mem.indexOf(u8, run_result.stderr, "overflowed its stack memory") != null);
} else if (code == 1) {
// Stack overflow was caught by the interpreter - this is the expected case
// The interpreter detects excessive work stack depth and reports the error
try testing.expect(std.mem.indexOf(u8, run_result.stderr, "overflowed its stack memory") != null);
} else if (code == 139) {
// Exit code 139 = 128 + 11 (SIGSEGV) - stack overflow was NOT handled
// The Roc program crashed with a segfault that wasn't caught
std.debug.print("\n", .{});
std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{});
std.debug.print("Process crashed with SIGSEGV (exit code 139).\n", .{});
std.debug.print("Expected: exit code 134 with stack overflow message\n", .{});
std.debug.print("Expected: exit code 1 or 134 with stack overflow message\n", .{});
return error.StackOverflowNotHandled;
} else {
std.debug.print("Unexpected exit code: {}\n", .{code});
@ -1179,7 +968,7 @@ test "fx platform runtime stack overflow" {
std.debug.print("\n", .{});
std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{});
std.debug.print("Process was killed by signal: {}\n", .{sig});
std.debug.print("Expected: exit code 134 with stack overflow message\n", .{});
std.debug.print("Expected: exit code 1 or 134 with stack overflow message\n", .{});
return error.StackOverflowNotHandled;
},
else => {

View file

@ -0,0 +1,229 @@
//! Shared test specifications for fx platform tests.
//!
//! This module defines IO specs for all fx platform tests that can be run
//! using the --test mode. These specs are shared between:
//! - Native Zig tests (fx_platform_test.zig)
//! - Unified test platform runner (test_runner.zig)
//!
//! IO Spec Format: "0<stdin|1>stdout|2>stderr" (pipe-separated)
//! - 0<text: stdin input
//! - 1>text: expected stdout output
//! - 2>text: expected stderr output
/// Test specification with a roc file path and expected IO spec.
/// One entry per fx test app; shared by the native Zig tests and the
/// unified test_runner.
pub const TestSpec = struct {
    /// Path to the roc file (relative to project root)
    roc_file: []const u8,
    /// IO spec for --test mode, pipe-separated: "0<stdin|1>stdout|2>stderr"
    io_spec: []const u8,
    /// Optional description of what the test verifies
    description: []const u8 = "",
};
/// All fx platform tests that can be run with --test mode IO specs.
/// These tests work with cross-compilation because they only test
/// the compiled binary's IO behavior, not build-time features.
/// Each io_spec is a pipe-separated sequence of "0<stdin|1>stdout|2>stderr" steps.
pub const io_spec_tests = [_]TestSpec{
    // Basic effectful function tests
    .{
        .roc_file = "test/fx/app.roc",
        .io_spec = "1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!",
        .description = "Basic effectful functions: Stdout.line!, Stderr.line!",
    },
    .{
        .roc_file = "test/fx/subdir/app.roc",
        .io_spec = "1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!",
        .description = "Relative paths starting with ..",
    },
    // Stdin tests (these make the fx platform unsafe to run under valgrind)
    .{
        .roc_file = "test/fx/stdin_to_stdout.roc",
        .io_spec = "0<test input|1>test input",
        .description = "Stdin to stdout passthrough",
    },
    .{
        .roc_file = "test/fx/stdin_echo.roc",
        .io_spec = "0<hello world|1>hello world",
        .description = "Stdin echo",
    },
    .{
        .roc_file = "test/fx/stdin_test.roc",
        .io_spec = "1>Before stdin|0<user input|1>After stdin",
        .description = "Stdin with output before and after",
    },
    .{
        .roc_file = "test/fx/stdin_simple.roc",
        .io_spec = "0<simple test|2>simple test",
        .description = "Stdin to stderr",
    },
    // Match expression tests
    .{
        .roc_file = "test/fx/match_str_return.roc",
        .io_spec = "1>0",
        .description = "Match expressions with string returns",
    },
    .{
        .roc_file = "test/fx/match_with_wildcard.roc",
        .io_spec = "1>0",
        .description = "Wildcard patterns in match expressions",
    },
    // Opaque type tests
    .{
        .roc_file = "test/fx/opaque_with_method.roc",
        .io_spec = "1>My favourite color is Red",
        .description = "Opaque type with attached method",
    },
    // Language feature tests
    .{
        .roc_file = "test/fx/question_mark_operator.roc",
        .io_spec = "1>hello",
        .description = "Question mark operator for error propagation",
    },
    .{
        .roc_file = "test/fx/numeric_fold.roc",
        .io_spec = "1>Sum: 15.0",
        .description = "List.fold with numeric accumulators",
    },
    .{
        .roc_file = "test/fx/list_for_each.roc",
        .io_spec = "1>Item: apple|1>Item: banana|1>Item: cherry",
        .description = "List.for_each! with effectful callback",
    },
    .{
        .roc_file = "test/fx/string_pattern_matching.roc",
        .io_spec = "1>Hello Alice!|1>Hey Bob!",
        .description = "Pattern matching on string literals",
    },
    // Basic output tests
    .{
        .roc_file = "test/fx/hello_world.roc",
        .io_spec = "1>Hello, world!",
        .description = "Hello world",
    },
    .{
        .roc_file = "test/fx/function_wrapper_stdout.roc",
        .io_spec = "1>Hello from stdout!",
        .description = "Function wrapper stdout",
    },
    .{
        .roc_file = "test/fx/function_wrapper_multiline.roc",
        .io_spec = "1>Hello from stdout!|1>Line 2",
        .description = "Function wrapper multiline output",
    },
    .{
        .roc_file = "test/fx/multiline_stdout.roc",
        .io_spec = "1>Hello|1>World",
        .description = "Multiple stdout lines",
    },
    // List and string tests
    .{
        .roc_file = "test/fx/empty_list_get.roc",
        .io_spec = "1>is err",
        .description = "Empty list get returns error",
    },
    .{
        .roc_file = "test/fx/str_interp_valid.roc",
        .io_spec = "1>Hello, World!",
        .description = "String interpolation",
    },
    // Lookup tests
    .{
        .roc_file = "test/fx/numeric_lookup_test.roc",
        .io_spec = "1>done",
        .description = "Numeric lookup",
    },
    .{
        .roc_file = "test/fx/string_lookup_test.roc",
        .io_spec = "1>hello",
        .description = "String lookup",
    },
    .{
        .roc_file = "test/fx/test_direct_string.roc",
        .io_spec = "1>Hello",
        .description = "Direct string output",
    },
    .{
        .roc_file = "test/fx/test_one_call.roc",
        .io_spec = "1>Hello",
        .description = "Single function call",
    },
    .{
        .roc_file = "test/fx/test_with_wrapper.roc",
        .io_spec = "1>Hello",
        .description = "Function with wrapper",
    },
    // Inspect tests
    .{
        .roc_file = "test/fx/inspect_compare_test.roc",
        .io_spec = "1>With to_inspect: Custom::Red|1>Without to_inspect: ColorWithoutInspect.Red|1>Primitive: 42",
        .description = "Inspect comparison with and without to_inspect",
    },
    .{
        .roc_file = "test/fx/inspect_custom_test.roc",
        .io_spec = "1>Color::Red|1>Expected: Color::Red",
        .description = "Custom inspect implementation",
    },
    .{
        .roc_file = "test/fx/inspect_nested_test.roc",
        .io_spec = "1>{ color: Color::Red, count: 42, name: \"test\" }|1>Expected: { color: Color::Red, count: 42, name: \"test\" }",
        .description = "Nested struct inspection",
    },
    .{
        .roc_file = "test/fx/inspect_no_method_test.roc",
        .io_spec = "1>Result: Color.Red|1>(Default rendering)",
        .description = "Inspect without to_inspect method",
    },
    .{
        .roc_file = "test/fx/inspect_record_test.roc",
        .io_spec = "1>{ count: 42, name: \"test\" }",
        .description = "Record inspection",
    },
    .{
        .roc_file = "test/fx/inspect_wrong_sig_test.roc",
        .io_spec = "1>Result: 1",
        .description = "Inspect with wrong signature",
    },
};
/// Get the total number of IO spec tests registered in `io_spec_tests`.
pub fn getTestCount() usize {
    return io_spec_tests.len;
}
/// Look up the test spec registered for the given roc file path.
/// Returns null when no spec matches exactly.
pub fn findByPath(roc_file: []const u8) ?TestSpec {
    for (io_spec_tests) |candidate| {
        if (!std.mem.eql(u8, candidate.roc_file, roc_file)) continue;
        return candidate;
    }
    return null;
}
const std = @import("std");
test "all test specs have valid paths" {
    // Every registered spec must point somewhere under test/fx and
    // carry a non-empty IO expectation.
    for (io_spec_tests) |spec| {
        try std.testing.expect(spec.roc_file.len != 0);
        try std.testing.expect(std.mem.startsWith(u8, spec.roc_file, "test/fx"));
        try std.testing.expect(spec.io_spec.len != 0);
    }
}
test "find by path works" {
    // A known spec resolves to its IO expectation...
    const hello = findByPath("test/fx/hello_world.roc") orelse return error.TestUnexpectedResult;
    try std.testing.expectEqualStrings("1>Hello, world!", hello.io_spec);
    // ...and an unknown path resolves to nothing.
    try std.testing.expect(findByPath("nonexistent.roc") == null);
}

View file

@ -0,0 +1,202 @@
//! Platform configurations for test platforms.
//!
//! This module defines configurations for all test platforms, including:
//! - Available targets
//! - Test app discovery
//! - Platform capabilities (native exec, IO specs, valgrind)
const std = @import("std");
const fx_test_specs = @import("fx_test_specs.zig");
/// Target information: a cross-compilation target name plus host constraints.
pub const TargetInfo = struct {
    /// Target shorthand, e.g. "x64musl".
    name: []const u8,
    /// True when this target can only be built from a Linux host (glibc targets).
    requires_linux: bool,
};
/// How test apps are discovered for a platform
pub const TestApps = union(enum) {
    /// Single app file (like int/str)
    single: []const u8,
    /// List of test specs with IO expectations (like fx)
    spec_list: []const fx_test_specs.TestSpec,
};
/// Platform configuration
pub const PlatformConfig = struct {
    /// Platform name used to select it (e.g. "fx").
    name: []const u8,
    /// Directory containing the platform and its test apps.
    base_dir: []const u8,
    /// Cross-compilation targets this platform supports.
    targets: []const TargetInfo,
    /// How test apps for this platform are discovered.
    test_apps: TestApps,
    /// Whether apps can be built and executed natively on the host.
    supports_native_exec: bool,
    /// Whether apps can be verified with --test IO specs.
    supports_io_specs: bool,
    /// Whether it is safe to run apps under valgrind (fx is not, since it has stdin tests).
    valgrind_safe: bool,
};
/// All available cross-compilation targets (superset)
pub const all_cross_targets = [_][]const u8{
    "x64musl",
    "arm64musl",
    "x64glibc",
    "arm64glibc",
};
/// Standard targets for platforms with glibc support
const targets_with_glibc = [_]TargetInfo{
    .{ .name = "x64musl", .requires_linux = false },
    .{ .name = "arm64musl", .requires_linux = false },
    .{ .name = "x64glibc", .requires_linux = true },
    .{ .name = "arm64glibc", .requires_linux = true },
};
/// Standard targets for platforms without glibc support
/// NOTE(review): not referenced by any platform in this file -- confirm it is
/// used elsewhere or intended for future platforms.
const targets_musl_only = [_]TargetInfo{
    .{ .name = "x64musl", .requires_linux = false },
    .{ .name = "arm64musl", .requires_linux = false },
};
/// Targets for fx platforms (musl + Windows)
const targets_fx = [_]TargetInfo{
    .{ .name = "x64musl", .requires_linux = false },
    .{ .name = "arm64musl", .requires_linux = false },
    .{ .name = "x64win", .requires_linux = false },
    .{ .name = "arm64win", .requires_linux = false },
};
/// All platform configurations. Look up individual entries with `findPlatform`.
pub const platforms = [_]PlatformConfig{
    // INT PLATFORM - Integer operations
    .{
        .name = "int",
        .base_dir = "test/int",
        .targets = &targets_with_glibc,
        .test_apps = .{ .single = "app.roc" },
        .supports_native_exec = true,
        .supports_io_specs = false,
        .valgrind_safe = true,
    },
    // STR PLATFORM - String processing
    .{
        .name = "str",
        .base_dir = "test/str",
        .targets = &targets_with_glibc,
        .test_apps = .{ .single = "app.roc" },
        .supports_native_exec = true,
        .supports_io_specs = false,
        .valgrind_safe = true,
    },
    // FX PLATFORM - Effectful (stdout, stderr, stdin)
    .{
        .name = "fx",
        .base_dir = "test/fx",
        .targets = &targets_fx,
        .test_apps = .{ .spec_list = &fx_test_specs.io_spec_tests },
        .supports_native_exec = true,
        .supports_io_specs = true,
        .valgrind_safe = false, // Has stdin tests
    },
    // FX-OPEN PLATFORM - Effectful with open union errors
    .{
        .name = "fx-open",
        .base_dir = "test/fx-open",
        .targets = &targets_fx,
        .test_apps = .{ .single = "app.roc" },
        .supports_native_exec = true,
        .supports_io_specs = false,
        .valgrind_safe = true,
    },
};
/// Find a platform configuration by name. Returns null for unknown names.
pub fn findPlatform(name: []const u8) ?PlatformConfig {
    for (platforms) |candidate| {
        if (std.mem.eql(u8, candidate.name, name)) return candidate;
    }
    return null;
}
/// Find a target in a platform's target list. Returns null when the
/// platform does not support the named target.
pub fn findTarget(platform: PlatformConfig, target_name: []const u8) ?TargetInfo {
    for (platform.targets) |candidate| {
        if (std.mem.eql(u8, candidate.name, target_name)) return candidate;
    }
    return null;
}
/// Get list of all platform names.
/// Evaluated entirely at comptime, so the returned slice refers to static data.
pub fn getPlatformNames() []const []const u8 {
    comptime {
        var result: [platforms.len][]const u8 = undefined;
        for (&result, platforms) |*slot, platform| {
            slot.* = platform.name;
        }
        return &result;
    }
}
/// Get test app paths for a platform.
///
/// `platform` must be comptime-known (e.g. an entry from `platforms`):
/// the path slices are built at comptime so they refer to static data.
/// The previous runtime version returned pointers to function-local
/// arrays (dangling as soon as the function returned) and used the
/// runtime value `specs.len` as an array length, which cannot compile
/// once the function is actually referenced.
pub fn getTestApps(comptime platform: PlatformConfig) []const []const u8 {
    comptime {
        switch (platform.test_apps) {
            .single => |app| {
                // Single-app platforms expose exactly one path.
                const result = [_][]const u8{app};
                return &result;
            },
            .spec_list => |specs| {
                // Project just the roc_file paths out of the specs.
                var paths: [specs.len][]const u8 = undefined;
                for (specs, 0..) |spec, i| {
                    paths[i] = spec.roc_file;
                }
                return &paths;
            },
        }
    }
}
test "findPlatform works" {
    // Known platforms resolve with their configured directory/capabilities.
    const int_platform = findPlatform("int") orelse return error.TestUnexpectedResult;
    try std.testing.expectEqualStrings("test/int", int_platform.base_dir);
    const fx_platform = findPlatform("fx") orelse return error.TestUnexpectedResult;
    try std.testing.expect(fx_platform.supports_io_specs);
    // Unknown names resolve to null.
    try std.testing.expect(findPlatform("nonexistent") == null);
}
test "findTarget works" {
    const int_platform = findPlatform("int").?;
    // musl targets build from any host...
    const musl = findTarget(int_platform, "x64musl") orelse return error.TestUnexpectedResult;
    try std.testing.expect(!musl.requires_linux);
    // ...glibc targets require a Linux host...
    const glibc = findTarget(int_platform, "x64glibc") orelse return error.TestUnexpectedResult;
    try std.testing.expect(glibc.requires_linux);
    // ...and unknown targets are not found.
    try std.testing.expect(findTarget(int_platform, "x64windows") == null);
}
test "fx platform has io specs" {
    const fx_platform = findPlatform("fx").?;
    try std.testing.expect(fx_platform.supports_io_specs);
    switch (fx_platform.test_apps) {
        .spec_list => |specs| try std.testing.expect(specs.len > 0),
        .single => return error.TestUnexpectedResult, // fx should have a spec_list
    }
}

View file

@ -396,6 +396,9 @@ test "roc check reports type error - plus operator with incompatible types" {
}
test "roc test/int/app.roc runs successfully" {
// Skip on Windows - test/int platform doesn't have Windows host libraries
if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
const testing = std.testing;
const gpa = testing.allocator;
@ -409,6 +412,9 @@ test "roc test/int/app.roc runs successfully" {
}
test "roc test/str/app.roc runs successfully" {
// Skip on Windows - test/str platform doesn't have Windows host libraries
if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
const testing = std.testing;
const gpa = testing.allocator;
@ -420,3 +426,155 @@ test "roc test/str/app.roc runs successfully" {
// 1. Command succeeded (zero exit code)
try testing.expect(result.term == .Exited and result.term.Exited == 0);
}
// =============================================================================
// roc build tests
// =============================================================================
test "roc build creates executable from test/int/app.roc" {
    // Verifies that `roc build --output=...` succeeds and writes a
    // non-empty executable at the requested path.
    // Skip on Windows - test/int platform doesn't have Windows host libraries
    if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
    const testing = std.testing;
    const gpa = testing.allocator;
    // Create a temp directory for the output
    var tmp_dir = testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpathAlloc(gpa, ".");
    defer gpa.free(tmp_path);
    const output_path = try std.fs.path.join(gpa, &.{ tmp_path, "test_app" });
    defer gpa.free(output_path);
    const output_arg = try std.fmt.allocPrint(gpa, "--output={s}", .{output_path});
    defer gpa.free(output_arg);
    const result = try util.runRoc(gpa, &.{ "build", output_arg }, "test/int/app.roc");
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
    // Verify that:
    // 1. Command succeeded (zero exit code)
    try testing.expect(result.term == .Exited and result.term.Exited == 0);
    // 2. Output file was created
    const stat = tmp_dir.dir.statFile("test_app") catch |err| {
        std.debug.print("Failed to stat output file: {}\nstderr: {s}\n", .{ err, result.stderr });
        return err;
    };
    // 3. Output file is executable (non-zero size)
    try testing.expect(stat.size > 0);
}
test "roc build executable runs correctly" {
    // End-to-end: build test/int/app.roc, then execute the produced binary
    // and check it reports success on stdout.
    // Skip on Windows - test/int platform doesn't have Windows host libraries
    if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
    const testing = std.testing;
    const gpa = testing.allocator;
    // Create a temp directory for the output
    var tmp_dir = testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpathAlloc(gpa, ".");
    defer gpa.free(tmp_path);
    const output_path = try std.fs.path.join(gpa, &.{ tmp_path, "test_app" });
    defer gpa.free(output_path);
    const output_arg = try std.fmt.allocPrint(gpa, "--output={s}", .{output_path});
    defer gpa.free(output_arg);
    // Build the app
    const build_result = try util.runRoc(gpa, &.{ "build", output_arg }, "test/int/app.roc");
    defer gpa.free(build_result.stdout);
    defer gpa.free(build_result.stderr);
    try testing.expect(build_result.term == .Exited and build_result.term.Exited == 0);
    // Run the built executable
    const run_result = try std.process.Child.run(.{
        .allocator = gpa,
        .argv = &.{output_path},
        .max_output_bytes = 10 * 1024 * 1024,
    });
    defer gpa.free(run_result.stdout);
    defer gpa.free(run_result.stderr);
    // Verify that:
    // 1. Executable ran successfully
    try testing.expect(run_result.term == .Exited and run_result.term.Exited == 0);
    // 2. Output contains expected success message
    const has_success = std.mem.indexOf(u8, run_result.stdout, "SUCCESS") != null or
        std.mem.indexOf(u8, run_result.stdout, "PASSED") != null;
    try testing.expect(has_success);
}
test "roc build fails with file not found error" {
    const testing = std.testing;
    const gpa = testing.allocator;
    // Building a path that does not exist must fail loudly.
    const result = try util.runRoc(gpa, &.{"build"}, "nonexistent_file.roc");
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
    // 1. Non-zero exit code (or abnormal termination).
    const exited_cleanly = result.term == .Exited and result.term.Exited == 0;
    try testing.expect(!exited_cleanly);
    // 2. Stderr mentions the missing file in some recognizable form.
    const mentions_missing =
        std.mem.indexOf(u8, result.stderr, "FileNotFound") != null or
        std.mem.indexOf(u8, result.stderr, "not found") != null or
        std.mem.indexOf(u8, result.stderr, "Failed") != null;
    try testing.expect(mentions_missing);
}
test "roc build fails with invalid target error" {
    const testing = std.testing;
    const gpa = testing.allocator;
    // An unrecognized --target value must be rejected.
    const result = try util.runRoc(gpa, &.{ "build", "--target=invalid_target_name" }, "test/int/app.roc");
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
    // 1. Non-zero exit code (or abnormal termination).
    const exited_cleanly = result.term == .Exited and result.term.Exited == 0;
    try testing.expect(!exited_cleanly);
    // 2. Stderr complains about the target.
    const mentions_target =
        std.mem.indexOf(u8, result.stderr, "Invalid target") != null or
        std.mem.indexOf(u8, result.stderr, "invalid") != null;
    try testing.expect(mentions_target);
}
test "roc build glibc target gives helpful error on non-Linux" {
    // Cross-compiling to glibc targets is only supported from Linux; on
    // other hosts the CLI should fail with a message that names glibc and
    // points the user at musl instead.
    const testing = std.testing;
    const builtin = @import("builtin");
    const gpa = testing.allocator;
    // This test only applies on non-Linux platforms
    if (builtin.os.tag == .linux) {
        return; // Skip on Linux where glibc cross-compilation is supported
    }
    const result = try util.runRoc(gpa, &.{ "build", "--target=x64glibc" }, "test/int/app.roc");
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
    // Verify that:
    // 1. Command failed (non-zero exit code)
    try testing.expect(result.term != .Exited or result.term.Exited != 0);
    // 2. Stderr contains helpful error message about glibc not being supported
    const has_glibc_error = std.mem.indexOf(u8, result.stderr, "glibc") != null;
    try testing.expect(has_glibc_error);
    // 3. Stderr suggests using musl instead
    const suggests_musl = std.mem.indexOf(u8, result.stderr, "musl") != null;
    try testing.expect(suggests_musl);
}

View file

@ -0,0 +1,389 @@
//! Shared execution logic for the test platform runner.
//!
//! This module provides common functions for:
//! - Cross-compilation of Roc apps
//! - Native build and execution
//! - Valgrind memory testing
//! - Result formatting and summary printing
const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
/// Result of a test execution
pub const TestResult = enum {
    /// Test completed with the expected outcome.
    passed,
    /// Test completed with an unexpected outcome or could not be run.
    failed,
    /// Test was not applicable on this host/target and was skipped.
    skipped,
};
/// Aggregated counters for a test run.
pub const TestStats = struct {
    passed: usize = 0,
    failed: usize = 0,
    skipped: usize = 0,

    /// Total number of results recorded so far.
    pub fn total(self: TestStats) usize {
        return self.skipped + self.failed + self.passed;
    }

    /// Tally one test outcome into the matching counter.
    pub fn record(self: *TestStats, result: TestResult) void {
        switch (result) {
            .skipped => self.skipped += 1,
            .failed => self.failed += 1,
            .passed => self.passed += 1,
        }
    }
};
/// Cross-compile a Roc app for `target`, writing the binary to `output_name`.
/// Prints a status line; returns .passed when the build succeeded and the
/// executable exists (it is then cleaned up), .failed otherwise.
pub fn crossCompile(
    allocator: Allocator,
    roc_binary: []const u8,
    roc_file: []const u8,
    target: []const u8,
    output_name: []const u8,
) !TestResult {
    const target_flag = try std.fmt.allocPrint(allocator, "--target={s}", .{target});
    defer allocator.free(target_flag);
    const output_flag = try std.fmt.allocPrint(allocator, "--output={s}", .{output_name});
    defer allocator.free(output_flag);

    const argv = [_][]const u8{ roc_binary, "build", target_flag, output_flag, roc_file };
    const run = std.process.Child.run(.{
        .allocator = allocator,
        .argv = &argv,
    }) catch |err| {
        std.debug.print("FAIL (spawn error: {})\n", .{err});
        return .failed;
    };
    defer allocator.free(run.stdout);
    defer allocator.free(run.stderr);

    return handleProcessResult(run, output_name);
}
/// Build a Roc app natively (no cross-compilation).
/// Does NOT clean up the output file - the caller is expected to run the
/// binary and then delete it.
pub fn buildNative(
    allocator: Allocator,
    roc_binary: []const u8,
    roc_file: []const u8,
    output_name: []const u8,
) !TestResult {
    const output_flag = try std.fmt.allocPrint(allocator, "--output={s}", .{output_name});
    defer allocator.free(output_flag);

    const argv = [_][]const u8{ roc_binary, "build", output_flag, roc_file };
    const run = std.process.Child.run(.{
        .allocator = allocator,
        .argv = &argv,
    }) catch |err| {
        std.debug.print("FAIL (spawn error: {})\n", .{err});
        return .failed;
    };
    defer allocator.free(run.stdout);
    defer allocator.free(run.stderr);

    // Don't cleanup - caller will run the executable and then cleanup.
    return handleProcessResultNoCleanup(run, output_name);
}
/// Execute a natively-built binary and report pass/fail from its exit status.
/// On success a short sample of stdout is echoed; on failure a short sample
/// of stderr is echoed.
pub fn runNative(
    allocator: Allocator,
    exe_path: []const u8,
) !TestResult {
    const run = std.process.Child.run(.{
        .allocator = allocator,
        .argv = &[_][]const u8{exe_path},
    }) catch |err| {
        std.debug.print("FAIL (spawn error: {})\n", .{err});
        return .failed;
    };
    defer allocator.free(run.stdout);
    defer allocator.free(run.stderr);

    switch (run.term) {
        .Exited => |code| {
            if (code != 0) {
                std.debug.print("FAIL (exit code {d})\n", .{code});
                if (run.stderr.len > 0) {
                    printTruncatedOutput(run.stderr, 5, " ");
                }
                return .failed;
            }
            std.debug.print("OK\n", .{});
            // Echo the first few lines of the program's output for the log.
            if (run.stdout.len > 0) {
                printTruncatedOutput(run.stdout, 3, " ");
            }
            return .passed;
        },
        .Signal => |sig| {
            std.debug.print("FAIL (signal {d})\n", .{sig});
            return .failed;
        },
        else => {
            std.debug.print("FAIL (abnormal termination)\n", .{});
            return .failed;
        },
    }
}
/// Run a Roc app in --test mode so the runtime verifies the IO spec
/// (pipe-separated "0<stdin|1>stdout|2>stderr" steps).
pub fn runWithIoSpec(
    allocator: Allocator,
    roc_binary: []const u8,
    roc_file: []const u8,
    io_spec: []const u8,
) !TestResult {
    const spec_flag = try std.fmt.allocPrint(allocator, "--test={s}", .{io_spec});
    defer allocator.free(spec_flag);

    const run = std.process.Child.run(.{
        .allocator = allocator,
        .argv = &[_][]const u8{ roc_binary, "run", spec_flag, roc_file },
    }) catch |err| {
        std.debug.print("FAIL (spawn error: {})\n", .{err});
        return .failed;
    };
    defer allocator.free(run.stdout);
    defer allocator.free(run.stderr);

    switch (run.term) {
        .Exited => |code| {
            if (code != 0) {
                std.debug.print("FAIL (exit code {d})\n", .{code});
                if (run.stderr.len > 0) {
                    printTruncatedOutput(run.stderr, 5, " ");
                }
                return .failed;
            }
            std.debug.print("OK\n", .{});
            return .passed;
        },
        .Signal => |sig| {
            std.debug.print("FAIL (signal {d})\n", .{sig});
            return .failed;
        },
        else => {
            std.debug.print("FAIL (abnormal termination)\n", .{});
            return .failed;
        },
    }
}
/// Run a Roc app under valgrind via ci/custom_valgrind.sh.
/// Skipped on every host except Linux x86_64, where valgrind is supported.
pub fn runWithValgrind(
    allocator: Allocator,
    roc_binary: []const u8,
    roc_file: []const u8,
) !TestResult {
    const host_supported = builtin.os.tag == .linux and builtin.cpu.arch == .x86_64;
    if (!host_supported) {
        std.debug.print("SKIP (valgrind requires Linux x86_64)\n", .{});
        return .skipped;
    }

    const run = std.process.Child.run(.{
        .allocator = allocator,
        .argv = &[_][]const u8{ "./ci/custom_valgrind.sh", roc_binary, "--no-cache", roc_file },
    }) catch |err| {
        std.debug.print("FAIL (spawn error: {})\n", .{err});
        return .failed;
    };
    defer allocator.free(run.stdout);
    defer allocator.free(run.stderr);

    switch (run.term) {
        .Exited => |code| {
            if (code != 0) {
                std.debug.print("FAIL (valgrind exit code {d})\n", .{code});
                if (run.stderr.len > 0) {
                    printTruncatedOutput(run.stderr, 5, " ");
                }
                return .failed;
            }
            std.debug.print("OK\n", .{});
            return .passed;
        },
        .Signal => |sig| {
            std.debug.print("FAIL (signal {d})\n", .{sig});
            return .failed;
        },
        else => {
            std.debug.print("FAIL (abnormal termination)\n", .{});
            return .failed;
        },
    }
}
/// Verify that required platform target files exist: returns true when
/// `<platform_dir>/platform/targets/<target>/libhost.a` is accessible.
pub fn verifyPlatformFiles(
    allocator: Allocator,
    platform_dir: []const u8,
    target: []const u8,
) !bool {
    const libhost_path = try std.fmt.allocPrint(allocator, "{s}/platform/targets/{s}/libhost.a", .{ platform_dir, target });
    defer allocator.free(libhost_path);
    std.fs.cwd().access(libhost_path, .{}) catch return false;
    return true;
}
/// Check if a target requires a Linux host: true for any target whose
/// name contains "glibc".
pub fn requiresLinuxHost(target: []const u8) bool {
    return std.mem.containsAtLeast(u8, target, 1, "glibc");
}
/// Check if this target should be skipped on the current host
/// (glibc targets can only be built from Linux).
pub fn shouldSkipTarget(target: []const u8) bool {
    return requiresLinuxHost(target) and builtin.os.tag != .linux;
}
/// Best-effort removal of a generated file; deletion errors (e.g. the file
/// never having been created) are deliberately ignored.
pub fn cleanup(path: []const u8) void {
    std.fs.cwd().deleteFile(path) catch {};
}
/// Print a section header of the form ">>> <message>" preceded by a blank
/// line. `fmt` is concatenated at comptime, so it must be comptime-known.
pub fn printHeader(comptime fmt: []const u8, args: anytype) void {
    std.debug.print("\n>>> " ++ fmt ++ "\n", args);
}
/// Print the final pass/fail/skip summary for a run.
pub fn printSummary(stats: TestStats) void {
    std.debug.print("\n=== Summary ===\n", .{});
    std.debug.print("Passed: {d}\n", .{stats.passed});
    std.debug.print("Failed: {d}\n", .{stats.failed});
    std.debug.print("Skipped: {d}\n", .{stats.skipped});
    if (stats.failed == 0) {
        std.debug.print("\nAll tests passed!\n", .{});
    } else {
        std.debug.print("\nSome tests failed!\n", .{});
    }
}
/// Print one result line: "[STATUS] target" with an optional "(message)".
pub fn printResultLine(status: []const u8, target: []const u8, message: []const u8) void {
    if (message.len == 0) {
        std.debug.print("[{s}] {s}\n", .{ status, target });
    } else {
        std.debug.print("[{s}] {s} ({s})\n", .{ status, target, message });
    }
}
// --- Internal helpers ---
/// Shared check for a `roc build` child-process result: a build passes when
/// it exited with code 0 AND the expected executable exists on disk. When
/// `do_cleanup` is true the executable is deleted after a successful check.
/// The two public-to-file wrappers below previously duplicated this body.
fn checkBuildResult(result: std.process.Child.RunResult, output_name: []const u8, do_cleanup: bool) TestResult {
    switch (result.term) {
        .Exited => |code| {
            if (code != 0) {
                std.debug.print("FAIL (exit code {d})\n", .{code});
                if (result.stderr.len > 0) {
                    printTruncatedOutput(result.stderr, 5, " ");
                }
                return .failed;
            }
            // Exit code 0: verify the executable was actually created
            // before declaring a pass.
            if (std.fs.cwd().access(output_name, .{})) |_| {
                std.debug.print("OK\n", .{});
                if (do_cleanup) cleanup(output_name);
                return .passed;
            } else |_| {
                std.debug.print("FAIL (executable not created)\n", .{});
                return .failed;
            }
        },
        .Signal => |sig| {
            std.debug.print("FAIL (signal {d})\n", .{sig});
            return .failed;
        },
        else => {
            std.debug.print("FAIL (abnormal termination)\n", .{});
            return .failed;
        },
    }
}
/// Check a build result and clean up the produced executable on success.
fn handleProcessResult(result: std.process.Child.RunResult, output_name: []const u8) TestResult {
    return checkBuildResult(result, output_name, true);
}
/// Check a build result but leave the executable in place (the caller will
/// run it and clean up afterwards).
fn handleProcessResultNoCleanup(result: std.process.Child.RunResult, output_name: []const u8) TestResult {
    return checkBuildResult(result, output_name, false);
}
// Print at most `max_lines` non-empty lines of `output`, each preceded by
// `prefix`. If any split piece remains after the limit is reached (including
// the empty trailing piece after a final '\n'), a "(truncated)" marker is
// printed instead.
fn printTruncatedOutput(output: []const u8, max_lines: usize, prefix: []const u8) void {
    var printed: usize = 0;
    var iter = std.mem.splitScalar(u8, output, '\n');
    while (iter.next()) |line| {
        if (printed >= max_lines) {
            std.debug.print("{s}... (truncated)\n", .{prefix});
            return;
        }
        // Blank lines are neither printed nor counted toward the limit.
        if (line.len == 0) continue;
        std.debug.print("{s}{s}\n", .{ prefix, line });
        printed += 1;
    }
}

View file

@ -0,0 +1,452 @@
//! Unified test platform runner.
//!
//! This tool tests Roc test platforms with various modes:
//! - Cross-compilation to different targets
//! - Native build and execution
//! - Valgrind memory testing (Linux x86_64 only)
//! - IO spec verification (for fx platform)
//!
//! Usage:
//! test_runner <roc_binary> <platform> [options]
//!
//! Platforms:
//! int - Integer operations platform
//! str - String processing platform
//! fx - Effectful platform (stdout/stderr/stdin)
//! fx-open - Effectful with open union errors
//!
//! Options:
//! --target=<name> Target to test (default: all for platform)
//! Values: x64musl, arm64musl, x64glibc, arm64glibc, native
//! --mode=<mode> Test mode (default: all applicable)
//! Values: cross, native, valgrind
//! --verbose Show detailed output
//!
//! Examples:
//! test_runner ./zig-out/bin/roc int # All int tests
//! test_runner ./zig-out/bin/roc fx --target=x64musl # fx cross-compile to x64musl
//! test_runner ./zig-out/bin/roc str --mode=valgrind # str under valgrind
//! test_runner ./zig-out/bin/roc int --mode=native # int native only
const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const platform_config = @import("platform_config.zig");
const runner_core = @import("runner_core.zig");
const fx_test_specs = @import("fx_test_specs.zig");
const PlatformConfig = platform_config.PlatformConfig;
const TestStats = runner_core.TestStats;
const TestResult = runner_core.TestResult;
/// Test mode selected via --mode=.
/// Note: `.all` runs cross-compilation plus native tests; valgrind must be
/// requested explicitly (see the mode switch in main).
const TestMode = enum {
    cross, // cross-compilation tests only
    native, // native build + execution tests only
    valgrind, // valgrind memory tests only (Linux x86_64)
    all, // cross + native (default)
};
/// Parsed command line arguments.
/// The string fields are slices borrowing from `raw_args`, so they remain
/// valid only until `std.process.argsFree` is called on `raw_args`.
const Args = struct {
    roc_binary: []const u8, // path to the roc compiler binary (argv[1])
    platform_name: []const u8, // platform under test (argv[2]), e.g. "fx"
    target_filter: ?[]const u8, // from --target=<name>; null means all targets
    mode: TestMode, // from --mode=<mode>; defaults to .all
    verbose: bool, // --verbose flag
    /// Raw args buffer - caller must free via std.process.argsFree
    raw_args: [][:0]u8,
};
/// Entry point for the unified test platform runner.
///
/// Parses CLI arguments, validates the requested platform and target, prints
/// a banner, runs the selected test mode(s), and exits with status 1 if any
/// test failed (or if validation fails).
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const args = try parseArgs(allocator);
    defer std.process.argsFree(allocator, args.raw_args);
    // Look up the platform
    const platform = platform_config.findPlatform(args.platform_name) orelse {
        std.debug.print("Error: Unknown platform '{s}'\n", .{args.platform_name});
        std.debug.print("Available platforms: int, str, fx, fx-open\n", .{});
        std.process.exit(1);
    };
    // Validate target if specified. "native" is always accepted and is not in
    // the platform's cross-target table, so it bypasses the lookup.
    if (args.target_filter) |target_name| {
        if (!std.mem.eql(u8, target_name, "native")) {
            if (platform_config.findTarget(platform, target_name) == null) {
                std.debug.print("Error: Target '{s}' not supported by platform '{s}'\n", .{ target_name, platform.name });
                std.debug.print("Available targets: ", .{});
                for (platform.targets, 0..) |t, i| {
                    if (i > 0) std.debug.print(", ", .{});
                    std.debug.print("{s}", .{t.name});
                }
                std.debug.print(", native\n", .{});
                std.process.exit(1);
            }
        }
    }
    // Print banner
    std.debug.print("=== Test Platform Runner ===\n", .{});
    std.debug.print("Roc binary: {s}\n", .{args.roc_binary});
    std.debug.print("Platform: {s}\n", .{platform.name});
    if (args.target_filter) |t| {
        std.debug.print("Target filter: {s}\n", .{t});
    }
    std.debug.print("Mode: {s}\n", .{@tagName(args.mode)});
    std.debug.print("\n", .{});
    var stats = TestStats{};
    // Run tests based on mode
    switch (args.mode) {
        .cross => {
            try runCrossCompileTests(allocator, args, platform, &stats);
        },
        .native => {
            try runNativeTests(allocator, args, platform, &stats);
        },
        .valgrind => {
            try runValgrindTests(allocator, args, platform, &stats);
        },
        .all => {
            // "all" runs cross + native; valgrind is opt-in via --mode=valgrind
            // Run cross-compilation tests
            try runCrossCompileTests(allocator, args, platform, &stats);
            // Run native tests
            try runNativeTests(allocator, args, platform, &stats);
        },
    }
    // Print summary and signal failure via exit code
    runner_core.printSummary(stats);
    if (stats.failed > 0) {
        std.process.exit(1);
    }
}
/// Run cross-compilation tests for `platform`.
///
/// Phase 1 verifies that each target's prebuilt platform files (libhost.a)
/// exist, aborting the process if any are missing. Phase 2 cross-compiles
/// either the platform's single test app or each spec in its spec list, once
/// per (non-skipped, non-filtered) target, recording results into `stats`.
fn runCrossCompileTests(
    allocator: Allocator,
    args: Args,
    platform: PlatformConfig,
    stats: *TestStats,
) !void {
    runner_core.printHeader("Cross-compilation tests", .{});
    // First verify platform files exist
    std.debug.print("Verifying platform target files...\n", .{});
    var verify_failed = false;
    for (platform.targets) |target| {
        // Apply target filter
        if (args.target_filter) |filter| {
            if (!std.mem.eql(u8, target.name, filter)) continue;
        }
        // Skip glibc on non-Linux
        if (runner_core.shouldSkipTarget(target.name)) {
            runner_core.printResultLine("SKIP", target.name, "glibc requires Linux host");
            continue;
        }
        const exists = try runner_core.verifyPlatformFiles(allocator, platform.base_dir, target.name);
        if (exists) {
            runner_core.printResultLine("OK", target.name, "libhost.a exists");
        } else {
            runner_core.printResultLine("FAIL", target.name, "libhost.a missing");
            verify_failed = true;
        }
    }
    if (verify_failed) {
        std.debug.print("\nPlatform verification failed. Aborting.\n", .{});
        std.process.exit(1);
    }
    // Now run cross-compilation tests
    std.debug.print("\n", .{});
    switch (platform.test_apps) {
        .single => |app_name| {
            // One test app compiled once per target
            const roc_file = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ platform.base_dir, app_name });
            defer allocator.free(roc_file);
            for (platform.targets) |target| {
                // Apply target filter
                if (args.target_filter) |filter| {
                    if (!std.mem.eql(u8, target.name, filter)) continue;
                }
                // Skip glibc on non-Linux (counted as skipped in the summary)
                if (runner_core.shouldSkipTarget(target.name)) {
                    stats.record(.skipped);
                    continue;
                }
                std.debug.print("Building {s} for {s}... ", .{ app_name, target.name });
                // Output name encodes platform and target, e.g. "fx_x64musl"
                const output_name = try std.fmt.allocPrint(allocator, "{s}_{s}", .{ platform.name, target.name });
                defer allocator.free(output_name);
                const result = try runner_core.crossCompile(allocator, args.roc_binary, roc_file, target.name, output_name);
                stats.record(result);
            }
        },
        .spec_list => |specs| {
            // Many test specs, each compiled once per target
            for (platform.targets) |target| {
                // Apply target filter
                if (args.target_filter) |filter| {
                    if (!std.mem.eql(u8, target.name, filter)) continue;
                }
                // Skip glibc on non-Linux (counted as skipped in the summary)
                if (runner_core.shouldSkipTarget(target.name)) {
                    stats.record(.skipped);
                    continue;
                }
                std.debug.print("Cross-compiling {d} tests for {s}...\n", .{ specs.len, target.name });
                for (specs, 0..) |spec, i| {
                    const test_num = i + 1;
                    std.debug.print("[{d}/{d}] {s}... ", .{ test_num, specs.len, spec.roc_file });
                    // Output name encodes spec basename and target, e.g. "hello_x64musl"
                    const basename = std.fs.path.stem(spec.roc_file);
                    const output_name = try std.fmt.allocPrint(allocator, "{s}_{s}", .{ basename, target.name });
                    defer allocator.free(output_name);
                    const result = try runner_core.crossCompile(allocator, args.roc_binary, spec.roc_file, target.name, output_name);
                    stats.record(result);
                }
            }
        },
    }
}
/// Build and execute tests natively on the current host.
///
/// Skipped entirely when a cross target filter other than "native" is set,
/// or when the platform does not support native execution. For spec-list
/// platforms with IO specs, each test is verified against its expected IO;
/// otherwise each test is built and then run, recording both steps in `stats`.
fn runNativeTests(
    allocator: Allocator,
    args: Args,
    platform: PlatformConfig,
    stats: *TestStats,
) !void {
    // Check if native target is filtered out
    if (args.target_filter) |filter| {
        if (!std.mem.eql(u8, filter, "native")) {
            return; // Skip native tests if a specific cross target is requested
        }
    }
    if (!platform.supports_native_exec) {
        return;
    }
    runner_core.printHeader("Native build and execution tests", .{});
    switch (platform.test_apps) {
        .single => |app_name| {
            const roc_file = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ platform.base_dir, app_name });
            defer allocator.free(roc_file);
            const output_name = try std.fmt.allocPrint(allocator, "{s}_native", .{platform.name});
            defer allocator.free(output_name);
            // Build
            std.debug.print("Building {s} native... ", .{app_name});
            const build_result = try runner_core.buildNative(allocator, args.roc_binary, roc_file, output_name);
            stats.record(build_result);
            if (build_result != .passed) {
                return; // no point running an executable that wasn't built
            }
            // Run
            std.debug.print("Running native executable... ", .{});
            const exe_path = try std.fmt.allocPrint(allocator, "./{s}", .{output_name});
            defer allocator.free(exe_path);
            const run_result = try runner_core.runNative(allocator, exe_path);
            stats.record(run_result);
            // Cleanup
            runner_core.cleanup(output_name);
        },
        .spec_list => |specs| {
            if (platform.supports_io_specs) {
                // Use IO spec verification
                std.debug.print("Running {d} IO spec tests...\n", .{specs.len});
                for (specs, 0..) |spec, i| {
                    const test_num = i + 1;
                    std.debug.print("[{d}/{d}] {s}... ", .{ test_num, specs.len, spec.roc_file });
                    const result = try runner_core.runWithIoSpec(allocator, args.roc_binary, spec.roc_file, spec.io_spec);
                    stats.record(result);
                }
            } else {
                // Just build and run each test
                for (specs, 0..) |spec, i| {
                    const test_num = i + 1;
                    const basename = std.fs.path.stem(spec.roc_file);
                    const output_name = try std.fmt.allocPrint(allocator, "{s}_native", .{basename});
                    defer allocator.free(output_name);
                    std.debug.print("[{d}/{d}] Building {s}... ", .{ test_num, specs.len, spec.roc_file });
                    const build_result = try runner_core.buildNative(allocator, args.roc_binary, spec.roc_file, output_name);
                    stats.record(build_result);
                    // Only run (and clean up) executables that built successfully
                    if (build_result == .passed) {
                        const exe_path = try std.fmt.allocPrint(allocator, "./{s}", .{output_name});
                        defer allocator.free(exe_path);
                        std.debug.print("  Running... ", .{});
                        const run_result = try runner_core.runNative(allocator, exe_path);
                        stats.record(run_result);
                        runner_core.cleanup(output_name);
                    }
                }
            }
        },
    }
}
/// Run the platform's tests under valgrind.
///
/// Only available on Linux x86_64 hosts, and only for platforms whose tests
/// are valgrind-safe. Spec-list tests whose IO spec feeds stdin (marked with
/// "0<") are excluded. Results are recorded into `stats`.
fn runValgrindTests(
    allocator: Allocator,
    args: Args,
    platform: PlatformConfig,
    stats: *TestStats,
) !void {
    // Valgrind is only supported on Linux x86_64 hosts.
    if (builtin.os.tag != .linux or builtin.cpu.arch != .x86_64) {
        std.debug.print("Skipping valgrind tests (requires Linux x86_64)\n", .{});
        return;
    }
    if (!platform.valgrind_safe) {
        std.debug.print("Skipping valgrind tests for {s} (has stdin tests)\n", .{platform.name});
        return;
    }
    runner_core.printHeader("Valgrind memory tests", .{});
    switch (platform.test_apps) {
        .single => |app_name| {
            const roc_file = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ platform.base_dir, app_name });
            defer allocator.free(roc_file);
            std.debug.print("Running {s} under valgrind... ", .{app_name});
            const outcome = try runner_core.runWithValgrind(allocator, args.roc_binary, roc_file);
            stats.record(outcome);
        },
        .spec_list => |specs| {
            // Count the stdin-free tests first so progress lines can show a total.
            var total: usize = 0;
            for (specs) |spec| {
                if (std.mem.indexOf(u8, spec.io_spec, "0<") == null) total += 1;
            }
            std.debug.print("Running {d} valgrind-safe tests...\n", .{total});
            var index: usize = 0;
            for (specs) |spec| {
                // An IO spec containing "0<" feeds stdin; skip it under valgrind.
                if (std.mem.indexOf(u8, spec.io_spec, "0<") != null) continue;
                index += 1;
                std.debug.print("[{d}/{d}] {s}... ", .{ index, total, spec.roc_file });
                const outcome = try runner_core.runWithValgrind(allocator, args.roc_binary, spec.roc_file);
                stats.record(outcome);
            }
        },
    }
}
/// Parse command line arguments.
///
/// Expects `test_runner <roc_binary> <platform> [--target=..] [--mode=..] [--verbose]`.
/// Prints usage / an error message and exits with status 1 on malformed input.
/// The returned Args borrows its strings from `raw_args`; the caller must
/// free `raw_args` via std.process.argsFree when done.
fn parseArgs(allocator: Allocator) !Args {
    const raw_args = try std.process.argsAlloc(allocator);
    // Two positional arguments (roc binary, platform) are mandatory.
    if (raw_args.len < 3) {
        printUsage();
        std.process.exit(1);
    }
    var args = Args{
        .roc_binary = raw_args[1],
        .platform_name = raw_args[2],
        .target_filter = null,
        .mode = .all,
        .verbose = false,
        .raw_args = raw_args,
    };
    // Parse optional flags following the positional arguments
    var i: usize = 3;
    while (i < raw_args.len) : (i += 1) {
        const arg = raw_args[i];
        if (std.mem.startsWith(u8, arg, "--target=")) {
            args.target_filter = arg["--target=".len..];
        } else if (std.mem.startsWith(u8, arg, "--mode=")) {
            const mode_str = arg["--mode=".len..];
            // TestMode's variant names (cross, native, valgrind, all) match
            // the CLI spellings exactly, so stringToEnum replaces the manual
            // if/else chain while accepting exactly the same set of values.
            args.mode = std.meta.stringToEnum(TestMode, mode_str) orelse {
                std.debug.print("Error: Unknown mode '{s}'\n", .{mode_str});
                std.debug.print("Available modes: cross, native, valgrind, all\n", .{});
                std.process.exit(1);
            };
        } else if (std.mem.eql(u8, arg, "--verbose")) {
            args.verbose = true;
        } else {
            std.debug.print("Error: Unknown option '{s}'\n", .{arg});
            printUsage();
            std.process.exit(1);
        }
    }
    return args;
}
/// Print the command-line usage text to stderr.
/// Keep this in sync with the option handling in parseArgs.
fn printUsage() void {
    std.debug.print(
        \\Usage: test_runner <roc_binary> <platform> [options]
        \\
        \\Platforms:
        \\  int      - Integer operations platform
        \\  str      - String processing platform
        \\  fx       - Effectful platform (stdout/stderr/stdin)
        \\  fx-open  - Effectful with open union errors
        \\
        \\Options:
        \\  --target=<name>   Target to test (default: all for platform)
        \\                    Values: x64musl, arm64musl, x64glibc, arm64glibc, native
        \\  --mode=<mode>     Test mode (default: all applicable)
        \\                    Values: cross, native, valgrind, all
        \\  --verbose         Show detailed output
        \\
        \\Examples:
        \\  test_runner ./zig-out/bin/roc int                    # All int tests
        \\  test_runner ./zig-out/bin/roc fx --target=x64musl    # fx cross-compile to x64musl
        \\  test_runner ./zig-out/bin/roc str --mode=valgrind    # str under valgrind
        \\  test_runner ./zig-out/bin/roc int --mode=native      # int native only
        \\
    , .{});
}

View file

@ -125,7 +125,7 @@ test "integration - shared memory setup and parsing" {
const roc_path = "test/int/app.roc";
// Test that we can set up shared memory with ModuleEnv
const shm_result = try main.setupSharedMemoryWithModuleEnv(&allocs, roc_path);
const shm_result = try main.setupSharedMemoryWithModuleEnv(&allocs, roc_path, true);
const shm_handle = shm_result.handle;
// Clean up shared memory resources
@ -170,7 +170,7 @@ test "integration - compilation pipeline for different platforms" {
for (test_apps) |roc_path| {
// Test the full compilation pipeline (parse -> canonicalize -> typecheck)
const shm_result = main.setupSharedMemoryWithModuleEnv(&allocs, roc_path) catch |err| {
const shm_result = main.setupSharedMemoryWithModuleEnv(&allocs, roc_path, true) catch |err| {
std.log.warn("Failed to set up shared memory for {s}: {}\n", .{ roc_path, err });
continue;
};
@ -212,7 +212,7 @@ test "integration - error handling for non-existent file" {
const roc_path = "test/nonexistent/app.roc";
// This should fail because the file doesn't exist
const result = main.setupSharedMemoryWithModuleEnv(&allocs, roc_path);
const result = main.setupSharedMemoryWithModuleEnv(&allocs, roc_path, true);
// We expect this to fail - the important thing is that it doesn't crash
if (result) |shm_result| {

View file

@ -376,6 +376,15 @@ pub const BuildEnv = struct {
pkg_sink_ctxs: std.array_list.Managed(*PkgSinkCtx),
// Owned schedule ctxs for pre-registration (one per package)
schedule_ctxs: std.array_list.Managed(*ScheduleCtx),
// Pending known module registrations (processed after schedulers are created)
pending_known_modules: std.array_list.Managed(PendingKnownModule),
/// Info about a known module registration that needs to be applied after schedulers exist
const PendingKnownModule = struct {
target_package: []const u8, // Package to register with (e.g., "app")
qualified_name: []const u8, // e.g., "pf.Stdout"
import_name: []const u8, // e.g., "pf.Stdout"
};
pub fn init(gpa: Allocator, mode: Mode, max_threads: usize) !BuildEnv {
// Allocate builtin modules on heap to prevent moves that would invalidate internal pointers
@ -396,6 +405,7 @@ pub const BuildEnv = struct {
.resolver_ctxs = std.array_list.Managed(*ResolverCtx).init(gpa),
.pkg_sink_ctxs = std.array_list.Managed(*PkgSinkCtx).init(gpa),
.schedule_ctxs = std.array_list.Managed(*ScheduleCtx).init(gpa),
.pending_known_modules = std.array_list.Managed(PendingKnownModule).init(gpa),
};
}
@ -425,6 +435,14 @@ pub const BuildEnv = struct {
for (self.schedule_ctxs.items) |p| self.gpa.destroy(p);
self.schedule_ctxs.deinit();
// Free pending known modules
for (self.pending_known_modules.items) |pkm| {
self.gpa.free(pkm.target_package);
self.gpa.free(pkm.qualified_name);
self.gpa.free(pkm.import_name);
}
self.pending_known_modules.deinit();
// Deinit schedulers
var sit = self.schedulers.iterator();
while (sit.next()) |e| {
@ -525,6 +543,9 @@ pub const BuildEnv = struct {
// Create per-package schedulers wired with a shared resolver and global queue hook
try self.createSchedulers();
// Register pending known modules now that schedulers exist
try self.processPendingKnownModules();
// Set back-pointer for dispatch
self.global_queue.build_env = self;
@ -533,11 +554,8 @@ pub const BuildEnv = struct {
try self.global_queue.start(self.gpa, self.max_threads, &self.sink);
}
// Seed root module into global queue via schedule hook (ModuleBuild will call back)
const root_sched = self.schedulers.getPtr(pkg_name).?;
try root_sched.*.buildRoot(pkg_root_file);
// Kick remaining packages by seeding their root files too
// Build platform and other dependency packages BEFORE the app
// This ensures platform module envs are available when app is canonicalized
var it = self.schedulers.iterator();
while (it.next()) |e| {
const name = e.key_ptr.*;
@ -546,6 +564,10 @@ pub const BuildEnv = struct {
try e.value_ptr.*.buildRoot(pkg.root_file);
}
// Seed root module into global queue via schedule hook (ModuleBuild will call back)
const root_sched = self.schedulers.getPtr(pkg_name).?;
try root_sched.*.buildRoot(pkg_root_file);
// Wait for all work to complete
if (builtin.target.cpu.arch != .wasm32 and self.mode == .multi_threaded) {
// Multi-threaded mode: wait for global queue to drain
@ -753,16 +775,22 @@ pub const BuildEnv = struct {
const qual = parts.qual;
const rest = parts.rest;
const ref = cur_pkg.shorthands.get(qual) orelse return;
const ref = cur_pkg.shorthands.get(qual) orelse {
return;
};
const target_pkg_name = ref.name;
const target_pkg = self.ws.packages.get(target_pkg_name) orelse return;
const target_pkg = self.ws.packages.get(target_pkg_name) orelse {
return;
};
const mod_path = self.ws.dottedToPath(target_pkg.root_dir, rest) catch {
return;
};
defer self.ws.gpa.free(mod_path);
const sched = self.ws.schedulers.get(target_pkg_name) orelse return;
const sched = self.ws.schedulers.get(target_pkg_name) orelse {
return;
};
sched.*.scheduleModule(rest, mod_path, 1) catch {
// Continue anyway - dependency resolution will handle missing modules
};
@ -790,10 +818,15 @@ pub const BuildEnv = struct {
const qual = parts.qual;
const rest = parts.rest;
const ref = cur_pkg.shorthands.get(qual) orelse return null;
const sched = self.ws.schedulers.get(ref.name) orelse return null;
const ref = cur_pkg.shorthands.get(qual) orelse {
return null;
};
const sched = self.ws.schedulers.get(ref.name) orelse {
return null;
};
return sched.*.getEnvIfDone(rest);
const result = sched.*.getEnvIfDone(rest);
return result;
}
fn resolverResolveLocalPath(ctx: ?*anyopaque, _: []const u8, root_dir: []const u8, import_name: []const u8) []const u8 {
@ -852,6 +885,8 @@ pub const BuildEnv = struct {
platform_alias: ?[]u8 = null,
platform_path: ?[]u8 = null,
shorthands: std.StringHashMapUnmanaged([]const u8) = .{},
/// Platform-exposed modules (e.g., Stdout, Stderr) that apps can import
exposes: std.ArrayListUnmanaged([]const u8) = .{},
fn deinit(self: *HeaderInfo, gpa: Allocator) void {
if (self.platform_alias) |a| freeSlice(gpa, a);
@ -862,6 +897,10 @@ pub const BuildEnv = struct {
freeConstSlice(gpa, e.value_ptr.*);
}
self.shorthands.deinit(gpa);
for (self.exposes.items) |e| {
freeConstSlice(gpa, e);
}
self.exposes.deinit(gpa);
}
};
@ -1125,6 +1164,22 @@ pub const BuildEnv = struct {
}
try info.shorthands.put(self.gpa, try self.gpa.dupe(u8, k), v);
}
// Extract platform-exposed modules (e.g., Stdout, Stderr)
// These are modules that apps can import from the platform
const exposes_coll = ast.store.getCollection(p.exposes);
const exposes_items = ast.store.exposedItemSlice(.{ .span = exposes_coll.span });
for (exposes_items) |item_idx| {
const item = ast.store.getExposedItem(item_idx);
const token_idx = switch (item) {
.upper_ident => |ui| ui.ident,
.upper_ident_star => |uis| uis.ident,
.lower_ident => |li| li.ident,
.malformed => continue, // Skip malformed items
};
const item_name = ast.resolve(token_idx);
try info.exposes.append(self.gpa, try self.gpa.dupe(u8, item_name));
}
},
.module => {
info.kind = .module;
@ -1431,6 +1486,22 @@ pub const BuildEnv = struct {
}
}
/// Register pending known modules with their target schedulers.
/// Also schedules the external modules so they'll be built before the app.
/// Called after createSchedulers() to ensure all schedulers exist.
fn processPendingKnownModules(self: *BuildEnv) !void {
    for (self.pending_known_modules.items) |pkm| {
        // NOTE(review): entries whose target package has no scheduler are
        // silently skipped here — confirm that is intentional.
        if (self.schedulers.get(pkm.target_package)) |sched| {
            try sched.addKnownModule(pkm.qualified_name, pkm.import_name);
            // Also schedule the external module so it gets built
            // This is needed so the module is ready when we populate module_envs_map
            if (sched.resolver) |res| {
                res.scheduleExternal(res.ctx, pkm.target_package, pkm.import_name);
            }
        }
    }
}
fn populatePackageShorthands(self: *BuildEnv, pkg_name: []const u8, info: *HeaderInfo) !void {
var pack = self.packages.getPtr(pkg_name).?;
@ -1470,6 +1541,46 @@ pub const BuildEnv = struct {
});
try self.populatePackageShorthands(dep_name, &child_info);
// Register platform-exposed modules as packages so apps can import them
// This is necessary for URL platforms where the platform directory is in a cache
const platform_dir = std.fs.path.dirname(abs) orelse ".";
for (child_info.exposes.items) |module_name| {
// Create path to the module file (e.g., Stdout.roc)
const module_filename = try std.fmt.allocPrint(self.gpa, "{s}.roc", .{module_name});
defer self.gpa.free(module_filename);
const module_path = try std.fs.path.join(self.gpa, &.{ platform_dir, module_filename });
defer self.gpa.free(module_path);
// Register this module as a package
// Only allocate if package doesn't exist (ensurePackage makes its own copy)
if (!self.packages.contains(module_name)) {
try self.ensurePackage(module_name, .module, module_path);
}
// Also add to app's shorthands so imports resolve correctly
const mod_key = try self.gpa.dupe(u8, module_name);
if (pack.shorthands.fetchRemove(mod_key)) |old_entry| {
freeConstSlice(self.gpa, old_entry.key);
freeConstSlice(self.gpa, old_entry.value.name);
freeConstSlice(self.gpa, old_entry.value.root_file);
}
try pack.shorthands.put(self.gpa, mod_key, .{
.name = try self.gpa.dupe(u8, module_name),
.root_file = try self.gpa.dupe(u8, module_path),
});
// Add to pending list - will be registered after schedulers are created
// Use the QUALIFIED name (e.g., "pf.Stdout") because that's how imports are tracked
const qualified_name = try std.fmt.allocPrint(self.gpa, "{s}.{s}", .{ alias, module_name });
try self.pending_known_modules.append(.{
.target_package = try self.gpa.dupe(u8, pkg_name),
.qualified_name = qualified_name,
.import_name = try self.gpa.dupe(u8, qualified_name),
});
}
}
// Common package dependencies

View file

@ -191,6 +191,18 @@ pub const PackageEnv = struct {
total_type_checking_ns: u64 = 0,
total_check_diagnostics_ns: u64 = 0,
// Additional known modules (e.g., from platform exposes) to include in module_envs_map
// These are modules that exist in external directories (like URL platform cache)
additional_known_modules: std.ArrayList(KnownModule),
/// Info about a known module from a platform or other package
pub const KnownModule = struct {
/// Qualified module name (e.g., "pf.Stdout")
qualified_name: []const u8,
/// Import name for resolver lookup (e.g., "pf.Stdout")
import_name: []const u8,
};
pub fn init(gpa: Allocator, package_name: []const u8, root_dir: []const u8, mode: Mode, max_threads: usize, sink: ReportSink, schedule_hook: ScheduleHook, compiler_version: []const u8, builtin_modules: *const BuiltinModules, file_provider: ?FileProvider) PackageEnv {
return .{
.gpa = gpa,
@ -206,6 +218,7 @@ pub const PackageEnv = struct {
.injector = std.ArrayList(Task).empty,
.modules = std.ArrayList(ModuleState).empty,
.discovered = std.ArrayList(ModuleId).empty,
.additional_known_modules = std.ArrayList(KnownModule).empty,
};
}
@ -237,9 +250,24 @@ pub const PackageEnv = struct {
.injector = std.ArrayList(Task).empty,
.modules = std.ArrayList(ModuleState).empty,
.discovered = std.ArrayList(ModuleId).empty,
.additional_known_modules = std.ArrayList(KnownModule).empty,
};
}
/// Add a module that should be recognized during canonicalization.
/// This is used for platform-exposed modules in URL platforms where the
/// modules exist in a cache directory, not the app's directory.
/// `qualified_name` is the full name like "pf.Stdout"
/// `import_name` is the import path for resolver lookup (e.g., "pf.Stdout")
pub fn addKnownModule(self: *PackageEnv, qualified_name: []const u8, import_name: []const u8) !void {
    // Duplicate both names: PackageEnv owns the copies and frees them in deinit.
    const qualified_copy = try self.gpa.dupe(u8, qualified_name);
    const import_copy = try self.gpa.dupe(u8, import_name);
    try self.additional_known_modules.append(self.gpa, .{
        .qualified_name = qualified_copy,
        .import_name = import_copy,
    });
}
pub fn deinit(self: *PackageEnv) void {
// NOTE: builtin_modules is not owned by PackageEnv, so we don't deinit it here
@ -259,6 +287,13 @@ pub const PackageEnv = struct {
self.injector.deinit(self.gpa);
self.discovered.deinit(self.gpa);
self.emitted.deinit(self.gpa);
// Free additional known module names
for (self.additional_known_modules.items) |km| {
self.gpa.free(km.qualified_name);
self.gpa.free(km.import_name);
}
self.additional_known_modules.deinit(self.gpa);
}
/// Get the root module's env (first module added)
@ -630,6 +665,7 @@ pub const PackageEnv = struct {
// Use shared canonicalization function to ensure consistency with snapshot tool
// Pass sibling module names from the same directory so MODULE NOT FOUND isn't
// reported prematurely for modules that exist but haven't been loaded yet.
// Also include additional known modules from platform exposes (for URL platforms).
try canonicalizeModuleWithSiblings(
self.gpa,
env,
@ -637,6 +673,9 @@ pub const PackageEnv = struct {
self.builtin_modules.builtin_module.env,
self.builtin_modules.builtin_indices,
self.root_dir,
self.package_name,
self.resolver,
self.additional_known_modules.items,
);
const canon_end = if (@import("builtin").target.cpu.arch != .wasm32) std.time.nanoTimestamp() else 0;
@ -892,7 +931,8 @@ pub const PackageEnv = struct {
czer.deinit();
}
/// Canonicalization function that also discovers sibling .roc files in the same directory.
/// Canonicalization function that also discovers sibling .roc files in the same directory
/// and includes additional known modules (e.g., from platform exposes).
/// This prevents premature MODULE NOT FOUND errors for modules that exist but haven't been loaded yet.
fn canonicalizeModuleWithSiblings(
gpa: Allocator,
@ -901,6 +941,9 @@ pub const PackageEnv = struct {
builtin_module_env: *const ModuleEnv,
builtin_indices: can.CIR.BuiltinIndices,
root_dir: []const u8,
package_name: []const u8,
resolver: ?ImportResolver,
additional_known_modules: []const KnownModule,
) !void {
// Create module_envs map for auto-importing builtin types
var module_envs_map = std.AutoHashMap(base.Ident.Idx, Can.AutoImportedType).init(gpa);
@ -948,6 +991,23 @@ pub const PackageEnv = struct {
}
}
// Add additional known modules (e.g., from platform exposes for URL platforms)
// Use the resolver to get the ACTUAL module env if available
for (additional_known_modules) |km| {
const module_ident = try env.insertIdent(base.Ident.for_text(km.qualified_name));
const qualified_ident = try env.insertIdent(base.Ident.for_text(km.qualified_name));
if (!module_envs_map.contains(module_ident)) {
// Try to get the actual module env using the resolver
const actual_env: *const ModuleEnv = if (resolver) |res| blk: {
if (res.getEnv(res.ctx, package_name, km.import_name)) |mod_env| {
break :blk mod_env;
}
break :blk builtin_module_env;
} else builtin_module_env;
try module_envs_map.put(module_ident, .{ .env = actual_env, .qualified_type_ident = qualified_ident });
}
}
var czer = try Can.init(env, parse_ast, &module_envs_map);
try czer.canonicalizeFile();
czer.deinit();

View file

@ -43,18 +43,39 @@ const StackValue = @This();
fn increfLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(ptr.?);
if (ptr_int % @alignOf(RocStr) != 0) {
std.debug.panic("increfLayoutPtr(str): ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(RocStr) });
}
}
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.incref(1);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(ptr.?);
if (ptr_int % @alignOf(RocList) != 0) {
std.debug.panic("increfLayoutPtr(list): ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(RocList) });
}
}
const list_value = @as(*const RocList, @ptrCast(@alignCast(ptr.?))).*;
list_value.incref(1, false);
return;
}
if (layout.tag == .box) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(ptr.?);
if (ptr_int % @alignOf(usize) != 0) {
std.debug.panic("increfLayoutPtr(box): ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(usize) });
}
}
const slot: *usize = @ptrCast(@alignCast(ptr.?));
if (slot.* != 0) {
const data_ptr: [*]u8 = @as([*]u8, @ptrFromInt(slot.*));
@ -112,12 +133,26 @@ fn increfLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore)
fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore, ops: *RocOps) void {
if (layout.tag == .scalar and layout.data.scalar.tag == .str) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(ptr.?);
if (ptr_int % @alignOf(RocStr) != 0) {
std.debug.panic("decrefLayoutPtr(str): ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(RocStr) });
}
}
const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*;
roc_str.decref(ops);
return;
}
if (layout.tag == .list) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(ptr.?);
if (ptr_int % @alignOf(RocList) != 0) {
std.debug.panic("decrefLayoutPtr(list): ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(RocList) });
}
}
const list_header: *const RocList = @ptrCast(@alignCast(ptr.?));
const list_value = list_header.*;
const elem_layout = layout_cache.getLayout(layout.data.list);
@ -141,6 +176,13 @@ fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore,
}
if (layout.tag == .box) {
if (ptr == null) return;
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const box_ptr_int = @intFromPtr(ptr.?);
if (box_ptr_int % @alignOf(usize) != 0) {
std.debug.panic("decrefLayoutPtr(box): ptr=0x{x} is not {}-byte aligned", .{ box_ptr_int, @alignOf(usize) });
}
}
const slot: *usize = @ptrCast(@alignCast(ptr.?));
const raw_ptr = slot.*;
if (raw_ptr == 0) return;
@ -152,8 +194,21 @@ fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore,
const ptr_int = @intFromPtr(data_ptr);
const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11;
const unmasked_ptr = ptr_int & ~tag_mask;
const refcount_addr = unmasked_ptr - @sizeOf(isize);
// Verify alignment before @ptrFromInt for refcount
if (comptime builtin.mode == .Debug) {
if (refcount_addr % @alignOf(isize) != 0) {
std.debug.panic("decrefLayoutPtr: refcount_addr=0x{x} misaligned! unmasked=0x{x}, raw=0x{x}", .{
refcount_addr,
unmasked_ptr,
raw_ptr,
});
}
}
const payload_ptr = @as([*]u8, @ptrFromInt(unmasked_ptr));
const refcount_ptr: *isize = @as(*isize, @ptrFromInt(unmasked_ptr - @sizeOf(isize)));
const refcount_ptr: *isize = @as(*isize, @ptrFromInt(refcount_addr));
if (builtins.utils.rcUnique(refcount_ptr.*)) {
if (elem_layout.isRefcounted()) {
@ -203,18 +258,46 @@ fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore,
if (layout.tag == .closure) {
if (ptr == null) return;
// Get the closure header to find the captures layout
const closure_ptr_val = @intFromPtr(ptr.?);
if (closure_ptr_val % @alignOf(layout_mod.Closure) != 0) {
std.debug.panic("[decrefLayoutPtr] closure alignment error: ptr=0x{x} not aligned to {}", .{ closure_ptr_val, @alignOf(layout_mod.Closure) });
}
const closure_header: *const layout_mod.Closure = @ptrCast(@alignCast(ptr.?));
// Debug assert: check for obviously invalid layout indices (sentinel values like 0xAAAAAAAA)
const idx_as_usize = @intFromEnum(closure_header.captures_layout_idx);
if (comptime trace_refcount) {
traceRefcount("DECREF closure detail: ptr=0x{x} captures_layout_idx={} body_idx={}", .{
closure_ptr_val,
idx_as_usize,
@intFromEnum(closure_header.body_idx),
});
}
if (idx_as_usize > 0x1000000) { // 16 million layouts is way more than any real program would have
std.debug.panic("decrefLayoutPtr: closure has invalid captures_layout_idx=0x{x} (likely uninitialized or corrupted closure header at ptr={*})", .{ idx_as_usize, ptr.? });
}
const captures_layout = layout_cache.getLayout(closure_header.captures_layout_idx);
if (comptime trace_refcount) {
traceRefcount("DECREF closure captures_layout.tag={}", .{@intFromEnum(captures_layout.tag)});
}
// Only decref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (comptime trace_refcount) {
traceRefcount("DECREF closure record fields={}", .{record_data.fields.count});
}
if (record_data.fields.count > 0) {
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
if (comptime trace_refcount) {
traceRefcount("DECREF closure rec_ptr=0x{x}", .{@intFromPtr(rec_ptr)});
}
decrefLayoutPtr(captures_layout, rec_ptr, layout_cache, ops);
}
}
@ -260,7 +343,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
if (!src_str.isSmallStr()) {
const alloc_ptr = src_str.getAllocationPtr();
const rc_before: isize = if (alloc_ptr) |ptr| blk: {
if (@intFromPtr(ptr) % 8 != 0) break :blk -999;
if (@intFromPtr(ptr) % @alignOf(usize) != 0) break :blk -999;
const isizes: [*]isize = @ptrCast(@alignCast(ptr));
break :blk (isizes - 1)[0];
} else 0;
@ -280,24 +363,29 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
std.debug.assert(self.ptr != null);
const precision = self.layout.data.scalar.data.int;
const value = self.asI128();
const dest_ptr_val = @intFromPtr(dest_ptr);
switch (precision) {
.u8 => {
const typed_ptr: *u8 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(u8, value) orelse return error.IntegerOverflow;
},
.u16 => {
if (dest_ptr_val % @alignOf(u16) != 0) std.debug.panic("[copyToPtr] u16 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *u16 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(u16, value) orelse return error.IntegerOverflow;
},
.u32 => {
if (dest_ptr_val % @alignOf(u32) != 0) std.debug.panic("[copyToPtr] u32 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *u32 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(u32, value) orelse return error.IntegerOverflow;
},
.u64 => {
if (dest_ptr_val % @alignOf(u64) != 0) std.debug.panic("[copyToPtr] u64 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *u64 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(u64, value) orelse return error.IntegerOverflow;
},
.u128 => {
if (dest_ptr_val % @alignOf(u128) != 0) std.debug.panic("[copyToPtr] u128 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *u128 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(u128, value) orelse return error.IntegerOverflow;
},
@ -306,18 +394,22 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
typed_ptr.* = std.math.cast(i8, value) orelse return error.IntegerOverflow;
},
.i16 => {
if (dest_ptr_val % @alignOf(i16) != 0) std.debug.panic("[copyToPtr] i16 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *i16 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(i16, value) orelse return error.IntegerOverflow;
},
.i32 => {
if (dest_ptr_val % @alignOf(i32) != 0) std.debug.panic("[copyToPtr] i32 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *i32 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(i32, value) orelse return error.IntegerOverflow;
},
.i64 => {
if (dest_ptr_val % @alignOf(i64) != 0) std.debug.panic("[copyToPtr] i64 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *i64 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = std.math.cast(i64, value) orelse return error.IntegerOverflow;
},
.i128 => {
if (dest_ptr_val % @alignOf(i128) != 0) std.debug.panic("[copyToPtr] i128 alignment error: dest_ptr=0x{x}", .{dest_ptr_val});
const typed_ptr: *i128 = @ptrCast(@alignCast(dest_ptr));
typed_ptr.* = value;
},
@ -329,6 +421,17 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
}
if (self.layout.tag == .box) {
// Verify alignment before @alignCast for usize
if (comptime builtin.mode == .Debug) {
const box_src_ptr_val = @intFromPtr(self.ptr.?);
const box_dest_ptr_val = @intFromPtr(dest_ptr);
if (box_src_ptr_val % @alignOf(usize) != 0) {
std.debug.panic("[copyToPtr box] src alignment error: ptr=0x{x} not {}-byte aligned", .{ box_src_ptr_val, @alignOf(usize) });
}
if (box_dest_ptr_val % @alignOf(usize) != 0) {
std.debug.panic("[copyToPtr box] dest alignment error: ptr=0x{x} not {}-byte aligned", .{ box_dest_ptr_val, @alignOf(usize) });
}
}
const src_slot: *usize = @ptrCast(@alignCast(self.ptr.?));
const dest_slot: *usize = @ptrCast(@alignCast(dest_ptr));
dest_slot.* = src_slot.*;
@ -340,6 +443,13 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
}
if (self.layout.tag == .box_of_zst) {
// Verify alignment before @alignCast for usize
if (comptime builtin.mode == .Debug) {
const box_zst_dest_ptr_val = @intFromPtr(dest_ptr);
if (box_zst_dest_ptr_val % @alignOf(usize) != 0) {
std.debug.panic("[copyToPtr box_of_zst] dest alignment error: ptr=0x{x} not {}-byte aligned", .{ box_zst_dest_ptr_val, @alignOf(usize) });
}
}
const dest_slot: *usize = @ptrCast(@alignCast(dest_ptr));
dest_slot.* = 0;
return;
@ -348,6 +458,17 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
if (self.layout.tag == .list) {
// Copy the list header and incref the underlying data
std.debug.assert(self.ptr != null);
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const copyToPtr_src_ptr_int = @intFromPtr(self.ptr.?);
if (copyToPtr_src_ptr_int % @alignOf(builtins.list.RocList) != 0) {
std.debug.panic("copyToPtr(list): self.ptr=0x{x} is not {}-byte aligned", .{ copyToPtr_src_ptr_int, @alignOf(builtins.list.RocList) });
}
const copyToPtr_dest_ptr_int = @intFromPtr(dest_ptr);
if (copyToPtr_dest_ptr_int % @alignOf(builtins.list.RocList) != 0) {
std.debug.panic("copyToPtr(list): dest_ptr=0x{x} is not {}-byte aligned", .{ copyToPtr_dest_ptr_int, @alignOf(builtins.list.RocList) });
}
}
const src_list: *const builtins.list.RocList = @ptrCast(@alignCast(self.ptr.?));
const dest_list: *builtins.list.RocList = @ptrCast(@alignCast(dest_ptr));
dest_list.* = src_list.*;
@ -362,7 +483,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
if (src_list.getAllocationDataPtr()) |alloc_ptr| {
if (comptime trace_refcount) {
const rc_before: isize = blk: {
if (@intFromPtr(alloc_ptr) % 8 != 0) break :blk -999;
if (@intFromPtr(alloc_ptr) % @alignOf(usize) != 0) break :blk -999;
const isizes: [*]isize = @ptrCast(@alignCast(alloc_ptr));
break :blk (isizes - 1)[0];
};
@ -383,6 +504,17 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
if (self.layout.tag == .list_of_zst) {
// Copy the list header for ZST lists - no refcounting needed for ZSTs
std.debug.assert(self.ptr != null);
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const copyToPtr_zst_src_ptr_int = @intFromPtr(self.ptr.?);
if (copyToPtr_zst_src_ptr_int % @alignOf(builtins.list.RocList) != 0) {
std.debug.panic("copyToPtr(list_of_zst): self.ptr=0x{x} is not {}-byte aligned", .{ copyToPtr_zst_src_ptr_int, @alignOf(builtins.list.RocList) });
}
const copyToPtr_zst_dest_ptr_int = @intFromPtr(dest_ptr);
if (copyToPtr_zst_dest_ptr_int % @alignOf(builtins.list.RocList) != 0) {
std.debug.panic("copyToPtr(list_of_zst): dest_ptr=0x{x} is not {}-byte aligned", .{ copyToPtr_zst_dest_ptr_int, @alignOf(builtins.list.RocList) });
}
}
const src_list: *const builtins.list.RocList = @ptrCast(@alignCast(self.ptr.?));
const dest_list: *builtins.list.RocList = @ptrCast(@alignCast(dest_ptr));
dest_list.* = src_list.*;
@ -500,10 +632,17 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa
// Read discriminant to determine active variant
const disc_ptr = base_ptr + tu_data.discriminant_offset;
const disc_ptr_val = @intFromPtr(disc_ptr);
const discriminant: u32 = switch (tu_data.discriminant_size) {
1 => @as(*const u8, @ptrCast(disc_ptr)).*,
2 => @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*,
4 => @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*,
2 => blk: {
if (disc_ptr_val % @alignOf(u16) != 0) std.debug.panic("[copyToPtr tag_union] u16 disc alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*;
},
4 => blk: {
if (disc_ptr_val % @alignOf(u32) != 0) std.debug.panic("[copyToPtr tag_union] u32 disc alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*;
},
else => unreachable,
};
@ -569,24 +708,30 @@ pub fn asI128(self: StackValue) i128 {
std.debug.assert(self.layout.tag == .scalar and self.layout.data.scalar.tag == .int);
const precision = self.layout.data.scalar.data.int;
const ptr_val = @intFromPtr(self.ptr.?);
return switch (precision) {
.u8 => blk: {
const typed_ptr = @as(*const u8, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.u16 => blk: {
if (ptr_val % @alignOf(u16) != 0) std.debug.panic("[asI128] u16 alignment error: ptr=0x{x} is not 2-byte aligned", .{ptr_val});
const typed_ptr = @as(*const u16, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.u32 => blk: {
if (ptr_val % @alignOf(u32) != 0) std.debug.panic("[asI128] u32 alignment error: ptr=0x{x} is not 4-byte aligned", .{ptr_val});
const typed_ptr = @as(*const u32, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.u64 => blk: {
if (ptr_val % @alignOf(u64) != 0) std.debug.panic("[asI128] u64 alignment error: ptr=0x{x} is not 8-byte aligned", .{ptr_val});
const typed_ptr = @as(*const u64, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.u128 => blk: {
if (ptr_val % @alignOf(u128) != 0) std.debug.panic("[asI128] u128 alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
const typed_ptr = @as(*const u128, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, @intCast(typed_ptr.*));
},
@ -595,18 +740,22 @@ pub fn asI128(self: StackValue) i128 {
break :blk @as(i128, typed_ptr.*);
},
.i16 => blk: {
if (ptr_val % @alignOf(i16) != 0) std.debug.panic("[asI128] i16 alignment error: ptr=0x{x} is not 2-byte aligned", .{ptr_val});
const typed_ptr = @as(*const i16, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.i32 => blk: {
if (ptr_val % @alignOf(i32) != 0) std.debug.panic("[asI128] i32 alignment error: ptr=0x{x} is not 4-byte aligned", .{ptr_val});
const typed_ptr = @as(*const i32, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.i64 => blk: {
if (ptr_val % @alignOf(i64) != 0) std.debug.panic("[asI128] i64 alignment error: ptr=0x{x} is not 8-byte aligned", .{ptr_val});
const typed_ptr = @as(*const i64, @ptrCast(@alignCast(self.ptr.?)));
break :blk @as(i128, typed_ptr.*);
},
.i128 => blk: {
if (ptr_val % @alignOf(i128) != 0) std.debug.panic("[asI128] i128 alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
const typed_ptr = @as(*const i128, @ptrCast(@alignCast(self.ptr.?)));
break :blk typed_ptr.*;
},
@ -629,6 +778,7 @@ pub fn setInt(self: *StackValue, value: i128) error{IntegerOverflow}!void {
std.debug.assert(!self.is_initialized);
const precision = self.layout.data.scalar.data.int;
const ptr_val = @intFromPtr(self.ptr.?);
// Inline integer writing logic with proper type casting and alignment
// Use std.math.cast to safely check if value fits, returning error instead of panicking
@ -638,18 +788,22 @@ pub fn setInt(self: *StackValue, value: i128) error{IntegerOverflow}!void {
typed_ptr.* = std.math.cast(u8, value) orelse return error.IntegerOverflow;
},
.u16 => {
if (ptr_val % @alignOf(u16) != 0) std.debug.panic("[setInt] u16 alignment error: ptr=0x{x} is not 2-byte aligned", .{ptr_val});
const typed_ptr: *u16 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(u16, value) orelse return error.IntegerOverflow;
},
.u32 => {
if (ptr_val % @alignOf(u32) != 0) std.debug.panic("[setInt] u32 alignment error: ptr=0x{x} is not 4-byte aligned", .{ptr_val});
const typed_ptr: *u32 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(u32, value) orelse return error.IntegerOverflow;
},
.u64 => {
if (ptr_val % @alignOf(u64) != 0) std.debug.panic("[setInt] u64 alignment error: ptr=0x{x} is not 8-byte aligned", .{ptr_val});
const typed_ptr: *u64 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(u64, value) orelse return error.IntegerOverflow;
},
.u128 => {
if (ptr_val % @alignOf(u128) != 0) std.debug.panic("[setInt] u128 alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
const typed_ptr: *u128 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(u128, value) orelse return error.IntegerOverflow;
},
@ -658,18 +812,22 @@ pub fn setInt(self: *StackValue, value: i128) error{IntegerOverflow}!void {
typed_ptr.* = std.math.cast(i8, value) orelse return error.IntegerOverflow;
},
.i16 => {
if (ptr_val % @alignOf(i16) != 0) std.debug.panic("[setInt] i16 alignment error: ptr=0x{x} is not 2-byte aligned", .{ptr_val});
const typed_ptr: *i16 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(i16, value) orelse return error.IntegerOverflow;
},
.i32 => {
if (ptr_val % @alignOf(i32) != 0) std.debug.panic("[setInt] i32 alignment error: ptr=0x{x} is not 4-byte aligned", .{ptr_val});
const typed_ptr: *i32 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(i32, value) orelse return error.IntegerOverflow;
},
.i64 => {
if (ptr_val % @alignOf(i64) != 0) std.debug.panic("[setInt] i64 alignment error: ptr=0x{x} is not 8-byte aligned", .{ptr_val});
const typed_ptr: *i64 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = std.math.cast(i64, value) orelse return error.IntegerOverflow;
},
.i128 => {
if (ptr_val % @alignOf(i128) != 0) std.debug.panic("[setInt] i128 alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
const typed_ptr: *i128 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = value;
},
@ -711,6 +869,8 @@ pub fn setIntFromBytes(self: *StackValue, bytes: [16]u8, is_u128: bool) error{In
typed_ptr.* = std.math.cast(u64, u128_value) orelse return error.IntegerOverflow;
},
.u128 => {
const u128_ptr_val = @intFromPtr(self.ptr.?);
if (u128_ptr_val % @alignOf(u128) != 0) std.debug.panic("[setIntFromBytes] u128 alignment error: ptr=0x{x} is not 16-byte aligned", .{u128_ptr_val});
const typed_ptr: *u128 = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = u128_value;
},
@ -786,6 +946,9 @@ pub fn asDec(self: StackValue) RocDec {
std.debug.assert(self.layout.tag == .scalar and self.layout.data.scalar.tag == .frac);
std.debug.assert(self.layout.data.scalar.data.frac == .dec);
// RocDec contains i128 which requires 16-byte alignment
const ptr_val = @intFromPtr(self.ptr.?);
if (ptr_val % @alignOf(i128) != 0) std.debug.panic("[asDec] alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
const typed_ptr = @as(*const RocDec, @ptrCast(@alignCast(self.ptr.?)));
return typed_ptr.*;
}
@ -842,6 +1005,10 @@ pub fn setDec(self: *StackValue, value: RocDec) void {
// Avoid accidental overwrite, manually toggle this if updating an already initialized value
std.debug.assert(!self.is_initialized);
// RocDec contains i128 which requires 16-byte alignment
const ptr_val = @intFromPtr(self.ptr.?);
if (ptr_val % @alignOf(i128) != 0) std.debug.panic("[setDec] alignment error: ptr=0x{x} is not 16-byte aligned", .{ptr_val});
// Write the Dec value
const typed_ptr: *RocDec = @ptrCast(@alignCast(self.ptr.?));
typed_ptr.* = value;
@ -967,11 +1134,21 @@ pub const TagUnionAccessor = struct {
pub fn getDiscriminant(self: TagUnionAccessor) usize {
const base_ptr: [*]u8 = @ptrCast(self.base_value.ptr.?);
const disc_ptr = base_ptr + self.tu_data.discriminant_offset;
const disc_ptr_val = @intFromPtr(disc_ptr);
return switch (self.tu_data.discriminant_size) {
1 => @as(*const u8, @ptrCast(disc_ptr)).*,
2 => @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*,
4 => @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*,
8 => @intCast(@as(*const u64, @ptrCast(@alignCast(disc_ptr))).*),
2 => blk: {
if (disc_ptr_val % @alignOf(u16) != 0) std.debug.panic("[getDiscriminant] u16 alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*;
},
4 => blk: {
if (disc_ptr_val % @alignOf(u32) != 0) std.debug.panic("[getDiscriminant] u32 alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*;
},
8 => blk: {
if (disc_ptr_val % @alignOf(u64) != 0) std.debug.panic("[getDiscriminant] u64 alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @intCast(@as(*const u64, @ptrCast(@alignCast(disc_ptr))).*);
},
else => 0,
};
}
@ -1010,6 +1187,13 @@ pub fn asList(self: StackValue, layout_cache: *LayoutStore, element_layout: Layo
std.debug.assert(self.ptr != null);
std.debug.assert(self.layout.tag == .list or self.layout.tag == .list_of_zst);
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const ptr_int = @intFromPtr(self.ptr.?);
if (ptr_int % @alignOf(RocList) != 0) {
std.debug.panic("asList: self.ptr=0x{x} is not {}-byte aligned", .{ ptr_int, @alignOf(RocList) });
}
}
const header: *const RocList = @ptrCast(@alignCast(self.ptr.?));
return ListAccessor{
.base_value = self,
@ -1062,6 +1246,13 @@ pub const ListAccessor = struct {
fn storeListElementCount(list: *RocList, elements_refcounted: bool) void {
if (elements_refcounted and !list.isSeamlessSlice()) {
if (list.getAllocationDataPtr()) |source| {
// Verify alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const source_int = @intFromPtr(source);
if (source_int % @alignOf(usize) != 0) {
std.debug.panic("storeListElementCount: source=0x{x} is not {}-byte aligned", .{ source_int, @alignOf(usize) });
}
}
const ptr = @as([*]usize, @ptrCast(@alignCast(source))) - 2;
ptr[0] = list.length;
}
@ -1074,6 +1265,13 @@ fn copyListValueToPtr(
dest_ptr: *anyopaque,
dest_layout: Layout,
) error{ TypeMismatch, NullStackPointer }!void {
// Verify dest_ptr alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const dest_ptr_int = @intFromPtr(dest_ptr);
if (dest_ptr_int % @alignOf(RocList) != 0) {
std.debug.panic("copyListValueToPtr: dest_ptr=0x{x} is not {}-byte aligned", .{ dest_ptr_int, @alignOf(RocList) });
}
}
var dest_list: *RocList = @ptrCast(@alignCast(dest_ptr));
switch (dest_layout.tag) {
@ -1083,6 +1281,13 @@ fn copyListValueToPtr(
dest_list.* = RocList.empty();
return;
}
// Verify src.ptr alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const src_ptr_int_zst = @intFromPtr(src.ptr.?);
if (src_ptr_int_zst % @alignOf(RocList) != 0) {
std.debug.panic("copyListValueToPtr(list_of_zst): src.ptr=0x{x} is not {}-byte aligned", .{ src_ptr_int_zst, @alignOf(RocList) });
}
}
const src_list = @as(*const RocList, @ptrCast(@alignCast(src.ptr.?))).*;
dest_list.* = src_list;
dest_list.incref(1, false);
@ -1094,6 +1299,13 @@ fn copyListValueToPtr(
return;
}
if (src.layout.tag != .list) return error.TypeMismatch;
// Verify src.ptr alignment before @alignCast
if (comptime builtin.mode == .Debug) {
const src_ptr_int_list = @intFromPtr(src.ptr.?);
if (src_ptr_int_list % @alignOf(RocList) != 0) {
std.debug.panic("copyListValueToPtr(list): src.ptr=0x{x} is not {}-byte aligned", .{ src_ptr_int_list, @alignOf(RocList) });
}
}
const src_list = @as(*const RocList, @ptrCast(@alignCast(src.ptr.?))).*;
dest_list.* = src_list;
@ -1244,6 +1456,11 @@ pub fn asRocStr(self: StackValue) *RocStr {
pub fn asClosure(self: StackValue) *const Closure {
std.debug.assert(self.layout.tag == .closure);
std.debug.assert(self.ptr != null);
const ptr_val = @intFromPtr(self.ptr.?);
const required_align = @alignOf(Closure);
if (ptr_val % required_align != 0) {
std.debug.panic("[asClosure] ALIGNMENT MISMATCH: ptr=0x{x} required_align={} (mod={})", .{ ptr_val, required_align, ptr_val % required_align });
}
return @ptrCast(@alignCast(self.ptr.?));
}
@ -1279,7 +1496,7 @@ pub fn copyTo(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) vo
if (!src_str.isSmallStr()) {
const alloc_ptr = src_str.getAllocationPtr();
const rc_before: isize = if (alloc_ptr) |ptr| blk: {
if (@intFromPtr(ptr) % 8 != 0) break :blk -999;
if (@intFromPtr(ptr) % @alignOf(usize) != 0) break :blk -999;
const isizes: [*]isize = @ptrCast(@alignCast(ptr));
break :blk (isizes - 1)[0];
} else 0;
@ -1387,7 +1604,7 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
} else {
const alloc_ptr = roc_str.getAllocationPtr();
const rc_before: isize = if (alloc_ptr) |ptr| blk: {
if (@intFromPtr(ptr) % 8 != 0) {
if (@intFromPtr(ptr) % @alignOf(usize) != 0) {
traceRefcount("INCREF str ptr=0x{x} MISALIGNED!", .{@intFromPtr(ptr)});
break :blk -999;
}
@ -1449,10 +1666,17 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
// Read discriminant to determine active variant
const disc_ptr = base_ptr + tu_data.discriminant_offset;
const disc_ptr_val = @intFromPtr(disc_ptr);
const discriminant: u32 = switch (tu_data.discriminant_size) {
1 => @as(*const u8, @ptrCast(disc_ptr)).*,
2 => @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*,
4 => @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*,
2 => blk: {
if (disc_ptr_val % @alignOf(u16) != 0) std.debug.panic("[copyToPtr tag_union] u16 disc alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u16, @ptrCast(@alignCast(disc_ptr))).*;
},
4 => blk: {
if (disc_ptr_val % @alignOf(u32) != 0) std.debug.panic("[copyToPtr tag_union] u32 disc alignment error: disc_ptr=0x{x}", .{disc_ptr_val});
break :blk @as(*const u32, @ptrCast(@alignCast(disc_ptr))).*;
},
else => unreachable,
};
@ -1469,6 +1693,32 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void {
increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache);
return;
}
// Handle closures by incref'ing their captures (symmetric with decref)
if (self.layout.tag == .closure) {
if (self.ptr == null) return;
const closure_header: *const layout_mod.Closure = @ptrCast(@alignCast(self.ptr.?));
const captures_layout = layout_cache.getLayout(closure_header.captures_layout_idx);
// Only incref if there are actual captures (record with fields)
if (captures_layout.tag == .record) {
const record_data = layout_cache.getRecordData(captures_layout.data.record.idx);
if (record_data.fields.count > 0) {
if (comptime trace_refcount) {
traceRefcount("INCREF closure captures ptr=0x{x} fields={}", .{
@intFromPtr(self.ptr),
record_data.fields.count,
});
}
const header_size = @sizeOf(layout_mod.Closure);
const cap_align = captures_layout.alignment(layout_cache.targetUsize());
const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits()));
const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?));
const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off);
increfLayoutPtr(captures_layout, rec_ptr, layout_cache);
}
}
return;
}
}
/// Trace helper for refcount operations. Only active when built with -Dtrace-refcount=true.
@ -1511,7 +1761,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
const alloc_ptr = roc_str.getAllocationPtr();
// Only read refcount if pointer is aligned (safety check)
const rc_before: isize = if (alloc_ptr) |ptr| blk: {
if (@intFromPtr(ptr) % 8 != 0) {
if (@intFromPtr(ptr) % @alignOf(usize) != 0) {
traceRefcount("DECREF str ptr=0x{x} MISALIGNED!", .{@intFromPtr(ptr)});
break :blk -999;
}
@ -1653,6 +1903,9 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void {
},
.closure => {
decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops);
if (comptime trace_refcount) {
traceRefcount("DECREF closure DONE ptr=0x{x}", .{@intFromPtr(self.ptr)});
}
return;
},
.tag_union => {

File diff suppressed because it is too large Load diff

View file

@ -25,6 +25,13 @@ pub const StackOverflow = error{
StackOverflow,
};
/// Panics (in every build mode) when `ptr` is not aligned to `alignment` bytes.
/// `context` is interpolated into the panic message to identify the call site.
fn assertAligned(ptr: anytype, alignment: usize, context: []const u8) void {
    const address = @intFromPtr(ptr);
    // Guard clause: an aligned address needs no further work.
    if (address % alignment == 0) return;
    std.debug.panic("{s}: ptr 0x{x} not {}-byte aligned", .{ context, address, alignment });
}
/// Fixed-size stack memory allocator to be used when evaluating Roc IR
pub const Stack = struct {
allocator: std.mem.Allocator,
@ -51,6 +58,7 @@ pub const Stack = struct {
collections.max_roc_alignment,
@returnAddress(),
)) |allocation| {
assertAligned(allocation, collections.max_roc_alignment.toByteUnits(), "Stack.initCapacity");
return .{
.allocator = allocator,
.start = allocation,
@ -104,6 +112,7 @@ pub const Stack = struct {
const result = self.start + self.used + padding;
self.used = new_used;
assertAligned(result, alignment_bytes, "Stack.alloca");
return @ptrCast(result);
}

View file

@ -1181,7 +1181,6 @@ test "comptime eval - U8 valid max value" {
defer cleanupEvalModule(&result);
_ = try result.evaluator.evalAll();
// Debug: print any problems
if (result.problems.len() > 0) {
std.debug.print("\nU8 valid max problems ({d}):\n", .{result.problems.len()});
for (result.problems.problems.items) |problem| {
@ -1683,7 +1682,6 @@ test "comptime eval - F32 valid" {
defer cleanupEvalModule(&result);
_ = try result.evaluator.evalAll();
// Debug: print any problems
if (result.problems.len() > 0) {
std.debug.print("\nF32 problems ({d}):\n", .{result.problems.len()});
for (result.problems.problems.items) |problem| {

View file

@ -142,6 +142,15 @@ test "operator associativity - subtraction" {
try runExpectInt("100 - (50 - (25 - 5))", 70, .no_trace); // Right associative would give 70
}
test "operator associativity - mixed addition and subtraction" {
    // Regression test: `+` and `-` must share the same precedence level and be
    // left-associative. A previous parser bug gave `+` higher precedence than `-`,
    // causing `1 - 2 + 3` to parse as `1 - (2 + 3)` = -4 instead of `(1 - 2) + 3` = 2.
    try runExpectInt("1 - 2 + 3", 2, .no_trace); // (1 - 2) + 3 = 2, NOT 1 - (2 + 3) = -4
    try runExpectInt("5 + 3 - 2", 6, .no_trace); // (5 + 3) - 2 = 6
    try runExpectInt("10 - 5 + 3 - 2", 6, .no_trace); // ((10 - 5) + 3) - 2 = 6
    try runExpectInt("1 + 2 - 3 + 4 - 5", -1, .no_trace); // (((1 + 2) - 3) + 4) - 5 = -1
}
test "operator associativity - multiplication" {
// Left associative: a * b * c should parse as (a * b) * c
try runExpectInt("2 * 3 * 4", 24, .no_trace); // (2 * 3) * 4 = 24
@ -1358,21 +1367,15 @@ test "nested match with Result type - regression" {
// ============================================================================
test "list equality - single element list - regression" {
    // Regression test for a segfault when comparing single-element lists.
    // Original bug report: `main! = || { _bool = [1] == [1] }`
    try runExpectBool("[1] == [1]", true, .no_trace);
}
test "list equality - nested lists - regression" {
    // Regression test for a segfault when comparing nested (list-of-list) values.
    // Original bug report: `_bool = [[1],[2]] == [[1],[2]]`
    try runExpectBool("[[1],[2]] == [[1],[2]]", true, .no_trace); // two single-element inner lists
    try runExpectBool("[[1, 2]] == [[1, 2]]", true, .no_trace); // one multi-element inner list
}
test "list equality - single string element list - regression" {
    // Regression test for a crash where string elements were compared as
    // numeric scalars instead of string scalars.
    // Original bug report: `main! = || { _bool = [""] == [""] }`
    try runExpectBool("[\"\"] == [\"\"]", true, .no_trace); // empty-string element
    try runExpectBool("[\"hello\"] == [\"hello\"]", true, .no_trace); // non-empty (small) string element
}
test "if block with local bindings - regression" {

View file

@ -94,6 +94,7 @@ pub fn runExpectInt(src: []const u8, expected_int: i128, should_trace: enum { tr
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
// Check if this is an integer or Dec
const int_value = if (result.layout.tag == .scalar and result.layout.data.scalar.tag == .int) blk: {
@ -132,6 +133,7 @@ pub fn runExpectBool(src: []const u8, expected_bool: bool, should_trace: enum {
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
// For boolean results, read the underlying byte value
if (result.layout.tag == .scalar and result.layout.data.scalar.tag == .int) {
@ -171,6 +173,7 @@ pub fn runExpectF32(src: []const u8, expected_f32: f32, should_trace: enum { tra
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
const actual = result.asF32();
const epsilon: f32 = 0.0001;
@ -204,6 +207,7 @@ pub fn runExpectF64(src: []const u8, expected_f64: f64, should_trace: enum { tra
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
const actual = result.asF64();
const epsilon: f64 = 0.000000001;
@ -239,6 +243,7 @@ pub fn runExpectDec(src: []const u8, expected_dec_num: i128, should_trace: enum
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
const actual_dec = result.asDec();
if (actual_dec.num != expected_dec_num) {
@ -269,6 +274,7 @@ pub fn runExpectStr(src: []const u8, expected_str: []const u8, should_trace: enu
const ops = test_env_instance.get_ops();
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer interpreter.cleanupBindings(ops);
try std.testing.expect(result.layout.tag == .scalar);
try std.testing.expect(result.layout.data.scalar.tag == .str);
@ -320,6 +326,7 @@ pub fn runExpectTuple(src: []const u8, expected_elements: []const ExpectedElemen
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
// Verify we got a tuple layout
try std.testing.expect(result.layout.tag == .tuple);
@ -372,6 +379,7 @@ pub fn runExpectRecord(src: []const u8, expected_fields: []const ExpectedField,
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
// Verify we got a record layout
try std.testing.expect(result.layout.tag == .record);
@ -441,6 +449,7 @@ pub fn runExpectListI64(src: []const u8, expected_elements: []const i64, should_
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
// Verify we got a list layout
try std.testing.expect(result.layout.tag == .list or result.layout.tag == .list_of_zst);
@ -751,6 +760,7 @@ test "eval tag - already primitive" {
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
try std.testing.expect(result.layout.tag == .scalar);
try std.testing.expect(result.ptr != null);
@ -783,6 +793,7 @@ test "interpreter reuse across multiple evaluations" {
const result = try interpreter.eval(resources.expr_idx, ops);
const layout_cache = &interpreter.runtime_layout_store;
defer result.decref(layout_cache, ops);
defer interpreter.cleanupBindings(ops);
try std.testing.expect(result.layout.tag == .scalar);

View file

@ -52,12 +52,12 @@ test "list refcount complex - same record multiple times in list" {
test "list refcount complex - list of records with nested data" {
try runExpectInt(
\\{
\\ r1 = {nums: [1, 2]}
\\ r2 = {nums: [3, 4]}
\\ r1 = {inner: {val: 10}}
\\ r2 = {inner: {val: 20}}
\\ lst = [r1, r2]
\\ match lst { [first, ..] => match first.nums { [a, b] => a + b, _ => 0 }, _ => 0 }
\\ match lst { [first, ..] => first.inner.val, _ => 0 }
\\}
, 3, .no_trace);
, 10, .no_trace);
}
// ===== Lists of Tuples =====
@ -105,32 +105,32 @@ test "list refcount complex - list of tags with strings" {
test "list refcount complex - list of records of lists of strings" {
try runExpectStr(
\\{
\\ r1 = {words: ["a", "b"]}
\\ r2 = {words: ["c"]}
\\ r1 = {items: ["a", "b"]}
\\ r2 = {items: ["c", "d"]}
\\ lst = [r1, r2]
\\ match lst { [first, ..] => match first.words { [s, ..] => s, _ => "" }, _ => "" }
\\ match lst { [first, ..] => match first.items { [s, ..] => s, _ => "" }, _ => "" }
\\}
, "a", .no_trace);
}
test "list refcount complex - inline complex structure" {
try runExpectInt(
\\match [{val: [1, 2]}, {val: [3, 4]}] {
\\ [first, ..] => match first.val { [a, b] => a + b, _ => 0 },
\\ _ => 0
\\{
\\ data = [{val: 1}, {val: 2}]
\\ match data { [first, ..] => first.val, _ => 0 }
\\}
, 3, .no_trace);
, 1, .no_trace);
}
test "list refcount complex - deeply nested mixed structures" {
try runExpectStr(
try runExpectInt(
\\{
\\ inner = ["x"]
\\ rec = {data: inner}
\\ lst = [rec, rec]
\\ match lst { [first, ..] => match first.data { [s] => s, _ => "" }, _ => "" }
\\ inner = {x: 42}
\\ outer = {nested: inner}
\\ lst = [outer]
\\ match lst { [first, ..] => first.nested.x, _ => 0 }
\\}
, "x", .no_trace);
, 42, .no_trace);
}
test "list refcount complex - list of Ok/Err tags" {

View file

@ -61,11 +61,11 @@ test "list refcount containers - tuple with string list" {
test "list refcount containers - single field record with list" {
try runExpectInt(
\\{
\\ x = [1, 2]
\\ r = {lst: x}
\\ match r.lst { [a, b] => a + b, _ => 0 }
\\ lst = [1, 2, 3]
\\ r = {items: lst}
\\ match r.items { [a, b, c] => a + b + c, _ => 0 }
\\}
, 3, .no_trace);
, 6, .no_trace);
}
test "list refcount containers - multiple fields with lists" {
@ -73,38 +73,39 @@ test "list refcount containers - multiple fields with lists" {
\\{
\\ x = [1, 2]
\\ y = [3, 4]
\\ r = {a: x, b: y}
\\ match r.a { [first, ..] => first, _ => 0 }
\\ r = {first: x, second: y}
\\ match r.first { [a, b] => a + b, _ => 0 }
\\}
, 1, .no_trace);
, 3, .no_trace);
}
test "list refcount containers - same list in multiple fields" {
try runExpectInt(
\\{
\\ x = [1, 2]
\\ r = {a: x, b: x}
\\ match r.a { [first, ..] => first, _ => 0 }
\\ lst = [10, 20]
\\ r = {a: lst, b: lst}
\\ match r.a { [x, y] => x + y, _ => 0 }
\\}
, 1, .no_trace);
, 30, .no_trace);
}
test "list refcount containers - nested record with list" {
try runExpectInt(
\\{
\\ x = [1, 2]
\\ r = {inner: {lst: x}}
\\ match r.inner.lst { [a, b] => a + b, _ => 0 }
\\ lst = [5, 6]
\\ inner = {data: lst}
\\ outer = {nested: inner}
\\ match outer.nested.data { [a, b] => a + b, _ => 0 }
\\}
, 3, .no_trace);
, 11, .no_trace);
}
test "list refcount containers - record with string list" {
try runExpectStr(
\\{
\\ x = ["hello", "world"]
\\ r = {words: x}
\\ match r.words { [first, ..] => first, _ => "" }
\\ lst = ["hello", "world"]
\\ r = {items: lst}
\\ match r.items { [first, ..] => first, _ => "" }
\\}
, "hello", .no_trace);
}
@ -112,11 +113,11 @@ test "list refcount containers - record with string list" {
test "list refcount containers - record with mixed types" {
try runExpectInt(
\\{
\\ lst = [10, 20]
\\ r = {numbers: lst, name: "test"}
\\ match r.numbers { [a, b] => a + b, _ => 0 }
\\ lst = [1, 2, 3]
\\ r = {count: 42, items: lst}
\\ r.count
\\}
, 30, .no_trace);
, 42, .no_trace);
}
// ===== Tags with Lists =====
@ -160,10 +161,10 @@ test "list refcount containers - tuple of records with lists" {
\\{
\\ lst1 = [1, 2]
\\ lst2 = [3, 4]
\\ r1 = {data: lst1}
\\ r2 = {data: lst2}
\\ r1 = {items: lst1}
\\ r2 = {items: lst2}
\\ t = (r1, r2)
\\ match t { (first, _) => match first.data { [a, b] => a + b, _ => 0 }, _ => 0 }
\\ match t { (first, _) => match first.items { [a, b] => a + b, _ => 0 }, _ => 0 }
\\}
, 3, .no_trace);
}
@ -171,21 +172,21 @@ test "list refcount containers - tuple of records with lists" {
test "list refcount containers - record of tuples with lists" {
try runExpectInt(
\\{
\\ lst = [10, 20]
\\ tup = (lst, lst)
\\ r = {pair: tup}
\\ match r.pair { (first, _) => match first { [a, b] => a + b, _ => 0 }, _ => 0 }
\\ lst = [5, 6]
\\ t = (lst, 99)
\\ r = {data: t}
\\ match r.data { (items, _) => match items { [a, b] => a + b, _ => 0 }, _ => 0 }
\\}
, 30, .no_trace);
, 11, .no_trace);
}
test "list refcount containers - tag with record containing list" {
try runExpectInt(
\\{
\\ lst = [5, 10]
\\ r = {values: lst}
\\ tag = Data(r)
\\ match tag { Data(rec) => match rec.values { [a, b] => a + b, _ => 0 }, _ => 0 }
\\ lst = [7, 8]
\\ r = {items: lst}
\\ tag = Some(r)
\\ match tag { Some(rec) => match rec.items { [a, b] => a + b, _ => 0 }, None => 0 }
\\}
, 15, .no_trace);
}

View file

@ -96,3 +96,32 @@ test "list refcount function - nested function calls with lists" {
\\}
, 10, .no_trace);
}
test "list refcount function - same list twice in tuple returned from function" {
    // Regression test for the segfault pattern seen in the fx platform tests:
    // a function takes a list and returns a tuple containing that list twice,
    // so both tuple slots alias one list value. Destructuring the tuple and
    // using only the first element must still work.
    // Expected: [1, 2] destructures to [a, b], so a + b == 3.
    try runExpectInt(
        \\{
        \\ make_pair = |lst| (lst, lst)
        \\ x = [1, 2]
        \\ t = make_pair(x)
        \\ match t { (first, _) => match first { [a, b] => a + b, _ => 0 }, _ => 0 }
        \\}
    , 3, .no_trace);
}
test "list refcount function - same list twice passed to function" {
    // Tests passing the same list twice as arguments to a function, so both
    // parameters alias one list value. Each argument's head is read:
    // first == 1 and second == 1, so the expected result is 2.
    // NOTE(review): the Roc lambda is named add_lens but it sums the heads of
    // the two lists, not their lengths.
    try runExpectInt(
        \\{
        \\ add_lens = |a, b|
        \\ match a {
        \\ [first, ..] => match b { [second, ..] => first + second, _ => 0 },
        \\ _ => 0
        \\ }
        \\ x = [1, 2]
        \\ add_lens(x, x)
        \\}
    , 2, .no_trace);
}

View file

@ -1,6 +1,6 @@
//! List refcounting tests - Phase 9: Nested Lists
//!
//! CRITICAL PHASE: Lists within lists create recursive refcounting!
//! Lists within lists create recursive refcounting.
//!
//! This tests the most complex refcounting scenario:
//! - Outer list container refcount

View file

@ -1,6 +1,6 @@
//! List refcounting tests - Phase 4: Lists with Refcounted Elements (Strings)
//!
//! CRITICAL PHASE: This introduces two-level refcounting!
//! This phase introduces two-level refcounting:
//! - List container must be refcounted
//! - String elements must be refcounted
//!

View file

@ -3,10 +3,12 @@
//! memory safety, and interpreter integration.
const std = @import("std");
const builtin = @import("builtin");
const builtins = @import("builtins");
const base = @import("base");
const can = @import("can");
const types = @import("types");
const collections = @import("collections");
const import_mapping_mod = types.import_mapping;
const eval = @import("eval");
const ipc = @import("ipc");
@ -16,6 +18,13 @@ var shim_import_mapping = import_mapping_mod.ImportMapping.init(std.heap.page_al
const SharedMemoryAllocator = ipc.SharedMemoryAllocator;
// Global base pointer for the serialized header + env.
// Is a weak extern that can be overwritten by `roc build` when embedding module data.
// If null at runtime, we're in IPC mode (roc run) and read from shared memory.
// If non-null, we're in embedded mode (roc build) and data is compiled into the binary.
extern var roc__serialized_base_ptr: ?[*]align(1) u8;
extern var roc__serialized_size: usize;
// Global state for shared memory - initialized once per process
var shared_memory_initialized: std.atomic.Value(bool) = std.atomic.Value(bool).init(false);
var global_shm: ?SharedMemoryAllocator = null;
@ -35,6 +44,8 @@ const FIRST_ALLOC_OFFSET = 504; // 0x1f8 - First allocation starts at this offse
const MODULE_ENV_OFFSET = 0x10; // 8 bytes for u64, 4 bytes for u32, 4 bytes padding
// Header structure that matches the one in main.zig (multi-module format)
// For embedded mode: parent_base_addr == 0
// For IPC mode: parent_base_addr == actual parent address
const Header = struct {
parent_base_addr: u64,
module_count: u32,
@ -70,17 +81,17 @@ const ShimError = error{
export fn roc_entrypoint(entry_idx: u32, ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, arg_ptr: ?*anyopaque) callconv(.c) void {
evaluateFromSharedMemory(entry_idx, ops, ret_ptr, arg_ptr) catch |err| {
// Only show this generic error if we haven't already crashed with a more specific message
// (errors like Crash already triggered roc_crashed with details)
if (err != error.Crash) {
// (errors like Crash and StackOverflow already triggered roc_crashed with details)
if (err != error.Crash and err != error.StackOverflow) {
var buf: [256]u8 = undefined;
const msg2 = std.fmt.bufPrint(&buf, "Error evaluating from shared memory: {s}", .{@errorName(err)}) catch "Error evaluating from shared memory";
const msg2 = std.fmt.bufPrint(&buf, "Error evaluating: {s}", .{@errorName(err)}) catch "Error evaluating";
ops.crash(msg2);
}
};
}
/// Initialize shared memory and ModuleEnv once per process
fn initializeSharedMemoryOnce(roc_ops: *RocOps) ShimError!void {
fn initializeOnce(roc_ops: *RocOps) ShimError!void {
// Fast path: if already initialized, return immediately
if (shared_memory_initialized.load(.acquire)) {
return;
@ -98,18 +109,35 @@ fn initializeSharedMemoryOnce(roc_ops: *RocOps) ShimError!void {
const allocator = std.heap.page_allocator;
var buf: [256]u8 = undefined;
// Get page size
const page_size = SharedMemoryAllocator.getSystemPageSize() catch 4096;
if (roc__serialized_base_ptr == null) {
// Roc run path: Use the shared memory allocator.
// Create shared memory allocator from coordination info
var shm = SharedMemoryAllocator.fromCoordination(allocator, page_size) catch |err| {
const msg2 = std.fmt.bufPrint(&buf, "Failed to create shared memory allocator: {s}", .{@errorName(err)}) catch "Failed to create shared memory allocator";
roc_ops.crash(msg2);
return error.SharedMemoryError;
};
// Get page size
const page_size = SharedMemoryAllocator.getSystemPageSize() catch 4096;
// Set up ModuleEnv from shared memory
const setup_result = try setupModuleEnv(&shm, roc_ops);
// Create shared memory allocator from coordination info
// Note: shm lasts for the lifetime of the program and is never freed.
var shm = SharedMemoryAllocator.fromCoordination(allocator, page_size) catch |err| {
const msg2 = std.fmt.bufPrint(&buf, "Failed to create shared memory allocator: {s}", .{@errorName(err)}) catch "Failed to create shared memory allocator";
roc_ops.crash(msg2);
return error.SharedMemoryError;
};
// Validate memory layout - we need at least space for the header
const min_required_size = FIRST_ALLOC_OFFSET + @sizeOf(Header);
if (shm.total_size < min_required_size) {
const msg = std.fmt.bufPrint(&buf, "Invalid memory layout: size {} is too small (minimum required: {})", .{ shm.total_size, min_required_size }) catch "Invalid memory layout";
roc_ops.crash(msg);
return error.MemoryLayoutInvalid;
}
// setup base pointer
roc__serialized_base_ptr = shm.getBasePtr();
roc__serialized_size = shm.total_size;
}
// Set up ModuleEnv from serialized data (embedded or shared memory)
const setup_result = try setupModuleEnv(roc_ops);
// Load builtin modules from compiled binary (same as CLI does)
const builtin_modules = eval.BuiltinModules.init(allocator) catch |err| {
@ -119,7 +147,6 @@ fn initializeSharedMemoryOnce(roc_ops: *RocOps) ShimError!void {
};
// Store globals
global_shm = shm;
global_env_ptr = setup_result.primary_env;
global_app_env_ptr = setup_result.app_env;
global_builtin_modules = builtin_modules;
@ -128,13 +155,12 @@ fn initializeSharedMemoryOnce(roc_ops: *RocOps) ShimError!void {
shared_memory_initialized.store(true, .release);
}
/// Cross-platform shared memory evaluation
/// Cross-platform evaluation (works for both IPC and embedded modes)
fn evaluateFromSharedMemory(entry_idx: u32, roc_ops: *RocOps, ret_ptr: *anyopaque, arg_ptr: ?*anyopaque) ShimError!void {
// Initialize shared memory once per process
try initializeSharedMemoryOnce(roc_ops);
try initializeOnce(roc_ops);
// Use the global shared memory and environment
const shm = global_shm.?;
const env_ptr = global_env_ptr.?;
const app_env = global_app_env_ptr;
@ -146,7 +172,7 @@ fn evaluateFromSharedMemory(entry_idx: u32, roc_ops: *RocOps, ret_ptr: *anyopaqu
defer interpreter.deinit();
// Get expression info from shared memory using entry_idx
const base_ptr = shm.getBasePtr();
const base_ptr = roc__serialized_base_ptr.?;
var buf: [256]u8 = undefined;
// Read the header structure from shared memory
@ -159,7 +185,7 @@ fn evaluateFromSharedMemory(entry_idx: u32, roc_ops: *RocOps, ret_ptr: *anyopaqu
}
const def_offset = header_ptr.def_indices_offset + entry_idx * @sizeOf(u32);
const def_idx_raw = safe_memory.safeRead(u32, base_ptr, @intCast(def_offset), shm.total_size) catch |err| {
const def_idx_raw = safe_memory.safeRead(u32, base_ptr, @intCast(def_offset), roc__serialized_size) catch |err| {
const read_err = std.fmt.bufPrint(&buf, "Failed to read def_idx: {}", .{err}) catch "Failed to read def_idx";
roc_ops.crash(read_err);
return error.MemoryLayoutInvalid;
@ -180,23 +206,16 @@ const SetupResult = struct {
app_env: *ModuleEnv, // App env (for e_lookup_required resolution)
};
/// Set up ModuleEnv from shared memory with proper relocation (multi-module format)
fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!SetupResult {
// Validate memory layout - we need at least space for the header
const min_required_size = FIRST_ALLOC_OFFSET + @sizeOf(Header);
if (shm.total_size < min_required_size) {
var buf: [256]u8 = undefined;
const msg = std.fmt.bufPrint(&buf, "Invalid memory layout: size {} is too small (minimum required: {})", .{ shm.total_size, min_required_size }) catch "Invalid memory layout";
roc_ops.crash(msg);
return error.MemoryLayoutInvalid;
}
/// Set up ModuleEnv from serialized data with proper relocation (multi-module format)
/// Works for both IPC mode (roc run) and embedded mode (roc build)
fn setupModuleEnv(roc_ops: *RocOps) ShimError!SetupResult {
var buf: [256]u8 = undefined;
// Get base pointer
const base_ptr = shm.getBasePtr();
const base_ptr = roc__serialized_base_ptr.?;
const allocator = std.heap.page_allocator;
// Read parent's shared memory base address from header and calculate relocation offset
// For embedded mode: parent_base_addr == 0
// For IPC mode: parent_base_addr == actual parent address
const header_addr = @intFromPtr(base_ptr) + FIRST_ALLOC_OFFSET;
const header_ptr: *const Header = @ptrFromInt(header_addr);
const parent_base_addr = header_ptr.parent_base_addr;
@ -207,6 +226,23 @@ fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!Setup
// Use signed arithmetic to avoid overflow on 64-bit addresses
const offset: i64 = @as(i64, @intCast(child_base_addr)) - @as(i64, @intCast(parent_base_addr));
// Verify offset preserves alignment (ASLR can cause misaligned shared memory mapping)
if (comptime builtin.mode == .Debug) {
const REQUIRED_ALIGNMENT: u64 = collections.SERIALIZATION_ALIGNMENT.toByteUnits();
const abs_offset: u64 = @abs(offset);
if (abs_offset % REQUIRED_ALIGNMENT != 0) {
const err_msg = std.fmt.bufPrint(&buf, "Relocation offset 0x{x} not {}-byte aligned! parent=0x{x} child=0x{x}", .{
abs_offset,
REQUIRED_ALIGNMENT,
parent_base_addr,
child_base_addr,
}) catch "Relocation offset misaligned";
std.debug.print("[MAIN] {s}\n", .{err_msg});
roc_ops.crash(err_msg);
return error.MemoryLayoutInvalid;
}
}
// Sanity check for overflow potential
if (@abs(offset) > std.math.maxInt(isize) / 2) {
const err_msg = std.fmt.bufPrint(&buf, "Relocation offset too large: {}", .{offset}) catch "Relocation offset too large";
@ -216,6 +252,20 @@ fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!Setup
// Get module env offsets array
const module_envs_base_addr = @intFromPtr(base_ptr) + @as(usize, @intCast(header_ptr.module_envs_offset));
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (module_envs_base_addr % @alignOf(u64) != 0) {
const err_msg = std.fmt.bufPrint(&buf, "module_envs_base_addr misaligned: addr=0x{x}, base=0x{x}, offset=0x{x}", .{
module_envs_base_addr,
@intFromPtr(base_ptr),
header_ptr.module_envs_offset,
}) catch "module_envs_base_addr misaligned";
roc_ops.crash(err_msg);
return error.MemoryLayoutInvalid;
}
}
const module_env_offsets: [*]const u64 = @ptrFromInt(module_envs_base_addr);
// Load all module envs (platform modules first, app module last)
@ -229,6 +279,20 @@ fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!Setup
for (0..module_count - 1) |i| {
const module_env_offset = module_env_offsets[i];
const module_env_addr = @intFromPtr(base_ptr) + @as(usize, @intCast(module_env_offset));
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (module_env_addr % @alignOf(ModuleEnv) != 0) {
const err_msg = std.fmt.bufPrint(&buf, "module_env_addr[{}] misaligned: addr=0x{x}, offset=0x{x}", .{
i,
module_env_addr,
module_env_offset,
}) catch "module_env_addr misaligned";
roc_ops.crash(err_msg);
return error.MemoryLayoutInvalid;
}
}
const module_env_ptr: *ModuleEnv = @ptrFromInt(module_env_addr);
module_env_ptr.relocate(@intCast(offset));
module_env_ptr.gpa = allocator;
@ -240,6 +304,19 @@ fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!Setup
// Get and relocate the app module using the header's app_env_offset
const app_env_addr = @intFromPtr(base_ptr) + @as(usize, @intCast(header_ptr.app_env_offset));
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (app_env_addr % @alignOf(ModuleEnv) != 0) {
const err_msg = std.fmt.bufPrint(&buf, "app_env_addr misaligned: addr=0x{x}, offset=0x{x}", .{
app_env_addr,
header_ptr.app_env_offset,
}) catch "app_env_addr misaligned";
roc_ops.crash(err_msg);
return error.MemoryLayoutInvalid;
}
}
const app_env_ptr: *ModuleEnv = @ptrFromInt(app_env_addr);
app_env_ptr.relocate(@intCast(offset));
app_env_ptr.gpa = allocator;
@ -247,6 +324,19 @@ fn setupModuleEnv(shm: *SharedMemoryAllocator, roc_ops: *RocOps) ShimError!Setup
// Determine primary env: platform main if available, otherwise app
const primary_env: *ModuleEnv = if (header_ptr.platform_main_env_offset != 0) blk: {
const platform_env_addr = @intFromPtr(base_ptr) + @as(usize, @intCast(header_ptr.platform_main_env_offset));
// Verify alignment before @ptrFromInt
if (comptime builtin.mode == .Debug) {
if (platform_env_addr % @alignOf(ModuleEnv) != 0) {
const err_msg = std.fmt.bufPrint(&buf, "platform_env_addr misaligned: addr=0x{x}, offset=0x{x}", .{
platform_env_addr,
header_ptr.platform_main_env_offset,
}) catch "platform_env_addr misaligned";
roc_ops.crash(err_msg);
return error.MemoryLayoutInvalid;
}
}
const platform_env_ptr: *ModuleEnv = @ptrFromInt(platform_env_addr);
platform_env_ptr.relocate(@intCast(offset));
platform_env_ptr.gpa = allocator;

View file

@ -12,6 +12,15 @@ pub const Handle = platform.Handle;
pub const FdInfo = coordination.FdInfo;
pub const CoordinationError = coordination.CoordinationError;
/// A properly aligned header structure for sending a serialized ModuleEnv over IPC.
pub const ModuleEnvHeader = extern struct {
    // Shared-memory base address in the parent process; presumably used by the
    // reader to compute a relocation offset against its own mapping — confirm
    // against the consumer in the shim/main.
    parent_base_addr: u64,
    // Number of entries described by the def-indices table.
    entry_count: u32,
    _padding: u32, // Ensure 8-byte alignment of the following u64 fields
    // Byte offset of the def-indices table within the serialized region.
    def_indices_offset: u64,
    // Byte offset of the serialized ModuleEnv within the serialized region.
    module_env_offset: u64,
};
test "ipc tests" {
std.testing.refAllDecls(@This());
std.testing.refAllDecls(@import("coordination.zig"));

View file

@ -84,6 +84,8 @@ pub const windows = if (is_windows) struct {
/// POSIX shared memory functions
pub const posix = if (!is_windows) struct {
// Note: mmap returns MAP_FAILED ((void*)-1) on error, NOT NULL
// So we declare it as non-optional and check against MAP_FAILED
pub extern "c" fn mmap(
addr: ?*anyopaque,
len: usize,
@ -91,7 +93,7 @@ pub const posix = if (!is_windows) struct {
flags: c_int,
fd: c_int,
offset: std.c.off_t,
) ?*anyopaque;
) *anyopaque;
pub extern "c" fn munmap(addr: *anyopaque, len: usize) c_int;
pub extern "c" fn close(fd: c_int) c_int;
@ -101,6 +103,7 @@ pub const posix = if (!is_windows) struct {
pub const PROT_READ = 0x01;
pub const PROT_WRITE = 0x02;
pub const MAP_SHARED = 0x0001;
pub const MAP_FAILED: *anyopaque = @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))));
} else struct {};
/// Shared memory errors
@ -311,20 +314,13 @@ pub fn mapMemory(handle: Handle, size: usize, base_addr: ?*anyopaque) SharedMemo
handle,
0,
);
// mmap returns MAP_FAILED (which is (void *)-1) on error, not null
// Need to check for both null and MAP_FAILED
if (ptr == null) {
std.log.err("POSIX: Failed to map shared memory - null returned (size: {})", .{size});
return error.MmapFailed;
}
const ptr_value = @intFromPtr(ptr.?);
if (ptr_value == std.math.maxInt(usize)) {
// This is MAP_FAILED (-1 cast to pointer)
// mmap returns MAP_FAILED ((void*)-1) on error, not NULL
if (ptr == posix.MAP_FAILED) {
const errno = std.c._errno().*;
std.log.err("POSIX: Failed to map shared memory - MAP_FAILED (size: {}, fd: {}, errno: {})", .{ size, handle, errno });
std.log.err("POSIX: Failed to map shared memory (size: {}, fd: {}, errno: {})", .{ size, handle, errno });
return error.MmapFailed;
}
return ptr.?;
return ptr;
},
else => return error.UnsupportedPlatform,
}

View file

@ -1,3 +1,5 @@
//! LSP server capability definitions for the Roc language server.
const std = @import("std");
/// Aggregates all server capabilities supported by the Roc LSP.

View file

@ -1,3 +1,5 @@
//! Document storage for tracking open text documents in the LSP server.
const std = @import("std");
/// Stores the latest contents of each open text document.

View file

@ -1,3 +1,5 @@
//! Handler for LSP `textDocument/didChange` notifications.
const std = @import("std");
const DocumentStore = @import("../document_store.zig").DocumentStore;

View file

@ -1,3 +1,5 @@
//! Handler for LSP `textDocument/didOpen` notifications.
const std = @import("std");
/// Handler for `textDocument/didOpen` notifications.

View file

@ -1,3 +1,5 @@
//! Tests for the LSP document store.
const std = @import("std");
const DocumentStore = @import("../document_store.zig").DocumentStore;

View file

@ -729,6 +729,30 @@ pub const Diagnostic = struct {
nominal_associated_cannot_have_final_expression,
type_alias_cannot_have_associated,
where_clause_not_allowed_in_type_declaration,
// Targets section parse errors
expected_targets,
expected_targets_colon,
expected_targets_open_curly,
expected_targets_close_curly,
expected_targets_field_name,
expected_targets_field_colon,
expected_targets_files_string,
unknown_targets_field,
// Target entry parse errors
expected_target_link_open_curly,
expected_target_link_close_curly,
expected_target_name,
expected_target_colon,
expected_target_files_open_square,
expected_target_files_close_square,
expected_target_file,
expected_target_file_string_end,
// Semantic warnings (detected at CLI time, not parse time)
targets_exe_empty,
targets_duplicate_target,
};
};
@ -1620,6 +1644,7 @@ pub const Header = union(enum) {
exposes: Collection.Idx,
packages: Collection.Idx,
provides: Collection.Idx,
targets: ?TargetsSection.Idx, // Required for new platforms, optional during migration
region: TokenizedRegion,
},
hosted: struct {
@ -1989,6 +2014,44 @@ pub const ExposedItem = union(enum) {
}
};
/// A targets section in a platform header, holding an optional "files:"
/// directive and an optional `exe` link-type section.
pub const TargetsSection = struct {
    // Token of the "files:" directive string literal, if present.
    files_path: ?Token.Idx,
    // Index of the `exe: { ... }` link-type section, if present.
    exe: ?TargetLinkType.Idx,
    // static_lib and shared_lib to be added later
    // Source region covered by the whole section.
    region: TokenizedRegion,

    pub const Idx = enum(u32) { _ };
};
/// A link type section (exe, static_lib, shared_lib) — a collection of
/// per-target entries under a single link kind.
pub const TargetLinkType = struct {
    // Span of the TargetEntry items belonging to this link type.
    entries: TargetEntry.Span,
    // Source region covered by the section.
    region: TokenizedRegion,

    pub const Idx = enum(u32) { _ };
};
/// Single target entry: x64musl: ["crt1.o", "host.o", app]
pub const TargetEntry = struct {
    // LowerIdent token naming the target (e.g. x64musl, arm64mac).
    target: Token.Idx,
    // Span of the TargetFile items listed for this target.
    files: TargetFile.Span,
    // Source region covered by the entry.
    region: TokenizedRegion,

    pub const Idx = enum(u32) { _ };
    pub const Span = struct { span: base.DataSpan };
};
/// File item in target list — either a quoted path, a special bare
/// identifier, or a malformed placeholder recorded during error recovery.
pub const TargetFile = union(enum) {
    // A quoted file path, e.g. "crt1.o".
    string_literal: Token.Idx,
    // A special bare identifier, e.g. app, win_gui.
    special_ident: Token.Idx,
    // A parse error recorded in place of a file item.
    malformed: struct { reason: Diagnostic.Tag, region: TokenizedRegion },

    pub const Idx = enum(u32) { _ };
    pub const Span = struct { span: base.DataSpan };
};
/// TODO
pub const TypeHeader = struct {
name: Token.Idx,

View file

@ -484,6 +484,33 @@ pub const Tag = enum {
/// Collection of type annotations
collection_ty_anno,
// Target section nodes
/// A targets section in a platform header
/// * main_token - files string token (or 0 if no files directive)
/// * lhs - exe TargetLinkType index (or 0 if none)
/// * rhs - reserved for future (static_lib, shared_lib)
targets_section,
/// A target link type section (exe, static_lib, shared_lib)
/// * lhs - start of entries span
/// * rhs - length of entries span
target_link_type,
/// A single target entry: x64musl: ["crt1.o", "host.o", app]
/// * main_token - target name identifier token
/// * lhs - start of files span
/// * rhs - length of files span
target_entry,
/// A string literal file in a target list: "crt1.o"
/// * main_token - string token
target_file_string,
/// A special identifier in a target list: app, win_gui
/// * main_token - identifier token
target_file_ident,
};
/// Unstructured information about a Node. These

View file

@ -40,6 +40,8 @@ scratch_type_annos: base.Scratch(AST.TypeAnno.Idx),
scratch_anno_record_fields: base.Scratch(AST.AnnoRecordField.Idx),
scratch_exposed_items: base.Scratch(AST.ExposedItem.Idx),
scratch_where_clauses: base.Scratch(AST.WhereClause.Idx),
scratch_target_entries: base.Scratch(AST.TargetEntry.Idx),
scratch_target_files: base.Scratch(AST.TargetFile.Idx),
/// Compile-time constants for union variant counts to ensure we don't miss cases
/// when adding/removing variants from AST unions. Update these when modifying the unions.
@ -74,6 +76,8 @@ pub fn initCapacity(gpa: std.mem.Allocator, capacity: usize) std.mem.Allocator.E
.scratch_anno_record_fields = try base.Scratch(AST.AnnoRecordField.Idx).init(gpa),
.scratch_exposed_items = try base.Scratch(AST.ExposedItem.Idx).init(gpa),
.scratch_where_clauses = try base.Scratch(AST.WhereClause.Idx).init(gpa),
.scratch_target_entries = try base.Scratch(AST.TargetEntry.Idx).init(gpa),
.scratch_target_files = try base.Scratch(AST.TargetFile.Idx).init(gpa),
};
_ = try store.nodes.append(gpa, .{
@ -108,6 +112,8 @@ pub fn deinit(store: *NodeStore) void {
store.scratch_anno_record_fields.deinit();
store.scratch_exposed_items.deinit();
store.scratch_where_clauses.deinit();
store.scratch_target_entries.deinit();
store.scratch_target_files.deinit();
}
/// Ensures that all scratch buffers in the store
@ -124,6 +130,8 @@ pub fn emptyScratch(store: *NodeStore) void {
store.scratch_anno_record_fields.clearFrom(0);
store.scratch_exposed_items.clearFrom(0);
store.scratch_where_clauses.clearFrom(0);
store.scratch_target_entries.clearFrom(0);
store.scratch_target_files.clearFrom(0);
}
/// Prints debug information about all nodes and scratch buffers in the store.
@ -240,6 +248,9 @@ pub fn addHeader(store: *NodeStore, header: AST.Header) std.mem.Allocator.Error!
try store.extra_data.append(store.gpa, @intFromEnum(platform.exposes));
try store.extra_data.append(store.gpa, @intFromEnum(platform.packages));
try store.extra_data.append(store.gpa, @intFromEnum(platform.provides));
// Store targets as optional (0 = null, val + OPTIONAL_VALUE_OFFSET = val)
const targets_val: u32 = if (platform.targets) |t| @intFromEnum(t) + OPTIONAL_VALUE_OFFSET else 0;
try store.extra_data.append(store.gpa, targets_val);
const ed_len = store.extra_data.items.len - ed_start;
node.data.lhs = @intCast(ed_start);
@ -1077,7 +1088,11 @@ pub fn getHeader(store: *const NodeStore, header_idx: AST.Header.Idx) AST.Header
},
.platform_header => {
const ed_start = node.data.lhs;
std.debug.assert(node.data.rhs == 5);
std.debug.assert(node.data.rhs == 6);
// Decode optional targets (0 = null, val = val - OPTIONAL_VALUE_OFFSET)
const targets_val = store.extra_data.items[ed_start + 5];
const targets: ?AST.TargetsSection.Idx = if (targets_val == 0) null else @enumFromInt(targets_val - OPTIONAL_VALUE_OFFSET);
return .{ .platform = .{
.name = node.main_token,
@ -1086,6 +1101,7 @@ pub fn getHeader(store: *const NodeStore, header_idx: AST.Header.Idx) AST.Header
.exposes = @enumFromInt(store.extra_data.items[ed_start + 2]),
.packages = @enumFromInt(store.extra_data.items[ed_start + 3]),
.provides = @enumFromInt(store.extra_data.items[ed_start + 4]),
.targets = targets,
.region = node.region,
} };
},
@ -2384,3 +2400,208 @@ pub fn clearScratchWhereClausesFrom(store: *NodeStore, start: u32) void {
pub fn whereClauseSlice(store: *const NodeStore, span: AST.WhereClause.Span) []AST.WhereClause.Idx {
return store.sliceFromSpan(AST.WhereClause.Idx, span.span);
}
// -----------------------------------------------------------------
// Target section functions
// -----------------------------------------------------------------
/// Adds a TargetsSection node and returns its index.
/// The optional `exe` link type is encoded as 0 for null, otherwise the
/// enum value shifted by OPTIONAL_VALUE_OFFSET so that index 0 stays
/// representable.
pub fn addTargetsSection(store: *NodeStore, section: AST.TargetsSection) std.mem.Allocator.Error!AST.TargetsSection.Idx {
    // 0 means "no exe section"; a present index is stored offset.
    const encoded_exe: u32 = if (section.exe) |exe_idx| @intFromEnum(exe_idx) + OPTIONAL_VALUE_OFFSET else 0;
    const appended = try store.nodes.append(store.gpa, Node{
        .tag = .targets_section,
        .main_token = section.files_path orelse 0, // 0 when there is no "files:" directive
        .data = .{ .lhs = encoded_exe, .rhs = 0 }, // rhs reserved for static_lib / shared_lib
        .region = section.region,
    });
    return @enumFromInt(@intFromEnum(appended));
}
/// Adds a TargetLinkType node and returns its index.
/// The entry span is packed into the node data: lhs = start, rhs = length.
pub fn addTargetLinkType(store: *NodeStore, link_type: AST.TargetLinkType) std.mem.Allocator.Error!AST.TargetLinkType.Idx {
    const entries_span = link_type.entries.span;
    const appended = try store.nodes.append(store.gpa, Node{
        .tag = .target_link_type,
        .main_token = 0, // no meaningful token for the link-type node itself
        .data = .{ .lhs = entries_span.start, .rhs = entries_span.len },
        .region = link_type.region,
    });
    return @enumFromInt(@intFromEnum(appended));
}
/// Adds a TargetEntry node and returns its index.
/// The target-name token is stored as main_token; the file span is packed
/// into the node data: lhs = start, rhs = length.
pub fn addTargetEntry(store: *NodeStore, entry: AST.TargetEntry) std.mem.Allocator.Error!AST.TargetEntry.Idx {
    const files_span = entry.files.span;
    const appended = try store.nodes.append(store.gpa, Node{
        .tag = .target_entry,
        .main_token = entry.target,
        .data = .{ .lhs = files_span.start, .rhs = files_span.len },
        .region = entry.region,
    });
    return @enumFromInt(@intFromEnum(appended));
}
/// Adds a TargetFile node and returns its index.
/// Maps each union variant to its node shape directly via a switch
/// expression instead of mutating a placeholder node.
pub fn addTargetFile(store: *NodeStore, file: AST.TargetFile) std.mem.Allocator.Error!AST.TargetFile.Idx {
    const node: Node = switch (file) {
        // A quoted file path: only the string token is recorded.
        .string_literal => |tok| .{
            .tag = .target_file_string,
            .main_token = tok,
            .data = .{ .lhs = 0, .rhs = 0 },
            .region = AST.TokenizedRegion.empty(),
        },
        // A special identifier such as `app`: only the ident token is recorded.
        .special_ident => |tok| .{
            .tag = .target_file_ident,
            .main_token = tok,
            .data = .{ .lhs = 0, .rhs = 0 },
            .region = AST.TokenizedRegion.empty(),
        },
        // A parse error: keep the diagnostic reason in lhs and the region.
        .malformed => |m| .{
            .tag = .malformed,
            .main_token = 0,
            .data = .{ .lhs = @intFromEnum(m.reason), .rhs = 0 },
            .region = m.region,
        },
    };
    const appended = try store.nodes.append(store.gpa, node);
    return @enumFromInt(@intFromEnum(appended));
}
/// Returns the start position for a new Span of TargetEntry.Idxs in scratch.
/// Capture this before parsing a list, then pass it to `targetEntrySpanFrom`
/// (on success) or `clearScratchTargetEntriesFrom` (on failure) to finish.
pub fn scratchTargetEntryTop(store: *NodeStore) u32 {
    return store.scratch_target_entries.top();
}
/// Places a new AST.TargetEntry.Idx in the scratch.
/// Entries accumulate here until `targetEntrySpanFrom` moves them into extra_data.
pub fn addScratchTargetEntry(store: *NodeStore, idx: AST.TargetEntry.Idx) std.mem.Allocator.Error!void {
    try store.scratch_target_entries.append(idx);
}
/// Creates a new span starting at `start`, moving the scratch entries
/// collected since then into extra_data. The scratch list is truncated back
/// to `start` on exit (via defer), including on allocation failure.
pub fn targetEntrySpanFrom(store: *NodeStore, start: u32) std.mem.Allocator.Error!AST.TargetEntry.Span {
    const end = store.scratch_target_entries.top();
    defer store.scratch_target_entries.clearFrom(start);
    const ed_start: u32 = @intCast(store.extra_data.items.len);
    for (store.scratch_target_entries.items.items[@intCast(start)..@intCast(end)]) |entry_idx| {
        try store.extra_data.append(store.gpa, @intFromEnum(entry_idx));
    }
    return .{ .span = .{ .start = ed_start, .len = end - start } };
}
/// Clears any TargetEntry.Idxs added to scratch from start until the end.
/// Used to roll back a partially-parsed entry list after a parse error.
pub fn clearScratchTargetEntriesFrom(store: *NodeStore, start: u32) void {
    store.scratch_target_entries.clearFrom(start);
}
/// Returns a new TargetEntry slice for iteration.
/// The span must have been produced by `targetEntrySpanFrom`.
pub fn targetEntrySlice(store: *const NodeStore, span: AST.TargetEntry.Span) []AST.TargetEntry.Idx {
    return store.sliceFromSpan(AST.TargetEntry.Idx, span.span);
}
/// Returns the start position for a new Span of TargetFile.Idxs in scratch.
/// Capture this before parsing a file list, then pass it to `targetFileSpanFrom`
/// (on success) or `clearScratchTargetFilesFrom` (on failure) to finish.
pub fn scratchTargetFileTop(store: *NodeStore) u32 {
    return store.scratch_target_files.top();
}
/// Places a new AST.TargetFile.Idx in the scratch.
/// Entries accumulate here until `targetFileSpanFrom` moves them into extra_data.
pub fn addScratchTargetFile(store: *NodeStore, idx: AST.TargetFile.Idx) std.mem.Allocator.Error!void {
    try store.scratch_target_files.append(idx);
}
/// Creates a new span starting at `start`, moving the scratch file indices
/// collected since then into extra_data. The scratch list is truncated back
/// to `start` on exit (via defer), including on allocation failure.
pub fn targetFileSpanFrom(store: *NodeStore, start: u32) std.mem.Allocator.Error!AST.TargetFile.Span {
    const end = store.scratch_target_files.top();
    defer store.scratch_target_files.clearFrom(start);
    const ed_start: u32 = @intCast(store.extra_data.items.len);
    for (store.scratch_target_files.items.items[@intCast(start)..@intCast(end)]) |file_idx| {
        try store.extra_data.append(store.gpa, @intFromEnum(file_idx));
    }
    return .{ .span = .{ .start = ed_start, .len = end - start } };
}
/// Clears any TargetFile.Idxs added to scratch from start until the end.
/// Used to roll back a partially-parsed file list after a parse error.
pub fn clearScratchTargetFilesFrom(store: *NodeStore, start: u32) void {
    store.scratch_target_files.clearFrom(start);
}
/// Returns a new TargetFile slice for iteration.
/// The span must have been produced by `targetFileSpanFrom`.
pub fn targetFileSlice(store: *const NodeStore, span: AST.TargetFile.Span) []AST.TargetFile.Idx {
    return store.sliceFromSpan(AST.TargetFile.Idx, span.span);
}
/// Retrieves a TargetsSection from a stored node.
///
/// Decoding mirrors `addTargetsSection`: a zero `main_token` means no files
/// path, and a zero `data.lhs` means no exe section (non-zero values were
/// stored with OPTIONAL_VALUE_OFFSET added).
/// NOTE(review): a files_path stored at token index 0 decodes as null here —
/// confirm token 0 can never be a path token.
pub fn getTargetsSection(store: *const NodeStore, idx: AST.TargetsSection.Idx) AST.TargetsSection {
    const stored = store.nodes.get(@enumFromInt(@intFromEnum(idx)));
    std.debug.assert(stored.tag == .targets_section);
    return .{
        .files_path = if (stored.main_token == 0) null else stored.main_token,
        .exe = if (stored.data.lhs == 0) null else @enumFromInt(stored.data.lhs - OPTIONAL_VALUE_OFFSET),
        .region = stored.region,
    };
}
/// Retrieves a TargetLinkType from a stored node.
/// Inverse of `addTargetLinkType`: unpacks the entry span from `data`
/// (lhs = start, rhs = len).
pub fn getTargetLinkType(store: *const NodeStore, idx: AST.TargetLinkType.Idx) AST.TargetLinkType {
    const stored = store.nodes.get(@enumFromInt(@intFromEnum(idx)));
    std.debug.assert(stored.tag == .target_link_type);
    return .{
        .entries = .{ .span = .{ .start = stored.data.lhs, .len = stored.data.rhs } },
        .region = stored.region,
    };
}
/// Retrieves a TargetEntry from a stored node.
/// Inverse of `addTargetEntry`: the target-name token comes from
/// `main_token` and the file-list span is unpacked from `data`.
pub fn getTargetEntry(store: *const NodeStore, idx: AST.TargetEntry.Idx) AST.TargetEntry {
    const stored = store.nodes.get(@enumFromInt(@intFromEnum(idx)));
    std.debug.assert(stored.tag == .target_entry);
    return .{
        .target = stored.main_token,
        .files = .{ .span = .{ .start = stored.data.lhs, .len = stored.data.rhs } },
        .region = stored.region,
    };
}
/// Retrieves a TargetFile from a stored node.
/// Inverse of `addTargetFile`: the node tag selects the union variant.
/// Panics on any tag that is not a target-file encoding, which would
/// indicate a mismatched index.
pub fn getTargetFile(store: *const NodeStore, idx: AST.TargetFile.Idx) AST.TargetFile {
    const stored = store.nodes.get(@enumFromInt(@intFromEnum(idx)));
    return switch (stored.tag) {
        .target_file_string => .{ .string_literal = stored.main_token },
        .target_file_ident => .{ .special_ident = stored.main_token },
        .malformed => .{ .malformed = .{
            .reason = @enumFromInt(stored.data.lhs),
            .region = stored.region,
        } },
        else => std.debug.panic("Expected a valid target_file tag, got {s}", .{@tagName(stored.tag)}),
    };
}

View file

@ -552,6 +552,13 @@ pub fn parsePlatformHeader(self: *Parser) Error!AST.Header.Idx {
},
);
// Parse optional targets section
var targets: ?AST.TargetsSection.Idx = null;
if (self.peek() == .KwTargets) {
self.advance(); // Advance past 'targets'
targets = try self.parseTargetsSection();
}
return self.store.addHeader(.{ .platform = .{
.name = name,
.requires_rigids = rigids,
@ -559,6 +566,7 @@ pub fn parsePlatformHeader(self: *Parser) Error!AST.Header.Idx {
.exposes = exposes,
.packages = packages,
.provides = provides,
.targets = targets,
.region = .{ .start = start, .end = self.pos },
} });
}
@ -930,6 +938,210 @@ pub fn parseExposedItem(self: *Parser) Error!AST.ExposedItem.Idx {
}
}
// -----------------------------------------------------------------
// Target section parsing functions
// -----------------------------------------------------------------
/// Parses a single file item in a target list: "crt1.o" or app
///
/// Accepts either a string literal (a host object/archive file name), a
/// lower-case identifier, or the `app` keyword (both stored as special
/// identifiers). Produces a malformed node on anything else.
pub fn parseTargetFile(self: *Parser) Error!AST.TargetFile.Idx {
    const trace = tracy.trace(@src());
    defer trace.end();
    const start = self.pos;
    switch (self.peek()) {
        .StringStart => {
            // Parse string literal: "crt1.o"
            self.advance(); // Advance past StringStart
            // Capture StringPart token (the actual content).
            // NOTE(review): an empty string has no StringPart, so content_tok
            // falls back to the StringStart token — confirm downstream
            // consumers handle that case.
            var content_tok = start;
            if (self.peek() == .StringPart) {
                content_tok = self.pos;
                self.advance(); // Advance past StringPart
            }
            // Skip any remaining parts until StringEnd
            while (self.peek() != .StringEnd and self.peek() != .EndOfFile) {
                self.advance();
            }
            // An unterminated string reaches EOF without a StringEnd.
            if (self.peek() == .EndOfFile) {
                return try self.pushMalformed(AST.TargetFile.Idx, .expected_target_file_string_end, start);
            }
            self.advance(); // Advance past StringEnd
            return try self.store.addTargetFile(.{ .string_literal = content_tok });
        },
        .LowerIdent => {
            // Parse special identifier: win_gui or other lower idents
            self.advance(); // Advance past LowerIdent
            return try self.store.addTargetFile(.{ .special_ident = start });
        },
        .KwApp => {
            // Parse 'app' keyword as special identifier
            self.advance(); // Advance past KwApp
            return try self.store.addTargetFile(.{ .special_ident = start });
        },
        else => {
            return try self.pushMalformed(AST.TargetFile.Idx, .expected_target_file, start);
        },
    }
}
/// Parses a single target entry: x64musl: ["crt1.o", "host.o", app]
///
/// Grammar: LowerIdent ':' '[' TargetFile (',' TargetFile)* ']'
/// On any structural error the partially-collected file list is rolled back
/// and a malformed TargetEntry node is produced instead.
pub fn parseTargetEntry(self: *Parser) Error!AST.TargetEntry.Idx {
    const trace = tracy.trace(@src());
    defer trace.end();
    const start = self.pos;
    // Expect target name (lower identifier)
    if (self.peek() != .LowerIdent) {
        return try self.pushMalformed(AST.TargetEntry.Idx, .expected_target_name, start);
    }
    const target_name = self.pos;
    self.advance(); // Advance past target name
    // Expect colon
    self.expect(.OpColon) catch {
        return try self.pushMalformed(AST.TargetEntry.Idx, .expected_target_colon, start);
    };
    // Expect open square bracket
    self.expect(.OpenSquare) catch {
        return try self.pushMalformed(AST.TargetEntry.Idx, .expected_target_files_open_square, start);
    };
    // Parse file list; remember the scratch top so we can roll back on error.
    const files_top = self.store.scratchTargetFileTop();
    self.parseCollectionSpan(AST.TargetFile.Idx, .CloseSquare, NodeStore.addScratchTargetFile, Parser.parseTargetFile) catch |err| {
        switch (err) {
            error.ExpectedNotFound => {
                // Drop any files already collected for this entry before bailing.
                self.store.clearScratchTargetFilesFrom(files_top);
                return try self.pushMalformed(AST.TargetEntry.Idx, .expected_target_files_close_square, start);
            },
            error.OutOfMemory => return error.OutOfMemory,
            error.TooNested => return error.TooNested,
        }
    };
    const files_span = try self.store.targetFileSpanFrom(files_top);
    return try self.store.addTargetEntry(.{
        .target = target_name,
        .files = files_span,
        .region = .{ .start = start, .end = self.pos },
    });
}
/// Parses a target link type section: exe: { x64musl: [...], ... }
///
/// Grammar: '{' TargetEntry (',' TargetEntry)* '}' — the leading field name
/// and colon are consumed by the caller (parseTargetsSection).
/// On any structural error the partially-collected entry list is rolled back
/// and a malformed TargetLinkType node is produced instead.
pub fn parseTargetLinkType(self: *Parser) Error!AST.TargetLinkType.Idx {
    const trace = tracy.trace(@src());
    defer trace.end();
    const start = self.pos;
    // Expect open curly brace
    self.expect(.OpenCurly) catch {
        return try self.pushMalformed(AST.TargetLinkType.Idx, .expected_target_link_open_curly, start);
    };
    // Parse target entries; remember the scratch top so we can roll back on error.
    const entries_top = self.store.scratchTargetEntryTop();
    self.parseCollectionSpan(AST.TargetEntry.Idx, .CloseCurly, NodeStore.addScratchTargetEntry, Parser.parseTargetEntry) catch |err| {
        switch (err) {
            error.ExpectedNotFound => {
                // Drop any entries already collected for this section before bailing.
                self.store.clearScratchTargetEntriesFrom(entries_top);
                return try self.pushMalformed(AST.TargetLinkType.Idx, .expected_target_link_close_curly, start);
            },
            error.OutOfMemory => return error.OutOfMemory,
            error.TooNested => return error.TooNested,
        }
    };
    const entries_span = try self.store.targetEntrySpanFrom(entries_top);
    return try self.store.addTargetLinkType(.{
        .entries = entries_span,
        .region = .{ .start = start, .end = self.pos },
    });
}
/// Parses a targets section: targets: { files: "targets/", exe: { ... } }
///
/// Fields are identified by the TYPE of their value, not by the field name:
/// a string value becomes the files path and a curly-brace value becomes the
/// exe link-type table. The field-name token is consumed and discarded
/// (interpretation of names is deferred to the CLI).
/// NOTE(review): if two fields carry the same value type, the later one
/// silently overwrites the earlier — confirm that is intended.
pub fn parseTargetsSection(self: *Parser) Error!AST.TargetsSection.Idx {
    const trace = tracy.trace(@src());
    defer trace.end();
    const start = self.pos;
    // Expect colon after 'targets'
    self.expect(.OpColon) catch {
        return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_colon, start);
    };
    // Expect open curly brace
    self.expect(.OpenCurly) catch {
        return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_open_curly, start);
    };
    var files_path: ?TokenIdx = null;
    var exe: ?AST.TargetLinkType.Idx = null;
    // Parse fields until closing curly brace
    // Field identification is done by value type, not field name (deferred to CLI)
    while (self.peek() != .CloseCurly and self.peek() != .EndOfFile) {
        // Expect field name (lower identifier)
        if (self.peek() != .LowerIdent) {
            return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_field_name, start);
        }
        self.advance(); // Advance past field name
        // Expect colon
        self.expect(.OpColon) catch {
            return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_field_colon, start);
        };
        // Determine field type by what follows
        switch (self.peek()) {
            .StringStart => {
                // Parse files path: "targets/"
                self.advance(); // Advance past StringStart
                // Capture StringPart token (the actual content).
                // NOTE(review): an empty string has no StringPart, leaving
                // files_path null even though a files field was written.
                if (self.peek() == .StringPart) {
                    files_path = self.pos;
                    self.advance(); // Advance past StringPart
                }
                // Skip any remaining parts until StringEnd
                while (self.peek() != .StringEnd and self.peek() != .EndOfFile) {
                    self.advance();
                }
                if (self.peek() == .StringEnd) {
                    self.advance(); // Advance past StringEnd
                }
            },
            .OpenCurly => {
                // Parse link type section (exe, static_lib, shared_lib)
                // For now, we only support exe
                exe = try self.parseTargetLinkType();
            },
            else => {
                return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_field_name, start);
            },
        }
        // Consume optional comma
        if (self.peek() == .Comma) {
            self.advance();
        }
    }
    // Expect closing curly brace
    self.expect(.CloseCurly) catch {
        return try self.pushMalformed(AST.TargetsSection.Idx, .expected_targets_close_curly, start);
    };
    return try self.store.addTargetsSection(.{
        .files_path = files_path,
        .exe = exe,
        .region = .{ .start = start, .end = self.pos },
    });
}
const StatementType = enum { top_level, in_body, in_associated_block };
/// Parse a top level roc statement
@ -3117,7 +3329,7 @@ fn getTokenBP(tok: Token.Tag) ?BinOpBp {
.OpSlash => .{ .left = 28, .right = 29 }, // 29 LEFT
.OpDoubleSlash => .{ .left = 26, .right = 27 }, // 27 LEFT
.OpPercent => .{ .left = 24, .right = 25 }, // 25 LEFT
.OpPlus => .{ .left = 22, .right = 23 }, // 23 LEFT
.OpPlus => .{ .left = 20, .right = 21 }, // 21 LEFT
.OpBinaryMinus => .{ .left = 20, .right = 21 }, // 21 LEFT
.OpDoubleQuestion => .{ .left = 18, .right = 19 }, // 19 LEFT
.OpQuestion => .{ .left = 16, .right = 17 }, // 17 LEFT

View file

@ -89,6 +89,7 @@ test "NodeStore round trip - Headers" {
.provides = rand_idx(AST.Collection.Idx),
.requires_rigids = rand_idx(AST.Collection.Idx),
.requires_signatures = rand_idx(AST.TypeAnno.Idx),
.targets = null,
.region = rand_region(),
},
});
@ -657,3 +658,86 @@ test "NodeStore round trip - Expr" {
return error.IncompleteExprTestCoverage;
}
}
test "NodeStore round trip - Targets" {
    const gpa = testing.allocator;
    var store = try NodeStore.initCapacity(gpa, NodeStore.AST_HEADER_NODE_COUNT);
    defer store.deinit();

    // TargetFile round trip: exercise every union variant once.
    const file_cases = [_]AST.TargetFile{
        .{ .string_literal = rand_token_idx() },
        .{ .special_ident = rand_token_idx() },
        .{ .malformed = .{ .reason = .expected_targets_field_name, .region = rand_region() } },
    };
    for (file_cases) |original_file| {
        const file_idx = try store.addTargetFile(original_file);
        const roundtripped_file = store.getTargetFile(file_idx);
        testing.expectEqualDeep(original_file, roundtripped_file) catch |err| {
            std.debug.print("\n\nOriginal TargetFile: {any}\n\n", .{original_file});
            std.debug.print("Retrieved TargetFile: {any}\n\n", .{roundtripped_file});
            return err;
        };
    }

    // TargetEntry round trip.
    const original_entry = AST.TargetEntry{
        .target = rand_token_idx(),
        .files = .{ .span = rand_span() },
        .region = rand_region(),
    };
    const entry_idx = try store.addTargetEntry(original_entry);
    const roundtripped_entry = store.getTargetEntry(entry_idx);
    testing.expectEqualDeep(original_entry, roundtripped_entry) catch |err| {
        std.debug.print("\n\nOriginal TargetEntry: {any}\n\n", .{original_entry});
        std.debug.print("Retrieved TargetEntry: {any}\n\n", .{roundtripped_entry});
        return err;
    };

    // TargetLinkType round trip.
    const original_link = AST.TargetLinkType{
        .entries = .{ .span = rand_span() },
        .region = rand_region(),
    };
    const link_idx = try store.addTargetLinkType(original_link);
    const roundtripped_link = store.getTargetLinkType(link_idx);
    testing.expectEqualDeep(original_link, roundtripped_link) catch |err| {
        std.debug.print("\n\nOriginal TargetLinkType: {any}\n\n", .{original_link});
        std.debug.print("Retrieved TargetLinkType: {any}\n\n", .{roundtripped_link});
        return err;
    };

    // TargetsSection round trip with both optional fields populated.
    const populated_section = AST.TargetsSection{
        .files_path = rand_token_idx(),
        .exe = link_idx,
        .region = rand_region(),
    };
    const populated_idx = try store.addTargetsSection(populated_section);
    const roundtripped_populated = store.getTargetsSection(populated_idx);
    testing.expectEqualDeep(populated_section, roundtripped_populated) catch |err| {
        std.debug.print("\n\nOriginal TargetsSection: {any}\n\n", .{populated_section});
        std.debug.print("Retrieved TargetsSection: {any}\n\n", .{roundtripped_populated});
        return err;
    };

    // TargetsSection round trip with both optional fields absent.
    const empty_section = AST.TargetsSection{
        .files_path = null,
        .exe = null,
        .region = rand_region(),
    };
    const empty_idx = try store.addTargetsSection(empty_section);
    const roundtripped_empty = store.getTargetsSection(empty_idx);
    testing.expectEqualDeep(empty_section, roundtripped_empty) catch |err| {
        std.debug.print("\n\nOriginal TargetsSection (nulls): {any}\n\n", .{empty_section});
        std.debug.print("Retrieved TargetsSection (nulls): {any}\n\n", .{roundtripped_empty});
        return err;
    };
}

View file

@ -158,6 +158,7 @@ pub const Token = struct {
KwProvides,
KwRequires,
KwReturn,
KwTargets,
KwVar,
KwWhere,
KwWhile,
@ -195,6 +196,7 @@ pub const Token = struct {
.KwProvides,
.KwRequires,
.KwReturn,
.KwTargets,
.KwVar,
.KwWhere,
.KwWhile,
@ -299,6 +301,7 @@ pub const Token = struct {
.KwProvides,
.KwRequires,
.KwReturn,
.KwTargets,
.KwVar,
.KwWhere,
.KwWhile,
@ -394,6 +397,7 @@ pub const Token = struct {
.{ "provides", .KwProvides },
.{ "requires", .KwRequires },
.{ "return", .KwReturn },
.{ "targets", .KwTargets },
.{ "var", .KwVar },
.{ "where", .KwWhere },
.{ "while", .KwWhile },
@ -2290,6 +2294,9 @@ fn rebuildBufferForTesting(buf: []const u8, tokens: *TokenizedBuffer, alloc: std
.KwReturn => {
try buf2.appendSlice("return");
},
.KwTargets => {
try buf2.appendSlice("targets");
},
.KwVar => {
try buf2.appendSlice("var");
},

View file

@ -858,6 +858,7 @@ pub const Repl = struct {
const output = try interpreter.renderValueRocWithType(result, result.rt_var, self.roc_ops);
result.decref(&interpreter.runtime_layout_store, self.roc_ops);
interpreter.cleanupBindings(self.roc_ops);
return .{ .expression = output };
}
};

258
src/target/mod.zig Normal file
View file

@ -0,0 +1,258 @@
//! Roc target definitions - shared between build.zig and CLI
//!
//! This module is importable by build.zig (build-time) and CLI code (runtime).
//! It contains no dependencies on compiler modules like `parse`.
const std = @import("std");
const builtin = @import("builtin");
/// Roc's simplified target representation.
/// Maps to specific OS/arch/ABI combinations for cross-compilation.
pub const RocTarget = enum {
    // x64 (x86_64) targets
    x64mac,
    x64win,
    x64freebsd,
    x64openbsd,
    x64netbsd,
    x64musl,
    x64glibc,
    x64linux,
    x64elf,

    // arm64 (aarch64) targets
    arm64mac,
    arm64win,
    arm64linux,
    arm64musl,
    arm64glibc,

    // arm32 targets
    arm32linux,
    arm32musl,

    // WebAssembly
    wasm32,

    /// Parse target from string (e.g., "arm64mac", "x64musl").
    /// Returns null when the string names no known target.
    pub fn fromString(str: []const u8) ?RocTarget {
        // std.meta.stringToEnum matches enum field names exactly — the same
        // behavior as a hand-rolled inline-for over the fields, without the
        // comptime loop.
        return std.meta.stringToEnum(RocTarget, str);
    }

    /// Convert a std.Target to a RocTarget.
    /// This is the runtime equivalent of detectNative() which uses builtin.target.
    /// Unrecognized arch/OS/ABI combinations fall back to a "closest" target
    /// rather than failing (musl is preferred on Linux for static linking).
    pub fn fromStdTarget(target: std.Target) RocTarget {
        const os = target.os.tag;
        const arch = target.cpu.arch;
        const abi = target.abi;
        switch (arch) {
            .x86_64 => {
                switch (os) {
                    .macos => return .x64mac,
                    .windows => return .x64win,
                    .freebsd => return .x64freebsd,
                    .openbsd => return .x64openbsd,
                    .netbsd => return .x64netbsd,
                    .linux => {
                        return switch (abi) {
                            .musl, .musleabi, .musleabihf => .x64musl,
                            .gnu, .gnueabi, .gnueabihf, .gnux32 => .x64glibc,
                            else => .x64musl, // Default to musl for static linking
                        };
                    },
                    else => return .x64elf, // Generic fallback
                }
            },
            .aarch64, .aarch64_be => {
                switch (os) {
                    .macos => return .arm64mac,
                    .windows => return .arm64win,
                    .linux => {
                        return switch (abi) {
                            .musl, .musleabi, .musleabihf => .arm64musl,
                            .gnu, .gnueabi, .gnueabihf => .arm64glibc,
                            else => .arm64musl, // Default to musl for static linking
                        };
                    },
                    else => return .arm64linux, // Generic ARM64 Linux
                }
            },
            .arm => {
                switch (os) {
                    .linux => return .arm32musl, // Default to musl for static linking
                    else => return .arm32linux, // Generic ARM32 Linux
                }
            },
            .wasm32 => return .wasm32,
            else => {
                // Default fallback based on OS
                switch (os) {
                    .macos => return .x64mac,
                    .windows => return .x64win,
                    .linux => return .x64musl, // Default to musl
                    else => return .x64elf,
                }
            },
        }
    }

    /// Detect the current system's Roc target (compile-time)
    pub fn detectNative() RocTarget {
        return fromStdTarget(builtin.target);
    }

    /// Get the string name of this target (e.g., "arm64mac", "x64musl")
    pub fn toName(self: RocTarget) []const u8 {
        return @tagName(self);
    }

    /// Get the OS tag for this RocTarget.
    /// NOTE(review): wasm32 maps to `.wasi` here while toTriple() emits
    /// "wasm32-unknown-unknown" — confirm which OS the wasm target assumes.
    pub fn toOsTag(self: RocTarget) std.Target.Os.Tag {
        return switch (self) {
            .x64mac, .arm64mac => .macos,
            .x64win, .arm64win => .windows,
            .x64freebsd => .freebsd,
            .x64openbsd => .openbsd,
            .x64netbsd => .netbsd,
            .x64musl, .x64glibc, .x64linux, .x64elf, .arm64musl, .arm64glibc, .arm64linux, .arm32musl, .arm32linux => .linux,
            .wasm32 => .wasi,
        };
    }

    /// Get the CPU architecture for this RocTarget
    pub fn toCpuArch(self: RocTarget) std.Target.Cpu.Arch {
        return switch (self) {
            // x64 targets
            .x64mac, .x64win, .x64freebsd, .x64openbsd, .x64netbsd, .x64musl, .x64glibc, .x64linux, .x64elf => .x86_64,
            // arm64 targets
            .arm64mac, .arm64win, .arm64linux, .arm64musl, .arm64glibc => .aarch64,
            // arm32 targets
            .arm32linux, .arm32musl => .arm,
            // WebAssembly
            .wasm32 => .wasm32,
        };
    }

    /// Convert Roc target to LLVM target triple
    pub fn toTriple(self: RocTarget) []const u8 {
        return switch (self) {
            // x64 targets
            .x64mac => "x86_64-apple-darwin",
            .x64win => "x86_64-pc-windows-msvc",
            .x64freebsd => "x86_64-unknown-freebsd",
            .x64openbsd => "x86_64-unknown-openbsd",
            .x64netbsd => "x86_64-unknown-netbsd",
            .x64musl => "x86_64-unknown-linux-musl",
            .x64glibc => "x86_64-unknown-linux-gnu",
            .x64linux => "x86_64-unknown-linux-gnu",
            .x64elf => "x86_64-unknown-none-elf",
            // arm64 targets
            .arm64mac => "aarch64-apple-darwin",
            .arm64win => "aarch64-pc-windows-msvc",
            .arm64linux => "aarch64-unknown-linux-gnu",
            .arm64musl => "aarch64-unknown-linux-musl",
            .arm64glibc => "aarch64-unknown-linux-gnu",
            // arm32 targets
            .arm32linux => "arm-unknown-linux-gnueabihf",
            .arm32musl => "arm-unknown-linux-musleabihf",
            // WebAssembly
            .wasm32 => "wasm32-unknown-unknown",
        };
    }

    /// Check if target uses dynamic linking (glibc targets)
    pub fn isDynamic(self: RocTarget) bool {
        return switch (self) {
            .x64glibc, .arm64glibc, .x64linux, .arm64linux, .arm32linux => true,
            else => false,
        };
    }

    /// Check if target uses static linking (musl targets)
    pub fn isStatic(self: RocTarget) bool {
        return switch (self) {
            .x64musl, .arm64musl, .arm32musl => true,
            else => false,
        };
    }

    /// Check if target is macOS
    pub fn isMacOS(self: RocTarget) bool {
        return switch (self) {
            .x64mac, .arm64mac => true,
            else => false,
        };
    }

    /// Check if target is Windows
    pub fn isWindows(self: RocTarget) bool {
        return switch (self) {
            .x64win, .arm64win => true,
            else => false,
        };
    }

    /// Check if target is Linux-based
    pub fn isLinux(self: RocTarget) bool {
        return switch (self) {
            .x64musl, .x64glibc, .x64linux, .arm64musl, .arm64glibc, .arm64linux, .arm32musl, .arm32linux => true,
            else => false,
        };
    }

    /// Get the pointer bit width for this target
    pub fn ptrBitWidth(self: RocTarget) u16 {
        return switch (self.toCpuArch()) {
            .x86_64, .aarch64, .aarch64_be => 64,
            .arm, .wasm32 => 32,
            else => 64, // Default to 64-bit (unreachable for arches toCpuArch produces)
        };
    }

    /// Get the dynamic linker path for this target.
    /// Returns an error for targets that have no ELF-style dynamic linker
    /// (static musl, Windows, generic ELF, WebAssembly).
    pub fn getDynamicLinkerPath(self: RocTarget) ![]const u8 {
        return switch (self) {
            // x64 glibc targets
            .x64glibc, .x64linux => "/lib64/ld-linux-x86-64.so.2",
            // arm64 glibc targets
            .arm64glibc, .arm64linux => "/lib/ld-linux-aarch64.so.1",
            // arm32 glibc targets
            .arm32linux => "/lib/ld-linux-armhf.so.3",
            // Static linking targets don't need dynamic linker
            .x64musl, .arm64musl, .arm32musl => return error.StaticLinkingTarget,
            // macOS uses dyld
            .x64mac, .arm64mac => "/usr/lib/dyld",
            // Windows doesn't use ELF-style dynamic linker
            .x64win, .arm64win => return error.WindowsTarget,
            // BSD variants
            .x64freebsd => "/libexec/ld-elf.so.1",
            .x64openbsd => "/usr/libexec/ld.so",
            .x64netbsd => "/usr/libexec/ld.elf_so",
            // Generic ELF doesn't have a specific linker
            .x64elf => return error.NoKnownLinkerPath,
            // WebAssembly doesn't use dynamic linker
            .wasm32 => return error.WebAssemblyTarget,
        };
    }
};

View file

@ -334,13 +334,17 @@ pub const Instantiator = struct {
var fresh_args = std.ArrayList(Var).empty;
defer fresh_args.deinit(self.store.gpa);
// Use index-based iteration to avoid iterator invalidation
// (see comment in instantiateFunc for details)
const args_start: usize = @intFromEnum(tag_args.start);
for (0..tag_args.count) |i| {
const arg_var = self.store.vars.items.items[args_start + i];
const fresh_arg = try self.instantiateVar(arg_var);
try fresh_args.append(self.store.gpa, fresh_arg);
// Skip the loop entirely for tags with no arguments.
// This avoids accessing tag_args.start which may be undefined when count is 0.
if (tag_args.count > 0) {
// Use index-based iteration to avoid iterator invalidation
// (see comment in instantiateFunc for details)
const args_start: usize = @intFromEnum(tag_args.start);
for (0..tag_args.count) |i| {
const arg_var = self.store.vars.items.items[args_start + i];
const fresh_arg = try self.instantiateVar(arg_var);
try fresh_args.append(self.store.gpa, fresh_arg);
}
}
const fresh_args_range = try self.store.appendVars(fresh_args.items);

View file

@ -663,12 +663,12 @@ pub const Store = struct {
/// * update b to to the new desc value
/// * redirect a -> b
///
/// CRITICAL: The merge direction (a -> b) is load-bearing and must not be changed!
/// The merge direction (a -> b) is load-bearing and must not be changed.
/// Multiple parts of the unification algorithm depend on this specific order:
/// - When unifying aliases with structures, we rely on this order to ensure
/// that we don't loose alias context
/// that we don't lose alias context
///
// NOTE: The elm & the roc compiler this step differently
// NOTE: The elm & the roc compiler do this step differently
// * The elm compiler sets b to redirect to a
// * The roc compiler sets a to redirect to b
pub fn union_(self: *Self, a_var: Var, b_var: Var, new_desc: Desc) void {

View file

@ -3,6 +3,17 @@ platform ""
exposes [Stdout, Stderr, Stdin]
packages {}
provides { main_for_host!: "main" }
targets: {
files: "targets/",
exe: {
x64mac: ["libhost.a", app],
arm64mac: ["libhost.a", app],
x64musl: ["crt1.o", "libhost.a", app, "libc.a"],
arm64musl: ["crt1.o", "libhost.a", app, "libc.a"],
x64win: ["host.lib", app],
arm64win: ["host.lib", app],
}
}
import Stdout
import Stderr

8
test/fx/parse_error.roc Normal file
View file

@ -0,0 +1,8 @@
app [main!] {
pf: platform "./platform/main.roc",
}
import pf.Stdout
main! = |_args| {
Stdout.line!("Hello world")
Ok({})
}}

View file

@ -33,6 +33,28 @@ const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.po
const trace_refcount = build_options.trace_refcount;
pub const std_options: std.Options = .{
.logFn = std.log.defaultLog,
.log_level = .warn,
};
/// Override the default panic handler to avoid secondary crashes in stack trace generation
pub const panic = std.debug.FullPanic(panicImpl);
fn panicImpl(msg: []const u8, addr: ?usize) noreturn {
const stderr: std.fs.File = .stderr();
stderr.writeAll("\n=== PANIC (no stack trace) ===\n") catch {};
stderr.writeAll(msg) catch {};
if (addr) |a| {
var buf: [32]u8 = undefined;
const hex = std.fmt.bufPrint(&buf, " at address 0x{x}\n", .{a}) catch "";
stderr.writeAll(hex) catch {};
} else {
stderr.writeAll("\n") catch {};
}
std.process.abort();
}
/// Error message to display on stack overflow in a Roc program
const STACK_OVERFLOW_MESSAGE = "\nThis Roc application overflowed its stack memory and crashed.\n\n";
@ -226,16 +248,32 @@ fn parseTestSpec(allocator: std.mem.Allocator, spec: []const u8) ParseError![]Sp
/// Host environment - contains GeneralPurposeAllocator for leak detection
const HostEnv = struct {
gpa: std.heap.GeneralPurposeAllocator(.{}),
gpa: std.heap.GeneralPurposeAllocator(.{ .safety = true }),
test_state: TestState,
};
/// Roc allocation function with size-tracking metadata
fn rocAllocFn(roc_alloc: *builtins.host_abi.RocAlloc, env: *anyopaque) callconv(.c) void {
// Debug check: verify roc_alloc pointer alignment
const roc_alloc_addr = @intFromPtr(roc_alloc);
if (roc_alloc_addr % @alignOf(builtins.host_abi.RocAlloc) != 0) {
std.debug.panic("[rocAllocFn] roc_alloc ptr not aligned! addr=0x{x} required={}", .{ roc_alloc_addr, @alignOf(builtins.host_abi.RocAlloc) });
}
// Debug check: verify env is properly aligned for HostEnv
const env_addr = @intFromPtr(env);
if (env_addr % @alignOf(HostEnv) != 0) {
std.debug.panic("rocAllocFn: env=0x{x} not aligned to {} bytes", .{ env_addr, @alignOf(HostEnv) });
}
const host: *HostEnv = @ptrCast(@alignCast(env));
const allocator = host.gpa.allocator();
const align_enum = std.mem.Alignment.fromByteUnits(@as(usize, @intCast(roc_alloc.alignment)));
// The allocation must be at least 8-byte aligned because:
// 1. The refcount (isize/usize) is stored before the data and needs proper alignment
// 2. The builtins code casts data pointers to [*]isize for refcount access
const min_alignment: usize = @max(roc_alloc.alignment, @alignOf(usize));
const align_enum = std.mem.Alignment.fromByteUnits(min_alignment);
// Calculate additional bytes needed to store the size
const size_storage_bytes = @max(roc_alloc.alignment, @alignOf(usize));
@ -255,6 +293,12 @@ fn rocAllocFn(roc_alloc: *builtins.host_abi.RocAlloc, env: *anyopaque) callconv(
std.process.exit(1);
};
// Debug check: verify the allocator returned properly aligned memory
const base_addr = @intFromPtr(base_ptr);
if (base_addr % min_alignment != 0) {
@panic("Host allocator returned misaligned memory in rocAllocFn");
}
// Store the total size (including metadata) right before the user data
const size_ptr: *usize = @ptrFromInt(@intFromPtr(base_ptr) + size_storage_bytes - @sizeOf(usize));
size_ptr.* = total_size;
@ -262,6 +306,12 @@ fn rocAllocFn(roc_alloc: *builtins.host_abi.RocAlloc, env: *anyopaque) callconv(
// Return pointer to the user data (after the size metadata)
roc_alloc.answer = @ptrFromInt(@intFromPtr(base_ptr) + size_storage_bytes);
// Debug check: verify the returned pointer is also properly aligned
const answer_addr = @intFromPtr(roc_alloc.answer);
if (answer_addr % roc_alloc.alignment != 0) {
@panic("Host allocator returned misaligned answer in rocAllocFn");
}
if (trace_refcount) {
std.debug.print("[ALLOC] ptr=0x{x} size={d} align={d}\n", .{ @intFromPtr(roc_alloc.answer), roc_alloc.length, roc_alloc.alignment });
}
@ -269,9 +319,18 @@ fn rocAllocFn(roc_alloc: *builtins.host_abi.RocAlloc, env: *anyopaque) callconv(
/// Roc deallocation function with size-tracking metadata
fn rocDeallocFn(roc_dealloc: *builtins.host_abi.RocDealloc, env: *anyopaque) callconv(.c) void {
// Debug check: verify env is properly aligned for HostEnv
const env_addr = @intFromPtr(env);
if (env_addr % @alignOf(HostEnv) != 0) {
std.debug.panic("[rocDeallocFn] env=0x{x} not aligned to {} bytes", .{ env_addr, @alignOf(HostEnv) });
}
const host: *HostEnv = @ptrCast(@alignCast(env));
const allocator = host.gpa.allocator();
// Use same minimum alignment as alloc
const min_alignment: usize = @max(roc_dealloc.alignment, @alignOf(usize));
const align_enum = std.mem.Alignment.fromByteUnits(min_alignment);
// Calculate where the size metadata is stored
const size_storage_bytes = @max(roc_dealloc.alignment, @alignOf(usize));
const size_ptr: *const usize = @ptrFromInt(@intFromPtr(roc_dealloc.ptr) - @sizeOf(usize));
@ -289,9 +348,6 @@ fn rocDeallocFn(roc_dealloc: *builtins.host_abi.RocDealloc, env: *anyopaque) cal
// Calculate the base pointer (start of actual allocation)
const base_ptr: [*]u8 = @ptrFromInt(@intFromPtr(roc_dealloc.ptr) - size_storage_bytes);
// Use same alignment calculation as alloc
const align_enum = std.mem.Alignment.fromByteUnits(@as(usize, @intCast(roc_dealloc.alignment)));
// Free the memory (including the size metadata)
const slice = @as([*]u8, @ptrCast(base_ptr))[0..total_size];
allocator.rawFree(slice, align_enum, @returnAddress());
@ -299,9 +355,18 @@ fn rocDeallocFn(roc_dealloc: *builtins.host_abi.RocDealloc, env: *anyopaque) cal
/// Roc reallocation function with size-tracking metadata
fn rocReallocFn(roc_realloc: *builtins.host_abi.RocRealloc, env: *anyopaque) callconv(.c) void {
// Debug check: verify env is properly aligned for HostEnv
const env_addr = @intFromPtr(env);
if (env_addr % @alignOf(HostEnv) != 0) {
std.debug.panic("[rocReallocFn] env=0x{x} not aligned to {} bytes", .{ env_addr, @alignOf(HostEnv) });
}
const host: *HostEnv = @ptrCast(@alignCast(env));
const allocator = host.gpa.allocator();
// Use same minimum alignment as alloc
const min_alignment: usize = @max(roc_realloc.alignment, @alignOf(usize));
const align_enum = std.mem.Alignment.fromByteUnits(min_alignment);
// Calculate where the size metadata is stored for the old allocation
const size_storage_bytes = @max(roc_realloc.alignment, @alignOf(usize));
const old_size_ptr: *const usize = @ptrFromInt(@intFromPtr(roc_realloc.answer) - @sizeOf(usize));
@ -315,14 +380,27 @@ fn rocReallocFn(roc_realloc: *builtins.host_abi.RocRealloc, env: *anyopaque) cal
// Calculate new total size needed
const new_total_size = roc_realloc.new_length + size_storage_bytes;
// Perform reallocation
// Free old memory and allocate new with proper alignment
// This is necessary because Zig's realloc infers alignment from slice type ([]u8 = alignment 1)
// which could cause the new allocation to be misaligned
const old_slice = @as([*]u8, @ptrCast(old_base_ptr))[0..old_total_size];
const new_slice = allocator.realloc(old_slice, new_total_size) catch {
// Allocate new memory with proper alignment
const new_ptr = allocator.rawAlloc(new_total_size, align_enum, @returnAddress()) orelse {
const stderr: std.fs.File = .stderr();
stderr.writeAll("\x1b[31mHost error:\x1b[0m reallocation failed, out of memory\n") catch {};
std.process.exit(1);
};
// Copy old data to new location
const copy_size = @min(old_total_size, new_total_size);
@memcpy(new_ptr[0..copy_size], old_slice[0..copy_size]);
// Free old memory
allocator.rawFree(old_slice, align_enum, @returnAddress());
const new_slice = new_ptr[0..new_total_size];
// Store the new total size in the metadata
const new_size_ptr: *usize = @ptrFromInt(@intFromPtr(new_slice.ptr) + size_storage_bytes - @sizeOf(usize));
new_size_ptr.* = new_total_size;
@ -437,6 +515,11 @@ fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt
// Arguments struct for single Str parameter
const Args = extern struct { str: RocStr };
// Debug check: verify args_ptr is properly aligned for Args
const args_addr = @intFromPtr(args_ptr);
if (args_addr % @alignOf(Args) != 0) {
std.debug.panic("[hostedStderrLine] args_ptr=0x{x} not aligned to {} bytes", .{ args_addr, @alignOf(Args) });
}
const args: *Args = @ptrCast(@alignCast(args_ptr));
const message = args.str.asSlice();
@ -600,6 +683,11 @@ fn hostedStdoutLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt
// Arguments struct for single Str parameter
const Args = extern struct { str: RocStr };
// Debug check: verify args_ptr is properly aligned for Args
const args_addr = @intFromPtr(args_ptr);
if (args_addr % @alignOf(Args) != 0) {
std.debug.panic("[hostedStdoutLine] args_ptr=0x{x} not aligned to {} bytes", .{ args_addr, @alignOf(Args) });
}
const args: *Args = @ptrCast(@alignCast(args_ptr));
const message = args.str.asSlice();
@ -679,7 +767,7 @@ fn platform_main(test_spec: ?[]const u8, test_verbose: bool) !c_int {
_ = builtins.handlers.install(handleRocStackOverflow, handleRocAccessViolation, handleRocArithmeticError);
var host_env = HostEnv{
.gpa = std.heap.GeneralPurposeAllocator(.{}){},
.gpa = std.heap.GeneralPurposeAllocator(.{ .safety = true }){},
.test_state = TestState.init(),
};

View file

@ -3,6 +3,17 @@ platform ""
exposes [Stdout, Stderr, Stdin]
packages {}
provides { main_for_host!: "main" }
targets: {
files: "targets/",
exe: {
x64mac: ["libhost.a", app],
arm64mac: ["libhost.a", app],
x64musl: ["crt1.o", "libhost.a", app, "libc.a"],
arm64musl: ["crt1.o", "libhost.a", app, "libc.a"],
x64win: ["host.lib", app],
arm64win: ["host.lib", app],
}
}
import Stdout
import Stderr

View file

@ -0,0 +1,81 @@
# Puzzle app: for each inclusive range "a-b" in the input, sum every integer
# whose decimal representation is a shorter digit block repeated two or more
# times (e.g. "1212" = "12" twice). Only the demo input is exercised here.
app [main!] { pf: platform "./platform/main.roc" }
import pf.Stdout
# Demo input: a single comma-separated list of ranges (here just one, "11-22").
demo_input = "11-22"
# Write a possibly multi-line message to stdout, one Stdout.line! call per line.
print! : Str => {}
print! = |msg| msg.split_on("\n").for_each!(Stdout.line!)
# Parse "a-b" into an (a, b) pair of I64s.
# Errors: InvalidRangeFormat when there is not exactly one "-" separator;
# I64.from_str errors propagate via `?` when either side is not a valid I64.
parse_range : Str -> Try((I64, I64), _)
parse_range = |range_str| {
match range_str.split_on("-") {
[a, b] => Ok((I64.from_str(a)?, I64.from_str(b)?))
_ => Err(InvalidRangeFormat)
}
}
# Concatenate `list` with itself `n` times; n == 0 yields the empty list.
repeat = |list, n| repeat_helper([], list, n)
# Tail-recursive worker for `repeat`, accumulating into `acc`.
repeat_helper = |acc, list, n| match n {
0 => acc
_ => repeat_helper(acc.concat(list), list, n - 1)
}
# True when x's decimal digits are a block of length d repeated n/d (>= 2)
# times. The full string itself (d == n) never counts, since d only runs up
# to n // 2.
has_repeating_pattern : I64 => Bool
has_repeating_pattern = |x| {
s = x.to_str().to_utf8()
n = s.len()
# Try every candidate block length d that divides n.
# `var $d` declares a mutable local; the `$` prefix marks reassignable vars.
var $d = 1
while $d <= n // 2 {
if n % $d == 0 {
# Rebuild the string by repeating its first d bytes n/d times and
# compare with the original. `slice->repeat(k)` passes `slice` as the
# first argument to the local `repeat` helper above.
slice = s.sublist({ start: 0, len: $d })
repeated = slice->repeat(n // $d)
if repeated == s { return True }
}
$d = $d + 1
}
False
}
# Sum every x in every range of the comma-separated input that has a
# repeating digit pattern. Each range string is echoed to stdout before
# processing. Parse failures propagate via `?`.
part2! : Str => Try(I64, _)
part2! = |input| {
var $sum = 0
for range_str in input.trim().split_on(",") {
print!(range_str)
(start, end) = parse_range(range_str)?
var $x = start
while $x <= end {
if has_repeating_pattern($x) {
$sum = $sum + $x
}
$x = $x + 1
}
}
Ok($sum)
}
# Run the demo input and print the part-2 result.
run! = || {
print!("Part 2 (demo): ${part2!(demo_input.trim())?.to_str()}")
Ok({})
}
# Entry point: run and discard any error (no error reporting on failure).
main! = || {
match run!() {
Ok(_) => {}
Err(_) => {}
}
}

View file

@ -3,6 +3,17 @@ platform ""
exposes []
packages {}
provides { add_ints_for_host: "add_ints", multiply_ints_for_host: "multiply_ints" }
targets: {
files: "targets/",
exe: {
x64mac: ["libhost.a", app],
arm64mac: ["libhost.a", app],
x64musl: ["crt1.o", "libhost.a", app, "libc.a"],
arm64musl: ["crt1.o", "libhost.a", app, "libc.a"],
x64glibc: ["Scrt1.o", "crti.o", "libhost.a", app, "crtn.o", "libc.so"],
arm64glibc: ["Scrt1.o", "crti.o", "libhost.a", app, "crtn.o", "libc.so"],
}
}
add_ints_for_host : I64, I64 -> I64
add_ints_for_host = add_ints

View file

@ -1017,29 +1017,36 @@ main = {
(e-lambda
(args
(p-assign (ident "f")))
(e-lambda
(args
(p-assign (ident "container")))
(e-closure
(captures
(capture (ident "f")))
(e-lambda
(args
(p-assign (ident "default")))
(e-block
(s-let
(p-assign (ident "mapped"))
(e-dot-access (field "map")
(receiver
(e-lookup-local
(p-assign (ident "container"))))
(args
(e-lookup-local
(p-assign (ident "f"))))))
(e-dot-access (field "get_or")
(receiver
(e-lookup-local
(p-assign (ident "mapped"))))
(p-assign (ident "container")))
(e-closure
(captures
(capture (ident "container"))
(capture (ident "f")))
(e-lambda
(args
(e-lookup-local
(p-assign (ident "default"))))))))))
(p-assign (ident "default")))
(e-block
(s-let
(p-assign (ident "mapped"))
(e-dot-access (field "map")
(receiver
(e-lookup-local
(p-assign (ident "container"))))
(args
(e-lookup-local
(p-assign (ident "f"))))))
(e-dot-access (field "get_or")
(receiver
(e-lookup-local
(p-assign (ident "mapped"))))
(args
(e-lookup-local
(p-assign (ident "default"))))))))))))
(s-let
(p-assign (ident "num_container"))
(e-nominal (nominal "Container")

View file

@ -2657,7 +2657,7 @@ expect {
(exposes
(exposed (name "line!") (wildcard false))
(exposed (name "write!") (wildcard false))))
(s-import (module "MALFORMED_IMPORT")
(s-import (module "#malformed_import_0")
(exposes
(exposed (name "line!") (wildcard false))
(exposed (name "write!") (wildcard false))))

View file

@ -7,10 +7,16 @@ type=repl
~~~roc
» Try.Ok(1) == Try.Ok(1)
» Try.Ok(1) == Try.Ok(2)
» Try.Ok(1) != Try.Ok(1)
» Try.Ok(1) != Try.Ok(2)
~~~
# OUTPUT
Crash: e_closure: failed to resolve capture value
True
---
Crash: e_closure: failed to resolve capture value
False
---
False
---
True
# PROBLEMS
NIL

View file

@ -2543,7 +2543,7 @@ expect {
(exposes
(exposed (name "line!") (wildcard false))
(exposed (name "write!") (wildcard false))))
(s-import (module "MALFORMED_IMPORT")
(s-import (module "#malformed_import_0")
(exposes
(exposed (name "line!") (wildcard false))
(exposed (name "write!") (wildcard false))))

View file

@ -3,6 +3,17 @@ platform ""
exposes []
packages {}
provides { process_string_for_host: "process_string" }
targets: {
files: "targets/",
exe: {
x64mac: ["libhost.a", app],
arm64mac: ["libhost.a", app],
x64musl: ["crt1.o", "libhost.a", app, "libc.a"],
arm64musl: ["crt1.o", "libhost.a", app, "libc.a"],
x64glibc: ["Scrt1.o", "crti.o", "libhost.a", app, "crtn.o", "libc.so"],
arm64glibc: ["Scrt1.o", "crti.o", "libhost.a", app, "crtn.o", "libc.so"],
}
}
process_string_for_host : Str -> Str
process_string_for_host = process_string