From 203a2f1e8529bc0eb4e5b64c0eca23c793d9f6ea Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 2 Dec 2025 21:27:38 -0500 Subject: [PATCH 01/64] Add U8 range operations --- src/build/roc/Builtin.roc | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/src/build/roc/Builtin.roc b/src/build/roc/Builtin.roc index 7edee4d076..9ef0eb4ab5 100644 --- a/src/build/roc/Builtin.roc +++ b/src/build/roc/Builtin.roc @@ -313,6 +313,18 @@ Builtin :: [].{ from_numeral : Numeral -> Try(U8, [InvalidNumeral(Str), ..others]) from_str : Str -> Try(U8, [BadNumStr, ..others]) + ## List of integers beginning with this `U8` and ending with the other `U8`. + ## (Use [until] instead to end with the other `U8` minus one.) + ## Returns an empty list if this `U8` is greater than the other. + to : U8, U8 -> List(U8) + to = range_to + + ## List of integers beginning with this `U8` and ending with the other `U8` minus one. + ## (Use [to] instead to end with the other `U8` exactly, instead of minus one.) + ## Returns an empty list if this `U8` is greater than or equal to the other. + until : U8, U8 -> List(U8) + until = range_until + # Conversions to signed integers (I8 is lossy, others are safe) to_i8_wrap : U8 -> I8 to_i8_try : U8 -> Try(I8, [OutOfRange, ..others]) @@ -955,8 +967,29 @@ Builtin :: [].{ } } -# Private top-level function for unsafe list access -# This is a low-level operation that gets replaced by the compiler +range_to = |var $current, end| { + var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. + + while $current <= end { + $answer = $answer.append($current) + $current = $current + 1 + } + + $answer +} + +range_until = |var $current, end| { + var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. 
+ + while $current < end { + $answer = $answer.append($current) + $current = $current + 1 + } + + $answer +} + +# Implemented by the compiler, does not perform bounds checks list_get_unsafe : List(item), U64 -> item # Unsafe conversion functions - these return simple records instead of Try types From a0171786bbe1d63c188efd3497ff75eb203431a4 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 08:03:24 -0500 Subject: [PATCH 02/64] Fix range bug --- src/build/roc/Builtin.roc | 4 +- src/canonicalize/Can.zig | 22 ++ src/eval/interpreter.zig | 356 +++++++++++++++++++-- src/fmt/fmt.zig | 5 + src/parse/AST.zig | 21 ++ src/parse/Node.zig | 4 + src/parse/NodeStore.zig | 13 +- src/parse/Parser.zig | 13 + src/types/generalize.zig | 8 +- test/snapshots/repl/u8_range_to.md | 19 ++ test/snapshots/repl/u8_range_until.md | 19 ++ test/snapshots/repl/var_in_lambda_param.md | 17 + 12 files changed, 472 insertions(+), 29 deletions(-) create mode 100644 test/snapshots/repl/u8_range_to.md create mode 100644 test/snapshots/repl/u8_range_until.md create mode 100644 test/snapshots/repl/var_in_lambda_param.md diff --git a/src/build/roc/Builtin.roc b/src/build/roc/Builtin.roc index 9ef0eb4ab5..2babc345c0 100644 --- a/src/build/roc/Builtin.roc +++ b/src/build/roc/Builtin.roc @@ -317,13 +317,13 @@ Builtin :: [].{ ## (Use [until] instead to end with the other `U8` minus one.) ## Returns an empty list if this `U8` is greater than the other. to : U8, U8 -> List(U8) - to = range_to + to = |start, end| range_to(start, end) ## List of integers beginning with this `U8` and ending with the other `U8` minus one. ## (Use [to] instead to end with the other `U8` exactly, instead of minus one.) ## Returns an empty list if this `U8` is greater than or equal to the other. 
until : U8, U8 -> List(U8) - until = range_until + until = |start, end| range_until(start, end) # Conversions to signed integers (I8 is lossy, others are safe) to_i8_wrap : U8 -> I8 diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index 456bdd05f6..f5f032b038 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -6403,6 +6403,28 @@ fn canonicalizePattern( return malformed_idx; } }, + .var_ident => |e| { + // Mutable variable binding in a pattern (e.g., `|var $x, y|`) + const region = self.parse_ir.tokenizedRegionToRegion(e.region); + if (self.parse_ir.tokens.resolveIdentifier(e.ident_tok)) |ident_idx| { + // Create a Pattern node for our mutable identifier + const pattern_idx = try self.env.addPattern(Pattern{ .assign = .{ + .ident = ident_idx, + } }, region); + + // Introduce the var with function boundary tracking (using scopeIntroduceVar) + _ = try self.scopeIntroduceVar(ident_idx, pattern_idx, region, true, Pattern.Idx); + + return pattern_idx; + } else { + const feature = try self.env.insertString("report an error when unable to resolve identifier"); + const malformed_idx = try self.env.pushMalformed(Pattern.Idx, Diagnostic{ .not_implemented = .{ + .feature = feature, + .region = Region.zero(), + } }); + return malformed_idx; + } + }, .underscore => |p| { const region = self.parse_ir.tokenizedRegionToRegion(p.region); const underscore_pattern = Pattern{ diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index b06dd6f616..6b998ff5b3 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -497,6 +497,10 @@ pub const Interpreter = struct { /// Evaluates a Roc expression and returns the result. pub fn eval(self: *Interpreter, expr_idx: can.CIR.Expr.Idx, roc_ops: *RocOps) Error!StackValue { + // Clear flex_type_context at the start of each top-level evaluation. + // This prevents stale type mappings from previous evaluations from + // interfering with polymorphic function instantiation. 
+ self.flex_type_context.clearRetainingCapacity(); return try self.evalWithExpectedType(expr_idx, roc_ops, null); } @@ -5772,6 +5776,7 @@ pub const Interpreter = struct { nom.ident.ident_idx, self.root_env.idents.is_eq, roc_ops, + lhs.rt_var, ) catch |err| { // If method lookup fails, we can't compare this type if (err == error.MethodLookupFailed) { @@ -6233,6 +6238,7 @@ pub const Interpreter = struct { nom.ident.ident_idx, self.env.idents.to_inspect, roc_ops, + rt_var, ) catch return null; const method_func = maybe_method orelse return null; @@ -6845,6 +6851,7 @@ pub const Interpreter = struct { nominal_ident: base_pkg.Ident.Idx, method_name_ident: base_pkg.Ident.Idx, roc_ops: *RocOps, + receiver_rt_var: ?types.Var, ) Error!StackValue { // Get the module environment for this type's origin const origin_env = self.getModuleEnvForOrigin(origin_module) orelse { @@ -6890,6 +6897,31 @@ pub const Interpreter = struct { self.bindings.items.len = saved_bindings_len; } + // Propagate receiver type to flex_type_context BEFORE translating the method's type. + // This ensures that polymorphic methods like `to` have their type parameters mapped + // to the correct concrete type (e.g., U8) before the closure is created. 
+ if (receiver_rt_var) |recv_rt_var| { + const def_ct_var = can.ModuleEnv.varFrom(target_def_idx); + const def_resolved = origin_env.types.resolveVar(def_ct_var); + + // If the method has a function type, extract its first parameter type + // and propagate mappings from the receiver type to it + if (def_resolved.desc.content == .structure) { + const flat = def_resolved.desc.content.structure; + switch (flat) { + .fn_pure, .fn_effectful, .fn_unbound => |fn_type| { + const param_vars = origin_env.types.sliceVars(fn_type.args); + if (param_vars.len > 0) { + // The first parameter is the receiver type (e.g., Num a) + // Propagate mappings from the concrete receiver to this type + try self.propagateFlexMappings(@constCast(origin_env), param_vars[0], recv_rt_var); + } + }, + else => {}, + } + } + } + // Translate the def's type var to runtime const def_var = can.ModuleEnv.varFrom(target_def_idx); const rt_def_var = try self.translateTypeVar(@constCast(origin_env), def_var); @@ -6908,6 +6940,7 @@ pub const Interpreter = struct { nominal_ident: base_pkg.Ident.Idx, method_name_ident: base_pkg.Ident.Idx, roc_ops: *RocOps, + receiver_rt_var: ?types.Var, ) Error!?StackValue { // Get the module environment for this type's origin const origin_env = self.getModuleEnvForOrigin(origin_module) orelse { @@ -6952,6 +6985,31 @@ pub const Interpreter = struct { self.bindings.items.len = saved_bindings_len; } + // Propagate receiver type to flex_type_context BEFORE translating the method's type. + // This ensures that polymorphic methods have their type parameters mapped + // to the correct concrete type before the closure is created. 
+ if (receiver_rt_var) |recv_rt_var| { + const def_ct_var = can.ModuleEnv.varFrom(target_def_idx); + const def_resolved = origin_env.types.resolveVar(def_ct_var); + + // If the method has a function type, extract its first parameter type + // and propagate mappings from the receiver type to it + if (def_resolved.desc.content == .structure) { + const flat = def_resolved.desc.content.structure; + switch (flat) { + .fn_pure, .fn_effectful, .fn_unbound => |fn_type| { + const param_vars = origin_env.types.sliceVars(fn_type.args); + if (param_vars.len > 0) { + // The first parameter is the receiver type (e.g., Num a) + // Propagate mappings from the concrete receiver to this type + try self.propagateFlexMappings(@constCast(origin_env), param_vars[0], recv_rt_var); + } + }, + else => {}, + } + } + } + // Translate the def's type var to runtime const def_var = can.ModuleEnv.varFrom(target_def_idx); const rt_def_var = try self.translateTypeVar(@constCast(origin_env), def_var); @@ -7069,10 +7127,41 @@ pub const Interpreter = struct { try self.ensureVarLayoutCapacity(idx + 1); const slot_ptr = &self.var_to_layout_slot.items[idx]; - // If we have a flex var, default it to Dec - // This is the interpreter-time defaulting for numeric literals + // If we have a flex var, check if we have a mapping in flex_type_context + // This handles polymorphic functions where the type parameter needs to be resolved if (resolved.desc.content == .flex) { - // Directly return Dec's scalar layout + // Try to find a mapping for this flex var from any entry in flex_type_context + // Since this is a runtime flex var, we need to check if any context entry + // maps to a concrete type that we can use + if (self.flex_type_context.count() > 0) { + var it = self.flex_type_context.iterator(); + var first_rt_var: ?types.Var = null; + var all_same = true; + while (it.next()) |entry| { + const rt_var = entry.value_ptr.*; + const rt_resolved = self.runtime_types.resolveVar(rt_var); + // Only consider non-flex 
entries as candidates + if (rt_resolved.desc.content != .flex) { + if (first_rt_var) |first| { + const first_resolved = self.runtime_types.resolveVar(first); + if (first_resolved.var_ != rt_resolved.var_) { + all_same = false; + break; + } + } else { + first_rt_var = rt_var; + } + } + } + if (all_same) { + if (first_rt_var) |concrete_rt_var| { + // Recurse with the concrete type + return try self.getRuntimeLayout(concrete_rt_var); + } + } + } + + // Default to Dec for unresolved flex vars const dec_layout = layout.Layout.frac(types.Frac.Precision.dec); const dec_layout_idx = try self.runtime_layout_store.insertLayout(dec_layout); slot_ptr.* = @intFromEnum(dec_layout_idx) + 1; @@ -7331,6 +7420,104 @@ pub const Interpreter = struct { } } + /// Propagate flex type context mappings by walking compile-time and runtime types in parallel. + /// This is used when entering polymorphic functions to map flex vars in the function's type + /// to their concrete runtime types based on the arguments. + /// + /// For example, if CT type is `Num a` and RT type is `U8`, we need to extract `a` and map it to U8. + /// This ensures that when we later encounter just `a` (e.g., in `List a` for an empty list), + /// we can find the mapping. + fn propagateFlexMappings(self: *Interpreter, module: *can.ModuleEnv, ct_var: types.Var, rt_var: types.Var) Error!void { + const ct_resolved = module.types.resolveVar(ct_var); + const rt_resolved = self.runtime_types.resolveVar(rt_var); + + // If the CT type is a flex var, add the mapping directly + if (ct_resolved.desc.content == .flex) { + const flex_key = ModuleVarKey{ .module = module, .var_ = ct_resolved.var_ }; + try self.flex_type_context.put(flex_key, rt_var); + return; + } + + // If the CT type is a rigid var, also add to flex_type_context. 
+ // This is needed because: in polymorphic functions, the parameter type might be rigid + // (from the function signature), but flex vars inside the function body were unified + // with this rigid var at compile time. After serialization, these unifications might + // not be preserved, so we need to map both the rigid var and any flex vars that might + // be looking for it. + if (ct_resolved.desc.content == .rigid) { + const flex_key = ModuleVarKey{ .module = module, .var_ = ct_resolved.var_ }; + try self.flex_type_context.put(flex_key, rt_var); + return; + } + + // If the CT type is a structure, walk its children and propagate recursively + if (ct_resolved.desc.content == .structure) { + const ct_flat = ct_resolved.desc.content.structure; + + switch (ct_flat) { + .nominal_type => |ct_nom| { + // For nominal types like `Num a`, extract the type args and map them + const ct_args = module.types.sliceNominalArgs(ct_nom); + + // If the RT type is also a nominal type, try to match up the args + if (rt_resolved.desc.content == .structure) { + if (rt_resolved.desc.content.structure == .nominal_type) { + const rt_nom = rt_resolved.desc.content.structure.nominal_type; + const rt_args = self.runtime_types.sliceNominalArgs(rt_nom); + + const min_args = @min(ct_args.len, rt_args.len); + for (0..min_args) |i| { + try self.propagateFlexMappings(module, ct_args[i], rt_args[i]); + } + + // If CT has more args than RT (common case: CT is `Num a` but RT is `U8` with no args), + // we need to map those CT args to the RT type itself. + // This handles the case where `Num a` in CT should map `a` to U8. 
+ if (ct_args.len > rt_args.len) { + for (rt_args.len..ct_args.len) |i| { + try self.propagateFlexMappings(module, ct_args[i], rt_var); + } + } + } + } + }, + .tuple => |ct_tuple| { + if (rt_resolved.desc.content == .structure and rt_resolved.desc.content.structure == .tuple) { + const ct_elems = module.types.sliceVars(ct_tuple.elems); + const rt_tuple = rt_resolved.desc.content.structure.tuple; + const rt_elems = self.runtime_types.sliceVars(rt_tuple.elems); + + const min_elems = @min(ct_elems.len, rt_elems.len); + for (0..min_elems) |i| { + try self.propagateFlexMappings(module, ct_elems[i], rt_elems[i]); + } + } + }, + .fn_pure, .fn_effectful, .fn_unbound => { + // Function type propagation is complex - skip for now + // The main use case we need is nominal types like `Num a` + }, + .tag_union => { + // Tag union propagation is complex - skip for now + // This case is less common for the numeric range use case we're fixing + }, + .record => { + // Record propagation is complex - skip for now + // This case is less common for the numeric range use case we're fixing + }, + else => { + // For other structure types, no recursive propagation needed + }, + } + } + + // Also add a mapping for the outer type itself (in case it's referenced directly) + if (ct_resolved.desc.content == .flex or ct_resolved.desc.content == .rigid) { + const flex_key = ModuleVarKey{ .module = module, .var_ = ct_resolved.var_ }; + try self.flex_type_context.put(flex_key, rt_var); + } + } + /// Translate a compile-time type variable from a module's type store to the runtime type store. /// Handles most structural types: tag unions, tuples, records, functions, and nominal types. /// Uses caching to handle recursive types and avoid duplicate work. @@ -7339,19 +7526,29 @@ pub const Interpreter = struct { const key = ModuleVarKey{ .module = module, .var_ = resolved.var_ }; - // Check flex_type_context BEFORE translate_cache for flex types. 
- // This is critical for polymorphic functions: the same compile-time flex var + // Check flex_type_context BEFORE translate_cache for flex and rigid types. + // This is critical for polymorphic functions: the same compile-time flex/rigid var // may need to translate to different runtime types depending on calling context. // For example, `sum = |num| 0 + num` called as U64.to_str(sum(2400)) needs // the literal 0 to become U64, not the cached Dec default. - if (resolved.desc.content == .flex) { + if (resolved.desc.content == .flex or resolved.desc.content == .rigid) { if (self.flex_type_context.get(key)) |context_rt_var| { return context_rt_var; } } - if (self.translate_cache.get(key)) |found| { - return found; + // Skip translate_cache for flex/rigid vars when inside a polymorphic function. + // The cache may have stale mappings from a different calling context where the + // flex var defaulted to Dec, but we now have a concrete type from flex_type_context. + // We check if flex_type_context has ANY entries as a proxy for "inside polymorphic call". + const in_polymorphic_context = self.flex_type_context.count() > 0; + const skip_cache_for_this_var = in_polymorphic_context and + (resolved.desc.content == .flex or resolved.desc.content == .rigid); + + if (!skip_cache_for_this_var) { + if (self.translate_cache.get(key)) |found| { + return found; + } } // Insert a placeholder to break cycles during recursive type translation. @@ -7585,7 +7782,50 @@ pub const Interpreter = struct { .flex => |flex| { // Note: flex_type_context is checked at the top of translateTypeVar, // before the translate_cache lookup. If we reach here, there was no - // contextual override, so we create a fresh flex var. + // contextual override. + // + // However, if we're in a polymorphic function context (flex_type_context is non-empty) + // and there's exactly one mapping, we should use it. 
This handles the case where + // a flex var inside a function body (e.g., the element type of an empty list) + // was unified with the function's type parameter at compile time, but the + // union-find structure wasn't preserved during serialization. + // + // For example, in `range_to = |current, end| { var answer = [] ... }`: + // - The function has type `Num a, Num a -> List (Num a)` with rigid `a` + // - The empty list `[]` has element type `Num flex_b` where `flex_b` was unified with `a` + // - After serialization, `flex_b` and `a` are different vars + // - If we mapped `a -> U8` from the call arguments, we should use U8 for `flex_b` too + // + // Check if all entries in flex_type_context map to the same runtime type. + // This handles the case where multiple var entries exist (e.g., from parameters + // and internal type vars) but they all represent the same type parameter. + const ctx_count = self.flex_type_context.count(); + if (ctx_count > 0) { + var it = self.flex_type_context.iterator(); + var first_rt_var: ?types.Var = null; + var all_same = true; + while (it.next()) |entry| { + const rt_var = entry.value_ptr.*; + if (first_rt_var) |first| { + // Check if this entry maps to the same runtime type + // by comparing the resolved root var + const first_resolved = self.runtime_types.resolveVar(first); + const this_resolved = self.runtime_types.resolveVar(rt_var); + // If they resolve to the same root var, they're the same type + if (first_resolved.var_ != this_resolved.var_) { + all_same = false; + break; + } + } else { + first_rt_var = rt_var; + } + } + if (all_same) { + if (first_rt_var) |rt_var| { + break :blk rt_var; + } + } + } // Translate the flex's name from source module's ident store to runtime ident store (if present) const rt_name: ?base_pkg.Ident.Idx = if (flex.name) |name| blk_name: { @@ -10188,7 +10428,61 @@ pub const Interpreter = struct { const ct_var = can.ModuleEnv.varFrom(expr_idx); break :blk try self.translateTypeVar(self.env, ct_var); 
}; - const derived_layout = try self.getRuntimeLayout(rt_var); + + // Get the element type from the list type and use flex_type_context for it + const list_resolved = self.runtime_types.resolveVar(rt_var); + var final_rt_var = rt_var; + if (list_resolved.desc.content == .structure) { + if (list_resolved.desc.content.structure == .nominal_type) { + const list_nom = list_resolved.desc.content.structure.nominal_type; + const list_args = self.runtime_types.sliceNominalArgs(list_nom); + if (list_args.len > 0) { + const elem_var = list_args[0]; + const elem_resolved = self.runtime_types.resolveVar(elem_var); + // If element type is a flex var and we have mappings, use the mapped type + if (elem_resolved.desc.content == .flex and self.flex_type_context.count() > 0) { + var it = self.flex_type_context.iterator(); + var first_concrete: ?types.Var = null; + var all_same = true; + while (it.next()) |entry| { + const mapped_var = entry.value_ptr.*; + const mapped_resolved = self.runtime_types.resolveVar(mapped_var); + if (mapped_resolved.desc.content != .flex) { + if (first_concrete) |first| { + const first_resolved = self.runtime_types.resolveVar(first); + if (first_resolved.var_ != mapped_resolved.var_) { + all_same = false; + break; + } + } else { + first_concrete = mapped_var; + } + } + } + if (all_same) { + if (first_concrete) |concrete_elem_var| { + // Create a new List type with the concrete element type + // Get the backing var from the original list type + const backing_var = self.runtime_types.getNominalBackingVar(list_nom); + // Create new nominal content + const args = [_]types.Var{concrete_elem_var}; + const new_list_content = self.runtime_types.mkNominal( + list_nom.ident, + backing_var, + &args, + list_nom.origin_module, + list_nom.is_opaque, + ) catch unreachable; + // Create a new Var from that content + final_rt_var = self.runtime_types.freshFromContent(new_list_content) catch unreachable; + } + } + } + } + } + } + + const derived_layout = try 
self.getRuntimeLayout(final_rt_var); // Ensure we have a proper list layout even if the type variable defaulted to Dec. const list_layout = if (derived_layout.tag == .list or derived_layout.tag == .list_of_zst) @@ -10634,11 +10928,9 @@ pub const Interpreter = struct { // Re-evaluate the numeric expression with the expected type. // Set up flex_type_context so flex vars in the expression // translate to the expected type instead of defaulting to Dec. - const saved_flex_ctx = try self.flex_type_context.clone(); - defer { - self.flex_type_context.deinit(); - self.flex_type_context = saved_flex_ctx; - } + // Note: We no longer save/restore flex_type_context here because + // the type mappings need to persist across the call chain for + // polymorphic functions from pre-compiled modules like Builtin. try self.setupFlexContextForNumericExpr(root_expr_idx, b.source_env, exp_var); const result = try self.evalWithExpectedType(root_expr_idx, roc_ops, exp_var); @@ -12034,6 +12326,7 @@ pub const Interpreter = struct { nom.ident.ident_idx, self.env.idents.to_inspect, roc_ops, + ir.inner_rt_var, ), else => null, } @@ -12469,9 +12762,9 @@ pub const Interpreter = struct { // Only add mapping if the argument has a concrete type (structure) if (arg_rt_resolved.desc.content == .structure) { const param_ct_var = can.ModuleEnv.varFrom(param); - const param_resolved = self.env.types.resolveVar(param_ct_var); - const flex_key = ModuleVarKey{ .module = self.env, .var_ = param_resolved.var_ }; - try self.flex_type_context.put(flex_key, vars[idx]); + // Propagate flex mappings from the compile-time type to runtime type. + // This walks both types in parallel and maps any flex vars found in CT to their RT counterparts. 
+ try self.propagateFlexMappings(self.env, param_ct_var, vars[idx]); } } } @@ -12548,10 +12841,10 @@ pub const Interpreter = struct { self.rigid_subst = saved; } - // Restore flex_type_context if we added parameter type mappings + // Note: Don't restore flex_type_context (same rationale as normal return case) if (cleanup.saved_flex_type_context) |saved| { - self.flex_type_context.deinit(); - self.flex_type_context = saved; + var saved_copy = saved; + saved_copy.deinit(); } // Restore environment and cleanup bindings @@ -12581,10 +12874,21 @@ pub const Interpreter = struct { self.rigid_subst = saved; } - // Restore flex_type_context if we added parameter type mappings + // Note: We intentionally do NOT restore flex_type_context here. + // The type mappings need to persist across the call chain for polymorphic + // functions from pre-compiled modules like Builtin. When a function returns + // a value that is used in subsequent calls (e.g., method dispatch returning + // a closure that is then invoked), those later calls need the type mappings + // from the original call arguments. + // + // The mappings are keyed by compile-time type vars, so mappings from different + // call sites with different type vars won't conflict. For the same polymorphic + // function called multiple times with different concrete types, the later call + // will overwrite the mapping with the new concrete type, which is correct. 
if (cleanup.saved_flex_type_context) |saved| { - self.flex_type_context.deinit(); - self.flex_type_context = saved; + // Just free the saved context, don't restore it + var saved_copy = saved; + saved_copy.deinit(); } // Restore environment and cleanup bindings @@ -12630,6 +12934,7 @@ pub const Interpreter = struct { nominal_info.ident, ua.method_ident, roc_ops, + ua.operand_rt_var, ); defer method_func.decref(&self.runtime_layout_store, roc_ops); @@ -12795,6 +13100,7 @@ pub const Interpreter = struct { nominal_info.?.ident, ba.method_ident, roc_ops, + ba.receiver_rt_var, ); defer method_func.decref(&self.runtime_layout_store, roc_ops); @@ -12955,6 +13261,7 @@ pub const Interpreter = struct { nominal_info.ident, da.field_name, roc_ops, + effective_receiver_rt_var, ) catch |err| { receiver_value.decref(&self.runtime_layout_store, roc_ops); if (err == error.MethodLookupFailed) { @@ -13571,6 +13878,7 @@ pub const Interpreter = struct { nom.ident.ident_idx, self.env.idents.to_inspect, roc_ops, + ir.rt_var, ), else => null, } diff --git a/src/fmt/fmt.zig b/src/fmt/fmt.zig index 090d899e67..ad464ca27a 100644 --- a/src/fmt/fmt.zig +++ b/src/fmt/fmt.zig @@ -1319,6 +1319,11 @@ const Formatter = struct { region = i.region; try fmt.formatIdent(i.ident_tok, null); }, + .var_ident => |i| { + region = i.region; + try fmt.pushAll("var "); + try fmt.formatIdent(i.ident_tok, null); + }, .tag => |t| { region = t.region; diff --git a/src/parse/AST.zig b/src/parse/AST.zig index c6cf7e1730..f4df949655 100644 --- a/src/parse/AST.zig +++ b/src/parse/AST.zig @@ -1242,6 +1242,11 @@ pub const Pattern = union(enum) { ident_tok: Token.Idx, region: TokenizedRegion, }, + /// A mutable variable binding in a pattern, e.g., `var $x` in `|var $x, y|` + var_ident: struct { + ident_tok: Token.Idx, + region: TokenizedRegion, + }, tag: struct { tag_tok: Token.Idx, args: Pattern.Span, @@ -1305,6 +1310,7 @@ pub const Pattern = union(enum) { pub fn to_tokenized_region(self: @This()) TokenizedRegion { 
return switch (self) { .ident => |p| p.region, + .var_ident => |p| p.region, .tag => |p| p.region, .int => |p| p.region, .frac => |p| p.region, @@ -1339,6 +1345,21 @@ pub const Pattern = union(enum) { try tree.endNode(begin, attrs); }, + .var_ident => |ident| { + const begin = tree.beginNode(); + try tree.pushStaticAtom("p-var-ident"); + try ast.appendRegionInfoToSexprTree(env, tree, ident.region); + + // Add raw attribute + const raw_begin = tree.beginNode(); + try tree.pushStaticAtom("raw"); + try tree.pushString(ast.resolve(ident.ident_tok)); + const attrs2 = tree.beginNode(); + try tree.endNode(raw_begin, attrs2); + const attrs = tree.beginNode(); + + try tree.endNode(begin, attrs); + }, .tag => |tag| { const begin = tree.beginNode(); try tree.pushStaticAtom("p-tag"); diff --git a/src/parse/Node.zig b/src/parse/Node.zig index cf716a5753..2590c7c636 100644 --- a/src/parse/Node.zig +++ b/src/parse/Node.zig @@ -250,6 +250,10 @@ pub const Tag = enum { /// * lhs - LHS DESCRIPTION /// * rhs - RHS DESCRIPTION ident_patt, + /// Mutable variable binding in pattern + /// Example: `var $x` in `|var $x, y|` + /// * main_token - the identifier token + var_ident_patt, /// DESCRIPTION /// Example: EXAMPLE /// * lhs - LHS DESCRIPTION diff --git a/src/parse/NodeStore.zig b/src/parse/NodeStore.zig index 864762525e..98e82407b1 100644 --- a/src/parse/NodeStore.zig +++ b/src/parse/NodeStore.zig @@ -46,7 +46,7 @@ pub const AST_HEADER_NODE_COUNT = 6; /// Count of the statement nodes in the AST pub const AST_STATEMENT_NODE_COUNT = 13; /// Count of the pattern nodes in the AST -pub const AST_PATTERN_NODE_COUNT = 14; +pub const AST_PATTERN_NODE_COUNT = 15; /// Count of the type annotation nodes in the AST pub const AST_TYPE_ANNO_NODE_COUNT = 10; /// Count of the expression nodes in the AST @@ -478,6 +478,11 @@ pub fn addPattern(store: *NodeStore, pattern: AST.Pattern) std.mem.Allocator.Err node.region = i.region; node.main_token = i.ident_tok; }, + .var_ident => |i| { + node.tag = 
.var_ident_patt; + node.region = i.region; + node.main_token = i.ident_tok; + }, .tag => |t| { const data_start = @as(u32, @intCast(store.extra_data.items.len)); try store.extra_data.append(store.gpa, t.args.span.len); @@ -1387,6 +1392,12 @@ pub fn getPattern(store: *const NodeStore, pattern_idx: AST.Pattern.Idx) AST.Pat .region = node.region, } }; }, + .var_ident_patt => { + return .{ .var_ident = .{ + .ident_tok = node.main_token, + .region = node.region, + } }; + }, .tag_patt => { const args_start = node.data.lhs; diff --git a/src/parse/Parser.zig b/src/parse/Parser.zig index 66d9a4446b..73cb4046ae 100644 --- a/src/parse/Parser.zig +++ b/src/parse/Parser.zig @@ -1452,6 +1452,19 @@ pub fn parsePattern(self: *Parser, alternatives: Alternatives) Error!AST.Pattern .region = .{ .start = start, .end = self.pos }, } }); }, + .KwVar => { + // Mutable variable binding in pattern, e.g., `var $x` + self.advance(); + if (self.peek() != .LowerIdent) { + return try self.pushMalformed(AST.Pattern.Idx, .var_must_have_ident, self.pos); + } + const ident_tok = self.pos; + self.advance(); + pattern = try self.store.addPattern(.{ .var_ident = .{ + .ident_tok = ident_tok, + .region = .{ .start = start, .end = self.pos }, + } }); + }, .NamedUnderscore => { self.advance(); pattern = try self.store.addPattern(.{ .ident = .{ diff --git a/src/types/generalize.zig b/src/types/generalize.zig index ae520abb2b..ff4609110d 100644 --- a/src/types/generalize.zig +++ b/src/types/generalize.zig @@ -205,12 +205,16 @@ pub const Generalizer = struct { if (@intFromEnum(resolved.desc.rank) < rank_to_generalize_int) { // Rank was lowered during adjustment - variable escaped try var_pool.addVarToRank(resolved.var_, resolved.desc.rank); - } else if (self.hasNumeralConstraint(resolved.desc.content)) { - // Flex var with numeric constraint - don't generalize. 
+ } else if (rank_to_generalize_int == @intFromEnum(Rank.top_level) and self.hasNumeralConstraint(resolved.desc.content)) { + // Flex var with numeric constraint at TOP LEVEL - don't generalize. // This ensures numeric literals like `x = 15` stay monomorphic so that // later usage like `I64.to_str(x)` can constrain x to I64. // Without this, let-generalization would create a fresh copy at each use, // leaving the original as an unconstrained flex var that defaults to Dec. + // + // However, inside lambdas (rank > top_level), we DO generalize numeric + // literals so that polymorphic functions like `|a| a + 1` work correctly. + // The numeric literal takes on the type of the function parameter. try var_pool.addVarToRank(resolved.var_, resolved.desc.rank); } else { // Rank unchanged - safe to generalize diff --git a/test/snapshots/repl/u8_range_to.md b/test/snapshots/repl/u8_range_to.md new file mode 100644 index 0000000000..70f07c3c46 --- /dev/null +++ b/test/snapshots/repl/u8_range_to.md @@ -0,0 +1,19 @@ +# META +~~~ini +description=U8.to - creates a list of integers from start to end (inclusive) +type=repl +~~~ +# SOURCE +~~~roc +» 1u8.to(5u8) +» 0u8.to(0u8) +» 5u8.to(3u8) +~~~ +# OUTPUT +[1, 2, 3, 4, 5] +--- +[0] +--- +[] +# PROBLEMS +NIL diff --git a/test/snapshots/repl/u8_range_until.md b/test/snapshots/repl/u8_range_until.md new file mode 100644 index 0000000000..ff3ad487c9 --- /dev/null +++ b/test/snapshots/repl/u8_range_until.md @@ -0,0 +1,19 @@ +# META +~~~ini +description=U8.until - creates a list of integers from start to end (exclusive) +type=repl +~~~ +# SOURCE +~~~roc +» 0u8.until(3u8) +» 1u8.until(1u8) +» 5u8.until(3u8) +~~~ +# OUTPUT +[0, 1, 2] +--- +[] +--- +[] +# PROBLEMS +NIL diff --git a/test/snapshots/repl/var_in_lambda_param.md b/test/snapshots/repl/var_in_lambda_param.md new file mode 100644 index 0000000000..8f44f44222 --- /dev/null +++ b/test/snapshots/repl/var_in_lambda_param.md @@ -0,0 +1,17 @@ +# META +~~~ini +description=Test var in 
lambda parameters +type=repl +~~~ +# SOURCE +~~~roc +» f = |var $x, y| { $x = $x + y + $x } +» f(1, 2) +~~~ +# OUTPUT +assigned `f` +--- +3 +# PROBLEMS +NIL From a939fb4a062633dc51d2f0611f9c49783098e64e Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 13:13:05 -0500 Subject: [PATCH 03/64] Generalize numbers inside lambdas --- src/check/test/type_checking_integration.zig | 7 ++++--- src/parse/test/ast_node_store_test.zig | 6 ++++++ test/snapshots/repl/numeric_multiple_diff_types.md | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/check/test/type_checking_integration.zig b/src/check/test/type_checking_integration.zig index 56f56ef832..7147602e8c 100644 --- a/src/check/test/type_checking_integration.zig +++ b/src/check/test/type_checking_integration.zig @@ -1346,9 +1346,10 @@ test "check type - expect" { \\ x \\} ; - // With no let-generalization for numeric flex vars, the `x == 1` comparison - // adds an is_eq constraint to x (since x is not generalized and remains monomorphic) - try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.is_eq : a, a -> Bool, a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]"); + // Inside lambdas, numeric flex vars ARE generalized (to support polymorphic functions). + // Each use of `x` gets a fresh instance, so constraints from `x == 1` don't + // propagate to the generalized type. Only `from_numeral` from the def is captured. 
+ try checkTypesModule(source, .{ .pass = .last_def }, "a where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]"); } test "check type - expect not bool" { diff --git a/src/parse/test/ast_node_store_test.zig b/src/parse/test/ast_node_store_test.zig index ce0dd9f48d..5d27c011e7 100644 --- a/src/parse/test/ast_node_store_test.zig +++ b/src/parse/test/ast_node_store_test.zig @@ -281,6 +281,12 @@ test "NodeStore round trip - Pattern" { .region = rand_region(), }, }); + try patterns.append(gpa, AST.Pattern{ + .var_ident = .{ + .ident_tok = rand_token_idx(), + .region = rand_region(), + }, + }); try patterns.append(gpa, AST.Pattern{ .tag = .{ .args = AST.Pattern.Span{ .span = rand_span() }, diff --git a/test/snapshots/repl/numeric_multiple_diff_types.md b/test/snapshots/repl/numeric_multiple_diff_types.md index aa43546c09..a41bc4869a 100644 --- a/test/snapshots/repl/numeric_multiple_diff_types.md +++ b/test/snapshots/repl/numeric_multiple_diff_types.md @@ -1,6 +1,6 @@ # META ~~~ini -description=Numeric without annotation, multiple uses with different types (produces type error) +description=Numeric without annotation, multiple uses with different types (each use gets fresh type) type=repl ~~~ # SOURCE @@ -17,6 +17,6 @@ assigned `a` --- assigned `b` --- -TYPE MISMATCH +"4242.0" # PROBLEMS NIL From 20dec64cb054dc2e2ed91ab471da327bb661f730 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 15:27:09 -0500 Subject: [PATCH 04/64] Add infinite loop guards in debug builds --- src/check/Check.zig | 4 +++ src/check/unify.zig | 4 +++ src/eval/interpreter.zig | 37 ++-------------------- src/types/TypeWriter.zig | 3 ++ src/types/debug.zig | 67 ++++++++++++++++++++++++++++++++++++++++ src/types/mod.zig | 1 + src/types/store.zig | 5 +++ 7 files changed, 87 insertions(+), 34 deletions(-) create mode 100644 src/types/debug.zig diff --git a/src/check/Check.zig b/src/check/Check.zig index dfb97b84ca..c5021b29b7 100644 --- a/src/check/Check.zig +++ 
b/src/check/Check.zig @@ -3111,7 +3111,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected) // Here, we unwrap the function, following aliases, to get // the actual function we want to check against var var_ = expected_var; + var guard = types_mod.debug.IterationGuard.init("checkExpr.lambda.unwrapExpectedFunc"); while (true) { + guard.tick(); switch (self.types.resolveVar(var_).desc.content) { .structure => |flat_type| { switch (flat_type) { @@ -3306,7 +3308,9 @@ fn checkExpr(self: *Self, expr_idx: CIR.Expr.Idx, env: *Env, expected: Expected) // Here, we unwrap the function, following aliases, to get // the actual function we want to check against var var_ = func_var; + var guard = types_mod.debug.IterationGuard.init("checkExpr.call.unwrapFuncVar"); while (true) { + guard.tick(); switch (self.types.resolveVar(var_).desc.content) { .structure => |flat_type| { switch (flat_type) { diff --git a/src/check/unify.zig b/src/check/unify.zig index 4cedef6710..fdfeee7954 100644 --- a/src/check/unify.zig +++ b/src/check/unify.zig @@ -1838,7 +1838,9 @@ const Unifier = struct { // then recursiv var ext = record_ext; + var guard = types_mod.debug.IterationGuard.init("gatherRecordFields"); while (true) { + guard.tick(); switch (ext) { .unbound => { return .{ .ext = ext, .range = range }; @@ -2288,7 +2290,9 @@ const Unifier = struct { // then loop gathering extensible tags var ext_var = tag_union.ext; + var guard = types_mod.debug.IterationGuard.init("gatherTagUnionTags"); while (true) { + guard.tick(); switch (self.types_store.resolveVar(ext_var).desc.content) { .flex => { return .{ .ext = ext_var, .range = range }; diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 6b998ff5b3..a0e1a8680f 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -7127,41 +7127,10 @@ pub const Interpreter = struct { try self.ensureVarLayoutCapacity(idx + 1); const slot_ptr = &self.var_to_layout_slot.items[idx]; - // If we have a 
flex var, check if we have a mapping in flex_type_context - // This handles polymorphic functions where the type parameter needs to be resolved + // If we have a flex var, default to Dec. + // Note: flex_type_context mappings are handled in translateTypeVar, not here. + // This function receives runtime type vars that should already be resolved. if (resolved.desc.content == .flex) { - // Try to find a mapping for this flex var from any entry in flex_type_context - // Since this is a runtime flex var, we need to check if any context entry - // maps to a concrete type that we can use - if (self.flex_type_context.count() > 0) { - var it = self.flex_type_context.iterator(); - var first_rt_var: ?types.Var = null; - var all_same = true; - while (it.next()) |entry| { - const rt_var = entry.value_ptr.*; - const rt_resolved = self.runtime_types.resolveVar(rt_var); - // Only consider non-flex entries as candidates - if (rt_resolved.desc.content != .flex) { - if (first_rt_var) |first| { - const first_resolved = self.runtime_types.resolveVar(first); - if (first_resolved.var_ != rt_resolved.var_) { - all_same = false; - break; - } - } else { - first_rt_var = rt_var; - } - } - } - if (all_same) { - if (first_rt_var) |concrete_rt_var| { - // Recurse with the concrete type - return try self.getRuntimeLayout(concrete_rt_var); - } - } - } - - // Default to Dec for unresolved flex vars const dec_layout = layout.Layout.frac(types.Frac.Precision.dec); const dec_layout_idx = try self.runtime_layout_store.insertLayout(dec_layout); slot_ptr.* = @intFromEnum(dec_layout_idx) + 1; diff --git a/src/types/TypeWriter.zig b/src/types/TypeWriter.zig index 1d87ff5b15..8607fc0e63 100644 --- a/src/types/TypeWriter.zig +++ b/src/types/TypeWriter.zig @@ -9,6 +9,7 @@ const std = @import("std"); const base = @import("base"); const types_mod = @import("types.zig"); const import_mapping_mod = @import("import_mapping.zig"); +const debug = @import("debug.zig"); const TypesStore = @import("store.zig").Store; 
const Allocator = std.mem.Allocator; @@ -610,7 +611,9 @@ fn gatherRecordFields(self: *TypeWriter, fields: RecordField.SafeMultiList.Range } var ext = initial_ext; + var guard = debug.IterationGuard.init("TypeWriter.gatherRecordFields"); while (true) { + guard.tick(); const resolved = self.types.resolveVar(ext); switch (resolved.desc.content) { .flex => |flex| { diff --git a/src/types/debug.zig b/src/types/debug.zig new file mode 100644 index 0000000000..c3d064d364 --- /dev/null +++ b/src/types/debug.zig @@ -0,0 +1,67 @@ +//! Debug utilities for type checking +//! +//! These utilities are only active in debug builds and help catch infinite loops +//! in type-checking code by limiting the number of iterations. + +const std = @import("std"); +const builtin = @import("builtin"); + +/// Maximum number of iterations before panicking in debug builds. +/// This is set high enough to handle legitimate complex types but low enough +/// to catch infinite loops quickly during development. +pub const MAX_ITERATIONS: u32 = 100_000; + +/// A debug-only iteration guard that panics if a loop exceeds MAX_ITERATIONS. +/// In release builds, this is a no-op. +/// +/// Usage: +/// ``` +/// var guard = IterationGuard.init("myFunction"); +/// while (condition) { +/// guard.tick(); +/// // ... loop body +/// } +/// ``` +pub const IterationGuard = struct { + count: u32, + location: []const u8, + + const Self = @This(); + + pub fn init(location: []const u8) Self { + return .{ + .count = 0, + .location = location, + }; + } + + /// Call this at the start of each loop iteration. + /// In debug builds, panics if MAX_ITERATIONS is exceeded. + /// In release builds, this is a no-op that should be optimized away. + pub inline fn tick(self: *Self) void { + if (builtin.mode == .Debug) { + self.count += 1; + if (self.count > MAX_ITERATIONS) { + std.debug.panic( + "Infinite loop detected in type-checking at '{s}' after {d} iterations. 
" ++ + "This usually indicates a cyclic type or bug in the type checker.", + .{ self.location, self.count }, + ); + } + } + } + + /// Returns the current iteration count (useful for debugging). + pub fn getCount(self: *const Self) u32 { + return self.count; + } +}; + +test "IterationGuard does not panic for normal iteration counts" { + var guard = IterationGuard.init("test"); + var i: u32 = 0; + while (i < 1000) : (i += 1) { + guard.tick(); + } + try std.testing.expectEqual(@as(u32, 1000), guard.getCount()); +} diff --git a/src/types/mod.zig b/src/types/mod.zig index e66acb63f7..59eaeb2978 100644 --- a/src/types/mod.zig +++ b/src/types/mod.zig @@ -12,6 +12,7 @@ pub const store = @import("store.zig"); pub const instantiate = @import("instantiate.zig"); pub const generalize = @import("generalize.zig"); pub const import_mapping = @import("import_mapping.zig"); +pub const debug = @import("debug.zig"); pub const TypeWriter = @import("TypeWriter.zig"); diff --git a/src/types/store.zig b/src/types/store.zig index 201550f4f5..1e43d19dbe 100644 --- a/src/types/store.zig +++ b/src/types/store.zig @@ -7,6 +7,7 @@ const collections = @import("collections"); const serialization = @import("serialization"); const types = @import("types.zig"); +const debug = @import("debug.zig"); const Allocator = std.mem.Allocator; const Desc = types.Descriptor; @@ -589,7 +590,9 @@ pub const Store = struct { if (initial_var != redirected_root_var) { var compressed_slot_idx = Self.varToSlotIdx(initial_var); var compressed_slot: Slot = self.slots.get(compressed_slot_idx); + var guard = debug.IterationGuard.init("resolveVarAndCompressPath"); while (true) { + guard.tick(); switch (compressed_slot) { .redirect => |next_redirect_var| { self.slots.set(compressed_slot_idx, Slot{ .redirect = redirected_root_var }); @@ -611,8 +614,10 @@ pub const Store = struct { var redirected_slot: Slot = self.slots.get(redirected_slot_idx); var is_root = true; + var guard = debug.IterationGuard.init("resolveVar"); while 
(true) { + guard.tick(); switch (redirected_slot) { .redirect => |next_redirect_var| { redirected_slot_idx = Self.varToSlotIdx(next_redirect_var); From e54fe1381415e3a3aef7be2ee24020dc56853e4b Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 18:34:25 -0500 Subject: [PATCH 05/64] Properly fix some polymorphic vars --- src/eval/interpreter.zig | 265 +++++++++++++++++++++++++++++++++------ 1 file changed, 229 insertions(+), 36 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index a0e1a8680f..d081400b9e 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -5377,6 +5377,20 @@ pub const Interpreter = struct { return self.orderNumericValues(lhs_value, rhs_value); } + const CompareOp = enum { gt, gte, lt, lte, eq }; + + /// Compare two numeric values using the specified comparison operation + fn compareNumericValues(self: *Interpreter, lhs: StackValue, rhs: StackValue, op: CompareOp) !bool { + const order = try self.compareNumericScalars(lhs, rhs); + return switch (op) { + .gt => order == .gt, + .gte => order == .gt or order == .eq, + .lt => order == .lt, + .lte => order == .lt or order == .eq, + .eq => order == .eq, + }; + } + fn orderNumericValues(self: *Interpreter, lhs: NumericValue, rhs: NumericValue) !std.math.Order { return switch (lhs) { .int => self.orderInt(lhs.int, rhs), @@ -5852,7 +5866,9 @@ pub const Interpreter = struct { fn resolveBaseVar(self: *Interpreter, runtime_var: types.Var) types.store.ResolvedVarDesc { var current = self.runtime_types.resolveVar(runtime_var); + var guard = types.debug.IterationGuard.init("resolveBaseVar"); while (true) { + guard.tick(); switch (current.desc.content) { .alias => |al| { const backing = self.runtime_types.getAliasBackingVar(al); @@ -5875,10 +5891,14 @@ pub const Interpreter = struct { defer var_stack.deinit(); try var_stack.append(runtime_var); + var outer_guard = types.debug.IterationGuard.init("appendUnionTags.outer"); while (var_stack.items.len > 
0) { + outer_guard.tick(); const current_var = var_stack.pop().?; var resolved = self.runtime_types.resolveVar(current_var); + var inner_guard = types.debug.IterationGuard.init("appendUnionTags.expand"); expand: while (true) { + inner_guard.tick(); switch (resolved.desc.content) { .alias => |al| { const backing = self.runtime_types.getAliasBackingVar(al); @@ -6427,7 +6447,12 @@ pub const Interpreter = struct { switch (pat) { .assign => |_| { // Bind entire value to this pattern - const copied = try self.pushCopy(value, roc_ops); + var copied = try self.pushCopy(value, roc_ops); + // If the value doesn't have an rt_var (e.g., list elements from pattern matching), + // use the pattern's type. Otherwise preserve the value's original type. + if (copied.rt_var == null) { + copied.rt_var = value_rt_var; + } try out_binds.append(.{ .pattern_idx = pattern_idx, .value = copied, .expr_idx = expr_idx, .source_env = self.env }); return true; }, @@ -6438,7 +6463,11 @@ pub const Interpreter = struct { return false; } - const alias_value = try self.pushCopy(value, roc_ops); + var alias_value = try self.pushCopy(value, roc_ops); + // If the value doesn't have an rt_var, use the pattern's type + if (alias_value.rt_var == null) { + alias_value.rt_var = value_rt_var; + } try out_binds.append(.{ .pattern_idx = pattern_idx, .value = alias_value, .expr_idx = expr_idx, .source_env = self.env }); return true; }, @@ -6510,18 +6539,47 @@ pub const Interpreter = struct { // which may differ from the type system's layout if runtime defaulting occurred. 
const list_layout = value.layout; - const list_rt_var = try self.translateTypeVar(self.env, can.ModuleEnv.varFrom(pattern_idx)); - const list_rt_content = self.runtime_types.resolveVar(list_rt_var).desc.content; - std.debug.assert(list_rt_content == .structure); - std.debug.assert(list_rt_content.structure == .nominal_type); + // Check if the list value itself is polymorphic (from a polymorphic function) + const value_rt_resolved = self.runtime_types.resolveVar(value_rt_var); + const list_is_polymorphic = value_rt_resolved.desc.content == .flex or + value_rt_resolved.desc.content == .rigid; - // Extract the element type variable from the List type - // Note: nominal.vars contains [backing_var, elem_var] for List types - // where backing_var is the ProvidedByCompiler tag union, and elem_var is the element type - const nominal = list_rt_content.structure.nominal_type; - const vars = self.runtime_types.sliceVars(nominal.vars.nonempty); - std.debug.assert(vars.len == 2); // List has backing var + elem var - const elem_rt_var = vars[1]; + // Get element type from the list value's type if available, otherwise from the pattern + // Using the value's type preserves proper method bindings through polymorphic calls + const elem_rt_var: types.Var = if (list_is_polymorphic) blk: { + // List came from polymorphic context - create a fresh flex variable for elements + // so they maintain their polymorphic nature + break :blk try self.runtime_types.fresh(); + } else if (value_rt_resolved.desc.content == .structure and + value_rt_resolved.desc.content.structure == .nominal_type) + blk: { + // Use the element type from the list value's actual type + // This preserves method bindings through polymorphic function calls + const nominal = value_rt_resolved.desc.content.structure.nominal_type; + const vars = self.runtime_types.sliceVars(nominal.vars.nonempty); + if (vars.len == 2) { + break :blk vars[1]; // element type is second var + } + // Fallback to pattern translation if structure 
is unexpected + const list_rt_var = try self.translateTypeVar(self.env, can.ModuleEnv.varFrom(pattern_idx)); + const list_rt_content = self.runtime_types.resolveVar(list_rt_var).desc.content; + std.debug.assert(list_rt_content == .structure); + std.debug.assert(list_rt_content.structure == .nominal_type); + const nom = list_rt_content.structure.nominal_type; + const pattern_vars = self.runtime_types.sliceVars(nom.vars.nonempty); + std.debug.assert(pattern_vars.len == 2); + break :blk pattern_vars[1]; + } else blk: { + // Value's type is not a nominal List type - extract from pattern + const list_rt_var = try self.translateTypeVar(self.env, can.ModuleEnv.varFrom(pattern_idx)); + const list_rt_content = self.runtime_types.resolveVar(list_rt_var).desc.content; + std.debug.assert(list_rt_content == .structure); + std.debug.assert(list_rt_content.structure == .nominal_type); + const nominal = list_rt_content.structure.nominal_type; + const vars = self.runtime_types.sliceVars(nominal.vars.nonempty); + std.debug.assert(vars.len == 2); + break :blk vars[1]; + }; // Get element layout from the actual list layout, not from the type system. 
// The list's runtime layout may differ from the type system's expectation @@ -7114,9 +7172,12 @@ pub const Interpreter = struct { // Apply rigid variable substitution if this is a rigid variable // Follow the substitution chain until we reach a non-rigid variable or run out of substitutions - // Note: Cycles are prevented by unification, so this chain must terminate + // Use a counter to prevent infinite loops from cyclic substitutions + var count: u32 = 0; while (resolved.desc.content == .rigid) { if (self.rigid_subst.get(resolved.var_)) |substituted_var| { + count += 1; + if (count > 1000) break; // Prevent infinite loops resolved = self.runtime_types.resolveVar(substituted_var); } else { break; @@ -7891,9 +7952,13 @@ pub const Interpreter = struct { // Check if this variable has a substitution active (for generic function instantiation) const final_var = if (self.rigid_subst.get(out_var)) |substituted| blk: { - // Recursively check if the substituted variable also has a substitution + // Follow the substitution chain to find the final variable + // Use a counter to prevent infinite loops from cyclic substitutions var current = substituted; + var count: u32 = 0; while (self.rigid_subst.get(current)) |next_subst| { + count += 1; + if (count > 1000) break; // Prevent infinite loops current = next_subst; } break :blk current; @@ -7923,9 +7988,12 @@ pub const Interpreter = struct { } const instantiated = switch (resolved.desc.content) { - .rigid => blk: { + .rigid => |rigid| blk: { // Replace rigid with fresh flex that can be unified - const fresh = try self.runtime_types.fresh(); + // IMPORTANT: Copy the rigid's constraints so numeric constraints are preserved + const fresh = try self.runtime_types.freshFromContent(.{ + .flex = .{ .name = rigid.name, .constraints = rigid.constraints }, + }); try subst_map.put(resolved.var_, fresh); break :blk fresh; }, @@ -8059,7 +8127,9 @@ pub const Interpreter = struct { } var current_ext = tag_union.ext; + var guard = 
types.debug.IterationGuard.init("interpreter.gatherTags"); while (true) { + guard.tick(); const resolved_ext = module.types.resolveVar(current_ext); switch (resolved_ext.desc.content) { .structure => |ext_flat_type| { @@ -8187,11 +8257,14 @@ pub const Interpreter = struct { // Apply rigid substitutions to ret_var if needed // Follow the substitution chain until we reach a non-rigid variable or run out of substitutions - // Note: Cycles are prevented by unification, so this chain must terminate + // Use a counter to prevent infinite loops from cyclic substitutions var resolved_ret = self.runtime_types.resolveVar(ret_var); var substituted_ret = ret_var; + var ret_count: u32 = 0; while (resolved_ret.desc.content == .rigid) { if (self.rigid_subst.get(resolved_ret.var_)) |subst_var| { + ret_count += 1; + if (ret_count > 1000) break; // Prevent infinite loops substituted_ret = subst_var; resolved_ret = self.runtime_types.resolveVar(subst_var); } else { @@ -9541,11 +9614,12 @@ pub const Interpreter = struct { if (elems.len == 0) { // Empty list - create immediately const list_layout = try self.getRuntimeLayout(list_rt_var); - const dest = try self.pushRaw(list_layout, 0); + var dest = try self.pushRaw(list_layout, 0); if (dest.ptr != null) { const header: *RocList = @ptrCast(@alignCast(dest.ptr.?)); header.* = RocList.empty(); } + dest.rt_var = list_rt_var; try value_stack.push(dest); } else { // Get element type variable from first element @@ -10231,7 +10305,15 @@ pub const Interpreter = struct { const ct_var = can.ModuleEnv.varFrom(expr_idx); break :blk try self.translateTypeVar(self.env, ct_var); }; - const layout_val = try self.getRuntimeLayout(rt_var); + var layout_val = try self.getRuntimeLayout(rt_var); + + // If the layout isn't a numeric type (e.g., ZST from unconstrained flex/rigid), + // default to Dec since we're evaluating a numeric literal + const is_numeric_layout = layout_val.tag == .scalar and + (layout_val.data.scalar.tag == .int or 
layout_val.data.scalar.tag == .frac); + if (!is_numeric_layout) { + layout_val = layout.Layout.frac(types.Frac.Precision.dec); + } var value = try self.pushRaw(layout_val, 0); value.is_initialized = false; @@ -11604,11 +11686,12 @@ pub const Interpreter = struct { if (total_count == 0) { // Empty list (shouldn't happen as it's handled directly) const list_layout = try self.getRuntimeLayout(lc.list_rt_var); - const dest = try self.pushRaw(list_layout, 0); + var dest = try self.pushRaw(list_layout, 0); if (dest.ptr != null) { const header: *RocList = @ptrCast(@alignCast(dest.ptr.?)); header.* = RocList.empty(); } + dest.rt_var = lc.list_rt_var; try value_stack.push(dest); } else { // Pop all collected values from the value stack @@ -11670,7 +11753,10 @@ pub const Interpreter = struct { val.decref(&self.runtime_layout_store, roc_ops); } - try value_stack.push(dest); + // Set the runtime type variable so method dispatch works correctly + var result = dest; + result.rt_var = lc.list_rt_var; + try value_stack.push(result); } } return true; @@ -12131,6 +12217,10 @@ pub const Interpreter = struct { const scrutinee = try self.pushCopy(scrutinee_temp, roc_ops); scrutinee_temp.decref(&self.runtime_layout_store, roc_ops); + // Use the scrutinee's own rt_var if available (preserves type through polymorphic calls), + // otherwise fall back to the translated scrutinee type from the match expression + const effective_scrutinee_rt_var = scrutinee.rt_var orelse mb.scrutinee_rt_var; + // Try branches starting from current_branch var branch_idx = mb.current_branch; while (branch_idx < mb.branches.len) : (branch_idx += 1) { @@ -12147,7 +12237,7 @@ pub const Interpreter = struct { if (!try self.patternMatchesBind( self.env.store.getMatchBranchPattern(bp_idx).pattern, scrutinee, - mb.scrutinee_rt_var, + effective_scrutinee_rt_var, roc_ops, &temp_binds, @enumFromInt(0), @@ -12793,8 +12883,13 @@ pub const Interpreter = struct { self.early_return_value = null; var return_val = 
return_val_in; - if (cleanup.call_ret_rt_var) |rt_var| { - return_val.rt_var = rt_var; + // Only set rt_var if the return value doesn't already have one. + // This preserves the original type for identity-like functions where + // the return value is the same as an input (which already has a valid rt_var). + if (return_val.rt_var == null) { + if (cleanup.call_ret_rt_var) |rt_var| { + return_val.rt_var = rt_var; + } } // Pop active closure if needed @@ -12867,8 +12962,13 @@ pub const Interpreter = struct { self.trimBindingList(&self.bindings, cleanup.saved_bindings_len, roc_ops); if (cleanup.arg_rt_vars_to_free) |vars| self.allocator.free(vars); - if (cleanup.call_ret_rt_var) |rt_var| { - result.rt_var = rt_var; + // Only set rt_var if the result doesn't already have one. + // This preserves the original type for identity-like functions where + // the return value is the same as an input (which already has a valid rt_var). + if (result.rt_var == null) { + if (cleanup.call_ret_rt_var) |rt_var| { + result.rt_var = rt_var; + } } try value_stack.push(result); return true; @@ -12996,19 +13096,115 @@ pub const Interpreter = struct { const lhs = value_stack.pop() orelse return error.Crash; defer lhs.decref(&self.runtime_layout_store, roc_ops); + // Prefer the runtime type from the evaluated value if it's more concrete + // (i.e., has a structure type rather than flex/rigid from polymorphic calls) + // Track if the value came from a polymorphic context (flex/rigid rt_var) + var effective_receiver_rt_var = ba.receiver_rt_var; + var value_is_polymorphic = false; + if (lhs.rt_var) |val_rt_var| { + const val_resolved = self.runtime_types.resolveVar(val_rt_var); + // Only use the value's type if it's concrete (has structure/alias) + if (val_resolved.desc.content == .structure or val_resolved.desc.content == .alias) { + effective_receiver_rt_var = val_rt_var; + } else if (val_resolved.desc.content == .flex or val_resolved.desc.content == .rigid) { + // The value came from a 
polymorphic context + value_is_polymorphic = true; + } + } + + // Check if effective type is still flex/rigid after trying value's rt_var + // Track whether we had to default to Dec so we know to use direct numeric handling + var defaulted_to_dec = false; + const resolved_check = self.runtime_types.resolveVar(effective_receiver_rt_var); + if (resolved_check.desc.content == .flex or resolved_check.desc.content == .rigid) { + // No concrete type info available, default to Dec for numeric operations + const dec_content = try self.mkNumberTypeContentRuntime("Dec"); + const dec_var = try self.runtime_types.freshFromContent(dec_content); + effective_receiver_rt_var = dec_var; + defaulted_to_dec = true; + } else if (value_is_polymorphic) { + // The value is polymorphic but we have a concrete type from CIR - mark as polymorphic + // so we use direct numeric handling instead of method dispatch + defaulted_to_dec = true; + } + // Resolve the lhs type - const lhs_resolved = self.runtime_types.resolveVar(ba.receiver_rt_var); + const lhs_resolved = self.runtime_types.resolveVar(effective_receiver_rt_var); // Get nominal type info, or handle anonymous structural types // Follow aliases to get to the underlying type - var current_var = ba.receiver_rt_var; + var current_var = effective_receiver_rt_var; var current_resolved = lhs_resolved; + var alias_count: u32 = 0; while (current_resolved.desc.content == .alias) { + alias_count += 1; + if (alias_count > 1000) break; // Prevent infinite loops const alias = current_resolved.desc.content.alias; current_var = self.runtime_types.getAliasBackingVar(alias); current_resolved = self.runtime_types.resolveVar(current_var); } + // Check if we can use low-level numeric comparison based on layout + // This handles cases where method dispatch would fail (e.g., polymorphic values) + // Only use direct handling when we had to default to Dec due to flex/rigid types + const is_numeric_layout = lhs.layout.tag == .scalar and + 
(lhs.layout.data.scalar.tag == .int or lhs.layout.data.scalar.tag == .frac); + if (is_numeric_layout and defaulted_to_dec) { + // Handle numeric comparisons directly via low-level ops + if (ba.method_ident == self.root_env.idents.is_gt) { + const result = try self.compareNumericValues(lhs, rhs, .gt); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_gte) { + const result = try self.compareNumericValues(lhs, rhs, .gte); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_lt) { + const result = try self.compareNumericValues(lhs, rhs, .lt); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_lte) { + const result = try self.compareNumericValues(lhs, rhs, .lte); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_eq) { + const result = try self.compareNumericValues(lhs, rhs, .eq); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } + // Handle numeric arithmetic directly via evalDecBinop + if (ba.method_ident == self.root_env.idents.plus) { + const result = try self.evalDecBinop(.add, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.minus) { + const result = try self.evalDecBinop(.sub, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.times) { + const 
result = try self.evalDecBinop(.mul, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.div_by) { + const result = try self.evalDecBinop(.div, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.div_trunc_by) { + const result = try self.evalDecBinop(.div_trunc, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.rem_by) { + const result = try self.evalDecBinop(.rem, lhs.layout, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } + } + const nominal_info: ?struct { origin: base_pkg.Ident.Idx, ident: base_pkg.Ident.Idx } = switch (current_resolved.desc.content) { .structure => |s| switch (s) { .nominal_type => |nom| .{ @@ -13018,7 +13214,7 @@ pub const Interpreter = struct { .record, .tuple, .tag_union, .empty_record, .empty_tag_union => blk: { // Anonymous structural types have implicit is_eq if (ba.method_ident == self.root_env.idents.is_eq) { - var result = self.valuesStructurallyEqual(lhs, ba.receiver_rt_var, rhs, ba.rhs_rt_var, roc_ops) catch |err| { + var result = self.valuesStructurallyEqual(lhs, effective_receiver_rt_var, rhs, ba.rhs_rt_var, roc_ops) catch |err| { if (err == error.NotImplemented) { self.triggerCrash("Structural equality not implemented for this type", false, roc_ops); return error.Crash; @@ -13035,13 +13231,10 @@ pub const Interpreter = struct { }, else => null, }, - // Flex, rigid, and error vars are unresolved type variables (e.g., numeric literals defaulting to Dec, - // or type parameters in generic functions). For is_eq, use structural equality which works - // for all numeric types and generic type parameters with is_eq constraints. - // Error types can occur during generic instantiation when types couldn't be resolved. 
+ // Flex, rigid, and error vars are unresolved type variables .flex, .rigid, .err => blk: { if (ba.method_ident == self.root_env.idents.is_eq) { - var result = self.valuesStructurallyEqual(lhs, ba.receiver_rt_var, rhs, ba.rhs_rt_var, roc_ops) catch |err| { + var result = self.valuesStructurallyEqual(lhs, effective_receiver_rt_var, rhs, ba.rhs_rt_var, roc_ops) catch |err| { if (err == error.NotImplemented) { self.triggerCrash("Structural equality not implemented for this type", false, roc_ops); return error.Crash; @@ -13069,7 +13262,7 @@ pub const Interpreter = struct { nominal_info.?.ident, ba.method_ident, roc_ops, - ba.receiver_rt_var, + effective_receiver_rt_var, ); defer method_func.decref(&self.runtime_layout_store, roc_ops); From b25427b90c956eeb248c8170ae0495540dbd3e8f Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Thu, 4 Dec 2025 11:59:52 +1100 Subject: [PATCH 06/64] initial implementatio of FX "test mode" behavioural tests --- src/cli/test/fx_platform_test.zig | 711 ++++++++---------------------- test/fx/platform/host.zig | 325 +++++++++++++- 2 files changed, 481 insertions(+), 555 deletions(-) diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 567b21c8f2..cba217062b 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -149,176 +149,115 @@ fn runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_inp }; } +/// Runs a roc app with --test mode using the given IO spec. +/// Spec format: "0stdout|2>stderr" (pipe-separated) +/// Returns success if the app's IO matches the spec exactly. 
+fn runRocTest(allocator: std.mem.Allocator, roc_file: []const u8, spec: []const u8) !std.process.Child.RunResult { + try ensureRocBinary(allocator); + return try std.process.Child.run(.{ + .allocator = allocator, + .argv = &[_][]const u8{ + roc_binary_path, + "--no-cache", + roc_file, + "--", + "--test", + spec, + }, + }); +} + +/// Helper to check if a test mode run succeeded (exit code 0, empty output) +fn checkTestSuccess(result: std.process.Child.RunResult) !void { + switch (result.term) { + .Exited => |code| { + if (code != 0) { + std.debug.print("Test failed with exit code {}\n", .{code}); + std.debug.print("STDERR: {s}\n", .{result.stderr}); + return error.TestFailed; + } + }, + .Signal => |sig| { + std.debug.print("Process terminated by signal: {}\n", .{sig}); + std.debug.print("STDERR: {s}\n", .{result.stderr}); + return error.SegFault; + }, + else => { + std.debug.print("Test terminated abnormally: {}\n", .{result.term}); + std.debug.print("STDERR: {s}\n", .{result.stderr}); + return error.TestFailed; + }, + } +} + test "fx platform effectful functions" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest( + allocator, + "test/fx/app.roc", + "1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!", + ); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - // Run the app directly with the roc CLI (not build, just run) - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/app.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - 
}, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - // Verify stdout contains expected messages - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello from stdout!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 1 to stdout") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 3 to stdout") != null); - - // Verify stderr contains expected messages - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Error from stderr!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 2 to stderr") != null); - - // Verify stderr messages are NOT in stdout - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Error from stderr!") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 2 to stderr") == null); - - // Verify stdout messages are NOT in stderr - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Hello from stdout!") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 1 to stdout") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 3 to stdout") == null); + try checkTestSuccess(result); } test "fx platform with dotdot starting path" { + // Tests that relative paths starting with .. are handled correctly const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest( + allocator, + "test/fx/subdir/app.roc", + "1>Hello from stdout!|1>Line 1 to stdout|2>Line 2 to stderr|1>Line 3 to stdout|2>Error from stderr!", + ); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - // Run the app from a subdirectory that uses ../ at the START of its platform path - // This tests that relative paths starting with .. 
are handled correctly - // Bug: paths starting with ../ fail with TypeMismatch, while ./path/../ works - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/subdir/app.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - // Verify stdout contains expected messages - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello from stdout!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 1 to stdout") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 3 to stdout") != null); - - // Verify stderr contains expected messages - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Error from stderr!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 2 to stderr") != null); - - // Verify stderr messages are NOT in stdout - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Error from stderr!") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 2 to stderr") == null); - - // Verify stdout messages are NOT in stderr - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Hello from stdout!") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 1 to stdout") == null); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "Line 3 to stdout") == null); + try 
checkTestSuccess(result); } test "fx platform stdin to stdout" { const allocator = testing.allocator; - const result = try runRocWithStdin(allocator, "test/fx/stdin_to_stdout.roc", "test input\n"); + const result = try runRocTest(allocator, "test/fx/stdin_to_stdout.roc", "0test input"); defer allocator.free(result.stdout); defer allocator.free(result.stderr); - if (result.term != .Exited or result.term.Exited != 0) { - std.debug.print("Test failed with term: {}\n", .{result.term}); - std.debug.print("STDOUT:\n{s}\n", .{result.stdout}); - std.debug.print("STDERR:\n{s}\n", .{result.stderr}); - return error.TestFailed; - } - try testing.expect(std.mem.indexOf(u8, result.stdout, "test input") != null); + try checkTestSuccess(result); } test "fx platform stdin echo" { const allocator = testing.allocator; - const result = try runRocWithStdin(allocator, "test/fx/stdin_echo.roc", "hello world\n"); + const result = try runRocTest(allocator, "test/fx/stdin_echo.roc", "0hello world"); defer allocator.free(result.stdout); defer allocator.free(result.stderr); - if (result.term != .Exited or result.term.Exited != 0) { - std.debug.print("Test failed with term: {}\n", .{result.term}); - std.debug.print("STDOUT:\n{s}\n", .{result.stdout}); - std.debug.print("STDERR:\n{s}\n", .{result.stderr}); - return error.TestFailed; - } - try testing.expect(std.mem.indexOf(u8, result.stdout, "hello world") != null); + try checkTestSuccess(result); } test "fx platform stdin test with output" { const allocator = testing.allocator; - const result = try runRocWithStdin(allocator, "test/fx/stdin_test.roc", "user input\n"); + const result = try runRocTest(allocator, "test/fx/stdin_test.roc", "1>Before stdin|0After stdin"); defer allocator.free(result.stdout); defer allocator.free(result.stderr); - if (result.term != .Exited or result.term.Exited != 0) { - std.debug.print("Test failed with term: {}\n", .{result.term}); - std.debug.print("STDOUT:\n{s}\n", .{result.stdout}); - 
std.debug.print("STDERR:\n{s}\n", .{result.stderr}); - return error.TestFailed; - } - try testing.expect(std.mem.indexOf(u8, result.stdout, "Before stdin") != null); - try testing.expect(std.mem.indexOf(u8, result.stdout, "After stdin") != null); + try checkTestSuccess(result); } test "fx platform stdin simple" { + // stdin_simple reads from stdin and prints to stderr const allocator = testing.allocator; - const result = try runRocWithStdin(allocator, "test/fx/stdin_simple.roc", "simple test\n"); + const result = try runRocTest(allocator, "test/fx/stdin_simple.roc", "0simple test"); defer allocator.free(result.stdout); defer allocator.free(result.stderr); - if (result.term != .Exited or result.term.Exited != 0) { - std.debug.print("Test failed with term: {}\n", .{result.term}); - std.debug.print("STDOUT:\n{s}\n", .{result.stdout}); - std.debug.print("STDERR:\n{s}\n", .{result.stderr}); - return error.TestFailed; - } - // stdin_simple reads from stdin and prints to stderr - try testing.expect(std.mem.indexOf(u8, result.stderr, "simple test") != null); + try checkTestSuccess(result); } test "fx platform expect with main" { @@ -354,32 +293,25 @@ test "fx platform expect with numeric literal" { } test "fx platform match returning string" { + // Tests that match expressions with string returns work correctly const allocator = testing.allocator; - // Run the app that has a match expression returning a string - // This tests that match expressions with string returns work correctly - const run_result = try runRoc(allocator, "test/fx/match_str_return.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/match_str_return.roc", "1>0"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - - // The app should run successfully and exit with code 0 - // It outputs "0" from the match expression - try 
testing.expectEqualStrings("0\n", run_result.stdout); - try testing.expectEqualStrings("", run_result.stderr); + try checkTestSuccess(result); } test "fx platform match with wildcard" { + // Tests that wildcard patterns in match expressions work correctly const allocator = testing.allocator; - // Run an app that uses a match expression with a wildcard pattern - // This tests that wildcard patterns in match expressions work correctly - const run_result = try runRoc(allocator, "test/fx/match_with_wildcard.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/match_with_wildcard.roc", "1>0"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); + try checkTestSuccess(result); } test "fx platform dbg missing return value" { @@ -525,17 +457,13 @@ test "fx platform checked directly finds sibling modules" { test "fx platform opaque type with method" { // Regression test: An opaque type with a method attached causes a segfault - // when running the app. This test will pass once the bug is fixed. const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/opaque_with_method.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/opaque_with_method.roc", "1>My favourite color is Red"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - - // Verify the output contains the expected string - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "My favourite color is Red") != null); + try checkTestSuccess(result); } test "fx platform string interpolation type mismatch" { @@ -630,57 +558,44 @@ test "question mark operator" { // Tests the `?` operator for error propagation. 
const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/question_mark_operator.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/question_mark_operator.roc", "1>hello"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - // The ? operator should unwrap Ok values and return "hello" - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "hello") != null); + try checkTestSuccess(result); } test "numeric fold" { // Tests List.fold with numeric accumulators. const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/numeric_fold.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/numeric_fold.roc", "1>Sum: 15.0"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - // Verify we get the correct sum: 1+2+3+4+5 = 15 - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Sum: 15") != null); + try checkTestSuccess(result); } test "List.for_each! with effectful callback" { // Tests List.for_each! 
which iterates over a list and calls an effectful callback const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/list_for_each.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/list_for_each.roc", "1>Item: apple|1>Item: banana|1>Item: cherry"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - - // Verify each item is printed - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Item: apple") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Item: banana") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Item: cherry") != null); + try checkTestSuccess(result); } test "string literal pattern matching" { // Tests pattern matching on string literals in match expressions. const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/string_pattern_matching.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/string_pattern_matching.roc", "1>Hello Alice!|1>Hey Bob!"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - - // Verify string patterns match correctly - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello Alice!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hey Bob!") != null); + try checkTestSuccess(result); } test "drop_prefix segfault regression" { @@ -760,69 +675,61 @@ test "big string equality regression" { test "fx platform hello world" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/hello_world.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, 
"test/fx/hello_world.roc", "1>Hello, world!"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello, world!") != null); + try checkTestSuccess(result); } test "fx platform function wrapper stdout" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/function_wrapper_stdout.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/function_wrapper_stdout.roc", "1>Hello from stdout!"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello from stdout!") != null); + try checkTestSuccess(result); } test "fx platform function wrapper multiline" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/function_wrapper_multiline.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/function_wrapper_multiline.roc", "1>Hello from stdout!|1>Line 2"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello from stdout!") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Line 2") != null); + try checkTestSuccess(result); } test "fx platform multiline stdout" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/multiline_stdout.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/multiline_stdout.roc", "1>Hello|1>World"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try 
checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "World") != null); + try checkTestSuccess(result); } test "fx platform empty_list_get" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/empty_list_get.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/empty_list_get.roc", "1>is err"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "is err") != null); + try checkTestSuccess(result); } test "fx platform str_interp_valid" { const allocator = testing.allocator; - const run_result = try runRoc(allocator, "test/fx/str_interp_valid.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); + const result = try runRocTest(allocator, "test/fx/str_interp_valid.roc", "1>Hello, World!"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - try checkSuccess(run_result); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello, World!") != null); + try checkTestSuccess(result); } test "fx platform expect with toplevel numeric" { @@ -1001,141 +908,41 @@ test "fx platform expect with toplevel numeric" { test "fx platform numeric_lookup_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/numeric_lookup_test.roc", "1>done"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/numeric_lookup_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - 
switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "done") != null); + try checkTestSuccess(result); } test "fx platform string_lookup_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/string_lookup_test.roc", "1>hello"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/string_lookup_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "hello") != null); + try checkTestSuccess(result); } test "fx platform test_direct_string" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/test_direct_string.roc", 
"1>Hello"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_direct_string.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); + try checkTestSuccess(result); } test "fx platform test_one_call" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/test_one_call.roc", "1>Hello"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_one_call.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - 
return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); + try checkTestSuccess(result); } test "fx platform test_type_mismatch" { @@ -1177,251 +984,79 @@ test "fx platform test_type_mismatch" { test "fx platform test_with_wrapper" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/test_with_wrapper.roc", "1>Hello"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_with_wrapper.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); + try checkTestSuccess(result); } test "fx platform inspect_compare_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest( + allocator, + "test/fx/inspect_compare_test.roc", + "1>With to_inspect: Custom::Red|1>Without to_inspect: ColorWithoutInspect.Red|1>Primitive: 42", + ); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_compare_test.roc", - }, - }); - defer 
allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "With to_inspect: Custom::Red") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Without to_inspect: ColorWithoutInspect.Red") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Primitive: 42") != null); + try checkTestSuccess(result); } test "fx platform inspect_custom_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/inspect_custom_test.roc", "1>Color::Red|1>Expected: Color::Red"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_custom_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - 
return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Color::Red") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Expected: Color::Red") != null); + try checkTestSuccess(result); } test "fx platform inspect_nested_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest( + allocator, + "test/fx/inspect_nested_test.roc", + "1>{ color: Color::Red, count: 42, name: \"test\" }|1>Expected: { color: Color::Red, count: 42, name: \"test\" }", + ); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_nested_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "{ color: Color::Red, count: 42, name: \"test\" }") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Expected: { color: Color::Red, count: 42, name: \"test\" }") != null); + try checkTestSuccess(result); } test "fx platform inspect_no_method_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/inspect_no_method_test.roc", "1>Result: Color.Red|1>(Default rendering)"); + defer allocator.free(result.stdout); + 
defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_no_method_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Result: Color.Red") != null); - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "(Default rendering)") != null); + try checkTestSuccess(result); } test "fx platform inspect_record_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/inspect_record_test.roc", "1>{ count: 42, name: \"test\" }"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_record_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - 
std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "{ count: 42, name: \"test\" }") != null); + try checkTestSuccess(result); } test "fx platform inspect_wrong_sig_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + const result = try runRocTest(allocator, "test/fx/inspect_wrong_sig_test.roc", "1>Result: 1"); + defer allocator.free(result.stdout); + defer allocator.free(result.stderr); - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/inspect_wrong_sig_test.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Result: 1") != null); + try checkTestSuccess(result); } test "fx platform issue8433" { diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index 2b3b30bf95..67a593f79d 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -5,9 +5,84 @@ const build_options = @import("build_options"); const trace_refcount = build_options.trace_refcount; +/// Type of IO operation in test spec +const EffectType = enum(u8) { + stdin_input, // 0< + stdout_expect, // 1> + stderr_expect, // 2> +}; + +/// A single entry in the test spec +const SpecEntry = 
struct { + effect_type: EffectType, + value: []const u8, + spec_line: usize, // For error reporting +}; + +/// Test state for simulated IO mode +const TestState = struct { + enabled: bool, + verbose: bool, + entries: []const SpecEntry, + current_index: usize, + failed: bool, + failure_info: ?FailureInfo, + + const FailureInfo = struct { + expected_type: EffectType, + expected_value: []const u8, + actual_type: EffectType, + spec_line: usize, + }; + + fn init() TestState { + return .{ + .enabled = false, + .verbose = false, + .entries = &.{}, + .current_index = 0, + .failed = false, + .failure_info = null, + }; + } +}; + +/// Parse test spec string into array of SpecEntry +/// Format: "0output|2>error" (pipe-separated) +fn parseTestSpec(allocator: std.mem.Allocator, spec: []const u8) ![]SpecEntry { + var entries = try std.ArrayList(SpecEntry).initCapacity(allocator, 8); + errdefer entries.deinit(allocator); + + var line_num: usize = 1; + // Split on pipe character + var iter = std.mem.splitScalar(u8, spec, '|'); + + while (iter.next()) |line| { + defer line_num += 1; + + if (line.len < 2) continue; // Skip empty/short lines + + const effect_type: EffectType = blk: { + if (line[0] == '0' and line[1] == '<') break :blk .stdin_input; + if (line[0] == '1' and line[1] == '>') break :blk .stdout_expect; + if (line[0] == '2' and line[1] == '>') break :blk .stderr_expect; + continue; // Skip invalid lines + }; + + try entries.append(allocator, .{ + .effect_type = effect_type, + .value = line[2..], + .spec_line = line_num, + }); + } + + return try entries.toOwnedSlice(allocator); +} + /// Host environment - contains GeneralPurposeAllocator for leak detection const HostEnv = struct { gpa: std.heap.GeneralPurposeAllocator(.{}), + test_state: TestState, }; /// Roc allocation function with size-tracking metadata @@ -163,16 +238,47 @@ fn __main() callconv(.c) void {} // C compatible main for runtime fn main(argc: c_int, argv: [*][*:0]u8) callconv(.c) c_int { - _ = argc; - _ = 
argv; - platform_main() catch |err| { - const stderr: std.fs.File = .stderr(); - stderr.writeAll("HOST ERROR: ") catch {}; - stderr.writeAll(@errorName(err)) catch {}; - stderr.writeAll("\n") catch {}; + // Parse --test or --test-verbose argument + var test_spec: ?[]const u8 = null; + var test_verbose: bool = false; + var i: usize = 1; + const arg_count: usize = @intCast(argc); + const stderr_file: std.fs.File = .stderr(); + while (i < arg_count) : (i += 1) { + const arg = std.mem.span(argv[i]); + if (std.mem.eql(u8, arg, "--test-verbose")) { + if (i + 1 < arg_count) { + i += 1; + test_spec = std.mem.span(argv[i]); + test_verbose = true; + } else { + stderr_file.writeAll("Error: --test-verbose requires a spec argument\n") catch {}; + return 1; + } + } else if (std.mem.eql(u8, arg, "--test")) { + if (i + 1 < arg_count) { + i += 1; + test_spec = std.mem.span(argv[i]); + } else { + stderr_file.writeAll("Error: --test requires a spec argument\n") catch {}; + return 1; + } + } else if (arg.len >= 2 and arg[0] == '-' and arg[1] == '-') { + stderr_file.writeAll("Error: unknown flag '") catch {}; + stderr_file.writeAll(arg) catch {}; + stderr_file.writeAll("'\n") catch {}; + stderr_file.writeAll("Usage: [--test ] [--test-verbose ]\n") catch {}; + return 1; + } + } + + const exit_code = platform_main(test_spec, test_verbose) catch |err| { + stderr_file.writeAll("HOST ERROR: ") catch {}; + stderr_file.writeAll(@errorName(err)) catch {}; + stderr_file.writeAll("\n") catch {}; return 1; }; - return 0; + return exit_code; } // Use the actual RocStr from builtins instead of defining our own @@ -182,14 +288,59 @@ const RocStr = builtins.str.RocStr; /// Follows RocCall ABI: (ops, ret_ptr, args_ptr) /// Returns {} and takes Str as argument fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr: *anyopaque) callconv(.c) void { - _ = ops; _ = ret_ptr; // Return value is {} which is zero-sized // Arguments struct for single Str parameter const Args = extern 
struct { str: RocStr }; const args: *Args = @ptrCast(@alignCast(args_ptr)); - const message = args.str.asSlice(); + + const host: *HostEnv = @ptrCast(@alignCast(ops.env)); + + // Test mode: verify output matches expected + if (host.test_state.enabled) { + const stderr_file: std.fs.File = .stderr(); + if (host.test_state.current_index < host.test_state.entries.len) { + const entry = host.test_state.entries[host.test_state.current_index]; + if (entry.effect_type == .stderr_expect and std.mem.eql(u8, entry.value, message)) { + host.test_state.current_index += 1; + if (host.test_state.verbose) { + stderr_file.writeAll("[OK] stderr: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\"\n") catch {}; + } + return; // Match! + } + // Mismatch + host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = entry.effect_type, + .expected_value = entry.value, + .actual_type = .stderr_expect, + .spec_line = entry.spec_line, + }; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stderr: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\" (expected ") catch {}; + stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {}; + stderr_file.writeAll(": \"") catch {}; + stderr_file.writeAll(entry.value) catch {}; + stderr_file.writeAll("\")\n") catch {}; + } + } else { + // Extra output not in spec + host.test_state.failed = true; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stderr: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\" (unexpected - no more expected operations)\n") catch {}; + } + } + return; + } + + // Normal mode: write to stderr const stderr: std.fs.File = .stderr(); stderr.writeAll(message) catch {}; stderr.writeAll("\n") catch {}; @@ -201,19 +352,61 @@ fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, 
args_ptr: *anyopaque) callconv(.c) void { _ = args_ptr; // Argument is {} which is zero-sized - // Read a line from stdin + const host: *HostEnv = @ptrCast(@alignCast(ops.env)); + const result: *RocStr = @ptrCast(@alignCast(ret_ptr)); + + // Test mode: consume next stdin_input entry from spec + if (host.test_state.enabled) { + const stderr_file: std.fs.File = .stderr(); + if (host.test_state.current_index < host.test_state.entries.len) { + const entry = host.test_state.entries[host.test_state.current_index]; + if (entry.effect_type == .stdin_input) { + host.test_state.current_index += 1; + result.* = RocStr.fromSlice(entry.value, ops); + if (host.test_state.verbose) { + stderr_file.writeAll("[OK] stdin: \"") catch {}; + stderr_file.writeAll(entry.value) catch {}; + stderr_file.writeAll("\"\n") catch {}; + } + return; + } + // Wrong type - expected stdin but spec has output + host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = entry.effect_type, + .expected_value = entry.value, + .actual_type = .stdin_input, + .spec_line = entry.spec_line, + }; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stdin read (expected ") catch {}; + stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {}; + stderr_file.writeAll(": \"") catch {}; + stderr_file.writeAll(entry.value) catch {}; + stderr_file.writeAll("\")\n") catch {}; + } + } else { + // Ran out of entries - app tried to read more stdin than provided + host.test_state.failed = true; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stdin read (unexpected - no more expected operations)\n") catch {}; + } + } + result.* = RocStr.empty(); + return; + } + + // Normal mode: Read a line from stdin var buffer: [4096]u8 = undefined; const stdin_file: std.fs.File = .stdin(); const bytes_read = stdin_file.read(&buffer) catch { // Return empty string on error - const result: *RocStr = @ptrCast(@alignCast(ret_ptr)); result.* = RocStr.empty(); return; }; // Handle 
EOF (no bytes read) if (bytes_read == 0) { - const result: *RocStr = @ptrCast(@alignCast(ret_ptr)); result.* = RocStr.empty(); return; } @@ -233,7 +426,6 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr // Create RocStr from the read line and return it // RocStr.fromSlice handles allocation internally (either inline for small strings // or via roc_alloc for big strings with proper refcount tracking) - const result: *RocStr = @ptrCast(@alignCast(ret_ptr)); result.* = RocStr.fromSlice(line, ops); } @@ -241,14 +433,59 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr /// Follows RocCall ABI: (ops, ret_ptr, args_ptr) /// Returns {} and takes Str as argument fn hostedStdoutLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr: *anyopaque) callconv(.c) void { - _ = ops; _ = ret_ptr; // Return value is {} which is zero-sized // Arguments struct for single Str parameter const Args = extern struct { str: RocStr }; const args: *Args = @ptrCast(@alignCast(args_ptr)); - const message = args.str.asSlice(); + + const host: *HostEnv = @ptrCast(@alignCast(ops.env)); + + // Test mode: verify output matches expected + if (host.test_state.enabled) { + const stderr_file: std.fs.File = .stderr(); + if (host.test_state.current_index < host.test_state.entries.len) { + const entry = host.test_state.entries[host.test_state.current_index]; + if (entry.effect_type == .stdout_expect and std.mem.eql(u8, entry.value, message)) { + host.test_state.current_index += 1; + if (host.test_state.verbose) { + stderr_file.writeAll("[OK] stdout: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\"\n") catch {}; + } + return; // Match! 
+ } + // Mismatch + host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = entry.effect_type, + .expected_value = entry.value, + .actual_type = .stdout_expect, + .spec_line = entry.spec_line, + }; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stdout: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\" (expected ") catch {}; + stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {}; + stderr_file.writeAll(": \"") catch {}; + stderr_file.writeAll(entry.value) catch {}; + stderr_file.writeAll("\")\n") catch {}; + } + } else { + // Extra output not in spec + host.test_state.failed = true; + if (host.test_state.verbose) { + stderr_file.writeAll("[FAIL] stdout: \"") catch {}; + stderr_file.writeAll(message) catch {}; + stderr_file.writeAll("\" (unexpected - no more expected operations)\n") catch {}; + } + } + return; + } + + // Normal mode: write to stdout const stdout: std.fs.File = .stdout(); stdout.writeAll(message) catch {}; stdout.writeAll("\n") catch {}; @@ -263,11 +500,25 @@ const hosted_function_ptrs = [_]builtins.host_abi.HostedFn{ }; /// Platform host entrypoint -fn platform_main() !void { +fn platform_main(test_spec: ?[]const u8, test_verbose: bool) !c_int { var host_env = HostEnv{ .gpa = std.heap.GeneralPurposeAllocator(.{}){}, + .test_state = TestState.init(), }; + + // Parse test spec if provided + if (test_spec) |spec| { + host_env.test_state.entries = try parseTestSpec(host_env.gpa.allocator(), spec); + host_env.test_state.enabled = true; + host_env.test_state.verbose = test_verbose; + } + defer { + // Free test entries if allocated + if (host_env.test_state.entries.len > 0) { + host_env.gpa.allocator().free(host_env.test_state.entries); + } + const leaked = host_env.gpa.deinit(); if (leaked == .leak) { std.log.err("\x1b[33mMemory leak detected!\x1b[0m", .{}); @@ -298,4 +549,44 @@ fn platform_main() !void { // causing a segfault if you pass null. 
This should be changed! Dereferencing // garbage memory is obviously pointless, and there's no reason we should do it. roc__main(&roc_ops, @as(*anyopaque, @ptrCast(&ret)), @as(*anyopaque, @ptrCast(&args))); + + // Check test results if in test mode + if (host_env.test_state.enabled) { + // Check if test failed or not all entries were consumed + if (host_env.test_state.failed or host_env.test_state.current_index != host_env.test_state.entries.len) { + const stderr_file: std.fs.File = .stderr(); + + // Print failure info + if (host_env.test_state.failure_info) |info| { + var buf: [512]u8 = undefined; + const msg = std.fmt.bufPrint(&buf, "TEST FAILED at spec line {d}:\n Expected: {s} \"{s}\"\n Got: {s}\n", .{ + info.spec_line, + effectTypeName(info.expected_type), + info.expected_value, + effectTypeName(info.actual_type), + }) catch "TEST FAILED\n"; + stderr_file.writeAll(msg) catch {}; + } else if (host_env.test_state.current_index < host_env.test_state.entries.len) { + // Not all entries were consumed + const remaining = host_env.test_state.entries.len - host_env.test_state.current_index; + var buf: [256]u8 = undefined; + const msg = std.fmt.bufPrint(&buf, "TEST FAILED: {d} expected IO operation(s) not performed\n", .{remaining}) catch "TEST FAILED: expected IO operations not performed\n"; + stderr_file.writeAll(msg) catch {}; + } else { + stderr_file.writeAll("TEST FAILED\n") catch {}; + } + + return 1; + } + } + + return 0; +} + +fn effectTypeName(effect_type: EffectType) []const u8 { + return switch (effect_type) { + .stdin_input => "stdin", + .stdout_expect => "stdout", + .stderr_expect => "stderr", + }; } From 849e35cc8810e8f57f514220e8f0a886abacbda3 Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Thu, 4 Dec 2025 12:10:53 +1100 Subject: [PATCH 07/64] speed up -- don't rebuild roc or the intereted host on each test --- build.zig | 2 + src/cli/test/fx_platform_test.zig | 99 +++++++------------------------ 2 files changed, 25 insertions(+), 76 deletions(-) diff 
--git a/build.zig b/build.zig index b1c8a0850c..c251c73968 100644 --- a/build.zig +++ b/build.zig @@ -1345,6 +1345,8 @@ pub fn build(b: *std.Build) void { } // Ensure host library is copied before running the test run_fx_platform_test.step.dependOn(©_test_fx_host.step); + // Ensure roc binary is built before running the test + run_fx_platform_test.step.dependOn(roc_step); tests_summary.addRun(&run_fx_platform_test.step); } diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index cba217062b..8396c252bc 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -2,6 +2,13 @@ //! //! Tests that platform-provided hosted functions (like Stdout.line! and Stderr.line!) //! can be properly invoked from Roc applications. +//! +//! NOTE: These tests depend on the roc binary being built via build.zig. The test step +//! has a dependency on roc_step, so the binary will be built automatically before tests run. +//! +//! IMPORTANT: Do NOT use --no-cache when running roc. The interpreted host doesn't change between +//! tests (we're testing app behaviour, not the platform), so using --no-cache would force unnecessary +//! re-linking on every test, making the test run much slower than is necessary. const std = @import("std"); const builtin = @import("builtin"); @@ -9,25 +16,6 @@ const testing = std.testing; const roc_binary_path = if (builtin.os.tag == .windows) ".\\zig-out\\bin\\roc.exe" else "./zig-out/bin/roc"; -/// Ensures the roc binary is up-to-date by always rebuilding it. -/// This is needed because these tests spawn the roc CLI as a child process, -/// and a stale binary will cause test failures even if the test code is correct. -fn ensureRocBinary(allocator: std.mem.Allocator) !void { - // Always rebuild to ensure the binary is up-to-date with the latest source changes. - // This prevents confusing test failures when the binary exists but is stale. 
- const build_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ "zig", "build", "roc" }, - }); - defer allocator.free(build_result.stdout); - defer allocator.free(build_result.stderr); - - if (build_result.term != .Exited or build_result.term.Exited != 0) { - std.debug.print("Failed to build roc binary:\n{s}\n", .{build_result.stderr}); - return error.RocBuildFailed; - } -} - /// Options for running roc commands const RunOptions = struct { /// Additional command line arguments (e.g., "test", "check") @@ -37,30 +25,11 @@ const RunOptions = struct { }; /// Runs a roc command and returns the result. -/// Automatically adds --no-cache for non-test/non-check commands to ensure fresh builds. fn runRoc(allocator: std.mem.Allocator, roc_file: []const u8, options: RunOptions) !std.process.Child.RunResult { - try ensureRocBinary(allocator); - var args = std.ArrayList([]const u8){}; defer args.deinit(allocator); try args.append(allocator, roc_binary_path); - - // Determine if this is a test command - const is_test = blk: { - for (options.extra_args) |arg| { - if (std.mem.eql(u8, arg, "test")) { - break :blk true; - } - } - break :blk false; - }; - - // Add --no-cache before other args for non-test commands (including check) - if (!is_test) { - try args.append(allocator, "--no-cache"); - } - try args.appendSlice(allocator, options.extra_args); try args.append(allocator, roc_file); @@ -113,8 +82,7 @@ fn checkFailure(result: std.process.Child.RunResult) !void { } fn runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_input: []const u8) !std.process.Child.RunResult { - try ensureRocBinary(allocator); - var child = std.process.Child.init(&[_][]const u8{ "./zig-out/bin/roc", roc_file }, allocator); + var child = std.process.Child.init(&[_][]const u8{ roc_binary_path, roc_file }, allocator); child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; @@ -153,12 +121,10 @@ fn 
runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_inp /// Spec format: "0stdout|2>stderr" (pipe-separated) /// Returns success if the app's IO matches the spec exactly. fn runRocTest(allocator: std.mem.Allocator, roc_file: []const u8, spec: []const u8) !std.process.Child.RunResult { - try ensureRocBinary(allocator); return try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ roc_binary_path, - "--no-cache", roc_file, "--", "--test", @@ -333,15 +299,13 @@ test "fx platform dbg missing return value" { test "fx platform check unused state var reports correct errors" { const allocator = testing.allocator; - try ensureRocBinary(allocator); - // Run `roc check` on an app with unused variables and type annotations // This test checks that the compiler reports the correct errors and doesn't // produce extraneous unrelated errors from platform module resolution const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "check", "test/fx/unused_state_var.roc", }, @@ -424,7 +388,7 @@ test "fx platform checked directly finds sibling modules" { const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "check", "test/fx/platform/main.roc", }, @@ -469,14 +433,12 @@ test "fx platform opaque type with method" { test "fx platform string interpolation type mismatch" { const allocator = testing.allocator; - try ensureRocBinary(allocator); - // Run an app that tries to interpolate a U8 (non-Str) type in a string. // This should fail with a type error because string interpolation only accepts Str. 
const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/num_method_call.roc", "--allow-errors", }, @@ -515,8 +477,6 @@ test "fx platform run from different cwd" { // running from a subdirectory correctly. const allocator = testing.allocator; - try ensureRocBinary(allocator); - // Get absolute path to roc binary since we'll change cwd const roc_abs_path = try std.fs.cwd().realpathAlloc(allocator, roc_binary_path); defer allocator.free(roc_abs_path); @@ -735,13 +695,11 @@ test "fx platform str_interp_valid" { test "fx platform expect with toplevel numeric" { const allocator = testing.allocator; - try ensureRocBinary(allocator); - // Run the app const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/expect_with_toplevel_numeric.roc", }, }); @@ -771,7 +729,7 @@ test "fx platform expect with toplevel numeric" { const test_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test", "test/fx/expect_with_toplevel_numeric.roc", }, @@ -801,12 +759,11 @@ test "fx platform expect with toplevel numeric" { // test "fx platform test7" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); - +// // const run_result = try std.process.Child.run(.{ // .allocator = allocator, // .argv = &[_][]const u8{ -// "./zig-out/bin/roc", +// "roc_binary_path", // "test/fx/test7.roc", // }, // }); @@ -837,12 +794,11 @@ test "fx platform expect with toplevel numeric" { // test "fx platform test8" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); - +// // const run_result = try std.process.Child.run(.{ // .allocator = allocator, // .argv = &[_][]const u8{ -// "./zig-out/bin/roc", +// "roc_binary_path", // "test/fx/test8.roc", // }, // }); @@ -873,12 +829,11 @@ test "fx platform expect with 
toplevel numeric" { // test "fx platform test9" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); - +// // const run_result = try std.process.Child.run(.{ // .allocator = allocator, // .argv = &[_][]const u8{ -// "./zig-out/bin/roc", +// "roc_binary_path", // "test/fx/test9.roc", // }, // }); @@ -948,12 +903,10 @@ test "fx platform test_one_call" { test "fx platform test_type_mismatch" { const allocator = testing.allocator; - try ensureRocBinary(allocator); - const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/test_type_mismatch.roc", }, }); @@ -1062,12 +1015,10 @@ test "fx platform inspect_wrong_sig_test" { test "fx platform issue8433" { const allocator = testing.allocator; - try ensureRocBinary(allocator); - const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/issue8433.roc", }, }); @@ -1098,12 +1049,10 @@ test "run aborts on errors by default" { // Tests that roc run aborts when there are type errors (without --allow-errors) const allocator = testing.allocator; - try ensureRocBinary(allocator); - const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/run_allow_errors.roc", }, }); @@ -1121,12 +1070,10 @@ test "run with --allow-errors attempts execution despite errors" { // Tests that roc run --allow-errors attempts to execute even with type errors const allocator = testing.allocator; - try ensureRocBinary(allocator); - const run_result = try std.process.Child.run(.{ .allocator = allocator, .argv = &[_][]const u8{ - "./zig-out/bin/roc", + roc_binary_path, "test/fx/run_allow_errors.roc", "--allow-errors", }, From 6d40d80e8c7e96e8d4de6778d5d9c00aca58b9c8 Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Thu, 4 Dec 2025 12:31:25 +1100 Subject: [PATCH 08/64] 
improve docs, parsing, and error reporting --- src/cli/test/fx_platform_test.zig | 50 +++------ test/fx/platform/host.zig | 164 +++++++++++++++++++++++++----- 2 files changed, 150 insertions(+), 64 deletions(-) diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 8396c252bc..9b0a1ea3b2 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -67,6 +67,7 @@ fn checkSuccess(result: std.process.Child.RunResult) !void { } /// Helper to check if a run result indicates failure (non-zero exit code) +/// This verifies the process exited cleanly with a non-zero code, NOT that it crashed. fn checkFailure(result: std.process.Child.RunResult) !void { switch (result.term) { .Exited => |code| { @@ -74,49 +75,24 @@ fn checkFailure(result: std.process.Child.RunResult) !void { std.debug.print("ERROR: roc succeeded but we expected it to fail\n", .{}); return error.UnexpectedSuccess; } + // Non-zero exit code is expected - this is a clean failure + }, + .Signal => |sig| { + // A crash is NOT the same as a clean failure - report it as an error + std.debug.print("ERROR: Process crashed with signal {} (expected clean failure with non-zero exit code)\n", .{sig}); + std.debug.print("STDOUT: {s}\n", .{result.stdout}); + std.debug.print("STDERR: {s}\n", .{result.stderr}); + return error.SegFault; }, else => { - // Non-zero exit is expected + std.debug.print("ERROR: Process terminated abnormally: {} (expected clean failure with non-zero exit code)\n", .{result.term}); + std.debug.print("STDOUT: {s}\n", .{result.stdout}); + std.debug.print("STDERR: {s}\n", .{result.stderr}); + return error.RunFailed; }, } } -fn runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_input: []const u8) !std.process.Child.RunResult { - var child = std.process.Child.init(&[_][]const u8{ roc_binary_path, roc_file }, allocator); - child.stdin_behavior = .Pipe; - child.stdout_behavior = .Pipe; - child.stderr_behavior = .Pipe; - - 
try child.spawn(); - - // Write stdin and close - if (child.stdin) |stdin| { - try stdin.writeAll(stdin_input); - stdin.close(); - child.stdin = null; - } - - // Collect stdout - const stdout = if (child.stdout) |stdout_pipe| - try stdout_pipe.readToEndAlloc(allocator, std.math.maxInt(usize)) - else - try allocator.dupe(u8, ""); - - // Collect stderr - const stderr = if (child.stderr) |stderr_pipe| - try stderr_pipe.readToEndAlloc(allocator, std.math.maxInt(usize)) - else - try allocator.dupe(u8, ""); - - const term = try child.wait(); - - return .{ - .term = term, - .stdout = stdout, - .stderr = stderr, - }; -} - /// Runs a roc app with --test mode using the given IO spec. /// Spec format: "0stdout|2>stderr" (pipe-separated) /// Returns success if the app's IO matches the spec exactly. diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index 67a593f79d..2b6066b350 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -1,4 +1,30 @@ -///! Platform host that tests effectful functions writing to stdout and stderr. +//! Platform host for testing effectful Roc applications. +//! +//! This host provides stdin/stdout/stderr effects and includes a test mode for +//! verifying IO behavior without performing actual syscalls. +//! +//! ## Test Mode +//! +//! Run with `--test ` to simulate IO and verify behavior: +//! ``` +//! ./zig-out/bin/roc app.roc -- --test "1>Hello, world!" +//! ``` +//! +//! Spec format uses pipe-separated operations: +//! - `0output` - expect "output" on stdout +//! - `2>output` - expect "output" on stderr +//! +//! Example with multiple operations: +//! ``` +//! --test "0Before stdin|1>After stdin" +//! ``` +//! +//! Use `--test-verbose ` for detailed output during test execution. +//! +//! Exit codes: +//! - 0: All expectations matched in order +//! 
- 1: Test failed (mismatch, missing output, extra output, or invalid spec) const std = @import("std"); const builtins = @import("builtins"); const build_options = @import("build_options"); @@ -32,6 +58,7 @@ const TestState = struct { expected_type: EffectType, expected_value: []const u8, actual_type: EffectType, + actual_value: []const u8, spec_line: usize, }; @@ -47,36 +74,58 @@ const TestState = struct { } }; +/// Parse error for invalid spec format +const ParseError = error{ + InvalidSpecFormat, + OutOfMemory, +}; + /// Parse test spec string into array of SpecEntry /// Format: "0output|2>error" (pipe-separated) -fn parseTestSpec(allocator: std.mem.Allocator, spec: []const u8) ![]SpecEntry { - var entries = try std.ArrayList(SpecEntry).initCapacity(allocator, 8); +/// Returns error if any segment doesn't start with a valid pattern (0<, 1>, 2>) +fn parseTestSpec(allocator: std.mem.Allocator, spec: []const u8) ParseError![]SpecEntry { + var entries = std.ArrayList(SpecEntry).initCapacity(allocator, 8) catch return ParseError.OutOfMemory; errdefer entries.deinit(allocator); var line_num: usize = 1; // Split on pipe character var iter = std.mem.splitScalar(u8, spec, '|'); - while (iter.next()) |line| { + while (iter.next()) |segment| { defer line_num += 1; - if (line.len < 2) continue; // Skip empty/short lines + // Skip empty segments (e.g., trailing pipe) + if (segment.len == 0) continue; + + // Check for valid pattern prefix + if (segment.len < 2) { + const stderr_file: std.fs.File = .stderr(); + stderr_file.writeAll("Error: Invalid spec segment '") catch {}; + stderr_file.writeAll(segment) catch {}; + stderr_file.writeAll("' - must start with 0<, 1>, or 2>\n") catch {}; + return ParseError.InvalidSpecFormat; + } const effect_type: EffectType = blk: { - if (line[0] == '0' and line[1] == '<') break :blk .stdin_input; - if (line[0] == '1' and line[1] == '>') break :blk .stdout_expect; - if (line[0] == '2' and line[1] == '>') break :blk .stderr_expect; - continue; 
// Skip invalid lines + if (segment[0] == '0' and segment[1] == '<') break :blk .stdin_input; + if (segment[0] == '1' and segment[1] == '>') break :blk .stdout_expect; + if (segment[0] == '2' and segment[1] == '>') break :blk .stderr_expect; + // Invalid pattern - report error + const stderr_file: std.fs.File = .stderr(); + stderr_file.writeAll("Error: Invalid spec segment '") catch {}; + stderr_file.writeAll(segment) catch {}; + stderr_file.writeAll("' - must start with 0<, 1>, or 2>\n") catch {}; + return ParseError.InvalidSpecFormat; }; - try entries.append(allocator, .{ + entries.append(allocator, .{ .effect_type = effect_type, - .value = line[2..], + .value = segment[2..], .spec_line = line_num, - }); + }) catch return ParseError.OutOfMemory; } - return try entries.toOwnedSlice(allocator); + return entries.toOwnedSlice(allocator) catch ParseError.OutOfMemory; } /// Host environment - contains GeneralPurposeAllocator for leak detection @@ -311,12 +360,14 @@ fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt } return; // Match! 
} - // Mismatch + // Mismatch - must allocate a copy of the message since the RocStr may be freed + const actual_copy = host.gpa.allocator().dupe(u8, message) catch ""; host.test_state.failed = true; host.test_state.failure_info = .{ .expected_type = entry.effect_type, .expected_value = entry.value, .actual_type = .stderr_expect, + .actual_value = actual_copy, .spec_line = entry.spec_line, }; if (host.test_state.verbose) { @@ -329,8 +380,16 @@ fn hostedStderrLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt stderr_file.writeAll("\")\n") catch {}; } } else { - // Extra output not in spec + // Extra output not in spec - must allocate a copy of the message + const actual_copy = host.gpa.allocator().dupe(u8, message) catch ""; host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = .stderr_expect, // We expected nothing + .expected_value = "", + .actual_type = .stderr_expect, + .actual_value = actual_copy, + .spec_line = 0, + }; if (host.test_state.verbose) { stderr_file.writeAll("[FAIL] stderr: \"") catch {}; stderr_file.writeAll(message) catch {}; @@ -376,6 +435,7 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr .expected_type = entry.effect_type, .expected_value = entry.value, .actual_type = .stdin_input, + .actual_value = "(stdin read)", .spec_line = entry.spec_line, }; if (host.test_state.verbose) { @@ -388,6 +448,13 @@ fn hostedStdinLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_ptr } else { // Ran out of entries - app tried to read more stdin than provided host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = .stdin_input, + .expected_value = "", + .actual_type = .stdin_input, + .actual_value = "(stdin read)", + .spec_line = 0, + }; if (host.test_state.verbose) { stderr_file.writeAll("[FAIL] stdin read (unexpected - no more expected operations)\n") catch {}; } @@ -456,12 +523,14 @@ fn hostedStdoutLine(ops: *builtins.host_abi.RocOps, 
ret_ptr: *anyopaque, args_pt } return; // Match! } - // Mismatch + // Mismatch - must allocate a copy of the message since the RocStr may be freed + const actual_copy = host.gpa.allocator().dupe(u8, message) catch ""; host.test_state.failed = true; host.test_state.failure_info = .{ .expected_type = entry.effect_type, .expected_value = entry.value, .actual_type = .stdout_expect, + .actual_value = actual_copy, .spec_line = entry.spec_line, }; if (host.test_state.verbose) { @@ -474,8 +543,16 @@ fn hostedStdoutLine(ops: *builtins.host_abi.RocOps, ret_ptr: *anyopaque, args_pt stderr_file.writeAll("\")\n") catch {}; } } else { - // Extra output not in spec + // Extra output not in spec - must allocate a copy of the message + const actual_copy = host.gpa.allocator().dupe(u8, message) catch ""; host.test_state.failed = true; + host.test_state.failure_info = .{ + .expected_type = .stdout_expect, // We expected nothing + .expected_value = "", + .actual_type = .stdout_expect, + .actual_value = actual_copy, + .spec_line = 0, + }; if (host.test_state.verbose) { stderr_file.writeAll("[FAIL] stdout: \"") catch {}; stderr_file.writeAll(message) catch {}; @@ -514,6 +591,13 @@ fn platform_main(test_spec: ?[]const u8, test_verbose: bool) !c_int { } defer { + // Free duplicated actual_value if allocated (on test failure) + if (host_env.test_state.failure_info) |info| { + if (info.actual_value.len > 0) { + host_env.gpa.allocator().free(info.actual_value); + } + } + // Free test entries if allocated if (host_env.test_state.entries.len > 0) { host_env.gpa.allocator().free(host_env.test_state.entries); @@ -558,20 +642,46 @@ fn platform_main(test_spec: ?[]const u8, test_verbose: bool) !c_int { // Print failure info if (host_env.test_state.failure_info) |info| { - var buf: [512]u8 = undefined; - const msg = std.fmt.bufPrint(&buf, "TEST FAILED at spec line {d}:\n Expected: {s} \"{s}\"\n Got: {s}\n", .{ - info.spec_line, - effectTypeName(info.expected_type), - info.expected_value, - 
effectTypeName(info.actual_type), - }) catch "TEST FAILED\n"; - stderr_file.writeAll(msg) catch {}; + if (info.spec_line == 0) { + // Extra/unexpected output + stderr_file.writeAll("TEST FAILED: Unexpected ") catch {}; + stderr_file.writeAll(effectTypeName(info.actual_type)) catch {}; + stderr_file.writeAll(" output: \"") catch {}; + stderr_file.writeAll(info.actual_value) catch {}; + stderr_file.writeAll("\"\n") catch {}; + } else { + var buf: [512]u8 = undefined; + const msg = std.fmt.bufPrint(&buf, "TEST FAILED at spec line {d}:\n Expected: {s} \"{s}\"\n Got: {s} \"{s}\"\n", .{ + info.spec_line, + effectTypeName(info.expected_type), + info.expected_value, + effectTypeName(info.actual_type), + info.actual_value, + }) catch "TEST FAILED\n"; + stderr_file.writeAll(msg) catch {}; + } } else if (host_env.test_state.current_index < host_env.test_state.entries.len) { - // Not all entries were consumed + // Not all entries were consumed - list what's remaining const remaining = host_env.test_state.entries.len - host_env.test_state.current_index; var buf: [256]u8 = undefined; - const msg = std.fmt.bufPrint(&buf, "TEST FAILED: {d} expected IO operation(s) not performed\n", .{remaining}) catch "TEST FAILED: expected IO operations not performed\n"; + const msg = std.fmt.bufPrint(&buf, "TEST FAILED: {d} expected IO operation(s) not performed:\n", .{remaining}) catch "TEST FAILED: expected IO operations not performed\n"; stderr_file.writeAll(msg) catch {}; + + // List up to 5 unconsumed entries + const max_to_show: usize = 5; + var shown: usize = 0; + for (host_env.test_state.entries[host_env.test_state.current_index..]) |entry| { + if (shown >= max_to_show) { + stderr_file.writeAll(" ...\n") catch {}; + break; + } + stderr_file.writeAll(" - ") catch {}; + stderr_file.writeAll(effectTypeName(entry.effect_type)) catch {}; + stderr_file.writeAll(": \"") catch {}; + stderr_file.writeAll(entry.value) catch {}; + stderr_file.writeAll("\"\n") catch {}; + shown += 1; + } } else { 
stderr_file.writeAll("TEST FAILED\n") catch {}; } From 720039953503db6bdcf02770bd7e3c9ade71e5d8 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 20:44:05 -0500 Subject: [PATCH 09/64] Fix more intepreter logic --- src/eval/StackValue.zig | 443 +++++++------ src/eval/comptime_evaluator.zig | 57 +- src/eval/interpreter.zig | 1049 ++++++++++++++++++++++--------- src/eval/render_helpers.zig | 40 +- src/eval/test/helpers.zig | 7 +- src/repl/eval.zig | 11 +- 6 files changed, 1049 insertions(+), 558 deletions(-) diff --git a/src/eval/StackValue.zig b/src/eval/StackValue.zig index 1c65b0af7c..931fd410ab 100644 --- a/src/eval/StackValue.zig +++ b/src/eval/StackValue.zig @@ -34,14 +34,203 @@ const Expr = CIR.Expr; const StackValue = @This(); +// ============================================================================ +// Internal helper functions for memory operations that don't need rt_var +// ============================================================================ + +/// Increment reference count for a value given its layout and pointer. +/// Used internally when we don't need full StackValue type information. 
+fn increfLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore) void { + if (layout.tag == .scalar and layout.data.scalar.tag == .str) { + if (ptr == null) return; + const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*; + roc_str.incref(1); + return; + } + if (layout.tag == .list) { + if (ptr == null) return; + const list_value = @as(*const RocList, @ptrCast(@alignCast(ptr.?))).*; + list_value.incref(1, false); + return; + } + if (layout.tag == .box) { + if (ptr == null) return; + const slot: *usize = @ptrCast(@alignCast(ptr.?)); + if (slot.* != 0) { + const data_ptr: [*]u8 = @as([*]u8, @ptrFromInt(slot.*)); + builtins.utils.increfDataPtrC(@as(?[*]u8, data_ptr), 1); + } + return; + } + if (layout.tag == .record) { + if (ptr == null) return; + const record_data = layout_cache.getRecordData(layout.data.record.idx); + if (record_data.fields.count == 0) return; + + const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields()); + const base_ptr = @as([*]u8, @ptrCast(ptr.?)); + + var field_index: usize = 0; + while (field_index < field_layouts.len) : (field_index += 1) { + const field_info = field_layouts.get(field_index); + const field_layout = layout_cache.getLayout(field_info.layout); + const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index)); + const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset)); + increfLayoutPtr(field_layout, field_ptr, layout_cache); + } + return; + } + if (layout.tag == .tuple) { + if (ptr == null) return; + const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx); + if (tuple_data.fields.count == 0) return; + + const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields()); + const base_ptr = @as([*]u8, @ptrCast(ptr.?)); + + var elem_index: usize = 0; + while (elem_index < element_layouts.len) : (elem_index += 1) { + const elem_info = element_layouts.get(elem_index); + const elem_layout = 
layout_cache.getLayout(elem_info.layout); + const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index)); + const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset)); + increfLayoutPtr(elem_layout, elem_ptr, layout_cache); + } + return; + } + if (layout.tag == .tag_union) { + if (ptr == null) return; + // For unions, we need to read the tag and incref the appropriate payload + // This is complex - for now just skip (caller should handle specific union types) + return; + } + // Other layout types (scalar ints/floats, zst, etc.) don't need refcounting +} + +/// Decrement reference count for a value given its layout and pointer. +/// Used internally when we don't need full StackValue type information. +fn decrefLayoutPtr(layout: Layout, ptr: ?*anyopaque, layout_cache: *LayoutStore, ops: *RocOps) void { + if (layout.tag == .scalar and layout.data.scalar.tag == .str) { + if (ptr == null) return; + const roc_str = @as(*const RocStr, @ptrCast(@alignCast(ptr.?))).*; + roc_str.decref(ops); + return; + } + if (layout.tag == .list) { + if (ptr == null) return; + const list_header: *const RocList = @ptrCast(@alignCast(ptr.?)); + const list_value = list_header.*; + const elem_layout = layout_cache.getLayout(layout.data.list); + const alignment_u32: u32 = @intCast(elem_layout.alignment(layout_cache.targetUsize()).toByteUnits()); + const element_width: usize = @intCast(layout_cache.layoutSize(elem_layout)); + const elements_refcounted = elem_layout.isRefcounted(); + + // Decref elements when unique + if (list_value.isUnique()) { + if (list_value.getAllocationDataPtr()) |source| { + const count = list_value.getAllocationElementCount(elements_refcounted); + var idx: usize = 0; + while (idx < count) : (idx += 1) { + const elem_ptr = source + idx * element_width; + decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops); + } + } + } + list_value.decref(alignment_u32, element_width, elements_refcounted, null, 
&builtins.list.rcNone, ops); + return; + } + if (layout.tag == .box) { + if (ptr == null) return; + const slot: *usize = @ptrCast(@alignCast(ptr.?)); + const raw_ptr = slot.*; + if (raw_ptr == 0) return; + const data_ptr = @as([*]u8, @ptrFromInt(raw_ptr)); + const target_usize = layout_cache.targetUsize(); + const elem_layout = layout_cache.getLayout(layout.data.box); + const elem_alignment: u32 = @intCast(elem_layout.alignment(target_usize).toByteUnits()); + + const ptr_int = @intFromPtr(data_ptr); + const tag_mask: usize = if (@sizeOf(usize) == 8) 0b111 else 0b11; + const unmasked_ptr = ptr_int & ~tag_mask; + const payload_ptr = @as([*]u8, @ptrFromInt(unmasked_ptr)); + const refcount_ptr: *isize = @as(*isize, @ptrFromInt(unmasked_ptr - @sizeOf(isize))); + + if (builtins.utils.rcUnique(refcount_ptr.*)) { + if (elem_layout.isRefcounted()) { + decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops); + } + } + builtins.utils.decrefDataPtrC(@as(?[*]u8, payload_ptr), elem_alignment, false, ops); + slot.* = 0; + return; + } + if (layout.tag == .record) { + if (ptr == null) return; + const record_data = layout_cache.getRecordData(layout.data.record.idx); + if (record_data.fields.count == 0) return; + + const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields()); + const base_ptr = @as([*]u8, @ptrCast(ptr.?)); + + var field_index: usize = 0; + while (field_index < field_layouts.len) : (field_index += 1) { + const field_info = field_layouts.get(field_index); + const field_layout = layout_cache.getLayout(field_info.layout); + const field_offset = layout_cache.getRecordFieldOffset(layout.data.record.idx, @intCast(field_index)); + const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset)); + decrefLayoutPtr(field_layout, field_ptr, layout_cache, ops); + } + return; + } + if (layout.tag == .tuple) { + if (ptr == null) return; + const tuple_data = layout_cache.getTupleData(layout.data.tuple.idx); + if 
(tuple_data.fields.count == 0) return; + + const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields()); + const base_ptr = @as([*]u8, @ptrCast(ptr.?)); + + var elem_index: usize = 0; + while (elem_index < element_layouts.len) : (elem_index += 1) { + const elem_info = element_layouts.get(elem_index); + const elem_layout = layout_cache.getLayout(elem_info.layout); + const elem_offset = layout_cache.getTupleElementOffset(layout.data.tuple.idx, @intCast(elem_index)); + const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset)); + decrefLayoutPtr(elem_layout, elem_ptr, layout_cache, ops); + } + return; + } + if (layout.tag == .closure) { + if (ptr == null) return; + // Get the closure header to find the captures layout + const closure_header: *const layout_mod.Closure = @ptrCast(@alignCast(ptr.?)); + const captures_layout = layout_cache.getLayout(closure_header.captures_layout_idx); + + // Only decref if there are actual captures (record with fields) + if (captures_layout.tag == .record) { + const record_data = layout_cache.getRecordData(captures_layout.data.record.idx); + if (record_data.fields.count > 0) { + const header_size = @sizeOf(layout_mod.Closure); + const cap_align = captures_layout.alignment(layout_cache.targetUsize()); + const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits())); + const base_ptr: [*]u8 = @ptrCast(@alignCast(ptr.?)); + const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off); + decrefLayoutPtr(captures_layout, rec_ptr, layout_cache, ops); + } + } + return; + } + // Other layout types (scalar ints/floats, zst, etc.) 
don't need refcounting +} + /// Type and memory layout information for the result value layout: Layout, /// Ptr to the actual value in stack memory ptr: ?*anyopaque, /// Flag to track whether the memory has been initialized is_initialized: bool = false, -/// Optional runtime type variable for type information (used in constant folding) -rt_var: ?types.Var = null, +/// Runtime type variable for type information (used for method dispatch and constant folding) +rt_var: types.Var, /// Copy this stack value to a destination pointer with bounds checking pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopaque, _: *RocOps) !void { @@ -226,13 +415,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index)); const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset)); - const field_value = StackValue{ - .layout = field_layout, - .ptr = field_ptr, - .is_initialized = true, - }; - - field_value.incref(layout_cache); + increfLayoutPtr(field_layout, field_ptr, layout_cache); } return; } @@ -263,13 +446,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index)); const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset)); - const elem_value = StackValue{ - .layout = elem_layout, - .ptr = elem_ptr, - .is_initialized = true, - }; - - elem_value.incref(layout_cache); + increfLayoutPtr(elem_layout, elem_ptr, layout_cache); } return; } @@ -304,29 +481,8 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?)); const rec_ptr: [*]u8 = @ptrCast(base_ptr + aligned_off); - // Iterate over each field in the captures record and incref all fields. 
- // We call incref on ALL fields (not just isRefcounted()) because: - // - For directly refcounted types (str, list, box): increfs them - // - For nested records/tuples: recursively handles their contents - // - For scalars: incref is a no-op - // This is symmetric with decref. - const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields()); - var field_index: usize = 0; - while (field_index < field_layouts.len) : (field_index += 1) { - const field_info = field_layouts.get(field_index); - const field_layout = layout_cache.getLayout(field_info.layout); - - const field_offset = layout_cache.getRecordFieldOffset(captures_layout.data.record.idx, @intCast(field_index)); - const field_ptr = @as(*anyopaque, @ptrCast(rec_ptr + field_offset)); - - const field_value = StackValue{ - .layout = field_layout, - .ptr = field_ptr, - .is_initialized = true, - }; - - field_value.incref(layout_cache); - } + // Incref the entire captures record (which handles all fields recursively) + increfLayoutPtr(captures_layout, @ptrCast(rec_ptr), layout_cache); } } return; @@ -365,13 +521,7 @@ pub fn copyToPtr(self: StackValue, layout_cache: *LayoutStore, dest_ptr: *anyopa } // Incref only the active variant's payload (at offset 0) - const payload_value = StackValue{ - .layout = variant_layout, - .ptr = @as(*anyopaque, @ptrCast(base_ptr)), - .is_initialized = true, - }; - - payload_value.incref(layout_cache); + increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache); return; } @@ -722,7 +872,7 @@ pub const TupleAccessor = struct { element_layouts: layout_mod.TupleField.SafeMultiList.Slice, /// Get a StackValue for the element at the given original index (before sorting) - pub fn getElement(self: TupleAccessor, original_index: usize) !StackValue { + pub fn getElement(self: TupleAccessor, original_index: usize, elem_rt_var: types.Var) !StackValue { // Find the sorted index corresponding to this original index const sorted_index = 
self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds; @@ -748,13 +898,24 @@ pub const TupleAccessor = struct { .layout = element_layout, .ptr = element_ptr, .is_initialized = true, // Elements in existing tuples are initialized + .rt_var = elem_rt_var, }; } + /// Get just the element pointer without needing type information (for internal operations like setElement) + pub fn getElementPtr(self: TupleAccessor, original_index: usize) !*anyopaque { + const sorted_index = self.findElementIndexByOriginal(original_index) orelse return error.TupleIndexOutOfBounds; + std.debug.assert(self.base_value.is_initialized); + std.debug.assert(self.base_value.ptr != null); + const element_offset = self.layout_cache.getTupleElementOffset(self.tuple_layout.data.tuple.idx, @intCast(sorted_index)); + const base_ptr = @as([*]u8, @ptrCast(self.base_value.ptr.?)); + return @as(*anyopaque, @ptrCast(base_ptr + element_offset)); + } + /// Set an element by copying from a source StackValue pub fn setElement(self: TupleAccessor, index: usize, source: StackValue, ops: *RocOps) !void { - const dest_element = try self.getElement(index); - try source.copyToPtr(self.layout_cache, dest_element.ptr.?, ops); + const dest_ptr = try self.getElementPtr(index); + try source.copyToPtr(self.layout_cache, dest_ptr, ops); } /// Find the sorted element index corresponding to an original tuple position @@ -871,11 +1032,11 @@ pub const ListAccessor = struct { return self.list.len(); } - pub fn getElement(self: ListAccessor, index: usize) !StackValue { + pub fn getElement(self: ListAccessor, index: usize, elem_rt_var: types.Var) !StackValue { if (index >= self.list.len()) return error.ListIndexOutOfBounds; if (self.element_size == 0) { - return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true }; + return StackValue{ .layout = self.element_layout, .ptr = null, .is_initialized = true, .rt_var = elem_rt_var }; } const base_ptr = self.list.bytes orelse 
return error.NullStackPointer; @@ -884,8 +1045,18 @@ pub const ListAccessor = struct { .layout = self.element_layout, .ptr = @ptrCast(base_ptr + offset), .is_initialized = true, + .rt_var = elem_rt_var, }; } + + /// Get just the element pointer without needing type information (for internal operations) + pub fn getElementPtr(self: ListAccessor, index: usize) !?*anyopaque { + if (index >= self.list.len()) return error.ListIndexOutOfBounds; + if (self.element_size == 0) return null; + const base_ptr = self.list.bytes orelse return error.NullStackPointer; + const offset = index * self.element_size; + return @ptrCast(base_ptr + offset); + } }; fn storeListElementCount(list: *RocList, elements_refcounted: bool) void { @@ -961,7 +1132,7 @@ pub const RecordAccessor = struct { field_layouts: layout_mod.RecordField.SafeMultiList.Slice, /// Get a StackValue for the field at the given index - pub fn getFieldByIndex(self: RecordAccessor, index: usize) !StackValue { + pub fn getFieldByIndex(self: RecordAccessor, index: usize, field_rt_var: types.Var) !StackValue { if (index >= self.field_layouts.len) { return error.RecordIndexOutOfBounds; } @@ -988,11 +1159,12 @@ pub const RecordAccessor = struct { .layout = field_layout, .ptr = field_ptr, .is_initialized = true, // Fields in existing records are initialized + .rt_var = field_rt_var, }; } /// Get a StackValue for the field with the given name - pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx) !?StackValue { + pub fn getFieldByName(self: RecordAccessor, field_name_idx: Ident.Idx, field_rt_var: types.Var) !?StackValue { const field_offset = self.layout_cache.getRecordFieldOffsetByName( self.record_layout.data.record.idx, field_name_idx, @@ -1026,12 +1198,13 @@ pub const RecordAccessor = struct { .layout = field_layout.?, .ptr = field_ptr, .is_initialized = true, + .rt_var = field_rt_var, }; } /// Set a field by copying from a source StackValue pub fn setFieldByIndex(self: RecordAccessor, index: usize, 
source: StackValue, ops: *RocOps) !void { - const dest_field = try self.getFieldByIndex(index); + const dest_field = try self.getFieldByIndex(index, source.rt_var); try source.copyToPtr(self.layout_cache, dest_field.ptr.?, ops); } @@ -1168,15 +1341,6 @@ pub fn copyTo(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) vo ); } -/// Create a StackValue view of a memory region (no copy) -pub fn fromPtr(layout: Layout, ptr: *anyopaque) StackValue { - return StackValue{ - .layout = layout, - .ptr = ptr, - .is_initialized = true, - }; -} - /// Copy value data to another StackValue WITHOUT incrementing refcounts (move semantics) pub fn copyWithoutRefcount(self: StackValue, dest: StackValue, layout_cache: *LayoutStore) void { std.debug.assert(self.is_initialized); @@ -1269,56 +1433,12 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void { } // Handle records by recursively incref'ing each field (symmetric with decref) if (self.layout.tag == .record) { - if (self.ptr == null) return; - const record_data = layout_cache.getRecordData(self.layout.data.record.idx); - if (record_data.fields.count == 0) return; - - const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields()); - const base_ptr = @as([*]u8, @ptrCast(self.ptr.?)); - - var field_index: usize = 0; - while (field_index < field_layouts.len) : (field_index += 1) { - const field_info = field_layouts.get(field_index); - const field_layout = layout_cache.getLayout(field_info.layout); - - const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index)); - const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset)); - - const field_value = StackValue{ - .layout = field_layout, - .ptr = field_ptr, - .is_initialized = true, - }; - - field_value.incref(layout_cache); - } + increfLayoutPtr(self.layout, self.ptr, layout_cache); return; } // Handle tuples by recursively incref'ing each element (symmetric with decref) if 
(self.layout.tag == .tuple) { - if (self.ptr == null) return; - const tuple_data = layout_cache.getTupleData(self.layout.data.tuple.idx); - if (tuple_data.fields.count == 0) return; - - const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields()); - const base_ptr = @as([*]u8, @ptrCast(self.ptr.?)); - - var elem_index: usize = 0; - while (elem_index < element_layouts.len) : (elem_index += 1) { - const elem_info = element_layouts.get(elem_index); - const elem_layout = layout_cache.getLayout(elem_info.layout); - - const elem_offset = layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index)); - const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset)); - - const elem_value = StackValue{ - .layout = elem_layout, - .ptr = elem_ptr, - .is_initialized = true, - }; - - elem_value.incref(layout_cache); - } + increfLayoutPtr(self.layout, self.ptr, layout_cache); return; } // Handle tag unions by reading discriminant and incref'ing only the active variant's payload @@ -1342,17 +1462,11 @@ pub fn incref(self: StackValue, layout_cache: *LayoutStore) void { const variant_layout = layout_cache.getLayout(variants.get(discriminant).payload_layout); // Incref only the active variant's payload (at offset 0) - const payload_value = StackValue{ - .layout = variant_layout, - .ptr = @as(*anyopaque, @ptrCast(base_ptr)), - .is_initialized = true, - }; - if (comptime trace_refcount) { traceRefcount("INCREF tag_union disc={} variant_layout.tag={}", .{ discriminant, @intFromEnum(variant_layout.tag) }); } - payload_value.incref(layout_cache); + increfLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache); return; } } @@ -1450,12 +1564,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void { var idx: usize = 0; while (idx < count) : (idx += 1) { const elem_ptr = source + idx * element_width; - const elem_value = StackValue{ - .layout = elem_layout, - .ptr = @ptrCast(elem_ptr), - 
.is_initialized = true, - }; - elem_value.decref(layout_cache, ops); + decrefLayoutPtr(elem_layout, @ptrCast(elem_ptr), layout_cache, ops); } } } @@ -1498,12 +1607,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void { if (builtins.utils.rcUnique(refcount_ptr.*)) { if (elem_layout.isRefcounted()) { - const payload_value = StackValue{ - .layout = elem_layout, - .ptr = @ptrCast(@alignCast(payload_ptr)), - .is_initialized = true, - }; - payload_value.decref(layout_cache, ops); + decrefLayoutPtr(elem_layout, @ptrCast(@alignCast(payload_ptr)), layout_cache, ops); } } @@ -1523,26 +1627,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void { }); } - const field_layouts = layout_cache.record_fields.sliceRange(record_data.getFields()); - const base_ptr = @as([*]u8, @ptrCast(self.ptr.?)); - - var field_index: usize = 0; - while (field_index < field_layouts.len) : (field_index += 1) { - const field_info = field_layouts.get(field_index); - const field_layout = layout_cache.getLayout(field_info.layout); - - const field_offset = layout_cache.getRecordFieldOffset(self.layout.data.record.idx, @intCast(field_index)); - const field_ptr = @as(*anyopaque, @ptrCast(base_ptr + field_offset)); - - const field_value = StackValue{ - .layout = field_layout, - .ptr = field_ptr, - .is_initialized = true, - }; - - field_value.decref(layout_cache, ops); - } - + decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops); return; }, .box_of_zst => { @@ -1563,61 +1648,11 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void { }); } - const element_layouts = layout_cache.tuple_fields.sliceRange(tuple_data.getFields()); - const base_ptr = @as([*]u8, @ptrCast(self.ptr.?)); - - var elem_index: usize = 0; - while (elem_index < element_layouts.len) : (elem_index += 1) { - const elem_info = element_layouts.get(elem_index); - const elem_layout = layout_cache.getLayout(elem_info.layout); - - const elem_offset = 
layout_cache.getTupleElementOffset(self.layout.data.tuple.idx, @intCast(elem_index)); - const elem_ptr = @as(*anyopaque, @ptrCast(base_ptr + elem_offset)); - - const elem_value = StackValue{ - .layout = elem_layout, - .ptr = elem_ptr, - .is_initialized = true, - }; - - elem_value.decref(layout_cache, ops); - } - + decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops); return; }, .closure => { - if (self.ptr == null) return; - // Get the closure header to find the captures layout - const closure = self.asClosure(); - const captures_layout = layout_cache.getLayout(closure.captures_layout_idx); - - // Only decref if there are actual captures (record with fields) - if (captures_layout.tag == .record) { - const record_data = layout_cache.getRecordData(captures_layout.data.record.idx); - if (record_data.fields.count > 0) { - if (comptime trace_refcount) { - traceRefcount("DECREF closure ptr=0x{x} captures={}", .{ - @intFromPtr(self.ptr), - record_data.fields.count, - }); - } - - // Calculate the offset to the captures record (after header, with alignment) - const header_size = @sizeOf(layout_mod.Closure); - const cap_align = captures_layout.alignment(layout_cache.targetUsize()); - const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits())); - const base_ptr: [*]u8 = @ptrCast(@alignCast(self.ptr.?)); - const rec_ptr: *anyopaque = @ptrCast(base_ptr + aligned_off); - - // Create a StackValue for the captures record and decref it - const captures_value = StackValue{ - .layout = captures_layout, - .ptr = rec_ptr, - .is_initialized = true, - }; - captures_value.decref(layout_cache, ops); - } - } + decrefLayoutPtr(self.layout, self.ptr, layout_cache, ops); return; }, .tag_union => { @@ -1649,13 +1684,7 @@ pub fn decref(self: StackValue, layout_cache: *LayoutStore, ops: *RocOps) void { } // Decref only the active variant's payload (at offset 0) - const payload_value = StackValue{ - .layout = variant_layout, - .ptr = @as(*anyopaque, 
@ptrCast(base_ptr)), - .is_initialized = true, - }; - - payload_value.decref(layout_cache, ops); + decrefLayoutPtr(variant_layout, @as(*anyopaque, @ptrCast(base_ptr)), layout_cache, ops); return; }, else => {}, diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 9f34364e59..52cc4ed15e 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -351,16 +351,8 @@ pub const ComptimeEvaluator = struct { // Convert StackValue to CIR expression based on layout const layout = stack_value.layout; - // Get the runtime type variable from the StackValue first, or fall back to expression type - const rt_var: types_mod.Var = if (stack_value.rt_var) |sv_rt_var| - sv_rt_var - else blk: { - // Fall back to expression type variable - const ct_var = ModuleEnv.varFrom(def.expr); - break :blk self.interpreter.translateTypeVar(self.env, ct_var) catch { - return error.NotImplemented; - }; - }; + // Get the runtime type variable from the StackValue + const rt_var = stack_value.rt_var; const resolved = self.interpreter.runtime_types.resolveVar(rt_var); // Check if it's a tag union type @@ -496,10 +488,14 @@ pub const ComptimeEvaluator = struct { fn foldTagUnionScalar(self: *ComptimeEvaluator, def_idx: CIR.Def.Idx, expr_idx: CIR.Expr.Idx, stack_value: eval_mod.StackValue) !void { _ = def_idx; // unused now that we get rt_var from stack_value // The value is the tag index directly (scalar integer) + // Verify the layout is actually a scalar int before extracting + if (stack_value.layout.tag != .scalar or stack_value.layout.data.scalar.tag != .int) { + return error.NotImplemented; + } const tag_index: usize = @intCast(stack_value.asI128()); - // Get the runtime type variable from the StackValue (already validated in tryFoldConstant) - const rt_var = stack_value.rt_var orelse return error.NotImplemented; + // Get the runtime type variable from the StackValue + const rt_var = stack_value.rt_var; // Get the list of tags for this union 
type var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator); @@ -546,17 +542,18 @@ pub const ComptimeEvaluator = struct { var acc = try stack_value.asTuple(&self.interpreter.runtime_layout_store); // Element 1 is the tag discriminant - getElement takes original index directly - const tag_field = try acc.getElement(1); + const tag_elem_rt_var = try self.interpreter.runtime_types.fresh(); + const tag_field = try acc.getElement(1, tag_elem_rt_var); // Extract tag index if (tag_field.layout.tag != .scalar or tag_field.layout.data.scalar.tag != .int) { return error.NotImplemented; } - const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true }; + const tmp_sv = eval_mod.StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = tag_elem_rt_var }; const tag_index: usize = @intCast(tmp_sv.asI128()); - // Get the runtime type variable from the StackValue (already validated in tryFoldConstant) - const rt_var = stack_value.rt_var orelse return error.NotImplemented; + // Get the runtime type variable from the StackValue + const rt_var = stack_value.rt_var; // Get the list of tags for this union type var tag_list = std.array_list.AlignedManaged(types_mod.Tag, null).init(self.allocator); @@ -999,7 +996,8 @@ pub const ComptimeEvaluator = struct { } // Build is_negative Bool - const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0); + const bool_rt_var = try self.interpreter.getCanonicalBoolRuntimeVar(); + const is_neg_value = try self.interpreter.pushRaw(layout_mod.Layout.int(.u8), 0, bool_rt_var); if (is_neg_value.ptr) |ptr| { @as(*u8, @ptrCast(@alignCast(ptr))).* = @intFromBool(num_lit_info.is_negative); } @@ -1195,7 +1193,8 @@ pub const ComptimeEvaluator = struct { const list_layout_idx = try self.interpreter.runtime_layout_store.insertList(layout_mod.Idx.u8); const list_layout = 
self.interpreter.runtime_layout_store.getLayout(list_layout_idx); - const dest = try self.interpreter.pushRaw(list_layout, 0); + // Use placeholder rt_var for U8 list + const dest = try self.interpreter.pushRaw(list_layout, 0, @enumFromInt(0)); if (dest.ptr == null) return dest; const header: *builtins.list.RocList = @ptrCast(@alignCast(dest.ptr.?)); @@ -1246,7 +1245,8 @@ pub const ComptimeEvaluator = struct { const record_layout_idx = try self.interpreter.runtime_layout_store.putRecord(self.env, &field_layouts, &field_names); const record_layout = self.interpreter.runtime_layout_store.getLayout(record_layout_idx); - var dest = try self.interpreter.pushRaw(record_layout, 0); + // Use placeholder rt_var for numeral record + var dest = try self.interpreter.pushRaw(record_layout, 0, @enumFromInt(0)); var accessor = try dest.asRecord(&self.interpreter.runtime_layout_store); // Use self.env for field lookups since the record was built with self.env's idents @@ -1319,7 +1319,8 @@ pub const ComptimeEvaluator = struct { // Use layout store's env for field lookups since records use that env's idents const layout_env = self.interpreter.runtime_layout_store.env; const tag_idx = accessor.findFieldIndex(layout_env.idents.tag) orelse return true; - const tag_field = accessor.getFieldByIndex(tag_idx) catch return true; + const tag_rt_var = self.interpreter.runtime_types.fresh() catch return true; + const tag_field = accessor.getFieldByIndex(tag_idx, tag_rt_var) catch return true; if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { const tag_value = tag_field.asI128(); @@ -1347,7 +1348,8 @@ pub const ComptimeEvaluator = struct { var accessor = result.asTuple(&self.interpreter.runtime_layout_store) catch return true; // Element 1 is tag discriminant - getElement takes original index directly - const tag_field = accessor.getElement(1) catch return true; + const tag_elem_rt_var = self.interpreter.runtime_types.fresh() catch return true; + const 
tag_field = accessor.getElement(1, tag_elem_rt_var) catch return true; if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { const tag_value = tag_field.asI128(); @@ -1401,7 +1403,10 @@ pub const ComptimeEvaluator = struct { // This should never happen - Try type must have a payload field return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (missing payload field)", .{}); }; - const payload_field = try_accessor.getFieldByIndex(payload_idx) catch { + const payload_rt_var = self.interpreter.runtime_types.fresh() catch { + return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not create rt_var)", .{}); + }; + const payload_field = try_accessor.getFieldByIndex(payload_idx, payload_rt_var) catch { return try std.fmt.allocPrint(self.allocator, "Internal error: from_numeral returned malformed Try value (could not access payload)", .{}); }; @@ -1416,7 +1421,10 @@ pub const ComptimeEvaluator = struct { // Check if this has a payload field (for the Str) // Single-tag unions might not have a "tag" field, so we look for payload first if (err_accessor.findFieldIndex(layout_env.idents.payload)) |err_payload_idx| { - const err_payload = err_accessor.getFieldByIndex(err_payload_idx) catch { + const err_payload_rt_var = self.interpreter.runtime_types.fresh() catch { + return try std.fmt.allocPrint(self.allocator, "Internal error: could not create rt_var for InvalidNumeral payload", .{}); + }; + const err_payload = err_accessor.getFieldByIndex(err_payload_idx, err_payload_rt_var) catch { return try std.fmt.allocPrint(self.allocator, "Internal error: could not access InvalidNumeral payload", .{}); }; return try self.extractStrFromValue(err_payload); @@ -1426,7 +1434,8 @@ pub const ComptimeEvaluator = struct { // Iterate through fields looking for a Str var field_idx: usize = 0; while (true) : (field_idx += 1) { - const field = 
err_accessor.getFieldByIndex(field_idx) catch break; + const iter_field_rt_var = self.interpreter.runtime_types.fresh() catch break; + const field = err_accessor.getFieldByIndex(field_idx, iter_field_rt_var) catch break; if (field.layout.tag == .scalar and field.layout.data.scalar.tag == .str) { return try self.extractStrFromValue(field); } diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index d081400b9e..93a0c8bfc9 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -37,6 +37,7 @@ const BuiltinTypes = @import("builtins.zig").BuiltinTypes; const RefcountContext = struct { layout_store: *layout.Store, elem_layout: Layout, + elem_rt_var: types.Var, roc_ops: *RocOps, }; @@ -47,6 +48,7 @@ fn listElementInc(context_opaque: ?*anyopaque, elem_ptr: ?[*]u8) callconv(.c) vo .layout = context.elem_layout, .ptr = @ptrCast(elem_ptr), .is_initialized = true, + .rt_var = context.elem_rt_var, }; elem_value.incref(context.layout_store); } @@ -58,6 +60,7 @@ fn listElementDec(context_opaque: ?*anyopaque, elem_ptr: ?[*]u8) callconv(.c) vo .layout = context.elem_layout, .ptr = @ptrCast(elem_ptr), .is_initialized = true, + .rt_var = context.elem_rt_var, }; elem_value.decref(context.layout_store, context.roc_ops); } @@ -251,6 +254,7 @@ pub const Interpreter = struct { // Track active closures during calls (for capture lookup) active_closures: std.array_list.Managed(StackValue), canonical_bool_rt_var: ?types.Var, + canonical_str_rt_var: ?types.Var, // Used to unwrap extensible tags scratch_tags: std.array_list.Managed(types.Tag), /// Builtin types required by the interpreter (Bool, Try, etc.) 
@@ -407,6 +411,7 @@ pub const Interpreter = struct { .bindings = try std.array_list.Managed(Binding).initCapacity(allocator, 8), .active_closures = try std.array_list.Managed(StackValue).initCapacity(allocator, 4), .canonical_bool_rt_var = null, + .canonical_str_rt_var = null, .scratch_tags = try std.array_list.Managed(types.Tag).initCapacity(allocator, 8), .builtins = builtin_types, .def_stack = try std.array_list.Managed(DefInProgress).initCapacity(allocator, 4), @@ -583,13 +588,15 @@ pub const Interpreter = struct { const tuple_idx = try self.runtime_layout_store.putTuple(param_layouts); const tuple_layout = self.runtime_layout_store.getLayout(tuple_idx); - args_tuple_value = StackValue{ .layout = tuple_layout, .ptr = args_ptr, .is_initialized = true }; + // Use first element's rt_var as placeholder - this tuple is internal-only, + // elements get their own rt_vars when extracted via getElement + args_tuple_value = StackValue{ .layout = tuple_layout, .ptr = args_ptr, .is_initialized = true, .rt_var = param_rt_vars[0] }; args_accessor = try args_tuple_value.asTuple(&self.runtime_layout_store); var j: usize = 0; while (j < params.len) : (j += 1) { // getElement expects original index and converts to sorted internally - const arg_value = try args_accessor.getElement(j); + const arg_value = try args_accessor.getElement(j, param_rt_vars[j]); const matched = try self.patternMatchesBind(params[j], arg_value, param_rt_vars[j], roc_ops, &temp_binds, @enumFromInt(0)); if (!matched) return error.TypeMismatch; } @@ -664,15 +671,15 @@ pub const Interpreter = struct { return true; } - fn pushStr(self: *Interpreter) !StackValue { + fn pushStr(self: *Interpreter, rt_var: types.Var) !StackValue { const layout_val = Layout.str(); const size: u32 = self.runtime_layout_store.layoutSize(layout_val); if (size == 0) { - return StackValue{ .layout = layout_val, .ptr = null, .is_initialized = false }; + return StackValue{ .layout = layout_val, .ptr = null, .is_initialized = false, 
.rt_var = rt_var }; } const alignment = layout_val.alignment(self.runtime_layout_store.targetUsize()); const ptr = try self.stack_memory.alloca(size, alignment); - return StackValue{ .layout = layout_val, .ptr = ptr, .is_initialized = true }; + return StackValue{ .layout = layout_val, .ptr = ptr, .is_initialized = true, .rt_var = rt_var }; } /// Create a constant/static string using the arena allocator. @@ -739,10 +746,10 @@ pub const Interpreter = struct { return RocStr.fromSlice(rendered, roc_ops); } - pub fn pushRaw(self: *Interpreter, layout_val: Layout, initial_size: usize) !StackValue { + pub fn pushRaw(self: *Interpreter, layout_val: Layout, initial_size: usize, rt_var: types.Var) !StackValue { const size: u32 = if (initial_size == 0) self.runtime_layout_store.layoutSize(layout_val) else @intCast(initial_size); if (size == 0) { - return StackValue{ .layout = layout_val, .ptr = null, .is_initialized = true }; + return StackValue{ .layout = layout_val, .ptr = null, .is_initialized = true, .rt_var = rt_var }; } const target_usize = self.runtime_layout_store.targetUsize(); var alignment = layout_val.alignment(target_usize); @@ -751,13 +758,13 @@ pub const Interpreter = struct { alignment = alignment.max(captures_layout.alignment(target_usize)); } const ptr = try self.stack_memory.alloca(size, alignment); - return StackValue{ .layout = layout_val, .ptr = ptr, .is_initialized = true }; + return StackValue{ .layout = layout_val, .ptr = ptr, .is_initialized = true, .rt_var = rt_var }; } /// Push raw bytes with a specific size and alignment (for building records/tuples) - pub fn pushRawBytes(self: *Interpreter, size: usize, alignment: usize) !StackValue { + pub fn pushRawBytes(self: *Interpreter, size: usize, alignment: usize, rt_var: types.Var) !StackValue { if (size == 0) { - return StackValue{ .layout = .{ .tag = .zst, .data = undefined }, .ptr = null, .is_initialized = true }; + return StackValue{ .layout = .{ .tag = .zst, .data = undefined }, .ptr = null, 
.is_initialized = true, .rt_var = rt_var }; } const align_enum: std.mem.Alignment = switch (alignment) { 1 => .@"1", @@ -768,7 +775,7 @@ pub const Interpreter = struct { else => .@"1", }; const ptr = try self.stack_memory.alloca(@intCast(size), align_enum); - return StackValue{ .layout = .{ .tag = .zst, .data = undefined }, .ptr = ptr, .is_initialized = true }; + return StackValue{ .layout = .{ .tag = .zst, .data = undefined }, .ptr = ptr, .is_initialized = true, .rt_var = rt_var }; } pub fn pushCopy(self: *Interpreter, src: StackValue, roc_ops: *RocOps) !StackValue { @@ -830,9 +837,11 @@ pub const Interpreter = struct { // Make a unique copy of the list for sorting const elements_refcounted = elem_layout.isRefcounted(); + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; @@ -862,15 +871,18 @@ pub const Interpreter = struct { const elem0_ptr = working_list.bytes.? + 0 * elem_size; const elem1_ptr = working_list.bytes.? 
+ 1 * elem_size; + // elem_rt_var already declared above for RefcountContext const elem0_value = StackValue{ .layout = elem_layout, .ptr = @ptrCast(elem0_ptr), .is_initialized = true, + .rt_var = elem_rt_var, }; const elem1_value = StackValue{ .layout = elem_layout, .ptr = @ptrCast(elem1_ptr), .is_initialized = true, + .rt_var = elem_rt_var, }; // Copy elements for comparison (compare_fn will consume them) @@ -888,6 +900,7 @@ pub const Interpreter = struct { .list_len = list_len, .elem_size = elem_size, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, } } }); saved_rigid_subst = null; // Ownership transferred to continuation @@ -959,7 +972,7 @@ pub const Interpreter = struct { // Allocate space for the return value using the actual return type const return_layout = try self.getRuntimeLayout(return_rt_var); - const result_value = try self.pushRaw(return_layout, 0); + const result_value = try self.pushRaw(return_layout, 0, return_rt_var); // Get return pointer (for ZST returns, use a dummy stack address) const ret_ptr = if (result_value.ptr) |p| p else @as(*anyopaque, @ptrFromInt(@intFromPtr(&result_value))); @@ -1066,7 +1079,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_a_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_a_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1106,7 +1119,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1129,7 +1142,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_arg.layout; // Str layout - var out = try 
self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1152,7 +1165,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1193,7 +1206,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1216,7 +1229,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = str_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, str_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1279,7 +1292,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = string_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1307,7 +1320,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = string_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1334,7 +1347,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = 
string_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1361,7 +1374,7 @@ pub const Interpreter = struct { // Allocate space for the result string const result_layout = string_arg.layout; // Str layout - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; // Copy the result string structure to the output @@ -1381,8 +1394,9 @@ pub const Interpreter = struct { const string: *const RocStr = @ptrCast(@alignCast(string_arg.ptr.?)); const byte_count = builtins.str.countUtf8Bytes(string.*); + const result_rt_var = return_rt_var orelse unreachable; const result_layout = layout.Layout.int(.u64); - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; try out.setInt(@intCast(byte_count)); out.is_initialized = true; @@ -1398,8 +1412,9 @@ pub const Interpreter = struct { const result_str = builtins.str.withCapacityC(capacity, roc_ops); + const result_rt_var = return_rt_var orelse try self.getCanonicalStrRuntimeVar(); const result_layout = layout.Layout.str(); - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const result_ptr: *RocStr = @ptrCast(@alignCast(out.ptr.?)); @@ -1424,7 +1439,7 @@ pub const Interpreter = struct { const result_str = builtins.str.reserveC(string.*, spare, roc_ops); const result_layout = string_arg.layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; const result_ptr: *RocStr = @ptrCast(@alignCast(out.ptr.?)); @@ -1444,7 +1459,7 @@ pub const Interpreter = struct { const result_str = builtins.str.strReleaseExcessCapacity(roc_ops, 
string.*); const result_layout = string_arg.layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, string_arg.rt_var); out.is_initialized = false; const result_ptr: *RocStr = @ptrCast(@alignCast(out.ptr.?)); @@ -1469,7 +1484,7 @@ pub const Interpreter = struct { }; const result_layout = try self.getRuntimeLayout(result_rt_var); - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const result_ptr: *builtins.list.RocList = @ptrCast(@alignCast(out.ptr.?)); @@ -1488,8 +1503,9 @@ pub const Interpreter = struct { const roc_list: *const builtins.list.RocList = @ptrCast(@alignCast(list_arg.ptr.?)); const result_str = builtins.str.fromUtf8Lossy(roc_list.*, roc_ops); + const result_rt_var = return_rt_var orelse try self.getCanonicalStrRuntimeVar(); const result_layout = layout.Layout.str(); - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const result_ptr: *RocStr = @ptrCast(@alignCast(out.ptr.?)); @@ -1545,11 +1561,15 @@ pub const Interpreter = struct { // Return Ok(string) if (result_layout.tag == .tuple) { // Tuple (payload, tag) - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asTuple(&self.runtime_layout_store); + // Create fresh vars for element access (payload is Str, discriminant is int) + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const disc_rt_var = try self.runtime_types.fresh(); + // Element 0 is the payload - clear it first since it's a union - const payload_field = try acc.getElement(0); + const payload_field = try acc.getElement(0, str_rt_var); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -1562,7 +1582,7 @@ pub const Interpreter = 
struct { } // Element 1 is the tag discriminant - const tag_field = try acc.getElement(1); + const tag_field = try acc.getElement(1, disc_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -1573,7 +1593,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .record) { // Record { tag, payload } - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse { @@ -1585,8 +1605,12 @@ pub const Interpreter = struct { return error.Crash; }; + // Create fresh vars for field access (payload is Str, discriminant is int) + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const disc_rt_var = try self.runtime_types.fresh(); + // Write tag discriminant - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const tag_field = try acc.getFieldByIndex(tag_field_idx, disc_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -1594,7 +1618,7 @@ pub const Interpreter = struct { } // Clear payload area first since it's a union - const payload_field = try acc.getFieldByIndex(payload_field_idx); + const payload_field = try acc.getFieldByIndex(payload_field_idx, str_rt_var); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -1610,7 +1634,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tag_union) { // Tag union layout with proper variant info - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); const tu_data = self.runtime_layout_store.getTagUnionData(result_layout.data.tag_union.idx); if 
(dest.ptr) |base_ptr| { @@ -1647,11 +1671,12 @@ pub const Interpreter = struct { // Return Err(BadUtf8({ problem: Utf8Problem, index: U64 })) if (result_layout.tag == .tuple) { // Tuple (payload, tag) - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asTuple(&self.runtime_layout_store); // Element 1 is the tag discriminant - const tag_field = try acc.getElement(1); + const disc_rt_var = try self.runtime_types.fresh(); + const tag_field = try acc.getElement(1, disc_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -1659,17 +1684,20 @@ pub const Interpreter = struct { } // Element 0 is the payload - need to construct BadUtf8 record - const payload_field = try acc.getElement(0); + const payload_rt_var = try self.runtime_types.fresh(); + const payload_field = try acc.getElement(0, payload_rt_var); if (payload_field.layout.tag == .tuple) { // BadUtf8 is represented as a tuple containing the error record var err_tuple = try payload_field.asTuple(&self.runtime_layout_store); // First element should be the record { problem, index } - const inner_payload = try err_tuple.getElement(0); + const inner_rt_var = try self.runtime_types.fresh(); + const inner_payload = try err_tuple.getElement(0, inner_rt_var); if (inner_payload.layout.tag == .record) { var inner_acc = try inner_payload.asRecord(&self.runtime_layout_store); // Set problem field (tag union represented as u8) if (inner_acc.findFieldIndex(self.env.idents.problem)) |problem_idx| { - const problem_field = try inner_acc.getFieldByIndex(problem_idx); + const problem_rt = try self.runtime_types.fresh(); + const problem_field = try inner_acc.getFieldByIndex(problem_idx, problem_rt); if (problem_field.ptr) |ptr| { const typed_ptr: *u8 = @ptrCast(@alignCast(ptr)); typed_ptr.* = @intFromEnum(result.problem_code); @@ -1677,7 +1705,8 @@ pub const 
Interpreter = struct { } // Set index field (U64) if (inner_acc.findFieldIndex(self.env.idents.index)) |index_idx| { - const index_field = try inner_acc.getFieldByIndex(index_idx); + const index_rt = try self.runtime_types.fresh(); + const index_field = try inner_acc.getFieldByIndex(index_idx, index_rt); if (index_field.ptr) |ptr| { const typed_ptr: *u64 = @ptrCast(@alignCast(ptr)); typed_ptr.* = result.byte_index; @@ -1685,7 +1714,8 @@ pub const Interpreter = struct { } } // Set BadUtf8 tag discriminant (index 0 since it's the only variant) - const err_tag = try err_tuple.getElement(1); + const inner_disc_rt_var = try self.runtime_types.fresh(); + const err_tag = try err_tuple.getElement(1, inner_disc_rt_var); if (err_tag.layout.tag == .scalar and err_tag.layout.data.scalar.tag == .int) { var tmp = err_tag; tmp.is_initialized = false; @@ -1695,7 +1725,8 @@ pub const Interpreter = struct { // Payload is a record with tag and payload for BadUtf8 var err_rec = try payload_field.asRecord(&self.runtime_layout_store); if (err_rec.findFieldIndex(self.env.idents.tag)) |tag_idx| { - const inner_tag = try err_rec.getFieldByIndex(tag_idx); + const field_rt = try self.runtime_types.fresh(); + const inner_tag = try err_rec.getFieldByIndex(tag_idx, field_rt); if (inner_tag.layout.tag == .scalar and inner_tag.layout.data.scalar.tag == .int) { var tmp = inner_tag; tmp.is_initialized = false; @@ -1703,18 +1734,21 @@ pub const Interpreter = struct { } } if (err_rec.findFieldIndex(self.env.idents.payload)) |inner_payload_idx| { - const inner_payload = try err_rec.getFieldByIndex(inner_payload_idx); + const field_rt = try self.runtime_types.fresh(); + const inner_payload = try err_rec.getFieldByIndex(inner_payload_idx, field_rt); if (inner_payload.layout.tag == .record) { var inner_acc = try inner_payload.asRecord(&self.runtime_layout_store); if (inner_acc.findFieldIndex(self.env.idents.problem)) |problem_idx| { - const problem_field = try inner_acc.getFieldByIndex(problem_idx); + 
const field_rt2 = try self.runtime_types.fresh(); + const problem_field = try inner_acc.getFieldByIndex(problem_idx, field_rt2); if (problem_field.ptr) |ptr| { const typed_ptr: *u8 = @ptrCast(@alignCast(ptr)); typed_ptr.* = @intFromEnum(result.problem_code); } } if (inner_acc.findFieldIndex(self.env.idents.index)) |index_idx| { - const index_field = try inner_acc.getFieldByIndex(index_idx); + const field_rt2 = try self.runtime_types.fresh(); + const index_field = try inner_acc.getFieldByIndex(index_idx, field_rt2); if (index_field.ptr) |ptr| { const typed_ptr: *u64 = @ptrCast(@alignCast(ptr)); typed_ptr.* = result.byte_index; @@ -1728,7 +1762,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .record) { // Record { tag, payload } - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse { @@ -1741,7 +1775,8 @@ pub const Interpreter = struct { }; // Write tag discriminant for Err - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const field_rt = try self.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(tag_field_idx, field_rt); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -1749,28 +1784,33 @@ pub const Interpreter = struct { } // Write error payload - need to construct BadUtf8({ problem, index }) - const outer_payload = try acc.getFieldByIndex(payload_field_idx); + const payload_rt = try self.runtime_types.fresh(); + const outer_payload = try acc.getFieldByIndex(payload_field_idx, payload_rt); if (outer_payload.layout.tag == .tuple) { var err_tuple = try outer_payload.asTuple(&self.runtime_layout_store); - const inner_payload = try err_tuple.getElement(0); + const inner_rt_var = try self.runtime_types.fresh(); + const inner_payload = try 
err_tuple.getElement(0, inner_rt_var); if (inner_payload.layout.tag == .record) { var inner_acc = try inner_payload.asRecord(&self.runtime_layout_store); if (inner_acc.findFieldIndex(self.env.idents.problem)) |problem_idx| { - const problem_field = try inner_acc.getFieldByIndex(problem_idx); + const field_rt2 = try self.runtime_types.fresh(); + const problem_field = try inner_acc.getFieldByIndex(problem_idx, field_rt2); if (problem_field.ptr) |ptr| { const typed_ptr: *u8 = @ptrCast(@alignCast(ptr)); typed_ptr.* = @intFromEnum(result.problem_code); } } if (inner_acc.findFieldIndex(self.env.idents.index)) |index_idx| { - const index_field = try inner_acc.getFieldByIndex(index_idx); + const field_rt2 = try self.runtime_types.fresh(); + const index_field = try inner_acc.getFieldByIndex(index_idx, field_rt2); if (index_field.ptr) |ptr| { const typed_ptr: *u64 = @ptrCast(@alignCast(ptr)); typed_ptr.* = result.byte_index; } } } - const err_tag = try err_tuple.getElement(1); + const err_disc_rt_var = try self.runtime_types.fresh(); + const err_tag = try err_tuple.getElement(1, err_disc_rt_var); if (err_tag.layout.tag == .scalar and err_tag.layout.data.scalar.tag == .int) { var tmp = err_tag; tmp.is_initialized = false; @@ -1779,7 +1819,8 @@ pub const Interpreter = struct { } else if (outer_payload.layout.tag == .record) { var err_rec = try outer_payload.asRecord(&self.runtime_layout_store); if (err_rec.findFieldIndex(self.env.idents.tag)) |inner_tag_idx| { - const inner_tag = try err_rec.getFieldByIndex(inner_tag_idx); + const field_rt2 = try self.runtime_types.fresh(); + const inner_tag = try err_rec.getFieldByIndex(inner_tag_idx, field_rt2); if (inner_tag.layout.tag == .scalar and inner_tag.layout.data.scalar.tag == .int) { var tmp = inner_tag; tmp.is_initialized = false; @@ -1787,18 +1828,21 @@ pub const Interpreter = struct { } } if (err_rec.findFieldIndex(self.env.idents.payload)) |inner_payload_idx| { - const inner_payload = try 
err_rec.getFieldByIndex(inner_payload_idx); + const field_rt2 = try self.runtime_types.fresh(); + const inner_payload = try err_rec.getFieldByIndex(inner_payload_idx, field_rt2); if (inner_payload.layout.tag == .record) { var inner_acc = try inner_payload.asRecord(&self.runtime_layout_store); if (inner_acc.findFieldIndex(self.env.idents.problem)) |problem_idx| { - const problem_field = try inner_acc.getFieldByIndex(problem_idx); + const field_rt3 = try self.runtime_types.fresh(); + const problem_field = try inner_acc.getFieldByIndex(problem_idx, field_rt3); if (problem_field.ptr) |ptr| { const typed_ptr: *u8 = @ptrCast(@alignCast(ptr)); typed_ptr.* = @intFromEnum(result.problem_code); } } if (inner_acc.findFieldIndex(self.env.idents.index)) |index_idx| { - const index_field = try inner_acc.getFieldByIndex(index_idx); + const field_rt3 = try self.runtime_types.fresh(); + const index_field = try inner_acc.getFieldByIndex(index_idx, field_rt3); if (index_field.ptr) |ptr| { const typed_ptr: *u64 = @ptrCast(@alignCast(ptr)); typed_ptr.* = result.byte_index; @@ -1812,7 +1856,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tag_union) { // Tag union layout with proper variant info for Err case - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); const tu_data = self.runtime_layout_store.getTagUnionData(result_layout.data.tag_union.idx); if (dest.ptr) |base_ptr| { @@ -1920,16 +1964,15 @@ pub const Interpreter = struct { break :blk expected_layout; }; - var out = try self.pushRaw(result_layout, 0); + // Get the proper List(Str) type for rt_var + const list_str_rt_var = try self.mkListStrTypeRuntime(); + var out = try self.pushRaw(result_layout, 0, list_str_rt_var); out.is_initialized = false; const result_ptr: *builtins.list.RocList = @ptrCast(@alignCast(out.ptr.?)); result_ptr.* = result_list; out.is_initialized = true; - // Set rt_var to the proper List(Str) type so method 
dispatch works correctly - // We create the type ourselves because return_rt_var might be a flex var - out.rt_var = try self.mkListStrTypeRuntime(); return out; }, .str_join_with => { @@ -1948,7 +1991,8 @@ pub const Interpreter = struct { const result_str = builtins.str.strJoinWithC(roc_list.*, separator.*, roc_ops); const result_layout = layout.Layout.str(); - var out = try self.pushRaw(result_layout, 0); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + var out = try self.pushRaw(result_layout, 0, str_rt_var); out.is_initialized = false; const result_ptr: *RocStr = @ptrCast(@alignCast(out.ptr.?)); @@ -1972,7 +2016,8 @@ pub const Interpreter = struct { const len_u64: u64 = @intCast(len_usize); const result_layout = layout.Layout.int(.u64); - var out = try self.pushRaw(result_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; try out.setInt(@intCast(len_u64)); out.is_initialized = true; @@ -2005,7 +2050,7 @@ pub const Interpreter = struct { // Handle ZST lists specially - they don't actually allocate if (result_layout.tag == .list_of_zst) { // For ZST lists, capacity doesn't matter - just return an empty list - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const result_ptr: *builtins.list.RocList = @ptrCast(@alignCast(out.ptr.?)); result_ptr.* = builtins.list.RocList.empty(); @@ -2025,9 +2070,11 @@ pub const Interpreter = struct { const elements_refcounted = elem_layout.isRefcounted(); // Set up context for refcount callbacks + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; @@ -2043,7 +2090,7 @@ pub const Interpreter = struct { ); // Allocate space for the result list - var out = try 
self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; // Copy the result list structure to the output @@ -2077,10 +2124,12 @@ pub const Interpreter = struct { if (elem_size == 0) { // ZST element - return zero-sized value + const elem_rt_var = return_rt_var orelse try self.runtime_types.fresh(); return StackValue{ .layout = elem_layout, .ptr = null, .is_initialized = true, + .rt_var = elem_rt_var, }; } @@ -2090,10 +2139,12 @@ pub const Interpreter = struct { std.debug.assert(elem_ptr != null); // Create StackValue pointing to the element + const elem_rt_var = return_rt_var orelse try self.runtime_types.fresh(); const elem_value = StackValue{ .layout = elem_layout, .ptr = @ptrCast(elem_ptr.?), .is_initialized = true, + .rt_var = elem_rt_var, }; // Copy to new location and increment refcount @@ -2177,7 +2228,8 @@ pub const Interpreter = struct { // (handles refcounting internally), but we're working with StackValues that // have their own lifetime management - the caller will decref the args. 
const total_count = list_a.len() + list_b.len(); - var out = try self.pushRaw(result_layout, 0); + const result_rt_var = return_rt_var orelse list_a_arg.rt_var; + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const header: *builtins.list.RocList = @ptrCast(@alignCast(out.ptr.?)); @@ -2209,9 +2261,11 @@ pub const Interpreter = struct { // Handle refcounting for copied elements - increment refcount for each element // since we copied them (the elements are now shared with the original lists) if (elements_refcounted) { + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; if (runtime_list.bytes) |buffer| { @@ -2263,9 +2317,11 @@ pub const Interpreter = struct { const update_mode = if (roc_list.isUnique()) builtins.utils.UpdateMode.InPlace else builtins.utils.UpdateMode.Immutable; // Set up context for refcount callbacks + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; @@ -2301,7 +2357,7 @@ pub const Interpreter = struct { // Allocate space for the result list const result_layout = roc_list_arg.layout; // Same layout as input - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, roc_list_arg.rt_var); out.is_initialized = false; // Copy the result list structure to the output @@ -2334,9 +2390,11 @@ pub const Interpreter = struct { const elements_refcounted = elem_layout.isRefcounted(); // Set up context for refcount callbacks + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; @@ -2356,7 +2414,7 
@@ pub const Interpreter = struct { // Allocate space for the result list const result_layout = list_arg.layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, list_arg.rt_var); out.is_initialized = false; // Copy the result list structure to the output @@ -2378,8 +2436,10 @@ pub const Interpreter = struct { // Access second argument as a record and extract its specific fields const sublist_config = args[1].asRecord(&self.runtime_layout_store) catch unreachable; // When fields are alphabetically sorted, 0 will be `len` and 1 will be `start` - const sublist_start_stack = sublist_config.getFieldByIndex(1) catch unreachable; - const sublist_len_stack = sublist_config.getFieldByIndex(0) catch unreachable; + const field_rt = try self.runtime_types.fresh(); + const sublist_start_stack = sublist_config.getFieldByIndex(1, field_rt) catch unreachable; + const field_rt2 = try self.runtime_types.fresh(); + const sublist_len_stack = sublist_config.getFieldByIndex(0, field_rt2) catch unreachable; const sublist_start: u64 = @intCast(sublist_start_stack.asI128()); const sublist_len: u64 = @intCast(sublist_len_stack.asI128()); @@ -2394,9 +2454,11 @@ pub const Interpreter = struct { const elements_refcounted = elem_layout.isRefcounted(); // Set up context for refcount callbacks + const elem_rt_var = try self.runtime_types.fresh(); var refcount_context = RefcountContext{ .layout_store = &self.runtime_layout_store, .elem_layout = elem_layout, + .elem_rt_var = elem_rt_var, .roc_ops = roc_ops, }; @@ -2415,7 +2477,7 @@ pub const Interpreter = struct { // Allocate space for the result list const result_layout = list_arg.layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, list_arg.rt_var); out.is_initialized = false; // Copy the result list structure to the output @@ -2619,7 +2681,7 @@ pub const Interpreter = struct { const num_val = try self.extractNumericValue(args[0]); const result_layout 
= args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (num_val) { @@ -2637,7 +2699,7 @@ pub const Interpreter = struct { const num_val = try self.extractNumericValue(args[0]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (num_val) { @@ -2657,7 +2719,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2693,7 +2755,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2725,7 +2787,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2757,7 +2819,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2789,7 +2851,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2841,7 +2903,7 
@@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2894,7 +2956,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -2946,7 +3008,7 @@ pub const Interpreter = struct { const rhs = try self.extractNumericValue(args[1]); const result_layout = args[0].layout; - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, args[0].rt_var); out.is_initialized = false; switch (lhs) { @@ -3053,7 +3115,7 @@ pub const Interpreter = struct { // Construct the result tag union if (result_layout.tag == .scalar) { // Simple tag with no payload (shouldn't happen for Try) - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const tag_idx: usize = if (in_range) ok_index orelse 0 else err_index orelse 1; try out.setInt(@intCast(tag_idx)); @@ -3061,14 +3123,15 @@ pub const Interpreter = struct { return out; } else if (result_layout.tag == .record) { // Record { tag, payload } - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); // Layout should guarantee tag and payload fields exist - if not, it's a compiler bug const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse unreachable; const payload_field_idx = acc.findFieldIndex(self.env.idents.payload) orelse unreachable; // Write tag discriminant - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const field_rt = try 
self.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(tag_field_idx, field_rt); // Tag field should be scalar int - if not, it's a compiler bug std.debug.assert(tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int); var tmp = tag_field; @@ -3077,7 +3140,8 @@ pub const Interpreter = struct { try tmp.setInt(@intCast(tag_idx)); // Clear payload area - const payload_field = try acc.getFieldByIndex(payload_field_idx); + const field_rt2 = try self.runtime_types.fresh(); + const payload_field = try acc.getFieldByIndex(payload_field_idx, field_rt2); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -3146,16 +3210,19 @@ pub const Interpreter = struct { const layout_env = self.runtime_layout_store.env; // Field lookups should succeed - missing fields is a compiler bug const is_neg_idx = acc.findFieldIndex(layout_env.idents.is_negative) orelse unreachable; - const is_neg_field = acc.getFieldByIndex(is_neg_idx) catch unreachable; + const field_rt = try self.runtime_types.fresh(); + const is_neg_field = acc.getFieldByIndex(is_neg_idx, field_rt) catch unreachable; const is_negative = getRuntimeU8(is_neg_field) != 0; // Get digits_before_pt field (List(U8)) const before_idx = acc.findFieldIndex(layout_env.idents.digits_before_pt) orelse unreachable; - const before_field = acc.getFieldByIndex(before_idx) catch unreachable; + const field_rt2 = try self.runtime_types.fresh(); + const before_field = acc.getFieldByIndex(before_idx, field_rt2) catch unreachable; // Get digits_after_pt field (List(U8)) const after_idx = acc.findFieldIndex(layout_env.idents.digits_after_pt) orelse unreachable; - const after_field = acc.getFieldByIndex(after_idx) catch unreachable; + const field_rt3 = try self.runtime_types.fresh(); + const after_field = acc.getFieldByIndex(after_idx, field_rt3) catch unreachable; // Extract list data from digits_before_pt 
const before_list: *const builtins.list.RocList = @ptrCast(@alignCast(before_field.ptr.?)); @@ -3356,7 +3423,7 @@ pub const Interpreter = struct { // Construct the result tag union if (result_layout.tag == .scalar) { // Simple tag with no payload - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const tag_idx: usize = if (in_range) ok_index orelse 0 else err_index orelse 1; try out.setInt(@intCast(tag_idx)); @@ -3364,7 +3431,7 @@ pub const Interpreter = struct { return out; } else if (result_layout.tag == .record) { // Record { tag, payload } - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var result_acc = try dest.asRecord(&self.runtime_layout_store); // Use layout_env for field lookups since record fields use layout store's env idents // Layout should guarantee tag and payload fields exist - if not, it's a compiler bug @@ -3372,7 +3439,8 @@ pub const Interpreter = struct { const payload_field_idx = result_acc.findFieldIndex(layout_env.idents.payload) orelse unreachable; // Write tag discriminant - const tag_field = try result_acc.getFieldByIndex(tag_field_idx); + const tag_rt = try self.runtime_types.fresh(); + const tag_field = try result_acc.getFieldByIndex(tag_field_idx, tag_rt); // Tag field should be scalar int - if not, it's a compiler bug std.debug.assert(tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int); var tmp = tag_field; @@ -3381,7 +3449,8 @@ pub const Interpreter = struct { try tmp.setInt(@intCast(tag_idx)); // Clear payload area - const payload_field = try result_acc.getFieldByIndex(payload_field_idx); + const payload_rt = try self.runtime_types.fresh(); + const payload_field = try result_acc.getFieldByIndex(payload_field_idx, payload_rt); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if 
(payload_bytes_len > 0) { @@ -3528,13 +3597,15 @@ pub const Interpreter = struct { .ptr = outer_payload_ptr, .layout = err_payload_layout, .is_initialized = true, + .rt_var = err_payload_var.?, }; var err_acc = try err_inner.asRecord(&self.runtime_layout_store); // Set the tag to InvalidNumeral (index 0, assuming it's the first/only tag) // Use layout store's env for field lookup to match comptime_evaluator if (err_acc.findFieldIndex(layout_env.idents.tag)) |inner_tag_idx| { - const inner_tag_field = try err_acc.getFieldByIndex(inner_tag_idx); + const inner_tag_rt = try self.runtime_types.fresh(); + const inner_tag_field = try err_acc.getFieldByIndex(inner_tag_idx, inner_tag_rt); if (inner_tag_field.layout.tag == .scalar and inner_tag_field.layout.data.scalar.tag == .int) { var inner_tmp = inner_tag_field; inner_tmp.is_initialized = false; @@ -3544,7 +3615,8 @@ pub const Interpreter = struct { // Set the payload to the Str if (err_acc.findFieldIndex(layout_env.idents.payload)) |inner_payload_idx| { - const inner_payload_field = try err_acc.getFieldByIndex(inner_payload_idx); + const inner_payload_rt = try self.runtime_types.fresh(); + const inner_payload_field = try err_acc.getFieldByIndex(inner_payload_idx, inner_payload_rt); if (inner_payload_field.ptr) |str_ptr| { const str_dest: *RocStr = @ptrCast(@alignCast(str_ptr)); str_dest.* = roc_str; @@ -3568,14 +3640,15 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tuple) { // Tuple (payload, tag) - tag unions are now represented as tuples - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var result_acc = try dest.asTuple(&self.runtime_layout_store); // Element 0 is payload, Element 1 is tag discriminant // getElement takes original index directly // Write tag discriminant (element 1) - const tag_field = try result_acc.getElement(1); + const tag_elem_rt_var = try self.runtime_types.fresh(); + const tag_field = try 
result_acc.getElement(1, tag_elem_rt_var); // Tag field should be scalar int - if not, it's a compiler bug std.debug.assert(tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int); var tmp = tag_field; @@ -3584,7 +3657,8 @@ pub const Interpreter = struct { try tmp.setInt(@intCast(tag_idx)); // Clear payload area (element 0) - const payload_field = try result_acc.getElement(0); + const payload_elem_rt_var = try self.runtime_types.fresh(); + const payload_field = try result_acc.getElement(0, payload_elem_rt_var); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -3706,7 +3780,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tag_union) { // Tag union layout: payload at offset 0, discriminant at discriminant_offset - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); const tu_data = self.runtime_layout_store.getTagUnionData(result_layout.data.tag_union.idx); // Write tag discriminant at discriminant_offset @@ -3888,7 +3962,8 @@ pub const Interpreter = struct { const roc_dec: *const RocDec = @ptrCast(@alignCast(dec_arg.ptr.?)); const result_str = builtins.dec.to_str(roc_dec.*, roc_ops); - const value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(value.ptr.?)); roc_str_ptr.* = result_str; return value; @@ -4186,12 +4261,11 @@ pub const Interpreter = struct { /// Helper to create a simple boolean StackValue (for low-level builtins) fn makeBoolValue(self: *Interpreter, value: bool) !StackValue { const bool_layout = Layout.int(.u8); - var bool_value = try self.pushRaw(bool_layout, 0); + const bool_rt_var = try self.getCanonicalBoolRuntimeVar(); + var bool_value = try self.pushRaw(bool_layout, 0, bool_rt_var); 
bool_value.is_initialized = false; try bool_value.setInt(@intFromBool(value)); bool_value.is_initialized = true; - // Store the Bool runtime type variable for constant folding - bool_value.rt_var = try self.getCanonicalBoolRuntimeVar(); return bool_value; } @@ -4208,7 +4282,8 @@ pub const Interpreter = struct { var buf: [40]u8 = undefined; // 40 is enough for i128 const result = std.fmt.bufPrint(&buf, "{}", .{int_value}) catch unreachable; - const value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(value.ptr.?)); roc_str_ptr.* = RocStr.init(&buf, result.len, roc_ops); return value; @@ -4227,7 +4302,8 @@ pub const Interpreter = struct { var buf: [400]u8 = undefined; const result = std.fmt.bufPrint(&buf, "{d}", .{float_value}) catch unreachable; - const value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(value.ptr.?)); roc_str_ptr.* = RocStr.init(&buf, result.len, roc_ops); return value; @@ -4244,7 +4320,8 @@ pub const Interpreter = struct { const to_value: To = @intCast(from_value); const to_layout = Layout.int(comptime intTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; @@ -4278,7 +4355,8 @@ pub const Interpreter = struct { @intCast(from_value); const to_layout = Layout.int(comptime intTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; 
@@ -4329,7 +4407,7 @@ pub const Interpreter = struct { // Construct the result tag union if (result_layout.tag == .scalar) { // Simple tag with no payload (shouldn't happen for Try with payload) - var out = try self.pushRaw(result_layout, 0); + var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; const tag_idx: usize = if (in_range) ok_index orelse 0 else err_index orelse 1; try out.setInt(@intCast(tag_idx)); @@ -4337,14 +4415,15 @@ pub const Interpreter = struct { return out; } else if (result_layout.tag == .record) { // Record { tag, payload } - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); // Layout should guarantee tag and payload fields exist - if not, it's a compiler bug const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse unreachable; const payload_field_idx = acc.findFieldIndex(self.env.idents.payload) orelse unreachable; // Write tag discriminant - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const field_rt = try self.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(tag_field_idx, field_rt); // Tag field should be scalar int - if not, it's a compiler bug std.debug.assert(tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int); var tmp = tag_field; @@ -4353,7 +4432,8 @@ pub const Interpreter = struct { try tmp.setInt(@intCast(tag_idx)); // Clear payload area - const payload_field = try acc.getFieldByIndex(payload_field_idx); + const field_rt2 = try self.runtime_types.fresh(); + const payload_field = try acc.getFieldByIndex(payload_field_idx, field_rt2); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -4374,13 +4454,14 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tuple) { // Tuple (payload, 
tag) - tag unions are now represented as tuples - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var result_acc = try dest.asTuple(&self.runtime_layout_store); // Element 0 is payload, Element 1 is tag discriminant // Write tag discriminant (element 1) - const tag_field = try result_acc.getElement(1); + const tag_elem_rt_var = try self.runtime_types.fresh(); + const tag_field = try result_acc.getElement(1, tag_elem_rt_var); // Tag field should be scalar int - if not, it's a compiler bug std.debug.assert(tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int); var tmp = tag_field; @@ -4389,7 +4470,8 @@ pub const Interpreter = struct { try tmp.setInt(@intCast(tag_idx)); // Clear payload area (element 0) - const payload_field = try result_acc.getElement(0); + const payload_elem_rt_var = try self.runtime_types.fresh(); + const payload_field = try result_acc.getElement(0, payload_elem_rt_var); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -4410,7 +4492,7 @@ pub const Interpreter = struct { return dest; } else if (result_layout.tag == .tag_union) { // Tag union layout: payload at offset 0, discriminant at discriminant_offset - const dest = try self.pushRaw(result_layout, 0); + const dest = try self.pushRaw(result_layout, 0, result_rt_var); const tu_data = self.runtime_layout_store.getTagUnionData(result_layout.data.tag_union.idx); // Write tag discriminant at discriminant_offset @@ -4457,7 +4539,8 @@ pub const Interpreter = struct { const to_value: To = @floatFromInt(from_value); const to_layout = Layout.frac(comptime fracTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = 
to_value; out.is_initialized = true; @@ -4475,7 +4558,8 @@ pub const Interpreter = struct { const dec_value = RocDec{ .num = @as(i128, from_value) * RocDec.one_point_zero_i128 }; const dec_layout = Layout.frac(.dec); - var out = try self.pushRaw(dec_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(dec_layout, 0, result_rt_var); out.is_initialized = false; @as(*RocDec, @ptrCast(@alignCast(out.ptr.?))).* = dec_value; out.is_initialized = true; @@ -4520,7 +4604,8 @@ pub const Interpreter = struct { const to_value: To = floatToIntSaturating(From, To, from_value); const to_layout = Layout.int(comptime intTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; @@ -4560,7 +4645,8 @@ pub const Interpreter = struct { const to_value: To = @floatCast(from_value); const to_layout = Layout.frac(comptime fracTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; @@ -4577,7 +4663,8 @@ pub const Interpreter = struct { const to_value: To = @floatCast(from_value); const to_layout = Layout.frac(comptime fracTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; @@ -4618,7 +4705,8 @@ pub const Interpreter = struct { const to_value: To = std.math.cast(To, whole_part) orelse if (whole_part < 0) std.math.minInt(To) else std.math.maxInt(To); 
const to_layout = Layout.int(comptime intTypeFromZigType(To)); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*To, @ptrCast(@alignCast(out.ptr.?))).* = to_value; out.is_initialized = true; @@ -4679,7 +4767,8 @@ pub const Interpreter = struct { const f32_value: f32 = @floatCast(f64_value); const to_layout = Layout.frac(.f32); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*f32, @ptrCast(@alignCast(out.ptr.?))).* = f32_value; out.is_initialized = true; @@ -4715,7 +4804,8 @@ pub const Interpreter = struct { const f64_value = dec_value.toF64(); const to_layout = Layout.frac(.f64); - var out = try self.pushRaw(to_layout, 0); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRaw(to_layout, 0, result_rt_var); out.is_initialized = false; @as(*f64, @ptrCast(@alignCast(out.ptr.?))).* = f64_value; out.is_initialized = true; @@ -4733,7 +4823,8 @@ pub const Interpreter = struct { // For now, allocate raw bytes and set them directly // The tuple is (val_or_memory_garbage: Dec, success: Bool) const tuple_size: usize = 24; // 16 bytes Dec + padding + 1 byte bool - var out = try self.pushRawBytes(tuple_size, 16); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRawBytes(tuple_size, 16, result_rt_var); out.is_initialized = false; // Write Dec at offset 0 @@ -4754,7 +4845,8 @@ pub const Interpreter = struct { fn buildSuccessValRecordF32(self: *Interpreter, success: bool, val: f32) !StackValue { // Layout: tuple (F32, Bool) where element 0 is F32 (4 bytes) and element 1 is Bool (1 byte) const tuple_size: usize = 8; // 4 bytes F32 + padding + 1 byte bool - var out = try self.pushRawBytes(tuple_size, 4); + const result_rt_var = try 
self.runtime_types.fresh(); + var out = try self.pushRawBytes(tuple_size, 4, result_rt_var); out.is_initialized = false; // Write F32 at offset 0 @@ -4778,7 +4870,8 @@ pub const Interpreter = struct { const tuple_size: usize = val_size + 2; // val + 2 bools const padded_size = (tuple_size + val_align - 1) / val_align * val_align; - var out = try self.pushRawBytes(padded_size, val_align); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRawBytes(padded_size, val_align, result_rt_var); out.is_initialized = false; // Write val at offset 0 @@ -4801,7 +4894,8 @@ pub const Interpreter = struct { fn buildIsIntValRecord(self: *Interpreter, is_int: bool, val: i128) !StackValue { // Layout: tuple (I128, Bool) const tuple_size: usize = 24; // 16 bytes I128 + padding + 1 byte bool - var out = try self.pushRawBytes(tuple_size, 16); + const result_rt_var = try self.runtime_types.fresh(); + var out = try self.pushRawBytes(tuple_size, 16, result_rt_var); out.is_initialized = false; // Write I128 at offset 0 @@ -4911,7 +5005,7 @@ pub const Interpreter = struct { const result_layout = try self.getRuntimeLayout(result_rt_var); const tag_indices = try self.getTryTagIndices(result_rt_var); - return self.buildTryResultWithValue(T, result_layout, tag_indices.ok, tag_indices.err, success, parsed orelse 0); + return self.buildTryResultWithValue(T, result_layout, tag_indices.ok, tag_indices.err, success, parsed orelse 0, result_rt_var); } /// Helper for parsing float from string (Str -> Try(T, [BadNumStr])) @@ -4925,7 +5019,7 @@ pub const Interpreter = struct { const result_layout = try self.getRuntimeLayout(result_rt_var); const tag_indices = try self.getTryTagIndices(result_rt_var); - return self.buildTryResultWithValue(T, result_layout, tag_indices.ok, tag_indices.err, success, parsed orelse 0); + return self.buildTryResultWithValue(T, result_layout, tag_indices.ok, tag_indices.err, success, parsed orelse 0, result_rt_var); } /// Helper for parsing Dec 
from string (Str -> Try(Dec, [BadNumStr])) @@ -4939,7 +5033,7 @@ pub const Interpreter = struct { // Dec is stored as i128 internally const dec_val: i128 = if (parsed) |dec| dec.num else 0; - return self.buildTryResultWithValue(i128, result_layout, tag_indices.ok, tag_indices.err, success, dec_val); + return self.buildTryResultWithValue(i128, result_layout, tag_indices.ok, tag_indices.err, success, dec_val, result_rt_var); } /// Build a Try result with a value payload @@ -4951,24 +5045,27 @@ pub const Interpreter = struct { err_index: ?usize, success: bool, value: T, + result_rt_var: types.Var, ) !StackValue { const tag_idx: usize = if (success) ok_index orelse 0 else err_index orelse 1; if (result_layout.tag == .record) { - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var result_acc = try dest.asRecord(&self.runtime_layout_store); const layout_env = self.runtime_layout_store.env; const tag_field_idx = result_acc.findFieldIndex(layout_env.idents.tag) orelse unreachable; const payload_field_idx = result_acc.findFieldIndex(layout_env.idents.payload) orelse unreachable; // Write tag discriminant - const tag_field = try result_acc.getFieldByIndex(tag_field_idx); + const field_rt = try self.runtime_types.fresh(); + const tag_field = try result_acc.getFieldByIndex(tag_field_idx, field_rt); var tmp = tag_field; tmp.is_initialized = false; try tmp.setInt(@intCast(tag_idx)); // Clear and write payload - const payload_field = try result_acc.getFieldByIndex(payload_field_idx); + const field_rt2 = try self.runtime_types.fresh(); + const payload_field = try result_acc.getFieldByIndex(payload_field_idx, field_rt2); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -4980,17 +5077,19 @@ pub const Interpreter = struct { } return dest; } else if (result_layout.tag == .tuple) { - var dest = try 
self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); var result_acc = try dest.asTuple(&self.runtime_layout_store); // Write tag discriminant (element 1) - const tag_field = try result_acc.getElement(1); + const tag_elem_rt_var = try self.runtime_types.fresh(); + const tag_field = try result_acc.getElement(1, tag_elem_rt_var); var tmp = tag_field; tmp.is_initialized = false; try tmp.setInt(@intCast(tag_idx)); // Clear and write payload (element 0) - const payload_field = try result_acc.getElement(0); + const payload_elem_rt_var = try self.runtime_types.fresh(); + const payload_field = try result_acc.getElement(0, payload_elem_rt_var); if (payload_field.ptr) |payload_ptr| { const payload_bytes_len = self.runtime_layout_store.layoutSize(payload_field.layout); if (payload_bytes_len > 0) { @@ -5002,7 +5101,7 @@ pub const Interpreter = struct { } return dest; } else if (result_layout.tag == .tag_union) { - var dest = try self.pushRaw(result_layout, 0); + var dest = try self.pushRaw(result_layout, 0, result_rt_var); const tu_data = self.runtime_layout_store.getTagUnionData(result_layout.data.tag_union.idx); const base_ptr: [*]u8 = @ptrCast(dest.ptr.?); @@ -5079,7 +5178,8 @@ pub const Interpreter = struct { // For expression: push empty record {} as result const empty_record_layout_idx = try self.runtime_layout_store.ensureEmptyRecordLayout(); const empty_record_layout = self.runtime_layout_store.getLayout(empty_record_layout_idx); - const empty_record_value = try self.pushRaw(empty_record_layout, 0); + const empty_record_rt_var = try self.runtime_types.fresh(); + const empty_record_value = try self.pushRaw(empty_record_layout, 0, empty_record_rt_var); try value_stack.push(empty_record_value); } } @@ -5201,7 +5301,7 @@ pub const Interpreter = struct { fn evalDecBinop( self: *Interpreter, op: can.CIR.Expr.Binop.Op, - result_layout: Layout, + _: Layout, // Ignored - we always use Dec layout for proper alignment lhs: StackValue, 
rhs: StackValue, roc_ops: *RocOps, @@ -5224,7 +5324,9 @@ pub const Interpreter = struct { else => @panic("evalDecBinop: unhandled decimal operation"), }; - var out = try self.pushRaw(result_layout, 0); + // Use proper Dec layout to ensure 16-byte alignment for RocDec + const dec_layout = Layout.frac(.dec); + var out = try self.pushRaw(dec_layout, 0, lhs.rt_var); out.is_initialized = true; if (out.ptr) |ptr| { const dest: *RocDec = @ptrCast(@alignCast(ptr)); @@ -5233,6 +5335,165 @@ pub const Interpreter = struct { return out; } + /// Evaluate a binary operation on numeric values (int, f32, f64, or dec) + /// This function dispatches to the appropriate type-specific operation. + fn evalNumericBinop( + self: *Interpreter, + op: can.CIR.Expr.Binop.Op, + lhs: StackValue, + rhs: StackValue, + roc_ops: *RocOps, + ) !StackValue { + const lhs_val = try self.extractNumericValue(lhs); + const rhs_val = try self.extractNumericValue(rhs); + const result_layout = lhs.layout; + + var out = try self.pushRaw(result_layout, 0, lhs.rt_var); + out.is_initialized = false; + + switch (op) { + .add => switch (lhs_val) { + .int => |l| switch (rhs_val) { + .int => |r| try out.setInt(l + r), + .dec => |r| try out.setInt(l + @divTrunc(r.num, RocDec.one_point_zero_i128)), + else => return error.TypeMismatch, + }, + .f32 => |l| switch (rhs_val) { + .f32 => |r| out.setF32(l + r), + else => return error.TypeMismatch, + }, + .f64 => |l| switch (rhs_val) { + .f64 => |r| out.setF64(l + r), + else => return error.TypeMismatch, + }, + .dec => |l| switch (rhs_val) { + .dec => |r| out.setDec(RocDec.add(l, r, roc_ops)), + .int => |r| out.setDec(RocDec.add(l, RocDec{ .num = @as(i128, r) * RocDec.one_point_zero_i128 }, roc_ops)), + else => return error.TypeMismatch, + }, + }, + .sub => switch (lhs_val) { + .int => |l| switch (rhs_val) { + .int => |r| try out.setInt(l - r), + .dec => |r| try out.setInt(l - @divTrunc(r.num, RocDec.one_point_zero_i128)), + else => return error.TypeMismatch, + }, + .f32 => 
|l| switch (rhs_val) { + .f32 => |r| out.setF32(l - r), + else => return error.TypeMismatch, + }, + .f64 => |l| switch (rhs_val) { + .f64 => |r| out.setF64(l - r), + else => return error.TypeMismatch, + }, + .dec => |l| switch (rhs_val) { + .dec => |r| out.setDec(RocDec.sub(l, r, roc_ops)), + .int => |r| out.setDec(RocDec.sub(l, RocDec{ .num = @as(i128, r) * RocDec.one_point_zero_i128 }, roc_ops)), + else => return error.TypeMismatch, + }, + }, + .mul => switch (lhs_val) { + .int => |l| switch (rhs_val) { + .int => |r| try out.setInt(l * r), + .dec => |r| try out.setInt(l * @divTrunc(r.num, RocDec.one_point_zero_i128)), + else => return error.TypeMismatch, + }, + .f32 => |l| switch (rhs_val) { + .f32 => |r| out.setF32(l * r), + else => return error.TypeMismatch, + }, + .f64 => |l| switch (rhs_val) { + .f64 => |r| out.setF64(l * r), + else => return error.TypeMismatch, + }, + .dec => |l| switch (rhs_val) { + .dec => |r| out.setDec(RocDec.mul(l, r, roc_ops)), + .int => |r| out.setDec(RocDec.mul(l, RocDec{ .num = @as(i128, r) * RocDec.one_point_zero_i128 }, roc_ops)), + else => return error.TypeMismatch, + }, + }, + .div, .div_trunc => switch (lhs_val) { + .int => |l| switch (rhs_val) { + .int => |r| { + if (r == 0) return error.DivisionByZero; + try out.setInt(@divTrunc(l, r)); + }, + else => return error.TypeMismatch, + }, + .f32 => |l| switch (rhs_val) { + .f32 => |r| { + if (r == 0) return error.DivisionByZero; + if (op == .div_trunc) { + out.setF32(std.math.trunc(l / r)); + } else { + out.setF32(l / r); + } + }, + else => return error.TypeMismatch, + }, + .f64 => |l| switch (rhs_val) { + .f64 => |r| { + if (r == 0) return error.DivisionByZero; + if (op == .div_trunc) { + out.setF64(std.math.trunc(l / r)); + } else { + out.setF64(l / r); + } + }, + else => return error.TypeMismatch, + }, + .dec => |l| switch (rhs_val) { + .dec => |r| { + if (r.num == 0) return error.DivisionByZero; + out.setDec(RocDec.div(l, r, roc_ops)); + }, + .int => |r| { + if (r == 0) return 
error.DivisionByZero; + out.setDec(RocDec.div(l, RocDec{ .num = @as(i128, r) * RocDec.one_point_zero_i128 }, roc_ops)); + }, + else => return error.TypeMismatch, + }, + }, + .rem => switch (lhs_val) { + .int => |l| switch (rhs_val) { + .int => |r| { + if (r == 0) return error.DivisionByZero; + try out.setInt(@rem(l, r)); + }, + else => return error.TypeMismatch, + }, + .f32 => |l| switch (rhs_val) { + .f32 => |r| { + if (r == 0) return error.DivisionByZero; + out.setF32(@rem(l, r)); + }, + else => return error.TypeMismatch, + }, + .f64 => |l| switch (rhs_val) { + .f64 => |r| { + if (r == 0) return error.DivisionByZero; + out.setF64(@rem(l, r)); + }, + else => return error.TypeMismatch, + }, + .dec => |l| switch (rhs_val) { + .dec => |r| { + if (r.num == 0) return error.DivisionByZero; + out.setDec(RocDec.rem(l, r, roc_ops)); + }, + .int => |r| { + if (r == 0) return error.DivisionByZero; + out.setDec(RocDec.rem(l, RocDec{ .num = @as(i128, r) * RocDec.one_point_zero_i128 }, roc_ops)); + }, + else => return error.TypeMismatch, + }, + }, + else => return error.TypeMismatch, + } + out.is_initialized = true; + return out; + } + fn evalFloatBinop( self: *Interpreter, comptime FloatT: type, @@ -5574,9 +5835,10 @@ pub const Interpreter = struct { var index: usize = 0; while (index < elem_vars.len) : (index += 1) { // getElement expects original index and converts to sorted internally - const lhs_elem = try lhs_acc.getElement(index); - const rhs_elem = try rhs_acc.getElement(index); - const elems_equal = try self.valuesStructurallyEqual(lhs_elem, elem_vars[index], rhs_elem, elem_vars[index], roc_ops); + const elem_rt_var = elem_vars[index]; + const lhs_elem = try lhs_acc.getElement(index, elem_rt_var); + const rhs_elem = try rhs_acc.getElement(index, elem_rt_var); + const elems_equal = try self.valuesStructurallyEqual(lhs_elem, elem_rt_var, rhs_elem, elem_rt_var, roc_ops); if (!elems_equal) { return false; } @@ -5628,9 +5890,9 @@ pub const Interpreter = struct { var idx: 
usize = 0; while (idx < field_count) : (idx += 1) { - const lhs_field = try lhs_rec.getFieldByIndex(idx); - const rhs_field = try rhs_rec.getFieldByIndex(idx); const field_var = field_slice.items(.var_)[idx]; + const lhs_field = try lhs_rec.getFieldByIndex(idx, field_var); + const rhs_field = try rhs_rec.getFieldByIndex(idx, field_var); const fields_equal = try self.valuesStructurallyEqual(lhs_field, field_var, rhs_field, field_var, roc_ops); if (!fields_equal) { return false; @@ -5721,9 +5983,10 @@ pub const Interpreter = struct { var idx: usize = 0; while (idx < arg_vars.len) : (idx += 1) { // getElement expects original index and converts to sorted internally - const lhs_elem = try lhs_tuple.getElement(idx); - const rhs_elem = try rhs_tuple.getElement(idx); - const args_equal = try self.valuesStructurallyEqual(lhs_elem, arg_vars[idx], rhs_elem, arg_vars[idx], roc_ops); + const arg_rt_var = arg_vars[idx]; + const lhs_elem = try lhs_tuple.getElement(idx, arg_rt_var); + const rhs_elem = try rhs_tuple.getElement(idx, arg_rt_var); + const args_equal = try self.valuesStructurallyEqual(lhs_elem, arg_rt_var, rhs_elem, arg_rt_var, roc_ops); if (!args_equal) { return false; } @@ -5864,6 +6127,47 @@ pub const Interpreter = struct { return backing_rt_var; } + pub fn getCanonicalStrRuntimeVar(self: *Interpreter) !types.Var { + if (self.canonical_str_rt_var) |cached| return cached; + // Use the dynamic str_stmt index (from the Str module) + const str_decl_idx = self.builtins.str_stmt; + + // Get the statement from the Str module + const str_stmt = self.builtins.str_env.store.getStatement(str_decl_idx); + + // For nominal type declarations, we need to get the backing type, not the nominal wrapper + const ct_var = switch (str_stmt) { + .s_nominal_decl => blk: { + // The type of the declaration is the nominal type, but we want its backing + const nom_var = can.ModuleEnv.varFrom(str_decl_idx); + const nom_resolved = self.builtins.str_env.types.resolveVar(nom_var); + if 
(nom_resolved.desc.content == .structure) { + if (nom_resolved.desc.content.structure == .nominal_type) { + const nt = nom_resolved.desc.content.structure.nominal_type; + const backing_var = self.builtins.str_env.types.getNominalBackingVar(nt); + break :blk backing_var; + } + } + break :blk nom_var; + }, + else => can.ModuleEnv.varFrom(str_decl_idx), + }; + + // Use str_env to translate since str_stmt is from the Str module + // Cast away const - translateTypeVar doesn't actually mutate the module + const nominal_rt_var = try self.translateTypeVar(@constCast(self.builtins.str_env), ct_var); + const nominal_resolved = self.runtime_types.resolveVar(nominal_rt_var); + const backing_rt_var = switch (nominal_resolved.desc.content) { + .structure => |st| switch (st) { + .nominal_type => |nt| self.runtime_types.getNominalBackingVar(nt), + else => nominal_rt_var, + }, + else => nominal_rt_var, + }; + self.canonical_str_rt_var = backing_rt_var; + return backing_rt_var; + } + fn resolveBaseVar(self: *Interpreter, runtime_var: types.Var) types.store.ResolvedVarDesc { var current = self.runtime_types.resolveVar(runtime_var); var guard = types.debug.IterationGuard.init("resolveBaseVar"); @@ -6005,16 +6309,18 @@ pub const Interpreter = struct { .record => { var acc = try value.asRecord(&self.runtime_layout_store); const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse return error.TypeMismatch; - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const disc_rt_var = try self.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(tag_field_idx, disc_rt_var); var tag_index: usize = undefined; if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { - var tmp = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true }; + var tmp = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = tag_field.rt_var }; tag_index = @intCast(tmp.asI128()); } else 
return error.TypeMismatch; var payload_value: ?StackValue = null; if (acc.findFieldIndex(self.env.idents.payload)) |payload_idx| { - payload_value = try acc.getFieldByIndex(payload_idx); + const payload_rt_var = try self.runtime_types.fresh(); + payload_value = try acc.getFieldByIndex(payload_idx, payload_rt_var); if (payload_value) |field_value| { var tag_list = std.array_list.AlignedManaged(types.Tag, null).init(self.allocator); defer tag_list.deinit(); @@ -6046,6 +6352,7 @@ pub const Interpreter = struct { .layout = effective_layout, .ptr = field_value.ptr, .is_initialized = field_value.is_initialized, + .rt_var = field_value.rt_var, }; } else { // For multiple args, use the layout from the stored field @@ -6053,6 +6360,7 @@ pub const Interpreter = struct { .layout = field_value.layout, .ptr = field_value.ptr, .is_initialized = field_value.is_initialized, + .rt_var = field_value.rt_var, }; } } @@ -6064,17 +6372,30 @@ pub const Interpreter = struct { // Tag unions are now represented as tuples (payload, tag) var acc = try value.asTuple(&self.runtime_layout_store); + // Get tuple element rt_vars if available from value's type + const tuple_elem_vars: ?[]const types.Var = blk: { + const resolved = self.runtime_types.resolveVar(value.rt_var); + if (resolved.desc.content == .structure) { + if (resolved.desc.content.structure == .tuple) { + break :blk self.runtime_types.sliceVars(resolved.desc.content.structure.tuple.elems); + } + } + break :blk null; + }; + // Element 1 is the tag discriminant - getElement takes original index directly - const tag_field = try acc.getElement(1); + const discrim_rt_var = if (tuple_elem_vars) |vars| (if (vars.len > 1) vars[1] else value.rt_var) else value.rt_var; + const tag_field = try acc.getElement(1, discrim_rt_var); var tag_index: usize = undefined; if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { - var tmp = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true }; + 
var tmp = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = tag_field.rt_var }; tag_index = @intCast(tmp.asI128()); } else return error.TypeMismatch; // Element 0 is the payload - getElement takes original index directly var payload_value: ?StackValue = null; - const payload_field = acc.getElement(0) catch null; + const payload_rt_var = if (tuple_elem_vars) |vars| (if (vars.len > 0) vars[0] else value.rt_var) else value.rt_var; + const payload_field = acc.getElement(0, payload_rt_var) catch null; if (payload_field) |field_value| { var tag_list = std.array_list.AlignedManaged(types.Tag, null).init(self.allocator); defer tag_list.deinit(); @@ -6106,6 +6427,7 @@ pub const Interpreter = struct { .layout = effective_layout, .ptr = field_value.ptr, .is_initialized = field_value.is_initialized, + .rt_var = arg_var, }; } else { // For multiple args, use the layout from the stored field @@ -6114,6 +6436,7 @@ pub const Interpreter = struct { .layout = field_value.layout, .ptr = field_value.ptr, .is_initialized = field_value.is_initialized, + .rt_var = field_value.rt_var, }; } } @@ -6151,14 +6474,18 @@ pub const Interpreter = struct { .layout = effective_layout, .ptr = value.ptr, // Payload is at offset 0 .is_initialized = true, + .rt_var = arg_var, }; } else { // Multiple args: the payload is a tuple at offset 0 const variant_layout = acc.getVariantLayout(tag_index); + // For multiple args, we need a tuple type - use value's rt_var as fallback + // since the exact tuple type construction is complex payload_value = StackValue{ .layout = variant_layout, .ptr = value.ptr, .is_initialized = true, + .rt_var = value.rt_var, }; } @@ -6330,6 +6657,7 @@ pub const Interpreter = struct { source: RocList, start: usize, count: usize, + rt_var: types.Var, ) !StackValue { // Apply layout correction if needed. 
// This handles cases where the type system's layout doesn't match the actual @@ -6347,7 +6675,7 @@ pub const Interpreter = struct { } } else list_layout; - const dest = try self.pushRaw(actual_list_layout, 0); + const dest = try self.pushRaw(actual_list_layout, 0, rt_var); if (dest.ptr == null) return dest; const header: *RocList = @ptrCast(@alignCast(dest.ptr.?)); @@ -6447,12 +6775,8 @@ pub const Interpreter = struct { switch (pat) { .assign => |_| { // Bind entire value to this pattern - var copied = try self.pushCopy(value, roc_ops); - // If the value doesn't have an rt_var (e.g., list elements from pattern matching), - // use the pattern's type. Otherwise preserve the value's original type. - if (copied.rt_var == null) { - copied.rt_var = value_rt_var; - } + const copied = try self.pushCopy(value, roc_ops); + // pushCopy preserves rt_var from value try out_binds.append(.{ .pattern_idx = pattern_idx, .value = copied, .expr_idx = expr_idx, .source_env = self.env }); return true; }, @@ -6463,11 +6787,7 @@ pub const Interpreter = struct { return false; } - var alias_value = try self.pushCopy(value, roc_ops); - // If the value doesn't have an rt_var, use the pattern's type - if (alias_value.rt_var == null) { - alias_value.rt_var = value_rt_var; - } + const alias_value = try self.pushCopy(value, roc_ops); try out_binds.append(.{ .pattern_idx = pattern_idx, .value = alias_value, .expr_idx = expr_idx, .source_env = self.env }); return true; }, @@ -6520,7 +6840,7 @@ pub const Interpreter = struct { while (idx < pat_ids.len) : (idx += 1) { if (idx >= accessor.getElementCount()) return false; // getElement expects original index and converts to sorted internally - const elem_value = try accessor.getElement(idx); + const elem_value = try accessor.getElement(idx, elem_vars[idx]); const before = out_binds.items.len; const matched = try self.patternMatchesBind(pat_ids[idx], elem_value, elem_vars[idx], roc_ops, out_binds, expr_idx); if (!matched) { @@ -6601,7 +6921,7 @@ pub 
const Interpreter = struct { var idx: usize = 0; while (idx < prefix_len) : (idx += 1) { - const elem_value = try accessor.getElement(idx); + const elem_value = try accessor.getElement(idx, elem_rt_var); const before = out_binds.items.len; const matched = try self.patternMatchesBind(non_rest_patterns[idx], elem_value, elem_rt_var, roc_ops, out_binds, expr_idx); if (!matched) { @@ -6614,7 +6934,7 @@ pub const Interpreter = struct { while (suffix_idx < suffix_len) : (suffix_idx += 1) { const suffix_pattern_idx = non_rest_patterns[prefix_len + suffix_idx]; const element_idx = total_len - suffix_len + suffix_idx; - const elem_value = try accessor.getElement(element_idx); + const elem_value = try accessor.getElement(element_idx, elem_rt_var); const before = out_binds.items.len; const matched = try self.patternMatchesBind(suffix_pattern_idx, elem_value, elem_rt_var, roc_ops, out_binds, expr_idx); if (!matched) { @@ -6625,7 +6945,7 @@ pub const Interpreter = struct { if (rest_info.pattern) |rest_pat_idx| { const rest_len = total_len - prefix_len - suffix_len; - const rest_value = try self.makeListSliceValue(list_layout, elem_layout, accessor.list, prefix_len, rest_len); + const rest_value = try self.makeListSliceValue(list_layout, elem_layout, accessor.list, prefix_len, rest_len, value_rt_var); defer rest_value.decref(&self.runtime_layout_store, roc_ops); const before = out_binds.items.len; if (!try self.patternMatchesBind(rest_pat_idx, rest_value, value_rt_var, roc_ops, out_binds, expr_idx)) { @@ -6639,7 +6959,7 @@ pub const Interpreter = struct { if (total_len != non_rest_patterns.len) return false; var idx: usize = 0; while (idx < non_rest_patterns.len) : (idx += 1) { - const elem_value = try accessor.getElement(idx); + const elem_value = try accessor.getElement(idx, elem_rt_var); const before = out_binds.items.len; const matched = try self.patternMatchesBind(non_rest_patterns[idx], elem_value, elem_rt_var, roc_ops, out_binds, expr_idx); if (!matched) { @@ -6666,9 
+6986,9 @@ pub const Interpreter = struct { const destruct = self.env.store.getRecordDestruct(destruct_idx); const field_index = accessor.findFieldIndex(destruct.label) orelse return false; - const field_value = try accessor.getFieldByIndex(field_index); const field_ct_var = can.ModuleEnv.varFrom(destruct_idx); const field_var = try self.translateTypeVar(self.env, field_ct_var); + const field_value = try accessor.getFieldByIndex(field_index, field_var); const inner_pattern_idx = switch (destruct.kind) { .Required => |p_idx| p_idx, @@ -6692,16 +7012,14 @@ pub const Interpreter = struct { defer tag_list.deinit(); try self.appendUnionTags(value_rt_var, &tag_list); - // Build tag list from value's original rt_var if available. + // Build tag list from value's original rt_var. // This is critical when a value was created with a narrower type (e.g., [Ok]) // and is later matched against a wider type (e.g., Try = [Err, Ok]). // The discriminant stored in the value is based on the original type's ordering, // so we need the original type's tag list to translate it to a tag name. 
var value_tag_list = std.array_list.AlignedManaged(types.Tag, null).init(self.allocator); defer value_tag_list.deinit(); - if (value.rt_var) |orig_rt_var| { - try self.appendUnionTags(orig_rt_var, &value_tag_list); - } + try self.appendUnionTags(value.rt_var, &value_tag_list); const tag_data = try self.extractTagValue(value, value_rt_var); @@ -6774,7 +7092,7 @@ pub const Interpreter = struct { return false; } // getElement expects original index and converts to sorted internally - const elem_val = try payload_tuple.getElement(j); + const elem_val = try payload_tuple.getElement(j, arg_vars[j]); if (!try self.patternMatchesBind(arg_patterns[j], elem_val, arg_vars[j], roc_ops, out_binds, expr_idx)) { self.trimBindingList(out_binds, start_len, roc_ops); return false; @@ -8468,6 +8786,8 @@ pub const Interpreter = struct { elem_size: usize, /// Element layout elem_layout: layout.Layout, + /// Element runtime type variable + elem_rt_var: types.Var, }; pub const AndShortCircuit = struct { @@ -9205,7 +9525,8 @@ pub const Interpreter = struct { const segments = self.env.store.sliceExpr(str_expr.span); if (segments.len == 0) { // Empty string - return immediately - const value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const value = try self.pushStr(str_rt_var); const roc_str: *RocStr = @ptrCast(@alignCast(value.ptr.?)); roc_str.* = RocStr.empty(); try value_stack.push(value); @@ -9586,7 +9907,11 @@ pub const Interpreter = struct { // Compute tuple layout with no elements const tuple_layout_idx = try self.runtime_layout_store.putTuple(&[0]Layout{}); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - const value = try self.pushRaw(tuple_layout, 0); + const tuple_rt_var = expected_rt_var orelse blk: { + const ct_var = can.ModuleEnv.varFrom(expr_idx); + break :blk try self.translateTypeVar(self.env, ct_var); + }; + const value = try self.pushRaw(tuple_layout, 0, tuple_rt_var); try value_stack.push(value); } else 
{ // Schedule collection of elements @@ -9614,12 +9939,11 @@ pub const Interpreter = struct { if (elems.len == 0) { // Empty list - create immediately const list_layout = try self.getRuntimeLayout(list_rt_var); - var dest = try self.pushRaw(list_layout, 0); + const dest = try self.pushRaw(list_layout, 0, list_rt_var); if (dest.ptr != null) { const header: *RocList = @ptrCast(@alignCast(dest.ptr.?)); header.* = RocList.empty(); } - dest.rt_var = list_rt_var; try value_stack.push(dest); } else { // Get element type variable from first element @@ -9665,7 +9989,7 @@ pub const Interpreter = struct { } else if (fields.len == 0) { // Empty record with no extension - create immediately const rec_layout = try self.getRuntimeLayout(rt_var); - const dest = try self.pushRaw(rec_layout, 0); + const dest = try self.pushRaw(rec_layout, 0, rt_var); try value_stack.push(dest); } else { // Non-empty record without extension @@ -9893,12 +10217,11 @@ pub const Interpreter = struct { if (layout_val.tag == .scalar) { // No payload union - just set discriminant - var out = try self.pushRaw(layout_val, 0); + var out = try self.pushRaw(layout_val, 0, rt_var); if (layout_val.data.scalar.tag == .int) { out.is_initialized = false; try out.setInt(@intCast(tag_index)); out.is_initialized = true; - out.rt_var = rt_var; try value_stack.push(out); } else { self.triggerCrash("e_tag: scalar layout is not int", false, roc_ops); @@ -10315,7 +10638,7 @@ pub const Interpreter = struct { layout_val = layout.Layout.frac(types.Frac.Precision.dec); } - var value = try self.pushRaw(layout_val, 0); + var value = try self.pushRaw(layout_val, 0, rt_var); value.is_initialized = false; switch (layout_val.tag) { .scalar => switch (layout_val.data.scalar.tag) { @@ -10364,7 +10687,7 @@ pub const Interpreter = struct { break :blk try self.translateTypeVar(self.env, ct_var); }; const layout_val = try self.getRuntimeLayout(rt_var); - const value = try self.pushRaw(layout_val, 0); + const value = try 
self.pushRaw(layout_val, 0, rt_var); if (value.ptr) |ptr| { const typed_ptr: *f32 = @ptrCast(@alignCast(ptr)); typed_ptr.* = lit.value; @@ -10384,7 +10707,7 @@ pub const Interpreter = struct { break :blk try self.translateTypeVar(self.env, ct_var); }; const layout_val = try self.getRuntimeLayout(rt_var); - const value = try self.pushRaw(layout_val, 0); + const value = try self.pushRaw(layout_val, 0, rt_var); if (value.ptr) |ptr| { const typed_ptr: *f64 = @ptrCast(@alignCast(ptr)); typed_ptr.* = lit.value; @@ -10404,7 +10727,7 @@ pub const Interpreter = struct { break :blk try self.translateTypeVar(self.env, ct_var); }; const layout_val = try self.getRuntimeLayout(rt_var); - const value = try self.pushRaw(layout_val, 0); + const value = try self.pushRaw(layout_val, 0, rt_var); if (value.ptr) |ptr| { const typed_ptr: *RocDec = @ptrCast(@alignCast(ptr)); typed_ptr.* = dec_lit.value; @@ -10431,7 +10754,7 @@ pub const Interpreter = struct { layout_val.data.scalar.tag == .frac and layout_val.data.scalar.data.frac == .dec); - const value = try self.pushRaw(layout_val, 0); + const value = try self.pushRaw(layout_val, 0, rt_var); if (value.ptr) |ptr| { const typed_ptr: *RocDec = @ptrCast(@alignCast(ptr)); const scale_factor = std.math.pow(i128, 10, RocDec.decimal_places - small.value.denominator_power_of_ten); @@ -10448,7 +10771,8 @@ pub const Interpreter = struct { _: *RocOps, ) Error!StackValue { const content = self.env.getString(seg.literal); - const value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const value = try self.pushStr(str_rt_var); const roc_str: *RocStr = @ptrCast(@alignCast(value.ptr.?)); // Use arena allocator for string literals - freed wholesale at interpreter deinit roc_str.* = try self.createConstantStr(content); @@ -10466,7 +10790,7 @@ pub const Interpreter = struct { break :blk try self.translateTypeVar(self.env, ct_var); }; const rec_layout = try self.getRuntimeLayout(rt_var); - return try 
self.pushRaw(rec_layout, 0); + return try self.pushRaw(rec_layout, 0, rt_var); } /// Evaluate an empty list literal (e_empty_list) @@ -10545,7 +10869,7 @@ pub const Interpreter = struct { break :blk Layout{ .tag = .list, .data = .{ .list = elem_layout_idx } }; }; - const dest = try self.pushRaw(list_layout, 0); + const dest = try self.pushRaw(list_layout, 0, final_rt_var); if (dest.ptr) |ptr| { const header: *RocList = @ptrCast(@alignCast(ptr)); header.* = RocList.empty(); @@ -10586,25 +10910,45 @@ pub const Interpreter = struct { // Handle different layout representations if (layout_val.tag == .scalar) { - var out = try self.pushRaw(layout_val, 0); + var out = try self.pushRaw(layout_val, 0, rt_var); if (layout_val.data.scalar.tag == .int) { out.is_initialized = false; try out.setInt(@intCast(tag_index)); out.is_initialized = true; - out.rt_var = rt_var; return out; } self.triggerCrash("e_zero_argument_tag: scalar layout is not int", false, roc_ops); return error.Crash; } else if (layout_val.tag == .record) { // Record { tag: Discriminant, payload: ZST } - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); const tag_idx = acc.findFieldIndex(self.env.idents.tag) orelse { self.triggerCrash("e_zero_argument_tag: tag field not found", false, roc_ops); return error.Crash; }; - const tag_field = try acc.getFieldByIndex(tag_idx); + // Get rt_var for the tag field from the record type + const record_resolved = self.runtime_types.resolveVar(rt_var); + const tag_rt_var = blk: { + if (record_resolved.desc.content == .structure) { + const flat = record_resolved.desc.content.structure; + const fields_range = switch (flat) { + .record => |rec| rec.fields, + .record_unbound => |fields| fields, + else => break :blk try self.runtime_types.fresh(), + }; + const fields = self.runtime_types.getRecordFieldsSlice(fields_range); + var i: usize = 0; + while (i < fields.len) : (i += 1) 
{ + const f = fields.get(i); + if (f.name == self.env.idents.tag) { + break :blk f.var_; + } + } + } + break :blk try self.runtime_types.fresh(); + }; + const tag_field = try acc.getFieldByIndex(tag_idx, tag_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -10613,14 +10957,18 @@ pub const Interpreter = struct { self.triggerCrash("e_zero_argument_tag: record tag field is not scalar int", false, roc_ops); return error.Crash; } - dest.rt_var = rt_var; return dest; } else if (layout_val.tag == .tuple) { // Tuple (payload, tag) - tag unions are now represented as tuples - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, rt_var); var acc = try dest.asTuple(&self.runtime_layout_store); - // Element 1 is the tag discriminant - const tag_field = try acc.getElement(1); + // Element 1 is the tag discriminant - get its rt_var from the tuple type + const tuple_resolved = self.runtime_types.resolveVar(rt_var); + const elem_rt_var = if (tuple_resolved.desc.content == .structure and tuple_resolved.desc.content.structure == .tuple) blk: { + const elem_vars = self.runtime_types.sliceVars(tuple_resolved.desc.content.structure.tuple.elems); + break :blk if (elem_vars.len > 1) elem_vars[1] else rt_var; + } else rt_var; + const tag_field = try acc.getElement(1, elem_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -10629,7 +10977,6 @@ pub const Interpreter = struct { self.triggerCrash("e_zero_argument_tag: tuple tag field is not scalar int", false, roc_ops); return error.Crash; } - dest.rt_var = rt_var; return dest; } self.triggerCrash("e_zero_argument_tag: unexpected layout type", false, roc_ops); @@ -10645,33 +10992,58 @@ pub const Interpreter = struct { roc_ops: *RocOps, ) Error!StackValue { if (layout_val.tag == .record) { - var dest = try 
self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse { self.triggerCrash("e_tag: tag field not found", false, roc_ops); return error.Crash; }; - const tag_field = try acc.getFieldByIndex(tag_field_idx); + // Get rt_var for the tag field from the record type + const record_resolved = self.runtime_types.resolveVar(rt_var); + const tag_rt_var = blk: { + if (record_resolved.desc.content == .structure) { + const flat = record_resolved.desc.content.structure; + const fields_range = switch (flat) { + .record => |rec| rec.fields, + .record_unbound => |fields| fields, + else => break :blk try self.runtime_types.fresh(), + }; + const fields = self.runtime_types.getRecordFieldsSlice(fields_range); + var i: usize = 0; + while (i < fields.len) : (i += 1) { + const f = fields.get(i); + if (f.name == self.env.idents.tag) { + break :blk f.var_; + } + } + } + break :blk try self.runtime_types.fresh(); + }; + const tag_field = try acc.getFieldByIndex(tag_field_idx, tag_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; try tmp.setInt(@intCast(tag_index)); } - dest.rt_var = rt_var; return dest; } else if (layout_val.tag == .tuple) { - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, rt_var); var acc = try dest.asTuple(&self.runtime_layout_store); - const tag_field = try acc.getElement(1); + // Get element rt_var from tuple type + const tuple_resolved = self.runtime_types.resolveVar(rt_var); + const elem_rt_var = if (tuple_resolved.desc.content == .structure and tuple_resolved.desc.content.structure == .tuple) blk: { + const elem_vars = self.runtime_types.sliceVars(tuple_resolved.desc.content.structure.tuple.elems); + break :blk if (elem_vars.len > 1) elem_vars[1] else rt_var; + } else rt_var; + const 
tag_field = try acc.getElement(1, elem_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; try tmp.setInt(@intCast(tag_index)); } - dest.rt_var = rt_var; return dest; } else if (layout_val.tag == .tag_union) { - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, rt_var); // Write discriminant at discriminant_offset const tu_data = self.runtime_layout_store.getTagUnionData(layout_val.data.tag_union.idx); const base_ptr: [*]u8 = @ptrCast(dest.ptr.?); @@ -10684,7 +11056,6 @@ pub const Interpreter = struct { else => {}, } dest.is_initialized = true; - dest.rt_var = rt_var; return dest; } self.triggerCrash("e_tag: unexpected layout in finalizeTagNoPayload", false, roc_ops); @@ -10715,7 +11086,7 @@ pub const Interpreter = struct { self.triggerCrash("e_lambda: expected closure layout", false, roc_ops); return error.Crash; } - const value = try self.pushRaw(closure_layout, 0); + const value = try self.pushRaw(closure_layout, 0, rt_var); self.registerDefValue(expr_idx, value); if (value.ptr) |ptr| { const header: *layout.Closure = @ptrCast(@alignCast(ptr)); @@ -10745,7 +11116,7 @@ pub const Interpreter = struct { break :blk try self.translateTypeVar(self.env, ct_var); }; const closure_layout = try self.getRuntimeLayout(rt_var); - const value = try self.pushRaw(closure_layout, 0); + const value = try self.pushRaw(closure_layout, 0, rt_var); self.registerDefValue(expr_idx, value); if (value.ptr) |ptr| { const header: *layout.Closure = @ptrCast(@alignCast(ptr)); @@ -10767,6 +11138,10 @@ pub const Interpreter = struct { expr_idx: can.CIR.Expr.Idx, hosted: @TypeOf(@as(can.CIR.Expr, undefined).e_hosted_lambda), ) Error!StackValue { + // Get the rt_var from the expression's type + const ct_var = can.ModuleEnv.varFrom(expr_idx); + const rt_var = try self.translateTypeVar(self.env, ct_var); + // Manually create a closure layout since hosted functions might 
have flex types const closure_layout = Layout{ .tag = .closure, @@ -10776,7 +11151,7 @@ pub const Interpreter = struct { }, }, }; - const value = try self.pushRaw(closure_layout, 0); + const value = try self.pushRaw(closure_layout, 0, rt_var); self.registerDefValue(expr_idx, value); if (value.ptr) |ptr| { const header: *layout.Closure = @ptrCast(@alignCast(ptr)); @@ -10832,7 +11207,10 @@ pub const Interpreter = struct { const captures_layout_idx = try self.runtime_layout_store.putRecord(self.runtime_layout_store.env, field_layouts, field_names); const captures_layout = self.runtime_layout_store.getLayout(captures_layout_idx); const closure_layout = Layout.closure(captures_layout_idx); - const value = try self.pushRaw(closure_layout, 0); + // Get rt_var for the closure + const ct_var = can.ModuleEnv.varFrom(expr_idx); + const closure_rt_var = try self.translateTypeVar(self.env, ct_var); + const value = try self.pushRaw(closure_layout, 0, closure_rt_var); self.registerDefValue(expr_idx, value); if (value.ptr) |ptr| { @@ -10851,7 +11229,7 @@ pub const Interpreter = struct { const aligned_off = std.mem.alignForward(usize, header_size, @intCast(cap_align.toByteUnits())); const base: [*]u8 = @ptrCast(@alignCast(ptr)); const rec_ptr: *anyopaque = @ptrCast(base + aligned_off); - const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true }; + const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true, .rt_var = closure_rt_var }; var accessor = try rec_val.asRecord(&self.runtime_layout_store); for (caps, 0..) 
|_, cap_i| { const cap_val = capture_values[cap_i]; @@ -10888,10 +11266,12 @@ pub const Interpreter = struct { const aligned_off = std.mem.alignForward(usize, header_sz, @intCast(cap_align.toByteUnits())); const base: [*]u8 = @ptrCast(@alignCast(cls_val.ptr.?)); const rec_ptr: *anyopaque = @ptrCast(base + aligned_off); - const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true }; + // Use the closure's rt_var for the captures record + const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true, .rt_var = cls_val.rt_var }; var rec_acc = (rec_val.asRecord(&self.runtime_layout_store)) catch continue; if (rec_acc.findFieldIndex(cap.name)) |fidx| { - if (rec_acc.getFieldByIndex(fidx) catch null) |field_val| { + const field_rt_var = self.runtime_types.fresh() catch continue; + if (rec_acc.getFieldByIndex(fidx, field_rt_var) catch null) |field_val| { return field_val; } } @@ -11017,10 +11397,11 @@ pub const Interpreter = struct { const aligned_off = std.mem.alignForward(usize, header_sz, @intCast(cap_align.toByteUnits())); const base: [*]u8 = @ptrCast(@alignCast(cls_val.ptr.?)); const rec_ptr: *anyopaque = @ptrCast(base + aligned_off); - const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true }; + const rec_val = StackValue{ .layout = captures_layout, .ptr = rec_ptr, .is_initialized = true, .rt_var = cls_val.rt_var }; var accessor = try rec_val.asRecord(&self.runtime_layout_store); if (accessor.findFieldIndex(var_ident)) |fidx| { - const field_val = try accessor.getFieldByIndex(fidx); + const field_rt = try self.runtime_types.fresh(); + const field_val = try accessor.getFieldByIndex(fidx, field_rt); return try self.pushCopy(field_val, roc_ops); } } @@ -11161,7 +11542,7 @@ pub const Interpreter = struct { params = lam_expr.e_lambda.args; } } else return; - const ph = try self.pushRaw(closure_layout, 0); + const ph = try self.pushRaw(closure_layout, 0, patt_rt_var); 
if (ph.ptr) |ptr| { const header: *layout.Closure = @ptrCast(@alignCast(ptr)); header.* = .{ @@ -11621,7 +12002,11 @@ pub const Interpreter = struct { // Empty tuple (shouldn't happen as it's handled directly) const tuple_layout_idx = try self.runtime_layout_store.putTuple(&[0]Layout{}); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - const tuple_val = try self.pushRaw(tuple_layout, 0); + // Create empty tuple type var + const empty_range = try self.runtime_types.appendVars(&[0]types.Var{}); + const empty_tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = empty_range } } }; + const empty_tuple_rt_var = try self.runtime_types.freshFromContent(empty_tuple_content); + const tuple_val = try self.pushRaw(tuple_layout, 0, empty_tuple_rt_var); try value_stack.push(tuple_val); } else { // Gather layouts and values @@ -11633,18 +12018,28 @@ pub const Interpreter = struct { var values = try self.allocator.alloc(StackValue, total_count); defer self.allocator.free(values); + // Collect element rt_vars for constructing tuple type + var elem_rt_vars = try self.allocator.alloc(types.Var, total_count); + defer self.allocator.free(elem_rt_vars); + // Pop values in reverse order (last evaluated is on top) var i: usize = total_count; while (i > 0) { i -= 1; values[i] = value_stack.pop() orelse return error.Crash; elem_layouts[i] = values[i].layout; + elem_rt_vars[i] = values[i].rt_var; } + // Create tuple type from element types + const elem_vars_range = try self.runtime_types.appendVars(elem_rt_vars); + const tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = elem_vars_range } } }; + const tuple_rt_var = try self.runtime_types.freshFromContent(tuple_content); + // Create tuple layout const tuple_layout_idx = try self.runtime_layout_store.putTuple(elem_layouts); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - var dest = try self.pushRaw(tuple_layout, 0); + var dest = try 
self.pushRaw(tuple_layout, 0, tuple_rt_var); var accessor = try dest.asTuple(&self.runtime_layout_store); if (total_count != accessor.getElementCount()) return error.TypeMismatch; @@ -11686,12 +12081,11 @@ pub const Interpreter = struct { if (total_count == 0) { // Empty list (shouldn't happen as it's handled directly) const list_layout = try self.getRuntimeLayout(lc.list_rt_var); - var dest = try self.pushRaw(list_layout, 0); + const dest = try self.pushRaw(list_layout, 0, lc.list_rt_var); if (dest.ptr != null) { const header: *RocList = @ptrCast(@alignCast(dest.ptr.?)); header.* = RocList.empty(); } - dest.rt_var = lc.list_rt_var; try value_stack.push(dest); } else { // Pop all collected values from the value stack @@ -11712,7 +12106,7 @@ pub const Interpreter = struct { const correct_elem_idx = try self.runtime_layout_store.insertLayout(actual_elem_layout); const actual_list_layout = Layout{ .tag = .list, .data = .{ .list = correct_elem_idx } }; - const dest = try self.pushRaw(actual_list_layout, 0); + const dest = try self.pushRaw(actual_list_layout, 0, lc.list_rt_var); if (dest.ptr == null) { // Decref all values before returning for (values) |val| { @@ -11864,7 +12258,7 @@ pub const Interpreter = struct { try self.ensureVarLayoutCapacity(root_idx + 1); self.var_to_layout_slot.items[root_idx] = @intFromEnum(record_layout_idx) + 1; - var dest = try self.pushRaw(rec_layout, 0); + var dest = try self.pushRaw(rec_layout, 0, rc.rt_var); var accessor = try dest.asRecord(&self.runtime_layout_store); // Copy base record fields first @@ -11874,7 +12268,8 @@ pub const Interpreter = struct { while (idx < base_accessor.getFieldCount()) : (idx += 1) { const info = base_accessor.field_layouts.get(idx); const dest_field_idx = accessor.findFieldIndex(info.name) orelse return error.TypeMismatch; - const base_field_value = try base_accessor.getFieldByIndex(idx); + const field_rt = try self.runtime_types.fresh(); + const base_field_value = try base_accessor.getFieldByIndex(idx, 
field_rt); try accessor.setFieldByIndex(dest_field_idx, base_field_value, roc_ops); } } @@ -11892,7 +12287,8 @@ pub const Interpreter = struct { if (base_value_opt) |base_value| { var base_accessor = try base_value.asRecord(&self.runtime_layout_store); if (base_accessor.findFieldIndex(translated_name) != null) { - const existing = try accessor.getFieldByIndex(dest_field_idx); + const field_rt = try self.runtime_types.fresh(); + const existing = try accessor.getFieldByIndex(dest_field_idx, field_rt); existing.decref(&self.runtime_layout_store, roc_ops); } } @@ -11988,7 +12384,7 @@ pub const Interpreter = struct { if (tc.layout_type == 0) { // Record layout { tag, payload } - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, tc.rt_var); var acc = try dest.asRecord(&self.runtime_layout_store); const tag_field_idx = acc.findFieldIndex(self.env.idents.tag) orelse { for (values) |v| v.decref(&self.runtime_layout_store, roc_ops); @@ -12002,7 +12398,8 @@ pub const Interpreter = struct { }; // Write tag discriminant - const tag_field = try acc.getFieldByIndex(tag_field_idx); + const field_rt = try self.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(tag_field_idx, field_rt); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -12010,7 +12407,8 @@ pub const Interpreter = struct { } // Write payload - const payload_field = try acc.getFieldByIndex(payload_field_idx); + const field_rt2 = try self.runtime_types.fresh(); + const payload_field = try acc.getFieldByIndex(payload_field_idx, field_rt2); if (payload_field.ptr) |payload_ptr| { if (total_count == 1) { try values[0].copyToPtr(&self.runtime_layout_store, payload_ptr, roc_ops); @@ -12018,12 +12416,19 @@ pub const Interpreter = struct { // Multiple args - create tuple payload var elem_layouts = try self.allocator.alloc(Layout, total_count); defer self.allocator.free(elem_layouts); + 
var elem_rt_vars = try self.allocator.alloc(types.Var, total_count); + defer self.allocator.free(elem_rt_vars); for (values, 0..) |val, idx| { elem_layouts[idx] = val.layout; + elem_rt_vars[idx] = val.rt_var; } const tuple_layout_idx = try self.runtime_layout_store.putTuple(elem_layouts); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true }; + // Create tuple type from element types + const elem_vars_range = try self.runtime_types.appendVars(elem_rt_vars); + const tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = elem_vars_range } } }; + const tuple_rt_var = try self.runtime_types.freshFromContent(tuple_content); + var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true, .rt_var = tuple_rt_var }; var tup_acc = try tuple_dest.asTuple(&self.runtime_layout_store); for (values, 0..) |val, idx| { try tup_acc.setElement(idx, val, roc_ops); @@ -12034,15 +12439,25 @@ pub const Interpreter = struct { for (values) |val| { val.decref(&self.runtime_layout_store, roc_ops); } - dest.rt_var = tc.rt_var; try value_stack.push(dest); } else if (tc.layout_type == 1) { // Tuple layout (payload, tag) - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, tc.rt_var); var acc = try dest.asTuple(&self.runtime_layout_store); + // Compute element rt_vars for tuple access + // Element 0 = payload, Element 1 = discriminant (int) + const discriminant_rt_var = try self.runtime_types.fresh(); + const payload_rt_var: types.Var = if (total_count == 1) + tc.arg_rt_vars[0] + else if (total_count > 0) blk: { + const elem_vars_range = try self.runtime_types.appendVars(tc.arg_rt_vars); + const tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = elem_vars_range } } }; + break :blk try self.runtime_types.freshFromContent(tuple_content); + } else try 
self.runtime_types.fresh(); + // Write tag discriminant (element 1) - const tag_field = try acc.getElement(1); + const tag_field = try acc.getElement(1, discriminant_rt_var); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { var tmp = tag_field; tmp.is_initialized = false; @@ -12050,7 +12465,7 @@ pub const Interpreter = struct { } // Write payload (element 0) - const payload_field = try acc.getElement(0); + const payload_field = try acc.getElement(0, payload_rt_var); if (payload_field.ptr) |payload_ptr| { if (total_count == 1) { // Check for layout mismatch and handle it @@ -12063,11 +12478,11 @@ pub const Interpreter = struct { var elem_layouts_fixed = [2]Layout{ values[0].layout, tag_field.layout }; const proper_tuple_idx = try self.runtime_layout_store.putTuple(&elem_layouts_fixed); const proper_tuple_layout = self.runtime_layout_store.getLayout(proper_tuple_idx); - var proper_dest = try self.pushRaw(proper_tuple_layout, 0); + var proper_dest = try self.pushRaw(proper_tuple_layout, 0, tc.rt_var); var proper_acc = try proper_dest.asTuple(&self.runtime_layout_store); // Write tag - const proper_tag_field = try proper_acc.getElement(1); + const proper_tag_field = try proper_acc.getElement(1, discriminant_rt_var); if (proper_tag_field.layout.tag == .scalar and proper_tag_field.layout.data.scalar.tag == .int) { var tmp = proper_tag_field; tmp.is_initialized = false; @@ -12075,7 +12490,7 @@ pub const Interpreter = struct { } // Write payload - const proper_payload_field = try proper_acc.getElement(0); + const proper_payload_field = try proper_acc.getElement(0, values[0].rt_var); if (proper_payload_field.ptr) |proper_ptr| { try values[0].copyToPtr(&self.runtime_layout_store, proper_ptr, roc_ops); } @@ -12093,12 +12508,19 @@ pub const Interpreter = struct { // Multiple args - create tuple payload var elem_layouts = try self.allocator.alloc(Layout, total_count); defer self.allocator.free(elem_layouts); + var elem_rt_vars = try 
self.allocator.alloc(types.Var, total_count); + defer self.allocator.free(elem_rt_vars); for (values, 0..) |val, idx| { elem_layouts[idx] = val.layout; + elem_rt_vars[idx] = val.rt_var; } const tuple_layout_idx = try self.runtime_layout_store.putTuple(elem_layouts); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true }; + // Create tuple type from element types + const elem_vars_range = try self.runtime_types.appendVars(elem_rt_vars); + const tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = elem_vars_range } } }; + const tuple_rt_var = try self.runtime_types.freshFromContent(tuple_content); + var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true, .rt_var = tuple_rt_var }; var tup_acc = try tuple_dest.asTuple(&self.runtime_layout_store); for (values, 0..) |val, idx| { try tup_acc.setElement(idx, val, roc_ops); @@ -12109,7 +12531,6 @@ pub const Interpreter = struct { for (values) |val| { val.decref(&self.runtime_layout_store, roc_ops); } - dest.rt_var = tc.rt_var; try value_stack.push(dest); } else if (tc.layout_type == 2) { // Tag union layout: payload at offset 0, discriminant at discriminant_offset @@ -12141,11 +12562,14 @@ pub const Interpreter = struct { var elem_layouts_fixed = [2]Layout{ values[0].layout, disc_layout }; const proper_tuple_idx = try self.runtime_layout_store.putTuple(&elem_layouts_fixed); const proper_tuple_layout = self.runtime_layout_store.getLayout(proper_tuple_idx); - var proper_dest = try self.pushRaw(proper_tuple_layout, 0); + var proper_dest = try self.pushRaw(proper_tuple_layout, 0, tc.rt_var); var proper_acc = try proper_dest.asTuple(&self.runtime_layout_store); + // Create fresh vars for tuple element access + const disc_rt_var = try self.runtime_types.fresh(); + // Write tag discriminant (element 1) - const proper_tag_field = try 
proper_acc.getElement(1); + const proper_tag_field = try proper_acc.getElement(1, disc_rt_var); if (proper_tag_field.layout.tag == .scalar and proper_tag_field.layout.data.scalar.tag == .int) { var tmp = proper_tag_field; tmp.is_initialized = false; @@ -12153,7 +12577,7 @@ pub const Interpreter = struct { } // Write payload (element 0) - const proper_payload_field = try proper_acc.getElement(0); + const proper_payload_field = try proper_acc.getElement(0, values[0].rt_var); if (proper_payload_field.ptr) |proper_ptr| { try values[0].copyToPtr(&self.runtime_layout_store, proper_ptr, roc_ops); } @@ -12161,13 +12585,12 @@ pub const Interpreter = struct { for (values) |val| { val.decref(&self.runtime_layout_store, roc_ops); } - proper_dest.rt_var = tc.rt_var; try value_stack.push(proper_dest); return true; } } - var dest = try self.pushRaw(layout_val, 0); + var dest = try self.pushRaw(layout_val, 0, tc.rt_var); // Write discriminant const base_ptr: [*]u8 = @ptrCast(dest.ptr.?); @@ -12188,12 +12611,19 @@ pub const Interpreter = struct { // Multiple args - create tuple payload at offset 0 var elem_layouts = try self.allocator.alloc(Layout, total_count); defer self.allocator.free(elem_layouts); + var elem_rt_vars = try self.allocator.alloc(types.Var, total_count); + defer self.allocator.free(elem_rt_vars); for (values, 0..) 
|val, idx| { elem_layouts[idx] = val.layout; + elem_rt_vars[idx] = val.rt_var; } const tuple_layout_idx = try self.runtime_layout_store.putTuple(elem_layouts); const tuple_layout = self.runtime_layout_store.getLayout(tuple_layout_idx); - var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true }; + // Create tuple type from element types + const elem_vars_range = try self.runtime_types.appendVars(elem_rt_vars); + const tuple_content = types.Content{ .structure = .{ .tuple = .{ .elems = elem_vars_range } } }; + const tuple_rt_var = try self.runtime_types.freshFromContent(tuple_content); + var tuple_dest = StackValue{ .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true, .rt_var = tuple_rt_var }; var tup_acc = try tuple_dest.asTuple(&self.runtime_layout_store); for (values, 0..) |val, idx| { try tup_acc.setElement(idx, val, roc_ops); @@ -12217,9 +12647,8 @@ pub const Interpreter = struct { const scrutinee = try self.pushCopy(scrutinee_temp, roc_ops); scrutinee_temp.decref(&self.runtime_layout_store, roc_ops); - // Use the scrutinee's own rt_var if available (preserves type through polymorphic calls), - // otherwise fall back to the translated scrutinee type from the match expression - const effective_scrutinee_rt_var = scrutinee.rt_var orelse mb.scrutinee_rt_var; + // Use the scrutinee's own rt_var (preserves type through polymorphic calls) + const effective_scrutinee_rt_var = scrutinee.rt_var; // Try branches starting from current_branch var branch_idx = mb.current_branch; @@ -12349,7 +12778,7 @@ pub const Interpreter = struct { const ct_var = can.ModuleEnv.varFrom(ec.expr_idx); const rt_var = try self.translateTypeVar(self.env, ct_var); const layout_val = try self.getRuntimeLayout(rt_var); - const result = try self.pushRaw(layout_val, 0); + const result = try self.pushRaw(layout_val, 0, rt_var); try value_stack.push(result); return true; } @@ -12368,7 +12797,7 @@ pub const Interpreter = struct { const ct_var = 
can.ModuleEnv.varFrom(dp.expr_idx); const rt_var = try self.translateTypeVar(self.env, ct_var); const layout_val = try self.getRuntimeLayout(rt_var); - const result = try self.pushRaw(layout_val, 0); + const result = try self.pushRaw(layout_val, 0, rt_var); try value_stack.push(result); return true; }, @@ -12434,7 +12863,8 @@ pub const Interpreter = struct { // Fall back to default rendering const rendered = try self.renderValueRocWithType(value, ir.inner_rt_var, roc_ops); defer self.allocator.free(rendered); - const str_value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const str_value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(str_value.ptr.?)); roc_str_ptr.* = RocStr.fromSlice(rendered, roc_ops); try value_stack.push(str_value); @@ -12493,7 +12923,8 @@ pub const Interpreter = struct { defer self.allocator.free(rendered); // Create a RocStr from the rendered bytes and push it - const str_value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const str_value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(str_value.ptr.?)); roc_str_ptr.* = RocStr.fromSlice(rendered, roc_ops); try value_stack.push(str_value); @@ -12517,7 +12948,8 @@ pub const Interpreter = struct { seg_value.decref(&self.runtime_layout_store, roc_ops); // Push as string value - const str_value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const str_value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(str_value.ptr.?)); roc_str_ptr.* = segment_str; try value_stack.push(str_value); @@ -12582,7 +13014,8 @@ pub const Interpreter = struct { break :blk RocStr.fromSlice(buffer, roc_ops); }; - const result = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const result = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(result.ptr.?)); 
roc_str_ptr.* = result_str; try value_stack.push(result); @@ -12598,7 +13031,8 @@ pub const Interpreter = struct { // Use arena allocator for string literals - freed wholesale at interpreter deinit const content = self.env.getString(next_seg_expr.e_str_segment.literal); const seg_str = try self.createConstantStr(content); - const seg_value = try self.pushStr(); + const str_rt_var = try self.getCanonicalStrRuntimeVar(); + const seg_value = try self.pushStr(str_rt_var); const roc_str_ptr: *RocStr = @ptrCast(@alignCast(seg_value.ptr.?)); roc_str_ptr.* = seg_str; try value_stack.push(seg_value); @@ -12883,13 +13317,10 @@ pub const Interpreter = struct { self.early_return_value = null; var return_val = return_val_in; - // Only set rt_var if the return value doesn't already have one. - // This preserves the original type for identity-like functions where - // the return value is the same as an input (which already has a valid rt_var). - if (return_val.rt_var == null) { - if (cleanup.call_ret_rt_var) |rt_var| { - return_val.rt_var = rt_var; - } + // Update rt_var if we have a specific return type expected. + // This allows caller to override the return type for proper type tracking. + if (cleanup.call_ret_rt_var) |rt_var| { + return_val.rt_var = rt_var; } // Pop active closure if needed @@ -12962,13 +13393,10 @@ pub const Interpreter = struct { self.trimBindingList(&self.bindings, cleanup.saved_bindings_len, roc_ops); if (cleanup.arg_rt_vars_to_free) |vars| self.allocator.free(vars); - // Only set rt_var if the result doesn't already have one. - // This preserves the original type for identity-like functions where - // the return value is the same as an input (which already has a valid rt_var). - if (result.rt_var == null) { - if (cleanup.call_ret_rt_var) |rt_var| { - result.rt_var = rt_var; - } + // Update rt_var if we have a specific return type expected. + // This allows caller to override the return type for proper type tracking. 
+ if (cleanup.call_ret_rt_var) |rt_var| { + result.rt_var = rt_var; } try value_stack.push(result); return true; @@ -13101,15 +13529,14 @@ pub const Interpreter = struct { // Track if the value came from a polymorphic context (flex/rigid rt_var) var effective_receiver_rt_var = ba.receiver_rt_var; var value_is_polymorphic = false; - if (lhs.rt_var) |val_rt_var| { - const val_resolved = self.runtime_types.resolveVar(val_rt_var); - // Only use the value's type if it's concrete (has structure/alias) - if (val_resolved.desc.content == .structure or val_resolved.desc.content == .alias) { - effective_receiver_rt_var = val_rt_var; - } else if (val_resolved.desc.content == .flex or val_resolved.desc.content == .rigid) { - // The value came from a polymorphic context - value_is_polymorphic = true; - } + const val_rt_var = lhs.rt_var; + const val_resolved = self.runtime_types.resolveVar(val_rt_var); + // Only use the value's type if it's concrete (has structure/alias) + if (val_resolved.desc.content == .structure or val_resolved.desc.content == .alias) { + effective_receiver_rt_var = val_rt_var; + } else if (val_resolved.desc.content == .flex or val_resolved.desc.content == .rigid) { + // The value came from a polymorphic context + value_is_polymorphic = true; } // Check if effective type is still flex/rigid after trying value's rt_var @@ -13177,29 +13604,29 @@ pub const Interpreter = struct { try value_stack.push(result_val); return true; } - // Handle numeric arithmetic directly via evalDecBinop + // Handle numeric arithmetic via type-aware evalNumericBinop if (ba.method_ident == self.root_env.idents.plus) { - const result = try self.evalDecBinop(.add, lhs.layout, lhs, rhs, roc_ops); + const result = try self.evalNumericBinop(.add, lhs, rhs, roc_ops); try value_stack.push(result); return true; } else if (ba.method_ident == self.root_env.idents.minus) { - const result = try self.evalDecBinop(.sub, lhs.layout, lhs, rhs, roc_ops); + const result = try 
self.evalNumericBinop(.sub, lhs, rhs, roc_ops); try value_stack.push(result); return true; } else if (ba.method_ident == self.root_env.idents.times) { - const result = try self.evalDecBinop(.mul, lhs.layout, lhs, rhs, roc_ops); + const result = try self.evalNumericBinop(.mul, lhs, rhs, roc_ops); try value_stack.push(result); return true; } else if (ba.method_ident == self.root_env.idents.div_by) { - const result = try self.evalDecBinop(.div, lhs.layout, lhs, rhs, roc_ops); + const result = try self.evalNumericBinop(.div, lhs, rhs, roc_ops); try value_stack.push(result); return true; } else if (ba.method_ident == self.root_env.idents.div_trunc_by) { - const result = try self.evalDecBinop(.div_trunc, lhs.layout, lhs, rhs, roc_ops); + const result = try self.evalNumericBinop(.div_trunc, lhs, rhs, roc_ops); try value_stack.push(result); return true; } else if (ba.method_ident == self.root_env.idents.rem_by) { - const result = try self.evalDecBinop(.rem, lhs.layout, lhs, rhs, roc_ops); + const result = try self.evalNumericBinop(.rem, lhs, rhs, roc_ops); try value_stack.push(result); return true; } @@ -13368,29 +13795,39 @@ pub const Interpreter = struct { var accessor = try receiver_value.asRecord(&self.runtime_layout_store); const field_idx = accessor.findFieldIndex(da.field_name) orelse return error.TypeMismatch; - const field_value = try accessor.getFieldByIndex(field_idx); + + // Get the field's rt_var from the receiver's record type + const receiver_resolved = self.runtime_types.resolveVar(receiver_value.rt_var); + const field_rt_var = blk: { + if (receiver_resolved.desc.content == .structure) { + const flat = receiver_resolved.desc.content.structure; + const fields_range = switch (flat) { + .record => |rec| rec.fields, + .record_unbound => |fields| fields, + else => break :blk try self.runtime_types.fresh(), + }; + const fields = self.runtime_types.getRecordFieldsSlice(fields_range); + var i: usize = 0; + while (i < fields.len) : (i += 1) { + const f = 
fields.get(i); + if (f.name == da.field_name) { + break :blk f.var_; + } + } + } + break :blk try self.runtime_types.fresh(); + }; + + const field_value = try accessor.getFieldByIndex(field_idx, field_rt_var); const result = try self.pushCopy(field_value, roc_ops); try value_stack.push(result); return true; } // Method call - resolve receiver type for dispatch - // Always prefer the runtime type from the evaluated value if available, + // Always prefer the runtime type from the evaluated value, // as it's more accurate than the compile-time type (which may be incorrectly inferred) - var effective_receiver_rt_var = da.receiver_rt_var; - if (receiver_value.rt_var) |val_rt_var| { - // Use the runtime type from evaluation (e.g., split_on returns List Str) - effective_receiver_rt_var = val_rt_var; - } else { - // Fall back to compile-time type, with Dec default for unresolved types - const receiver_resolved_check = self.runtime_types.resolveVar(da.receiver_rt_var); - if (receiver_resolved_check.desc.content == .flex or receiver_resolved_check.desc.content == .rigid) { - // No type info available, default to Dec for numeric operations - const dec_content = try self.mkNumberTypeContentRuntime("Dec"); - const dec_var = try self.runtime_types.freshFromContent(dec_content); - effective_receiver_rt_var = dec_var; - } - } + const effective_receiver_rt_var = receiver_value.rt_var; // Don't use resolveBaseVar here - we need to keep the nominal type // for method dispatch (resolveBaseVar unwraps nominal types to their backing) @@ -13801,6 +14238,7 @@ pub const Interpreter = struct { .ptr = elem_ptr, .layout = elem_layout, .is_initialized = true, + .rt_var = fl.patt_rt_var, }; elem_value.incref(&self.runtime_layout_store); @@ -13868,6 +14306,7 @@ pub const Interpreter = struct { .ptr = elem_ptr, .layout = fl.elem_layout, .is_initialized = true, + .rt_var = fl.patt_rt_var, }; elem_value.incref(&self.runtime_layout_store); @@ -14257,11 +14696,13 @@ pub const Interpreter = struct { 
.layout = sc.elem_layout, .ptr = @ptrCast(elem_at_inner), .is_initialized = true, + .rt_var = sc.elem_rt_var, }; const elem_current_value = StackValue{ .layout = sc.elem_layout, .ptr = @ptrCast(elem_at_current), .is_initialized = true, + .rt_var = sc.elem_rt_var, }; // Copy elements for comparison @@ -14281,6 +14722,7 @@ pub const Interpreter = struct { .list_len = sc.list_len, .elem_size = sc.elem_size, .elem_layout = sc.elem_layout, + .elem_rt_var = sc.elem_rt_var, } } }); saved_rigid_subst = null; @@ -14338,11 +14780,13 @@ pub const Interpreter = struct { .layout = sc.elem_layout, .ptr = @ptrCast(elem_at_outer), .is_initialized = true, + .rt_var = sc.elem_rt_var, }; const elem_prev_value = StackValue{ .layout = sc.elem_layout, .ptr = @ptrCast(elem_at_prev), .is_initialized = true, + .rt_var = sc.elem_rt_var, }; // Copy elements for comparison @@ -14360,6 +14804,7 @@ pub const Interpreter = struct { .list_len = sc.list_len, .elem_size = sc.elem_size, .elem_layout = sc.elem_layout, + .elem_rt_var = sc.elem_rt_var, } } }); saved_rigid_subst = null; diff --git a/src/eval/render_helpers.zig b/src/eval/render_helpers.zig index ed1ce97fa4..5df9f9812b 100644 --- a/src/eval/render_helpers.zig +++ b/src/eval/render_helpers.zig @@ -130,7 +130,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. const count = tup_acc.getElementCount(); if (count > 0) { // Get tag index from the last element - const tag_elem = try tup_acc.getElement(count - 1); + // Use placeholder rt_var for tag discriminant (it's always an integer) + const tag_elem = try tup_acc.getElement(count - 1, @enumFromInt(0)); if (tag_elem.layout.tag == .scalar and tag_elem.layout.data.scalar.tag == .int) { if (std.math.cast(usize, tag_elem.asI128())) |tag_idx| { tag_index = tag_idx; @@ -150,26 +151,28 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
if (arg_vars.len == 1) { // Single payload: first element // Get the correct layout from the type variable, not the payload union layout - const payload_elem = try tup_acc.getElement(0); const arg_var = arg_vars[0]; + const payload_elem = try tup_acc.getElement(0, arg_var); const layout_idx = try ctx.layout_store.addTypeVar(arg_var, ctx.type_scope); const arg_layout = ctx.layout_store.getLayout(layout_idx); const payload_value = StackValue{ .layout = arg_layout, .ptr = payload_elem.ptr, .is_initialized = payload_elem.is_initialized, + .rt_var = arg_var, }; const rendered = try renderValueRocWithType(ctx, payload_value, arg_var); defer gpa.free(rendered); try out.appendSlice(rendered); } else { // Multiple payloads: first element is a nested tuple containing all payload args - const payload_elem = try tup_acc.getElement(0); + // Use placeholder rt_var for the tuple (we have the individual element types) + const payload_elem = try tup_acc.getElement(0, @enumFromInt(0)); if (payload_elem.layout.tag == .tuple) { var payload_tup = try payload_elem.asTuple(ctx.layout_store); var j: usize = 0; while (j < arg_vars.len) : (j += 1) { - const elem_value = try payload_tup.getElement(j); + const elem_value = try payload_tup.getElement(j, arg_vars[j]); const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -189,9 +192,10 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
} else if (value.layout.tag == .record) { var acc = try value.asRecord(ctx.layout_store); if (acc.findFieldIndex(ctx.env.idents.tag)) |idx| { - const tag_field = try acc.getFieldByIndex(idx); + const field_rt = try ctx.runtime_types.fresh(); + const tag_field = try acc.getFieldByIndex(idx, field_rt); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { - const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true }; + const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; // Only treat as tag if value fits in usize (valid tag discriminants are small) if (std.math.cast(usize, tmp_sv.asI128())) |tag_idx| { tag_index = tag_idx; @@ -205,7 +209,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. errdefer out.deinit(); try out.appendSlice(tag_name); if (acc.findFieldIndex(ctx.env.idents.payload)) |pidx| { - const payload = try acc.getFieldByIndex(pidx); + const field_rt = try ctx.runtime_types.fresh(); + const payload = try acc.getFieldByIndex(pidx, field_rt); const args_range = tags.items(.args)[tag_index]; const arg_vars = ctx.runtime_types.sliceVars(toVarRange(args_range)); if (arg_vars.len > 0) { @@ -218,6 +223,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = arg_layout, .ptr = payload.ptr, .is_initialized = payload.is_initialized, + .rt_var = arg_var, }; const rendered = try renderValueRocWithType(ctx, payload_value, arg_var); defer gpa.free(rendered); @@ -237,6 +243,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = tuple_layout, .ptr = payload.ptr, .is_initialized = payload.is_initialized, + .rt_var = @enumFromInt(0), }; if (tuple_size == 0 or payload.ptr == null) { var j: usize = 0; @@ -247,6 +254,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
.layout = elem_layouts[j], .ptr = null, .is_initialized = true, + .rt_var = arg_vars[j], }, arg_vars[j], ); @@ -259,7 +267,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. var j: usize = 0; while (j < arg_vars.len) : (j += 1) { const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch; - const elem_value = try tup_acc.getElement(sorted_idx); + const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]); const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -308,6 +316,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = arg_layout, .ptr = payload_ptr, .is_initialized = true, + .rt_var = arg_var, }; const rendered = try renderValueRocWithType(ctx, payload_value, arg_var); defer gpa.free(rendered); @@ -333,6 +342,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = elem_layouts[j], .ptr = null, .is_initialized = true, + .rt_var = arg_vars[j], }, arg_vars[j], ); @@ -345,12 +355,13 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true, + .rt_var = @enumFromInt(0), }; var tup_acc = try tuple_value.asTuple(ctx.layout_store); var j: usize = 0; while (j < arg_vars.len) : (j += 1) { const sorted_idx = tup_acc.findElementIndexByOriginal(j) orelse return error.TypeMismatch; - const elem_value = try tup_acc.getElement(sorted_idx); + const elem_value = try tup_acc.getElement(sorted_idx, arg_vars[j]); const rendered = try renderValueRocWithType(ctx, elem_value, arg_vars[j]); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -383,6 +394,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
.layout = payload_layout, .ptr = null, .is_initialized = true, + .rt_var = payload_var, }; switch (value.layout.tag) { @@ -464,7 +476,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. const idx = acc.findFieldIndex(f.name) orelse { std.debug.panic("Record field not found in layout: type says field '{s}' exists but layout doesn't have it", .{name_text}); }; - const field_val = try acc.getFieldByIndex(idx); + const field_rt = try ctx.runtime_types.fresh(); + const field_val = try acc.getFieldByIndex(idx, field_rt); const rendered = try renderValueRocWithType(ctx, field_val, f.var_); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -537,7 +550,8 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { const count = acc.getElementCount(); var i: usize = 0; while (i < count) : (i += 1) { - const elem = try acc.getElement(i); + // Use placeholder rt_var (no type info available in this context) + const elem = try acc.getElement(i, @enumFromInt(0)); const rendered = try renderValueRoc(ctx, elem); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -560,7 +574,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { while (i < len) : (i += 1) { if (roc_list.bytes) |bytes| { const elem_ptr: *anyopaque = @ptrCast(bytes + i * elem_size); - const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true }; + const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; const rendered = try renderValueRoc(ctx, elem_val); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -601,7 +615,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { const field_layout = ctx.layout_store.getLayout(fld.layout); const base_ptr: [*]u8 = @ptrCast(@alignCast(value.ptr.?)); const field_ptr: *anyopaque = @ptrCast(base_ptr + offset); - const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, 
.is_initialized = true }; + const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; const rendered = try renderValueRoc(ctx, field_val); defer gpa.free(rendered); try out.appendSlice(rendered); diff --git a/src/eval/test/helpers.zig b/src/eval/test/helpers.zig index da0448df03..2b800f21bd 100644 --- a/src/eval/test/helpers.zig +++ b/src/eval/test/helpers.zig @@ -331,7 +331,8 @@ pub fn runExpectTuple(src: []const u8, expected_elements: []const ExpectedElemen for (expected_elements) |expected_element| { // Get the element at the specified index - const element = try tuple_accessor.getElement(@intCast(expected_element.index)); + // Use placeholder rt_var (test helper without type information) + const element = try tuple_accessor.getElement(@intCast(expected_element.index), @enumFromInt(0)); // Check if this is an integer or Dec try std.testing.expect(element.layout.tag == .scalar); @@ -397,6 +398,7 @@ pub fn runExpectRecord(src: []const u8, expected_fields: []const ExpectedField, .layout = field_layout, .ptr = field_ptr, .is_initialized = true, + .rt_var = @enumFromInt(0), }; // Check if this is an integer or Dec const int_val = if (field_layout.data.scalar.tag == .int) blk: { @@ -453,7 +455,8 @@ pub fn runExpectListI64(src: []const u8, expected_elements: []const i64, should_ try std.testing.expectEqual(expected_elements.len, list_accessor.len()); for (expected_elements, 0..) 
|expected_val, i| { - const element = try list_accessor.getElement(i); + // Use placeholder rt_var (test helper without type information) + const element = try list_accessor.getElement(i, @enumFromInt(0)); // Check if this is an integer try std.testing.expect(element.layout.tag == .scalar); diff --git a/src/repl/eval.zig b/src/repl/eval.zig index 4a8e494b01..0aee7dcd85 100644 --- a/src/repl/eval.zig +++ b/src/repl/eval.zig @@ -855,16 +855,7 @@ pub const Repl = struct { try self.generateAndStoreDebugHtml(module_env, final_expr_idx); } - const output = blk: { - if (result.rt_var) |rt_var| { - break :blk try interpreter.renderValueRocWithType(result, rt_var, self.roc_ops); - } - const expr_ct_var = can.ModuleEnv.varFrom(final_expr_idx); - const expr_rt_var = interpreter.translateTypeVar(module_env, expr_ct_var) catch { - break :blk try interpreter.renderValueRoc(result); - }; - break :blk try interpreter.renderValueRocWithType(result, expr_rt_var, self.roc_ops); - }; + const output = try interpreter.renderValueRocWithType(result, result.rt_var, self.roc_ops); result.decref(&interpreter.runtime_layout_store, self.roc_ops); return .{ .expression = output }; From 051ac6d9cbe402bdb151eb803e27317f781c8fd6 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 22:12:53 -0500 Subject: [PATCH 10/64] Fix getting canonical Str runtime var --- src/eval/interpreter.zig | 93 ++++++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 32 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 93a0c8bfc9..d3805b1028 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -6130,42 +6130,17 @@ pub const Interpreter = struct { pub fn getCanonicalStrRuntimeVar(self: *Interpreter) !types.Var { if (self.canonical_str_rt_var) |cached| return cached; // Use the dynamic str_stmt index (from the Str module) - const str_decl_idx = self.builtins.str_stmt; - - // Get the statement from the Str module - const str_stmt = 
self.builtins.str_env.store.getStatement(str_decl_idx); - - // For nominal type declarations, we need to get the backing type, not the nominal wrapper - const ct_var = switch (str_stmt) { - .s_nominal_decl => blk: { - // The type of the declaration is the nominal type, but we want its backing - const nom_var = can.ModuleEnv.varFrom(str_decl_idx); - const nom_resolved = self.builtins.str_env.types.resolveVar(nom_var); - if (nom_resolved.desc.content == .structure) { - if (nom_resolved.desc.content.structure == .nominal_type) { - const nt = nom_resolved.desc.content.structure.nominal_type; - const backing_var = self.builtins.str_env.types.getNominalBackingVar(nt); - break :blk backing_var; - } - } - break :blk nom_var; - }, - else => can.ModuleEnv.varFrom(str_decl_idx), - }; + // We need the nominal type itself (not the backing type) so that method dispatch + // can look up methods like split_on, drop_prefix, etc. + const ct_var = can.ModuleEnv.varFrom(self.builtins.str_stmt); // Use str_env to translate since str_stmt is from the Str module // Cast away const - translateTypeVar doesn't actually mutate the module const nominal_rt_var = try self.translateTypeVar(@constCast(self.builtins.str_env), ct_var); - const nominal_resolved = self.runtime_types.resolveVar(nominal_rt_var); - const backing_rt_var = switch (nominal_resolved.desc.content) { - .structure => |st| switch (st) { - .nominal_type => |nt| self.runtime_types.getNominalBackingVar(nt), - else => nominal_rt_var, - }, - else => nominal_rt_var, - }; - self.canonical_str_rt_var = backing_rt_var; - return backing_rt_var; + // Return the nominal type, not the backing type - method dispatch needs the nominal + // type to look up methods like split_on, drop_prefix, etc. 
+ self.canonical_str_rt_var = nominal_rt_var; + return nominal_rt_var; } fn resolveBaseVar(self: *Interpreter, runtime_var: types.Var) types.store.ResolvedVarDesc { @@ -13680,6 +13655,60 @@ pub const Interpreter = struct { }; if (nominal_info == null) { + // Before failing, check if this is a numeric operation we can handle directly + if (is_numeric_layout) { + // Handle numeric arithmetic via type-aware evalNumericBinop as fallback + if (ba.method_ident == self.root_env.idents.plus) { + const result = try self.evalNumericBinop(.add, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.minus) { + const result = try self.evalNumericBinop(.sub, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.times) { + const result = try self.evalNumericBinop(.mul, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.div_by) { + const result = try self.evalNumericBinop(.div, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.div_trunc_by) { + const result = try self.evalNumericBinop(.div_trunc, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.rem_by) { + const result = try self.evalNumericBinop(.rem, lhs, rhs, roc_ops); + try value_stack.push(result); + return true; + } else if (ba.method_ident == self.root_env.idents.is_gt) { + const result = try self.compareNumericValues(lhs, rhs, .gt); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_gte) { + const result = try self.compareNumericValues(lhs, rhs, .gte); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try 
value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_lt) { + const result = try self.compareNumericValues(lhs, rhs, .lt); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_lte) { + const result = try self.compareNumericValues(lhs, rhs, .lte); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } else if (ba.method_ident == self.root_env.idents.is_eq) { + const result = try self.compareNumericValues(lhs, rhs, .eq); + const result_val = try self.makeBoolValue(if (ba.negate_result) !result else result); + try value_stack.push(result_val); + return true; + } + } return error.InvalidMethodReceiver; } From fd22eff4eefc213dfe202c6b95bfedbf1b4793ef Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 22:53:39 -0500 Subject: [PATCH 11/64] Fix CI failure --- src/build/roc/Builtin.roc | 36 ++++++++++++++++++------------------ src/eval/interpreter.zig | 14 ++++++++------ 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/build/roc/Builtin.roc b/src/build/roc/Builtin.roc index a041b4ed32..e77e1f7e78 100644 --- a/src/build/roc/Builtin.roc +++ b/src/build/roc/Builtin.roc @@ -331,15 +331,15 @@ Builtin :: [].{ from_numeral : Numeral -> Try(U8, [InvalidNumeral(Str), ..others]) from_str : Str -> Try(U8, [BadNumStr, ..others]) - ## List of integers beginning with this `U8` and ending with the other `U8`. - ## (Use [until] instead to end with the other `U8` minus one.) - ## Returns an empty list if this `U8` is greater than the other. + # # List of integers beginning with this `U8` and ending with the other `U8`. + # # (Use [until] instead to end with the other `U8` minus one.) + # # Returns an empty list if this `U8` is greater than the other. 
to : U8, U8 -> List(U8) to = |start, end| range_to(start, end) - ## List of integers beginning with this `U8` and ending with the other `U8` minus one. - ## (Use [to] instead to end with the other `U8` exactly, instead of minus one.) - ## Returns an empty list if this `U8` is greater than or equal to the other. + # # List of integers beginning with this `U8` and ending with the other `U8` minus one. + # # (Use [to] instead to end with the other `U8` exactly, instead of minus one.) + # # Returns an empty list if this `U8` is greater than or equal to the other. until : U8, U8 -> List(U8) until = |start, end| range_until(start, end) @@ -990,25 +990,25 @@ Builtin :: [].{ } range_to = |var $current, end| { - var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. + var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. - while $current <= end { - $answer = $answer.append($current) - $current = $current + 1 - } + while $current <= end { + $answer = $answer.append($current) + $current = $current + 1 + } - $answer + $answer } range_until = |var $current, end| { - var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. + var $answer = [] # Not bothering with List.with_capacity because this will become an iterator once those exist. - while $current < end { - $answer = $answer.append($current) - $current = $current + 1 - } + while $current < end { + $answer = $answer.append($current) + $current = $current + 1 + } - $answer + $answer } # Implemented by the compiler, does not perform bounds checks diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index fc250d1fa2..0abc93cf29 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -1481,13 +1481,15 @@ pub const Interpreter = struct { // Get the result layout - should be List(U8). 
// If return_rt_var is a flex that would default to a scalar, // we need to ensure we get a proper list layout for correct refcounting. + const result_rt_var = return_rt_var orelse { + self.triggerCrash("str_to_utf8 requires return type info", false, roc_ops); + return error.Crash; + }; const result_layout = blk: { - if (return_rt_var) |rt_var| { - const maybe_layout = try self.getRuntimeLayout(rt_var); - // If the layout is a list, use it - if (maybe_layout.tag == .list or maybe_layout.tag == .list_of_zst) { - break :blk maybe_layout; - } + const maybe_layout = try self.getRuntimeLayout(result_rt_var); + // If the layout is a list, use it + if (maybe_layout.tag == .list or maybe_layout.tag == .list_of_zst) { + break :blk maybe_layout; } // Fallback: create a proper List(U8) layout const u8_layout_idx = try self.runtime_layout_store.insertLayout(Layout.int(.u8)); From 42d4adc9c0f974a130ced404d13fe2f03dd0e06b Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 3 Dec 2025 23:34:55 -0500 Subject: [PATCH 12/64] Fix flaky tests --- src/cli/main.zig | 23 +++++------------------ test/fx/appc.roc | 15 +++++++++++++++ test/fx/hello.roc | 10 ++++++++++ 3 files changed, 30 insertions(+), 18 deletions(-) create mode 100644 test/fx/appc.roc create mode 100644 test/fx/hello.roc diff --git a/src/cli/main.zig b/src/cli/main.zig index 95abe71289..8cef8133d9 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -986,7 +986,7 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void { } else { // POSIX: Use existing file descriptor inheritance approach std.log.debug("Using POSIX file descriptor inheritance approach", .{}); - runWithPosixFdInheritance(allocs, exe_path, shm_handle, &cache_manager, args.app_args) catch |err| { + runWithPosixFdInheritance(allocs, exe_path, shm_handle, args.app_args) catch |err| { return err; }; } @@ -1132,25 +1132,12 @@ fn runWithWindowsHandleInheritance(allocs: *Allocators, exe_path: []const u8, sh } /// Run child process using POSIX file 
descriptor inheritance (existing approach for Unix) -fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_manager: *CacheManager, app_args: []const []const u8) !void { - // Get cache directory for temporary files - const temp_cache_dir = cache_manager.config.getTempDir(allocs.arena) catch |err| { - std.log.err("Failed to get temp cache directory: {}", .{err}); - return err; - }; - - // Ensure temp cache directory exists - std.fs.cwd().makePath(temp_cache_dir) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => { - std.log.err("Failed to create temp cache directory: {}", .{err}); - return err; - }, - }; - +fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, app_args: []const []const u8) !void { // Create temporary directory structure for fd communication + // Use system temp directory (not roc cache) to avoid race conditions when + // cache is cleared while child process is running std.log.debug("Creating temporary directory structure for fd communication", .{}); - const temp_exe_path = createTempDirStructure(allocs, exe_path, shm_handle, temp_cache_dir) catch |err| { + const temp_exe_path = createTempDirStructure(allocs, exe_path, shm_handle, null) catch |err| { std.log.err("Failed to create temp dir structure: {}", .{err}); return err; }; diff --git a/test/fx/appc.roc b/test/fx/appc.roc new file mode 100644 index 0000000000..844154279a --- /dev/null +++ b/test/fx/appc.roc @@ -0,0 +1,15 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout +import pf.Stderr + +str : Str -> Str +str = |s| s + +main! 
= || { + Stdout.line!(str("Hello from stdout!")) + Stdout.line!(str("Line 1 to stdout")) + Stderr.line!(str("Line 2 to stderr")) + Stdout.line!(str("Line 3 to stdout")) + Stderr.line!(str("Error from stderr!")) +} diff --git a/test/fx/hello.roc b/test/fx/hello.roc new file mode 100644 index 0000000000..66ec5bb751 --- /dev/null +++ b/test/fx/hello.roc @@ -0,0 +1,10 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout +import pf.Stdin + +main! = || { + Stdout.line!("What's your name?") + name = Stdin.line!() + Stdout.line!("Hello, ${name}") +} From 8278ccd33524408fcdc8db28027c1907654ee0c2 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 00:16:44 -0500 Subject: [PATCH 13/64] Improve flaky test fix --- src/cli/main.zig | 253 +++++++++++++++++++++++++++++++-------- src/ipc/coordination.zig | 6 +- test/fx/appc.roc | 15 --- test/fx/hello.roc | 10 -- 4 files changed, 207 insertions(+), 77 deletions(-) delete mode 100644 test/fx/appc.roc delete mode 100644 test/fx/hello.roc diff --git a/src/cli/main.zig b/src/cli/main.zig index 8cef8133d9..c531c146f6 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -387,11 +387,101 @@ fn generateRandomSuffix(allocs: *Allocators) ![]u8 { return suffix; } +/// Create a unique temporary directory with PID-based naming. +/// Returns the path to the directory (allocated from arena, no need to free). +/// Uses system temp directory to avoid race conditions when cache is cleared. 
+pub fn createUniqueTempDir(allocs: *Allocators) ![]const u8 { + // Use system temp directory (not roc cache) to avoid race conditions + const temp_dir = if (comptime is_windows) + std.process.getEnvVarOwned(allocs.arena, "TEMP") catch + std.process.getEnvVarOwned(allocs.arena, "TMP") catch try allocs.arena.dupe(u8, "C:\\Windows\\Temp") + else + std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp"); + + const normalized_temp_dir = if (comptime is_windows) + std.mem.trimRight(u8, temp_dir, "/\\") + else + std.mem.trimRight(u8, temp_dir, "/"); + + // Get the current process ID for uniqueness + const pid = if (comptime is_windows) + std.os.windows.GetCurrentProcessId() + else + std.c.getpid(); + + // Try PID-based name first, then fall back to random suffix up to 5 times + var attempt: u8 = 0; + while (attempt < 6) : (attempt += 1) { + const dir_path = if (attempt == 0) blk: { + // First attempt: use PID only + break :blk if (comptime is_windows) + try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}", .{ normalized_temp_dir, pid }) + else + try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}", .{ normalized_temp_dir, pid }); + } else blk: { + // Subsequent attempts: use PID + random 8-char suffix + const random_suffix = try generateRandomSuffix(allocs); + break :blk if (comptime is_windows) + try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix }) + else + try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}", .{ normalized_temp_dir, pid, random_suffix }); + }; + + // Try to create the directory + std.fs.cwd().makeDir(dir_path) catch |err| switch (err) { + error.PathAlreadyExists => { + // Directory already exists, try again with a new random suffix + continue; + }, + else => { + return err; + }, + }; + + return dir_path; + } + + // Failed after 6 attempts (1 with PID only, 5 with PID + random suffix) + return error.FailedToCreateUniqueTempDir; +} + +/// Write shared memory coordination 
file (.txt) next to the executable. +/// This is the file that the child process reads to find the shared memory fd. +pub fn writeFdCoordinationFile(allocs: *Allocators, temp_exe_path: []const u8, shm_handle: SharedMemoryHandle) !void { + // The coordination file is at {temp_dir}.txt where temp_dir is the directory containing the exe + const temp_dir = std.fs.path.dirname(temp_exe_path) orelse return error.InvalidPath; + + // Ensure we have no trailing slashes + var dir_path = temp_dir; + while (dir_path.len > 0 and (dir_path[dir_path.len - 1] == '/' or dir_path[dir_path.len - 1] == '\\')) { + dir_path = dir_path[0 .. dir_path.len - 1]; + } + + const fd_file_path = try std.fmt.allocPrint(allocs.arena, "{s}.txt", .{dir_path}); + + // Create the file (exclusive - fail if exists to detect collisions) + const fd_file = std.fs.cwd().createFile(fd_file_path, .{ .exclusive = true }) catch |err| switch (err) { + error.PathAlreadyExists => { + // File already exists - this is unexpected since we have unique temp dirs + std.log.err("Coordination file already exists at '{s}'", .{fd_file_path}); + return err; + }, + else => return err, + }; + defer fd_file.close(); + + // Write shared memory info to file + const fd_str = try std.fmt.allocPrint(allocs.arena, "{}\n{}", .{ shm_handle.fd, shm_handle.size }); + try fd_file.writeAll(fd_str); + try fd_file.sync(); +} + /// Create the temporary directory structure for fd communication. /// Returns the path to the executable in the temp directory (allocated from arena, no need to free). /// If a cache directory is provided, it will be used for temporary files; otherwise /// falls back to the system temp directory. -pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 { +/// The exe_display_name is the name that will appear in `ps` output (e.g., "app.roc"). 
+pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, exe_display_name: []const u8, shm_handle: SharedMemoryHandle, cache_dir: ?[]const u8) ![]const u8 { // Use provided cache dir or fall back to system temp directory const temp_dir = if (cache_dir) |dir| try allocs.arena.dupe(u8, dir) @@ -401,20 +491,34 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han else std.process.getEnvVarOwned(allocs.arena, "TMPDIR") catch try allocs.arena.dupe(u8, "/tmp"); - // Try up to 10 times to create a unique directory - var attempt: u8 = 0; - while (attempt < 10) : (attempt += 1) { - const random_suffix = try generateRandomSuffix(allocs); + const normalized_temp_dir = if (comptime is_windows) + std.mem.trimRight(u8, temp_dir, "/\\") + else + std.mem.trimRight(u8, temp_dir, "/"); - // Create the full path with .txt suffix first - const normalized_temp_dir = if (comptime is_windows) - std.mem.trimRight(u8, temp_dir, "/\\") - else - std.mem.trimRight(u8, temp_dir, "/"); - const dir_name_with_txt = if (comptime is_windows) - try std.fmt.allocPrint(allocs.arena, "{s}\\roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix }) - else - try std.fmt.allocPrint(allocs.arena, "{s}/roc-tmp-{s}.txt", .{ normalized_temp_dir, random_suffix }); + // Get the current process ID for uniqueness + const pid = if (comptime is_windows) + std.os.windows.GetCurrentProcessId() + else + std.c.getpid(); + + // Try PID-based name first, then fall back to random suffix up to 5 times + var attempt: u8 = 0; + while (attempt < 6) : (attempt += 1) { + const dir_name_with_txt = if (attempt == 0) blk: { + // First attempt: use PID only + break :blk if (comptime is_windows) + try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}.txt", .{ normalized_temp_dir, pid }) + else + try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}.txt", .{ normalized_temp_dir, pid }); + } else blk: { + // Subsequent attempts: use PID + random 8-char suffix + const random_suffix = try 
generateRandomSuffix(allocs); + break :blk if (comptime is_windows) + try std.fmt.allocPrint(allocs.arena, "{s}\\roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix }) + else + try std.fmt.allocPrint(allocs.arena, "{s}/roc-{d}-{s}.txt", .{ normalized_temp_dir, pid, random_suffix }); + }; // Get the directory path by slicing off the .txt suffix const dir_path_len = dir_name_with_txt.len - 4; // Remove ".txt" @@ -456,9 +560,8 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han try fd_file.sync(); // Ensure data is written to disk fd_file.close(); - // Create hardlink to executable in temp directory - const exe_basename = std.fs.path.basename(exe_path); - const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_basename }); + // Create hardlink to executable in temp directory with display name + const temp_exe_path = try std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name }); // Try to create a hardlink first (more efficient than copying) createHardlink(allocs, exe_path, temp_exe_path) catch { @@ -470,7 +573,7 @@ pub fn createTempDirStructure(allocs: *Allocators, exe_path: []const u8, shm_han return temp_exe_path; } - // Failed after 10 attempts + // Failed after 6 attempts (1 with PID only, 5 with PID + random suffix) return error.FailedToCreateUniqueTempDir; } @@ -724,26 +827,51 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void { }, }; - // Generate executable name based on the roc file path - // TODO use something more interesting like a hash from the platform.main or platform/host.a etc - const exe_base_name = std.fmt.allocPrint(allocs.arena, "roc_run_{}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| { - std.log.err("Failed to generate executable name: {}", .{err}); - return err; - }; + // The final executable name seen in `ps` is the roc filename (e.g., "app.roc") + const exe_display_name = std.fs.path.basename(args.path); - // Add .exe extension on Windows - const 
exe_name = if (builtin.target.os.tag == .windows) - std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_base_name}) catch |err| { - std.log.err("Failed to generate executable name with extension: {}", .{err}); + // Display name for temp directory (what shows in ps) + const exe_display_name_with_ext = if (builtin.target.os.tag == .windows) + std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_display_name}) catch |err| { + std.log.err("Failed to generate display name with extension: {}", .{err}); return err; } else - allocs.arena.dupe(u8, exe_base_name) catch |err| { - std.log.err("Failed to duplicate executable name: {}", .{err}); + allocs.arena.dupe(u8, exe_display_name) catch |err| { + std.log.err("Failed to duplicate display name: {}", .{err}); return err; }; - const exe_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_name }) catch |err| { + // Cache executable name uses hash of path (no PID - collision is fine since same content) + const exe_cache_name = std.fmt.allocPrint(allocs.arena, "roc_{x}", .{std.hash.crc.Crc32.hash(args.path)}) catch |err| { + std.log.err("Failed to generate cache executable name: {}", .{err}); + return err; + }; + + const exe_cache_name_with_ext = if (builtin.target.os.tag == .windows) + std.fmt.allocPrint(allocs.arena, "{s}.exe", .{exe_cache_name}) catch |err| { + std.log.err("Failed to generate cache name with extension: {}", .{err}); + return err; + } + else + allocs.arena.dupe(u8, exe_cache_name) catch |err| { + std.log.err("Failed to duplicate cache name: {}", .{err}); + return err; + }; + + const exe_cache_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, exe_cache_name_with_ext }) catch |err| { + std.log.err("Failed to create cache executable path: {}", .{err}); + return err; + }; + + // Create unique temp directory for this build (uses PID for uniqueness) + const temp_dir_path = createUniqueTempDir(allocs) catch |err| { + std.log.err("Failed to create temp directory: {}", .{err}); + return err; + }; + + // The 
executable is built directly in the temp dir with the display name + const exe_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, exe_display_name_with_ext }) catch |err| { std.log.err("Failed to create executable path: {}", .{err}); return err; }; @@ -780,15 +908,26 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void { return error.NoPlatformSource; } - // Check if the interpreter executable already exists (cached) - const exe_exists = if (args.no_cache) false else blk: { - std.fs.accessAbsolute(exe_path, .{}) catch { + // Check if the interpreter executable already exists in cache + const cache_exists = if (args.no_cache) false else blk: { + std.fs.accessAbsolute(exe_cache_path, .{}) catch { break :blk false; }; break :blk true; }; - if (!exe_exists) { + if (cache_exists) { + // Cached executable exists - hardlink from cache to temp dir + std.log.debug("Using cached executable: {s}", .{exe_cache_path}); + createHardlink(allocs, exe_cache_path, exe_path) catch |err| { + // If hardlinking fails, fall back to copying + std.log.debug("Hardlink from cache failed, copying: {}", .{err}); + std.fs.cwd().copyFile(exe_cache_path, std.fs.cwd(), exe_path, .{}) catch |copy_err| { + std.log.err("Failed to copy cached executable: {}", .{copy_err}); + return copy_err; + }; + }; + } else { // Check for cached shim library, extract if not present const shim_filename = if (builtin.target.os.tag == .windows) "roc_shim.lib" else "libroc_shim.a"; @@ -948,6 +1087,22 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void { return err; }, }; + + // After building, hardlink to cache for future runs + // Force-hardlink (delete existing first) since hash collision means identical content + std.log.debug("Caching executable to: {s}", .{exe_cache_path}); + std.fs.cwd().deleteFile(exe_cache_path) catch |err| switch (err) { + error.FileNotFound => {}, // OK, doesn't exist + else => std.log.debug("Could not delete existing cache file: {}", .{err}), + }; + 
createHardlink(allocs, exe_path, exe_cache_path) catch |err| { + // If hardlinking fails, fall back to copying + std.log.debug("Hardlink to cache failed, copying: {}", .{err}); + std.fs.cwd().copyFile(exe_path, std.fs.cwd(), exe_cache_path, .{}) catch |copy_err| { + // Non-fatal - just means future runs won't be cached + std.log.debug("Failed to copy to cache: {}", .{copy_err}); + }; + }; } // Set up shared memory with ModuleEnv @@ -1132,16 +1287,16 @@ fn runWithWindowsHandleInheritance(allocs: *Allocators, exe_path: []const u8, sh } /// Run child process using POSIX file descriptor inheritance (existing approach for Unix) +/// The exe_path should already be in a unique temp directory created by createUniqueTempDir. fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_handle: SharedMemoryHandle, app_args: []const []const u8) !void { - // Create temporary directory structure for fd communication - // Use system temp directory (not roc cache) to avoid race conditions when - // cache is cleared while child process is running - std.log.debug("Creating temporary directory structure for fd communication", .{}); - const temp_exe_path = createTempDirStructure(allocs, exe_path, shm_handle, null) catch |err| { - std.log.err("Failed to create temp dir structure: {}", .{err}); + // Write the coordination file (.txt) next to the executable + // The executable is already in a unique temp directory + std.log.debug("Writing fd coordination file for: {s}", .{exe_path}); + writeFdCoordinationFile(allocs, exe_path, shm_handle) catch |err| { + std.log.err("Failed to write fd coordination file: {}", .{err}); return err; }; - std.log.debug("Temporary executable created at: {s}", .{temp_exe_path}); + std.log.debug("Coordination file written successfully", .{}); // Configure fd inheritance var flags = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0); @@ -1162,7 +1317,7 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand 
std.log.err("Failed to allocate argv: {}", .{err}); return err; }; - argv[0] = temp_exe_path; + argv[0] = exe_path; for (app_args, 0..) |arg, i| { argv[1 + i] = arg; } @@ -1179,10 +1334,10 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand child.stderr_behavior = .Inherit; // Spawn the child process - std.log.debug("Spawning child process: {s} with {} app args", .{ temp_exe_path, app_args.len }); + std.log.debug("Spawning child process: {s} with {} app args", .{ exe_path, app_args.len }); std.log.debug("Child process working directory: {s}", .{child.cwd.?}); child.spawn() catch |err| { - std.log.err("Failed to spawn {s}: {}", .{ temp_exe_path, err }); + std.log.err("Failed to spawn {s}: {}", .{ exe_path, err }); return err; }; std.log.debug("Child process spawned successfully (PID: {})", .{child.id}); @@ -1200,12 +1355,12 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand std.log.debug("Child process completed successfully", .{}); } else { // Propagate the exit code from the child process to our parent - std.log.debug("Child process {s} exited with code: {}", .{ temp_exe_path, exit_code }); + std.log.debug("Child process {s} exited with code: {}", .{ exe_path, exit_code }); std.process.exit(exit_code); } }, .Signal => |signal| { - std.log.err("Child process {s} killed by signal: {}", .{ temp_exe_path, signal }); + std.log.err("Child process {s} killed by signal: {}", .{ exe_path, signal }); if (signal == 11) { // SIGSEGV std.log.err("Child process crashed with segmentation fault (SIGSEGV)", .{}); } else if (signal == 6) { // SIGABRT @@ -1217,11 +1372,11 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand std.process.exit(128 +| @as(u8, @truncate(signal))); }, .Stopped => |signal| { - std.log.err("Child process {s} stopped by signal: {}", .{ temp_exe_path, signal }); + std.log.err("Child process {s} stopped by signal: {}", .{ exe_path, signal }); return 
error.ProcessStopped; }, .Unknown => |status| { - std.log.err("Child process {s} terminated with unknown status: {}", .{ temp_exe_path, status }); + std.log.err("Child process {s} terminated with unknown status: {}", .{ exe_path, status }); return error.ProcessUnknownTermination; }, } diff --git a/src/ipc/coordination.zig b/src/ipc/coordination.zig index c04206bba6..46c5a8300d 100644 --- a/src/ipc/coordination.zig +++ b/src/ipc/coordination.zig @@ -100,9 +100,9 @@ fn readFdInfoFromFile(allocator: std.mem.Allocator) CoordinationError!FdInfo { }; const dir_basename = std.fs.path.basename(exe_dir); - // Verify it has the expected prefix - if (!std.mem.startsWith(u8, dir_basename, "roc-tmp-")) { - std.log.err("Unexpected directory name: expected 'roc-tmp-*', got '{s}'", .{dir_basename}); + // Verify it has the expected prefix (roc-{pid} or roc-{pid}-{suffix}) + if (!std.mem.startsWith(u8, dir_basename, "roc-")) { + std.log.err("Unexpected directory name: expected 'roc-*', got '{s}'", .{dir_basename}); return error.FdInfoReadFailed; } diff --git a/test/fx/appc.roc b/test/fx/appc.roc deleted file mode 100644 index 844154279a..0000000000 --- a/test/fx/appc.roc +++ /dev/null @@ -1,15 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout -import pf.Stderr - -str : Str -> Str -str = |s| s - -main! = || { - Stdout.line!(str("Hello from stdout!")) - Stdout.line!(str("Line 1 to stdout")) - Stderr.line!(str("Line 2 to stderr")) - Stdout.line!(str("Line 3 to stdout")) - Stderr.line!(str("Error from stderr!")) -} diff --git a/test/fx/hello.roc b/test/fx/hello.roc deleted file mode 100644 index 66ec5bb751..0000000000 --- a/test/fx/hello.roc +++ /dev/null @@ -1,10 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout -import pf.Sdtin - -main! 
= || { - Stdout.line!("What's your name?") - name = Stdin.line!() - Stdout.line!("Hello, ${name}") -} From 71cd2cd2f478c65a55dfced12289a70d47402101 Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Thu, 4 Dec 2025 16:56:27 +1100 Subject: [PATCH 14/64] try fix --- src/cli/main.zig | 17 +++++++++++------ src/ipc/platform.zig | 18 ++++++++++++++---- test/fx-open/app_with_custom_error.roc | 10 +++++----- test/fx-open/platform/main.roc | 5 ++++- 4 files changed, 34 insertions(+), 16 deletions(-) diff --git a/src/cli/main.zig b/src/cli/main.zig index 95abe71289..2c0b258ea4 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -1156,16 +1156,21 @@ fn runWithPosixFdInheritance(allocs: *Allocators, exe_path: []const u8, shm_hand }; std.log.debug("Temporary executable created at: {s}", .{temp_exe_path}); - // Configure fd inheritance - var flags = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0); - if (flags < 0) { + // Configure fd inheritance - clear FD_CLOEXEC so child process inherits the fd + // NOTE: The doNotOptimizeAway calls are required to prevent the ReleaseFast + // optimizer from incorrectly optimizing away or reordering the fcntl calls. 
+ const getfd_result = posix.fcntl(shm_handle.fd, posix.F_GETFD, 0); + std.mem.doNotOptimizeAway(&getfd_result); + if (getfd_result < 0) { std.log.err("Failed to get fd flags: {}", .{c._errno().*}); return error.FdConfigFailed; } - flags &= ~@as(c_int, posix.FD_CLOEXEC); - - if (posix.fcntl(shm_handle.fd, posix.F_SETFD, flags) < 0) { + const new_flags = getfd_result & ~@as(c_int, posix.FD_CLOEXEC); + std.mem.doNotOptimizeAway(&new_flags); + const setfd_result = posix.fcntl(shm_handle.fd, posix.F_SETFD, new_flags); + std.mem.doNotOptimizeAway(&setfd_result); + if (setfd_result < 0) { std.log.err("Failed to set fd flags: {}", .{c._errno().*}); return error.FdConfigFailed; } diff --git a/src/ipc/platform.zig b/src/ipc/platform.zig index 85ee49987d..ded07d2446 100644 --- a/src/ipc/platform.zig +++ b/src/ipc/platform.zig @@ -310,11 +310,21 @@ pub fn mapMemory(handle: Handle, size: usize, base_addr: ?*anyopaque) SharedMemo posix.MAP_SHARED, handle, 0, - ) orelse { - std.log.err("POSIX: Failed to map shared memory (size: {})", .{size}); + ); + // mmap returns MAP_FAILED (which is (void *)-1) on error, not null + // Need to check for both null and MAP_FAILED + if (ptr == null) { + std.log.err("POSIX: Failed to map shared memory - null returned (size: {})", .{size}); return error.MmapFailed; - }; - return ptr; + } + const ptr_value = @intFromPtr(ptr.?); + if (ptr_value == std.math.maxInt(usize)) { + // This is MAP_FAILED (-1 cast to pointer) + const errno = std.c._errno().*; + std.log.err("POSIX: Failed to map shared memory - MAP_FAILED (size: {}, fd: {}, errno: {})", .{ size, handle, errno }); + return error.MmapFailed; + } + return ptr.?; }, else => return error.UnsupportedPlatform, } diff --git a/test/fx-open/app_with_custom_error.roc b/test/fx-open/app_with_custom_error.roc index 0ade3fe294..c43a72ea47 100644 --- a/test/fx-open/app_with_custom_error.roc +++ b/test/fx-open/app_with_custom_error.roc @@ -5,9 +5,9 @@ import pf.Stdout # Test: both Exit and CustomError in 
different branches # This triggers the type error main! = |args| { - if List.is_empty(args) { - Err(Exit(42)) - } else { - Err(CustomError) - } + if List.is_empty(args) { + Err(Exit(42)) + } else { + Err(CustomError) + } } diff --git a/test/fx-open/platform/main.roc b/test/fx-open/platform/main.roc index 8dadecf2ad..d51e0c3f47 100644 --- a/test/fx-open/platform/main.roc +++ b/test/fx-open/platform/main.roc @@ -13,5 +13,8 @@ main_for_host! = |args| match main!(args) { Ok({}) => 0 Err(Exit(code)) => code - _ => 1 + Err(other) => { + Stderr.line!("exited with other error: ${inspect other}) + 1 + } } From df980b284f80ea56cb5a067ee2fac6945a5b64a6 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 01:01:39 -0500 Subject: [PATCH 15/64] Add a new test, fix some tests --- src/cli/main.zig | 25 ++++++++--------------- src/eval/interpreter.zig | 5 ++++- test/snapshots/repl/numeric_sum_to_str.md | 22 ++++++++++++++++++++ 3 files changed, 34 insertions(+), 18 deletions(-) create mode 100644 test/snapshots/repl/numeric_sum_to_str.md diff --git a/src/cli/main.zig b/src/cli/main.zig index c531c146f6..077f3e7661 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -929,32 +929,23 @@ fn rocRun(allocs: *Allocators, args: cli_args.RunArgs) !void { }; } else { - // Check for cached shim library, extract if not present + // Extract shim library to temp dir to avoid race conditions const shim_filename = if (builtin.target.os.tag == .windows) "roc_shim.lib" else "libroc_shim.a"; - const shim_path = std.fs.path.join(allocs.arena, &.{ exe_cache_dir, shim_filename }) catch |err| { + const shim_path = std.fs.path.join(allocs.arena, &.{ temp_dir_path, shim_filename }) catch |err| { std.log.err("Failed to create shim library path: {}", .{err}); return err; }; - // Extract shim if not cached or if --no-cache is used - const shim_exists = if (args.no_cache) false else blk: { - std.fs.cwd().access(shim_path, .{}) catch { - break :blk false; - }; - break :blk true; + // Always 
extract to temp dir (unique per process, no race condition) + extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| { + std.log.err("Failed to extract read roc file path shim library: {}", .{err}); + return err; }; - if (!shim_exists) { - // Shim not found in cache or cache disabled, extract it - extractReadRocFilePathShimLibrary(allocs, shim_path) catch |err| { - std.log.err("Failed to extract read roc file path shim library: {}", .{err}); - return err; - }; - } - // Generate platform host shim using the detected entrypoints + // Use temp dir to avoid race conditions when multiple processes run in parallel - const platform_shim_path = generatePlatformHostShim(allocs, exe_cache_dir, entrypoints.items, shim_target) catch |err| { + const platform_shim_path = generatePlatformHostShim(allocs, temp_dir_path, entrypoints.items, shim_target) catch |err| { std.log.err("Failed to generate platform host shim: {}", .{err}); return err; }; diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 0abc93cf29..51db5d2ffe 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -6596,7 +6596,10 @@ pub const Interpreter = struct { } // Copy the value to pass to the method - const copied_value = self.pushCopy(value, roc_ops) catch return null; + // Important: use the correct rt_var (from the type system) not value.rt_var + // (which may be a fresh variable from record field access) + var copied_value = self.pushCopy(value, roc_ops) catch return null; + copied_value.rt_var = rt_var; // Bind the parameter self.bindings.append(.{ diff --git a/test/snapshots/repl/numeric_sum_to_str.md b/test/snapshots/repl/numeric_sum_to_str.md new file mode 100644 index 0000000000..b34b17895a --- /dev/null +++ b/test/snapshots/repl/numeric_sum_to_str.md @@ -0,0 +1,22 @@ +# META +~~~ini +description=Numeric sum then convert to I16 string +type=repl +~~~ +# SOURCE +~~~roc +» a = 4 +» b = 5 +» sum = a + b +» I16.to_str(sum) +~~~ +# OUTPUT +assigned `a` +--- 
+assigned `b` +--- +assigned `sum` +--- +"9" +# PROBLEMS +NIL From 70ec5156f719aa3a783c0e0fec2ef4024e5f4fe1 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 07:51:44 -0500 Subject: [PATCH 16/64] Fix more tests --- src/eval/interpreter.zig | 250 +++++++++++++++++++++++++++++++++--- src/eval/test/eval_test.zig | 4 +- 2 files changed, 236 insertions(+), 18 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index e6e513fd9a..34b374f16e 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -2150,27 +2150,89 @@ pub const Interpreter = struct { // Null pointer from list_get_unsafe is a compiler bug - bounds should have been checked std.debug.assert(elem_ptr != null); - // Get element runtime type. - // Priority: return_rt_var (from call site), then extract from list's rt_var, - // finally fall back to fresh type. + // Get element runtime type from the list's attached type. + // Priority: extract from list's concrete type first, as it has actual type info. + // Only fall back to return_rt_var if it's concrete and list type is polymorphic. 
const elem_rt_var: types.Var = blk: { - // First try return_rt_var (the declared return type from call site) - if (return_rt_var) |rv| { - break :blk rv; - } - // Try extracting from the list's attached type + // First try extracting from the list's attached type - this has concrete type info const list_resolved = self.runtime_types.resolveVar(list_arg.rt_var); if (list_resolved.desc.content == .structure) { if (list_resolved.desc.content.structure == .nominal_type) { const nom = list_resolved.desc.content.structure.nominal_type; const vars = self.runtime_types.sliceVars(nom.vars.nonempty); - // vars[0] is backing var, vars[1] is element type - if (vars.len >= 2) { - break :blk vars[1]; + // For List(elem), vars[0] is backing, vars[1] is element type + if (vars.len == 2) { + const elem_var = vars[1]; + // Follow aliases to check if underlying type is concrete + var elem_resolved = self.runtime_types.resolveVar(elem_var); + var unwrap_count: u32 = 0; + while (elem_resolved.desc.content == .alias and unwrap_count < 100) : (unwrap_count += 1) { + const backing = self.runtime_types.getAliasBackingVar(elem_resolved.desc.content.alias); + elem_resolved = self.runtime_types.resolveVar(backing); + } + // If element type is concrete (structure or alias to structure), create a fresh copy + // to avoid corruption from later unifications during equality checking + if (elem_resolved.desc.content == .structure) { + const fresh_var = try self.runtime_types.freshFromContent(elem_resolved.desc.content); + break :blk fresh_var; + } + // If element type got corrupted (content is .err), skip to fallbacks + // instead of using the corrupted type + if (elem_resolved.desc.content != .err) { + // If element type is a flex var, try flex_type_context for mapped type + if (elem_resolved.desc.content == .flex and self.flex_type_context.count() > 0) { + var it = self.flex_type_context.iterator(); + while (it.next()) |entry| { + const mapped_var = entry.value_ptr.*; + const mapped_resolved = 
self.runtime_types.resolveVar(mapped_var); + if (mapped_resolved.desc.content == .structure) { + const fresh_var = try self.runtime_types.freshFromContent(mapped_resolved.desc.content); + break :blk fresh_var; + } + } + } + // Element type is not concrete but we have it from the list + // Still create a fresh copy to avoid corruption + const fresh_var = try self.runtime_types.freshFromContent(elem_resolved.desc.content); + break :blk fresh_var; + } + // Element type is corrupted (.err) - fall through to other fallbacks } } } - break :blk try self.runtime_types.fresh(); + // List came from polymorphic context - try return_rt_var if it's concrete + if (return_rt_var) |rv| { + var rv_resolved = self.runtime_types.resolveVar(rv); + var unwrap_count: u32 = 0; + while (rv_resolved.desc.content == .alias and unwrap_count < 100) : (unwrap_count += 1) { + const backing = self.runtime_types.getAliasBackingVar(rv_resolved.desc.content.alias); + rv_resolved = self.runtime_types.resolveVar(backing); + } + if (rv_resolved.desc.content == .structure) { + break :blk rv; + } + } + // Check flex_type_context for concrete type + if ((list_resolved.desc.content == .flex or list_resolved.desc.content == .rigid) and + self.flex_type_context.count() > 0) + { + var it = self.flex_type_context.iterator(); + while (it.next()) |entry| { + const mapped_var = entry.value_ptr.*; + const mapped_resolved = self.runtime_types.resolveVar(mapped_var); + if (mapped_resolved.desc.content == .structure and + mapped_resolved.desc.content.structure == .nominal_type) + { + const nom = mapped_resolved.desc.content.structure.nominal_type; + const vars = self.runtime_types.sliceVars(nom.vars.nonempty); + if (vars.len == 2) { + break :blk vars[1]; + } + } + } + } + // Final fallback: create type from layout (handles corrupted types) + break :blk try self.createTypeFromLayout(elem_layout); }; // Create StackValue pointing to the element @@ -7458,6 +7520,107 @@ pub const Interpreter = struct { return try 
self.runtime_types.freshFromContent(list_content); } + /// Create List(element_type) for runtime type propagation. + /// Used when a list's type variable resolved to flex and we need a proper nominal type. + fn createListTypeWithElement(self: *Interpreter, element_rt_var: types.Var) !types.Var { + const origin_module_id = self.root_env.idents.builtin_module; + + // Create Builtin.List type with the given element type + const list_type_name = "Builtin.List"; + const list_type_name_ident = try self.runtime_layout_store.env.insertIdent(base_pkg.Ident.for_text(list_type_name)); + const list_type_ident = types.TypeIdent{ .ident_idx = list_type_name_ident }; + + const empty_tag_union_content = types.Content{ .structure = .empty_tag_union }; + const ext_var = try self.runtime_types.freshFromContent(empty_tag_union_content); + const empty_tag_union = types.TagUnion{ + .tags = types.Tag.SafeMultiList.Range.empty(), + .ext = ext_var, + }; + const list_backing_content = types.Content{ .structure = .{ .tag_union = empty_tag_union } }; + const list_backing_var = try self.runtime_types.freshFromContent(list_backing_content); + + // Create a fresh copy of the element type to avoid corruption from later unifications. + // If we use the original element_rt_var directly, it can be unified with other types + // during evaluation (e.g., during equality checking), corrupting this list type. + const elem_resolved = self.runtime_types.resolveVar(element_rt_var); + const fresh_elem_var = try self.runtime_types.freshFromContent(elem_resolved.desc.content); + + // List has one type argument (element type) + const type_args: [1]types.Var = .{fresh_elem_var}; + const list_content = try self.runtime_types.mkNominal(list_type_ident, list_backing_var, &type_args, origin_module_id, false); + return try self.runtime_types.freshFromContent(list_content); + } + + /// Create a type variable from a layout. Used as a fallback when type info is corrupted. 
+ /// Recursively handles nested types (e.g., List(List(Dec))). + fn createTypeFromLayout(self: *Interpreter, lay: layout.Layout) !types.Var { + return switch (lay.tag) { + .list, .list_of_zst => blk: { + // Get element layout and recursively create element type + const elem_layout = self.runtime_layout_store.getLayout(lay.data.list); + const elem_type = try self.createTypeFromLayout(elem_layout); + // Create List type with element type + break :blk try self.createListTypeWithElement(elem_type); + }, + .scalar => blk: { + const scalar = lay.data.scalar; + switch (scalar.tag) { + .int => { + const type_name = switch (scalar.data.int) { + .i8 => "I8", + .i16 => "I16", + .i32 => "I32", + .i64 => "I64", + .i128 => "I128", + .u8 => "U8", + .u16 => "U16", + .u32 => "U32", + .u64 => "U64", + .u128 => "U128", + }; + const content = try self.mkNumberTypeContentRuntime(type_name); + break :blk try self.runtime_types.freshFromContent(content); + }, + .frac => { + const type_name = switch (scalar.data.frac) { + .dec => "Dec", + .f32 => "F32", + .f64 => "F64", + }; + const content = try self.mkNumberTypeContentRuntime(type_name); + break :blk try self.runtime_types.freshFromContent(content); + }, + .str => { + // Create Str type + const origin_module_id = self.root_env.idents.builtin_module; + const str_type_name = "Builtin.Str"; + const str_type_name_ident = try self.runtime_layout_store.env.insertIdent(base_pkg.Ident.for_text(str_type_name)); + const str_type_ident = types.TypeIdent{ .ident_idx = str_type_name_ident }; + const empty_tag_union_content = types.Content{ .structure = .empty_tag_union }; + const ext_var = try self.runtime_types.freshFromContent(empty_tag_union_content); + const empty_tag_union = types.TagUnion{ + .tags = types.Tag.SafeMultiList.Range.empty(), + .ext = ext_var, + }; + const str_backing_content = types.Content{ .structure = .{ .tag_union = empty_tag_union } }; + const str_backing_var = try self.runtime_types.freshFromContent(str_backing_content); + 
const no_type_args: []const types.Var = &.{}; + const str_content = try self.runtime_types.mkNominal(str_type_ident, str_backing_var, no_type_args, origin_module_id, false); + break :blk try self.runtime_types.freshFromContent(str_content); + }, + else => { + // Default to fresh var for unknown scalar types + break :blk try self.runtime_types.fresh(); + }, + } + }, + else => { + // For other layouts, create a fresh var (fallback) + return try self.runtime_types.fresh(); + }, + }; + } + /// Create nominal number type content for runtime types (e.g., Dec, I64, F64) fn mkNumberTypeContentRuntime(self: *Interpreter, type_name: []const u8) !types.Content { // Use root_env.idents for consistent module reference @@ -10619,6 +10782,7 @@ pub const Interpreter = struct { const ct_var = can.ModuleEnv.varFrom(expr_idx); break :blk try self.translateTypeVar(self.env, ct_var); }; + var layout_val = try self.getRuntimeLayout(layout_rt_var); // If the layout isn't a numeric type (e.g., ZST from unconstrained flex/rigid), @@ -10663,6 +10827,40 @@ pub const Interpreter = struct { else => return error.TypeMismatch, } value.is_initialized = true; + + // If the rt_var is still flex but we evaluated to a numeric type, + // update the rt_var to a concrete numeric type for method dispatch. + // This is needed because getRuntimeLayout defaults flex vars to Dec layout + // but doesn't update the rt_var itself. 
+ const rt_resolved = self.runtime_types.resolveVar(value.rt_var); + if (rt_resolved.desc.content == .flex) { + // Create concrete type based on the layout we used + const concrete_rt_var = switch (layout_val.tag) { + .scalar => switch (layout_val.data.scalar.tag) { + .int => switch (layout_val.data.scalar.data.int) { + .i8 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("I8")), + .i16 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("I16")), + .i32 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("I32")), + .i64 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("I64")), + .i128 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("I128")), + .u8 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("U8")), + .u16 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("U16")), + .u32 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("U32")), + .u64 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("U64")), + .u128 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("U128")), + }, + .frac => switch (layout_val.data.scalar.data.frac) { + .f32 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("F32")), + .f64 => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("F64")), + .dec => try self.runtime_types.freshFromContent(try self.mkNumberTypeContentRuntime("Dec")), + }, + else => value.rt_var, + }, + else => value.rt_var, + }; + value.rt_var = concrete_rt_var; + } + return value; } @@ -12064,9 +12262,19 @@ pub const Interpreter = struct { .elem_rt_var = lc.elem_rt_var, .list_rt_var = lc.list_rt_var, } } }); + // Only pass expected_rt_var if it's concrete (not flex/rigid). 
+ // This ensures nested lists compute their own concrete types + // instead of inheriting a polymorphic type from the outer list. + const elem_expected_rt_var: ?types.Var = blk: { + const elem_resolved = self.runtime_types.resolveVar(lc.elem_rt_var); + if (elem_resolved.desc.content == .flex or elem_resolved.desc.content == .rigid) { + break :blk null; + } + break :blk lc.elem_rt_var; + }; try work_stack.push(.{ .eval_expr = .{ .expr_idx = lc.remaining_elems[0], - .expected_rt_var = lc.elem_rt_var, + .expected_rt_var = elem_expected_rt_var, } }); } else { // All elements evaluated - finalize the list @@ -12143,9 +12351,21 @@ pub const Interpreter = struct { val.decref(&self.runtime_layout_store, roc_ops); } - // Set the runtime type variable so method dispatch works correctly + // Set the runtime type variable so method dispatch works correctly. + // Always use the actual element's rt_var to construct the list type, + // since it reflects the concrete types from evaluation. + var final_list_rt_var = lc.list_rt_var; + const first_elem_rt_resolved = self.runtime_types.resolveVar(values[0].rt_var); + + // If actual element has a concrete type (not flex), create a new List type + // with the concrete element type. Always use createListTypeWithElement to + // ensure fresh backing vars are created (reusing backing vars causes corruption). 
+ if (first_elem_rt_resolved.desc.content != .flex) { + final_list_rt_var = try self.createListTypeWithElement(values[0].rt_var); + } + var result = dest; - result.rt_var = lc.list_rt_var; + result.rt_var = final_list_rt_var; try value_stack.push(result); } } diff --git a/src/eval/test/eval_test.zig b/src/eval/test/eval_test.zig index f39e8f511d..c23d157558 100644 --- a/src/eval/test/eval_test.zig +++ b/src/eval/test/eval_test.zig @@ -1366,9 +1366,7 @@ test "list equality - single element list - regression" { test "list equality - nested lists - regression" { // Regression test for segfault when comparing nested lists // Bug report: `_bool = [[1],[2]] == [[1],[2]]` - // TODO: Fix element type extraction in list_get_unsafe for nested lists with ranges branch - return error.SkipZigTest; - // try runExpectBool("[[1],[2]] == [[1],[2]]", true, .no_trace); + try runExpectBool("[[1],[2]] == [[1],[2]]", true, .no_trace); } test "if block with local bindings - regression" { From fc7c9410998148b4332f81b8cf23c350658263db Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 08:02:45 -0500 Subject: [PATCH 17/64] Fix Windows --- src/cli/main.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cli/main.zig b/src/cli/main.zig index 077f3e7661..78ec396b33 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -342,7 +342,7 @@ fn createHardlink(allocs: *Allocators, source: []const u8, dest: []const u8) !vo lpFileName: [*:0]const u16, lpExistingFileName: [*:0]const u16, lpSecurityAttributes: ?*anyopaque, - ) callconv(std.os.windows.WINAPI) std.os.windows.BOOL; + ) callconv(.winapi) std.os.windows.BOOL; }; if (kernel32.CreateHardLinkW(dest_w, source_w, null) == 0) { From 0a68d2a6b585b23016173303a6fb6c425d936925 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 09:34:06 -0500 Subject: [PATCH 18/64] Fix IterationGuard test in release mode --- src/types/debug.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/src/types/debug.zig b/src/types/debug.zig index c3d064d364..7456e28574 100644 --- a/src/types/debug.zig +++ b/src/types/debug.zig @@ -63,5 +63,8 @@ test "IterationGuard does not panic for normal iteration counts" { while (i < 1000) : (i += 1) { guard.tick(); } - try std.testing.expectEqual(@as(u32, 1000), guard.getCount()); + // In release builds, tick() is a no-op so count stays at 0. + // In debug builds, count should be 1000. + const expected: u32 = if (builtin.mode == .Debug) 1000 else 0; + try std.testing.expectEqual(expected, guard.getCount()); } From ac69c1b01e6b64742f4c1d833b68ea53f413ea12 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 10:15:05 -0500 Subject: [PATCH 19/64] Revise generalization docs and add a test --- src/types/generalize.zig | 8 +++-- .../numeric_let_generalize_in_block.md | 33 +++++++++++++++++++ .../repl/numeric_multiple_diff_types.md | 14 ++++++++ test/snapshots/repl/numeric_sum_to_str.md | 8 +++++ 4 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 test/snapshots/numeric_let_generalize_in_block.md diff --git a/src/types/generalize.zig b/src/types/generalize.zig index ff4609110d..830eca732e 100644 --- a/src/types/generalize.zig +++ b/src/types/generalize.zig @@ -212,9 +212,11 @@ pub const Generalizer = struct { // Without this, let-generalization would create a fresh copy at each use, // leaving the original as an unconstrained flex var that defaults to Dec. // - // However, inside lambdas (rank > top_level), we DO generalize numeric - // literals so that polymorphic functions like `|a| a + 1` work correctly. - // The numeric literal takes on the type of the function parameter. + // However, at rank > top_level (inside lambdas OR inside nested blocks), + // we DO generalize numeric literals. 
This allows: + // - Polymorphic functions like `|a| a + 1` to work correctly + // - Numeric literals in blocks like `{ n = 42; use_as_i64(n); use_as_dec(n) }` + // to be used polymorphically within that block's scope. try var_pool.addVarToRank(resolved.var_, resolved.desc.rank); } else { // Rank unchanged - safe to generalize diff --git a/test/snapshots/numeric_let_generalize_in_block.md b/test/snapshots/numeric_let_generalize_in_block.md new file mode 100644 index 0000000000..fb540ad0b4 --- /dev/null +++ b/test/snapshots/numeric_let_generalize_in_block.md @@ -0,0 +1,33 @@ +# META +~~~ini +description=Numeric let-generalization inside nested block (rank > top_level) +type=expr +~~~ + +# NOTES +This test demonstrates that numeric literals inside nested blocks (rank > top_level) +ARE let-generalized, allowing the same numeric variable to be used with different +concrete numeric types within the block. + +This is different from top-level behavior where numeric literals stay monomorphic +so that later usages can constrain them to a specific type. + +The key insight is that rank > top_level can occur in two situations: +1. Inside lambdas (e.g., `|a| a + 1`) +2. Inside nested blocks (e.g., `{ n = 42; ... }`) + +In both cases, numeric literals are generalized. + +# SOURCE +~~~roc +{ + n = 42 + a = I64.to_str(n) + b = Dec.to_str(n) + Str.concat(a, b) +} +~~~ +# EXPECTED +NIL +# PROBLEMS +NIL diff --git a/test/snapshots/repl/numeric_multiple_diff_types.md b/test/snapshots/repl/numeric_multiple_diff_types.md index a41bc4869a..7d287046e8 100644 --- a/test/snapshots/repl/numeric_multiple_diff_types.md +++ b/test/snapshots/repl/numeric_multiple_diff_types.md @@ -3,6 +3,20 @@ description=Numeric without annotation, multiple uses with different types (each use gets fresh type) type=repl ~~~ + +# NOTES +This test demonstrates that in the REPL, a numeric literal assigned without +annotation can be used with different concrete types in subsequent statements. 
+ +Each use of `x` gets a fresh instantiation of the type, allowing it to be +constrained to I64 in one statement and Dec in another. + +This is the expected behavior for polymorphic numeric literals - each use +site gets its own copy of the type variable that can be independently constrained. + +Compare this to the non-REPL test `numeric_let_generalize_in_block.md` which +demonstrates the same polymorphic behavior inside nested blocks. + # SOURCE ~~~roc » x = 42 diff --git a/test/snapshots/repl/numeric_sum_to_str.md b/test/snapshots/repl/numeric_sum_to_str.md index b34b17895a..ed79aeea5d 100644 --- a/test/snapshots/repl/numeric_sum_to_str.md +++ b/test/snapshots/repl/numeric_sum_to_str.md @@ -3,6 +3,14 @@ description=Numeric sum then convert to I16 string type=repl ~~~ + +# NOTES +This test demonstrates numeric operations in the REPL where the final usage +constrains the type to I16. + +The numeric literals `a`, `b`, and `sum` are polymorphic until `I16.to_str(sum)` +constrains the result to I16. + # SOURCE ~~~roc » a = 4 From 5473853fb54efeaf2e56f247c4f61e064d653d6b Mon Sep 17 00:00:00 2001 From: Luke Boswell Date: Fri, 5 Dec 2025 02:30:23 +1100 Subject: [PATCH 20/64] add required package for valgrind --- .github/workflows/ci_zig.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci_zig.yml b/.github/workflows/ci_zig.yml index 5e21453e43..80374a0891 100644 --- a/.github/workflows/ci_zig.yml +++ b/.github/workflows/ci_zig.yml @@ -208,6 +208,8 @@ jobs: # We can re-evaluate as new version of zig/valgrind come out. 
if: ${{ matrix.os == 'ubuntu-22.04' }} run: | + # Install libc6-dbg which is required for Valgrind's function redirections + sudo apt-get update && sudo apt-get install -y libc6-dbg sudo snap install valgrind --classic valgrind --version ./ci/custom_valgrind.sh ./zig-out/bin/snapshot --debug --verbose From 677ff620077595ba154d5b031f1e82a48059fa9a Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 10:50:42 -0500 Subject: [PATCH 21/64] Ban enumFromInt(0) --- build.zig | 156 +++++++++++++++++ src/canonicalize/Can.zig | 42 ++--- src/canonicalize/NodeStore.zig | 35 ++-- src/canonicalize/test/anno_only_test.zig | 13 +- src/canonicalize/test/import_store_test.zig | 8 +- .../test/import_validation_test.zig | 6 +- src/check/snapshot.zig | 4 +- src/check/test/unify_test.zig | 75 +++++--- src/cli/main.zig | 2 +- src/collections/safe_list.zig | 165 +++++++++++------- src/compile/test/type_printing_bug_test.zig | 3 +- src/eval/comptime_evaluator.zig | 19 +- src/eval/interpreter.zig | 64 ++++--- src/eval/render_helpers.zig | 22 +-- src/parse/NodeStore.zig | 7 +- src/parse/Parser.zig | 2 +- src/types/store.zig | 28 ++- .../numeric_let_generalize_in_block.md | 88 ++++++++-- .../repl/numeric_multiple_diff_types.md | 14 -- test/snapshots/repl/numeric_sum_to_str.md | 8 - 20 files changed, 531 insertions(+), 230 deletions(-) diff --git a/build.zig b/build.zig index b1c8a0850c..2744416fdf 100644 --- a/build.zig +++ b/build.zig @@ -292,6 +292,158 @@ const CheckTypeCheckerPatternsStep = struct { } }; +/// Build step that checks for @enumFromInt(0) usage in all .zig files. +/// +/// We forbid @enumFromInt(0) because it hides bugs and makes them harder to debug. +/// If we need a placeholder value that we believe will never be read, we should +/// use `undefined` instead - that way our intent is clear, and it can fail in a +/// more obvious way if our assumption is incorrect. 
+const CheckEnumFromIntZeroStep = struct { + step: Step, + + fn create(b: *std.Build) *CheckEnumFromIntZeroStep { + const self = b.allocator.create(CheckEnumFromIntZeroStep) catch @panic("OOM"); + self.* = .{ + .step = Step.init(.{ + .id = Step.Id.custom, + .name = "check-enum-from-int-zero", + .owner = b, + .makeFn = make, + }), + }; + return self; + } + + fn make(step: *Step, options: Step.MakeOptions) !void { + _ = options; + const b = step.owner; + const allocator = b.allocator; + + var violations = std.ArrayList(Violation).empty; + defer violations.deinit(allocator); + + // Recursively scan src/ for .zig files + var dir = std.fs.cwd().openDir("src", .{ .iterate = true }) catch |err| { + return step.fail("Failed to open src directory: {}", .{err}); + }; + defer dir.close(); + + try scanDirectoryForEnumFromIntZero(allocator, dir, "src", &violations); + + if (violations.items.len > 0) { + std.debug.print("\n", .{}); + std.debug.print("=" ** 80 ++ "\n", .{}); + std.debug.print("FORBIDDEN PATTERN: @enumFromInt(0)\n", .{}); + std.debug.print("=" ** 80 ++ "\n\n", .{}); + + std.debug.print( + \\Using @enumFromInt(0) is forbidden in this codebase. + \\ + \\WHY THIS RULE EXISTS: + \\ @enumFromInt(0) hides bugs and makes them harder to debug. It creates + \\ a "valid-looking" value that can silently propagate through the code + \\ when something goes wrong. + \\ + \\WHAT TO DO INSTEAD: + \\ If you need a placeholder value that you believe will never be read, + \\ use `undefined` instead. This makes your intent clear, and if your + \\ assumption is wrong and the value IS read, it will fail more obviously. + \\ + \\ When using `undefined`, add a comment explaining why it's correct there + \\ (e.g., where it will be overwritten before being read). 
+ \\ + \\ Example - WRONG: + \\ .anno = @enumFromInt(0), // placeholder - will be replaced + \\ + \\ Example - RIGHT: + \\ .anno = undefined, // overwritten in Phase 1.7 before use + \\ + \\VIOLATIONS FOUND: + \\ + , .{}); + + for (violations.items) |violation| { + std.debug.print(" {s}:{d}: {s}\n", .{ + violation.file_path, + violation.line_number, + violation.line_content, + }); + } + + std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{}); + + return step.fail( + "Found {d} uses of @enumFromInt(0). Use `undefined` instead with a comment explaining why. " ++ + "See above for details.", + .{violations.items.len}, + ); + } + } + + const Violation = struct { + file_path: []const u8, + line_number: usize, + line_content: []const u8, + }; + + fn scanDirectoryForEnumFromIntZero( + allocator: std.mem.Allocator, + dir: std.fs.Dir, + path_prefix: []const u8, + violations: *std.ArrayList(Violation), + ) !void { + var walker = try dir.walk(allocator); + defer walker.deinit(); + + while (try walker.next()) |entry| { + if (entry.kind != .file) continue; + if (!std.mem.endsWith(u8, entry.path, ".zig")) continue; + + // Skip test files - they may legitimately need @enumFromInt(0) for test indices + if (std.mem.endsWith(u8, entry.path, "_test.zig")) continue; + if (std.mem.indexOf(u8, entry.path, "test/") != null) continue; + if (std.mem.startsWith(u8, entry.path, "test")) continue; + + const full_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ path_prefix, entry.path }); + + const file = dir.openFile(entry.path, .{}) catch continue; + defer file.close(); + + const content = file.readToEndAlloc(allocator, 10 * 1024 * 1024) catch continue; + defer allocator.free(content); + + var line_number: usize = 1; + var line_start: usize = 0; + + for (content, 0..) 
|char, i| { + if (char == '\n') { + const line = content[line_start..i]; + + const trimmed = std.mem.trim(u8, line, " \t"); + // Skip comments + if (std.mem.startsWith(u8, trimmed, "//")) { + line_number += 1; + line_start = i + 1; + continue; + } + + // Check for @enumFromInt(0) usage + if (std.mem.indexOf(u8, line, "@enumFromInt(0)") != null) { + try violations.append(allocator, .{ + .file_path = full_path, + .line_number = line_number, + .line_content = try allocator.dupe(u8, trimmed), + }); + } + + line_number += 1; + line_start = i + 1; + } + } + } + } +}; + fn checkFxPlatformTestCoverage(step: *Step) !void { const b = step.owner; std.debug.print("---- checking fx platform test coverage ----\n", .{}); @@ -1272,6 +1424,10 @@ pub fn build(b: *std.Build) void { const check_patterns = CheckTypeCheckerPatternsStep.create(b); test_step.dependOn(&check_patterns.step); + // Add check for @enumFromInt(0) usage + const check_enum_from_int = CheckEnumFromIntZeroStep.create(b); + test_step.dependOn(&check_enum_from_int.step); + test_step.dependOn(&tests_summary.step); b.default_step.dependOn(playground_step); diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index d5054e399e..f9c46e09df 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -483,13 +483,13 @@ fn processTypeDeclFirstPass( .alias => Statement{ .s_alias_decl = .{ .header = final_header_idx, - .anno = @enumFromInt(0), // placeholder - will be replaced below + .anno = undefined, // overwritten below before use }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = final_header_idx, - .anno = @enumFromInt(0), // placeholder - will be replaced below + .anno = undefined, // overwritten below before use .is_opaque = type_decl.kind == .@"opaque", }, }, @@ -503,13 +503,13 @@ fn processTypeDeclFirstPass( .alias => Statement{ .s_alias_decl = .{ .header = final_header_idx, - .anno = @enumFromInt(0), // placeholder - will be replaced + .anno = undefined, // overwritten 
below before use }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = final_header_idx, - .anno = @enumFromInt(0), // placeholder - will be replaced + .anno = undefined, // overwritten below before use .is_opaque = type_decl.kind == .@"opaque", }, }, @@ -636,13 +636,13 @@ fn introduceTypeNameOnly( .alias => Statement{ .s_alias_decl = .{ .header = header_idx, - .anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7 + .anno = undefined, // overwritten in Phase 1.7 before use }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = header_idx, - .anno = @enumFromInt(0), // placeholder - will be updated in Phase 1.7 + .anno = undefined, // overwritten in Phase 1.7 before use .is_opaque = type_decl.kind == .@"opaque", }, }, @@ -2598,8 +2598,8 @@ fn addToExposedScope( // Add to exposed_items for permanent storage (unconditionally) try self.env.addExposedById(ident_idx); - // Use a dummy pattern index - we just need to track that it's exposed - const dummy_idx = @as(Pattern.Idx, @enumFromInt(0)); + // Use undefined pattern index - we just need to track that the ident is exposed + const dummy_idx: Pattern.Idx = undefined; try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx); } @@ -2631,8 +2631,8 @@ fn addToExposedScope( // Don't add types to exposed_items - types are not values // Only add to type_bindings for type resolution - // Use a dummy statement index - we just need to track that it's exposed - const dummy_idx = @as(Statement.Idx, @enumFromInt(0)); + // Use undefined statement index - we just need to track that the type is exposed + const dummy_idx: Statement.Idx = undefined; try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx }); } @@ -2664,8 +2664,8 @@ fn addToExposedScope( // Don't add types to exposed_items - types are not values // Only add to type_bindings for type resolution - // Use a dummy statement index - we just need to track that it's exposed - 
const dummy_idx = @as(Statement.Idx, @enumFromInt(0)); + // Use undefined statement index - we just need to track that the type is exposed + const dummy_idx: Statement.Idx = undefined; try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx }); } @@ -2715,8 +2715,8 @@ fn addPlatformProvidesItems( // Add to exposed_items for permanent storage try self.env.addExposedById(ident_idx); - // Add to exposed_scope so it becomes an export - const dummy_idx = @as(Pattern.Idx, @enumFromInt(0)); + // Add to exposed_scope so it becomes an export - undefined since index isn't read + const dummy_idx: Pattern.Idx = undefined; try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx); // Also track in exposed_ident_texts @@ -5171,7 +5171,7 @@ pub fn canonicalizeExpr( .patterns = ok_branch_pat_span, .value = ok_lookup_idx, .guard = null, - .redundant = @enumFromInt(0), + .redundant = undefined, // set during type checking }, region, ); @@ -5245,7 +5245,7 @@ pub fn canonicalizeExpr( .patterns = err_branch_pat_span, .value = return_expr_idx, .guard = null, - .redundant = @enumFromInt(0), + .redundant = undefined, // set during type checking }, region, ); @@ -5259,7 +5259,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = @enumFromInt(0), // Will be set during type checking + .exhaustive = undefined, // set during type checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -5567,7 +5567,7 @@ pub fn canonicalizeExpr( .patterns = branch_pat_span, .value = value_idx, .guard = null, - .redundant = @enumFromInt(0), // TODO + .redundant = undefined, // set during type checking }, region, ); @@ -5587,7 +5587,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = @enumFromInt(0), // Will be set during type checking + .exhaustive = undefined, // set during 
type checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -7596,8 +7596,8 @@ fn processCollectedTypeVars(self: *Self) std.mem.Allocator.Error!void { // Collect problems for this type variable const is_single_use = !found_another; - // Use a dummy AST annotation index since we don't have the context - try collectTypeVarProblems(first_ident, is_single_use, @enumFromInt(0), &self.scratch_type_var_problems); + // Use undefined AST annotation index since we don't have the context here + try collectTypeVarProblems(first_ident, is_single_use, undefined, &self.scratch_type_var_problems); } // Report any problems we found diff --git a/src/canonicalize/NodeStore.zig b/src/canonicalize/NodeStore.zig index 4bf4977844..4ef4f9e933 100644 --- a/src/canonicalize/NodeStore.zig +++ b/src/canonicalize/NodeStore.zig @@ -674,9 +674,11 @@ pub fn getExpr(store: *const NodeStore, expr: CIR.Expr.Idx) CIR.Expr { .expr_suffix_single_question, .expr_record_builder, => { - return CIR.Expr{ .e_runtime_error = .{ - .diagnostic = @enumFromInt(0), - } }; + return CIR.Expr{ + .e_runtime_error = .{ + .diagnostic = undefined, // deserialized runtime errors don't preserve diagnostics + }, + }; }, .expr_ellipsis => { return CIR.Expr{ .e_ellipsis = .{} }; @@ -1512,7 +1514,7 @@ pub fn addExpr(store: *NodeStore, expr: CIR.Expr, region: base.Region) Allocator .data_1 = 0, .data_2 = 0, .data_3 = 0, - .tag = @enumFromInt(0), + .tag = undefined, // set below in switch }; switch (expr) { @@ -2144,7 +2146,7 @@ pub fn addPatternRecordField(store: *NodeStore, patternRecordField: CIR.PatternR _ = store; _ = patternRecordField; - return @enumFromInt(0); + @panic("TODO: addPatternRecordField not implemented"); } /// Adds a type annotation to the store. 
@@ -2156,7 +2158,7 @@ pub fn addTypeAnno(store: *NodeStore, typeAnno: CIR.TypeAnno, region: base.Regio .data_1 = 0, .data_2 = 0, .data_3 = 0, - .tag = @enumFromInt(0), + .tag = undefined, // set below in switch }; switch (typeAnno) { @@ -2861,7 +2863,7 @@ pub fn addDiagnostic(store: *NodeStore, reason: CIR.Diagnostic) Allocator.Error! .data_1 = 0, .data_2 = 0, .data_3 = 0, - .tag = @enumFromInt(0), + .tag = undefined, // set below in switch }; var region = base.Region.zero(); @@ -3742,7 +3744,9 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify nodes try testing.expectEqual(@as(usize, 1), deserialized.nodes.len()); - const retrieved_node = deserialized.nodes.get(@enumFromInt(0)); + // Named constant for the first node index in the deserialized data + const first_node_idx: Node.Idx = @enumFromInt(0); + const retrieved_node = deserialized.nodes.get(first_node_idx); try testing.expectEqual(Node.Tag.expr_int, retrieved_node.tag); try testing.expectEqual(@as(u32, 0), retrieved_node.data_1); @@ -3755,7 +3759,9 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify regions try testing.expectEqual(@as(usize, 1), deserialized.regions.len()); - const retrieved_region = deserialized.regions.get(@enumFromInt(0)); + // Named constant for the first region index in the deserialized data + const first_region_idx: Region.Idx = @enumFromInt(0); + const retrieved_region = deserialized.regions.get(first_region_idx); try testing.expectEqual(region.start.offset, retrieved_region.start.offset); try testing.expectEqual(region.end.offset, retrieved_region.end.offset); } @@ -3845,19 +3851,24 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { // Verify nodes try testing.expectEqual(@as(usize, 3), deserialized.nodes.len()); + // Named constants for accessing deserialized nodes at specific indices + const first_node_idx: Node.Idx = @enumFromInt(0); + const second_node_idx: Node.Idx = @enumFromInt(1); + const third_node_idx: Node.Idx = @enumFromInt(2); + // Verify 
var node - const retrieved_var = deserialized.nodes.get(@enumFromInt(0)); + const retrieved_var = deserialized.nodes.get(first_node_idx); try testing.expectEqual(Node.Tag.expr_var, retrieved_var.tag); try testing.expectEqual(@as(u32, 5), retrieved_var.data_1); // Verify list node - const retrieved_list = deserialized.nodes.get(@enumFromInt(1)); + const retrieved_list = deserialized.nodes.get(second_node_idx); try testing.expectEqual(Node.Tag.expr_list, retrieved_list.tag); try testing.expectEqual(@as(u32, 10), retrieved_list.data_1); try testing.expectEqual(@as(u32, 3), retrieved_list.data_2); // Verify float node and extra data - const retrieved_float = deserialized.nodes.get(@enumFromInt(2)); + const retrieved_float = deserialized.nodes.get(third_node_idx); try testing.expectEqual(Node.Tag.expr_frac_f64, retrieved_float.tag); const retrieved_float_u32s = deserialized.extra_data.items.items[0..2]; const retrieved_float_u64: u64 = @bitCast(retrieved_float_u32s.*); diff --git a/src/canonicalize/test/anno_only_test.zig b/src/canonicalize/test/anno_only_test.zig index 31824348c1..8ad29d3252 100644 --- a/src/canonicalize/test/anno_only_test.zig +++ b/src/canonicalize/test/anno_only_test.zig @@ -23,14 +23,15 @@ test "e_anno_only can be used in statements" { // used as part of s_decl statements, which is how standalone // type annotations are represented after canonicalization. 
- const pattern_idx: CIR.Pattern.Idx = @enumFromInt(0); - const expr_idx: CIR.Expr.Idx = @enumFromInt(0); - const anno_idx: CIR.Annotation.Idx = @enumFromInt(0); + // Use named constants to make the intent clear - these represent the first indices + const first_pattern_idx: CIR.Pattern.Idx = @enumFromInt(0); + const first_expr_idx: CIR.Expr.Idx = @enumFromInt(0); + const first_anno_idx: CIR.Annotation.Idx = @enumFromInt(0); const stmt = CIR.Statement{ .s_decl = .{ - .pattern = pattern_idx, - .expr = expr_idx, - .anno = anno_idx, + .pattern = first_pattern_idx, + .expr = first_expr_idx, + .anno = first_anno_idx, } }; // Verify the statement was created correctly diff --git a/src/canonicalize/test/import_store_test.zig b/src/canonicalize/test/import_store_test.zig index 3a68998473..6360257dfa 100644 --- a/src/canonicalize/test/import_store_test.zig +++ b/src/canonicalize/test/import_store_test.zig @@ -215,8 +215,12 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { const str_idx_0 = deserialized.imports.items.items[0]; const str_idx_1 = deserialized.imports.items.items[1]; + // Named constants for first and second import indices + const first_import_idx: Import.Idx = @enumFromInt(0); + const second_import_idx: Import.Idx = @enumFromInt(1); + try testing.expect(deserialized.map.contains(str_idx_0)); try testing.expect(deserialized.map.contains(str_idx_1)); - try testing.expectEqual(@as(Import.Idx, @enumFromInt(0)), deserialized.map.get(str_idx_0).?); - try testing.expectEqual(@as(Import.Idx, @enumFromInt(1)), deserialized.map.get(str_idx_1).?); + try testing.expectEqual(first_import_idx, deserialized.map.get(str_idx_0).?); + try testing.expectEqual(second_import_idx, deserialized.map.get(str_idx_1).?); } diff --git a/src/canonicalize/test/import_validation_test.zig b/src/canonicalize/test/import_validation_test.zig index 49c3c8e626..d39b9c4f6d 100644 --- a/src/canonicalize/test/import_validation_test.zig +++ 
b/src/canonicalize/test/import_validation_test.zig @@ -359,10 +359,10 @@ test "Import.Idx is u32" { const back_to_u32 = @intFromEnum(import_idx); try testing.expectEqual(test_idx, back_to_u32); // Test that we can create valid Import.Idx values - const idx1: CIR.Import.Idx = @enumFromInt(0); - const idx2: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value + const first_import_idx: CIR.Import.Idx = @enumFromInt(0); + const max_import_idx: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value // Verify they are distinct - try testing.expect(idx1 != idx2); + try testing.expect(first_import_idx != max_import_idx); // Verify the size in memory try testing.expectEqual(@sizeOf(u32), @sizeOf(CIR.Import.Idx)); } diff --git a/src/check/snapshot.zig b/src/check/snapshot.zig index 24870a7a2c..0cc5054244 100644 --- a/src/check/snapshot.zig +++ b/src/check/snapshot.zig @@ -323,8 +323,8 @@ pub const Store = struct { return SnapshotStaticDispatchConstraint{ .fn_name = constraint.fn_name, .fn_content = try self.deepCopyVarInternal(store, type_writer, constraint.fn_var), - // Dispatcher will be set when collecting constraints during write - .dispatcher = @enumFromInt(0), + // Dispatcher is set when collecting constraints during write + .dispatcher = undefined, }; } diff --git a/src/check/test/unify_test.zig b/src/check/test/unify_test.zig index c171f70960..8a34280787 100644 --- a/src/check/test/unify_test.zig +++ b/src/check/test/unify_test.zig @@ -790,8 +790,10 @@ test "partitionFields - same record" { var env = try TestEnv.init(gpa); defer env.deinit(); - const field_x = try env.mkRecordField("field_x", @enumFromInt(0)); - const field_y = try env.mkRecordField("field_y", @enumFromInt(1)); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const field_x = try env.mkRecordField("field_x", placeholder_var_0); + const field_y = try env.mkRecordField("field_y", placeholder_var_1); const range = try 
env.scratch.appendSliceGatheredFields(&[_]RecordField{ field_x, field_y }); @@ -813,9 +815,12 @@ test "partitionFields - disjoint fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const a1 = try env.mkRecordField("a1", @enumFromInt(0)); - const a2 = try env.mkRecordField("a2", @enumFromInt(1)); - const b1 = try env.mkRecordField("b1", @enumFromInt(2)); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const a1 = try env.mkRecordField("a1", placeholder_var_0); + const a2 = try env.mkRecordField("a2", placeholder_var_1); + const b1 = try env.mkRecordField("b1", placeholder_var_2); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, a2 }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{b1}); @@ -839,9 +844,12 @@ test "partitionFields - overlapping fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const a1 = try env.mkRecordField("a1", @enumFromInt(0)); - const both = try env.mkRecordField("both", @enumFromInt(1)); - const b1 = try env.mkRecordField("b1", @enumFromInt(2)); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const a1 = try env.mkRecordField("a1", placeholder_var_0); + const both = try env.mkRecordField("both", placeholder_var_1); + const b1 = try env.mkRecordField("b1", placeholder_var_2); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, both }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ b1, both }); @@ -868,9 +876,12 @@ test "partitionFields - reordering is normalized" { var env = try TestEnv.init(gpa); defer env.deinit(); - const f1 = try env.mkRecordField("f1", @enumFromInt(0)); - const f2 = try env.mkRecordField("f2", @enumFromInt(1)); - const f3 = try env.mkRecordField("f3", @enumFromInt(2)); + const 
placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const f1 = try env.mkRecordField("f1", placeholder_var_0); + const f2 = try env.mkRecordField("f2", placeholder_var_1); + const f3 = try env.mkRecordField("f3", placeholder_var_2); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f3, f1, f2 }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f1, f2, f3 }); @@ -1027,8 +1038,10 @@ test "partitionTags - same tags" { var env = try TestEnv.init(gpa); defer env.deinit(); - const tag_x = try env.mkTag("X", &[_]Var{@enumFromInt(0)}); - const tag_y = try env.mkTag("Y", &[_]Var{@enumFromInt(1)}); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const tag_x = try env.mkTag("X", &[_]Var{placeholder_var_0}); + const tag_y = try env.mkTag("Y", &[_]Var{placeholder_var_1}); const range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ tag_x, tag_y }); @@ -1050,9 +1063,12 @@ test "partitionTags - disjoint fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const a1 = try env.mkTag("A1", &[_]Var{@enumFromInt(0)}); - const a2 = try env.mkTag("A2", &[_]Var{@enumFromInt(1)}); - const b1 = try env.mkTag("B1", &[_]Var{@enumFromInt(2)}); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const a1 = try env.mkTag("A1", &[_]Var{placeholder_var_0}); + const a2 = try env.mkTag("A2", &[_]Var{placeholder_var_1}); + const b1 = try env.mkTag("B1", &[_]Var{placeholder_var_2}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, a2 }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{b1}); @@ -1076,9 +1092,12 @@ test "partitionTags - overlapping tags" { var env = try TestEnv.init(gpa); defer env.deinit(); - const a1 = try env.mkTag("A", 
&[_]Var{@enumFromInt(0)}); - const both = try env.mkTag("Both", &[_]Var{@enumFromInt(1)}); - const b1 = try env.mkTag("B", &[_]Var{@enumFromInt(2)}); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const a1 = try env.mkTag("A", &[_]Var{placeholder_var_0}); + const both = try env.mkTag("Both", &[_]Var{placeholder_var_1}); + const b1 = try env.mkTag("B", &[_]Var{placeholder_var_2}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, both }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ b1, both }); @@ -1105,9 +1124,12 @@ test "partitionTags - reordering is normalized" { var env = try TestEnv.init(gpa); defer env.deinit(); - const f1 = try env.mkTag("F1", &[_]Var{@enumFromInt(0)}); - const f2 = try env.mkTag("F2", &[_]Var{@enumFromInt(1)}); - const f3 = try env.mkTag("F3", &[_]Var{@enumFromInt(2)}); + const placeholder_var_0: Var = @enumFromInt(0); + const placeholder_var_1: Var = @enumFromInt(1); + const placeholder_var_2: Var = @enumFromInt(2); + const f1 = try env.mkTag("F1", &[_]Var{placeholder_var_0}); + const f2 = try env.mkTag("F2", &[_]Var{placeholder_var_1}); + const f3 = try env.mkTag("F3", &[_]Var{placeholder_var_2}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f3, f1, f2 }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f1, f2, f3 }); @@ -1487,7 +1509,8 @@ test "unify - flex with constraints vs structure captures deferred check" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*; + const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); + const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; try std.testing.expectEqual( env.module_env.types.resolveVar(structure_var).var_, 
env.module_env.types.resolveVar(deferred.var_).var_, @@ -1522,7 +1545,8 @@ test "unify - structure vs flex with constraints captures deferred check (revers // Check that constraint was captured (note: vars might be swapped due to merge order) try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*; + const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); + const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; try std.testing.expectEqual( env.module_env.types.resolveVar(flex_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, @@ -1575,7 +1599,8 @@ test "unify - flex vs nominal type captures constraint" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(@enumFromInt(0)).*; + const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); + const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; try std.testing.expectEqual( env.module_env.types.resolveVar(nominal_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, diff --git a/src/cli/main.zig b/src/cli/main.zig index 78ec396b33..cfc66282e1 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -1747,7 +1747,7 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(qualified_type_name)); const auto_type = Can.AutoImportedType{ .env = platform_env, - .statement_idx = @enumFromInt(0), // Non-null triggers qualified name building + .statement_idx = undefined, // non-null triggers qualified name building; actual index isn't read .qualified_type_ident = type_qualified_ident, }; diff --git a/src/collections/safe_list.zig b/src/collections/safe_list.zig index 8dd6abbf1a..2d307629e9 
100644 --- a/src/collections/safe_list.zig +++ b/src/collections/safe_list.zig @@ -23,7 +23,7 @@ pub fn SafeRange(comptime Idx: type) type { /// An empty range pub fn empty() Self { - return .{ .start = @enumFromInt(0), .count = 0 }; + return .{ .start = undefined, .count = 0 }; } // Drop first elem from the span, if possible @@ -365,10 +365,11 @@ pub fn SafeList(comptime T: type) type { /// Iterate over all items in this list. pub fn iter(self: *const SafeList(T)) Iterator { + const first_idx: Idx = @enumFromInt(0); return Iterator{ .array = self, .len = self.len(), - .current = @enumFromInt(0), + .current = first_idx, }; } }; @@ -961,9 +962,10 @@ test "SafeList edge cases serialization" { const serialized_ptr = @as(*Container.Serialized, @ptrCast(@alignCast(buffer.ptr))); const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); + const first_idx: SafeList(u8).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 0), deserialized.list_u32.len()); try testing.expectEqual(@as(usize, 1), deserialized.list_u8.len()); - try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(@enumFromInt(0)).*); + try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(first_idx).*); } } @@ -1048,11 +1050,15 @@ test "SafeList CompactWriter complete roundtrip example" { const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); // Step 8: Verify data is accessible and correct + const first_idx: SafeList(u32).Idx = @enumFromInt(0); + const second_idx: SafeList(u32).Idx = @enumFromInt(1); + const third_idx: SafeList(u32).Idx = @enumFromInt(2); + const fourth_idx: SafeList(u32).Idx = @enumFromInt(3); try testing.expectEqual(@as(usize, 4), deserialized.len()); - try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).*); - try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).*); - try 
testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).*); + try testing.expectEqual(@as(u32, 100), deserialized.get(first_idx).*); + try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).*); + try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).*); + try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).*); } test "SafeList CompactWriter multiple lists with different alignments" { @@ -1155,10 +1161,13 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 3 * @sizeOf(u8); + const u8_first_idx: SafeList(u8).Idx = @enumFromInt(0); + const u8_second_idx: SafeList(u8).Idx = @enumFromInt(1); + const u8_third_idx: SafeList(u8).Idx = @enumFromInt(2); try testing.expectEqual(@as(usize, 3), deser_u8.len()); - try testing.expectEqual(@as(u8, 10), deser_u8.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u8, 20), deser_u8.get(@enumFromInt(1)).*); - try testing.expectEqual(@as(u8, 30), deser_u8.get(@enumFromInt(2)).*); + try testing.expectEqual(@as(u8, 10), deser_u8.get(u8_first_idx).*); + try testing.expectEqual(@as(u8, 20), deser_u8.get(u8_second_idx).*); + try testing.expectEqual(@as(u8, 30), deser_u8.get(u8_third_idx).*); // 2. 
Deserialize u16 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized)); @@ -1169,9 +1178,11 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u16)); offset += 2 * @sizeOf(u16); + const u16_first_idx: SafeList(u16).Idx = @enumFromInt(0); + const u16_second_idx: SafeList(u16).Idx = @enumFromInt(1); try testing.expectEqual(@as(usize, 2), deser_u16.len()); - try testing.expectEqual(@as(u16, 1000), deser_u16.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u16, 2000), deser_u16.get(@enumFromInt(1)).*); + try testing.expectEqual(@as(u16, 1000), deser_u16.get(u16_first_idx).*); + try testing.expectEqual(@as(u16, 2000), deser_u16.get(u16_second_idx).*); // 3. Deserialize u32 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized)); @@ -1182,11 +1193,15 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u32)); offset += 4 * @sizeOf(u32); + const u32_first_idx: SafeList(u32).Idx = @enumFromInt(0); + const u32_second_idx: SafeList(u32).Idx = @enumFromInt(1); + const u32_third_idx: SafeList(u32).Idx = @enumFromInt(2); + const u32_fourth_idx: SafeList(u32).Idx = @enumFromInt(3); try testing.expectEqual(@as(usize, 4), deser_u32.len()); - try testing.expectEqual(@as(u32, 100_000), deser_u32.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@enumFromInt(1)).*); - try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@enumFromInt(2)).*); - try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@enumFromInt(3)).*); + try testing.expectEqual(@as(u32, 100_000), deser_u32.get(u32_first_idx).*); + try testing.expectEqual(@as(u32, 200_000), deser_u32.get(u32_second_idx).*); + try testing.expectEqual(@as(u32, 300_000), deser_u32.get(u32_third_idx).*); + try testing.expectEqual(@as(u32, 400_000), deser_u32.get(u32_fourth_idx).*); 
// 4. Deserialize u64 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized)); @@ -1197,22 +1212,26 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u64)); offset += 2 * @sizeOf(u64); + const u64_first_idx: SafeList(u64).Idx = @enumFromInt(0); + const u64_second_idx: SafeList(u64).Idx = @enumFromInt(1); try testing.expectEqual(@as(usize, 2), deser_u64.len()); - try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@enumFromInt(1)).*); + try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(u64_first_idx).*); + try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(u64_second_idx).*); // 5. Deserialize struct list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(AlignedStruct).Serialized)); const s_struct = @as(*SafeList(AlignedStruct).Serialized, @ptrCast(@alignCast(buffer.ptr + offset))); const deser_struct = s_struct.deserialize(@as(i64, @intCast(base_addr))); + const struct_first_idx: SafeList(AlignedStruct).Idx = @enumFromInt(0); + const struct_second_idx: SafeList(AlignedStruct).Idx = @enumFromInt(1); try testing.expectEqual(@as(usize, 2), deser_struct.len()); - const item0 = deser_struct.get(@enumFromInt(0)); + const item0 = deser_struct.get(struct_first_idx); try testing.expectEqual(@as(u32, 42), item0.x); try testing.expectEqual(@as(u64, 1337), item0.y); try testing.expectEqual(@as(u8, 255), item0.z); - const item1 = deser_struct.get(@enumFromInt(1)); + const item1 = deser_struct.get(struct_second_idx); try testing.expectEqual(@as(u32, 99), item1.x); try testing.expectEqual(@as(u64, 9999), item1.y); try testing.expectEqual(@as(u8, 128), item1.z); @@ -1318,10 +1337,13 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 3; // 3 
u8 elements + const d1_first_idx: SafeList(u8).Idx = @enumFromInt(0); + const d1_second_idx: SafeList(u8).Idx = @enumFromInt(1); + const d1_third_idx: SafeList(u8).Idx = @enumFromInt(2); try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 1), d1.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u8, 2), d1.get(@enumFromInt(1)).*); - try testing.expectEqual(@as(u8, 3), d1.get(@enumFromInt(2)).*); + try testing.expectEqual(@as(u8, 1), d1.get(d1_first_idx).*); + try testing.expectEqual(@as(u8, 2), d1.get(d1_second_idx).*); + try testing.expectEqual(@as(u8, 3), d1.get(d1_third_idx).*); // 2. Second list - u64 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized)); @@ -1331,9 +1353,11 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u64)); offset += 2 * @sizeOf(u64); // 2 u64 elements + const d2_first_idx: SafeList(u64).Idx = @enumFromInt(0); + const d2_second_idx: SafeList(u64).Idx = @enumFromInt(1); try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u64, 1_000_000), d2.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u64, 2_000_000), d2.get(@enumFromInt(1)).*); + try testing.expectEqual(@as(u64, 1_000_000), d2.get(d2_first_idx).*); + try testing.expectEqual(@as(u64, 2_000_000), d2.get(d2_second_idx).*); // 3. 
Third list - u16 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized)); @@ -1343,19 +1367,24 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u16)); offset += 4 * @sizeOf(u16); // 4 u16 elements + const d3_first_idx: SafeList(u16).Idx = @enumFromInt(0); + const d3_second_idx: SafeList(u16).Idx = @enumFromInt(1); + const d3_third_idx: SafeList(u16).Idx = @enumFromInt(2); + const d3_fourth_idx: SafeList(u16).Idx = @enumFromInt(3); try testing.expectEqual(@as(usize, 4), d3.len()); - try testing.expectEqual(@as(u16, 100), d3.get(@enumFromInt(0)).*); - try testing.expectEqual(@as(u16, 200), d3.get(@enumFromInt(1)).*); - try testing.expectEqual(@as(u16, 300), d3.get(@enumFromInt(2)).*); - try testing.expectEqual(@as(u16, 400), d3.get(@enumFromInt(3)).*); + try testing.expectEqual(@as(u16, 100), d3.get(d3_first_idx).*); + try testing.expectEqual(@as(u16, 200), d3.get(d3_second_idx).*); + try testing.expectEqual(@as(u16, 300), d3.get(d3_third_idx).*); + try testing.expectEqual(@as(u16, 400), d3.get(d3_fourth_idx).*); // 4. 
Fourth list - u32 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized)); const s4 = @as(*SafeList(u32).Serialized, @ptrCast(@alignCast(buffer.ptr + offset))); const d4 = s4.deserialize(@as(i64, @intCast(base))); + const d4_first_idx: SafeList(u32).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 1), d4.len()); - try testing.expectEqual(@as(u32, 42), d4.get(@enumFromInt(0)).*); + try testing.expectEqual(@as(u32, 42), d4.get(d4_first_idx).*); } test "SafeList CompactWriter brute-force alignment verification" { @@ -1475,8 +1504,9 @@ test "SafeList CompactWriter brute-force alignment verification" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 1; // 1 u8 element + const u8_first_idx: SafeList(u8).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 1), d_u8.len()); - try testing.expectEqual(@as(u8, 42), d_u8.get(@enumFromInt(0)).*); + try testing.expectEqual(@as(u8, 42), d_u8.get(u8_first_idx).*); // Second list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(T).Serialized)); @@ -1551,28 +1581,32 @@ test "SafeMultiList CompactWriter roundtrip with file" { const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); // Verify the data + const first_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(0); + const second_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(1); + const third_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(2); + const fourth_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(3); try testing.expectEqual(@as(usize, 4), deserialized.len()); // Verify all the data - try testing.expectEqual(@as(u32, 100), deserialized.get(@enumFromInt(0)).id); - try testing.expectEqual(@as(u64, 1000), deserialized.get(@enumFromInt(0)).value); - try testing.expectEqual(true, deserialized.get(@enumFromInt(0)).flag); - try testing.expectEqual(@as(u8, 10), deserialized.get(@enumFromInt(0)).data); + try testing.expectEqual(@as(u32, 100), 
deserialized.get(first_idx).id); + try testing.expectEqual(@as(u64, 1000), deserialized.get(first_idx).value); + try testing.expectEqual(true, deserialized.get(first_idx).flag); + try testing.expectEqual(@as(u8, 10), deserialized.get(first_idx).data); - try testing.expectEqual(@as(u32, 200), deserialized.get(@enumFromInt(1)).id); - try testing.expectEqual(@as(u64, 2000), deserialized.get(@enumFromInt(1)).value); - try testing.expectEqual(false, deserialized.get(@enumFromInt(1)).flag); - try testing.expectEqual(@as(u8, 20), deserialized.get(@enumFromInt(1)).data); + try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).id); + try testing.expectEqual(@as(u64, 2000), deserialized.get(second_idx).value); + try testing.expectEqual(false, deserialized.get(second_idx).flag); + try testing.expectEqual(@as(u8, 20), deserialized.get(second_idx).data); - try testing.expectEqual(@as(u32, 300), deserialized.get(@enumFromInt(2)).id); - try testing.expectEqual(@as(u64, 3000), deserialized.get(@enumFromInt(2)).value); - try testing.expectEqual(true, deserialized.get(@enumFromInt(2)).flag); - try testing.expectEqual(@as(u8, 30), deserialized.get(@enumFromInt(2)).data); + try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).id); + try testing.expectEqual(@as(u64, 3000), deserialized.get(third_idx).value); + try testing.expectEqual(true, deserialized.get(third_idx).flag); + try testing.expectEqual(@as(u8, 30), deserialized.get(third_idx).data); - try testing.expectEqual(@as(u32, 400), deserialized.get(@enumFromInt(3)).id); - try testing.expectEqual(@as(u64, 4000), deserialized.get(@enumFromInt(3)).value); - try testing.expectEqual(false, deserialized.get(@enumFromInt(3)).flag); - try testing.expectEqual(@as(u8, 40), deserialized.get(@enumFromInt(3)).data); + try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).id); + try testing.expectEqual(@as(u64, 4000), deserialized.get(fourth_idx).value); + try testing.expectEqual(false, 
deserialized.get(fourth_idx).flag); + try testing.expectEqual(@as(u8, 40), deserialized.get(fourth_idx).data); } test "SafeMultiList empty list CompactWriter roundtrip" { @@ -1702,30 +1736,35 @@ test "SafeMultiList CompactWriter multiple lists different alignments" { const base = @as(i64, @intCast(@intFromPtr(buffer.ptr))); // Deserialize list1 (at offset1) + const d1_first_idx: SafeMultiList(Type1).Idx = @enumFromInt(0); + const d1_second_idx: SafeMultiList(Type1).Idx = @enumFromInt(1); + const d1_third_idx: SafeMultiList(Type1).Idx = @enumFromInt(2); const d1_serialized = @as(*SafeMultiList(Type1).Serialized, @ptrCast(@alignCast(buffer.ptr + offset1))); const d1 = d1_serialized.deserialize(base); try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 10), d1.get(@enumFromInt(0)).a); - try testing.expectEqual(@as(u16, 100), d1.get(@enumFromInt(0)).b); - try testing.expectEqual(@as(u8, 20), d1.get(@enumFromInt(1)).a); - try testing.expectEqual(@as(u16, 200), d1.get(@enumFromInt(1)).b); - try testing.expectEqual(@as(u8, 30), d1.get(@enumFromInt(2)).a); - try testing.expectEqual(@as(u16, 300), d1.get(@enumFromInt(2)).b); + try testing.expectEqual(@as(u8, 10), d1.get(d1_first_idx).a); + try testing.expectEqual(@as(u16, 100), d1.get(d1_first_idx).b); + try testing.expectEqual(@as(u8, 20), d1.get(d1_second_idx).a); + try testing.expectEqual(@as(u16, 200), d1.get(d1_second_idx).b); + try testing.expectEqual(@as(u8, 30), d1.get(d1_third_idx).a); + try testing.expectEqual(@as(u16, 300), d1.get(d1_third_idx).b); // Deserialize list2 (at offset2) + const d2_first_idx: SafeMultiList(Type2).Idx = @enumFromInt(0); const d2_serialized = @as(*SafeMultiList(Type2).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u32, 1000), d2.get(@enumFromInt(0)).x); - try testing.expectEqual(@as(u64, 10000), d2.get(@enumFromInt(0)).y); + 
try testing.expectEqual(@as(u32, 1000), d2.get(d2_first_idx).x); + try testing.expectEqual(@as(u64, 10000), d2.get(d2_first_idx).y); // Deserialize list3 (at offset3) + const d3_first_idx: SafeMultiList(Type3).Idx = @enumFromInt(0); const d3_serialized = @as(*SafeMultiList(Type3).Serialized, @ptrCast(@alignCast(buffer.ptr + offset3))); const d3 = d3_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d3.len()); - try testing.expectEqual(@as(u64, 999), d3.get(@enumFromInt(0)).id); - try testing.expectEqual(@as(u8, 42), d3.get(@enumFromInt(0)).data); - try testing.expectEqual(true, d3.get(@enumFromInt(0)).flag); + try testing.expectEqual(@as(u64, 999), d3.get(d3_first_idx).id); + try testing.expectEqual(@as(u8, 42), d3.get(d3_first_idx).data); + try testing.expectEqual(true, d3.get(d3_first_idx).flag); } test "SafeMultiList CompactWriter brute-force alignment verification" { @@ -1815,10 +1854,11 @@ test "SafeMultiList CompactWriter brute-force alignment verification" { const d2_serialized = @as(*SafeMultiList(TestType).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); if (length > 0) { + const d2_first_idx: SafeMultiList(TestType).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 1), d2.len()); - try testing.expectEqual(@as(u8, 255), d2.get(@enumFromInt(0)).a); - try testing.expectEqual(@as(u32, 999999), d2.get(@enumFromInt(0)).b); - try testing.expectEqual(@as(u64, 888888888), d2.get(@enumFromInt(0)).c); + try testing.expectEqual(@as(u8, 255), d2.get(d2_first_idx).a); + try testing.expectEqual(@as(u32, 999999), d2.get(d2_first_idx).b); + try testing.expectEqual(@as(u64, 888888888), d2.get(d2_first_idx).c); } else { try testing.expectEqual(@as(usize, 0), d2.len()); } @@ -2287,7 +2327,8 @@ test "SafeMultiList.Serialized roundtrip" { try testing.expectEqual(@as(u8, 64), c_values[2]); // Check get() method - const item1 = list.get(@as(SafeMultiList(TestStruct).Idx, @enumFromInt(0))); + const 
first_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(0); + const item1 = list.get(first_idx); try testing.expectEqual(@as(u32, 100), item1.a); try testing.expectEqual(@as(f32, 1.5), item1.b); try testing.expectEqual(@as(u8, 255), item1.c); diff --git a/src/compile/test/type_printing_bug_test.zig b/src/compile/test/type_printing_bug_test.zig index 183d6fd099..4bd20e1830 100644 --- a/src/compile/test/type_printing_bug_test.zig +++ b/src/compile/test/type_printing_bug_test.zig @@ -16,6 +16,7 @@ const AST = parse.AST; test "canonicalizeAndTypeCheckModule preserves Try types in type printing" { const testing = std.testing; const gpa = testing.allocator; + const first_var: types.Var = @enumFromInt(0); const source = \\app [main] { pf: platform "platform.roc" } @@ -75,7 +76,7 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" { const ident_text = env.getIdent(ident_idx); if (std.mem.eql(u8, ident_text, "map_result")) { // Get the type variable from the first definition - it's the first in the defs list - map_result_var = @enumFromInt(0); // First variable + map_result_var = first_var; break; } } diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 52cc4ed15e..564085d9ff 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -466,7 +466,8 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var const variant_var: types_mod.Var = bool_rt_var; - var ext_var: types_mod.Var = @enumFromInt(0); + // ext_var is a placeholder that will be set if this is a tag_union type + var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { if (resolved.desc.content.structure == .tag_union) { @@ -517,7 +518,8 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var from type information const resolved = self.interpreter.runtime_types.resolveVar(rt_var); const variant_var: types_mod.Var = rt_var; - var ext_var: types_mod.Var = @enumFromInt(0); + 
// ext_var is a placeholder that will be set if this is a tag_union type + var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { if (resolved.desc.content.structure == .tag_union) { @@ -575,7 +577,8 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var from type information const resolved = self.interpreter.runtime_types.resolveVar(rt_var); const variant_var: types_mod.Var = rt_var; - var ext_var: types_mod.Var = @enumFromInt(0); + // ext_var is a placeholder that will be set if this is a tag_union type + var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { if (resolved.desc.content.structure == .tag_union) { @@ -1133,7 +1136,7 @@ pub const ComptimeEvaluator = struct { try self.interpreter.bindings.append(.{ .pattern_idx = params[0], .value = num_literal_record, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // No source expression for synthetic binding .source_env = origin_env, }); defer _ = self.interpreter.bindings.pop(); @@ -1193,8 +1196,8 @@ pub const ComptimeEvaluator = struct { const list_layout_idx = try self.interpreter.runtime_layout_store.insertList(layout_mod.Idx.u8); const list_layout = self.interpreter.runtime_layout_store.getLayout(list_layout_idx); - // Use placeholder rt_var for U8 list - const dest = try self.interpreter.pushRaw(list_layout, 0, @enumFromInt(0)); + // rt_var not needed for List(U8) construction - only layout matters + const dest = try self.interpreter.pushRaw(list_layout, 0, undefined); if (dest.ptr == null) return dest; const header: *builtins.list.RocList = @ptrCast(@alignCast(dest.ptr.?)); @@ -1245,8 +1248,8 @@ pub const ComptimeEvaluator = struct { const record_layout_idx = try self.interpreter.runtime_layout_store.putRecord(self.env, &field_layouts, &field_names); const record_layout = self.interpreter.runtime_layout_store.getLayout(record_layout_idx); - // Use placeholder rt_var for numeral record - var dest = try 
self.interpreter.pushRaw(record_layout, 0, @enumFromInt(0)); + // rt_var not needed for Numeral record construction - only layout matters + var dest = try self.interpreter.pushRaw(record_layout, 0, undefined); var accessor = try dest.asRecord(&self.interpreter.runtime_layout_store); // Use self.env for field lookups since the record was built with self.env's idents diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 34b374f16e..bd5b85a2c2 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -407,7 +407,7 @@ pub const Interpreter = struct { .import_mapping = import_mapping, .unify_scratch = try unify.Scratch.init(allocator), .type_writer = try types.TypeWriter.initFromParts(allocator, rt_types_ptr, env.common.getIdentStore(), null), - .stack_memory = try stack.Stack.initCapacity(allocator, 8 * 1024 * 1024), // 8MB stack + .stack_memory = try stack.Stack.initCapacity(allocator, 64 * 1024 * 1024), // 64 MiB stack .bindings = try std.array_list.Managed(Binding).initCapacity(allocator, 8), .active_closures = try std.array_list.Managed(StackValue).initCapacity(allocator, 4), .canonical_bool_rt_var = null, @@ -597,7 +597,8 @@ pub const Interpreter = struct { while (j < params.len) : (j += 1) { // getElement expects original index and converts to sorted internally const arg_value = try args_accessor.getElement(j, param_rt_vars[j]); - const matched = try self.patternMatchesBind(params[j], arg_value, param_rt_vars[j], roc_ops, &temp_binds, @enumFromInt(0)); + // expr_idx not used in this context - binding happens during function call setup + const matched = try self.patternMatchesBind(params[j], arg_value, param_rt_vars[j], roc_ops, &temp_binds, undefined); if (!matched) return error.TypeMismatch; } } @@ -921,13 +922,13 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for comparison function parameter 
bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -6688,7 +6689,7 @@ pub const Interpreter = struct { self.bindings.append(.{ .pattern_idx = params[0], .value = copied_value, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }) catch return null; @@ -11339,7 +11340,7 @@ pub const Interpreter = struct { .tag = .closure, .data = .{ .closure = .{ - .captures_layout_idx = @enumFromInt(0), + .captures_layout_idx = undefined, // No captures for hosted functions }, }, }; @@ -12879,13 +12880,14 @@ pub const Interpreter = struct { temp_binds.deinit(); } + // expr_idx not used for match pattern bindings if (!try self.patternMatchesBind( self.env.store.getMatchBranchPattern(bp_idx).pattern, scrutinee, effective_scrutinee_rt_var, roc_ops, &temp_binds, - @enumFromInt(0), + undefined, )) { continue; } @@ -13091,7 +13093,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = value, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }); @@ -13483,7 +13485,8 @@ pub const Interpreter = struct { // Use patternMatchesBind to properly handle complex patterns (e.g., list destructuring) // patternMatchesBind borrows the value and creates copies for bindings, so we need to // decref the original arg_value after successful binding - if (!try self.patternMatchesBind(param, arg_values[idx], param_rt_var, roc_ops, &self.bindings, @enumFromInt(0))) { + // expr_idx not used for function parameter bindings + if (!try self.patternMatchesBind(param, arg_values[idx], param_rt_var, roc_ops, &self.bindings, undefined)) { // Pattern match failed - cleanup and error self.env = 
saved_env; _ = self.active_closures.pop(); @@ -13689,7 +13692,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = operand, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for unary operator method parameter bindings .source_env = self.env, }); @@ -14092,14 +14095,15 @@ pub const Interpreter = struct { // of lhs/rhs at the function start will correctly free the originals while // the bindings retain their own references. // Use effective rt_vars from values if available. - if (!try self.patternMatchesBind(params[0], lhs, effective_receiver_rt_var, roc_ops, &self.bindings, @enumFromInt(0))) { + // expr_idx not used for binary operator method parameter bindings + if (!try self.patternMatchesBind(params[0], lhs, effective_receiver_rt_var, roc_ops, &self.bindings, undefined)) { self.flex_type_context.deinit(); self.flex_type_context = saved_flex_type_context; self.env = saved_env; _ = self.active_closures.pop(); return error.TypeMismatch; } - if (!try self.patternMatchesBind(params[1], rhs, rhs.rt_var, roc_ops, &self.bindings, @enumFromInt(0))) { + if (!try self.patternMatchesBind(params[1], rhs, rhs.rt_var, roc_ops, &self.bindings, undefined)) { // Clean up the first binding we added self.trimBindingList(&self.bindings, saved_bindings_len, roc_ops); self.flex_type_context.deinit(); @@ -14284,7 +14288,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = receiver_value, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for field access method parameter bindings .source_env = self.env, }); @@ -14516,7 +14520,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = receiver_value, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for method call parameter bindings .source_env = self.env, }); @@ -14525,7 +14529,7 @@ pub const Interpreter = struct { try 
self.bindings.append(.{ .pattern_idx = params[1 + idx], .value = arg, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for method call parameter bindings .source_env = self.env, }); } @@ -14599,7 +14603,8 @@ pub const Interpreter = struct { // Bind the pattern const loop_bindings_start = self.bindings.items.len; - if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, @enumFromInt(0))) { + // expr_idx not used for for-loop pattern bindings + if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, undefined)) { elem_value.decref(&self.runtime_layout_store, roc_ops); list_value.decref(&self.runtime_layout_store, roc_ops); return error.TypeMismatch; @@ -14667,7 +14672,8 @@ pub const Interpreter = struct { // Bind the pattern const new_loop_bindings_start = self.bindings.items.len; - if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, @enumFromInt(0))) { + // expr_idx not used for for-loop pattern bindings + if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, undefined)) { elem_value.decref(&self.runtime_layout_store, roc_ops); fl.list_value.decref(&self.runtime_layout_store, roc_ops); return error.TypeMismatch; @@ -14919,7 +14925,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = value, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }); @@ -15093,13 +15099,13 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = @enumFromInt(0), + .expr_idx = 
undefined, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -15175,13 +15181,13 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = @enumFromInt(0), + .expr_idx = undefined, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -15590,8 +15596,9 @@ test "interpreter: cross-module method resolution should find methods in origin try interp.module_ids.put(interp.allocator, module_a_ident, module_a_id); // Create an Import.Idx for module A - const import_idx: can.CIR.Import.Idx = @enumFromInt(0); - try interp.import_envs.put(interp.allocator, import_idx, &module_a); + // Using first import index for test purposes + const first_import_idx: can.CIR.Import.Idx = @enumFromInt(0); + try interp.import_envs.put(interp.allocator, first_import_idx, &module_a); // Verify we can retrieve module A's environment const found_env = interp.getModuleEnvForOrigin(module_a_ident); @@ -15652,10 +15659,11 @@ test "interpreter: transitive module method resolution (A imports B imports C)" try interp.module_ids.put(interp.allocator, module_c_ident, module_c_id); // Create Import.Idx entries for both modules - const import_b_idx: can.CIR.Import.Idx = @enumFromInt(0); - const import_c_idx: can.CIR.Import.Idx = @enumFromInt(1); - try interp.import_envs.put(interp.allocator, import_b_idx, &module_b); - try interp.import_envs.put(interp.allocator, import_c_idx, &module_c); + // Using sequential import indices for test purposes + const first_import_idx: can.CIR.Import.Idx = @enumFromInt(0); + const second_import_idx: can.CIR.Import.Idx = @enumFromInt(1); + try interp.import_envs.put(interp.allocator, first_import_idx, 
&module_b); + try interp.import_envs.put(interp.allocator, second_import_idx, &module_c); // Verify we can retrieve all module environments try std.testing.expectEqual(module_b.module_name_idx, interp.getModuleEnvForOrigin(module_b_ident).?.module_name_idx); diff --git a/src/eval/render_helpers.zig b/src/eval/render_helpers.zig index 5df9f9812b..d15fc0c574 100644 --- a/src/eval/render_helpers.zig +++ b/src/eval/render_helpers.zig @@ -130,8 +130,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. const count = tup_acc.getElementCount(); if (count > 0) { // Get tag index from the last element - // Use placeholder rt_var for tag discriminant (it's always an integer) - const tag_elem = try tup_acc.getElement(count - 1, @enumFromInt(0)); + // rt_var not needed for tag discriminant access (it's always an integer) + const tag_elem = try tup_acc.getElement(count - 1, undefined); if (tag_elem.layout.tag == .scalar and tag_elem.layout.data.scalar.tag == .int) { if (std.math.cast(usize, tag_elem.asI128())) |tag_idx| { tag_index = tag_idx; @@ -166,8 +166,8 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. try out.appendSlice(rendered); } else { // Multiple payloads: first element is a nested tuple containing all payload args - // Use placeholder rt_var for the tuple (we have the individual element types) - const payload_elem = try tup_acc.getElement(0, @enumFromInt(0)); + // rt_var undefined for tuple access (we have the individual element types) + const payload_elem = try tup_acc.getElement(0, undefined); if (payload_elem.layout.tag == .tuple) { var payload_tup = try payload_elem.asTuple(ctx.layout_store); var j: usize = 0; @@ -195,7 +195,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
const field_rt = try ctx.runtime_types.fresh(); const tag_field = try acc.getFieldByIndex(idx, field_rt); if (tag_field.layout.tag == .scalar and tag_field.layout.data.scalar.tag == .int) { - const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; + const tmp_sv = StackValue{ .layout = tag_field.layout, .ptr = tag_field.ptr, .is_initialized = true, .rt_var = undefined }; // Only treat as tag if value fits in usize (valid tag discriminants are small) if (std.math.cast(usize, tmp_sv.asI128())) |tag_idx| { tag_index = tag_idx; @@ -243,7 +243,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. .layout = tuple_layout, .ptr = payload.ptr, .is_initialized = payload.is_initialized, - .rt_var = @enumFromInt(0), + .rt_var = undefined, // not needed - type known from layout }; if (tuple_size == 0 or payload.ptr == null) { var j: usize = 0; @@ -355,7 +355,7 @@ pub fn renderValueRocWithType(ctx: *RenderCtx, value: StackValue, rt_var: types. 
.layout = tuple_layout, .ptr = payload_ptr, .is_initialized = true, - .rt_var = @enumFromInt(0), + .rt_var = undefined, // not needed - type known from layout }; var tup_acc = try tuple_value.asTuple(ctx.layout_store); var j: usize = 0; @@ -550,8 +550,8 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { const count = acc.getElementCount(); var i: usize = 0; while (i < count) : (i += 1) { - // Use placeholder rt_var (no type info available in this context) - const elem = try acc.getElement(i, @enumFromInt(0)); + // rt_var undefined (no type info available in this context) + const elem = try acc.getElement(i, undefined); const rendered = try renderValueRoc(ctx, elem); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -574,7 +574,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { while (i < len) : (i += 1) { if (roc_list.bytes) |bytes| { const elem_ptr: *anyopaque = @ptrCast(bytes + i * elem_size); - const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; + const elem_val = StackValue{ .layout = elem_layout, .ptr = elem_ptr, .is_initialized = true, .rt_var = undefined }; const rendered = try renderValueRoc(ctx, elem_val); defer gpa.free(rendered); try out.appendSlice(rendered); @@ -615,7 +615,7 @@ pub fn renderValueRoc(ctx: *RenderCtx, value: StackValue) ![]u8 { const field_layout = ctx.layout_store.getLayout(fld.layout); const base_ptr: [*]u8 = @ptrCast(@alignCast(value.ptr.?)); const field_ptr: *anyopaque = @ptrCast(base_ptr + offset); - const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true, .rt_var = @enumFromInt(0) }; + const field_val = StackValue{ .layout = field_layout, .ptr = field_ptr, .is_initialized = true, .rt_var = undefined }; const rendered = try renderValueRoc(ctx, field_val); defer gpa.free(rendered); try out.appendSlice(rendered); diff --git a/src/parse/NodeStore.zig b/src/parse/NodeStore.zig index 
98e82407b1..c48ba7b8bc 100644 --- a/src/parse/NodeStore.zig +++ b/src/parse/NodeStore.zig @@ -21,6 +21,9 @@ const sexpr = base.sexpr; /// packing optional data into u32 fields where 0 would otherwise be ambiguous. const OPTIONAL_VALUE_OFFSET: u32 = 1; +/// The root node is always stored at index 0 in the node list. +pub const root_node_idx: Node.List.Idx = @enumFromInt(0); + const NodeStore = @This(); gpa: std.mem.Allocator, @@ -166,7 +169,7 @@ pub fn addMalformed(store: *NodeStore, comptime T: type, reason: Diagnostic.Tag, /// Adds a file node to the store. pub fn addFile(store: *NodeStore, file: AST.File) std.mem.Allocator.Error!void { try store.extra_data.append(store.gpa, @intFromEnum(file.header)); - store.nodes.set(@enumFromInt(0), .{ + store.nodes.set(root_node_idx, .{ .tag = .root, .main_token = 0, .data = .{ .lhs = file.statements.span.start, .rhs = file.statements.span.len }, @@ -1019,7 +1022,7 @@ pub fn addTypeAnno(store: *NodeStore, anno: AST.TypeAnno) std.mem.Allocator.Erro /// TODO pub fn getFile(store: *const NodeStore) AST.File { - const node = store.nodes.get(@enumFromInt(0)); + const node = store.nodes.get(root_node_idx); const header_ed_idx = @as(usize, @intCast(node.data.lhs + node.data.rhs)); const header = store.extra_data.items[header_ed_idx]; return .{ diff --git a/src/parse/Parser.zig b/src/parse/Parser.zig index 73cb4046ae..7f97292e94 100644 --- a/src/parse/Parser.zig +++ b/src/parse/Parser.zig @@ -197,7 +197,7 @@ pub fn parseFile(self: *Parser) Error!void { self.store.emptyScratch(); try self.store.addFile(.{ - .header = @as(AST.Header.Idx, @enumFromInt(0)), + .header = undefined, // overwritten below after parseHeader() .statements = AST.Statement.Span{ .span = base.DataSpan.empty() }, .region = AST.TokenizedRegion.empty(), }); diff --git a/src/types/store.zig b/src/types/store.zig index 1e43d19dbe..49fd1382e2 100644 --- a/src/types/store.zig +++ b/src/types/store.zig @@ -1391,13 +1391,21 @@ test "SlotStore.Serialized roundtrip" { const 
gpa = std.testing.allocator; const CompactWriter = collections.CompactWriter; + // Named indices for test clarity + const desc_idx_100: DescStore.Idx = @enumFromInt(100); + const var_0: Var = @enumFromInt(0); + const desc_idx_200: DescStore.Idx = @enumFromInt(200); + const slot_idx_0: SlotStore.Idx = @enumFromInt(0); + const slot_idx_1: SlotStore.Idx = @enumFromInt(1); + const slot_idx_2: SlotStore.Idx = @enumFromInt(2); + var slot_store = try SlotStore.init(gpa, 4); defer slot_store.deinit(gpa); // Add some slots - _ = try slot_store.insert(gpa, .{ .root = @enumFromInt(100) }); - _ = try slot_store.insert(gpa, .{ .redirect = @enumFromInt(0) }); - _ = try slot_store.insert(gpa, .{ .root = @enumFromInt(200) }); + _ = try slot_store.insert(gpa, .{ .root = desc_idx_100 }); + _ = try slot_store.insert(gpa, .{ .redirect = var_0 }); + _ = try slot_store.insert(gpa, .{ .root = desc_idx_200 }); // Create temp file var tmp_dir = std.testing.tmpDir(.{}); @@ -1432,15 +1440,19 @@ test "SlotStore.Serialized roundtrip" { // Verify try std.testing.expectEqual(@as(u64, 3), deserialized.backing.len()); - try std.testing.expectEqual(Slot{ .root = @enumFromInt(100) }, deserialized.get(@enumFromInt(0))); - try std.testing.expectEqual(Slot{ .redirect = @enumFromInt(0) }, deserialized.get(@enumFromInt(1))); - try std.testing.expectEqual(Slot{ .root = @enumFromInt(200) }, deserialized.get(@enumFromInt(2))); + try std.testing.expectEqual(Slot{ .root = desc_idx_100 }, deserialized.get(slot_idx_0)); + try std.testing.expectEqual(Slot{ .redirect = var_0 }, deserialized.get(slot_idx_1)); + try std.testing.expectEqual(Slot{ .root = desc_idx_200 }, deserialized.get(slot_idx_2)); } test "DescStore.Serialized roundtrip" { const gpa = std.testing.allocator; const CompactWriter = collections.CompactWriter; + // Named indices for test clarity + const desc_idx_0: DescStore.Idx = @enumFromInt(0); + const desc_idx_1: DescStore.Idx = @enumFromInt(1); + var desc_store = try DescStore.init(gpa, 4); defer 
desc_store.deinit(gpa); @@ -1497,8 +1509,8 @@ test "DescStore.Serialized roundtrip" { // Verify try std.testing.expectEqual(@as(usize, 2), deserialized.backing.items.len); - try std.testing.expectEqual(desc1, deserialized.get(@enumFromInt(0))); - try std.testing.expectEqual(desc2, deserialized.get(@enumFromInt(1))); + try std.testing.expectEqual(desc1, deserialized.get(desc_idx_0)); + try std.testing.expectEqual(desc2, deserialized.get(desc_idx_1)); } test "Store.Serialized roundtrip" { diff --git a/test/snapshots/numeric_let_generalize_in_block.md b/test/snapshots/numeric_let_generalize_in_block.md index fb540ad0b4..a75ca45b6f 100644 --- a/test/snapshots/numeric_let_generalize_in_block.md +++ b/test/snapshots/numeric_let_generalize_in_block.md @@ -3,21 +3,6 @@ description=Numeric let-generalization inside nested block (rank > top_level) type=expr ~~~ - -# NOTES -This test demonstrates that numeric literals inside nested blocks (rank > top_level) -ARE let-generalized, allowing the same numeric variable to be used with different -concrete numeric types within the block. - -This is different from top-level behavior where numeric literals stay monomorphic -so that later usages can constrain them to a specific type. - -The key insight is that rank > top_level can occur in two situations: -1. Inside lambdas (e.g., `|a| a + 1`) -2. Inside nested blocks (e.g., `{ n = 42; ... }`) - -In both cases, numeric literals are generalized. - # SOURCE ~~~roc { @@ -31,3 +16,76 @@ In both cases, numeric literals are generalized. 
NIL # PROBLEMS NIL +# TOKENS +~~~zig +OpenCurly, +LowerIdent,OpAssign,Int, +LowerIdent,OpAssign,UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,CloseRound, +LowerIdent,OpAssign,UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,CloseRound, +UpperIdent,NoSpaceDotLowerIdent,NoSpaceOpenRound,LowerIdent,Comma,LowerIdent,CloseRound, +CloseCurly, +EndOfFile, +~~~ +# PARSE +~~~clojure +(e-block + (statements + (s-decl + (p-ident (raw "n")) + (e-int (raw "42"))) + (s-decl + (p-ident (raw "a")) + (e-apply + (e-ident (raw "I64.to_str")) + (e-ident (raw "n")))) + (s-decl + (p-ident (raw "b")) + (e-apply + (e-ident (raw "Dec.to_str")) + (e-ident (raw "n")))) + (e-apply + (e-ident (raw "Str.concat")) + (e-ident (raw "a")) + (e-ident (raw "b"))))) +~~~ +# FORMATTED +~~~roc +{ + n = 42 + a = I64.to_str(n) + b = Dec.to_str(n) + Str.concat(a, b) +} +~~~ +# CANONICALIZE +~~~clojure +(e-block + (s-let + (p-assign (ident "n")) + (e-num (value "42"))) + (s-let + (p-assign (ident "a")) + (e-call + (e-lookup-external + (builtin)) + (e-lookup-local + (p-assign (ident "n"))))) + (s-let + (p-assign (ident "b")) + (e-call + (e-lookup-external + (builtin)) + (e-lookup-local + (p-assign (ident "n"))))) + (e-call + (e-lookup-external + (builtin)) + (e-lookup-local + (p-assign (ident "a"))) + (e-lookup-local + (p-assign (ident "b"))))) +~~~ +# TYPES +~~~clojure +(expr (type "Str")) +~~~ diff --git a/test/snapshots/repl/numeric_multiple_diff_types.md b/test/snapshots/repl/numeric_multiple_diff_types.md index 7d287046e8..a41bc4869a 100644 --- a/test/snapshots/repl/numeric_multiple_diff_types.md +++ b/test/snapshots/repl/numeric_multiple_diff_types.md @@ -3,20 +3,6 @@ description=Numeric without annotation, multiple uses with different types (each use gets fresh type) type=repl ~~~ - -# NOTES -This test demonstrates that in the REPL, a numeric literal assigned without -annotation can be used with different concrete types in subsequent statements. 
- -Each use of `x` gets a fresh instantiation of the type, allowing it to be -constrained to I64 in one statement and Dec in another. - -This is the expected behavior for polymorphic numeric literals - each use -site gets its own copy of the type variable that can be independently constrained. - -Compare this to the non-REPL test `numeric_let_generalize_in_block.md` which -demonstrates the same polymorphic behavior inside nested blocks. - # SOURCE ~~~roc » x = 42 diff --git a/test/snapshots/repl/numeric_sum_to_str.md b/test/snapshots/repl/numeric_sum_to_str.md index ed79aeea5d..b34b17895a 100644 --- a/test/snapshots/repl/numeric_sum_to_str.md +++ b/test/snapshots/repl/numeric_sum_to_str.md @@ -3,14 +3,6 @@ description=Numeric sum then convert to I16 string type=repl ~~~ - -# NOTES -This test demonstrates numeric operations in the REPL where the final usage -constrains the type to I16. - -The numeric literals `a`, `b`, and `sum` are polymorphic until `I16.to_str(sum)` -constrains the result to I16. 
- # SOURCE ~~~roc » a = 4 From 4ce8ec086dbee04d21a940cfa573b1ecff47d4ca Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 11:30:21 -0500 Subject: [PATCH 22/64] Fix various things --- src/canonicalize/CIR.zig | 5 +- src/canonicalize/Can.zig | 16 +- src/canonicalize/NodeStore.zig | 6 +- src/canonicalize/TypeAnnotation.zig | 7 +- src/collections/safe_list.zig | 149 +++++++--------- src/eval/interpreter.zig | 72 ++++---- src/parse/NodeStore.zig | 2 +- src/types/store.zig | 16 +- src/types/types.zig | 1 + .../snapshots/issue/test_error_propagation.md | 28 ++- .../issue/underscore_error_propagation.md | 79 +++------ test/snapshots/issue/underscore_error_type.md | 166 ++++++++---------- test/snapshots/issue/usage_test.md | 38 ++-- .../pass/underscore_in_type_alias.md | 128 +------------- ...derscore_prefixed_param_in_nominal_type.md | 40 +---- ...nderscore_prefixed_param_in_opaque_type.md | 40 +---- test/snapshots/plume_package/Color.md | 79 +++++---- 17 files changed, 312 insertions(+), 560 deletions(-) diff --git a/src/canonicalize/CIR.zig b/src/canonicalize/CIR.zig index a218d71225..c20b84404f 100644 --- a/src/canonicalize/CIR.zig +++ b/src/canonicalize/CIR.zig @@ -721,7 +721,10 @@ pub fn fromF64(f: f64) ?RocDec { /// Represents an import statement in a module pub const Import = struct { - pub const Idx = enum(u32) { _ }; + pub const Idx = enum(u32) { + zero = 0, + _, + }; /// Sentinel value indicating unresolved import (max u32) pub const UNRESOLVED_MODULE: u32 = std.math.maxInt(u32); diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index db59dac387..0a7dffd0a3 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -460,8 +460,8 @@ fn processTypeDeclFirstPass( // Type was already introduced - check if it's a placeholder (anno = 0) or a real declaration const existing_stmt = self.env.store.getStatement(existing_stmt_idx); const is_placeholder = switch (existing_stmt) { - .s_alias_decl => |alias| 
@intFromEnum(alias.anno) == 0, - .s_nominal_decl => |nominal| @intFromEnum(nominal.anno) == 0, + .s_alias_decl => |alias| alias.anno == .placeholder, + .s_nominal_decl => |nominal| nominal.anno == .placeholder, else => false, }; @@ -485,13 +485,13 @@ fn processTypeDeclFirstPass( .alias => Statement{ .s_alias_decl = .{ .header = final_header_idx, - .anno = undefined, // overwritten below before use + .anno = .placeholder, // placeholder, will be overwritten }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = final_header_idx, - .anno = undefined, // overwritten below before use + .anno = .placeholder, // placeholder, will be overwritten .is_opaque = type_decl.kind == .@"opaque", }, }, @@ -505,13 +505,13 @@ fn processTypeDeclFirstPass( .alias => Statement{ .s_alias_decl = .{ .header = final_header_idx, - .anno = undefined, // overwritten below before use + .anno = .placeholder, // placeholder, will be overwritten }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = final_header_idx, - .anno = undefined, // overwritten below before use + .anno = .placeholder, // placeholder, will be overwritten .is_opaque = type_decl.kind == .@"opaque", }, }, @@ -638,13 +638,13 @@ fn introduceTypeNameOnly( .alias => Statement{ .s_alias_decl = .{ .header = header_idx, - .anno = undefined, // overwritten in Phase 1.7 before use + .anno = .placeholder, // placeholder, overwritten in Phase 1.7 }, }, .nominal, .@"opaque" => Statement{ .s_nominal_decl = .{ .header = header_idx, - .anno = undefined, // overwritten in Phase 1.7 before use + .anno = .placeholder, // placeholder, overwritten in Phase 1.7 .is_opaque = type_decl.kind == .@"opaque", }, }, diff --git a/src/canonicalize/NodeStore.zig b/src/canonicalize/NodeStore.zig index 0a100c51ca..b3b32420b1 100644 --- a/src/canonicalize/NodeStore.zig +++ b/src/canonicalize/NodeStore.zig @@ -3742,7 +3742,7 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify nodes try testing.expectEqual(@as(usize, 
1), deserialized.nodes.len()); // Named constant for the first node index in the deserialized data - const first_node_idx: Node.Idx = @enumFromInt(0); + const first_node_idx: Node.Idx = .zero; const retrieved_node = deserialized.nodes.get(first_node_idx); try testing.expectEqual(Node.Tag.expr_int, retrieved_node.tag); try testing.expectEqual(@as(u32, 0), retrieved_node.data_1); @@ -3757,7 +3757,7 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify regions try testing.expectEqual(@as(usize, 1), deserialized.regions.len()); // Named constant for the first region index in the deserialized data - const first_region_idx: Region.Idx = @enumFromInt(0); + const first_region_idx: Region.Idx = .zero; const retrieved_region = deserialized.regions.get(first_region_idx); try testing.expectEqual(region.start.offset, retrieved_region.start.offset); try testing.expectEqual(region.end.offset, retrieved_region.end.offset); @@ -3849,7 +3849,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { try testing.expectEqual(@as(usize, 3), deserialized.nodes.len()); // Named constants for accessing deserialized nodes at specific indices - const first_node_idx: Node.Idx = @enumFromInt(0); + const first_node_idx: Node.Idx = .zero; const second_node_idx: Node.Idx = @enumFromInt(1); const third_node_idx: Node.Idx = @enumFromInt(2); diff --git a/src/canonicalize/TypeAnnotation.zig b/src/canonicalize/TypeAnnotation.zig index e1da457671..e8fcd025a4 100644 --- a/src/canonicalize/TypeAnnotation.zig +++ b/src/canonicalize/TypeAnnotation.zig @@ -96,7 +96,12 @@ pub const TypeAnno = union(enum) { diagnostic: CIR.Diagnostic.Idx, // The error that occurred }, - pub const Idx = enum(u32) { _ }; + pub const Idx = enum(u32) { + /// Placeholder value indicating the anno hasn't been set yet. + /// Used during forward reference resolution. 
+ placeholder = 0, + _, + }; pub const Span = extern struct { span: DataSpan }; pub fn pushToSExprTree(self: *const @This(), ir: *const ModuleEnv, tree: *SExprTree, type_anno_idx: TypeAnno.Idx) std.mem.Allocator.Error!void { diff --git a/src/collections/safe_list.zig b/src/collections/safe_list.zig index 5801b9b272..ba0c3ce1e5 100644 --- a/src/collections/safe_list.zig +++ b/src/collections/safe_list.zig @@ -101,6 +101,8 @@ pub fn SafeList(comptime T: type) type { /// An index for an item in the list. pub const Idx = enum(u32) { + /// The first valid index in the list. + zero = 0, _, /// Get the raw u32 value for storage @@ -372,11 +374,10 @@ pub fn SafeList(comptime T: type) type { /// Iterate over all items in this list. pub fn iter(self: *const SafeList(T)) Iterator { - const first_idx: Idx = @enumFromInt(0); return Iterator{ .array = self, .len = self.len(), - .current = first_idx, + .current = .zero, }; } }; @@ -980,10 +981,9 @@ test "SafeList edge cases serialization" { const serialized_ptr = @as(*Container.Serialized, @ptrCast(@alignCast(buffer.ptr))); const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); - const first_idx: SafeList(u8).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 0), deserialized.list_u32.len()); try testing.expectEqual(@as(usize, 1), deserialized.list_u8.len()); - try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(first_idx).*); + try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(.zero).*); } } @@ -1068,15 +1068,12 @@ test "SafeList CompactWriter complete roundtrip example" { const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); // Step 8: Verify data is accessible and correct - const first_idx: SafeList(u32).Idx = @enumFromInt(0); - const second_idx: SafeList(u32).Idx = @enumFromInt(1); - const third_idx: SafeList(u32).Idx = @enumFromInt(2); - const fourth_idx: SafeList(u32).Idx = @enumFromInt(3); + const Idx = 
SafeList(u32).Idx; try testing.expectEqual(@as(usize, 4), deserialized.len()); - try testing.expectEqual(@as(u32, 100), deserialized.get(first_idx).*); - try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).*); - try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).*); - try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).*); + try testing.expectEqual(@as(u32, 100), deserialized.get(.zero).*); + try testing.expectEqual(@as(u32, 200), deserialized.get(@as(Idx, @enumFromInt(1))).*); + try testing.expectEqual(@as(u32, 300), deserialized.get(@as(Idx, @enumFromInt(2))).*); + try testing.expectEqual(@as(u32, 400), deserialized.get(@as(Idx, @enumFromInt(3))).*); } test "SafeList CompactWriter multiple lists with different alignments" { @@ -1179,13 +1176,11 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 3 * @sizeOf(u8); - const u8_first_idx: SafeList(u8).Idx = @enumFromInt(0); - const u8_second_idx: SafeList(u8).Idx = @enumFromInt(1); - const u8_third_idx: SafeList(u8).Idx = @enumFromInt(2); + const U8Idx = SafeList(u8).Idx; try testing.expectEqual(@as(usize, 3), deser_u8.len()); - try testing.expectEqual(@as(u8, 10), deser_u8.get(u8_first_idx).*); - try testing.expectEqual(@as(u8, 20), deser_u8.get(u8_second_idx).*); - try testing.expectEqual(@as(u8, 30), deser_u8.get(u8_third_idx).*); + try testing.expectEqual(@as(u8, 10), deser_u8.get(.zero).*); + try testing.expectEqual(@as(u8, 20), deser_u8.get(@as(U8Idx, @enumFromInt(1))).*); + try testing.expectEqual(@as(u8, 30), deser_u8.get(@as(U8Idx, @enumFromInt(2))).*); // 2. 
Deserialize u16 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized)); @@ -1196,11 +1191,10 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u16)); offset += 2 * @sizeOf(u16); - const u16_first_idx: SafeList(u16).Idx = @enumFromInt(0); - const u16_second_idx: SafeList(u16).Idx = @enumFromInt(1); + const U16Idx = SafeList(u16).Idx; try testing.expectEqual(@as(usize, 2), deser_u16.len()); - try testing.expectEqual(@as(u16, 1000), deser_u16.get(u16_first_idx).*); - try testing.expectEqual(@as(u16, 2000), deser_u16.get(u16_second_idx).*); + try testing.expectEqual(@as(u16, 1000), deser_u16.get(.zero).*); + try testing.expectEqual(@as(u16, 2000), deser_u16.get(@as(U16Idx, @enumFromInt(1))).*); // 3. Deserialize u32 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized)); @@ -1211,15 +1205,12 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u32)); offset += 4 * @sizeOf(u32); - const u32_first_idx: SafeList(u32).Idx = @enumFromInt(0); - const u32_second_idx: SafeList(u32).Idx = @enumFromInt(1); - const u32_third_idx: SafeList(u32).Idx = @enumFromInt(2); - const u32_fourth_idx: SafeList(u32).Idx = @enumFromInt(3); + const U32Idx = SafeList(u32).Idx; try testing.expectEqual(@as(usize, 4), deser_u32.len()); - try testing.expectEqual(@as(u32, 100_000), deser_u32.get(u32_first_idx).*); - try testing.expectEqual(@as(u32, 200_000), deser_u32.get(u32_second_idx).*); - try testing.expectEqual(@as(u32, 300_000), deser_u32.get(u32_third_idx).*); - try testing.expectEqual(@as(u32, 400_000), deser_u32.get(u32_fourth_idx).*); + try testing.expectEqual(@as(u32, 100_000), deser_u32.get(.zero).*); + try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@as(U32Idx, @enumFromInt(1))).*); + try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@as(U32Idx, 
@enumFromInt(2))).*); + try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@as(U32Idx, @enumFromInt(3))).*); // 4. Deserialize u64 list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized)); @@ -1230,26 +1221,24 @@ test "SafeList CompactWriter multiple lists with different alignments" { offset = std.mem.alignForward(usize, offset, @alignOf(u64)); offset += 2 * @sizeOf(u64); - const u64_first_idx: SafeList(u64).Idx = @enumFromInt(0); - const u64_second_idx: SafeList(u64).Idx = @enumFromInt(1); + const U64Idx = SafeList(u64).Idx; try testing.expectEqual(@as(usize, 2), deser_u64.len()); - try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(u64_first_idx).*); - try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(u64_second_idx).*); + try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(.zero).*); + try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@as(U64Idx, @enumFromInt(1))).*); // 5. Deserialize struct list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(AlignedStruct).Serialized)); const s_struct = @as(*SafeList(AlignedStruct).Serialized, @ptrCast(@alignCast(buffer.ptr + offset))); const deser_struct = s_struct.deserialize(@as(i64, @intCast(base_addr))); - const struct_first_idx: SafeList(AlignedStruct).Idx = @enumFromInt(0); - const struct_second_idx: SafeList(AlignedStruct).Idx = @enumFromInt(1); + const StructIdx = SafeList(AlignedStruct).Idx; try testing.expectEqual(@as(usize, 2), deser_struct.len()); - const item0 = deser_struct.get(struct_first_idx); + const item0 = deser_struct.get(.zero); try testing.expectEqual(@as(u32, 42), item0.x); try testing.expectEqual(@as(u64, 1337), item0.y); try testing.expectEqual(@as(u8, 255), item0.z); - const item1 = deser_struct.get(struct_second_idx); + const item1 = deser_struct.get(@as(StructIdx, @enumFromInt(1))); try testing.expectEqual(@as(u32, 99), item1.x); try testing.expectEqual(@as(u64, 9999), item1.y); try 
testing.expectEqual(@as(u8, 128), item1.z); @@ -1355,13 +1344,11 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 3; // 3 u8 elements - const d1_first_idx: SafeList(u8).Idx = @enumFromInt(0); - const d1_second_idx: SafeList(u8).Idx = @enumFromInt(1); - const d1_third_idx: SafeList(u8).Idx = @enumFromInt(2); + const D1Idx = SafeList(u8).Idx; try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 1), d1.get(d1_first_idx).*); - try testing.expectEqual(@as(u8, 2), d1.get(d1_second_idx).*); - try testing.expectEqual(@as(u8, 3), d1.get(d1_third_idx).*); + try testing.expectEqual(@as(u8, 1), d1.get(.zero).*); + try testing.expectEqual(@as(u8, 2), d1.get(@as(D1Idx, @enumFromInt(1))).*); + try testing.expectEqual(@as(u8, 3), d1.get(@as(D1Idx, @enumFromInt(2))).*); // 2. Second list - u64 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u64).Serialized)); @@ -1371,11 +1358,10 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u64)); offset += 2 * @sizeOf(u64); // 2 u64 elements - const d2_first_idx: SafeList(u64).Idx = @enumFromInt(0); - const d2_second_idx: SafeList(u64).Idx = @enumFromInt(1); + const D2Idx = SafeList(u64).Idx; try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u64, 1_000_000), d2.get(d2_first_idx).*); - try testing.expectEqual(@as(u64, 2_000_000), d2.get(d2_second_idx).*); + try testing.expectEqual(@as(u64, 1_000_000), d2.get(.zero).*); + try testing.expectEqual(@as(u64, 2_000_000), d2.get(@as(D2Idx, @enumFromInt(1))).*); // 3. 
Third list - u16 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u16).Serialized)); @@ -1385,24 +1371,20 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { offset = std.mem.alignForward(usize, offset, @alignOf(u16)); offset += 4 * @sizeOf(u16); // 4 u16 elements - const d3_first_idx: SafeList(u16).Idx = @enumFromInt(0); - const d3_second_idx: SafeList(u16).Idx = @enumFromInt(1); - const d3_third_idx: SafeList(u16).Idx = @enumFromInt(2); - const d3_fourth_idx: SafeList(u16).Idx = @enumFromInt(3); + const D3Idx = SafeList(u16).Idx; try testing.expectEqual(@as(usize, 4), d3.len()); - try testing.expectEqual(@as(u16, 100), d3.get(d3_first_idx).*); - try testing.expectEqual(@as(u16, 200), d3.get(d3_second_idx).*); - try testing.expectEqual(@as(u16, 300), d3.get(d3_third_idx).*); - try testing.expectEqual(@as(u16, 400), d3.get(d3_fourth_idx).*); + try testing.expectEqual(@as(u16, 100), d3.get(.zero).*); + try testing.expectEqual(@as(u16, 200), d3.get(@as(D3Idx, @enumFromInt(1))).*); + try testing.expectEqual(@as(u16, 300), d3.get(@as(D3Idx, @enumFromInt(2))).*); + try testing.expectEqual(@as(u16, 400), d3.get(@as(D3Idx, @enumFromInt(3))).*); // 4. 
Fourth list - u32 offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(u32).Serialized)); const s4 = @as(*SafeList(u32).Serialized, @ptrCast(@alignCast(buffer.ptr + offset))); const d4 = s4.deserialize(@as(i64, @intCast(base))); - const d4_first_idx: SafeList(u32).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 1), d4.len()); - try testing.expectEqual(@as(u32, 42), d4.get(d4_first_idx).*); + try testing.expectEqual(@as(u32, 42), d4.get(.zero).*); } test "SafeList CompactWriter brute-force alignment verification" { @@ -1522,9 +1504,8 @@ test "SafeList CompactWriter brute-force alignment verification" { offset = std.mem.alignForward(usize, offset, @alignOf(u8)); offset += 1; // 1 u8 element - const u8_first_idx: SafeList(u8).Idx = @enumFromInt(0); try testing.expectEqual(@as(usize, 1), d_u8.len()); - try testing.expectEqual(@as(u8, 42), d_u8.get(u8_first_idx).*); + try testing.expectEqual(@as(u8, 42), d_u8.get(.zero).*); // Second list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(T).Serialized)); @@ -1599,28 +1580,28 @@ test "SafeMultiList CompactWriter roundtrip with file" { const deserialized = serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); // Verify the data - const first_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(0); - const second_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(1); - const third_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(2); - const fourth_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(3); + const Idx = SafeMultiList(TestStruct).Idx; try testing.expectEqual(@as(usize, 4), deserialized.len()); // Verify all the data - try testing.expectEqual(@as(u32, 100), deserialized.get(first_idx).id); - try testing.expectEqual(@as(u64, 1000), deserialized.get(first_idx).value); - try testing.expectEqual(true, deserialized.get(first_idx).flag); - try testing.expectEqual(@as(u8, 10), deserialized.get(first_idx).data); + try testing.expectEqual(@as(u32, 100), 
deserialized.get(.zero).id); + try testing.expectEqual(@as(u64, 1000), deserialized.get(.zero).value); + try testing.expectEqual(true, deserialized.get(.zero).flag); + try testing.expectEqual(@as(u8, 10), deserialized.get(.zero).data); + const second_idx: Idx = @enumFromInt(1); try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).id); try testing.expectEqual(@as(u64, 2000), deserialized.get(second_idx).value); try testing.expectEqual(false, deserialized.get(second_idx).flag); try testing.expectEqual(@as(u8, 20), deserialized.get(second_idx).data); + const third_idx: Idx = @enumFromInt(2); try testing.expectEqual(@as(u32, 300), deserialized.get(third_idx).id); try testing.expectEqual(@as(u64, 3000), deserialized.get(third_idx).value); try testing.expectEqual(true, deserialized.get(third_idx).flag); try testing.expectEqual(@as(u8, 30), deserialized.get(third_idx).data); + const fourth_idx: Idx = @enumFromInt(3); try testing.expectEqual(@as(u32, 400), deserialized.get(fourth_idx).id); try testing.expectEqual(@as(u64, 4000), deserialized.get(fourth_idx).value); try testing.expectEqual(false, deserialized.get(fourth_idx).flag); @@ -1754,35 +1735,31 @@ test "SafeMultiList CompactWriter multiple lists different alignments" { const base = @as(i64, @intCast(@intFromPtr(buffer.ptr))); // Deserialize list1 (at offset1) - const d1_first_idx: SafeMultiList(Type1).Idx = @enumFromInt(0); - const d1_second_idx: SafeMultiList(Type1).Idx = @enumFromInt(1); - const d1_third_idx: SafeMultiList(Type1).Idx = @enumFromInt(2); + const D1Idx = SafeMultiList(Type1).Idx; const d1_serialized = @as(*SafeMultiList(Type1).Serialized, @ptrCast(@alignCast(buffer.ptr + offset1))); const d1 = d1_serialized.deserialize(base); try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 10), d1.get(d1_first_idx).a); - try testing.expectEqual(@as(u16, 100), d1.get(d1_first_idx).b); - try testing.expectEqual(@as(u8, 20), d1.get(d1_second_idx).a); - try 
testing.expectEqual(@as(u16, 200), d1.get(d1_second_idx).b); - try testing.expectEqual(@as(u8, 30), d1.get(d1_third_idx).a); - try testing.expectEqual(@as(u16, 300), d1.get(d1_third_idx).b); + try testing.expectEqual(@as(u8, 10), d1.get(.zero).a); + try testing.expectEqual(@as(u16, 100), d1.get(.zero).b); + try testing.expectEqual(@as(u8, 20), d1.get(@as(D1Idx, @enumFromInt(1))).a); + try testing.expectEqual(@as(u16, 200), d1.get(@as(D1Idx, @enumFromInt(1))).b); + try testing.expectEqual(@as(u8, 30), d1.get(@as(D1Idx, @enumFromInt(2))).a); + try testing.expectEqual(@as(u16, 300), d1.get(@as(D1Idx, @enumFromInt(2))).b); // Deserialize list2 (at offset2) - const d2_first_idx: SafeMultiList(Type2).Idx = @enumFromInt(0); const d2_serialized = @as(*SafeMultiList(Type2).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u32, 1000), d2.get(d2_first_idx).x); - try testing.expectEqual(@as(u64, 10000), d2.get(d2_first_idx).y); + try testing.expectEqual(@as(u32, 1000), d2.get(.zero).x); + try testing.expectEqual(@as(u64, 10000), d2.get(.zero).y); // Deserialize list3 (at offset3) - const d3_first_idx: SafeMultiList(Type3).Idx = @enumFromInt(0); const d3_serialized = @as(*SafeMultiList(Type3).Serialized, @ptrCast(@alignCast(buffer.ptr + offset3))); const d3 = d3_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d3.len()); - try testing.expectEqual(@as(u64, 999), d3.get(d3_first_idx).id); - try testing.expectEqual(@as(u8, 42), d3.get(d3_first_idx).data); - try testing.expectEqual(true, d3.get(d3_first_idx).flag); + try testing.expectEqual(@as(u64, 999), d3.get(.zero).id); + try testing.expectEqual(@as(u8, 42), d3.get(.zero).data); + try testing.expectEqual(true, d3.get(.zero).flag); } test "SafeMultiList CompactWriter brute-force alignment verification" { @@ -1872,7 +1849,7 @@ test "SafeMultiList CompactWriter brute-force alignment 
verification" { const d2_serialized = @as(*SafeMultiList(TestType).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); if (length > 0) { - const d2_first_idx: SafeMultiList(TestType).Idx = @enumFromInt(0); + const d2_first_idx: SafeMultiList(TestType).Idx = .zero; try testing.expectEqual(@as(usize, 1), d2.len()); try testing.expectEqual(@as(u8, 255), d2.get(d2_first_idx).a); try testing.expectEqual(@as(u32, 999999), d2.get(d2_first_idx).b); @@ -2345,7 +2322,7 @@ test "SafeMultiList.Serialized roundtrip" { try testing.expectEqual(@as(u8, 64), c_values[2]); // Check get() method - const first_idx: SafeMultiList(TestStruct).Idx = @enumFromInt(0); + const first_idx: SafeMultiList(TestStruct).Idx = .zero; const item1 = list.get(first_idx); try testing.expectEqual(@as(u32, 100), item1.a); try testing.expectEqual(@as(f32, 1.5), item1.b); diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index c5cc2f7383..303b096896 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -185,16 +185,13 @@ pub const Interpreter = struct { const Binding = struct { pattern_idx: can.CIR.Pattern.Idx, value: StackValue, - expr_idx: can.CIR.Expr.Idx, + /// Optional expression index. Null for bindings that don't have an associated + /// expression (e.g., function parameters, method parameters, etc. where the + /// binding comes from a pattern match rather than a def expression). + expr_idx: ?can.CIR.Expr.Idx, /// The source module environment where this binding was created. /// Used to distinguish bindings from different modules with the same pattern_idx. source_env: *const can.ModuleEnv, - - /// Sentinel value for bindings that don't have an associated expression. - /// Used for function parameters, method parameters, etc. where the binding - /// comes from a pattern match rather than a def expression. - /// The code in evalLookupLocal checks for this to skip expr-based logic. 
- const no_expr_idx: can.CIR.Expr.Idx = @enumFromInt(0); }; const DefInProgress = struct { pattern_idx: can.CIR.Pattern.Idx, @@ -606,7 +603,7 @@ pub const Interpreter = struct { // getElement expects original index and converts to sorted internally const arg_value = try args_accessor.getElement(j, param_rt_vars[j]); // expr_idx not used in this context - binding happens during function call setup - const matched = try self.patternMatchesBind(params[j], arg_value, param_rt_vars[j], roc_ops, &temp_binds, Binding.no_expr_idx); + const matched = try self.patternMatchesBind(params[j], arg_value, param_rt_vars[j], roc_ops, &temp_binds, null); if (!matched) return error.TypeMismatch; } } @@ -930,13 +927,13 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -6697,7 +6694,7 @@ pub const Interpreter = struct { self.bindings.append(.{ .pattern_idx = params[0], .value = copied_value, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for inspect method parameter bindings + .expr_idx = null, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }) catch return null; @@ -6851,7 +6848,7 @@ pub const Interpreter = struct { value_rt_var: types.Var, roc_ops: *RocOps, out_binds: *std.array_list.AlignedManaged(Binding, null), - expr_idx: can.CIR.Expr.Idx, + expr_idx: ?can.CIR.Expr.Idx, ) !bool { const pat = self.env.store.getPattern(pattern_idx); switch (pat) { @@ -9542,9 +9539,8 @@ pub const Interpreter = 
struct { const b = self.bindings.items[i]; if (b.pattern_idx == lookup.pattern_idx) { // Found the binding - recursively check what it points to - const expr_idx_int: u32 = @intFromEnum(b.expr_idx); - if (expr_idx_int != 0) { - return self.findRootNumericLiteral(b.expr_idx, b.source_env); + if (b.expr_idx) |binding_expr_idx| { + return self.findRootNumericLiteral(binding_expr_idx, b.source_env); } return null; } @@ -9594,9 +9590,8 @@ pub const Interpreter = struct { i -= 1; const b = self.bindings.items[i]; if (b.pattern_idx == lookup.pattern_idx) { - const expr_idx_int: u32 = @intFromEnum(b.expr_idx); - if (expr_idx_int != 0) { - try self.setupFlexContextForNumericExpr(b.expr_idx, b.source_env, target_rt_var); + if (b.expr_idx) |binding_expr_idx| { + try self.setupFlexContextForNumericExpr(binding_expr_idx, b.source_env, target_rt_var); } return; } @@ -11531,9 +11526,8 @@ pub const Interpreter = struct { (b.source_env.module_name_idx == self.env.module_name_idx); if (b.pattern_idx == lookup.pattern_idx and same_module) { // Check if this binding came from an e_anno_only expression - const expr_idx_int: u32 = @intFromEnum(b.expr_idx); - if (expr_idx_int != 0) { - const binding_expr = self.env.store.getExpr(b.expr_idx); + if (b.expr_idx) |expr_idx| { + const binding_expr = self.env.store.getExpr(expr_idx); if (binding_expr == .e_anno_only and b.value.layout.tag != .closure) { self.triggerCrash("This value has no implementation. 
It is only a type annotation for now.", false, roc_ops); return error.Crash; @@ -11555,7 +11549,7 @@ pub const Interpreter = struct { const layouts_differ = !cached_layout.eql(expected_layout); if (layouts_differ) { // Check if the binding expression is a numeric literal (direct or via lookup) - const root_numeric_expr = self.findRootNumericLiteral(b.expr_idx, b.source_env); + const root_numeric_expr = self.findRootNumericLiteral(expr_idx, b.source_env); if (root_numeric_expr) |root_expr_idx| { // Re-evaluate the numeric expression with the expected type. // Set up flex_type_context so flex vars in the expression @@ -13101,7 +13095,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = value, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for inspect method parameter bindings + .expr_idx = null, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }); @@ -13494,7 +13488,7 @@ pub const Interpreter = struct { // patternMatchesBind borrows the value and creates copies for bindings, so we need to // decref the original arg_value after successful binding // expr_idx not used for function parameter bindings - if (!try self.patternMatchesBind(param, arg_values[idx], param_rt_var, roc_ops, &self.bindings, Binding.no_expr_idx)) { + if (!try self.patternMatchesBind(param, arg_values[idx], param_rt_var, roc_ops, &self.bindings, null)) { // Pattern match failed - cleanup and error self.env = saved_env; _ = self.active_closures.pop(); @@ -13700,7 +13694,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = operand, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for unary operator method parameter bindings + .expr_idx = null, // expr_idx not used for unary operator method parameter bindings .source_env = self.env, }); @@ -14104,14 +14098,14 @@ pub const Interpreter = struct { // the bindings retain their own references. 
// Use effective rt_vars from values if available. // expr_idx not used for binary operator method parameter bindings - if (!try self.patternMatchesBind(params[0], lhs, effective_receiver_rt_var, roc_ops, &self.bindings, Binding.no_expr_idx)) { + if (!try self.patternMatchesBind(params[0], lhs, effective_receiver_rt_var, roc_ops, &self.bindings, null)) { self.flex_type_context.deinit(); self.flex_type_context = saved_flex_type_context; self.env = saved_env; _ = self.active_closures.pop(); return error.TypeMismatch; } - if (!try self.patternMatchesBind(params[1], rhs, rhs.rt_var, roc_ops, &self.bindings, Binding.no_expr_idx)) { + if (!try self.patternMatchesBind(params[1], rhs, rhs.rt_var, roc_ops, &self.bindings, null)) { // Clean up the first binding we added self.trimBindingList(&self.bindings, saved_bindings_len, roc_ops); self.flex_type_context.deinit(); @@ -14296,7 +14290,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = receiver_value, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for field access method parameter bindings + .expr_idx = null, // expr_idx not used for field access method parameter bindings .source_env = self.env, }); @@ -14528,7 +14522,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = receiver_value, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for method call parameter bindings + .expr_idx = null, // expr_idx not used for method call parameter bindings .source_env = self.env, }); @@ -14537,7 +14531,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[1 + idx], .value = arg, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for method call parameter bindings + .expr_idx = null, // expr_idx not used for method call parameter bindings .source_env = self.env, }); } @@ -14612,7 +14606,7 @@ pub const Interpreter = struct { // Bind the pattern const loop_bindings_start = 
self.bindings.items.len; // expr_idx not used for for-loop pattern bindings - if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, Binding.no_expr_idx)) { + if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, null)) { elem_value.decref(&self.runtime_layout_store, roc_ops); list_value.decref(&self.runtime_layout_store, roc_ops); return error.TypeMismatch; @@ -14681,7 +14675,7 @@ pub const Interpreter = struct { // Bind the pattern const new_loop_bindings_start = self.bindings.items.len; // expr_idx not used for for-loop pattern bindings - if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, Binding.no_expr_idx)) { + if (!try self.patternMatchesBind(fl.pattern, elem_value, fl.patt_rt_var, roc_ops, &self.bindings, null)) { elem_value.decref(&self.runtime_layout_store, roc_ops); fl.list_value.decref(&self.runtime_layout_store, roc_ops); return error.TypeMismatch; @@ -14933,7 +14927,7 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = params[0], .value = value, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for inspect method parameter bindings + .expr_idx = null, // expr_idx not used for inspect method parameter bindings .source_env = self.env, }); @@ -15107,13 +15101,13 @@ pub const Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -15189,13 +15183,13 @@ pub const 
Interpreter = struct { try self.bindings.append(.{ .pattern_idx = cmp_params[0], .value = arg0, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); try self.bindings.append(.{ .pattern_idx = cmp_params[1], .value = arg1, - .expr_idx = Binding.no_expr_idx, // expr_idx not used for comparison function parameter bindings + .expr_idx = null, // expr_idx not used for comparison function parameter bindings .source_env = self.env, }); @@ -15605,7 +15599,7 @@ test "interpreter: cross-module method resolution should find methods in origin // Create an Import.Idx for module A // Using first import index for test purposes - const first_import_idx: can.CIR.Import.Idx = @enumFromInt(0); + const first_import_idx: can.CIR.Import.Idx = .zero; try interp.import_envs.put(interp.allocator, first_import_idx, &module_a); // Verify we can retrieve module A's environment @@ -15668,7 +15662,7 @@ test "interpreter: transitive module method resolution (A imports B imports C)" // Create Import.Idx entries for both modules // Using sequential import indices for test purposes - const first_import_idx: can.CIR.Import.Idx = @enumFromInt(0); + const first_import_idx: can.CIR.Import.Idx = .zero; const second_import_idx: can.CIR.Import.Idx = @enumFromInt(1); try interp.import_envs.put(interp.allocator, first_import_idx, &module_b); try interp.import_envs.put(interp.allocator, second_import_idx, &module_c); diff --git a/src/parse/NodeStore.zig b/src/parse/NodeStore.zig index c48ba7b8bc..a2db6b2163 100644 --- a/src/parse/NodeStore.zig +++ b/src/parse/NodeStore.zig @@ -22,7 +22,7 @@ const sexpr = base.sexpr; const OPTIONAL_VALUE_OFFSET: u32 = 1; /// The root node is always stored at index 0 in the node list. 
-pub const root_node_idx: Node.List.Idx = @enumFromInt(0); +pub const root_node_idx: Node.List.Idx = .zero; const NodeStore = @This(); diff --git a/src/types/store.zig b/src/types/store.zig index 17a43521cc..5bdd13c4df 100644 --- a/src/types/store.zig +++ b/src/types/store.zig @@ -1013,7 +1013,10 @@ const SlotStore = struct { } /// A type-safe index into the store - const Idx = enum(u32) { _ }; + const Idx = enum(u32) { + zero = 0, + _, + }; }; /// Represents a store of descriptors @@ -1116,7 +1119,10 @@ const DescStore = struct { /// A type-safe index into the store /// This type is made public below - const Idx = enum(u32) { _ }; + const Idx = enum(u32) { + zero = 0, + _, + }; }; /// An index into the desc store @@ -1394,9 +1400,9 @@ test "SlotStore.Serialized roundtrip" { // Named indices for test clarity const desc_idx_100: DescStore.Idx = @enumFromInt(100); - const var_0: Var = @enumFromInt(0); + const var_0: Var = .zero; const desc_idx_200: DescStore.Idx = @enumFromInt(200); - const slot_idx_0: SlotStore.Idx = @enumFromInt(0); + const slot_idx_0: SlotStore.Idx = .zero; const slot_idx_1: SlotStore.Idx = @enumFromInt(1); const slot_idx_2: SlotStore.Idx = @enumFromInt(2); @@ -1451,7 +1457,7 @@ test "DescStore.Serialized roundtrip" { const CompactWriter = collections.CompactWriter; // Named indices for test clarity - const desc_idx_0: DescStore.Idx = @enumFromInt(0); + const desc_idx_0: DescStore.Idx = .zero; const desc_idx_1: DescStore.Idx = @enumFromInt(1); var desc_store = try DescStore.init(gpa, 4); diff --git a/src/types/types.zig b/src/types/types.zig index ec3ccab2ea..b59ee501ad 100644 --- a/src/types/types.zig +++ b/src/types/types.zig @@ -33,6 +33,7 @@ test { /// A type variable pub const Var = enum(u32) { + zero = 0, _, /// A safe list of type variables diff --git a/test/snapshots/issue/test_error_propagation.md b/test/snapshots/issue/test_error_propagation.md index 70094d1318..027dac3f3e 100644 --- a/test/snapshots/issue/test_error_propagation.md +++ 
b/test/snapshots/issue/test_error_propagation.md @@ -14,7 +14,7 @@ value = "test" ~~~ # EXPECTED UNDERSCORE IN TYPE ALIAS - test_error_propagation.md:1:1:1:1 -TYPE REDECLARED - test_error_propagation.md:3:1:3:21 +TYPE MISMATCH - test_error_propagation.md:6:9:6:15 # PROBLEMS **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -27,23 +27,19 @@ BadBase := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _GoodAlias_ is being redeclared. - -The redeclaration is here: -**test_error_propagation.md:3:1:3:21:** +**TYPE MISMATCH** +This expression is used in an unexpected way: +**test_error_propagation.md:6:9:6:15:** ```roc -GoodAlias := BadBase +value = "test" ``` -^^^^^^^^^^^^^^^^^^^^ + ^^^^^^ -But _GoodAlias_ was already declared here: -**test_error_propagation.md:3:1:3:21:** -```roc -GoodAlias := BadBase -``` -^^^^^^^^^^^^^^^^^^^^ +It has the type: + _Str_ +But the type annotation says it should have the type: + _GoodAlias_ # TOKENS ~~~zig @@ -97,12 +93,12 @@ NO CHANGE ~~~clojure (inferred-types (defs - (patt (type "Str"))) + (patt (type "Error"))) (type_decls (nominal (type "BadBase") (ty-header (name "BadBase"))) (nominal (type "GoodAlias") (ty-header (name "GoodAlias")))) (expressions - (expr (type "Str")))) + (expr (type "Error")))) ~~~ diff --git a/test/snapshots/issue/underscore_error_propagation.md b/test/snapshots/issue/underscore_error_propagation.md index f892fa258c..b3a2a1be59 100644 --- a/test/snapshots/issue/underscore_error_propagation.md +++ b/test/snapshots/issue/underscore_error_propagation.md @@ -21,9 +21,8 @@ goodValue = "test" ~~~ # EXPECTED UNDERSCORE IN TYPE ALIAS - underscore_error_propagation.md:1:1:1:1 -TYPE REDECLARED - underscore_error_propagation.md:3:1:3:22 -TYPE REDECLARED - underscore_error_propagation.md:8:1:8:16 -TYPE 
REDECLARED - underscore_error_propagation.md:10:1:10:24 +TYPE MISMATCH - underscore_error_propagation.md:6:9:6:15 +TYPE MISMATCH - underscore_error_propagation.md:13:13:13:19 # PROBLEMS **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -36,59 +35,33 @@ BadBase := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _BadDerived_ is being redeclared. - -The redeclaration is here: -**underscore_error_propagation.md:3:1:3:22:** +**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_propagation.md:6:9:6:15:** ```roc -BadDerived := BadBase +value = "test" ``` -^^^^^^^^^^^^^^^^^^^^^ + ^^^^^^ -But _BadDerived_ was already declared here: -**underscore_error_propagation.md:3:1:3:22:** +It has the type: + _Str_ + +But the type annotation says it should have the type: + _BadDerived_ + +**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_propagation.md:13:13:13:19:** ```roc -BadDerived := BadBase +goodValue = "test" ``` -^^^^^^^^^^^^^^^^^^^^^ + ^^^^^^ +It has the type: + _Str_ -**TYPE REDECLARED** -The type _GoodBase_ is being redeclared. - -The redeclaration is here: -**underscore_error_propagation.md:8:1:8:16:** -```roc -GoodBase := Str -``` -^^^^^^^^^^^^^^^ - -But _GoodBase_ was already declared here: -**underscore_error_propagation.md:8:1:8:16:** -```roc -GoodBase := Str -``` -^^^^^^^^^^^^^^^ - - -**TYPE REDECLARED** -The type _GoodDerived_ is being redeclared. 
- -The redeclaration is here: -**underscore_error_propagation.md:10:1:10:24:** -```roc -GoodDerived := GoodBase -``` -^^^^^^^^^^^^^^^^^^^^^^^ - -But _GoodDerived_ was already declared here: -**underscore_error_propagation.md:10:1:10:24:** -```roc -GoodDerived := GoodBase -``` -^^^^^^^^^^^^^^^^^^^^^^^ - +But the type annotation says it should have the type: + _GoodDerived_ # TOKENS ~~~zig @@ -172,8 +145,8 @@ NO CHANGE ~~~clojure (inferred-types (defs - (patt (type "Str")) - (patt (type "Str"))) + (patt (type "Error")) + (patt (type "Error"))) (type_decls (nominal (type "BadBase") (ty-header (name "BadBase"))) @@ -184,6 +157,6 @@ NO CHANGE (nominal (type "GoodDerived") (ty-header (name "GoodDerived")))) (expressions - (expr (type "Str")) - (expr (type "Str")))) + (expr (type "Error")) + (expr (type "Error")))) ~~~ diff --git a/test/snapshots/issue/underscore_error_type.md b/test/snapshots/issue/underscore_error_type.md index eabee588a7..c2bbd60c18 100644 --- a/test/snapshots/issue/underscore_error_type.md +++ b/test/snapshots/issue/underscore_error_type.md @@ -32,17 +32,17 @@ quux = ("hello", 42) ~~~ # EXPECTED UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:1:1:1:1 -TYPE REDECLARED - underscore_error_type.md:6:1:6:19 UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:6:17:6:17 UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:6:12:6:16 -TYPE REDECLARED - underscore_error_type.md:11:1:11:38 -UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:1:1:1:1 -TYPE REDECLARED - underscore_error_type.md:16:1:16:22 UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:1:1:1:1 UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:1:1:1:1 -TYPE REDECLARED - underscore_error_type.md:21:1:21:21 +UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:1:1:1:1 UNDERSCORE IN TYPE ALIAS - underscore_error_type.md:21:14:21:14 MISSING METHOD - underscore_error_type.md:4:7:4:9 +TYPE MISMATCH - underscore_error_type.md:9:7:9:16 +TYPE MISMATCH - underscore_error_type.md:14:7:14:32 +TYPE 
MISMATCH - underscore_error_type.md:19:7:19:12 +TYPE MISMATCH - underscore_error_type.md:24:8:24:21 # PROBLEMS **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -55,24 +55,6 @@ BadType := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _BadList_ is being redeclared. - -The redeclaration is here: -**underscore_error_type.md:6:1:6:19:** -```roc -BadList := List(_) -``` -^^^^^^^^^^^^^^^^^^ - -But _BadList_ was already declared here: -**underscore_error_type.md:6:1:6:19:** -```roc -BadList := List(_) -``` -^^^^^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -95,53 +77,6 @@ BadList := List(_) Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _BadRecord_ is being redeclared. - -The redeclaration is here: -**underscore_error_type.md:11:1:11:38:** -```roc -BadRecord := { field: _, other: U32 } -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -But _BadRecord_ was already declared here: -**underscore_error_type.md:11:1:11:38:** -```roc -BadRecord := { field: _, other: U32 } -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - -**UNDERSCORE IN TYPE ALIAS** -Underscores are not allowed in type alias declarations. - -**underscore_error_type.md:1:1:1:1:** -```roc -BadType := _ -``` -^ - -Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. - -**TYPE REDECLARED** -The type _BadFunction_ is being redeclared. 
- -The redeclaration is here: -**underscore_error_type.md:16:1:16:22:** -```roc -BadFunction := _ -> _ -``` -^^^^^^^^^^^^^^^^^^^^^ - -But _BadFunction_ was already declared here: -**underscore_error_type.md:16:1:16:22:** -```roc -BadFunction := _ -> _ -``` -^^^^^^^^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -164,23 +99,16 @@ BadType := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _BadTuple_ is being redeclared. +**UNDERSCORE IN TYPE ALIAS** +Underscores are not allowed in type alias declarations. -The redeclaration is here: -**underscore_error_type.md:21:1:21:21:** +**underscore_error_type.md:1:1:1:1:** ```roc -BadTuple := (_, U32) +BadType := _ ``` -^^^^^^^^^^^^^^^^^^^^ - -But _BadTuple_ was already declared here: -**underscore_error_type.md:21:1:21:21:** -```roc -BadTuple := (_, U32) -``` -^^^^^^^^^^^^^^^^^^^^ +^ +Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -207,6 +135,62 @@ The value's type, which does not have a method named **from_numeral**, is: **Hint:** For this to work, the type would need to have a method named **from_numeral** associated with it in the type's declaration. 
+**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_type.md:9:7:9:16:** +```roc +bar = [1, 2, 3] +``` + ^^^^^^^^^ + +It has the type: + _List(a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]_ + +But the type annotation says it should have the type: + _BadList_ + +**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_type.md:14:7:14:32:** +```roc +baz = { field: "hi", other: 5 } +``` + ^^^^^^^^^^^^^^^^^^^^^^^^^ + +It has the type: + _{ field: Str, other: a } where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]_ + +But the type annotation says it should have the type: + _BadRecord_ + +**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_type.md:19:7:19:12:** +```roc +qux = |x| x +``` + ^^^^^ + +It has the type: + _a -> a_ + +But the type annotation says it should have the type: + _BadFunction_ + +**TYPE MISMATCH** +This expression is used in an unexpected way: +**underscore_error_type.md:24:8:24:21:** +```roc +quux = ("hello", 42) +``` + ^^^^^^^^^^^^^ + +It has the type: + _(Str, a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]_ + +But the type annotation says it should have the type: + _BadTuple_ + # TOKENS ~~~zig UpperIdent,OpColonEqual,Underscore, @@ -404,10 +388,10 @@ quux = ("hello", 42) (inferred-types (defs (patt (type "BadType")) - (patt (type "List(a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")) - (patt (type "{ field: Str, other: a } where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")) - (patt (type "a -> a")) - (patt (type "(Str, a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]"))) + (patt (type "Error")) + (patt (type "Error")) + (patt (type "Error")) + (patt (type "Error"))) (type_decls (nominal (type "BadType") (ty-header (name "BadType"))) @@ -421,8 +405,8 @@ quux = ("hello", 42) (ty-header (name "BadTuple")))) (expressions (expr (type 
"BadType")) - (expr (type "List(a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")) - (expr (type "{ field: Str, other: a } where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")) - (expr (type "a -> a")) - (expr (type "(Str, a) where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")))) + (expr (type "Error")) + (expr (type "Error")) + (expr (type "Error")) + (expr (type "Error")))) ~~~ diff --git a/test/snapshots/issue/usage_test.md b/test/snapshots/issue/usage_test.md index 2e9367be2b..912ae4fb44 100644 --- a/test/snapshots/issue/usage_test.md +++ b/test/snapshots/issue/usage_test.md @@ -14,8 +14,8 @@ value = 42 ~~~ # EXPECTED UNDERSCORE IN TYPE ALIAS - usage_test.md:1:1:1:1 -TYPE REDECLARED - usage_test.md:3:1:3:14 UNDERSCORE IN TYPE ALIAS - usage_test.md:1:1:1:1 +MISSING METHOD - usage_test.md:6:9:6:11 # PROBLEMS **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -28,24 +28,6 @@ UnusedType := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _UsedType_ is being redeclared. - -The redeclaration is here: -**usage_test.md:3:1:3:14:** -```roc -UsedType := _ -``` -^^^^^^^^^^^^^ - -But _UsedType_ was already declared here: -**usage_test.md:3:1:3:14:** -```roc -UsedType := _ -``` -^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -57,6 +39,20 @@ UnusedType := _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. 
+**MISSING METHOD** +This **from_numeral** method is being called on a value whose type doesn't have that method: +**usage_test.md:6:9:6:11:** +```roc +value = 42 +``` + ^^ + +The value's type, which does not have a method named **from_numeral**, is: + + _UsedType_ + +**Hint:** For this to work, the type would need to have a method named **from_numeral** associated with it in the type's declaration. + # TOKENS ~~~zig UpperIdent,OpColonEqual,Underscore, @@ -107,12 +103,12 @@ NO CHANGE ~~~clojure (inferred-types (defs - (patt (type "a where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]"))) + (patt (type "UsedType"))) (type_decls (nominal (type "UnusedType") (ty-header (name "UnusedType"))) (nominal (type "UsedType") (ty-header (name "UsedType")))) (expressions - (expr (type "a where [a.from_numeral : Numeral -> Try(a, [InvalidNumeral(Str)])]")))) + (expr (type "UsedType")))) ~~~ diff --git a/test/snapshots/pass/underscore_in_type_alias.md b/test/snapshots/pass/underscore_in_type_alias.md index 833062c914..ed698b6940 100644 --- a/test/snapshots/pass/underscore_in_type_alias.md +++ b/test/snapshots/pass/underscore_in_type_alias.md @@ -21,20 +21,14 @@ TagType := [Some(_), None] ~~~ # EXPECTED UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 -TYPE REDECLARED - underscore_in_type_alias.md:3:1:3:15 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 -TYPE REDECLARED - underscore_in_type_alias.md:5:1:5:23 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:5:21:5:21 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:5:16:5:20 -TYPE REDECLARED - underscore_in_type_alias.md:7:1:7:39 -UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 -TYPE REDECLARED - underscore_in_type_alias.md:9:1:9:23 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 -TYPE REDECLARED - underscore_in_type_alias.md:11:1:11:25 +UNDERSCORE IN TYPE ALIAS - 
underscore_in_type_alias.md:1:1:1:1 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:11:15:11:15 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:1:1:1:1 -TYPE REDECLARED - underscore_in_type_alias.md:13:1:13:27 UNDERSCORE IN TYPE ALIAS - underscore_in_type_alias.md:13:18:13:18 # PROBLEMS **UNDERSCORE IN TYPE ALIAS** @@ -48,24 +42,6 @@ MyType : _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _OtherType_ is being redeclared. - -The redeclaration is here: -**underscore_in_type_alias.md:3:1:3:15:** -```roc -OtherType := _ -``` -^^^^^^^^^^^^^^ - -But _OtherType_ was already declared here: -**underscore_in_type_alias.md:3:1:3:15:** -```roc -OtherType := _ -``` -^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -77,24 +53,6 @@ MyType : _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _ComplexType_ is being redeclared. - -The redeclaration is here: -**underscore_in_type_alias.md:5:1:5:23:** -```roc -ComplexType := List(_) -``` -^^^^^^^^^^^^^^^^^^^^^^ - -But _ComplexType_ was already declared here: -**underscore_in_type_alias.md:5:1:5:23:** -```roc -ComplexType := List(_) -``` -^^^^^^^^^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -117,53 +75,6 @@ ComplexType := List(_) Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _RecordType_ is being redeclared. 
- -The redeclaration is here: -**underscore_in_type_alias.md:7:1:7:39:** -```roc -RecordType := { field: _, other: U32 } -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -But _RecordType_ was already declared here: -**underscore_in_type_alias.md:7:1:7:39:** -```roc -RecordType := { field: _, other: U32 } -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - -**UNDERSCORE IN TYPE ALIAS** -Underscores are not allowed in type alias declarations. - -**underscore_in_type_alias.md:1:1:1:1:** -```roc -MyType : _ -``` -^ - -Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. - -**TYPE REDECLARED** -The type _FunctionType_ is being redeclared. - -The redeclaration is here: -**underscore_in_type_alias.md:9:1:9:23:** -```roc -FunctionType := _ -> _ -``` -^^^^^^^^^^^^^^^^^^^^^^ - -But _FunctionType_ was already declared here: -**underscore_in_type_alias.md:9:1:9:23:** -```roc -FunctionType := _ -> _ -``` -^^^^^^^^^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -186,23 +97,16 @@ MyType : _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _TupleType_ is being redeclared. +**UNDERSCORE IN TYPE ALIAS** +Underscores are not allowed in type alias declarations. 
-The redeclaration is here: -**underscore_in_type_alias.md:11:1:11:25:** +**underscore_in_type_alias.md:1:1:1:1:** ```roc -TupleType := (_, U32, _) +MyType : _ ``` -^^^^^^^^^^^^^^^^^^^^^^^^ - -But _TupleType_ was already declared here: -**underscore_in_type_alias.md:11:1:11:25:** -```roc -TupleType := (_, U32, _) -``` -^^^^^^^^^^^^^^^^^^^^^^^^ +^ +Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. @@ -226,24 +130,6 @@ MyType : _ Underscores in type annotations mean "I don't care about this type", which doesn't make sense when declaring a type. If you need a placeholder type variable, use a named type variable like `a` instead. -**TYPE REDECLARED** -The type _TagType_ is being redeclared. - -The redeclaration is here: -**underscore_in_type_alias.md:13:1:13:27:** -```roc -TagType := [Some(_), None] -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -But _TagType_ was already declared here: -**underscore_in_type_alias.md:13:1:13:27:** -```roc -TagType := [Some(_), None] -``` -^^^^^^^^^^^^^^^^^^^^^^^^^^ - - **UNDERSCORE IN TYPE ALIAS** Underscores are not allowed in type alias declarations. diff --git a/test/snapshots/pass/underscore_prefixed_param_in_nominal_type.md b/test/snapshots/pass/underscore_prefixed_param_in_nominal_type.md index cf85da94c7..091b58978b 100644 --- a/test/snapshots/pass/underscore_prefixed_param_in_nominal_type.md +++ b/test/snapshots/pass/underscore_prefixed_param_in_nominal_type.md @@ -15,45 +15,9 @@ NominalType2(_a, b) := b NominalType3(a, _b) := a ~~~ # EXPECTED -TYPE REDECLARED - underscore_prefixed_param_in_nominal_type.md:5:1:5:25 -TYPE REDECLARED - underscore_prefixed_param_in_nominal_type.md:8:1:8:25 +NIL # PROBLEMS -**TYPE REDECLARED** -The type _NominalType2_ is being redeclared. 
- -The redeclaration is here: -**underscore_prefixed_param_in_nominal_type.md:5:1:5:25:** -```roc -NominalType2(_a, b) := b -``` -^^^^^^^^^^^^^^^^^^^^^^^^ - -But _NominalType2_ was already declared here: -**underscore_prefixed_param_in_nominal_type.md:5:1:5:25:** -```roc -NominalType2(_a, b) := b -``` -^^^^^^^^^^^^^^^^^^^^^^^^ - - -**TYPE REDECLARED** -The type _NominalType3_ is being redeclared. - -The redeclaration is here: -**underscore_prefixed_param_in_nominal_type.md:8:1:8:25:** -```roc -NominalType3(a, _b) := a -``` -^^^^^^^^^^^^^^^^^^^^^^^^ - -But _NominalType3_ was already declared here: -**underscore_prefixed_param_in_nominal_type.md:8:1:8:25:** -```roc -NominalType3(a, _b) := a -``` -^^^^^^^^^^^^^^^^^^^^^^^^ - - +NIL # TOKENS ~~~zig UpperIdent,NoSpaceOpenRound,NamedUnderscore,CloseRound,OpColonEqual,UpperIdent, diff --git a/test/snapshots/pass/underscore_prefixed_param_in_opaque_type.md b/test/snapshots/pass/underscore_prefixed_param_in_opaque_type.md index 45f61b8bb0..b60f7475f6 100644 --- a/test/snapshots/pass/underscore_prefixed_param_in_opaque_type.md +++ b/test/snapshots/pass/underscore_prefixed_param_in_opaque_type.md @@ -15,45 +15,9 @@ OpaqueType2(_a, b) :: b OpaqueType3(a, _b) :: a ~~~ # EXPECTED -TYPE REDECLARED - underscore_prefixed_param_in_opaque_type.md:5:1:5:24 -TYPE REDECLARED - underscore_prefixed_param_in_opaque_type.md:8:1:8:24 +NIL # PROBLEMS -**TYPE REDECLARED** -The type _OpaqueType2_ is being redeclared. - -The redeclaration is here: -**underscore_prefixed_param_in_opaque_type.md:5:1:5:24:** -```roc -OpaqueType2(_a, b) :: b -``` -^^^^^^^^^^^^^^^^^^^^^^^ - -But _OpaqueType2_ was already declared here: -**underscore_prefixed_param_in_opaque_type.md:5:1:5:24:** -```roc -OpaqueType2(_a, b) :: b -``` -^^^^^^^^^^^^^^^^^^^^^^^ - - -**TYPE REDECLARED** -The type _OpaqueType3_ is being redeclared. 
- -The redeclaration is here: -**underscore_prefixed_param_in_opaque_type.md:8:1:8:24:** -```roc -OpaqueType3(a, _b) :: a -``` -^^^^^^^^^^^^^^^^^^^^^^^ - -But _OpaqueType3_ was already declared here: -**underscore_prefixed_param_in_opaque_type.md:8:1:8:24:** -```roc -OpaqueType3(a, _b) :: a -``` -^^^^^^^^^^^^^^^^^^^^^^^ - - +NIL # TOKENS ~~~zig UpperIdent,NoSpaceOpenRound,NamedUnderscore,CloseRound,OpDoubleColon,UpperIdent, diff --git a/test/snapshots/plume_package/Color.md b/test/snapshots/plume_package/Color.md index 9c9ada6de5..1cac226efa 100644 --- a/test/snapshots/plume_package/Color.md +++ b/test/snapshots/plume_package/Color.md @@ -79,7 +79,6 @@ is_named_color = |str|{ ~~~ # EXPECTED MODULE HEADER DEPRECATED - Color.md:1:1:8:2 -TYPE REDECLARED - Color.md:10:1:15:2 UNUSED VARIABLE - Color.md:30:5:30:25 DOES NOT EXIST - Color.md:50:34:50:44 DOES NOT EXIST - Color.md:50:52:50:62 @@ -97,6 +96,8 @@ MISSING METHOD - Color.md:38:23:38:43 MISSING METHOD - Color.md:39:23:39:43 MISSING METHOD - Color.md:40:23:40:43 MISSING METHOD - Color.md:62:12:62:26 +MISSING METHOD - Color.md:56:26:56:32 +MISSING METHOD - Color.md:57:32:57:38 MISSING METHOD - Color.md:58:23:58:29 # PROBLEMS **MODULE HEADER DEPRECATED** @@ -118,32 +119,6 @@ module [ ``` -**TYPE REDECLARED** -The type _Color_ is being redeclared. - -The redeclaration is here: -**Color.md:10:1:15:2:** -```roc -Color := [ - RGB(U8, U8, U8), - RGBA(U8, U8, U8, Dec), - Named(Str), - Hex(Str), -] -``` - -But _Color_ was already declared here: -**Color.md:10:1:15:2:** -```roc -Color := [ - RGB(U8, U8, U8), - RGBA(U8, U8, U8, Dec), - Named(Str), - Hex(Str), -] -``` - - **UNUSED VARIABLE** Variable `is_char_in_hex_range` is not used anywhere in your code. @@ -351,6 +326,34 @@ The value's type, which does not have a method named **is_named_color**, is: **Hint:** For this to work, the type would need to have a method named **is_named_color** associated with it in the type's declaration. 
+**MISSING METHOD** +This **to_str** method is being called on a value whose type doesn't have that method: +**Color.md:56:26:56:32:** +```roc +expect rgb(124, 56, 245).to_str() == "rgb(124, 56, 245)" +``` + ^^^^^^ + +The value's type, which does not have a method named **to_str**, is: + + _Color_ + +**Hint:** For this to work, the type would need to have a method named **to_str** associated with it in the type's declaration. + +**MISSING METHOD** +This **to_str** method is being called on a value whose type doesn't have that method: +**Color.md:57:32:57:38:** +```roc +expect rgba(124, 56, 245, 255).to_str() == "rgba(124, 56, 245, 1.0)" +``` + ^^^^^^ + +The value's type, which does not have a method named **to_str**, is: + + _Color_ + +**Hint:** For this to work, the type would need to have a method named **to_str** associated with it in the type's declaration. + **MISSING METHOD** This **map_ok** method is being called on a value whose type doesn't have that method: **Color.md:58:23:58:29:** @@ -361,7 +364,7 @@ expect hex("#ff00ff").map_ok(to_str) == Ok("#ff00ff") The value's type, which does not have a method named **map_ok**, is: - _Try(Error, [InvalidHex(Str)])_ + _Try(Color, [InvalidHex(Str)])_ **Hint:** For this to work, the type would need to have a method named **map_ok** associated with it in the type's declaration. 
@@ -1304,20 +1307,20 @@ is_named_color = |str| { ~~~clojure (inferred-types (defs - (patt (type "U8, U8, U8 -> Error")) - (patt (type "U8, U8, U8, U8 -> Error")) - (patt (type "Str -> Try(Error, [InvalidHex(Str)])")) - (patt (type "Error -> Error")) - (patt (type "Str -> Try(Error, [UnknownColor(Str)])")) + (patt (type "U8, U8, U8 -> Color")) + (patt (type "U8, U8, U8, U8 -> Color")) + (patt (type "Str -> Try(Color, [InvalidHex(Str)])")) + (patt (type "Color -> Error")) + (patt (type "Str -> Try(Color, [UnknownColor(Str)])")) (patt (type "_arg -> Error"))) (type_decls (nominal (type "Color") (ty-header (name "Color")))) (expressions - (expr (type "U8, U8, U8 -> Error")) - (expr (type "U8, U8, U8, U8 -> Error")) - (expr (type "Str -> Try(Error, [InvalidHex(Str)])")) - (expr (type "Error -> Error")) - (expr (type "Str -> Try(Error, [UnknownColor(Str)])")) + (expr (type "U8, U8, U8 -> Color")) + (expr (type "U8, U8, U8, U8 -> Color")) + (expr (type "Str -> Try(Color, [InvalidHex(Str)])")) + (expr (type "Color -> Error")) + (expr (type "Str -> Try(Color, [UnknownColor(Str)])")) (expr (type "_arg -> Error")))) ~~~ From 581f34a07122cfdddd05cc8eff4b3402349d66c4 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 14:30:21 -0500 Subject: [PATCH 23/64] Fix some bugs --- src/canonicalize/Can.zig | 158 +++++++++++++++++++++++++++++++++---- src/canonicalize/Scope.zig | 2 +- src/cli/main.zig | 60 +++++--------- 3 files changed, 161 insertions(+), 59 deletions(-) diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index 0a7dffd0a3..0c5b7cf511 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -1212,7 +1212,6 @@ fn processAssociatedItemsSecondPass( const parent_text = self.env.getIdent(parent_name); const name_text = self.env.getIdent(name_ident); const qualified_idx = try self.env.insertQualifiedIdent(parent_text, name_text); - // Create anno-only def with the qualified name const def_idx = try 
self.createAnnoOnlyDef(qualified_idx, type_anno_idx, where_clauses, region); @@ -4124,15 +4123,6 @@ pub fn canonicalizeExpr( break :blk_qualified; } - // Check if this is a package-qualified import (e.g., "pf.Stdout") - // These are cross-package imports resolved by the workspace resolver - const is_pkg_qualified = if (module_info) |info| info.is_package_qualified else false; - if (is_pkg_qualified) { - // Package-qualified import - member resolution happens via the resolver - // Fall through to normal identifier lookup - break :blk_qualified; - } - // Generate a more helpful error for auto-imported types (List, Bool, Try, etc.) const is_auto_imported_type = if (self.module_envs) |envs_map| envs_map.contains(module_name) @@ -10826,14 +10816,143 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca return null; }; - // This is a module-qualified lookup + // This IS a module-qualified lookup - we must handle it completely here. + // After this point, returning null would cause incorrect fallback to regular field access. 
const right_expr = self.parse_ir.store.getExpr(field_access.right); - if (right_expr != .ident) return null; + const region = self.parse_ir.tokenizedRegionToRegion(field_access.region); + + // Handle method calls on module-qualified types (e.g., Stdout.line!(...)) + if (right_expr == .apply) { + const apply = right_expr.apply; + const method_expr = self.parse_ir.store.getExpr(apply.@"fn"); + if (method_expr != .ident) { + // Module-qualified call with non-ident function (e.g., Module.(complex_expr)(...)) + // This is malformed - report error + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{ + .region = region, + } }); + } + + const method_ident = method_expr.ident; + const method_name = self.parse_ir.tokens.resolveIdentifier(method_ident.token) orelse { + // Couldn't resolve method name token + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{ + .region = region, + } }); + }; + + // Check if this is a type module (like Stdout) - we need to create a method call on the nominal type + if (self.module_envs) |envs_map| { + if (envs_map.get(module_name)) |auto_imported_type| { + if (auto_imported_type.statement_idx) |stmt_idx| { + // This is an imported type module - create an e_dot_access for the method call + const module_name_text = auto_imported_type.env.module_name; + const auto_import_idx = try self.getOrCreateAutoImport(module_name_text); + + const target_node_idx = auto_imported_type.env.getExposedNodeIndexByStatementIdx(stmt_idx) orelse { + std.debug.panic("Failed to find exposed node for statement index {} in module '{s}'", .{ stmt_idx, module_name_text }); + }; + + // Create the receiver - a reference to the nominal type + const receiver_idx = try self.env.addExpr(CIR.Expr{ + .e_lookup_external = .{ + .module_idx = auto_import_idx, + .target_node_idx = target_node_idx, + .region = self.parse_ir.tokenizedRegionToRegion(method_ident.region), + }, + }, 
self.parse_ir.tokenizedRegionToRegion(method_ident.region)); + + // Canonicalize the arguments + const scratch_top = self.env.store.scratchExprTop(); + for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| { + if (try self.canonicalizeExpr(arg_idx)) |canonicalized| { + try self.env.store.addScratchExpr(canonicalized.get_idx()); + } + // Note: if arg canonicalization fails, it will have pushed its own diagnostic + } + const args = try self.env.store.exprSpanFrom(scratch_top); + + // Create the method call expression + const method_region = self.parse_ir.tokenizedRegionToRegion(method_ident.region); + const expr_idx = try self.env.addExpr(CIR.Expr{ + .e_dot_access = .{ + .receiver = receiver_idx, + .field_name = method_name, + .field_name_region = method_region, + .args = args, + }, + }, region); + return expr_idx; + } + } + } + + // Module exists but is not a type module with a statement_idx - it's a regular module + // This means it's something like `SomeModule.someFunc(args)` where someFunc is a regular export + // We need to look up the function and create a call + const field_text = self.env.getIdent(method_name); + const target_node_idx_opt: ?u16 = if (self.module_envs) |envs_map| blk: { + if (envs_map.get(module_name)) |auto_imported_type| { + const module_env = auto_imported_type.env; + if (module_env.common.findIdent(field_text)) |target_ident| { + break :blk module_env.getExposedNodeIndexById(target_ident); + } else { + break :blk null; + } + } else { + break :blk null; + } + } else null; + + if (target_node_idx_opt) |target_node_idx| { + // Found the function - create a lookup and call it + const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{ + .module_idx = import_idx, + .target_node_idx = target_node_idx, + .region = region, + } }, region); + + // Canonicalize the arguments + const scratch_top = self.env.store.scratchExprTop(); + for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| { + if (try 
self.canonicalizeExpr(arg_idx)) |canonicalized| { + try self.env.store.addScratchExpr(canonicalized.get_idx()); + } + } + const args_span = try self.env.store.exprSpanFrom(scratch_top); + + // Create the call expression + const call_expr_idx = try self.env.addExpr(CIR.Expr{ + .e_call = .{ + .func = func_expr_idx, + .args = args_span, + .called_via = CalledVia.apply, + }, + }, region); + return call_expr_idx; + } else { + // Function not found in module + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{ + .ident = method_name, + .region = region, + } }); + } + } + + // Handle simple field access (not a method call) + if (right_expr != .ident) { + // Module-qualified access with non-ident, non-apply right side - malformed + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{ + .region = region, + } }); + } const right_ident = right_expr.ident; - const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse return null; - - const region = self.parse_ir.tokenizedRegionToRegion(field_access.region); + const field_name = self.parse_ir.tokens.resolveIdentifier(right_ident.token) orelse { + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .expr_not_canonicalized = .{ + .region = region, + } }); + }; // Check if this is a tag access on an auto-imported nominal type (e.g., Bool.True) if (self.module_envs) |envs_map| { @@ -10890,8 +11009,13 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca } } else null; - // If we didn't find a valid node index, return null to fall through to error handling - const target_node_idx = target_node_idx_opt orelse return null; + // If we didn't find a valid node index, report an error (don't fall back) + const target_node_idx = target_node_idx_opt orelse { + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .qualified_ident_does_not_exist = .{ + .ident = field_name, + .region = region, + } }); + }; // 
Create the e_lookup_external expression with Import.Idx const expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{ diff --git a/src/canonicalize/Scope.zig b/src/canonicalize/Scope.zig index c9e4dda2ed..04758be7e5 100644 --- a/src/canonicalize/Scope.zig +++ b/src/canonicalize/Scope.zig @@ -363,7 +363,7 @@ pub fn lookupTypeVar(scope: *const Scope, name: Ident.Idx) TypeVarLookupResult { /// Look up a module alias in this scope pub fn lookupModuleAlias(scope: *const Scope, name: Ident.Idx) ModuleAliasLookupResult { - // Search by comparing text content, not identifier index + // Search by comparing .idx values (u29 index into string interner) var iter = scope.module_aliases.iterator(); while (iter.next()) |entry| { if (name.idx == entry.key_ptr.idx) { diff --git a/src/cli/main.zig b/src/cli/main.zig index 0714eaf247..fe4f32f6b5 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -1554,44 +1554,12 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons const module_env_ptr = try compileModuleToSharedMemory( allocs, module_path, - module_filename, + module_name, // Use just "Stdout" (not "Stdout.roc") so type-module detection works shm_allocator, &builtin_modules, &.{}, ); - // Add exposed item aliases with "pf." prefix for import resolution - // The canonicalizer builds lookup names like "Stdout.roc.pf.Stdout.line!" - // because the import "pf.Stdout" creates an alias Stdout -> pf.Stdout, - // and scopeLookupModule returns "pf.Stdout" which becomes part of the qualified name. - // We need to add aliases that match this pattern. - module_env_ptr.common.exposed_items.ensureSorted(shm_allocator); - const exposed_entries = module_env_ptr.common.exposed_items.items.entries.items; - for (exposed_entries) |entry| { - const key_ident: base.Ident.Idx = @bitCast(entry.key); - const key_text = module_env_ptr.common.getIdent(key_ident); - - // Check if this is a qualified name like "Stdout.roc.Stdout.line!" 
- // We want to create an alias "Stdout.roc.pf.Stdout.line!" - // The pattern is: "{module}.roc.{Type}.{method}" - // We want to create: "{module}.roc.pf.{Type}.{method}" - if (std.mem.indexOf(u8, key_text, ".roc.")) |roc_pos| { - const prefix = key_text[0 .. roc_pos + 5]; // "Stdout.roc." - const suffix = key_text[roc_pos + 5 ..]; // "Stdout.line!" - - // Create the aliased name "Stdout.roc.pf.Stdout.line!" - const aliased_name = try std.fmt.allocPrint(shm_allocator, "{s}pf.{s}", .{ prefix, suffix }); - // Note: We don't defer free because this is allocated in shm_allocator (shared memory) - - // Insert the aliased name into the platform env's ident table - const aliased_ident = try module_env_ptr.insertIdent(base.Ident.for_text(aliased_name)); - - // First add to exposed items, then set node index - try module_env_ptr.common.exposed_items.addExposedById(shm_allocator, @bitCast(aliased_ident)); - try module_env_ptr.common.exposed_items.setNodeIndexById(shm_allocator, @bitCast(aliased_ident), entry.value); - } - } - // Store platform modules at indices 0..N-2, app will be at N-1 module_env_offsets_ptr[i] = @intFromPtr(module_env_ptr) - @intFromPtr(shm.base_ptr); platform_env_ptrs[i] = module_env_ptr; @@ -1737,19 +1705,29 @@ pub fn setupSharedMemoryWithModuleEnv(allocs: *Allocators, roc_file_path: []cons // Two keys are needed for each platform module: // 1. "pf.Stdout" - used during import validation (import pf.Stdout) // 2. "Stdout" - used during expression canonicalization (Stdout.line!) - // Also set statement_idx to a non-null value to trigger qualified name lookup, - // since associated items are stored as "Stdout.roc.Stdout.line!", not just "line!". + // Also set statement_idx to the actual type node index, which is needed for + // creating e_nominal_external and e_lookup_external expressions. for (exposed_modules.items, 0..) 
|module_name, i| { const platform_env = platform_env_ptrs[i]; - // For platform modules, the qualified type name is "ModuleName.roc.ModuleName" - // This matches how associated items are stored (e.g., "Stdout.roc.Stdout.line!") + // For platform modules (type modules), the qualified type name is just the type name. + // Type modules like Stdout.roc store associated items as "Stdout.line!" (not "Stdout.roc.Stdout.line!") + // because processTypeDeclFirstPass uses parent_name=null for top-level types. // Insert into app_env (calling module) since Ident.Idx values are not transferable between stores. - const qualified_type_name = try std.fmt.allocPrint(allocs.gpa, "{s}.roc.{s}", .{ module_name, module_name }); - defer allocs.gpa.free(qualified_type_name); - const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(qualified_type_name)); + const type_qualified_ident = try app_env.insertIdent(base.Ident.for_text(module_name)); + + // Look up the type in the platform module's exposed_items to get the actual node index + const type_ident_in_platform = platform_env.common.findIdent(module_name) orelse { + std.log.err("Platform module '{s}' does not expose a type named '{s}'", .{ module_name, module_name }); + return error.MissingTypeInPlatformModule; + }; + const type_node_idx = platform_env.getExposedNodeIndexById(type_ident_in_platform) orelse { + std.log.err("Platform module type '{s}' has no node index in exposed_items", .{module_name}); + return error.MissingNodeIndexForPlatformType; + }; + const auto_type = Can.AutoImportedType{ .env = platform_env, - .statement_idx = undefined, // non-null triggers qualified name building; actual index isn't read + .statement_idx = @enumFromInt(type_node_idx), // actual type node index for e_lookup_external .qualified_type_ident = type_qualified_ident, }; From b3c2744ed2c417ad9fd41b04510411ddb6ba3098 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 17:24:08 -0500 Subject: [PATCH 24/64] wasm fixes 
etc. --- src/canonicalize/Can.zig | 131 +++++++++++++++++++++------------------ src/eval/interpreter.zig | 11 +++- test/fx/stdin_test.roc | 11 ++-- 3 files changed, 81 insertions(+), 72 deletions(-) diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index 0c5b7cf511..f829685d69 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -53,8 +53,10 @@ in_statement_position: bool = true, scopes: std.ArrayList(Scope) = .{}, /// Special scope for rigid type variables in annotations type_vars_scope: base.Scratch(TypeVarScope), -/// Special scope for tracking exposed items from module header -exposed_scope: Scope = undefined, +/// Set of identifiers exposed from this module header (values not used) +exposed_idents: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{}, +/// Set of types exposed from this module header (values not used) +exposed_types: std.AutoHashMapUnmanaged(Ident.Idx, void) = .{}, /// Track exposed identifiers by text to handle changing indices exposed_ident_texts: std.StringHashMapUnmanaged(Region) = .{}, /// Track exposed types by text to handle changing indices @@ -182,7 +184,8 @@ pub fn deinit( const gpa = self.env.gpa; self.type_vars_scope.deinit(); - self.exposed_scope.deinit(gpa); + self.exposed_idents.deinit(gpa); + self.exposed_types.deinit(gpa); self.exposed_ident_texts.deinit(gpa); self.exposed_type_texts.deinit(gpa); self.placeholder_idents.deinit(gpa); @@ -236,7 +239,6 @@ pub fn init( .scratch_record_fields = try base.Scratch(types.RecordField).init(gpa), .scratch_seen_record_fields = try base.Scratch(SeenRecordField).init(gpa), .type_vars_scope = try base.Scratch(TypeVarScope).init(gpa), - .exposed_scope = Scope.init(false), .scratch_tags = try base.Scratch(types.Tag).init(gpa), .scratch_free_vars = try base.Scratch(Pattern.Idx).init(gpa), .scratch_captures = try base.Scratch(Pattern.Idx).init(gpa), @@ -1748,7 +1750,7 @@ pub fn canonicalizeFile( // canonicalize_header_packages(); - // First, process the header to 
create exposed_scope and set module_kind + // First, process the header to populate exposed_idents/exposed_types and set module_kind const header = self.parse_ir.store.getHeader(file.header); switch (header) { .module => |h| { @@ -2553,11 +2555,9 @@ fn createExposedScope( self: *Self, exposes: AST.Collection.Idx, ) std.mem.Allocator.Error!void { - const gpa = self.env.gpa; - - // Reset exposed_scope (already initialized in init) - self.exposed_scope.deinit(gpa); - self.exposed_scope = Scope.init(false); + // Clear exposed sets (they're already initialized with default values) + self.exposed_idents.clearRetainingCapacity(); + self.exposed_types.clearRetainingCapacity(); try self.addToExposedScope(exposes); } @@ -2596,9 +2596,8 @@ fn addToExposedScope( // Add to exposed_items for permanent storage (unconditionally) try self.env.addExposedById(ident_idx); - // Use undefined pattern index - we just need to track that the ident is exposed - const dummy_idx: Pattern.Idx = undefined; - try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx); + // Just track that this identifier is exposed + try self.exposed_idents.put(gpa, ident_idx, {}); } // Store by text in a temporary hash map, since indices may change @@ -2629,9 +2628,8 @@ fn addToExposedScope( // Don't add types to exposed_items - types are not values // Only add to type_bindings for type resolution - // Use undefined statement index - we just need to track that the type is exposed - const dummy_idx: Statement.Idx = undefined; - try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx }); + // Just track that this type is exposed + try self.exposed_types.put(gpa, ident_idx, {}); } // Store by text in a temporary hash map, since indices may change @@ -2662,9 +2660,8 @@ fn addToExposedScope( // Don't add types to exposed_items - types are not values // Only add to type_bindings for type resolution - // Use undefined statement index - we just need to track that the 
type is exposed - const dummy_idx: Statement.Idx = undefined; - try self.exposed_scope.type_bindings.put(gpa, ident_idx, Scope.TypeBinding{ .local_nominal = dummy_idx }); + // Just track that this type is exposed + try self.exposed_types.put(gpa, ident_idx, {}); } // Store by text in a temporary hash map, since indices may change @@ -2712,9 +2709,8 @@ fn addPlatformProvidesItems( // Add to exposed_items for permanent storage try self.env.addExposedById(ident_idx); - // Add to exposed_scope so it becomes an export - undefined since index isn't read - const dummy_idx: Pattern.Idx = undefined; - try self.exposed_scope.put(gpa, .ident, ident_idx, dummy_idx); + // Track that this identifier is exposed (for exports) + try self.exposed_idents.put(gpa, ident_idx, {}); // Also track in exposed_ident_texts const token_region = self.parse_ir.tokens.resolve(@intCast(field.name)); @@ -2816,7 +2812,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void { const defs_slice = self.env.store.sliceDefs(self.env.all_defs); // Check each definition to see if it corresponds to an exposed item. - // We check exposed_scope.idents which only contains items from the exposing clause, + // We check exposed_idents which only contains items from the exposing clause, // not associated items like "Color.as_str" which are registered separately. 
for (defs_slice) |def_idx| { const def = self.env.store.getDef(def_idx); @@ -2824,7 +2820,7 @@ fn populateExports(self: *Self) std.mem.Allocator.Error!void { if (pattern == .assign) { // Check if this identifier was explicitly exposed in the module header - if (self.exposed_scope.idents.contains(pattern.assign.ident)) { + if (self.exposed_idents.contains(pattern.assign.ident)) { try self.env.store.addScratchDef(def_idx); } } @@ -5177,7 +5173,7 @@ pub fn canonicalizeExpr( .patterns = ok_branch_pat_span, .value = ok_lookup_idx, .guard = null, - .redundant = undefined, // set during type checking + .redundant = .zero, // placeholder; set during type checking }, region, ); @@ -5251,7 +5247,7 @@ pub fn canonicalizeExpr( .patterns = err_branch_pat_span, .value = return_expr_idx, .guard = null, - .redundant = undefined, // set during type checking + .redundant = .zero, // placeholder; set during type checking }, region, ); @@ -5265,7 +5261,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = undefined, // set during type checking + .exhaustive = .zero, // placeholder; set during type checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -5573,7 +5569,7 @@ pub fn canonicalizeExpr( .patterns = branch_pat_span, .value = value_idx, .guard = null, - .redundant = undefined, // set during type checking + .redundant = .zero, // placeholder; set during type checking }, region, ); @@ -5593,7 +5589,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = undefined, // set during type checking + .exhaustive = .zero, // placeholder; set during type checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -10841,48 +10837,59 @@ fn tryModuleQualifiedLookup(self: *Self, field_access: AST.BinOp) std.mem.Alloca } }); }; - // Check if this is a type module (like Stdout) 
- we need to create a method call on the nominal type + // Check if this is a type module (like Stdout) - look up the qualified method name directly if (self.module_envs) |envs_map| { if (envs_map.get(module_name)) |auto_imported_type| { - if (auto_imported_type.statement_idx) |stmt_idx| { - // This is an imported type module - create an e_dot_access for the method call - const module_name_text = auto_imported_type.env.module_name; + if (auto_imported_type.statement_idx != null) { + // This is an imported type module (like Stdout) + // Look up the qualified method name (e.g., "Stdout.line!") in the module's exposed items + const module_env = auto_imported_type.env; + const module_name_text = module_env.module_name; const auto_import_idx = try self.getOrCreateAutoImport(module_name_text); - const target_node_idx = auto_imported_type.env.getExposedNodeIndexByStatementIdx(stmt_idx) orelse { - std.debug.panic("Failed to find exposed node for statement index {} in module '{s}'", .{ stmt_idx, module_name_text }); - }; + // Build the qualified method name: "TypeName.method_name" + const type_name_text = self.env.getIdent(module_name); + const method_name_text = self.env.getIdent(method_name); + const qualified_method_name = try self.env.insertQualifiedIdent(type_name_text, method_name_text); + const qualified_text = self.env.getIdent(qualified_method_name); - // Create the receiver - a reference to the nominal type - const receiver_idx = try self.env.addExpr(CIR.Expr{ - .e_lookup_external = .{ - .module_idx = auto_import_idx, - .target_node_idx = target_node_idx, - .region = self.parse_ir.tokenizedRegionToRegion(method_ident.region), - }, - }, self.parse_ir.tokenizedRegionToRegion(method_ident.region)); + // Look up the qualified method in the module's exposed items + if (module_env.common.findIdent(qualified_text)) |method_ident_idx| { + if (module_env.getExposedNodeIndexById(method_ident_idx)) |method_node_idx| { + // Found the method! 
Create e_lookup_external + e_call + const func_expr_idx = try self.env.addExpr(CIR.Expr{ .e_lookup_external = .{ + .module_idx = auto_import_idx, + .target_node_idx = method_node_idx, + .region = region, + } }, region); - // Canonicalize the arguments - const scratch_top = self.env.store.scratchExprTop(); - for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| { - if (try self.canonicalizeExpr(arg_idx)) |canonicalized| { - try self.env.store.addScratchExpr(canonicalized.get_idx()); + // Canonicalize the arguments + const scratch_top = self.env.store.scratchExprTop(); + for (self.parse_ir.store.exprSlice(apply.args)) |arg_idx| { + if (try self.canonicalizeExpr(arg_idx)) |canonicalized| { + try self.env.store.addScratchExpr(canonicalized.get_idx()); + } + } + const args_span = try self.env.store.exprSpanFrom(scratch_top); + + // Create the call expression + const call_expr_idx = try self.env.addExpr(CIR.Expr{ + .e_call = .{ + .func = func_expr_idx, + .args = args_span, + .called_via = CalledVia.apply, + }, + }, region); + return call_expr_idx; } - // Note: if arg canonicalization fails, it will have pushed its own diagnostic } - const args = try self.env.store.exprSpanFrom(scratch_top); - // Create the method call expression - const method_region = self.parse_ir.tokenizedRegionToRegion(method_ident.region); - const expr_idx = try self.env.addExpr(CIR.Expr{ - .e_dot_access = .{ - .receiver = receiver_idx, - .field_name = method_name, - .field_name_region = method_region, - .args = args, - }, - }, region); - return expr_idx; + // Method not found in module - generate error + return try self.env.pushMalformed(Expr.Idx, Diagnostic{ .nested_value_not_found = .{ + .parent_name = module_name, + .nested_name = method_name, + .region = region, + } }); } } } diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 303b096896..49de1822fe 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -5,6 +5,10 @@ const std = @import("std"); const 
builtin = @import("builtin"); const build_options = @import("build_options"); + +/// Stack size for the interpreter. WASM targets use a smaller stack to avoid +/// memory pressure from repeated allocations that can't be efficiently coalesced. +const stack_size: u32 = if (builtin.cpu.arch == .wasm32) 4 * 1024 * 1024 else 64 * 1024 * 1024; const trace_eval = build_options.trace_eval; const trace_refcount = if (@hasDecl(build_options, "trace_refcount")) build_options.trace_refcount else false; const base_pkg = @import("base"); @@ -412,7 +416,7 @@ pub const Interpreter = struct { .import_mapping = import_mapping, .unify_scratch = try unify.Scratch.init(allocator), .type_writer = try types.TypeWriter.initFromParts(allocator, rt_types_ptr, env.common.getIdentStore(), null), - .stack_memory = try stack.Stack.initCapacity(allocator, 64 * 1024 * 1024), // 64 MiB stack + .stack_memory = try stack.Stack.initCapacity(allocator, stack_size), .bindings = try std.array_list.Managed(Binding).initCapacity(allocator, 8), .active_closures = try std.array_list.Managed(StackValue).initCapacity(allocator, 4), .canonical_bool_rt_var = null, @@ -11338,12 +11342,13 @@ pub const Interpreter = struct { const ct_var = can.ModuleEnv.varFrom(expr_idx); const rt_var = try self.translateTypeVar(self.env, ct_var); - // Manually create a closure layout since hosted functions might have flex types + // Get a ZST layout for hosted functions (they have no captures) + const zst_idx = try self.runtime_layout_store.ensureZstLayout(); const closure_layout = Layout{ .tag = .closure, .data = .{ .closure = .{ - .captures_layout_idx = undefined, // No captures for hosted functions + .captures_layout_idx = zst_idx, }, }, }; diff --git a/test/fx/stdin_test.roc b/test/fx/stdin_test.roc index ab47340b65..1de990f1f8 100644 --- a/test/fx/stdin_test.roc +++ b/test/fx/stdin_test.roc @@ -1,13 +1,10 @@ app [main!] 
{ pf: platform "./platform/main.roc" } -import pf.Stdout import pf.Stdin - -str : Str -> Str -str = |s| s +import pf.Stdout main! = || { - Stdout.line!(str("Before stdin")) - temp = Stdin.line!() - Stdout.line!(str("After stdin")) + Stdout.line!("Before stdin") + _line = Stdin.line!() + Stdout.line!("After stdin") } From e19c8c85870664786c4f96f3e96dcd0ac4f2fcba Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 20:11:02 -0500 Subject: [PATCH 25/64] Replace some zero values with undefined --- src/canonicalize/Can.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index 8208617b0f..1818c8ab02 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -5171,7 +5171,7 @@ pub fn canonicalizeExpr( .patterns = ok_branch_pat_span, .value = ok_lookup_idx, .guard = null, - .redundant = .zero, // placeholder; set during type checking + .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking }, region, ); @@ -5245,7 +5245,7 @@ pub fn canonicalizeExpr( .patterns = err_branch_pat_span, .value = return_expr_idx, .guard = null, - .redundant = .zero, // placeholder; set during type checking + .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking }, region, ); @@ -5259,7 +5259,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = .zero, // placeholder; set during type checking + .exhaustive = undefined, // currently unused, but reserved for future exhaustiveness checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -5636,7 +5636,7 @@ pub fn canonicalizeExpr( .patterns = branch_pat_span, .value = value_idx, .guard = null, - .redundant = .zero, // placeholder; set during type checking + .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking }, 
region, ); @@ -5656,7 +5656,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = .zero, // placeholder; set during type checking + .exhaustive = undefined, // currently unused, but reserved for future exhaustiveness checking }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); From 9dec5ad4953a9dc53428464d31605d514f1ad4ce Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 21:00:35 -0500 Subject: [PATCH 26/64] Rename .zero to .first --- src/canonicalize/CIR.zig | 2 +- src/canonicalize/NodeStore.zig | 65 +++++++++++++++------------------- src/collections/safe_list.zig | 58 +++++++++++++++--------------- src/eval/interpreter.zig | 4 +-- src/parse/NodeStore.zig | 2 +- src/types/store.zig | 56 +++++++++++++++-------------- src/types/types.zig | 1 - 7 files changed, 91 insertions(+), 97 deletions(-) diff --git a/src/canonicalize/CIR.zig b/src/canonicalize/CIR.zig index 787a39f041..d71e228294 100644 --- a/src/canonicalize/CIR.zig +++ b/src/canonicalize/CIR.zig @@ -720,7 +720,7 @@ pub fn fromF64(f: f64) ?RocDec { /// Represents an import statement in a module pub const Import = struct { pub const Idx = enum(u32) { - zero = 0, + first = 0, _, }; diff --git a/src/canonicalize/NodeStore.zig b/src/canonicalize/NodeStore.zig index e1b7abe4fe..1393e45f11 100644 --- a/src/canonicalize/NodeStore.zig +++ b/src/canonicalize/NodeStore.zig @@ -3691,7 +3691,7 @@ test "NodeStore basic CompactWriter roundtrip" { .data_2 = 0, .data_3 = 0, }; - _ = try original.nodes.append(gpa, node1); + const node1_idx = try original.nodes.append(gpa, node1); // Add integer value to extra_data (i128 as 4 u32s) const value: i128 = 42; @@ -3706,7 +3706,7 @@ test "NodeStore basic CompactWriter roundtrip" { .start = .{ .offset = 0 }, .end = .{ .offset = 5 }, }; - _ = try original.regions.append(gpa, region); + const region1_idx = try original.regions.append(gpa, region); // Create a temp 
file var tmp_dir = testing.tmpDir(.{}); @@ -3739,9 +3739,7 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify nodes try testing.expectEqual(@as(usize, 1), deserialized.nodes.len()); - // Named constant for the first node index in the deserialized data - const first_node_idx: Node.Idx = .zero; - const retrieved_node = deserialized.nodes.get(first_node_idx); + const retrieved_node = deserialized.nodes.get(node1_idx); try testing.expectEqual(Node.Tag.expr_int, retrieved_node.tag); try testing.expectEqual(@as(u32, 0), retrieved_node.data_1); @@ -3754,9 +3752,7 @@ test "NodeStore basic CompactWriter roundtrip" { // Verify regions try testing.expectEqual(@as(usize, 1), deserialized.regions.len()); - // Named constant for the first region index in the deserialized data - const first_region_idx: Region.Idx = .zero; - const retrieved_region = deserialized.regions.get(first_region_idx); + const retrieved_region = deserialized.regions.get(region1_idx); try testing.expectEqual(region.start.offset, retrieved_region.start.offset); try testing.expectEqual(region.end.offset, retrieved_region.end.offset); } @@ -3776,7 +3772,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { .data_2 = 0, .data_3 = 0, }; - _ = try original.nodes.append(gpa, var_node); + const var_node_idx = try original.nodes.append(gpa, var_node); // Add expression list node const list_node = Node{ @@ -3785,7 +3781,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { .data_2 = 3, // elems len .data_3 = 0, }; - _ = try original.nodes.append(gpa, list_node); + const list_node_idx = try original.nodes.append(gpa, list_node); // Add float node with extra data const float_node = Node{ @@ -3794,7 +3790,7 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { .data_2 = 0, .data_3 = 0, }; - _ = try original.nodes.append(gpa, float_node); + const float_node_idx = try original.nodes.append(gpa, float_node); // Add float value to extra_data const float_value: f64 = 3.14159; @@ -3805,14 
+3801,12 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { } // Add regions for each node - const regions = [_]Region{ - .{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } }, - .{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } }, - .{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } }, - }; - for (regions) |region| { - _ = try original.regions.append(gpa, region); - } + const region1 = Region{ .start = .{ .offset = 0 }, .end = .{ .offset = 5 } }; + const region2 = Region{ .start = .{ .offset = 10 }, .end = .{ .offset = 20 } }; + const region3 = Region{ .start = .{ .offset = 25 }, .end = .{ .offset = 32 } }; + const region1_idx = try original.regions.append(gpa, region1); + const region2_idx = try original.regions.append(gpa, region2); + const region3_idx = try original.regions.append(gpa, region3); // Create a temp file var tmp_dir = testing.tmpDir(.{}); @@ -3846,37 +3840,36 @@ test "NodeStore multiple nodes CompactWriter roundtrip" { // Verify nodes try testing.expectEqual(@as(usize, 3), deserialized.nodes.len()); - // Named constants for accessing deserialized nodes at specific indices - const first_node_idx: Node.Idx = .zero; - const second_node_idx: Node.Idx = @enumFromInt(1); - const third_node_idx: Node.Idx = @enumFromInt(2); - - // Verify var node - const retrieved_var = deserialized.nodes.get(first_node_idx); + // Verify var node using captured index + const retrieved_var = deserialized.nodes.get(var_node_idx); try testing.expectEqual(Node.Tag.expr_var, retrieved_var.tag); try testing.expectEqual(@as(u32, 5), retrieved_var.data_1); - // Verify list node - const retrieved_list = deserialized.nodes.get(second_node_idx); + // Verify list node using captured index + const retrieved_list = deserialized.nodes.get(list_node_idx); try testing.expectEqual(Node.Tag.expr_list, retrieved_list.tag); try testing.expectEqual(@as(u32, 10), retrieved_list.data_1); try testing.expectEqual(@as(u32, 3), retrieved_list.data_2); - // Verify float node and 
extra data - const retrieved_float = deserialized.nodes.get(third_node_idx); + // Verify float node and extra data using captured index + const retrieved_float = deserialized.nodes.get(float_node_idx); try testing.expectEqual(Node.Tag.expr_frac_f64, retrieved_float.tag); const retrieved_float_u32s = deserialized.extra_data.items.items[0..2]; const retrieved_float_u64: u64 = @bitCast(retrieved_float_u32s.*); const retrieved_float_value: f64 = @bitCast(retrieved_float_u64); try testing.expectApproxEqAbs(float_value, retrieved_float_value, 0.0001); - // Verify regions + // Verify regions using captured indices try testing.expectEqual(@as(usize, 3), deserialized.regions.len()); - for (regions, 0..) |expected_region, i| { - const retrieved_region = deserialized.regions.get(@enumFromInt(i)); - try testing.expectEqual(expected_region.start.offset, retrieved_region.start.offset); - try testing.expectEqual(expected_region.end.offset, retrieved_region.end.offset); - } + const retrieved_region1 = deserialized.regions.get(region1_idx); + try testing.expectEqual(region1.start.offset, retrieved_region1.start.offset); + try testing.expectEqual(region1.end.offset, retrieved_region1.end.offset); + const retrieved_region2 = deserialized.regions.get(region2_idx); + try testing.expectEqual(region2.start.offset, retrieved_region2.start.offset); + try testing.expectEqual(region2.end.offset, retrieved_region2.end.offset); + const retrieved_region3 = deserialized.regions.get(region3_idx); + try testing.expectEqual(region3.start.offset, retrieved_region3.start.offset); + try testing.expectEqual(region3.end.offset, retrieved_region3.end.offset); // Verify scratch is null (deserialized NodeStores don't allocate scratch) try testing.expect(deserialized.scratch == null); diff --git a/src/collections/safe_list.zig b/src/collections/safe_list.zig index 2d43053621..0c7298b93a 100644 --- a/src/collections/safe_list.zig +++ b/src/collections/safe_list.zig @@ -100,7 +100,7 @@ pub fn 
SafeList(comptime T: type) type { /// An index for an item in the list. pub const Idx = enum(u32) { /// The first valid index in the list. - zero = 0, + first = 0, _, /// Get the raw u32 value for storage @@ -375,7 +375,7 @@ pub fn SafeList(comptime T: type) type { return Iterator{ .array = self, .len = self.len(), - .current = .zero, + .current = .first, }; } }; @@ -403,7 +403,7 @@ pub fn SafeMultiList(comptime T: type) type { items: std.MultiArrayList(T) = .{}, /// Index of an item in the list. - pub const Idx = enum(u32) { zero = 0, _ }; + pub const Idx = enum(u32) { first = 0, _ }; /// A non-type-safe slice of the list. pub const Slice = std.MultiArrayList(T).Slice; @@ -468,7 +468,7 @@ pub fn SafeMultiList(comptime T: type) type { pub fn appendSlice(self: *SafeMultiList(T), gpa: Allocator, elems: []const T) std.mem.Allocator.Error!Range { if (elems.len == 0) { - return .{ .start = .zero, .count = 0 }; + return .{ .start = .first, .count = 0 }; } const start_length = self.len(); try self.items.ensureUnusedCapacity(gpa, elems.len); @@ -981,7 +981,7 @@ test "SafeList edge cases serialization" { try testing.expectEqual(@as(usize, 0), deserialized.list_u32.len()); try testing.expectEqual(@as(usize, 1), deserialized.list_u8.len()); - try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(.zero).*); + try testing.expectEqual(@as(u8, 123), deserialized.list_u8.get(.first).*); } } @@ -1068,7 +1068,7 @@ test "SafeList CompactWriter complete roundtrip example" { // Step 8: Verify data is accessible and correct const Idx = SafeList(u32).Idx; try testing.expectEqual(@as(usize, 4), deserialized.len()); - try testing.expectEqual(@as(u32, 100), deserialized.get(.zero).*); + try testing.expectEqual(@as(u32, 100), deserialized.get(.first).*); try testing.expectEqual(@as(u32, 200), deserialized.get(@as(Idx, @enumFromInt(1))).*); try testing.expectEqual(@as(u32, 300), deserialized.get(@as(Idx, @enumFromInt(2))).*); try testing.expectEqual(@as(u32, 400), 
deserialized.get(@as(Idx, @enumFromInt(3))).*); @@ -1176,7 +1176,7 @@ test "SafeList CompactWriter multiple lists with different alignments" { const U8Idx = SafeList(u8).Idx; try testing.expectEqual(@as(usize, 3), deser_u8.len()); - try testing.expectEqual(@as(u8, 10), deser_u8.get(.zero).*); + try testing.expectEqual(@as(u8, 10), deser_u8.get(.first).*); try testing.expectEqual(@as(u8, 20), deser_u8.get(@as(U8Idx, @enumFromInt(1))).*); try testing.expectEqual(@as(u8, 30), deser_u8.get(@as(U8Idx, @enumFromInt(2))).*); @@ -1191,7 +1191,7 @@ test "SafeList CompactWriter multiple lists with different alignments" { const U16Idx = SafeList(u16).Idx; try testing.expectEqual(@as(usize, 2), deser_u16.len()); - try testing.expectEqual(@as(u16, 1000), deser_u16.get(.zero).*); + try testing.expectEqual(@as(u16, 1000), deser_u16.get(.first).*); try testing.expectEqual(@as(u16, 2000), deser_u16.get(@as(U16Idx, @enumFromInt(1))).*); // 3. Deserialize u32 list @@ -1205,7 +1205,7 @@ test "SafeList CompactWriter multiple lists with different alignments" { const U32Idx = SafeList(u32).Idx; try testing.expectEqual(@as(usize, 4), deser_u32.len()); - try testing.expectEqual(@as(u32, 100_000), deser_u32.get(.zero).*); + try testing.expectEqual(@as(u32, 100_000), deser_u32.get(.first).*); try testing.expectEqual(@as(u32, 200_000), deser_u32.get(@as(U32Idx, @enumFromInt(1))).*); try testing.expectEqual(@as(u32, 300_000), deser_u32.get(@as(U32Idx, @enumFromInt(2))).*); try testing.expectEqual(@as(u32, 400_000), deser_u32.get(@as(U32Idx, @enumFromInt(3))).*); @@ -1221,7 +1221,7 @@ test "SafeList CompactWriter multiple lists with different alignments" { const U64Idx = SafeList(u64).Idx; try testing.expectEqual(@as(usize, 2), deser_u64.len()); - try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(.zero).*); + try testing.expectEqual(@as(u64, 10_000_000_000), deser_u64.get(.first).*); try testing.expectEqual(@as(u64, 20_000_000_000), deser_u64.get(@as(U64Idx, @enumFromInt(1))).*); 
// 5. Deserialize struct list @@ -1231,7 +1231,7 @@ test "SafeList CompactWriter multiple lists with different alignments" { const StructIdx = SafeList(AlignedStruct).Idx; try testing.expectEqual(@as(usize, 2), deser_struct.len()); - const item0 = deser_struct.get(.zero); + const item0 = deser_struct.get(.first); try testing.expectEqual(@as(u32, 42), item0.x); try testing.expectEqual(@as(u64, 1337), item0.y); try testing.expectEqual(@as(u8, 255), item0.z); @@ -1344,7 +1344,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { const D1Idx = SafeList(u8).Idx; try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 1), d1.get(.zero).*); + try testing.expectEqual(@as(u8, 1), d1.get(.first).*); try testing.expectEqual(@as(u8, 2), d1.get(@as(D1Idx, @enumFromInt(1))).*); try testing.expectEqual(@as(u8, 3), d1.get(@as(D1Idx, @enumFromInt(2))).*); @@ -1358,7 +1358,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { const D2Idx = SafeList(u64).Idx; try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u64, 1_000_000), d2.get(.zero).*); + try testing.expectEqual(@as(u64, 1_000_000), d2.get(.first).*); try testing.expectEqual(@as(u64, 2_000_000), d2.get(@as(D2Idx, @enumFromInt(1))).*); // 3. 
Third list - u16 @@ -1371,7 +1371,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { const D3Idx = SafeList(u16).Idx; try testing.expectEqual(@as(usize, 4), d3.len()); - try testing.expectEqual(@as(u16, 100), d3.get(.zero).*); + try testing.expectEqual(@as(u16, 100), d3.get(.first).*); try testing.expectEqual(@as(u16, 200), d3.get(@as(D3Idx, @enumFromInt(1))).*); try testing.expectEqual(@as(u16, 300), d3.get(@as(D3Idx, @enumFromInt(2))).*); try testing.expectEqual(@as(u16, 400), d3.get(@as(D3Idx, @enumFromInt(3))).*); @@ -1382,7 +1382,7 @@ test "SafeList CompactWriter interleaved pattern with alignment tracking" { const d4 = s4.deserialize(@as(i64, @intCast(base))); try testing.expectEqual(@as(usize, 1), d4.len()); - try testing.expectEqual(@as(u32, 42), d4.get(.zero).*); + try testing.expectEqual(@as(u32, 42), d4.get(.first).*); } test "SafeList CompactWriter brute-force alignment verification" { @@ -1503,7 +1503,7 @@ test "SafeList CompactWriter brute-force alignment verification" { offset += 1; // 1 u8 element try testing.expectEqual(@as(usize, 1), d_u8.len()); - try testing.expectEqual(@as(u8, 42), d_u8.get(.zero).*); + try testing.expectEqual(@as(u8, 42), d_u8.get(.first).*); // Second list offset = std.mem.alignForward(usize, offset, @alignOf(SafeList(T).Serialized)); @@ -1582,10 +1582,10 @@ test "SafeMultiList CompactWriter roundtrip with file" { try testing.expectEqual(@as(usize, 4), deserialized.len()); // Verify all the data - try testing.expectEqual(@as(u32, 100), deserialized.get(.zero).id); - try testing.expectEqual(@as(u64, 1000), deserialized.get(.zero).value); - try testing.expectEqual(true, deserialized.get(.zero).flag); - try testing.expectEqual(@as(u8, 10), deserialized.get(.zero).data); + try testing.expectEqual(@as(u32, 100), deserialized.get(.first).id); + try testing.expectEqual(@as(u64, 1000), deserialized.get(.first).value); + try testing.expectEqual(true, deserialized.get(.first).flag); + try 
testing.expectEqual(@as(u8, 10), deserialized.get(.first).data); const second_idx: Idx = @enumFromInt(1); try testing.expectEqual(@as(u32, 200), deserialized.get(second_idx).id); @@ -1737,8 +1737,8 @@ test "SafeMultiList CompactWriter multiple lists different alignments" { const d1_serialized = @as(*SafeMultiList(Type1).Serialized, @ptrCast(@alignCast(buffer.ptr + offset1))); const d1 = d1_serialized.deserialize(base); try testing.expectEqual(@as(usize, 3), d1.len()); - try testing.expectEqual(@as(u8, 10), d1.get(.zero).a); - try testing.expectEqual(@as(u16, 100), d1.get(.zero).b); + try testing.expectEqual(@as(u8, 10), d1.get(.first).a); + try testing.expectEqual(@as(u16, 100), d1.get(.first).b); try testing.expectEqual(@as(u8, 20), d1.get(@as(D1Idx, @enumFromInt(1))).a); try testing.expectEqual(@as(u16, 200), d1.get(@as(D1Idx, @enumFromInt(1))).b); try testing.expectEqual(@as(u8, 30), d1.get(@as(D1Idx, @enumFromInt(2))).a); @@ -1748,16 +1748,16 @@ test "SafeMultiList CompactWriter multiple lists different alignments" { const d2_serialized = @as(*SafeMultiList(Type2).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d2.len()); - try testing.expectEqual(@as(u32, 1000), d2.get(.zero).x); - try testing.expectEqual(@as(u64, 10000), d2.get(.zero).y); + try testing.expectEqual(@as(u32, 1000), d2.get(.first).x); + try testing.expectEqual(@as(u64, 10000), d2.get(.first).y); // Deserialize list3 (at offset3) const d3_serialized = @as(*SafeMultiList(Type3).Serialized, @ptrCast(@alignCast(buffer.ptr + offset3))); const d3 = d3_serialized.deserialize(base); try testing.expectEqual(@as(usize, 2), d3.len()); - try testing.expectEqual(@as(u64, 999), d3.get(.zero).id); - try testing.expectEqual(@as(u8, 42), d3.get(.zero).data); - try testing.expectEqual(true, d3.get(.zero).flag); + try testing.expectEqual(@as(u64, 999), d3.get(.first).id); + try testing.expectEqual(@as(u8, 42), 
d3.get(.first).data); + try testing.expectEqual(true, d3.get(.first).flag); } test "SafeMultiList CompactWriter brute-force alignment verification" { @@ -1847,7 +1847,7 @@ test "SafeMultiList CompactWriter brute-force alignment verification" { const d2_serialized = @as(*SafeMultiList(TestType).Serialized, @ptrCast(@alignCast(buffer.ptr + offset2))); const d2 = d2_serialized.deserialize(base); if (length > 0) { - const d2_first_idx: SafeMultiList(TestType).Idx = .zero; + const d2_first_idx: SafeMultiList(TestType).Idx = .first; try testing.expectEqual(@as(usize, 1), d2.len()); try testing.expectEqual(@as(u8, 255), d2.get(d2_first_idx).a); try testing.expectEqual(@as(u32, 999999), d2.get(d2_first_idx).b); @@ -2319,7 +2319,7 @@ test "SafeMultiList.Serialized roundtrip" { try testing.expectEqual(@as(u8, 64), c_values[2]); // Check get() method - const first_idx: SafeMultiList(TestStruct).Idx = .zero; + const first_idx: SafeMultiList(TestStruct).Idx = .first; const item1 = list.get(first_idx); try testing.expectEqual(@as(u32, 100), item1.a); try testing.expectEqual(@as(f32, 1.5), item1.b); diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index d332cd72e3..435bbfc005 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -15598,7 +15598,7 @@ test "interpreter: cross-module method resolution should find methods in origin // Create an Import.Idx for module A // Using first import index for test purposes - const first_import_idx: can.CIR.Import.Idx = .zero; + const first_import_idx: can.CIR.Import.Idx = .first; try interp.import_envs.put(interp.allocator, first_import_idx, &module_a); // Verify we can retrieve module A's environment @@ -15661,7 +15661,7 @@ test "interpreter: transitive module method resolution (A imports B imports C)" // Create Import.Idx entries for both modules // Using sequential import indices for test purposes - const first_import_idx: can.CIR.Import.Idx = .zero; + const first_import_idx: can.CIR.Import.Idx = .first; 
const second_import_idx: can.CIR.Import.Idx = @enumFromInt(1); try interp.import_envs.put(interp.allocator, first_import_idx, &module_b); try interp.import_envs.put(interp.allocator, second_import_idx, &module_c); diff --git a/src/parse/NodeStore.zig b/src/parse/NodeStore.zig index a2db6b2163..cb81dc2cf3 100644 --- a/src/parse/NodeStore.zig +++ b/src/parse/NodeStore.zig @@ -22,7 +22,7 @@ const sexpr = base.sexpr; const OPTIONAL_VALUE_OFFSET: u32 = 1; /// The root node is always stored at index 0 in the node list. -pub const root_node_idx: Node.List.Idx = .zero; +pub const root_node_idx: Node.List.Idx = .first; const NodeStore = @This(); diff --git a/src/types/store.zig b/src/types/store.zig index 7c7c35afec..2639dd8ec6 100644 --- a/src/types/store.zig +++ b/src/types/store.zig @@ -1012,7 +1012,7 @@ const SlotStore = struct { /// A type-safe index into the store const Idx = enum(u32) { - zero = 0, + first = 0, _, }; }; @@ -1118,7 +1118,7 @@ const DescStore = struct { /// A type-safe index into the store /// This type is made public below const Idx = enum(u32) { - zero = 0, + first = 0, _, }; }; @@ -1396,21 +1396,27 @@ test "SlotStore.Serialized roundtrip" { const gpa = std.testing.allocator; const CompactWriter = collections.CompactWriter; - // Named indices for test clarity - const desc_idx_100: DescStore.Idx = @enumFromInt(100); - const var_0: Var = .zero; - const desc_idx_200: DescStore.Idx = @enumFromInt(200); - const slot_idx_0: SlotStore.Idx = .zero; - const slot_idx_1: SlotStore.Idx = @enumFromInt(1); - const slot_idx_2: SlotStore.Idx = @enumFromInt(2); + // Use a real Store to get real Var and DescStore.Idx values + var store = try Store.init(gpa); + defer store.deinit(); + // Create real type variables - fresh() creates a flex var with a root slot + const var_a = try store.fresh(); + const var_b = try store.fresh(); + const var_c = try store.fresh(); + + // Get the DescStore.Idx from the root slots + const desc_idx_a = store.getSlot(var_a).root; + const 
desc_idx_c = store.getSlot(var_c).root; + + // Create a separate SlotStore for serialization testing var slot_store = try SlotStore.init(gpa, 4); defer slot_store.deinit(gpa); - // Add some slots - _ = try slot_store.insert(gpa, .{ .root = desc_idx_100 }); - _ = try slot_store.insert(gpa, .{ .redirect = var_0 }); - _ = try slot_store.insert(gpa, .{ .root = desc_idx_200 }); + // Add slots and capture returned indices + const slot_a = try slot_store.insert(gpa, .{ .root = desc_idx_a }); + const slot_b = try slot_store.insert(gpa, .{ .redirect = var_b }); + const slot_c = try slot_store.insert(gpa, .{ .root = desc_idx_c }); // Create temp file var tmp_dir = std.testing.tmpDir(.{}); @@ -1443,25 +1449,21 @@ test "SlotStore.Serialized roundtrip" { const deser_ptr = @as(*SlotStore.Serialized, @ptrCast(@alignCast(buffer.ptr))); const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); - // Verify + // Verify using captured indices try std.testing.expectEqual(@as(u64, 3), deserialized.backing.len()); - try std.testing.expectEqual(Slot{ .root = desc_idx_100 }, deserialized.get(slot_idx_0)); - try std.testing.expectEqual(Slot{ .redirect = var_0 }, deserialized.get(slot_idx_1)); - try std.testing.expectEqual(Slot{ .root = desc_idx_200 }, deserialized.get(slot_idx_2)); + try std.testing.expectEqual(Slot{ .root = desc_idx_a }, deserialized.get(slot_a)); + try std.testing.expectEqual(Slot{ .redirect = var_b }, deserialized.get(slot_b)); + try std.testing.expectEqual(Slot{ .root = desc_idx_c }, deserialized.get(slot_c)); } test "DescStore.Serialized roundtrip" { const gpa = std.testing.allocator; const CompactWriter = collections.CompactWriter; - // Named indices for test clarity - const desc_idx_0: DescStore.Idx = .zero; - const desc_idx_1: DescStore.Idx = @enumFromInt(1); - var desc_store = try DescStore.init(gpa, 4); defer desc_store.deinit(gpa); - // Add some descriptors + // Add some descriptors and capture returned indices const desc1 = 
Descriptor{ .content = Content{ .flex = Flex.init() }, .rank = Rank.generalized, @@ -1473,8 +1475,8 @@ test "DescStore.Serialized roundtrip" { .mark = Mark.visited, }; - _ = try desc_store.insert(gpa, desc1); - _ = try desc_store.insert(gpa, desc2); + const desc_idx_1 = try desc_store.insert(gpa, desc1); + const desc_idx_2 = try desc_store.insert(gpa, desc2); // Create temp file var tmp_dir = std.testing.tmpDir(.{}); @@ -1512,10 +1514,10 @@ test "DescStore.Serialized roundtrip" { const deserialized = deser_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr)))); // Note: deserialize already handles relocation, don't call relocate again - // Verify + // Verify using captured indices try std.testing.expectEqual(@as(usize, 2), deserialized.backing.items.len); - try std.testing.expectEqual(desc1, deserialized.get(desc_idx_0)); - try std.testing.expectEqual(desc2, deserialized.get(desc_idx_1)); + try std.testing.expectEqual(desc1, deserialized.get(desc_idx_1)); + try std.testing.expectEqual(desc2, deserialized.get(desc_idx_2)); } test "Store.Serialized roundtrip" { diff --git a/src/types/types.zig b/src/types/types.zig index b59ee501ad..ec3ccab2ea 100644 --- a/src/types/types.zig +++ b/src/types/types.zig @@ -33,7 +33,6 @@ test { /// A type variable pub const Var = enum(u32) { - zero = 0, _, /// A safe list of type variables From a98a981b4e7dbe3c910244852f8e8800b66d8e90 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 21:30:21 -0500 Subject: [PATCH 27/64] Remove more hardcoded enum values --- src/canonicalize/test/anno_only_test.zig | 14 +-- src/canonicalize/test/import_store_test.zig | 2 +- .../test/import_validation_test.zig | 2 +- src/check/test/unify_test.zig | 97 +++++++++---------- src/compile/test/type_printing_bug_test.zig | 7 +- src/eval/test/helpers.zig | 10 +- src/eval/test/stack_test.zig | 26 ++--- 7 files changed, 77 insertions(+), 81 deletions(-) diff --git a/src/canonicalize/test/anno_only_test.zig 
b/src/canonicalize/test/anno_only_test.zig index 8ad29d3252..5812d90437 100644 --- a/src/canonicalize/test/anno_only_test.zig +++ b/src/canonicalize/test/anno_only_test.zig @@ -23,15 +23,15 @@ test "e_anno_only can be used in statements" { // used as part of s_decl statements, which is how standalone // type annotations are represented after canonicalization. - // Use named constants to make the intent clear - these represent the first indices - const first_pattern_idx: CIR.Pattern.Idx = @enumFromInt(0); - const first_expr_idx: CIR.Expr.Idx = @enumFromInt(0); - const first_anno_idx: CIR.Annotation.Idx = @enumFromInt(0); + // Use arbitrary non-zero indices for construction test + const pattern_idx: CIR.Pattern.Idx = @enumFromInt(42); + const expr_idx: CIR.Expr.Idx = @enumFromInt(42); + const anno_idx: CIR.Annotation.Idx = @enumFromInt(42); const stmt = CIR.Statement{ .s_decl = .{ - .pattern = first_pattern_idx, - .expr = first_expr_idx, - .anno = first_anno_idx, + .pattern = pattern_idx, + .expr = expr_idx, + .anno = anno_idx, } }; // Verify the statement was created correctly diff --git a/src/canonicalize/test/import_store_test.zig b/src/canonicalize/test/import_store_test.zig index 6360257dfa..0418fab2aa 100644 --- a/src/canonicalize/test/import_store_test.zig +++ b/src/canonicalize/test/import_store_test.zig @@ -216,7 +216,7 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { const str_idx_1 = deserialized.imports.items.items[1]; // Named constants for first and second import indices - const first_import_idx: Import.Idx = @enumFromInt(0); + const first_import_idx: Import.Idx = .first; const second_import_idx: Import.Idx = @enumFromInt(1); try testing.expect(deserialized.map.contains(str_idx_0)); diff --git a/src/canonicalize/test/import_validation_test.zig b/src/canonicalize/test/import_validation_test.zig index dd6fab92f2..881eb97219 100644 --- a/src/canonicalize/test/import_validation_test.zig +++ 
b/src/canonicalize/test/import_validation_test.zig @@ -359,7 +359,7 @@ test "Import.Idx is u32" { const back_to_u32 = @intFromEnum(import_idx); try testing.expectEqual(test_idx, back_to_u32); // Test that we can create valid Import.Idx values - const first_import_idx: CIR.Import.Idx = @enumFromInt(0); + const first_import_idx: CIR.Import.Idx = .first; const max_import_idx: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value // Verify they are distinct try testing.expect(first_import_idx != max_import_idx); diff --git a/src/check/test/unify_test.zig b/src/check/test/unify_test.zig index 99a5777c07..a9eb182d62 100644 --- a/src/check/test/unify_test.zig +++ b/src/check/test/unify_test.zig @@ -790,10 +790,10 @@ test "partitionFields - same record" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const field_x = try env.mkRecordField("field_x", placeholder_var_0); - const field_y = try env.mkRecordField("field_y", placeholder_var_1); + const var_x = try env.module_env.types.fresh(); + const var_y = try env.module_env.types.fresh(); + const field_x = try env.mkRecordField("field_x", var_x); + const field_y = try env.mkRecordField("field_y", var_y); const range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ field_x, field_y }); @@ -815,12 +815,12 @@ test "partitionFields - disjoint fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const a1 = try env.mkRecordField("a1", placeholder_var_0); - const a2 = try env.mkRecordField("a2", placeholder_var_1); - const b1 = try env.mkRecordField("b1", placeholder_var_2); + const var_a1 = try env.module_env.types.fresh(); + const var_a2 = try env.module_env.types.fresh(); + const var_b1 = try env.module_env.types.fresh(); + const a1 = try 
env.mkRecordField("a1", var_a1); + const a2 = try env.mkRecordField("a2", var_a2); + const b1 = try env.mkRecordField("b1", var_b1); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, a2 }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{b1}); @@ -844,12 +844,12 @@ test "partitionFields - overlapping fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const a1 = try env.mkRecordField("a1", placeholder_var_0); - const both = try env.mkRecordField("both", placeholder_var_1); - const b1 = try env.mkRecordField("b1", placeholder_var_2); + const var_a1 = try env.module_env.types.fresh(); + const var_both = try env.module_env.types.fresh(); + const var_b1 = try env.module_env.types.fresh(); + const a1 = try env.mkRecordField("a1", var_a1); + const both = try env.mkRecordField("both", var_both); + const b1 = try env.mkRecordField("b1", var_b1); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ a1, both }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ b1, both }); @@ -876,12 +876,12 @@ test "partitionFields - reordering is normalized" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const f1 = try env.mkRecordField("f1", placeholder_var_0); - const f2 = try env.mkRecordField("f2", placeholder_var_1); - const f3 = try env.mkRecordField("f3", placeholder_var_2); + const var_f1 = try env.module_env.types.fresh(); + const var_f2 = try env.module_env.types.fresh(); + const var_f3 = try env.module_env.types.fresh(); + const f1 = try env.mkRecordField("f1", var_f1); + const f2 = try env.mkRecordField("f2", var_f2); + const f3 = try env.mkRecordField("f3", 
var_f3); const a_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f3, f1, f2 }); const b_range = try env.scratch.appendSliceGatheredFields(&[_]RecordField{ f1, f2, f3 }); @@ -1038,10 +1038,10 @@ test "partitionTags - same tags" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const tag_x = try env.mkTag("X", &[_]Var{placeholder_var_0}); - const tag_y = try env.mkTag("Y", &[_]Var{placeholder_var_1}); + const var_x = try env.module_env.types.fresh(); + const var_y = try env.module_env.types.fresh(); + const tag_x = try env.mkTag("X", &[_]Var{var_x}); + const tag_y = try env.mkTag("Y", &[_]Var{var_y}); const range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ tag_x, tag_y }); @@ -1063,12 +1063,12 @@ test "partitionTags - disjoint fields" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const a1 = try env.mkTag("A1", &[_]Var{placeholder_var_0}); - const a2 = try env.mkTag("A2", &[_]Var{placeholder_var_1}); - const b1 = try env.mkTag("B1", &[_]Var{placeholder_var_2}); + const var_a1 = try env.module_env.types.fresh(); + const var_a2 = try env.module_env.types.fresh(); + const var_b1 = try env.module_env.types.fresh(); + const a1 = try env.mkTag("A1", &[_]Var{var_a1}); + const a2 = try env.mkTag("A2", &[_]Var{var_a2}); + const b1 = try env.mkTag("B1", &[_]Var{var_b1}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, a2 }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{b1}); @@ -1092,12 +1092,12 @@ test "partitionTags - overlapping tags" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const a1 
= try env.mkTag("A", &[_]Var{placeholder_var_0}); - const both = try env.mkTag("Both", &[_]Var{placeholder_var_1}); - const b1 = try env.mkTag("B", &[_]Var{placeholder_var_2}); + const var_a = try env.module_env.types.fresh(); + const var_both = try env.module_env.types.fresh(); + const var_b = try env.module_env.types.fresh(); + const a1 = try env.mkTag("A", &[_]Var{var_a}); + const both = try env.mkTag("Both", &[_]Var{var_both}); + const b1 = try env.mkTag("B", &[_]Var{var_b}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ a1, both }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ b1, both }); @@ -1124,12 +1124,12 @@ test "partitionTags - reordering is normalized" { var env = try TestEnv.init(gpa); defer env.deinit(); - const placeholder_var_0: Var = @enumFromInt(0); - const placeholder_var_1: Var = @enumFromInt(1); - const placeholder_var_2: Var = @enumFromInt(2); - const f1 = try env.mkTag("F1", &[_]Var{placeholder_var_0}); - const f2 = try env.mkTag("F2", &[_]Var{placeholder_var_1}); - const f3 = try env.mkTag("F3", &[_]Var{placeholder_var_2}); + const var_f1 = try env.module_env.types.fresh(); + const var_f2 = try env.module_env.types.fresh(); + const var_f3 = try env.module_env.types.fresh(); + const f1 = try env.mkTag("F1", &[_]Var{var_f1}); + const f2 = try env.mkTag("F2", &[_]Var{var_f2}); + const f3 = try env.mkTag("F3", &[_]Var{var_f3}); const a_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f3, f1, f2 }); const b_range = try env.scratch.appendSliceGatheredTags(&[_]Tag{ f1, f2, f3 }); @@ -1509,8 +1509,7 @@ test "unify - flex with constraints vs structure captures deferred check" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); - const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; + const deferred = 
env.scratch.deferred_constraints.get(.first).*; try std.testing.expectEqual( env.module_env.types.resolveVar(structure_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, @@ -1545,8 +1544,7 @@ test "unify - structure vs flex with constraints captures deferred check (revers // Check that constraint was captured (note: vars might be swapped due to merge order) try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); - const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; + const deferred = env.scratch.deferred_constraints.get(.first).*; try std.testing.expectEqual( env.module_env.types.resolveVar(flex_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, @@ -1599,8 +1597,7 @@ test "unify - flex vs nominal type captures constraint" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const first_constraint_idx: unify_mod.DeferredConstraintCheck.SafeList.Idx = @enumFromInt(0); - const deferred = env.scratch.deferred_constraints.get(first_constraint_idx).*; + const deferred = env.scratch.deferred_constraints.get(.first).*; try std.testing.expectEqual( env.module_env.types.resolveVar(nominal_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, diff --git a/src/compile/test/type_printing_bug_test.zig b/src/compile/test/type_printing_bug_test.zig index 4bd20e1830..8d04c665c7 100644 --- a/src/compile/test/type_printing_bug_test.zig +++ b/src/compile/test/type_printing_bug_test.zig @@ -16,7 +16,6 @@ const AST = parse.AST; test "canonicalizeAndTypeCheckModule preserves Try types in type printing" { const testing = std.testing; const gpa = testing.allocator; - const first_var: types.Var = @enumFromInt(0); const source = \\app [main] { pf: platform "platform.roc" } @@ -65,7 +64,7 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type 
printing" { defer result.deinit(); // Now get the type of map_result and convert it to a string - // Find the map_result definition + // Find the map_result definition and get its type var from the expression const defs_slice = env.store.sliceDefs(env.all_defs); var map_result_var: ?types.Var = null; for (defs_slice) |def_idx| { @@ -75,8 +74,8 @@ test "canonicalizeAndTypeCheckModule preserves Try types in type printing" { const ident_idx = pattern.assign.ident; const ident_text = env.getIdent(ident_idx); if (std.mem.eql(u8, ident_text, "map_result")) { - // Get the type variable from the first definition - it's the first in the defs list - map_result_var = first_var; + // Get the type variable from the definition's expression + map_result_var = ModuleEnv.varFrom(def.expr); break; } } diff --git a/src/eval/test/helpers.zig b/src/eval/test/helpers.zig index b055cb1947..1382ce81bc 100644 --- a/src/eval/test/helpers.zig +++ b/src/eval/test/helpers.zig @@ -331,8 +331,8 @@ pub fn runExpectTuple(src: []const u8, expected_elements: []const ExpectedElemen for (expected_elements) |expected_element| { // Get the element at the specified index - // Use placeholder rt_var (test helper without type information) - const element = try tuple_accessor.getElement(@intCast(expected_element.index), @enumFromInt(0)); + // Use the result's rt_var since we're accessing elements of the evaluated expression + const element = try tuple_accessor.getElement(@intCast(expected_element.index), result.rt_var); // Check if this is an integer or Dec try std.testing.expect(element.layout.tag == .scalar); @@ -398,7 +398,7 @@ pub fn runExpectRecord(src: []const u8, expected_fields: []const ExpectedField, .layout = field_layout, .ptr = field_ptr, .is_initialized = true, - .rt_var = @enumFromInt(0), + .rt_var = result.rt_var, // use result's rt_var for field access }; // Check if this is an integer or Dec const int_val = if (field_layout.data.scalar.tag == .int) blk: { @@ -455,8 +455,8 @@ pub fn 
runExpectListI64(src: []const u8, expected_elements: []const i64, should_ try std.testing.expectEqual(expected_elements.len, list_accessor.len()); for (expected_elements, 0..) |expected_val, i| { - // Use placeholder rt_var (test helper without type information) - const element = try list_accessor.getElement(i, @enumFromInt(0)); + // Use the result's rt_var since we're accessing elements of the evaluated expression + const element = try list_accessor.getElement(i, result.rt_var); // Check if this is an integer try std.testing.expect(element.layout.tag == .scalar); diff --git a/src/eval/test/stack_test.zig b/src/eval/test/stack_test.zig index 389ed83347..0aa14f86f2 100644 --- a/src/eval/test/stack_test.zig +++ b/src/eval/test/stack_test.zig @@ -17,10 +17,10 @@ test "Stack.alloca basic allocation" { var stack = try Stack.initCapacity(std.testing.allocator, 1024); defer stack.deinit(); - const ptr1 = try stack.alloca(10, @enumFromInt(0)); + const ptr1 = try stack.alloca(10, .@"1"); try std.testing.expectEqual(@as(u32, 10), stack.used); - const ptr2 = try stack.alloca(20, @enumFromInt(0)); + const ptr2 = try stack.alloca(20, .@"1"); try std.testing.expectEqual(@as(u32, 30), stack.used); // The pointers should be different @@ -42,7 +42,7 @@ test "Stack.alloca with alignment" { // Create initial misalignment if (misalign > 0) { - _ = try stack.alloca(@intCast(misalign), @enumFromInt(0)); + _ = try stack.alloca(@intCast(misalign), .@"1"); } // Test each alignment with the current misalignment @@ -70,7 +70,7 @@ test "Stack.alloca with alignment" { stack.used = 0; for (alignments) |alignment| { // Create some misalignment - _ = try stack.alloca(3, @enumFromInt(0)); + _ = try stack.alloca(3, .@"1"); const before_used = stack.used; const ptr = try stack.alloca(alignment * 2, @enumFromInt(std.math.log2_int(u32, alignment))); @@ -88,10 +88,10 @@ test "Stack.alloca overflow" { defer stack.deinit(); // This should succeed - _ = try stack.alloca(50, @enumFromInt(0)); + _ = try 
stack.alloca(50, .@"1"); // This should fail (would total 150 bytes) - try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, @enumFromInt(0))); + try std.testing.expectError(StackOverflow.StackOverflow, stack.alloca(100, .@"1")); // Stack should still be in valid state try std.testing.expectEqual(@as(u32, 50), stack.used); @@ -102,14 +102,14 @@ test "Stack.restore" { defer stack.deinit(); const checkpoint = stack.next(); - _ = try stack.alloca(100, @enumFromInt(0)); + _ = try stack.alloca(100, .@"1"); try std.testing.expectEqual(@as(u32, 100), stack.used); stack.restore(checkpoint); try std.testing.expectEqual(@as(u32, 0), stack.used); // Allocate again after restore - const ptr1 = try stack.alloca(50, @enumFromInt(0)); + const ptr1 = try stack.alloca(50, .@"1"); try std.testing.expectEqual(@intFromPtr(checkpoint), @intFromPtr(ptr1)); } @@ -120,7 +120,7 @@ test "Stack.isEmpty" { try std.testing.expect(stack.isEmpty()); try std.testing.expectEqual(@as(u32, 100), stack.available()); - _ = try stack.alloca(30, @enumFromInt(0)); + _ = try stack.alloca(30, .@"1"); try std.testing.expect(!stack.isEmpty()); try std.testing.expectEqual(@as(u32, 70), stack.available()); } @@ -129,8 +129,8 @@ test "Stack zero-size allocation" { var stack = try Stack.initCapacity(std.testing.allocator, 100); defer stack.deinit(); - const ptr1 = try stack.alloca(0, @enumFromInt(0)); - const ptr2 = try stack.alloca(0, @enumFromInt(0)); + const ptr1 = try stack.alloca(0, .@"1"); + const ptr2 = try stack.alloca(0, .@"1"); // Zero-size allocations should return the same pointer try std.testing.expectEqual(@intFromPtr(ptr1), @intFromPtr(ptr2)); @@ -147,8 +147,8 @@ test "Stack memory is aligned to max_roc_alignment" { try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value); // Also verify after some allocations - _ = try stack.alloca(100, @enumFromInt(0)); - _ = try stack.alloca(200, @enumFromInt(0)); + _ = try stack.alloca(100, .@"1"); + _ = try 
stack.alloca(200, .@"1"); // The start pointer should still be aligned try std.testing.expectEqual(@as(usize, 0), start_addr % max_alignment_value); From 5262438d2e570f09783052c10fa9e42d9f671c85 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 21:42:56 -0500 Subject: [PATCH 28/64] Use a fresh var where it turns out it's needed --- src/canonicalize/Can.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/canonicalize/Can.zig b/src/canonicalize/Can.zig index 1818c8ab02..c2f59f24e1 100644 --- a/src/canonicalize/Can.zig +++ b/src/canonicalize/Can.zig @@ -5171,7 +5171,7 @@ pub fn canonicalizeExpr( .patterns = ok_branch_pat_span, .value = ok_lookup_idx, .guard = null, - .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking + .redundant = try self.env.types.fresh(), }, region, ); @@ -5245,7 +5245,7 @@ pub fn canonicalizeExpr( .patterns = err_branch_pat_span, .value = return_expr_idx, .guard = null, - .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking + .redundant = try self.env.types.fresh(), }, region, ); @@ -5259,7 +5259,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = undefined, // currently unused, but reserved for future exhaustiveness checking + .exhaustive = try self.env.types.fresh(), }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); @@ -5636,7 +5636,7 @@ pub fn canonicalizeExpr( .patterns = branch_pat_span, .value = value_idx, .guard = null, - .redundant = undefined, // currently unused, but reserved for future exhaustiveness checking + .redundant = try self.env.types.fresh(), }, region, ); @@ -5656,7 +5656,7 @@ pub fn canonicalizeExpr( const match_expr = Expr.Match{ .cond = can_cond.idx, .branches = branches_span, - .exhaustive = undefined, // currently unused, but reserved for future exhaustiveness checking + .exhaustive 
= try self.env.types.fresh(), }; const expr_idx = try self.env.addExpr(CIR.Expr{ .e_match = match_expr }, region); From b1a09477a5784d5c7121f55810a7b9a54ca6fbd6 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 21:59:02 -0500 Subject: [PATCH 29/64] More cleanups --- build.zig | 10 +- src/canonicalize/test/anno_only_test.zig | 25 ---- src/canonicalize/test/import_store_test.zig | 35 ++--- .../test/import_validation_test.zig | 141 ++---------------- src/eval/comptime_evaluator.zig | 6 +- 5 files changed, 33 insertions(+), 184 deletions(-) diff --git a/build.zig b/build.zig index ad94ad82dd..68da021feb 100644 --- a/build.zig +++ b/build.zig @@ -373,7 +373,10 @@ const CheckEnumFromIntZeroStep = struct { std.debug.print("\n" ++ "=" ** 80 ++ "\n", .{}); return step.fail( - "Found {d} uses of @enumFromInt(0). Use `undefined` instead with a comment explaining why. " ++ + "Found {d} uses of @enumFromInt(0). Using placeholder values like this has consistently led to bugs in this code base. " ++ + "Do not use @enumFromInt(0) and also do not uncritically replace it with another placeholder like .first or something like that. " ++ + "If you want it to be uninitialized and are very confident it will be overwritten before it is ever read, then use `undefined`. " ++ + "Otherwise, take a step back and rethink how this code works; there should be a way to implement this in a way that does not use hardcoded placeholder indices like 0! 
" ++ "See above for details.", .{violations.items.len}, ); @@ -399,11 +402,6 @@ const CheckEnumFromIntZeroStep = struct { if (entry.kind != .file) continue; if (!std.mem.endsWith(u8, entry.path, ".zig")) continue; - // Skip test files - they may legitimately need @enumFromInt(0) for test indices - if (std.mem.endsWith(u8, entry.path, "_test.zig")) continue; - if (std.mem.indexOf(u8, entry.path, "test/") != null) continue; - if (std.mem.startsWith(u8, entry.path, "test")) continue; - const full_path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ path_prefix, entry.path }); const file = dir.openFile(entry.path, .{}) catch continue; diff --git a/src/canonicalize/test/anno_only_test.zig b/src/canonicalize/test/anno_only_test.zig index 5812d90437..ad8799c897 100644 --- a/src/canonicalize/test/anno_only_test.zig +++ b/src/canonicalize/test/anno_only_test.zig @@ -17,28 +17,3 @@ test "e_anno_only expression variant exists" { else => return error.WrongExprVariant, } } - -test "e_anno_only can be used in statements" { - // This test verifies that e_anno_only expressions can be - // used as part of s_decl statements, which is how standalone - // type annotations are represented after canonicalization. 
- - // Use arbitrary non-zero indices for construction test - const pattern_idx: CIR.Pattern.Idx = @enumFromInt(42); - const expr_idx: CIR.Expr.Idx = @enumFromInt(42); - const anno_idx: CIR.Annotation.Idx = @enumFromInt(42); - - const stmt = CIR.Statement{ .s_decl = .{ - .pattern = pattern_idx, - .expr = expr_idx, - .anno = anno_idx, - } }; - - // Verify the statement was created correctly - switch (stmt) { - .s_decl => |decl| { - try testing.expect(decl.anno != null); - }, - else => return error.WrongStatementType, - } -} diff --git a/src/canonicalize/test/import_store_test.zig b/src/canonicalize/test/import_store_test.zig index 0418fab2aa..24184d7de7 100644 --- a/src/canonicalize/test/import_store_test.zig +++ b/src/canonicalize/test/import_store_test.zig @@ -102,10 +102,12 @@ test "Import.Store basic CompactWriter roundtrip" { const idx2 = try original.getOrPut(gpa, mock_env.strings, "core.List"); const idx3 = try original.getOrPut(gpa, mock_env.strings, "my.Module"); - // Verify indices - try testing.expectEqual(@as(u32, 0), @intFromEnum(idx1)); - try testing.expectEqual(@as(u32, 1), @intFromEnum(idx2)); - try testing.expectEqual(@as(u32, 2), @intFromEnum(idx3)); + // Verify indices are distinct and in order + try testing.expect(idx1 != idx2); + try testing.expect(idx2 != idx3); + try testing.expect(idx1 != idx3); + try testing.expect(@intFromEnum(idx1) < @intFromEnum(idx2)); + try testing.expect(@intFromEnum(idx2) < @intFromEnum(idx3)); // Create a temp file var tmp_dir = testing.tmpDir(.{}); @@ -136,10 +138,10 @@ test "Import.Store basic CompactWriter roundtrip" { // Verify the imports are accessible try testing.expectEqual(@as(usize, 3), deserialized.imports.len()); - // Verify the interned string IDs are stored correctly - const str_idx1 = deserialized.imports.items.items[0]; - const str_idx2 = deserialized.imports.items.items[1]; - const str_idx3 = deserialized.imports.items.items[2]; + // Verify the interned string IDs are stored correctly by using the 
indices we got + const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)]; + const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)]; + const str_idx3 = deserialized.imports.items.items[@intFromEnum(idx3)]; try testing.expectEqualStrings("json.Json", string_store.get(str_idx1)); try testing.expectEqualStrings("core.List", string_store.get(str_idx2)); @@ -201,7 +203,7 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { // Verify correct number of imports try testing.expectEqual(@as(usize, 2), deserialized.imports.len()); - // Get the string IDs and verify the strings + // Get the string IDs using the indices we captured and verify the strings const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)]; const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)]; @@ -212,15 +214,8 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { try testing.expectEqual(@as(usize, 2), deserialized.map.count()); // Check that the map has correct entries for the string indices that were deserialized - const str_idx_0 = deserialized.imports.items.items[0]; - const str_idx_1 = deserialized.imports.items.items[1]; - - // Named constants for first and second import indices - const first_import_idx: Import.Idx = .first; - const second_import_idx: Import.Idx = @enumFromInt(1); - - try testing.expect(deserialized.map.contains(str_idx_0)); - try testing.expect(deserialized.map.contains(str_idx_1)); - try testing.expectEqual(first_import_idx, deserialized.map.get(str_idx_0).?); - try testing.expectEqual(second_import_idx, deserialized.map.get(str_idx_1).?); + try testing.expect(deserialized.map.contains(str_idx1)); + try testing.expect(deserialized.map.contains(str_idx2)); + try testing.expectEqual(idx1, deserialized.map.get(str_idx1).?); + try testing.expectEqual(idx2, deserialized.map.get(str_idx2).?); } diff --git a/src/canonicalize/test/import_validation_test.zig b/src/canonicalize/test/import_validation_test.zig 
index 881eb97219..03d97b89e4 100644 --- a/src/canonicalize/test/import_validation_test.zig +++ b/src/canonicalize/test/import_validation_test.zig @@ -254,7 +254,7 @@ test "import interner - Import.Idx functionality" { // Check that we have the correct number of unique imports (duplicates are deduplicated) // Expected: List, Dict, Json, Set (4 unique) try expectEqual(@as(usize, 4), result.parse_env.imports.imports.len()); - // Verify each unique module has an Import.Idx + // Verify each unique module has an Import.Idx by checking the imports list var found_list = false; var found_dict = false; var found_json_decode = false; @@ -276,16 +276,6 @@ test "import interner - Import.Idx functionality" { try expectEqual(true, found_dict); try expectEqual(true, found_json_decode); try expectEqual(true, found_set); - // Test the lookup functionality - // Get the Import.Idx for "List" (should be used twice) - var list_import_idx: ?CIR.Import.Idx = null; - for (result.parse_env.imports.imports.items.items, 0..) |import_string_idx, idx| { - if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) { - list_import_idx = @enumFromInt(idx); - break; - } - } - try testing.expect(list_import_idx != null); } test "import interner - comprehensive usage example" { @@ -325,22 +315,19 @@ test "import interner - comprehensive usage example" { // Check that we have the correct number of unique imports // Expected: List, Dict, Try (3 unique) try expectEqual(@as(usize, 3), result.parse_env.imports.imports.len()); - // Verify each unique module has an Import.Idx + // Verify each unique module was imported var found_list = false; var found_dict = false; var found_result = false; - for (result.parse_env.imports.imports.items.items, 0..) 
|import_string_idx, idx| { - if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "List")) { + for (result.parse_env.imports.imports.items.items) |import_string_idx| { + const module_name = result.parse_env.getString(import_string_idx); + if (std.mem.eql(u8, module_name, "List")) { found_list = true; - // Note: We can't verify exposed items count here as Import.Store only stores module names - } else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Dict")) { + } else if (std.mem.eql(u8, module_name, "Dict")) { found_dict = true; - } else if (std.mem.eql(u8, result.parse_env.getString(import_string_idx), "Try")) { + } else if (std.mem.eql(u8, module_name, "Try")) { found_result = true; } - // Verify Import.Idx can be created from the index - const import_idx: CIR.Import.Idx = @enumFromInt(idx); - _ = import_idx; // Just verify it compiles } // Verify all expected modules were found try expectEqual(true, found_list); @@ -348,25 +335,6 @@ test "import interner - comprehensive usage example" { try expectEqual(true, found_result); } -test "Import.Idx is u32" { - - // Verify that Import.Idx is indeed a u32 enum - // Import.Idx is defined as: pub const Idx = enum(u32) { _ }; - // So we know it's backed by u32 - // Verify we can create Import.Idx values from u32 - const test_idx: u32 = 42; - const import_idx = @as(CIR.Import.Idx, @enumFromInt(test_idx)); - const back_to_u32 = @intFromEnum(import_idx); - try testing.expectEqual(test_idx, back_to_u32); - // Test that we can create valid Import.Idx values - const first_import_idx: CIR.Import.Idx = .first; - const max_import_idx: CIR.Import.Idx = @enumFromInt(4294967295); // max u32 value - // Verify they are distinct - try testing.expect(first_import_idx != max_import_idx); - // Verify the size in memory - try testing.expectEqual(@sizeOf(u32), @sizeOf(CIR.Import.Idx)); -} - test "module scopes - imports work in module scope" { var gpa_state = std.heap.GeneralPurposeAllocator(.{ .safety = true 
}){}; defer std.debug.assert(gpa_state.deinit() == .ok); @@ -436,18 +404,9 @@ test "module-qualified lookups with e_lookup_external" { allocator.destroy(result.parse_env); } _ = try result.can.canonicalizeFile(); - // Count e_lookup_external expressions - var external_lookup_count: u32 = 0; - var found_list_map = false; - var found_list_len = false; - var found_dict_insert = false; - var found_dict_empty = false; - // For this test, we're checking that module-qualified lookups work - // In the new CIR, we'd need to traverse the expression tree from the root - // For now, let's verify that the imports were registered correctly + // Verify the module names are correct const imports_list = result.parse_env.imports.imports; try testing.expect(imports_list.len() >= 2); // List and Dict - // Verify the module names are correct var has_list = false; var has_dict = false; for (imports_list.items.items) |import_string_idx| { @@ -457,19 +416,6 @@ test "module-qualified lookups with e_lookup_external" { } try testing.expect(has_list); try testing.expect(has_dict); - // TODO: Once we have proper expression traversal, verify the e_lookup_external nodes - // For now, we'll skip counting the actual lookup expressions - external_lookup_count = 4; // Expected count - found_list_map = true; - found_list_len = true; - found_dict_insert = true; - found_dict_empty = true; - // Verify we found all expected external lookups - try expectEqual(@as(u32, 4), external_lookup_count); - try expectEqual(true, found_list_map); - try expectEqual(true, found_list_len); - try expectEqual(true, found_dict_insert); - try expectEqual(true, found_dict_empty); } test "exposed_items - tracking CIR node indices for exposed items" { @@ -492,7 +438,7 @@ test "exposed_items - tracking CIR node indices for exposed items" { math_env.deinit(); allocator.destroy(math_env); } - // Add exposed items and set their node indices + // Add exposed items const Ident = base.Ident; const add_idx = try 
math_env.common.idents.insert(allocator, Ident.for_text("add")); try math_env.addExposedById(add_idx); @@ -500,11 +446,7 @@ test "exposed_items - tracking CIR node indices for exposed items" { try math_env.addExposedById(multiply_idx); const pi_idx = try math_env.common.idents.insert(allocator, Ident.for_text("PI")); try math_env.addExposedById(pi_idx); - // Simulate having CIR node indices for these exposed items - // In real usage, these would be set during canonicalization of MathUtils - try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(add_idx), 100); - try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(multiply_idx), 200); - try math_env.common.exposed_items.setNodeIndexById(allocator, @bitCast(pi_idx), 300); + const math_utils_ident = try temp_idents.insert(allocator, Ident.for_text("MathUtils")); const math_utils_qualified_ident = try math_env.common.insertIdent(math_env.gpa, Ident.for_text("MathUtils")); try module_envs.put(math_utils_ident, .{ .env = math_env, .qualified_type_ident = math_utils_qualified_ident }); @@ -531,12 +473,7 @@ test "exposed_items - tracking CIR node indices for exposed items" { allocator.destroy(result.parse_env); } _ = try result.can.canonicalizeFile(); - // Verify that e_lookup_external expressions have the correct target_node_idx values - var found_add_with_idx_100 = false; - var found_multiply_with_idx_200 = false; - var found_pi_with_idx_300 = false; - // In the new CIR, we'd need to traverse the expression tree properly - // For now, let's verify the imports were registered + // Verify the MathUtils import was registered const imports_list = result.parse_env.imports.imports; var has_mathutils = false; for (imports_list.items.items) |import_string_idx| { @@ -547,62 +484,6 @@ test "exposed_items - tracking CIR node indices for exposed items" { } } try testing.expect(has_mathutils); - // TODO: Once we have proper expression traversal, verify the target_node_idx values - // For now, we'll 
assume they work correctly - found_add_with_idx_100 = true; - found_multiply_with_idx_200 = true; - found_pi_with_idx_300 = true; - // Verify all lookups have the correct target node indices - try expectEqual(true, found_add_with_idx_100); - try expectEqual(true, found_multiply_with_idx_200); - try expectEqual(true, found_pi_with_idx_300); - // Test case where node index is not populated (should get 0) - const empty_env = try allocator.create(ModuleEnv); - empty_env.* = try ModuleEnv.init(allocator, ""); - defer { - empty_env.deinit(); - allocator.destroy(empty_env); - } - const undefined_idx = try empty_env.common.idents.insert(allocator, Ident.for_text("undefined")); - try empty_env.addExposedById(undefined_idx); - // Don't set node index - should default to 0 - const empty_module_ident = try temp_idents.insert(allocator, Ident.for_text("EmptyModule")); - const empty_qualified_ident = try empty_env.common.insertIdent(empty_env.gpa, Ident.for_text("EmptyModule")); - try module_envs.put(empty_module_ident, .{ .env = empty_env, .qualified_type_ident = empty_qualified_ident }); - const source2 = - \\module [test] - \\ - \\import EmptyModule exposing [undefined] - \\ - \\test = undefined - ; - var result2 = try parseAndCanonicalizeSource(allocator, source2, &module_envs); - defer { - result2.can.deinit(); - allocator.destroy(result2.can); - result2.ast.deinit(allocator); - allocator.destroy(result2.ast); - result2.parse_env.deinit(); - allocator.destroy(result2.parse_env); - } - _ = try result2.can.canonicalizeFile(); - // Verify that undefined gets target_node_idx = 0 (not found) - var found_undefined_with_idx_0 = false; - // Verify EmptyModule was imported - const imports_list2 = result2.parse_env.imports.imports; - var has_empty_module = false; - for (imports_list2.items.items) |import_string_idx| { - const import_name = result2.parse_env.getString(import_string_idx); - if (std.mem.eql(u8, import_name, "EmptyModule")) { - has_empty_module = true; - break; - } - } - 
try testing.expect(has_empty_module); - // TODO: Once we have proper expression traversal, verify target_node_idx = 0 - // For now, we'll assume it works correctly - found_undefined_with_idx_0 = true; - try expectEqual(true, found_undefined_with_idx_0); } test "export count safety - ensures safe u16 casting" { diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 01b5e7a1f5..9486c79c00 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -463,7 +463,7 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var const variant_var: types_mod.Var = bool_rt_var; - // ext_var is a placeholder that will be set if this is a tag_union type + // ext_var will be set if this is a tag_union type var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { @@ -515,7 +515,7 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var from type information const resolved = self.interpreter.runtime_types.resolveVar(rt_var); const variant_var: types_mod.Var = rt_var; - // ext_var is a placeholder that will be set if this is a tag_union type + // ext_var will be set if this is a tag_union type var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { @@ -574,7 +574,7 @@ pub const ComptimeEvaluator = struct { // Get variant_var and ext_var from type information const resolved = self.interpreter.runtime_types.resolveVar(rt_var); const variant_var: types_mod.Var = rt_var; - // ext_var is a placeholder that will be set if this is a tag_union type + // ext_var will be set if this is a tag_union type var ext_var: types_mod.Var = undefined; if (resolved.desc.content == .structure) { From 5dbe474bc4327a4464362ac0b0aa13cbb9ac13b4 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 22:01:45 -0500 Subject: [PATCH 30/64] Change some NotImplemented errors to debug asserts --- src/canonicalize/Scope.zig | 2 +- src/eval/comptime_evaluator.zig | 19 
+++++++------------ 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/src/canonicalize/Scope.zig b/src/canonicalize/Scope.zig index 04758be7e5..208b3f0dd8 100644 --- a/src/canonicalize/Scope.zig +++ b/src/canonicalize/Scope.zig @@ -363,7 +363,7 @@ pub fn lookupTypeVar(scope: *const Scope, name: Ident.Idx) TypeVarLookupResult { /// Look up a module alias in this scope pub fn lookupModuleAlias(scope: *const Scope, name: Ident.Idx) ModuleAliasLookupResult { - // Search by comparing .idx values (u29 index into string interner) + // Search by comparing .idx values (integer index into string interner) var iter = scope.module_aliases.iterator(); while (iter.next()) |entry| { if (name.idx == entry.key_ptr.idx) { diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 9486c79c00..71ac43854f 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -485,11 +485,9 @@ pub const ComptimeEvaluator = struct { /// Fold a tag union (represented as scalar, like Bool) to an e_zero_argument_tag expression fn foldTagUnionScalar(self: *ComptimeEvaluator, def_idx: CIR.Def.Idx, expr_idx: CIR.Expr.Idx, stack_value: eval_mod.StackValue) !void { _ = def_idx; // unused now that we get rt_var from stack_value - // The value is the tag index directly (scalar integer) - // Verify the layout is actually a scalar int before extracting - if (stack_value.layout.tag != .scalar or stack_value.layout.data.scalar.tag != .int) { - return error.NotImplemented; - } + // The value is the tag index directly (scalar integer). + // The caller already verified layout.tag == .scalar, and scalar tag unions are always ints. 
+ std.debug.assert(stack_value.layout.tag == .scalar and stack_value.layout.data.scalar.tag == .int); const tag_index: usize = @intCast(stack_value.asI128()); // Get the runtime type variable from the StackValue @@ -500,17 +498,14 @@ pub const ComptimeEvaluator = struct { defer tag_list.deinit(); try self.interpreter.appendUnionTags(rt_var, &tag_list); - if (tag_index >= tag_list.items.len) { - return error.NotImplemented; - } + // Tag index from the value must be valid + std.debug.assert(tag_index < tag_list.items.len); const tag_info = tag_list.items[tag_index]; const arg_vars = self.interpreter.runtime_types.sliceVars(tag_info.args); - // Only fold zero-argument tags (like True, False) - if (arg_vars.len != 0) { - return error.NotImplemented; - } + // Scalar tag unions don't have payloads, so arg_vars must be empty + std.debug.assert(arg_vars.len == 0); // Get variant_var and ext_var from type information const resolved = self.interpreter.runtime_types.resolveVar(rt_var); From 61fc0ae0c25529d233e0508d7e7a09dec636d06a Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 22:28:30 -0500 Subject: [PATCH 31/64] use items[0] --- src/check/test/unify_test.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/check/test/unify_test.zig b/src/check/test/unify_test.zig index a9eb182d62..9f0df1a0ae 100644 --- a/src/check/test/unify_test.zig +++ b/src/check/test/unify_test.zig @@ -1509,7 +1509,7 @@ test "unify - flex with constraints vs structure captures deferred check" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(.first).*; + const deferred = env.scratch.deferred_constraints.items.items[0]; try std.testing.expectEqual( env.module_env.types.resolveVar(structure_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, @@ -1544,7 +1544,7 @@ test "unify - structure vs flex with constraints captures deferred 
check (revers // Check that constraint was captured (note: vars might be swapped due to merge order) try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(.first).*; + const deferred = env.scratch.deferred_constraints.items.items[0]; try std.testing.expectEqual( env.module_env.types.resolveVar(flex_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, @@ -1597,7 +1597,7 @@ test "unify - flex vs nominal type captures constraint" { // Check that constraint was captured try std.testing.expectEqual(1, env.scratch.deferred_constraints.len()); - const deferred = env.scratch.deferred_constraints.get(.first).*; + const deferred = env.scratch.deferred_constraints.items.items[0]; try std.testing.expectEqual( env.module_env.types.resolveVar(nominal_var).var_, env.module_env.types.resolveVar(deferred.var_).var_, From e064d79e4f47748dd39cc0ee051593aaa187f1f7 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 22:31:11 -0500 Subject: [PATCH 32/64] Use null over undefined --- src/eval/comptime_evaluator.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 71ac43854f..1c29f677ed 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -1128,7 +1128,7 @@ pub const ComptimeEvaluator = struct { try self.interpreter.bindings.append(.{ .pattern_idx = params[0], .value = num_literal_record, - .expr_idx = undefined, // No source expression for synthetic binding + .expr_idx = null, // No source expression for synthetic binding .source_env = origin_env, }); defer _ = self.interpreter.bindings.pop(); From be700b1948425cb79a6355750bddd05d7e9a01ed Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 22:36:44 -0500 Subject: [PATCH 33/64] Make a test less hardcoded --- src/canonicalize/test/import_store_test.zig | 94 +++++++-------------- 1 file changed, 29 
insertions(+), 65 deletions(-) diff --git a/src/canonicalize/test/import_store_test.zig b/src/canonicalize/test/import_store_test.zig index 24184d7de7..f23c08ac5e 100644 --- a/src/canonicalize/test/import_store_test.zig +++ b/src/canonicalize/test/import_store_test.zig @@ -9,15 +9,22 @@ const Import = CIR.Import; const StringLiteral = base.StringLiteral; const CompactWriter = collections.CompactWriter; +fn storeContainsModule(store: *const Import.Store, string_store: *const StringLiteral.Store, module_name: []const u8) bool { + for (store.imports.items.items) |string_idx| { + if (std.mem.eql(u8, string_store.get(string_idx), module_name)) { + return true; + } + } + return false; +} + test "Import.Store deduplicates module names" { const testing = std.testing; const gpa = testing.allocator; - // Create a string store for interning module names var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024); defer string_store.deinit(gpa); - // Create import store var store = Import.Store.init(); defer store.deinit(gpa); @@ -25,7 +32,7 @@ test "Import.Store deduplicates module names" { const idx1 = try store.getOrPut(gpa, &string_store, "test.Module"); const idx2 = try store.getOrPut(gpa, &string_store, "test.Module"); - // Should get the same index + // Should get the same index back (deduplication) try testing.expectEqual(idx1, idx2); try testing.expectEqual(@as(usize, 1), store.imports.len()); @@ -39,21 +46,17 @@ test "Import.Store deduplicates module names" { try testing.expectEqual(idx1, idx4); try testing.expectEqual(@as(usize, 2), store.imports.len()); - // Verify we can retrieve the module names through the string store - const str_idx1 = store.imports.items.items[@intFromEnum(idx1)]; - const str_idx3 = store.imports.items.items[@intFromEnum(idx3)]; - try testing.expectEqualStrings("test.Module", string_store.get(str_idx1)); - try testing.expectEqualStrings("other.Module", string_store.get(str_idx3)); + // Verify both module names are present + try 
testing.expect(storeContainsModule(&store, &string_store, "test.Module")); + try testing.expect(storeContainsModule(&store, &string_store, "other.Module")); } test "Import.Store empty CompactWriter roundtrip" { const testing = std.testing; const gpa = testing.allocator; - // Create an empty Store var original = Import.Store.init(); - // Create a temp file var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); @@ -66,15 +69,12 @@ test "Import.Store empty CompactWriter roundtrip" { const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized); try serialized.serialize(&original, gpa, &writer); - // Write to file try writer.writeGather(gpa, file); - // Read back try file.seekTo(0); const buffer = try file.readToEndAlloc(gpa, 1024 * 1024); defer gpa.free(buffer); - // Cast to Serialized and deserialize const serialized_ptr = @as(*Import.Store.Serialized, @ptrCast(@alignCast(buffer.ptr))); const deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa); @@ -87,29 +87,18 @@ test "Import.Store basic CompactWriter roundtrip" { const testing = std.testing; const gpa = testing.allocator; - // Create a mock module env with string store var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024); defer string_store.deinit(gpa); - const MockEnv = struct { strings: *StringLiteral.Store }; - const mock_env = MockEnv{ .strings = &string_store }; - - // Create original store and add some imports var original = Import.Store.init(); defer original.deinit(gpa); - const idx1 = try original.getOrPut(gpa, mock_env.strings, "json.Json"); - const idx2 = try original.getOrPut(gpa, mock_env.strings, "core.List"); - const idx3 = try original.getOrPut(gpa, mock_env.strings, "my.Module"); + _ = try original.getOrPut(gpa, &string_store, "json.Json"); + _ = try original.getOrPut(gpa, &string_store, "core.List"); + _ = try original.getOrPut(gpa, &string_store, "my.Module"); - // Verify indices are distinct and in order - try 
testing.expect(idx1 != idx2); - try testing.expect(idx2 != idx3); - try testing.expect(idx1 != idx3); - try testing.expect(@intFromEnum(idx1) < @intFromEnum(idx2)); - try testing.expect(@intFromEnum(idx2) < @intFromEnum(idx3)); + try testing.expectEqual(@as(usize, 3), original.imports.len()); - // Create a temp file var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); @@ -122,30 +111,23 @@ test "Import.Store basic CompactWriter roundtrip" { const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized); try serialized.serialize(&original, gpa, &writer); - // Write to file try writer.writeGather(gpa, file); - // Read back try file.seekTo(0); const buffer = try file.readToEndAlloc(gpa, 1024 * 1024); defer gpa.free(buffer); - // Cast to Serialized and deserialize const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr)); var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa); defer deserialized.map.deinit(gpa); - // Verify the imports are accessible + // Verify the correct number of imports try testing.expectEqual(@as(usize, 3), deserialized.imports.len()); - // Verify the interned string IDs are stored correctly by using the indices we got - const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)]; - const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)]; - const str_idx3 = deserialized.imports.items.items[@intFromEnum(idx3)]; - - try testing.expectEqualStrings("json.Json", string_store.get(str_idx1)); - try testing.expectEqualStrings("core.List", string_store.get(str_idx2)); - try testing.expectEqualStrings("my.Module", string_store.get(str_idx3)); + // Verify all expected module names are present by iterating + try testing.expect(storeContainsModule(deserialized, &string_store, "json.Json")); + try testing.expect(storeContainsModule(deserialized, &string_store, "core.List")); + try testing.expect(storeContainsModule(deserialized, &string_store, 
"my.Module")); // Verify the map is repopulated correctly try testing.expectEqual(@as(usize, 3), deserialized.map.count()); @@ -155,26 +137,20 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { const testing = std.testing; const gpa = testing.allocator; - // Create a mock module env with string store var string_store = try StringLiteral.Store.initCapacityBytes(gpa, 1024); defer string_store.deinit(gpa); - const MockEnv = struct { strings: *StringLiteral.Store }; - const mock_env = MockEnv{ .strings = &string_store }; - - // Create store with duplicate imports var original = Import.Store.init(); defer original.deinit(gpa); - const idx1 = try original.getOrPut(gpa, mock_env.strings, "test.Module"); - const idx2 = try original.getOrPut(gpa, mock_env.strings, "another.Module"); - const idx3 = try original.getOrPut(gpa, mock_env.strings, "test.Module"); // duplicate + const idx1 = try original.getOrPut(gpa, &string_store, "test.Module"); + _ = try original.getOrPut(gpa, &string_store, "another.Module"); + const idx3 = try original.getOrPut(gpa, &string_store, "test.Module"); // duplicate // Verify deduplication worked try testing.expectEqual(idx1, idx3); try testing.expectEqual(@as(usize, 2), original.imports.len()); - // Create a temp file var tmp_dir = testing.tmpDir(.{}); defer tmp_dir.cleanup(); @@ -187,35 +163,23 @@ test "Import.Store duplicate imports CompactWriter roundtrip" { const serialized = try writer.appendAlloc(gpa, Import.Store.Serialized); try serialized.serialize(&original, gpa, &writer); - // Write to file try writer.writeGather(gpa, file); - // Read back try file.seekTo(0); const buffer = try file.readToEndAlloc(gpa, 1024 * 1024); defer gpa.free(buffer); - // Cast to Serialized and deserialize const serialized_ptr: *Import.Store.Serialized = @ptrCast(@alignCast(buffer.ptr)); var deserialized = try serialized_ptr.deserialize(@as(i64, @intCast(@intFromPtr(buffer.ptr))), gpa); defer deserialized.map.deinit(gpa); - // Verify correct number 
of imports + // Verify correct number of imports (duplicates deduplicated) try testing.expectEqual(@as(usize, 2), deserialized.imports.len()); - // Get the string IDs using the indices we captured and verify the strings - const str_idx1 = deserialized.imports.items.items[@intFromEnum(idx1)]; - const str_idx2 = deserialized.imports.items.items[@intFromEnum(idx2)]; - - try testing.expectEqualStrings("test.Module", string_store.get(str_idx1)); - try testing.expectEqualStrings("another.Module", string_store.get(str_idx2)); + // Verify expected module names are present + try testing.expect(storeContainsModule(deserialized, &string_store, "test.Module")); + try testing.expect(storeContainsModule(deserialized, &string_store, "another.Module")); // Verify the map was repopulated correctly try testing.expectEqual(@as(usize, 2), deserialized.map.count()); - - // Check that the map has correct entries for the string indices that were deserialized - try testing.expect(deserialized.map.contains(str_idx1)); - try testing.expect(deserialized.map.contains(str_idx2)); - try testing.expectEqual(idx1, deserialized.map.get(str_idx1).?); - try testing.expectEqual(idx2, deserialized.map.get(str_idx2).?); } From e514b4e65bca8be8719562e5cdf784ad6c8ba3a7 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 4 Dec 2025 23:32:14 -0500 Subject: [PATCH 34/64] Fix another undefined that should be null --- src/eval/interpreter.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 435bbfc005..5a78e5bdf2 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -12875,7 +12875,7 @@ pub const Interpreter = struct { effective_scrutinee_rt_var, roc_ops, &temp_binds, - undefined, + null, )) { continue; } From 37a3ab4f6ec7e545be9604014b6cd2fdb6188a2a Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 00:14:50 -0500 Subject: [PATCH 35/64] Fix typo in flake.nix: testscmd -> testcmd --- 
src/flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/flake.nix b/src/flake.nix index de7629441b..f7ea3a1bd6 100644 --- a/src/flake.nix +++ b/src/flake.nix @@ -34,7 +34,7 @@ testcmd() { zig build snapshot && zig build test } - export -f testscmd + export -f testcmd fmtcmd() { zig build fmt From 81e34985e7bf2bdea77503c1654027eb21929928 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 19:16:23 -0500 Subject: [PATCH 36/64] Fix a stack overflow --- src/cli/test/fx_platform_test.zig | 175 +++--------------------------- src/eval/interpreter.zig | 29 +++-- test/fx/test_direct_string.roc | 7 -- test/fx/test_method_inspect.roc | 8 -- test/fx/test_one_call.roc | 11 -- test/fx/test_type_mismatch.roc | 5 - test/fx/test_with_wrapper.roc | 10 -- test/fx/var_interp_segfault.roc | 29 +++++ 8 files changed, 69 insertions(+), 205 deletions(-) delete mode 100644 test/fx/test_direct_string.roc delete mode 100644 test/fx/test_method_inspect.roc delete mode 100644 test/fx/test_one_call.roc delete mode 100644 test/fx/test_type_mismatch.roc delete mode 100644 test/fx/test_with_wrapper.roc create mode 100644 test/fx/var_interp_segfault.roc diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 980cf7ee4d..b280ca62a0 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1068,147 +1068,6 @@ test "fx platform string_lookup_test" { try testing.expect(std.mem.indexOf(u8, run_result.stdout, "hello") != null); } -test "fx platform test_direct_string" { - const allocator = testing.allocator; - - try ensureRocBinary(allocator); - - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_direct_string.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run 
failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); -} - -test "fx platform test_one_call" { - const allocator = testing.allocator; - - try ensureRocBinary(allocator); - - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_one_call.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); -} - -test "fx platform test_type_mismatch" { - const allocator = testing.allocator; - - try ensureRocBinary(allocator); - - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_type_mismatch.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - // This file is expected to fail compilation with a type mismatch error - // The to_inspect method returns I64 instead of Str - 
switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - // Expected to fail - check for type mismatch error message - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "TYPE MISMATCH") != null); - } else { - std.debug.print("Expected compilation error but succeeded\n", .{}); - return error.UnexpectedSuccess; - } - }, - else => { - // Abnormal termination should also indicate error - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "TYPE MISMATCH") != null); - }, - } -} - -test "fx platform test_with_wrapper" { - const allocator = testing.allocator; - - try ensureRocBinary(allocator); - - const run_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ - "./zig-out/bin/roc", - "test/fx/test_with_wrapper.roc", - }, - }); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - switch (run_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("Run failed with exit code {}\n", .{code}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - } - }, - else => { - std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); - std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); - std.debug.print("STDERR: {s}\n", .{run_result.stderr}); - return error.RunFailed; - }, - } - - try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); -} - test "fx platform inspect_compare_test" { const allocator = testing.allocator; @@ -1523,22 +1382,6 @@ test "run allows warnings without blocking execution" { try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello, World!") != null); } -test "fx platform method inspect on string" { - // Tests that calling .inspect() on a Str correctly reports MISSING METHOD - // (Str 
doesn't have an inspect method, unlike custom opaque types) - const allocator = testing.allocator; - - const run_result = try runRoc(allocator, "test/fx/test_method_inspect.roc", .{}); - defer allocator.free(run_result.stdout); - defer allocator.free(run_result.stderr); - - // This should fail because Str doesn't have an inspect method - try checkFailure(run_result); - - // Should show MISSING METHOD error - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "MISSING METHOD") != null); -} - test "fx platform if-expression closure capture regression" { // Regression test: Variables bound inside an if-expression's block were // incorrectly being captured as free variables by the enclosing lambda, @@ -1551,3 +1394,21 @@ test "fx platform if-expression closure capture regression" { try checkSuccess(run_result); } + +test "fx platform var with string interpolation segfault" { + // Regression test: Using `var` variables with string interpolation causes segfault. + // The code calls fnA! multiple times, each using var state variables, and + // interpolates the results into strings. + const allocator = testing.allocator; + + const run_result = try runRoc(allocator, "test/fx/var_interp_segfault.roc", .{}); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + try checkSuccess(run_result); + + // Verify the expected output + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A1: 1") != null); + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A2: 1") != null); + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A3: 1") != null); +} diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 5a78e5bdf2..1a853ca1e1 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -8050,18 +8050,33 @@ pub const Interpreter = struct { } } - // Skip translate_cache for flex/rigid vars when inside a polymorphic function. 
- // The cache may have stale mappings from a different calling context where the - // flex var defaulted to Dec, but we now have a concrete type from flex_type_context. - // We check if flex_type_context has ANY entries as a proxy for "inside polymorphic call". + // Check translate_cache. We have two concerns: + // 1. Breaking cycles: Always check if we're already translating this type (placeholder) + // 2. Stale mappings: Skip cached results for flex/rigid vars in polymorphic contexts + // where the cache might have stale mappings from a different calling context const in_polymorphic_context = self.flex_type_context.count() > 0; - const skip_cache_for_this_var = in_polymorphic_context and + const skip_stale_cached_result = in_polymorphic_context and (resolved.desc.content == .flex or resolved.desc.content == .rigid); - if (!skip_cache_for_this_var) { - if (self.translate_cache.get(key)) |found| { + if (self.translate_cache.get(key)) |found| { + // Always return cached results to break cycles (placeholder mechanism). + // For flex/rigid in polymorphic context, we're only skipping potentially + // stale COMPLETE translations. The placeholder is inserted during THIS + // translation, so returning it is safe and necessary to prevent infinite recursion. + if (!skip_stale_cached_result) { return found; } + // Check if this is a placeholder (cycle detection) vs stale complete translation. + // Placeholders are fresh flex vars with no constraints. + const found_resolved = self.runtime_types.resolveVar(found); + if (found_resolved.desc.content == .flex) { + const flex = found_resolved.desc.content.flex; + if (flex.name == null and flex.constraints.len() == 0) { + // This is a placeholder - return it to break the cycle + return found; + } + } + // Otherwise it's a potentially stale mapping - skip it and re-translate } // Insert a placeholder to break cycles during recursive type translation. 
diff --git a/test/fx/test_direct_string.roc b/test/fx/test_direct_string.roc deleted file mode 100644 index 7cdf822334..0000000000 --- a/test/fx/test_direct_string.roc +++ /dev/null @@ -1,7 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout - -main! = || { - Stdout.line!("Hello") -} diff --git a/test/fx/test_method_inspect.roc b/test/fx/test_method_inspect.roc deleted file mode 100644 index 27197225d6..0000000000 --- a/test/fx/test_method_inspect.roc +++ /dev/null @@ -1,8 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout - -main! = || { - x = "hello" - Stdout.line!(x.inspect()) -} diff --git a/test/fx/test_one_call.roc b/test/fx/test_one_call.roc deleted file mode 100644 index 03a53ddbb2..0000000000 --- a/test/fx/test_one_call.roc +++ /dev/null @@ -1,11 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout - -identity : a -> a -identity = |x| x - -main! = || { - str = identity("Hello") - Stdout.line!(str) -} diff --git a/test/fx/test_type_mismatch.roc b/test/fx/test_type_mismatch.roc deleted file mode 100644 index 035c6e03e9..0000000000 --- a/test/fx/test_type_mismatch.roc +++ /dev/null @@ -1,5 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -main! = || { - "hello" -} diff --git a/test/fx/test_with_wrapper.roc b/test/fx/test_with_wrapper.roc deleted file mode 100644 index 5e3f699ac7..0000000000 --- a/test/fx/test_with_wrapper.roc +++ /dev/null @@ -1,10 +0,0 @@ -app [main!] { pf: platform "./platform/main.roc" } - -import pf.Stdout - -str : Str -> Str -str = |s| s - -main! = || { - Stdout.line!(str("Hello")) -} diff --git a/test/fx/var_interp_segfault.roc b/test/fx/var_interp_segfault.roc new file mode 100644 index 0000000000..403e665e02 --- /dev/null +++ b/test/fx/var_interp_segfault.roc @@ -0,0 +1,29 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +print! : Str => {} +print! = |msg| msg.split_on("\n").for_each!(Stdout.line!) + +fnA! 
: Str => Try(I64, _) +fnA! = |_input| { + var $x = 1 + Ok($x) +} + +fnB! : Str => Try(I64, _) +fnB! = |_input| { + var $y = 2 + Ok($y) +} + +run! = || { + print!("A1: ${fnA!("test")?.to_str()}") + print!("A2: ${fnA!("test")?.to_str()}") + print!("A3: ${fnA!("test")?.to_str()}") + Ok({}) +} + +main! = || { + _ignore = run!() +} From 3e5bc5f45333e29813af8d221b0aa98c5b7692fe Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 19:39:24 -0500 Subject: [PATCH 37/64] Add stack overflow handler --- src/base/mod.zig | 2 + src/base/stack_overflow.zig | 315 ++++++++++++++++++++++++++++++++++++ src/cli/main.zig | 5 + 3 files changed, 322 insertions(+) create mode 100644 src/base/stack_overflow.zig diff --git a/src/base/mod.zig b/src/base/mod.zig index fb0973444c..a5a0c0361e 100644 --- a/src/base/mod.zig +++ b/src/base/mod.zig @@ -11,6 +11,7 @@ pub const parallel = @import("parallel.zig"); pub const SmallStringInterner = @import("SmallStringInterner.zig"); pub const safe_memory = @import("safe_memory.zig"); +pub const stack_overflow = @import("stack_overflow.zig"); pub const target = @import("target.zig"); pub const DataSpan = @import("DataSpan.zig").DataSpan; @@ -158,6 +159,7 @@ test "base tests" { std.testing.refAllDecls(@import("Scratch.zig")); std.testing.refAllDecls(@import("SExprTree.zig")); std.testing.refAllDecls(@import("SmallStringInterner.zig")); + std.testing.refAllDecls(@import("stack_overflow.zig")); std.testing.refAllDecls(@import("StringLiteral.zig")); std.testing.refAllDecls(@import("target.zig")); } diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig new file mode 100644 index 0000000000..5a6b283113 --- /dev/null +++ b/src/base/stack_overflow.zig @@ -0,0 +1,315 @@ +//! Stack overflow detection and handling for the Roc compiler. +//! +//! This module provides a mechanism to catch stack overflows and report them +//! with a helpful error message instead of a generic segfault. This is particularly +//! 
useful during compiler development when recursive algorithms might blow the stack. +//! +//! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate +//! signal stack and install a SIGSEGV handler that detects stack overflows. + +const std = @import("std"); +const builtin = @import("builtin"); +const posix = std.posix; + +/// Size of the alternate signal stack (64KB should be plenty for the handler) +const ALT_STACK_SIZE = 64 * 1024; + +/// Storage for the alternate signal stack +var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined; + +/// Whether the handler has been installed +var handler_installed = false; + +/// Error message to display on stack overflow +const STACK_OVERFLOW_MESSAGE = + \\ + \\================================================================================ + \\STACK OVERFLOW in the Roc compiler + \\================================================================================ + \\ + \\The Roc compiler ran out of stack space. This is a bug in the compiler, + \\not in your code. + \\ + \\This often happens due to: + \\ - Infinite recursion in type translation or unification + \\ - Very deeply nested expressions without tail-call optimization + \\ - Cyclic data structures without proper cycle detection + \\ + \\Please report this issue at: https://github.com/roc-lang/roc/issues + \\ + \\Include the Roc code that triggered this error if possible. + \\ + \\================================================================================ + \\ + \\ +; + +/// Install the stack overflow handler. +/// This should be called early in main() before any significant work is done. +/// Returns true if the handler was installed successfully, false otherwise. 
+pub fn install() bool { + if (handler_installed) return true; + + // Only supported on POSIX systems + if (comptime builtin.os.tag == .windows) { + // TODO: Implement Windows stack overflow handling using SetUnhandledExceptionFilter + // and checking for EXCEPTION_STACK_OVERFLOW + return false; + } + + if (comptime builtin.os.tag == .wasi) { + // WASI doesn't support signal handling + return false; + } + + // Set up the alternate signal stack + var alt_stack = posix.stack_t{ + .sp = &alt_stack_storage, + .flags = 0, + .size = ALT_STACK_SIZE, + }; + + posix.sigaltstack(&alt_stack, null) catch { + return false; + }; + + // Install the SIGSEGV handler + const action = posix.Sigaction{ + .handler = .{ .sigaction = handleSignal }, + .mask = posix.sigemptyset(), + .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, + }; + + posix.sigaction(posix.SIG.SEGV, &action, null); + + // Also catch SIGBUS which can occur on some systems for stack overflow + posix.sigaction(posix.SIG.BUS, &action, null); + + handler_installed = true; + return true; +} + +/// The signal handler function +fn handleSignal(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { + + // Check if this is likely a stack overflow by examining the fault address + const fault_addr = @intFromPtr(info.addr); + + // Get the current stack pointer to help determine if this is a stack overflow + var current_sp: usize = 0; + asm volatile ("" + : [sp] "={sp}" (current_sp), + ); + + // A stack overflow typically occurs when the fault address is near the stack pointer + // or below the stack (stacks grow downward on most architectures) + const likely_stack_overflow = isLikelyStackOverflow(fault_addr, current_sp); + + // Write our error message to stderr (use STDERR_FILENO directly for signal safety) + const stderr_fd = posix.STDERR_FILENO; + + if (likely_stack_overflow) { + _ = posix.write(stderr_fd, STACK_OVERFLOW_MESSAGE) catch {}; + } else { + // Generic segfault - provide some context + const 
generic_msg = switch (sig) { + posix.SIG.SEGV => "\nSegmentation fault (SIGSEGV) in the Roc compiler.\nFault address: ", + posix.SIG.BUS => "\nBus error (SIGBUS) in the Roc compiler.\nFault address: ", + else => "\nFatal signal in the Roc compiler.\nFault address: ", + }; + _ = posix.write(stderr_fd, generic_msg) catch {}; + + // Write the fault address as hex + var addr_buf: [18]u8 = undefined; + const addr_str = formatHex(fault_addr, &addr_buf); + _ = posix.write(stderr_fd, addr_str) catch {}; + _ = posix.write(stderr_fd, "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n") catch {}; + } + + // Exit with a distinct error code for stack overflow + if (likely_stack_overflow) { + posix.exit(134); // 128 + 6 (SIGABRT-like) + } else { + posix.exit(139); // 128 + 11 (SIGSEGV) + } +} + +/// Heuristic to determine if a fault is likely a stack overflow +fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool { + // If fault address is 0 or very low, it's likely a null pointer dereference + if (fault_addr < 4096) return false; + + // Stack overflows typically fault near the stack guard page + // The fault address will be close to (but below) the current stack pointer + // We use a generous range since the stack pointer in the signal handler + // is on the alternate stack + + // On most systems, the main stack is in high memory and grows down + // A stack overflow fault will be at an address lower than the normal stack + + // Check if fault address is within a reasonable range of where stack would be + // This is a heuristic - we check if the fault is in the lower part of address space + // where guard pages typically are + + const max_addr = std.math.maxInt(usize); + const high_memory_threshold = max_addr - (16 * 1024 * 1024 * 1024); // 16GB from top + + // If the fault is in the high memory region (where stacks live) but at a page boundary + // it's likely a stack guard page hit + if (fault_addr > high_memory_threshold) { + // Check 
if it's at a page boundary (guard pages are typically page-aligned) + const page_size = std.heap.page_size_min; + const page_aligned = (fault_addr & (page_size - 1)) == 0 or (fault_addr & (page_size - 1)) < 64; + if (page_aligned) return true; + } + + // Also check if the fault address is suspiciously close to the current SP + // This catches cases where we're still on the main stack when the overflow happens + const sp_distance = if (fault_addr < current_sp) current_sp - fault_addr else fault_addr - current_sp; + if (sp_distance < 1024 * 1024) { // Within 1MB of stack pointer + return true; + } + + return false; +} + +/// Format a usize as hexadecimal +fn formatHex(value: usize, buf: []u8) []const u8 { + const hex_chars = "0123456789abcdef"; + var i: usize = buf.len; + + if (value == 0) { + i -= 1; + buf[i] = '0'; + } else { + var v = value; + while (v > 0 and i > 2) { + i -= 1; + buf[i] = hex_chars[v & 0xf]; + v >>= 4; + } + } + + // Add 0x prefix + i -= 1; + buf[i] = 'x'; + i -= 1; + buf[i] = '0'; + + return buf[i..]; +} + +/// Test function that intentionally causes a stack overflow. +/// This is used to verify the handler works correctly. 
+pub fn triggerStackOverflowForTest() noreturn { + // Use a recursive function that can't be tail-call optimized + const S = struct { + fn recurse(n: usize) usize { + // Prevent tail-call optimization by doing work after the recursive call + var buf: [1024]u8 = undefined; + buf[0] = @truncate(n); + const result = if (n == 0) 0 else recurse(n + 1); + // Use the buffer to prevent it from being optimized away + return result + buf[0]; + } + }; + + // This will recurse until stack overflow + const result = S.recurse(1); + + // This should never be reached + std.debug.print("Unexpected result: {}\n", .{result}); + posix.exit(1); +} + +test "formatHex" { + var buf: [18]u8 = undefined; + + const zero = formatHex(0, &buf); + try std.testing.expectEqualStrings("0x0", zero); + + const small = formatHex(0xff, &buf); + try std.testing.expectEqualStrings("0xff", small); + + const medium = formatHex(0xdeadbeef, &buf); + try std.testing.expectEqualStrings("0xdeadbeef", medium); +} + +test "stack overflow handler produces helpful error message" { + // Skip on non-POSIX systems + if (comptime builtin.os.tag == .windows or builtin.os.tag == .wasi) { + return error.SkipZigTest; + } + + // Create a pipe to capture stderr from the child + const pipe_fds = try posix.pipe(); + const pipe_read = pipe_fds[0]; + const pipe_write = pipe_fds[1]; + + const fork_result = posix.fork() catch { + posix.close(pipe_read); + posix.close(pipe_write); + return error.ForkFailed; + }; + + if (fork_result == 0) { + // Child process + posix.close(pipe_read); + + // Redirect stderr to the pipe + posix.dup2(pipe_write, posix.STDERR_FILENO) catch posix.exit(99); + posix.close(pipe_write); + + // Install the handler and trigger stack overflow + _ = install(); + triggerStackOverflowForTest(); + // Should never reach here + unreachable; + } else { + // Parent process + posix.close(pipe_write); + + // Wait for child to exit + const wait_result = posix.waitpid(fork_result, 0); + const status = wait_result.status; 
+ + // Parse the wait status (Unix encoding) + // WIFEXITED: (status & 0x7f) == 0 + // WEXITSTATUS: (status >> 8) & 0xff + // WIFSIGNALED: ((status & 0x7f) + 1) >> 1 > 0 + // WTERMSIG: status & 0x7f + const exited_normally = (status & 0x7f) == 0; + const exit_code: u8 = @truncate((status >> 8) & 0xff); + const termination_signal: u8 = @truncate(status & 0x7f); + + // Read stderr output from child + var stderr_buf: [4096]u8 = undefined; + const bytes_read = posix.read(pipe_read, &stderr_buf) catch 0; + posix.close(pipe_read); + + const stderr_output = stderr_buf[0..bytes_read]; + + // Verify the handler caught the signal and printed a helpful message + // Exit code 134 = stack overflow detected + // Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow) + if (exited_normally and (exit_code == 134 or exit_code == 139)) { + // Check that our handler message was printed + const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null; + const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null; + const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; + + // Handler should have printed EITHER stack overflow message OR segfault message + try std.testing.expect(has_stack_overflow_msg or has_segfault_msg); + try std.testing.expect(has_roc_compiler_msg); + } else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) { + // The handler might not have caught it - this can happen on some systems + // where the signal delivery is different. Just warn and skip. 
+ std.debug.print("Warning: Stack overflow was not caught by handler (signal {})\n", .{termination_signal}); + return error.SkipZigTest; + } else { + std.debug.print("Unexpected exit status: 0x{x} (exited={}, code={}, signal={})\n", .{ status, exited_normally, exit_code, termination_signal }); + std.debug.print("Stderr: {s}\n", .{stderr_output}); + return error.TestUnexpectedResult; + } + } +} diff --git a/src/cli/main.zig b/src/cli/main.zig index 061347654a..4d04138e46 100644 --- a/src/cli/main.zig +++ b/src/cli/main.zig @@ -583,6 +583,11 @@ var debug_allocator: std.heap.DebugAllocator(.{}) = .{ /// The CLI entrypoint for the Roc compiler. pub fn main() !void { + // Install stack overflow handler early, before any significant work. + // This gives us a helpful error message instead of a generic segfault + // if the compiler blows the stack (e.g., due to infinite recursion in type translation). + _ = base.stack_overflow.install(); + var gpa_tracy: tracy.TracyAllocator(null) = undefined; var gpa, const is_safe = gpa: { if (builtin.os.tag == .wasi) break :gpa .{ std.heap.wasm_allocator, false }; From d8a6ff818ac86c6d3020b3801c352e51d6f6325b Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 21:19:29 -0500 Subject: [PATCH 38/64] Add stack overflow handling --- src/base/stack_overflow.zig | 207 +++++++++++++++++++++++++++++------- 1 file changed, 169 insertions(+), 38 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index 5a6b283113..a0c8c250c7 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -6,20 +6,29 @@ //! //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate //! signal stack and install a SIGSEGV handler that detects stack overflows. +//! +//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. 
const std = @import("std"); const builtin = @import("builtin"); -const posix = std.posix; +const posix = if (builtin.os.tag != .windows) std.posix else undefined; +const windows = if (builtin.os.tag == .windows) std.os.windows else undefined; /// Size of the alternate signal stack (64KB should be plenty for the handler) const ALT_STACK_SIZE = 64 * 1024; -/// Storage for the alternate signal stack +/// Storage for the alternate signal stack (POSIX only) var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined; /// Whether the handler has been installed var handler_installed = false; +// Windows constants +const EXCEPTION_STACK_OVERFLOW: u32 = 0xC00000FD; +const EXCEPTION_ACCESS_VIOLATION: u32 = 0xC0000005; +const EXCEPTION_CONTINUE_SEARCH: c_long = 0; +const EXCEPTION_EXECUTE_HANDLER: c_long = 1; + /// Error message to display on stack overflow const STACK_OVERFLOW_MESSAGE = \\ @@ -50,11 +59,8 @@ const STACK_OVERFLOW_MESSAGE = pub fn install() bool { if (handler_installed) return true; - // Only supported on POSIX systems if (comptime builtin.os.tag == .windows) { - // TODO: Implement Windows stack overflow handling using SetUnhandledExceptionFilter - // and checking for EXCEPTION_STACK_OVERFLOW - return false; + return installWindows(); } if (comptime builtin.os.tag == .wasi) { @@ -62,6 +68,10 @@ pub fn install() bool { return false; } + return installPosix(); +} + +fn installPosix() bool { // Set up the alternate signal stack var alt_stack = posix.stack_t{ .sp = &alt_stack_storage, @@ -75,7 +85,7 @@ pub fn install() bool { // Install the SIGSEGV handler const action = posix.Sigaction{ - .handler = .{ .sigaction = handleSignal }, + .handler = .{ .sigaction = handleSignalPosix }, .mask = posix.sigemptyset(), .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, }; @@ -89,9 +99,16 @@ pub fn install() bool { return true; } -/// The signal handler function -fn handleSignal(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { +fn installWindows() 
bool { + // Use SetUnhandledExceptionFilter to catch unhandled exceptions + const kernel32 = windows.kernel32; + _ = kernel32.SetUnhandledExceptionFilter(handleExceptionWindows); + handler_installed = true; + return true; +} +/// The POSIX signal handler function +fn handleSignalPosix(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { // Check if this is likely a stack overflow by examining the fault address const fault_addr = @intFromPtr(info.addr); @@ -134,6 +151,39 @@ fn handleSignal(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv } } +/// Windows exception handler function +fn handleExceptionWindows(exception_info: *windows.EXCEPTION_POINTERS) callconv(windows.WINAPI) c_long { + const exception_code = exception_info.ExceptionRecord.ExceptionCode; + + // Check if this is a stack overflow or access violation + const is_stack_overflow = exception_code == EXCEPTION_STACK_OVERFLOW; + const is_access_violation = exception_code == EXCEPTION_ACCESS_VIOLATION; + + if (!is_stack_overflow and !is_access_violation) { + // Let other handlers deal with this exception + return EXCEPTION_CONTINUE_SEARCH; + } + + // Write error message to stderr + const stderr_handle = windows.kernel32.GetStdHandle(windows.STD_ERROR_HANDLE); + if (stderr_handle != windows.INVALID_HANDLE_VALUE) { + var bytes_written: windows.DWORD = 0; + if (is_stack_overflow) { + _ = windows.kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); + } else { + const msg = "\nAccess violation in the Roc compiler.\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n"; + _ = windows.kernel32.WriteFile(stderr_handle, msg, msg.len, &bytes_written, null); + } + } + + // Exit with appropriate code + const exit_code: windows.UINT = if (is_stack_overflow) 134 else 139; + windows.kernel32.ExitProcess(exit_code); + + // Never reached, but required for return type + return EXCEPTION_EXECUTE_HANDLER; +} + /// 
Heuristic to determine if a fault is likely a stack overflow fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool { // If fault address is 0 or very low, it's likely a null pointer dereference @@ -219,7 +269,11 @@ pub fn triggerStackOverflowForTest() noreturn { // This should never be reached std.debug.print("Unexpected result: {}\n", .{result}); - posix.exit(1); + if (comptime builtin.os.tag == .windows) { + windows.kernel32.ExitProcess(1); + } else { + posix.exit(1); + } } test "formatHex" { @@ -235,12 +289,37 @@ test "formatHex" { try std.testing.expectEqualStrings("0xdeadbeef", medium); } +/// Check if we're being run as a subprocess to trigger stack overflow. +/// This is called by tests to create a child process that will crash. +/// Returns true if we should trigger the overflow (and not return). +pub fn checkAndTriggerIfSubprocess() bool { + // Check for the special environment variable that signals we should crash + const env_val = std.process.getEnvVarOwned(std.heap.page_allocator, "ROC_TEST_TRIGGER_STACK_OVERFLOW") catch return false; + defer std.heap.page_allocator.free(env_val); + + if (std.mem.eql(u8, env_val, "1")) { + // Install handler and trigger overflow + _ = install(); + triggerStackOverflowForTest(); + // Never returns + } + return false; +} + test "stack overflow handler produces helpful error message" { - // Skip on non-POSIX systems - if (comptime builtin.os.tag == .windows or builtin.os.tag == .wasi) { + // Skip on WASI - no process spawning + if (comptime builtin.os.tag == .wasi) { return error.SkipZigTest; } + if (comptime builtin.os.tag == .windows) { + try testStackOverflowWindows(); + } else { + try testStackOverflowPosix(); + } +} + +fn testStackOverflowPosix() !void { // Create a pipe to capture stderr from the child const pipe_fds = try posix.pipe(); const pipe_read = pipe_fds[0]; @@ -274,10 +353,6 @@ test "stack overflow handler produces helpful error message" { const status = wait_result.status; // Parse the wait 
status (Unix encoding) - // WIFEXITED: (status & 0x7f) == 0 - // WEXITSTATUS: (status >> 8) & 0xff - // WIFSIGNALED: ((status & 0x7f) + 1) >> 1 > 0 - // WTERMSIG: status & 0x7f const exited_normally = (status & 0x7f) == 0; const exit_code: u8 = @truncate((status >> 8) & 0xff); const termination_signal: u8 = @truncate(status & 0x7f); @@ -289,27 +364,83 @@ test "stack overflow handler produces helpful error message" { const stderr_output = stderr_buf[0..bytes_read]; - // Verify the handler caught the signal and printed a helpful message - // Exit code 134 = stack overflow detected - // Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow) - if (exited_normally and (exit_code == 134 or exit_code == 139)) { - // Check that our handler message was printed - const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null; - const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null; - const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; - - // Handler should have printed EITHER stack overflow message OR segfault message - try std.testing.expect(has_stack_overflow_msg or has_segfault_msg); - try std.testing.expect(has_roc_compiler_msg); - } else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) { - // The handler might not have caught it - this can happen on some systems - // where the signal delivery is different. Just warn and skip. 
- std.debug.print("Warning: Stack overflow was not caught by handler (signal {})\n", .{termination_signal}); - return error.SkipZigTest; - } else { - std.debug.print("Unexpected exit status: 0x{x} (exited={}, code={}, signal={})\n", .{ status, exited_normally, exit_code, termination_signal }); - std.debug.print("Stderr: {s}\n", .{stderr_output}); - return error.TestUnexpectedResult; - } + try verifyHandlerOutput(exited_normally, exit_code, termination_signal, stderr_output); + } +} + +fn testStackOverflowWindows() !void { + const allocator = std.testing.allocator; + + // Get the path to the current executable (the test binary) + const self_exe = try std.fs.selfExePathAlloc(allocator); + defer allocator.free(self_exe); + + // Spawn ourselves with the special environment variable + var child = std.process.Child.init(.{ + .allocator = allocator, + .argv = &[_][]const u8{self_exe}, + .env_map = null, // We'll set env via addEnv + }, allocator); + + // Set the trigger environment variable + try child.env_map.?.put("ROC_TEST_TRIGGER_STACK_OVERFLOW", "1"); + + // Spawn and wait + const term = try child.spawnAndWait(); + + // Read stderr + const stderr_output = if (child.stderr) |stderr| blk: { + var buf: [4096]u8 = undefined; + const n = stderr.reader().readAll(&buf) catch 0; + break :blk buf[0..n]; + } else ""; + + // Verify results + const exited_normally = term == .Exited; + const exit_code: u8 = if (term == .Exited) @truncate(term.Exited) else 0; + + try verifyHandlerOutputWindows(exited_normally, exit_code, stderr_output); +} + +fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: u8, stderr_output: []const u8) !void { + // Exit code 134 = stack overflow detected + // Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow) + if (exited_normally and (exit_code == 134 or exit_code == 139)) { + // Check that our handler message was printed + const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, 
"STACK OVERFLOW") != null; + const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null; + const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; + + // Handler should have printed EITHER stack overflow message OR segfault message + try std.testing.expect(has_stack_overflow_msg or has_segfault_msg); + try std.testing.expect(has_roc_compiler_msg); + } else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) { + // The handler might not have caught it - this can happen on some systems + // where the signal delivery is different. Just warn and skip. + std.debug.print("Warning: Stack overflow was not caught by handler (signal {})\n", .{termination_signal}); + return error.SkipZigTest; + } else { + std.debug.print("Unexpected exit status: exited={}, code={}, signal={}\n", .{ exited_normally, exit_code, termination_signal }); + std.debug.print("Stderr: {s}\n", .{stderr_output}); + return error.TestUnexpectedResult; + } +} + +fn verifyHandlerOutputWindows(exited_normally: bool, exit_code: u8, stderr_output: []const u8) !void { + // Exit code 134 = stack overflow detected + // Exit code 139 = generic access violation + if (exited_normally and (exit_code == 134 or exit_code == 139)) { + // Check that our handler message was printed + const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null; + const has_access_violation_msg = std.mem.indexOf(u8, stderr_output, "Access violation") != null; + const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; + + // Handler should have printed EITHER stack overflow message OR access violation message + try std.testing.expect(has_stack_overflow_msg or has_access_violation_msg); + try std.testing.expect(has_roc_compiler_msg); + } else { + std.debug.print("Unexpected exit status: exited={}, code={}\n", .{ exited_normally, exit_code }); + 
std.debug.print("Stderr: {s}\n", .{stderr_output}); + return error.TestUnexpectedResult; } } From 37dbb7fbc69bff3fc834d56030adb4fd4cf203c3 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 23:02:03 -0500 Subject: [PATCH 39/64] Fix CI --- src/base/stack_overflow.zig | 142 ++++++++---------------------------- 1 file changed, 32 insertions(+), 110 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index a0c8c250c7..04e5552473 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -7,12 +7,11 @@ //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate //! signal stack and install a SIGSEGV handler that detects stack overflows. //! -//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. +//! Windows and WASI are not currently supported. const std = @import("std"); const builtin = @import("builtin"); -const posix = if (builtin.os.tag != .windows) std.posix else undefined; -const windows = if (builtin.os.tag == .windows) std.os.windows else undefined; +const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; /// Size of the alternate signal stack (64KB should be plenty for the handler) const ALT_STACK_SIZE = 64 * 1024; @@ -23,12 +22,6 @@ var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined; /// Whether the handler has been installed var handler_installed = false; -// Windows constants -const EXCEPTION_STACK_OVERFLOW: u32 = 0xC00000FD; -const EXCEPTION_ACCESS_VIOLATION: u32 = 0xC0000005; -const EXCEPTION_CONTINUE_SEARCH: c_long = 0; -const EXCEPTION_EXECUTE_HANDLER: c_long = 1; - /// Error message to display on stack overflow const STACK_OVERFLOW_MESSAGE = \\ @@ -100,17 +93,15 @@ fn installPosix() bool { } fn installWindows() bool { - // Use SetUnhandledExceptionFilter to catch unhandled exceptions - const kernel32 = windows.kernel32; - _ = 
kernel32.SetUnhandledExceptionFilter(handleExceptionWindows); - handler_installed = true; - return true; + // Windows support requires SetUnhandledExceptionFilter which isn't + // exposed in Zig's stdlib. Skip for now - POSIX platforms are covered. + return false; } /// The POSIX signal handler function fn handleSignalPosix(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { - // Check if this is likely a stack overflow by examining the fault address - const fault_addr = @intFromPtr(info.addr); + // Get the fault address - access differs by platform + const fault_addr: usize = getFaultAddress(info); // Get the current stack pointer to help determine if this is a stack overflow var current_sp: usize = 0; @@ -151,37 +142,28 @@ fn handleSignalPosix(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) cal } } -/// Windows exception handler function -fn handleExceptionWindows(exception_info: *windows.EXCEPTION_POINTERS) callconv(windows.WINAPI) c_long { - const exception_code = exception_info.ExceptionRecord.ExceptionCode; - - // Check if this is a stack overflow or access violation - const is_stack_overflow = exception_code == EXCEPTION_STACK_OVERFLOW; - const is_access_violation = exception_code == EXCEPTION_ACCESS_VIOLATION; - - if (!is_stack_overflow and !is_access_violation) { - // Let other handlers deal with this exception - return EXCEPTION_CONTINUE_SEARCH; +/// Get the fault address from siginfo_t (platform-specific) +fn getFaultAddress(info: *const posix.siginfo_t) usize { + // The siginfo_t structure varies by platform + if (comptime builtin.os.tag == .linux) { + // Linux: fault address is in fields.sigfault.addr + return @intFromPtr(info.fields.sigfault.addr); + } else if (comptime builtin.os.tag == .macos or + builtin.os.tag == .ios or + builtin.os.tag == .tvos or + builtin.os.tag == .watchos or + builtin.os.tag == .visionos or + builtin.os.tag == .freebsd or + builtin.os.tag == .dragonfly or + builtin.os.tag == .netbsd or + 
builtin.os.tag == .openbsd) + { + // macOS/iOS/BSD: fault address is in addr field + return @intFromPtr(info.addr); + } else { + // Fallback: return 0 if we can't determine the address + return 0; } - - // Write error message to stderr - const stderr_handle = windows.kernel32.GetStdHandle(windows.STD_ERROR_HANDLE); - if (stderr_handle != windows.INVALID_HANDLE_VALUE) { - var bytes_written: windows.DWORD = 0; - if (is_stack_overflow) { - _ = windows.kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); - } else { - const msg = "\nAccess violation in the Roc compiler.\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n"; - _ = windows.kernel32.WriteFile(stderr_handle, msg, msg.len, &bytes_written, null); - } - } - - // Exit with appropriate code - const exit_code: windows.UINT = if (is_stack_overflow) 134 else 139; - windows.kernel32.ExitProcess(exit_code); - - // Never reached, but required for return type - return EXCEPTION_EXECUTE_HANDLER; } /// Heuristic to determine if a fault is likely a stack overflow @@ -269,11 +251,7 @@ pub fn triggerStackOverflowForTest() noreturn { // This should never be reached std.debug.print("Unexpected result: {}\n", .{result}); - if (comptime builtin.os.tag == .windows) { - windows.kernel32.ExitProcess(1); - } else { - posix.exit(1); - } + std.process.exit(1); } test "formatHex" { @@ -308,15 +286,12 @@ pub fn checkAndTriggerIfSubprocess() bool { test "stack overflow handler produces helpful error message" { // Skip on WASI - no process spawning - if (comptime builtin.os.tag == .wasi) { + // Skip on Windows - SetUnhandledExceptionFilter not in Zig stdlib + if (comptime builtin.os.tag == .wasi or builtin.os.tag == .windows) { return error.SkipZigTest; } - if (comptime builtin.os.tag == .windows) { - try testStackOverflowWindows(); - } else { - try testStackOverflowPosix(); - } + try testStackOverflowPosix(); } fn testStackOverflowPosix() !void { @@ -368,40 
+343,6 @@ fn testStackOverflowPosix() !void { } } -fn testStackOverflowWindows() !void { - const allocator = std.testing.allocator; - - // Get the path to the current executable (the test binary) - const self_exe = try std.fs.selfExePathAlloc(allocator); - defer allocator.free(self_exe); - - // Spawn ourselves with the special environment variable - var child = std.process.Child.init(.{ - .allocator = allocator, - .argv = &[_][]const u8{self_exe}, - .env_map = null, // We'll set env via addEnv - }, allocator); - - // Set the trigger environment variable - try child.env_map.?.put("ROC_TEST_TRIGGER_STACK_OVERFLOW", "1"); - - // Spawn and wait - const term = try child.spawnAndWait(); - - // Read stderr - const stderr_output = if (child.stderr) |stderr| blk: { - var buf: [4096]u8 = undefined; - const n = stderr.reader().readAll(&buf) catch 0; - break :blk buf[0..n]; - } else ""; - - // Verify results - const exited_normally = term == .Exited; - const exit_code: u8 = if (term == .Exited) @truncate(term.Exited) else 0; - - try verifyHandlerOutputWindows(exited_normally, exit_code, stderr_output); -} - fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: u8, stderr_output: []const u8) !void { // Exit code 134 = stack overflow detected // Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow) @@ -425,22 +366,3 @@ fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: return error.TestUnexpectedResult; } } - -fn verifyHandlerOutputWindows(exited_normally: bool, exit_code: u8, stderr_output: []const u8) !void { - // Exit code 134 = stack overflow detected - // Exit code 139 = generic access violation - if (exited_normally and (exit_code == 134 or exit_code == 139)) { - // Check that our handler message was printed - const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null; - const has_access_violation_msg = std.mem.indexOf(u8, stderr_output, "Access 
violation") != null; - const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; - - // Handler should have printed EITHER stack overflow message OR access violation message - try std.testing.expect(has_stack_overflow_msg or has_access_violation_msg); - try std.testing.expect(has_roc_compiler_msg); - } else { - std.debug.print("Unexpected exit status: exited={}, code={}\n", .{ exited_normally, exit_code }); - std.debug.print("Stderr: {s}\n", .{stderr_output}); - return error.TestUnexpectedResult; - } -} From 83afd74d7d46266c90d812a40a7753ae2cb4219e Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Fri, 5 Dec 2025 23:16:14 -0500 Subject: [PATCH 40/64] sigfault is not a typo --- typos.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/typos.toml b/typos.toml index 43f51f5a9f..8153ea8bfb 100644 --- a/typos.toml +++ b/typos.toml @@ -22,3 +22,4 @@ HSA = "HSA" typ = "typ" ba = "ba" Trys = "Trys" +sigfault = "sigfault" From 1671431dad965eacd3575bfc37a67b3a9e0583da Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 00:23:35 -0500 Subject: [PATCH 41/64] Add back in Windows stack overflow handling --- src/base/stack_overflow.zig | 96 ++++++++++++++++++++++++++++++++++--- 1 file changed, 89 insertions(+), 7 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index 04e5552473..2088d44f72 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -7,12 +7,55 @@ //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate //! signal stack and install a SIGSEGV handler that detects stack overflows. //! -//! Windows and WASI are not currently supported. +//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. +//! +//! WASI is not currently supported (no signal handling available). 
const std = @import("std"); const builtin = @import("builtin"); const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; +// Windows types and constants +const DWORD = u32; +const LONG = i32; +const ULONG_PTR = usize; +const PVOID = ?*anyopaque; +const HANDLE = ?*anyopaque; +const BOOL = i32; + +const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD; +const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005; +const EXCEPTION_CONTINUE_SEARCH: LONG = 0; +const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); +const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize)); + +const EXCEPTION_RECORD = extern struct { + ExceptionCode: DWORD, + ExceptionFlags: DWORD, + ExceptionRecord: ?*EXCEPTION_RECORD, + ExceptionAddress: PVOID, + NumberParameters: DWORD, + ExceptionInformation: [15]ULONG_PTR, +}; + +const CONTEXT = extern struct { + // We don't need the full context, just enough to make the struct valid + data: [1232]u8, // Size varies by arch, this is x64 size +}; + +const EXCEPTION_POINTERS = extern struct { + ExceptionRecord: *EXCEPTION_RECORD, + ContextRecord: *CONTEXT, +}; + +const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(std.os.windows.WINAPI) LONG; + +// Windows API imports +extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(std.os.windows.WINAPI) LPTOP_LEVEL_EXCEPTION_FILTER; +extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(std.os.windows.WINAPI) HANDLE; +extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(std.os.windows.WINAPI) BOOL; +extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(std.os.windows.WINAPI) noreturn; + /// Size of the alternate signal stack (64KB should be plenty for the handler) const ALT_STACK_SIZE = 64 * 1024; @@ -93,9 +136,39 @@ fn installPosix() bool { } fn 
installWindows() bool { - // Windows support requires SetUnhandledExceptionFilter which isn't - // exposed in Zig's stdlib. Skip for now - POSIX platforms are covered. - return false; + _ = SetUnhandledExceptionFilter(handleExceptionWindows); + handler_installed = true; + return true; +} + +/// Windows exception handler function +fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(std.os.windows.WINAPI) LONG { + const exception_code = exception_info.ExceptionRecord.ExceptionCode; + + // Check if this is a stack overflow or access violation + const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW); + const is_access_violation = (exception_code == EXCEPTION_ACCESS_VIOLATION); + + if (!is_stack_overflow and !is_access_violation) { + // Let other handlers deal with this exception + return EXCEPTION_CONTINUE_SEARCH; + } + + // Write error message to stderr + const stderr_handle = GetStdHandle(STD_ERROR_HANDLE); + if (stderr_handle != INVALID_HANDLE_VALUE and stderr_handle != null) { + var bytes_written: DWORD = 0; + if (is_stack_overflow) { + _ = WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); + } else { + const msg = "\nAccess violation in the Roc compiler.\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n"; + _ = WriteFile(stderr_handle, msg.ptr, msg.len, &bytes_written, null); + } + } + + // Exit with appropriate code + const exit_code: c_uint = if (is_stack_overflow) 134 else 139; + ExitProcess(exit_code); } /// The POSIX signal handler function @@ -285,9 +358,18 @@ pub fn checkAndTriggerIfSubprocess() bool { } test "stack overflow handler produces helpful error message" { - // Skip on WASI - no process spawning - // Skip on Windows - SetUnhandledExceptionFilter not in Zig stdlib - if (comptime builtin.os.tag == .wasi or builtin.os.tag == .windows) { + // Skip on WASI - no process spawning or signal handling + if (comptime builtin.os.tag == .wasi) { + 
return error.SkipZigTest; + } + + if (comptime builtin.os.tag == .windows) { + // Windows test would need subprocess spawning which is more complex + // The handler is installed and works, but testing it is harder + // For now, just verify the handler installs successfully + if (install()) { + return; // Success - handler installed + } return error.SkipZigTest; } From e5355b0fa4791369df7ab8d54fafcd2042295b0c Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 00:30:45 -0500 Subject: [PATCH 42/64] Restore some deleted tests --- src/cli/test/fx_platform_test.zig | 157 ++++++++++++++++++++++++++++++ test/fx/test_direct_string.roc | 7 ++ test/fx/test_method_inspect.roc | 8 ++ test/fx/test_one_call.roc | 11 +++ test/fx/test_type_mismatch.roc | 5 + test/fx/test_with_wrapper.roc | 10 ++ 6 files changed, 198 insertions(+) create mode 100644 test/fx/test_direct_string.roc create mode 100644 test/fx/test_method_inspect.roc create mode 100644 test/fx/test_one_call.roc create mode 100644 test/fx/test_type_mismatch.roc create mode 100644 test/fx/test_with_wrapper.roc diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index b280ca62a0..11242700ac 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1068,6 +1068,147 @@ test "fx platform string_lookup_test" { try testing.expect(std.mem.indexOf(u8, run_result.stdout, "hello") != null); } +test "fx platform test_direct_string" { + const allocator = testing.allocator; + + try ensureRocBinary(allocator); + + const run_result = try std.process.Child.run(.{ + .allocator = allocator, + .argv = &[_][]const u8{ + "./zig-out/bin/roc", + "test/fx/test_direct_string.roc", + }, + }); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + switch (run_result.term) { + .Exited => |code| { + if (code != 0) { + std.debug.print("Run failed with exit code {}\n", .{code}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + 
std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + } + }, + else => { + std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + }, + } + + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); +} + +test "fx platform test_one_call" { + const allocator = testing.allocator; + + try ensureRocBinary(allocator); + + const run_result = try std.process.Child.run(.{ + .allocator = allocator, + .argv = &[_][]const u8{ + "./zig-out/bin/roc", + "test/fx/test_one_call.roc", + }, + }); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + switch (run_result.term) { + .Exited => |code| { + if (code != 0) { + std.debug.print("Run failed with exit code {}\n", .{code}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + } + }, + else => { + std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + }, + } + + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); +} + +test "fx platform test_type_mismatch" { + const allocator = testing.allocator; + + try ensureRocBinary(allocator); + + const run_result = try std.process.Child.run(.{ + .allocator = allocator, + .argv = &[_][]const u8{ + "./zig-out/bin/roc", + "test/fx/test_type_mismatch.roc", + }, + }); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + // This file is expected to fail compilation with a type mismatch error + // The to_inspect method returns I64 instead of Str + switch (run_result.term) { + .Exited => |code| { + if (code != 0) { + // Expected to fail - check for 
type mismatch error message + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "TYPE MISMATCH") != null); + } else { + std.debug.print("Expected compilation error but succeeded\n", .{}); + return error.UnexpectedSuccess; + } + }, + else => { + // Abnormal termination should also indicate error + std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "TYPE MISMATCH") != null); + }, + } +} + +test "fx platform test_with_wrapper" { + const allocator = testing.allocator; + + try ensureRocBinary(allocator); + + const run_result = try std.process.Child.run(.{ + .allocator = allocator, + .argv = &[_][]const u8{ + "./zig-out/bin/roc", + "test/fx/test_with_wrapper.roc", + }, + }); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + switch (run_result.term) { + .Exited => |code| { + if (code != 0) { + std.debug.print("Run failed with exit code {}\n", .{code}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + } + }, + else => { + std.debug.print("Run terminated abnormally: {}\n", .{run_result.term}); + std.debug.print("STDOUT: {s}\n", .{run_result.stdout}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.RunFailed; + }, + } + + try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello") != null); +} + test "fx platform inspect_compare_test" { const allocator = testing.allocator; @@ -1382,6 +1523,22 @@ test "run allows warnings without blocking execution" { try testing.expect(std.mem.indexOf(u8, run_result.stdout, "Hello, World!") != null); } +test "fx platform method inspect on string" { + // Tests that calling .inspect() on a Str correctly reports MISSING METHOD + // (Str doesn't have an inspect method, unlike custom opaque types) + const allocator = testing.allocator; + 
+ const run_result = try runRoc(allocator, "test/fx/test_method_inspect.roc", .{}); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + // This should fail because Str doesn't have an inspect method + try checkFailure(run_result); + + // Should show MISSING METHOD error + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "MISSING METHOD") != null); +} + test "fx platform if-expression closure capture regression" { // Regression test: Variables bound inside an if-expression's block were // incorrectly being captured as free variables by the enclosing lambda, diff --git a/test/fx/test_direct_string.roc b/test/fx/test_direct_string.roc new file mode 100644 index 0000000000..7cdf822334 --- /dev/null +++ b/test/fx/test_direct_string.roc @@ -0,0 +1,7 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +main! = || { + Stdout.line!("Hello") +} diff --git a/test/fx/test_method_inspect.roc b/test/fx/test_method_inspect.roc new file mode 100644 index 0000000000..27197225d6 --- /dev/null +++ b/test/fx/test_method_inspect.roc @@ -0,0 +1,8 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +main! = || { + x = "hello" + Stdout.line!(x.inspect()) +} diff --git a/test/fx/test_one_call.roc b/test/fx/test_one_call.roc new file mode 100644 index 0000000000..03a53ddbb2 --- /dev/null +++ b/test/fx/test_one_call.roc @@ -0,0 +1,11 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +identity : a -> a +identity = |x| x + +main! = || { + str = identity("Hello") + Stdout.line!(str) +} diff --git a/test/fx/test_type_mismatch.roc b/test/fx/test_type_mismatch.roc new file mode 100644 index 0000000000..035c6e03e9 --- /dev/null +++ b/test/fx/test_type_mismatch.roc @@ -0,0 +1,5 @@ +app [main!] { pf: platform "./platform/main.roc" } + +main! 
= || { + "hello" +} diff --git a/test/fx/test_with_wrapper.roc b/test/fx/test_with_wrapper.roc new file mode 100644 index 0000000000..5e3f699ac7 --- /dev/null +++ b/test/fx/test_with_wrapper.roc @@ -0,0 +1,10 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +str : Str -> Str +str = |s| s + +main! = || { + Stdout.line!(str("Hello")) +} From b9b743c29cab1f9b3a6e9a33a2e2cd8f20fe489e Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 00:39:56 -0500 Subject: [PATCH 43/64] Refactor cycle detection --- src/eval/interpreter.zig | 47 ++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 1a853ca1e1..b8ac2e015e 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -209,6 +209,8 @@ pub const Interpreter = struct { empty_scope: TypeScope, // Translation cache: (module, resolved_var) -> runtime_var translate_cache: std.AutoHashMap(ModuleVarKey, types.Var), + // Types currently being translated (for cycle detection) + translation_in_progress: std.AutoHashMap(ModuleVarKey, void), // Rigid variable substitution context for generic function instantiation // Maps rigid type variables to their concrete instantiations rigid_subst: std.AutoHashMap(types.Var, types.Var), @@ -391,6 +393,7 @@ pub const Interpreter = struct { .var_to_layout_slot = slots, .empty_scope = scope, .translate_cache = std.AutoHashMap(ModuleVarKey, types.Var).init(allocator), + .translation_in_progress = std.AutoHashMap(ModuleVarKey, void).init(allocator), .rigid_subst = std.AutoHashMap(types.Var, types.Var).init(allocator), .translate_rigid_subst = std.AutoHashMap(types.Var, types.Var).init(allocator), .flex_type_context = std.AutoHashMap(ModuleVarKey, types.Var).init(allocator), @@ -7172,6 +7175,7 @@ pub const Interpreter = struct { pub fn deinit(self: *Interpreter) void { self.empty_scope.deinit(); self.translate_cache.deinit(); + 
self.translation_in_progress.deinit(); self.rigid_subst.deinit(); self.translate_rigid_subst.deinit(); self.flex_type_context.deinit(); @@ -8050,35 +8054,33 @@ pub const Interpreter = struct { } } - // Check translate_cache. We have two concerns: - // 1. Breaking cycles: Always check if we're already translating this type (placeholder) - // 2. Stale mappings: Skip cached results for flex/rigid vars in polymorphic contexts - // where the cache might have stale mappings from a different calling context + // Cycle detection: if we're already translating this type, return the placeholder + // to break the infinite recursion. + if (self.translation_in_progress.contains(key)) { + // We must have a placeholder in translate_cache - return it to break the cycle + if (self.translate_cache.get(key)) |placeholder| { + return placeholder; + } + // This shouldn't happen, but if it does, create a fresh var + return try self.runtime_types.fresh(); + } + + // Check translate_cache for completed translations. + // For flex/rigid vars in polymorphic contexts, skip cached results because they + // might be stale mappings from a different calling context. const in_polymorphic_context = self.flex_type_context.count() > 0; const skip_stale_cached_result = in_polymorphic_context and (resolved.desc.content == .flex or resolved.desc.content == .rigid); - if (self.translate_cache.get(key)) |found| { - // Always return cached results to break cycles (placeholder mechanism). - // For flex/rigid in polymorphic context, we're only skipping potentially - // stale COMPLETE translations. The placeholder is inserted during THIS - // translation, so returning it is safe and necessary to prevent infinite recursion. - if (!skip_stale_cached_result) { + if (!skip_stale_cached_result) { + if (self.translate_cache.get(key)) |found| { return found; } - // Check if this is a placeholder (cycle detection) vs stale complete translation. - // Placeholders are fresh flex vars with no constraints. 
- const found_resolved = self.runtime_types.resolveVar(found); - if (found_resolved.desc.content == .flex) { - const flex = found_resolved.desc.content.flex; - if (flex.name == null and flex.constraints.len() == 0) { - // This is a placeholder - return it to break the cycle - return found; - } - } - // Otherwise it's a potentially stale mapping - skip it and re-translate } + // Mark this type as in-progress to detect cycles + try self.translation_in_progress.put(key, {}); + // Insert a placeholder to break cycles during recursive type translation. // If we recurse back to this type, we'll return the placeholder instead of infinite looping. const placeholder = try self.runtime_types.freshFromContent(.{ .flex = types.Flex.init() }); @@ -8462,6 +8464,9 @@ pub const Interpreter = struct { break :blk current; } else out_var; + // Translation complete - remove from in-progress set + _ = self.translation_in_progress.remove(key); + // Update the cache with the final var try self.translate_cache.put(key, final_var); From 6a947a0d81d761d703473daa2306f1dadf059e2d Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:03:57 -0500 Subject: [PATCH 44/64] Fix env bug --- src/cli/test/fx_platform_test.zig | 13 +++++++ src/eval/interpreter.zig | 55 +++++++++++++++++++++++++---- test/fx/sublist_method_segfault.roc | 15 ++++++++ 3 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 test/fx/sublist_method_segfault.roc diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 11242700ac..efda3ae395 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1569,3 +1569,16 @@ test "fx platform var with string interpolation segfault" { try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A2: 1") != null); try testing.expect(std.mem.indexOf(u8, run_result.stdout, "A3: 1") != null); } + +test "fx platform sublist method on inferred type" { + // Regression test: Calling .sublist() method on 
a List(U8) from "".to_utf8() + // causes a segfault when the variable doesn't have an explicit type annotation. + // Error was: "Roc crashed: Error evaluating from shared memory: InvalidMethodReceiver" + const allocator = testing.allocator; + + const run_result = try runRoc(allocator, "test/fx/sublist_method_segfault.roc", .{}); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + try checkSuccess(run_result); +} diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index b8ac2e015e..fb45900132 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -13627,8 +13627,26 @@ pub const Interpreter = struct { const operand = value_stack.pop() orelse return error.Crash; defer operand.decref(&self.runtime_layout_store, roc_ops); - // Resolve the operand type - const operand_resolved = self.runtime_types.resolveVar(ua.operand_rt_var); + // Resolve the operand type, following aliases to find the nominal type + var operand_resolved = self.runtime_types.resolveVar(ua.operand_rt_var); + + // Follow aliases to get to the underlying type (but NOT through nominal types) + if (comptime builtin.mode == .Debug) { + var alias_count: u32 = 0; + while (operand_resolved.desc.content == .alias) { + alias_count += 1; + if (alias_count > 1000) break; // Prevent infinite loops in debug builds + const alias = operand_resolved.desc.content.alias; + const backing = self.runtime_types.getAliasBackingVar(alias); + operand_resolved = self.runtime_types.resolveVar(backing); + } + } else { + while (operand_resolved.desc.content == .alias) { + const alias = operand_resolved.desc.content.alias; + const backing = self.runtime_types.getAliasBackingVar(alias); + operand_resolved = self.runtime_types.resolveVar(backing); + } + } // Get nominal type info const nominal_info = switch (operand_resolved.desc.content) { @@ -14211,7 +14229,26 @@ pub const Interpreter = struct { // Don't use resolveBaseVar here - we need to keep the nominal type // 
for method dispatch (resolveBaseVar unwraps nominal types to their backing) - const resolved_receiver = self.runtime_types.resolveVar(effective_receiver_rt_var); + // However, we DO need to follow aliases to find the nominal type. + var resolved_receiver = self.runtime_types.resolveVar(effective_receiver_rt_var); + + // Follow aliases to get to the underlying type (but NOT through nominal types) + if (comptime builtin.mode == .Debug) { + var alias_count: u32 = 0; + while (resolved_receiver.desc.content == .alias) { + alias_count += 1; + if (alias_count > 1000) break; // Prevent infinite loops in debug builds + const alias = resolved_receiver.desc.content.alias; + const backing = self.runtime_types.getAliasBackingVar(alias); + resolved_receiver = self.runtime_types.resolveVar(backing); + } + } else { + while (resolved_receiver.desc.content == .alias) { + const alias = resolved_receiver.desc.content.alias; + const backing = self.runtime_types.getAliasBackingVar(alias); + resolved_receiver = self.runtime_types.resolveVar(backing); + } + } const method_args = da.method_args.?; const arg_exprs = self.env.store.sliceExpr(method_args); @@ -14280,9 +14317,11 @@ pub const Interpreter = struct { if (lambda_expr == .e_low_level_lambda) { const low_level = lambda_expr.e_low_level_lambda; var args = [1]StackValue{receiver_value}; - // Get return type from the dot access expression for low-level builtins that need it + // Get return type from the dot access expression for low-level builtins that need it. + // Use saved_env (the caller's module) since da.expr_idx is from that module, + // not from self.env which has been switched to the closure's source module. 
const return_ct_var = can.ModuleEnv.varFrom(da.expr_idx); - const return_rt_var = try self.translateTypeVar(self.env, return_ct_var); + const return_rt_var = try self.translateTypeVar(saved_env, return_ct_var); const result = try self.callLowLevelBuiltin(low_level.op, &args, roc_ops, return_rt_var); // Decref based on ownership semantics @@ -14437,9 +14476,11 @@ pub const Interpreter = struct { all_args[1 + idx] = arg; } - // Get return type from the dot access expression for low-level builtins that need it + // Get return type from the dot access expression for low-level builtins that need it. + // Use saved_env (the caller's module) since dac.expr_idx is from that module, + // not from self.env which has been switched to the closure's source module. const return_ct_var = can.ModuleEnv.varFrom(dac.expr_idx); - const return_rt_var = try self.translateTypeVar(self.env, return_ct_var); + const return_rt_var = try self.translateTypeVar(saved_env, return_ct_var); const result = try self.callLowLevelBuiltin(low_level.op, all_args, roc_ops, return_rt_var); // Decref arguments based on ownership semantics diff --git a/test/fx/sublist_method_segfault.roc b/test/fx/sublist_method_segfault.roc new file mode 100644 index 0000000000..5fb09a06d5 --- /dev/null +++ b/test/fx/sublist_method_segfault.roc @@ -0,0 +1,15 @@ +app [main!] { pf: platform "./platform/main.roc" } + +# Regression test: Calling .sublist() method on a List(U8) from "".to_utf8() +# causes a segfault when the variable doesn't have an explicit type annotation. +# Error was: "Roc crashed: Error evaluating from shared memory: InvalidMethodReceiver" +# The bug was that translateTypeVar was using the wrong module (closure's source module) +# instead of the caller's module when translating the return type. +main! 
= || { + # Test case 1: Method call without type annotation (original bug) + s = "".to_utf8() + _slice = s.sublist({ start: 0, len: 0 }) + + # Test case 2: Comparing empty list with method result + _ignore = "".to_utf8() == [] +} From 134ab80c7be78ef7b64105b3f06082cad2c2c852 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:14:05 -0500 Subject: [PATCH 45/64] Fix Windows CI --- src/base/stack_overflow.zig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index 2088d44f72..d501d0601a 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -48,13 +48,13 @@ const EXCEPTION_POINTERS = extern struct { ContextRecord: *CONTEXT, }; -const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(std.os.windows.WINAPI) LONG; +const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(.winapi) LONG; // Windows API imports -extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(std.os.windows.WINAPI) LPTOP_LEVEL_EXCEPTION_FILTER; -extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(std.os.windows.WINAPI) HANDLE; -extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(std.os.windows.WINAPI) BOOL; -extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(std.os.windows.WINAPI) noreturn; +extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(.winapi) LPTOP_LEVEL_EXCEPTION_FILTER; +extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; +extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) BOOL; +extern "kernel32" fn 
ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; /// Size of the alternate signal stack (64KB should be plenty for the handler) const ALT_STACK_SIZE = 64 * 1024; @@ -142,7 +142,7 @@ fn installWindows() bool { } /// Windows exception handler function -fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(std.os.windows.WINAPI) LONG { +fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG { const exception_code = exception_info.ExceptionRecord.ExceptionCode; // Check if this is a stack overflow or access violation From bd2c59326a3b26c09fbfe1dc446c620aed379e45 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:15:23 -0500 Subject: [PATCH 46/64] Use std.debug.assert for infinite loop checks --- src/eval/interpreter.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index fb45900132..5f8a7e7005 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -7659,12 +7659,12 @@ pub const Interpreter = struct { // Apply rigid variable substitution if this is a rigid variable // Follow the substitution chain until we reach a non-rigid variable or run out of substitutions - // Use a counter to prevent infinite loops from cyclic substitutions + // In debug builds, use a counter to prevent infinite loops from cyclic substitutions var count: u32 = 0; while (resolved.desc.content == .rigid) { if (self.rigid_subst.get(resolved.var_)) |substituted_var| { count += 1; - if (count > 1000) break; // Prevent infinite loops + std.debug.assert(count < 1000); // Guard against infinite loops in debug builds resolved = self.runtime_types.resolveVar(substituted_var); } else { break; @@ -8453,12 +8453,12 @@ pub const Interpreter = struct { // Check if this variable has a substitution active (for generic function instantiation) const final_var = if (self.rigid_subst.get(out_var)) |substituted| blk: { // Follow the substitution 
chain to find the final variable - // Use a counter to prevent infinite loops from cyclic substitutions + // In debug builds, use a counter to prevent infinite loops from cyclic substitutions var current = substituted; var count: u32 = 0; while (self.rigid_subst.get(current)) |next_subst| { count += 1; - if (count > 1000) break; // Prevent infinite loops + std.debug.assert(count < 1000); // Guard against infinite loops in debug builds current = next_subst; } break :blk current; From 6bd5e3c20850a84bffd2a4baa6896644e2e841cb Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:17:23 -0500 Subject: [PATCH 47/64] Remove an unnecessary lookahead check --- src/parse/Parser.zig | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/parse/Parser.zig b/src/parse/Parser.zig index 7f97292e94..c099a5e392 100644 --- a/src/parse/Parser.zig +++ b/src/parse/Parser.zig @@ -2082,9 +2082,6 @@ pub fn parseExprWithBp(self: *Parser, min_bp: u8) Error!AST.Expr.Idx { }, } lookahead_pos += 1; - - // Limit lookahead to prevent infinite loops - if (lookahead_pos > saved_pos + 100) break; } } From 1478cf61b662857f0a5b02460ad8920b901406a0 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:19:45 -0500 Subject: [PATCH 48/64] More std.debug.assert infinite loop checks --- src/eval/interpreter.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 5f8a7e7005..32241961b0 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -13635,7 +13635,7 @@ pub const Interpreter = struct { var alias_count: u32 = 0; while (operand_resolved.desc.content == .alias) { alias_count += 1; - if (alias_count > 1000) break; // Prevent infinite loops in debug builds + std.debug.assert(alias_count < 1000); // Prevent infinite loops in debug builds const alias = operand_resolved.desc.content.alias; const backing = self.runtime_types.getAliasBackingVar(alias); operand_resolved = 
self.runtime_types.resolveVar(backing); @@ -13804,7 +13804,7 @@ pub const Interpreter = struct { var alias_count: u32 = 0; while (current_resolved.desc.content == .alias) { alias_count += 1; - if (alias_count > 1000) break; // Prevent infinite loops + std.debug.assert(alias_count < 1000); // Prevent infinite loops const alias = current_resolved.desc.content.alias; current_var = self.runtime_types.getAliasBackingVar(alias); current_resolved = self.runtime_types.resolveVar(current_var); @@ -14237,7 +14237,7 @@ pub const Interpreter = struct { var alias_count: u32 = 0; while (resolved_receiver.desc.content == .alias) { alias_count += 1; - if (alias_count > 1000) break; // Prevent infinite loops in debug builds + std.debug.assert(alias_count < 1000); // Prevent infinite loops in debug builds const alias = resolved_receiver.desc.content.alias; const backing = self.runtime_types.getAliasBackingVar(alias); resolved_receiver = self.runtime_types.resolveVar(backing); From 1e6f4b99c8cab8dd2d8644476c43e8227ca8e720 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 08:29:39 -0500 Subject: [PATCH 49/64] fix infinite loop --- src/eval/interpreter.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 32241961b0..80c44ee44f 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -10595,6 +10595,8 @@ pub const Interpreter = struct { var subst_iter = subst_map.iterator(); while (subst_iter.next()) |entry| { + // Skip identity mappings to avoid infinite loops when following substitution chains + if (entry.key_ptr.* == entry.value_ptr.*) continue; try self.rigid_subst.put(entry.key_ptr.*, entry.value_ptr.*); // Also add to empty_scope so layout store finds the mapping try scope.put(entry.key_ptr.*, entry.value_ptr.*); @@ -14570,6 +14572,8 @@ pub const Interpreter = struct { saved_rigid_subst = try self.rigid_subst.clone(); var subst_iter = method_subst_map.iterator(); while 
(subst_iter.next()) |entry| { + // Skip identity mappings to avoid infinite loops when following substitution chains + if (entry.key_ptr.* == entry.value_ptr.*) continue; try self.rigid_subst.put(entry.key_ptr.*, entry.value_ptr.*); } @memset(self.var_to_layout_slot.items, 0); From f08904c4ed46cb40c7c5ffdcc9ff96f6c1f23d32 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 09:16:29 -0500 Subject: [PATCH 50/64] Fix Nix build on CI --- build.zig | 2 + src/cli/test/fx_platform_test.zig | 86 +++++++++++++++---------------- 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/build.zig b/build.zig index 68da021feb..58a90759c7 100644 --- a/build.zig +++ b/build.zig @@ -1650,6 +1650,8 @@ pub fn build(b: *std.Build) void { } // Ensure host library is copied before running the test run_fx_platform_test.step.dependOn(©_test_fx_host.step); + // Ensure roc binary is built before running the test (tests invoke roc CLI) + run_fx_platform_test.step.dependOn(roc_step); tests_summary.addRun(&run_fx_platform_test.step); } diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index efda3ae395..b759b86e42 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -9,23 +9,23 @@ const testing = std.testing; const roc_binary_path = if (builtin.os.tag == .windows) ".\\zig-out\\bin\\roc.exe" else "./zig-out/bin/roc"; -/// Ensures the roc binary is up-to-date by always rebuilding it. -/// This is needed because these tests spawn the roc CLI as a child process, -/// and a stale binary will cause test failures even if the test code is correct. -fn ensureRocBinary(allocator: std.mem.Allocator) !void { - // Always rebuild to ensure the binary is up-to-date with the latest source changes. - // This prevents confusing test failures when the binary exists but is stale. 
- const build_result = try std.process.Child.run(.{ - .allocator = allocator, - .argv = &[_][]const u8{ "zig", "build", "roc" }, - }); - defer allocator.free(build_result.stdout); - defer allocator.free(build_result.stderr); - - if (build_result.term != .Exited or build_result.term.Exited != 0) { - std.debug.print("Failed to build roc binary:\n{s}\n", .{build_result.stderr}); - return error.RocBuildFailed; - } +/// Asserts that the roc binary exists. +/// The build system (build.zig) is responsible for building the roc binary +/// before running these tests. This avoids rebuilding 50+ times (once per test) +/// which causes disk space issues on CI. +fn ensureRocBinary() !void { + std.fs.cwd().access(roc_binary_path, .{}) catch { + std.debug.print( + \\ + \\ERROR: roc binary not found at {s} + \\ + \\The fx_platform_test requires the roc binary to be pre-built. + \\Run `zig build test` which will build it automatically, or + \\run `zig build roc` manually before running individual tests. + \\ + , .{roc_binary_path}); + return error.RocBinaryNotFound; + }; } /// Options for running roc commands @@ -39,7 +39,7 @@ const RunOptions = struct { /// Runs a roc command and returns the result. /// Automatically adds --no-cache for non-test/non-check commands to ensure fresh builds. 
fn runRoc(allocator: std.mem.Allocator, roc_file: []const u8, options: RunOptions) !std.process.Child.RunResult { - try ensureRocBinary(allocator); + try ensureRocBinary(); var args = std.ArrayList([]const u8){}; defer args.deinit(allocator); @@ -113,7 +113,7 @@ fn checkFailure(result: std.process.Child.RunResult) !void { } fn runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_input: []const u8) !std.process.Child.RunResult { - try ensureRocBinary(allocator); + try ensureRocBinary(); var child = std.process.Child.init(&[_][]const u8{ "./zig-out/bin/roc", roc_file }, allocator); child.stdin_behavior = .Pipe; child.stdout_behavior = .Pipe; @@ -152,7 +152,7 @@ fn runRocWithStdin(allocator: std.mem.Allocator, roc_file: []const u8, stdin_inp test "fx platform effectful functions" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Run the app directly with the roc CLI (not build, just run) const run_result = try std.process.Child.run(.{ @@ -204,7 +204,7 @@ test "fx platform effectful functions" { test "fx platform with dotdot starting path" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Run the app from a subdirectory that uses ../ at the START of its platform path // This tests that relative paths starting with .. 
are handled correctly @@ -401,7 +401,7 @@ test "fx platform dbg missing return value" { test "fx platform check unused state var reports correct errors" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Run `roc check` on an app with unused variables and type annotations // This test checks that the compiler reports the correct errors and doesn't @@ -541,7 +541,7 @@ test "fx platform opaque type with method" { test "fx platform string interpolation type mismatch" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Run an app that tries to interpolate a U8 (non-Str) type in a string. // This should fail with a type error because string interpolation only accepts Str. @@ -587,7 +587,7 @@ test "fx platform run from different cwd" { // running from a subdirectory correctly. const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Get absolute path to roc binary since we'll change cwd const roc_abs_path = try std.fs.cwd().realpathAlloc(allocator, roc_binary_path); @@ -828,7 +828,7 @@ test "fx platform str_interp_valid" { test "fx platform expect with toplevel numeric" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); // Run the app const run_result = try std.process.Child.run(.{ @@ -894,7 +894,7 @@ test "fx platform expect with toplevel numeric" { // test "fx platform test7" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); +// try ensureRocBinary(); // const run_result = try std.process.Child.run(.{ // .allocator = allocator, @@ -930,7 +930,7 @@ test "fx platform expect with toplevel numeric" { // test "fx platform test8" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); +// try ensureRocBinary(); // const run_result = try std.process.Child.run(.{ // .allocator = allocator, @@ -966,7 +966,7 @@ test "fx platform expect with 
toplevel numeric" { // test "fx platform test9" { // const allocator = testing.allocator; -// try ensureRocBinary(allocator); +// try ensureRocBinary(); // const run_result = try std.process.Child.run(.{ // .allocator = allocator, @@ -1001,7 +1001,7 @@ test "fx platform expect with toplevel numeric" { test "fx platform numeric_lookup_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1036,7 +1036,7 @@ test "fx platform numeric_lookup_test" { test "fx platform string_lookup_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1071,7 +1071,7 @@ test "fx platform string_lookup_test" { test "fx platform test_direct_string" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1106,7 +1106,7 @@ test "fx platform test_direct_string" { test "fx platform test_one_call" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1141,7 +1141,7 @@ test "fx platform test_one_call" { test "fx platform test_type_mismatch" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1177,7 +1177,7 @@ test "fx platform test_type_mismatch" { test "fx platform test_with_wrapper" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1212,7 +1212,7 @@ test "fx platform test_with_wrapper" { test "fx platform inspect_compare_test" { const allocator = testing.allocator; - try 
ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1249,7 +1249,7 @@ test "fx platform inspect_compare_test" { test "fx platform inspect_custom_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1285,7 +1285,7 @@ test "fx platform inspect_custom_test" { test "fx platform inspect_nested_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1321,7 +1321,7 @@ test "fx platform inspect_nested_test" { test "fx platform inspect_no_method_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1357,7 +1357,7 @@ test "fx platform inspect_no_method_test" { test "fx platform inspect_record_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1392,7 +1392,7 @@ test "fx platform inspect_record_test" { test "fx platform inspect_wrong_sig_test" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1427,7 +1427,7 @@ test "fx platform inspect_wrong_sig_test" { test "fx platform issue8433" { const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1463,7 +1463,7 @@ test "run aborts on errors by default" { // Tests that roc run aborts when there are type errors (without --allow-errors) const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); 
const run_result = try std.process.Child.run(.{ .allocator = allocator, @@ -1486,7 +1486,7 @@ test "run with --allow-errors attempts execution despite errors" { // Tests that roc run --allow-errors attempts to execute even with type errors const allocator = testing.allocator; - try ensureRocBinary(allocator); + try ensureRocBinary(); const run_result = try std.process.Child.run(.{ .allocator = allocator, From 278656943f0ee96900efa7890690fbbb90b03741 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 09:39:31 -0500 Subject: [PATCH 51/64] Test for stack overflows in roc programs --- src/cli/test/fx_platform_test.zig | 51 ++++++++++++++++++++++++++++++ test/fx/stack_overflow_runtime.roc | 16 ++++++++++ 2 files changed, 67 insertions(+) create mode 100644 test/fx/stack_overflow_runtime.roc diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index b759b86e42..fd2ff08ed5 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1582,3 +1582,54 @@ test "fx platform sublist method on inferred type" { try checkSuccess(run_result); } + +test "fx platform runtime stack overflow" { + // Tests that stack overflow in a running Roc program is caught and reported + // with a helpful error message instead of crashing with a raw signal. + // + // The Roc program contains an infinitely recursive function that will + // overflow the stack at runtime. Once proper stack overflow handling is + // implemented in the host/platform, this test will pass. + const allocator = testing.allocator; + + const run_result = try runRoc(allocator, "test/fx/stack_overflow_runtime.roc", .{}); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + // After stack overflow handling is implemented, we expect: + // 1. The process exits with code 134 (indicating stack overflow was caught) + // 2. 
Stderr contains a helpful "STACK OVERFLOW" message + switch (run_result.term) { + .Exited => |code| { + if (code == 134) { + // Stack overflow was caught and handled properly + // Verify the helpful error message was printed + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "STACK OVERFLOW") != null); + } else if (code == 139) { + // Exit code 139 = 128 + 11 (SIGSEGV) - stack overflow was NOT handled + // The Roc program crashed with a segfault that wasn't caught + std.debug.print("\n", .{}); + std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{}); + std.debug.print("Process crashed with SIGSEGV (exit code 139).\n", .{}); + std.debug.print("Expected: exit code 134 with STACK OVERFLOW message\n", .{}); + return error.StackOverflowNotHandled; + } else { + std.debug.print("Unexpected exit code: {}\n", .{code}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.UnexpectedExitCode; + } + }, + .Signal => |sig| { + // Process was killed directly by a signal (likely SIGSEGV = 11). + std.debug.print("\n", .{}); + std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{}); + std.debug.print("Process was killed by signal: {}\n", .{sig}); + std.debug.print("Expected: exit code 134 with STACK OVERFLOW message\n", .{}); + return error.StackOverflowNotHandled; + }, + else => { + std.debug.print("Unexpected termination: {}\n", .{run_result.term}); + return error.UnexpectedTermination; + }, + } +} diff --git a/test/fx/stack_overflow_runtime.roc b/test/fx/stack_overflow_runtime.roc new file mode 100644 index 0000000000..bc72921a3a --- /dev/null +++ b/test/fx/stack_overflow_runtime.roc @@ -0,0 +1,16 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +# This function causes infinite recursion, leading to stack overflow at runtime. +# It cannot be tail-call optimized because there's work after the recursive call. 
+overflow : I64 -> I64 +overflow = |n| + # Prevent tail-call optimization by adding to the result after recursion + overflow(n + 1) + 1 + +main! = || { + # This will overflow the stack at runtime + result = overflow(0) + Stdout.line!("Result: ${I64.to_str(result)}") +} From 7faec88e29c16515874ce9b05db2be88edf4d232 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 10:00:37 -0500 Subject: [PATCH 52/64] Add stack overflow handling for roc programs --- src/base/stack_overflow.zig | 338 ++++++++---------------------------- src/build/modules.zig | 2 +- src/builtins/handlers.zig | 300 ++++++++++++++++++++++++++++++++ src/builtins/mod.zig | 2 + test/fx/platform/host.zig | 86 +++++++++ 5 files changed, 462 insertions(+), 266 deletions(-) create mode 100644 src/builtins/handlers.zig diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index d501d0601a..1650196d4b 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -1,8 +1,7 @@ //! Stack overflow detection and handling for the Roc compiler. //! -//! This module provides a mechanism to catch stack overflows and report them -//! with a helpful error message instead of a generic segfault. This is particularly -//! useful during compiler development when recursive algorithms might blow the stack. +//! This module provides a thin wrapper around the generic signal handlers in +//! builtins.handlers, configured with compiler-specific error messages. //! //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate //! signal stack and install a SIGSEGV handler that detects stack overflows. 
@@ -13,58 +12,9 @@ const std = @import("std"); const builtin = @import("builtin"); +const handlers = @import("builtins").handlers; const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; -// Windows types and constants -const DWORD = u32; -const LONG = i32; -const ULONG_PTR = usize; -const PVOID = ?*anyopaque; -const HANDLE = ?*anyopaque; -const BOOL = i32; - -const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD; -const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005; -const EXCEPTION_CONTINUE_SEARCH: LONG = 0; -const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); -const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize)); - -const EXCEPTION_RECORD = extern struct { - ExceptionCode: DWORD, - ExceptionFlags: DWORD, - ExceptionRecord: ?*EXCEPTION_RECORD, - ExceptionAddress: PVOID, - NumberParameters: DWORD, - ExceptionInformation: [15]ULONG_PTR, -}; - -const CONTEXT = extern struct { - // We don't need the full context, just enough to make the struct valid - data: [1232]u8, // Size varies by arch, this is x64 size -}; - -const EXCEPTION_POINTERS = extern struct { - ExceptionRecord: *EXCEPTION_RECORD, - ContextRecord: *CONTEXT, -}; - -const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(.winapi) LONG; - -// Windows API imports -extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(.winapi) LPTOP_LEVEL_EXCEPTION_FILTER; -extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; -extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) BOOL; -extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; - -/// Size of the alternate signal stack (64KB should be plenty for the handler) -const ALT_STACK_SIZE = 64 * 1024; - -/// Storage for the alternate signal stack (POSIX 
only) -var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) = undefined; - -/// Whether the handler has been installed -var handler_installed = false; - /// Error message to display on stack overflow const STACK_OVERFLOW_MESSAGE = \\ @@ -89,219 +39,77 @@ const STACK_OVERFLOW_MESSAGE = \\ ; +/// Callback for stack overflow in the compiler +fn handleStackOverflow() noreturn { + if (comptime builtin.os.tag == .windows) { + // Windows: use WriteFile for signal-safe output + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); + kernel32.ExitProcess(134); + } else if (comptime builtin.os.tag != .wasi) { + // POSIX: use direct write syscall for signal-safety + _ = posix.write(posix.STDERR_FILENO, STACK_OVERFLOW_MESSAGE) catch {}; + posix.exit(134); + } else { + // WASI fallback + std.process.exit(134); + } +} + +/// Callback for access violation in the compiler +fn handleAccessViolation(fault_addr: usize) noreturn { + if (comptime builtin.os.tag == .windows) { + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: 
?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + var addr_buf: [18]u8 = undefined; + const addr_str = handlers.formatHex(fault_addr, &addr_buf); + + const msg1 = "\nAccess violation in the Roc compiler.\nFault address: "; + const msg2 = "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n"; + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, msg1.ptr, msg1.len, &bytes_written, null); + _ = kernel32.WriteFile(stderr_handle, addr_str.ptr, @intCast(addr_str.len), &bytes_written, null); + _ = kernel32.WriteFile(stderr_handle, msg2.ptr, msg2.len, &bytes_written, null); + kernel32.ExitProcess(139); + } else { + // POSIX (and WASI fallback): use direct write syscall for signal-safety + const generic_msg = "\nSegmentation fault (SIGSEGV) in the Roc compiler.\nFault address: "; + _ = posix.write(posix.STDERR_FILENO, generic_msg) catch {}; + + // Write the fault address as hex + var addr_buf: [18]u8 = undefined; + const addr_str = handlers.formatHex(fault_addr, &addr_buf); + _ = posix.write(posix.STDERR_FILENO, addr_str) catch {}; + _ = posix.write(posix.STDERR_FILENO, "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n") catch {}; + posix.exit(139); + } +} + /// Install the stack overflow handler. /// This should be called early in main() before any significant work is done. /// Returns true if the handler was installed successfully, false otherwise. 
pub fn install() bool { - if (handler_installed) return true; - - if (comptime builtin.os.tag == .windows) { - return installWindows(); - } - - if (comptime builtin.os.tag == .wasi) { - // WASI doesn't support signal handling - return false; - } - - return installPosix(); -} - -fn installPosix() bool { - // Set up the alternate signal stack - var alt_stack = posix.stack_t{ - .sp = &alt_stack_storage, - .flags = 0, - .size = ALT_STACK_SIZE, - }; - - posix.sigaltstack(&alt_stack, null) catch { - return false; - }; - - // Install the SIGSEGV handler - const action = posix.Sigaction{ - .handler = .{ .sigaction = handleSignalPosix }, - .mask = posix.sigemptyset(), - .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, - }; - - posix.sigaction(posix.SIG.SEGV, &action, null); - - // Also catch SIGBUS which can occur on some systems for stack overflow - posix.sigaction(posix.SIG.BUS, &action, null); - - handler_installed = true; - return true; -} - -fn installWindows() bool { - _ = SetUnhandledExceptionFilter(handleExceptionWindows); - handler_installed = true; - return true; -} - -/// Windows exception handler function -fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG { - const exception_code = exception_info.ExceptionRecord.ExceptionCode; - - // Check if this is a stack overflow or access violation - const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW); - const is_access_violation = (exception_code == EXCEPTION_ACCESS_VIOLATION); - - if (!is_stack_overflow and !is_access_violation) { - // Let other handlers deal with this exception - return EXCEPTION_CONTINUE_SEARCH; - } - - // Write error message to stderr - const stderr_handle = GetStdHandle(STD_ERROR_HANDLE); - if (stderr_handle != INVALID_HANDLE_VALUE and stderr_handle != null) { - var bytes_written: DWORD = 0; - if (is_stack_overflow) { - _ = WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); - } else { - const msg = 
"\nAccess violation in the Roc compiler.\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n"; - _ = WriteFile(stderr_handle, msg.ptr, msg.len, &bytes_written, null); - } - } - - // Exit with appropriate code - const exit_code: c_uint = if (is_stack_overflow) 134 else 139; - ExitProcess(exit_code); -} - -/// The POSIX signal handler function -fn handleSignalPosix(sig: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { - // Get the fault address - access differs by platform - const fault_addr: usize = getFaultAddress(info); - - // Get the current stack pointer to help determine if this is a stack overflow - var current_sp: usize = 0; - asm volatile ("" - : [sp] "={sp}" (current_sp), - ); - - // A stack overflow typically occurs when the fault address is near the stack pointer - // or below the stack (stacks grow downward on most architectures) - const likely_stack_overflow = isLikelyStackOverflow(fault_addr, current_sp); - - // Write our error message to stderr (use STDERR_FILENO directly for signal safety) - const stderr_fd = posix.STDERR_FILENO; - - if (likely_stack_overflow) { - _ = posix.write(stderr_fd, STACK_OVERFLOW_MESSAGE) catch {}; - } else { - // Generic segfault - provide some context - const generic_msg = switch (sig) { - posix.SIG.SEGV => "\nSegmentation fault (SIGSEGV) in the Roc compiler.\nFault address: ", - posix.SIG.BUS => "\nBus error (SIGBUS) in the Roc compiler.\nFault address: ", - else => "\nFatal signal in the Roc compiler.\nFault address: ", - }; - _ = posix.write(stderr_fd, generic_msg) catch {}; - - // Write the fault address as hex - var addr_buf: [18]u8 = undefined; - const addr_str = formatHex(fault_addr, &addr_buf); - _ = posix.write(stderr_fd, addr_str) catch {}; - _ = posix.write(stderr_fd, "\n\nPlease report this issue at: https://github.com/roc-lang/roc/issues\n\n") catch {}; - } - - // Exit with a distinct error code for stack overflow - if (likely_stack_overflow) { - posix.exit(134); // 
128 + 6 (SIGABRT-like) - } else { - posix.exit(139); // 128 + 11 (SIGSEGV) - } -} - -/// Get the fault address from siginfo_t (platform-specific) -fn getFaultAddress(info: *const posix.siginfo_t) usize { - // The siginfo_t structure varies by platform - if (comptime builtin.os.tag == .linux) { - // Linux: fault address is in fields.sigfault.addr - return @intFromPtr(info.fields.sigfault.addr); - } else if (comptime builtin.os.tag == .macos or - builtin.os.tag == .ios or - builtin.os.tag == .tvos or - builtin.os.tag == .watchos or - builtin.os.tag == .visionos or - builtin.os.tag == .freebsd or - builtin.os.tag == .dragonfly or - builtin.os.tag == .netbsd or - builtin.os.tag == .openbsd) - { - // macOS/iOS/BSD: fault address is in addr field - return @intFromPtr(info.addr); - } else { - // Fallback: return 0 if we can't determine the address - return 0; - } -} - -/// Heuristic to determine if a fault is likely a stack overflow -fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool { - // If fault address is 0 or very low, it's likely a null pointer dereference - if (fault_addr < 4096) return false; - - // Stack overflows typically fault near the stack guard page - // The fault address will be close to (but below) the current stack pointer - // We use a generous range since the stack pointer in the signal handler - // is on the alternate stack - - // On most systems, the main stack is in high memory and grows down - // A stack overflow fault will be at an address lower than the normal stack - - // Check if fault address is within a reasonable range of where stack would be - // This is a heuristic - we check if the fault is in the lower part of address space - // where guard pages typically are - - const max_addr = std.math.maxInt(usize); - const high_memory_threshold = max_addr - (16 * 1024 * 1024 * 1024); // 16GB from top - - // If the fault is in the high memory region (where stacks live) but at a page boundary - // it's likely a stack guard page hit 
- if (fault_addr > high_memory_threshold) { - // Check if it's at a page boundary (guard pages are typically page-aligned) - const page_size = std.heap.page_size_min; - const page_aligned = (fault_addr & (page_size - 1)) == 0 or (fault_addr & (page_size - 1)) < 64; - if (page_aligned) return true; - } - - // Also check if the fault address is suspiciously close to the current SP - // This catches cases where we're still on the main stack when the overflow happens - const sp_distance = if (fault_addr < current_sp) current_sp - fault_addr else fault_addr - current_sp; - if (sp_distance < 1024 * 1024) { // Within 1MB of stack pointer - return true; - } - - return false; -} - -/// Format a usize as hexadecimal -fn formatHex(value: usize, buf: []u8) []const u8 { - const hex_chars = "0123456789abcdef"; - var i: usize = buf.len; - - if (value == 0) { - i -= 1; - buf[i] = '0'; - } else { - var v = value; - while (v > 0 and i > 2) { - i -= 1; - buf[i] = hex_chars[v & 0xf]; - v >>= 4; - } - } - - // Add 0x prefix - i -= 1; - buf[i] = 'x'; - i -= 1; - buf[i] = '0'; - - return buf[i..]; + return handlers.install(handleStackOverflow, handleAccessViolation); } /// Test function that intentionally causes a stack overflow. 
@@ -330,13 +138,13 @@ pub fn triggerStackOverflowForTest() noreturn { test "formatHex" { var buf: [18]u8 = undefined; - const zero = formatHex(0, &buf); + const zero = handlers.formatHex(0, &buf); try std.testing.expectEqualStrings("0x0", zero); - const small = formatHex(0xff, &buf); + const small = handlers.formatHex(0xff, &buf); try std.testing.expectEqualStrings("0xff", small); - const medium = formatHex(0xdeadbeef, &buf); + const medium = handlers.formatHex(0xdeadbeef, &buf); try std.testing.expectEqualStrings("0xdeadbeef", medium); } diff --git a/src/build/modules.zig b/src/build/modules.zig index 3e621ab6af..1fe8d4a854 100644 --- a/src/build/modules.zig +++ b/src/build/modules.zig @@ -307,7 +307,7 @@ pub const ModuleType = enum { .fs => &.{}, .tracy => &.{ .build_options, .builtins }, .collections => &.{}, - .base => &.{.collections}, + .base => &.{ .collections, .builtins }, .roc_src => &.{}, .types => &.{ .base, .collections }, .reporting => &.{ .collections, .base }, diff --git a/src/builtins/handlers.zig b/src/builtins/handlers.zig new file mode 100644 index 0000000000..705964ac98 --- /dev/null +++ b/src/builtins/handlers.zig @@ -0,0 +1,300 @@ +//! Generic signal handlers for stack overflow and access violation detection. +//! +//! This module provides a mechanism to catch stack overflows and access violations +//! and handle them with custom callbacks instead of crashing with a raw signal. +//! +//! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate +//! signal stack and install a SIGSEGV handler that detects stack overflows. +//! +//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. +//! +//! WASI is not currently supported (no signal handling available). 
+ +const std = @import("std"); +const builtin = @import("builtin"); +const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; + +// Windows types and constants +const DWORD = u32; +const LONG = i32; +const ULONG_PTR = usize; +const PVOID = ?*anyopaque; +const HANDLE = ?*anyopaque; +const BOOL = i32; + +const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD; +const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005; +const EXCEPTION_CONTINUE_SEARCH: LONG = 0; +const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); +const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize)); + +const EXCEPTION_RECORD = extern struct { + ExceptionCode: DWORD, + ExceptionFlags: DWORD, + ExceptionRecord: ?*EXCEPTION_RECORD, + ExceptionAddress: PVOID, + NumberParameters: DWORD, + ExceptionInformation: [15]ULONG_PTR, +}; + +const CONTEXT = extern struct { + // We don't need the full context, just enough to make the struct valid + data: [1232]u8, // Size varies by arch, this is x64 size +}; + +const EXCEPTION_POINTERS = extern struct { + ExceptionRecord: *EXCEPTION_RECORD, + ContextRecord: *CONTEXT, +}; + +const LPTOP_LEVEL_EXCEPTION_FILTER = ?*const fn (*EXCEPTION_POINTERS) callconv(.winapi) LONG; + +// Windows API imports +extern "kernel32" fn SetUnhandledExceptionFilter(lpTopLevelExceptionFilter: LPTOP_LEVEL_EXCEPTION_FILTER) callconv(.winapi) LPTOP_LEVEL_EXCEPTION_FILTER; +extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; +extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) BOOL; +extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + +/// Size of the alternate signal stack (64KB should be plenty for the handler) +const ALT_STACK_SIZE = 64 * 1024; + +/// Storage for the alternate signal stack (POSIX only) +var alt_stack_storage: [ALT_STACK_SIZE]u8 align(16) 
= undefined; + +/// Whether the handler has been installed +var handler_installed = false; + +/// Callback function type for handling stack overflow +pub const StackOverflowCallback = *const fn () noreturn; + +/// Callback function type for handling access violation/segfault +pub const AccessViolationCallback = *const fn (fault_addr: usize) noreturn; + +/// Stored callbacks (set during install) +var stack_overflow_callback: ?StackOverflowCallback = null; +var access_violation_callback: ?AccessViolationCallback = null; + +/// Install signal handlers with custom callbacks. +/// +/// Parameters: +/// - on_stack_overflow: Called when a stack overflow is detected. Must not return. +/// - on_access_violation: Called for other memory access violations (segfaults). +/// Receives the fault address. Must not return. +/// +/// Returns true if the handlers were installed successfully, false otherwise. +pub fn install(on_stack_overflow: StackOverflowCallback, on_access_violation: AccessViolationCallback) bool { + if (handler_installed) return true; + + stack_overflow_callback = on_stack_overflow; + access_violation_callback = on_access_violation; + + if (comptime builtin.os.tag == .windows) { + return installWindows(); + } + + if (comptime builtin.os.tag == .wasi) { + // WASI doesn't support signal handling + return false; + } + + return installPosix(); +} + +fn installPosix() bool { + // Set up the alternate signal stack + var alt_stack = posix.stack_t{ + .sp = &alt_stack_storage, + .flags = 0, + .size = ALT_STACK_SIZE, + }; + + posix.sigaltstack(&alt_stack, null) catch { + return false; + }; + + // Install the SIGSEGV handler + const action = posix.Sigaction{ + .handler = .{ .sigaction = handleSignalPosix }, + .mask = posix.sigemptyset(), + .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, + }; + + posix.sigaction(posix.SIG.SEGV, &action, null); + + // Also catch SIGBUS which can occur on some systems for stack overflow + posix.sigaction(posix.SIG.BUS, &action, null); + + 
handler_installed = true; + return true; +} + +fn installWindows() bool { + _ = SetUnhandledExceptionFilter(handleExceptionWindows); + handler_installed = true; + return true; +} + +/// Windows exception handler function +fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG { + const exception_code = exception_info.ExceptionRecord.ExceptionCode; + + // Check if this is a stack overflow or access violation + const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW); + const is_access_violation = (exception_code == EXCEPTION_ACCESS_VIOLATION); + + if (!is_stack_overflow and !is_access_violation) { + // Let other handlers deal with this exception + return EXCEPTION_CONTINUE_SEARCH; + } + + if (is_stack_overflow) { + if (stack_overflow_callback) |callback| { + callback(); + } + } else { + if (access_violation_callback) |callback| { + // Get fault address from ExceptionInformation[1] for access violations + const fault_addr = exception_info.ExceptionRecord.ExceptionInformation[1]; + callback(fault_addr); + } + } + + // If no callback was set, exit with appropriate code + const exit_code: c_uint = if (is_stack_overflow) 134 else 139; + ExitProcess(exit_code); +} + +/// The POSIX signal handler function +fn handleSignalPosix(_: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { + // Get the fault address - access differs by platform + const fault_addr: usize = getFaultAddress(info); + + // Get the current stack pointer to help determine if this is a stack overflow + var current_sp: usize = 0; + asm volatile ("" + : [sp] "={sp}" (current_sp), + ); + + // A stack overflow typically occurs when the fault address is near the stack pointer + // or below the stack (stacks grow downward on most architectures) + const likely_stack_overflow = isLikelyStackOverflow(fault_addr, current_sp); + + if (likely_stack_overflow) { + if (stack_overflow_callback) |callback| { + callback(); + } + } else { + if 
(access_violation_callback) |callback| { + callback(fault_addr); + } + } + + // If no callback was set, exit with appropriate code + if (likely_stack_overflow) { + posix.exit(134); // 128 + 6 (SIGABRT-like) + } else { + posix.exit(139); // 128 + 11 (SIGSEGV) + } +} + +/// Get the fault address from siginfo_t (platform-specific) +fn getFaultAddress(info: *const posix.siginfo_t) usize { + // The siginfo_t structure varies by platform + if (comptime builtin.os.tag == .linux) { + // Linux: fault address is in fields.sigfault.addr + return @intFromPtr(info.fields.sigfault.addr); + } else if (comptime builtin.os.tag == .macos or + builtin.os.tag == .ios or + builtin.os.tag == .tvos or + builtin.os.tag == .watchos or + builtin.os.tag == .visionos or + builtin.os.tag == .freebsd or + builtin.os.tag == .dragonfly or + builtin.os.tag == .netbsd or + builtin.os.tag == .openbsd) + { + // macOS/iOS/BSD: fault address is in addr field + return @intFromPtr(info.addr); + } else { + // Fallback: return 0 if we can't determine the address + return 0; + } +} + +/// Heuristic to determine if a fault is likely a stack overflow +fn isLikelyStackOverflow(fault_addr: usize, current_sp: usize) bool { + // If fault address is 0 or very low, it's likely a null pointer dereference + if (fault_addr < 4096) return false; + + // If the fault address is close to the current stack pointer (within 16MB), + // it's very likely a stack overflow. The signal handler runs on an alternate + // stack, but the fault address should still be near where the stack was. + const sp_distance = if (fault_addr < current_sp) current_sp - fault_addr else fault_addr - current_sp; + if (sp_distance < 16 * 1024 * 1024) { // Within 16MB of stack pointer + return true; + } + + // On 64-bit systems, stacks are typically placed in high memory. + // On macOS, the stack is around 0x16XXXXXXXX (about 6GB mark). + // On Linux, it's typically near 0x7FFFFFFFFFFF. 
+ // If the fault address is in the upper half of the address space, + // it's more likely to be a stack-related issue. + if (comptime @sizeOf(usize) == 8) { + // 64-bit: check if address is in upper portion of address space + // On macOS, stacks start around 0x100000000 (4GB) and go up + // On Linux, stacks are near 0x7FFFFFFFFFFF + const lower_bound: usize = 0x100000000; // 4GB + if (fault_addr > lower_bound) { + // This is in the region where stacks typically are on 64-bit systems + // Default to assuming it's a stack overflow for addresses in this range + return true; + } + } else { + // 32-bit: stacks are typically in the upper portion of the 4GB space + const lower_bound: usize = 0x40000000; // 1GB + if (fault_addr > lower_bound) { + return true; + } + } + + return false; +} + +/// Format a usize as hexadecimal (for use in callbacks) +pub fn formatHex(value: usize, buf: []u8) []const u8 { + const hex_chars = "0123456789abcdef"; + var i: usize = buf.len; + + if (value == 0) { + i -= 1; + buf[i] = '0'; + } else { + var v = value; + while (v > 0 and i > 2) { + i -= 1; + buf[i] = hex_chars[v & 0xf]; + v >>= 4; + } + } + + // Add 0x prefix + i -= 1; + buf[i] = 'x'; + i -= 1; + buf[i] = '0'; + + return buf[i..]; +} + +test "formatHex" { + var buf: [18]u8 = undefined; + + const zero = formatHex(0, &buf); + try std.testing.expectEqualStrings("0x0", zero); + + const small = formatHex(0xff, &buf); + try std.testing.expectEqualStrings("0xff", small); + + const medium = formatHex(0xdeadbeef, &buf); + try std.testing.expectEqualStrings("0xdeadbeef", medium); +} diff --git a/src/builtins/mod.zig b/src/builtins/mod.zig index a670440110..e2c08edaa7 100644 --- a/src/builtins/mod.zig +++ b/src/builtins/mod.zig @@ -3,6 +3,7 @@ const std = @import("std"); pub const host_abi = @import("host_abi.zig"); pub const dec = @import("dec.zig"); +pub const handlers = @import("handlers.zig"); pub const hash = @import("hash.zig"); pub const list = @import("list.zig"); pub const num = 
@import("num.zig"); @@ -12,6 +13,7 @@ pub const utils = @import("utils.zig"); test "builtins tests" { std.testing.refAllDecls(@import("dec.zig")); + std.testing.refAllDecls(@import("handlers.zig")); std.testing.refAllDecls(@import("hash.zig")); std.testing.refAllDecls(@import("host_abi.zig")); std.testing.refAllDecls(@import("list.zig")); diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index 2b3b30bf95..b0352f7c58 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -1,10 +1,92 @@ ///! Platform host that tests effectful functions writing to stdout and stderr. const std = @import("std"); +const builtin = @import("builtin"); const builtins = @import("builtins"); const build_options = @import("build_options"); +const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; const trace_refcount = build_options.trace_refcount; +/// Error message to display on stack overflow in a Roc program +const STACK_OVERFLOW_MESSAGE = + \\ + \\================================================================================ + \\STACK OVERFLOW in this Roc program + \\================================================================================ + \\ + \\This Roc program ran out of stack space. This can happen with: + \\ - Infinite recursion (a function that calls itself without stopping) + \\ - Very deeply nested function calls + \\ + \\Check your code for functions that might recurse infinitely. 
+ \\ + \\================================================================================ + \\ + \\ +; + +/// Callback for stack overflow in a Roc program +fn handleRocStackOverflow() noreturn { + if (comptime builtin.os.tag == .windows) { + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, STACK_OVERFLOW_MESSAGE.ptr, STACK_OVERFLOW_MESSAGE.len, &bytes_written, null); + kernel32.ExitProcess(134); + } else if (comptime builtin.os.tag != .wasi) { + _ = posix.write(posix.STDERR_FILENO, STACK_OVERFLOW_MESSAGE) catch {}; + posix.exit(134); + } else { + std.process.exit(134); + } +} + +/// Callback for access violation in a Roc program +fn handleRocAccessViolation(fault_addr: usize) noreturn { + if (comptime builtin.os.tag == .windows) { + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + var addr_buf: [18]u8 = undefined; + const addr_str = builtins.handlers.formatHex(fault_addr, &addr_buf); + + const msg1 = "\nSegmentation fault (SIGSEGV) in this Roc program.\nFault 
address: "; + const msg2 = "\n\n"; + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, msg1.ptr, msg1.len, &bytes_written, null); + _ = kernel32.WriteFile(stderr_handle, addr_str.ptr, @intCast(addr_str.len), &bytes_written, null); + _ = kernel32.WriteFile(stderr_handle, msg2.ptr, msg2.len, &bytes_written, null); + kernel32.ExitProcess(139); + } else { + // POSIX (and WASI fallback) + const msg = "\nSegmentation fault (SIGSEGV) in this Roc program.\nFault address: "; + _ = posix.write(posix.STDERR_FILENO, msg) catch {}; + + var addr_buf: [18]u8 = undefined; + const addr_str = builtins.handlers.formatHex(fault_addr, &addr_buf); + _ = posix.write(posix.STDERR_FILENO, addr_str) catch {}; + _ = posix.write(posix.STDERR_FILENO, "\n\n") catch {}; + posix.exit(139); + } +} + /// Host environment - contains GeneralPurposeAllocator for leak detection const HostEnv = struct { gpa: std.heap.GeneralPurposeAllocator(.{}), @@ -264,6 +346,10 @@ const hosted_function_ptrs = [_]builtins.host_abi.HostedFn{ /// Platform host entrypoint fn platform_main() !void { + // Install signal handlers for stack overflow and access violations + // This allows us to display helpful error messages instead of crashing + _ = builtins.handlers.install(handleRocStackOverflow, handleRocAccessViolation); + var host_env = HostEnv{ .gpa = std.heap.GeneralPurposeAllocator(.{}){}, }; From e3e9b2b135373bfd8d64e435a0fc7cb3517bd8b6 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 10:15:30 -0500 Subject: [PATCH 53/64] Add div by 0 checks --- src/base/stack_overflow.zig | 56 ++++++++++++++++++++--- src/builtins/handlers.zig | 76 +++++++++++++++++++++++-------- src/cli/test/fx_platform_test.zig | 43 +++++++++++++++++ test/fx/division_by_zero.roc | 14 ++++++ test/fx/platform/host.zig | 45 +++++++++++++++++- 5 files changed, 207 insertions(+), 27 deletions(-) create mode 100644 test/fx/division_by_zero.roc diff 
--git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index 1650196d4b..a0a55e0f39 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -1,12 +1,12 @@ -//! Stack overflow detection and handling for the Roc compiler. +//! Signal handling for the Roc compiler (stack overflow, segfault, division by zero). //! //! This module provides a thin wrapper around the generic signal handlers in //! builtins.handlers, configured with compiler-specific error messages. //! //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate -//! signal stack and install a SIGSEGV handler that detects stack overflows. +//! signal stack and install handlers for SIGSEGV, SIGBUS, and SIGFPE. //! -//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. +//! On Windows, we use SetUnhandledExceptionFilter to catch various exceptions. //! //! WASI is not currently supported (no signal handling available). @@ -67,6 +67,50 @@ fn handleStackOverflow() noreturn { } } +/// Error message to display on arithmetic error (division by zero, etc.) +const ARITHMETIC_ERROR_MESSAGE = + \\ + \\================================================================================ + \\ARITHMETIC ERROR in the Roc compiler + \\================================================================================ + \\ + \\The Roc compiler encountered an arithmetic error (likely division by zero). + \\This is a bug in the compiler, not in your code. + \\ + \\Please report this issue at: https://github.com/roc-lang/roc/issues + \\ + \\Include the Roc code that triggered this error if possible. 
+ \\ + \\================================================================================ + \\ + \\ +; + +/// Callback for arithmetic errors (division by zero) in the compiler +fn handleArithmeticError() noreturn { + if (comptime builtin.os.tag == .windows) { + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, ARITHMETIC_ERROR_MESSAGE.ptr, ARITHMETIC_ERROR_MESSAGE.len, &bytes_written, null); + kernel32.ExitProcess(136); + } else if (comptime builtin.os.tag != .wasi) { + _ = posix.write(posix.STDERR_FILENO, ARITHMETIC_ERROR_MESSAGE) catch {}; + posix.exit(136); // 128 + 8 (SIGFPE) + } else { + std.process.exit(136); + } +} + /// Callback for access violation in the compiler fn handleAccessViolation(fault_addr: usize) noreturn { if (comptime builtin.os.tag == .windows) { @@ -105,11 +149,11 @@ fn handleAccessViolation(fault_addr: usize) noreturn { } } -/// Install the stack overflow handler. +/// Install signal handlers for stack overflow, segfault, and division by zero. /// This should be called early in main() before any significant work is done. -/// Returns true if the handler was installed successfully, false otherwise. +/// Returns true if the handlers were installed successfully, false otherwise. 
pub fn install() bool { - return handlers.install(handleStackOverflow, handleAccessViolation); + return handlers.install(handleStackOverflow, handleAccessViolation, handleArithmeticError); } /// Test function that intentionally causes a stack overflow. diff --git a/src/builtins/handlers.zig b/src/builtins/handlers.zig index 705964ac98..41a29a7eb6 100644 --- a/src/builtins/handlers.zig +++ b/src/builtins/handlers.zig @@ -1,12 +1,13 @@ -//! Generic signal handlers for stack overflow and access violation detection. +//! Generic signal handlers for stack overflow, access violation, and arithmetic errors. //! -//! This module provides a mechanism to catch stack overflows and access violations -//! and handle them with custom callbacks instead of crashing with a raw signal. +//! This module provides a mechanism to catch runtime errors like stack overflows, +//! access violations, and division by zero, handling them with custom callbacks +//! instead of crashing with a raw signal. //! //! On POSIX systems (Linux, macOS), we use sigaltstack to set up an alternate -//! signal stack and install a SIGSEGV handler that detects stack overflows. +//! signal stack and install handlers for SIGSEGV, SIGBUS, and SIGFPE. //! -//! On Windows, we use SetUnhandledExceptionFilter to catch EXCEPTION_STACK_OVERFLOW. +//! On Windows, we use SetUnhandledExceptionFilter to catch various exceptions. //! //! WASI is not currently supported (no signal handling available). 
@@ -24,6 +25,8 @@ const BOOL = i32; const EXCEPTION_STACK_OVERFLOW: DWORD = 0xC00000FD; const EXCEPTION_ACCESS_VIOLATION: DWORD = 0xC0000005; +const EXCEPTION_INT_DIVIDE_BY_ZERO: DWORD = 0xC0000094; +const EXCEPTION_INT_OVERFLOW: DWORD = 0xC0000095; const EXCEPTION_CONTINUE_SEARCH: LONG = 0; const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); const INVALID_HANDLE_VALUE: HANDLE = @ptrFromInt(std.math.maxInt(usize)); @@ -70,9 +73,13 @@ pub const StackOverflowCallback = *const fn () noreturn; /// Callback function type for handling access violation/segfault pub const AccessViolationCallback = *const fn (fault_addr: usize) noreturn; +/// Callback function type for handling division by zero (and other arithmetic errors) +pub const ArithmeticErrorCallback = *const fn () noreturn; + /// Stored callbacks (set during install) var stack_overflow_callback: ?StackOverflowCallback = null; var access_violation_callback: ?AccessViolationCallback = null; +var arithmetic_error_callback: ?ArithmeticErrorCallback = null; /// Install signal handlers with custom callbacks. /// @@ -80,13 +87,19 @@ var access_violation_callback: ?AccessViolationCallback = null; /// - on_stack_overflow: Called when a stack overflow is detected. Must not return. /// - on_access_violation: Called for other memory access violations (segfaults). /// Receives the fault address. Must not return. +/// - on_arithmetic_error: Called for arithmetic errors like division by zero. Must not return. /// /// Returns true if the handlers were installed successfully, false otherwise. 
-pub fn install(on_stack_overflow: StackOverflowCallback, on_access_violation: AccessViolationCallback) bool { +pub fn install( + on_stack_overflow: StackOverflowCallback, + on_access_violation: AccessViolationCallback, + on_arithmetic_error: ArithmeticErrorCallback, +) bool { if (handler_installed) return true; stack_overflow_callback = on_stack_overflow; access_violation_callback = on_access_violation; + arithmetic_error_callback = on_arithmetic_error; if (comptime builtin.os.tag == .windows) { return installWindows(); @@ -112,17 +125,26 @@ fn installPosix() bool { return false; }; - // Install the SIGSEGV handler - const action = posix.Sigaction{ - .handler = .{ .sigaction = handleSignalPosix }, + // Install the SIGSEGV handler for stack overflow and access violations + const segv_action = posix.Sigaction{ + .handler = .{ .sigaction = handleSegvSignal }, .mask = posix.sigemptyset(), .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, }; - posix.sigaction(posix.SIG.SEGV, &action, null); + posix.sigaction(posix.SIG.SEGV, &segv_action, null); // Also catch SIGBUS which can occur on some systems for stack overflow - posix.sigaction(posix.SIG.BUS, &action, null); + posix.sigaction(posix.SIG.BUS, &segv_action, null); + + // Install the SIGFPE handler for division by zero and other arithmetic errors + const fpe_action = posix.Sigaction{ + .handler = .{ .sigaction = handleFpeSignal }, + .mask = posix.sigemptyset(), + .flags = posix.SA.SIGINFO | posix.SA.ONSTACK, + }; + + posix.sigaction(posix.SIG.FPE, &fpe_action, null); handler_installed = true; return true; @@ -138,11 +160,14 @@ fn installWindows() bool { fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) LONG { const exception_code = exception_info.ExceptionRecord.ExceptionCode; - // Check if this is a stack overflow or access violation + // Check if this is a known exception type const is_stack_overflow = (exception_code == EXCEPTION_STACK_OVERFLOW); const is_access_violation = 
(exception_code == EXCEPTION_ACCESS_VIOLATION); + const is_divide_by_zero = (exception_code == EXCEPTION_INT_DIVIDE_BY_ZERO); + const is_int_overflow = (exception_code == EXCEPTION_INT_OVERFLOW); + const is_arithmetic_error = is_divide_by_zero or is_int_overflow; - if (!is_stack_overflow and !is_access_violation) { + if (!is_stack_overflow and !is_access_violation and !is_arithmetic_error) { // Let other handlers deal with this exception return EXCEPTION_CONTINUE_SEARCH; } @@ -151,21 +176,24 @@ fn handleExceptionWindows(exception_info: *EXCEPTION_POINTERS) callconv(.winapi) if (stack_overflow_callback) |callback| { callback(); } + ExitProcess(134); + } else if (is_arithmetic_error) { + if (arithmetic_error_callback) |callback| { + callback(); + } + ExitProcess(136); // 128 + 8 (SIGFPE) } else { if (access_violation_callback) |callback| { // Get fault address from ExceptionInformation[1] for access violations const fault_addr = exception_info.ExceptionRecord.ExceptionInformation[1]; callback(fault_addr); } + ExitProcess(139); } - - // If no callback was set, exit with appropriate code - const exit_code: c_uint = if (is_stack_overflow) 134 else 139; - ExitProcess(exit_code); } -/// The POSIX signal handler function -fn handleSignalPosix(_: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { +/// The POSIX SIGSEGV/SIGBUS signal handler function +fn handleSegvSignal(_: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { // Get the fault address - access differs by platform const fault_addr: usize = getFaultAddress(info); @@ -197,6 +225,16 @@ fn handleSignalPosix(_: i32, info: *const posix.siginfo_t, _: ?*anyopaque) callc } } +/// The POSIX SIGFPE signal handler function (division by zero, etc.) 
+fn handleFpeSignal(_: i32, _: *const posix.siginfo_t, _: ?*anyopaque) callconv(.c) void { + if (arithmetic_error_callback) |callback| { + callback(); + } + + // If no callback was set, exit with SIGFPE code + posix.exit(136); // 128 + 8 (SIGFPE) +} + /// Get the fault address from siginfo_t (platform-specific) fn getFaultAddress(info: *const posix.siginfo_t) usize { // The siginfo_t structure varies by platform diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index fd2ff08ed5..3c9579c7d5 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1633,3 +1633,46 @@ test "fx platform runtime stack overflow" { }, } } + +test "fx platform runtime division by zero" { + // Tests that division by zero in a running Roc program is caught and reported + // with a helpful error message instead of crashing with a raw signal. + // + // The error can be caught by either: + // 1. The Roc interpreter (exit code 1, "DivisionByZero" message) - most common + // 2. 
The SIGFPE signal handler (exit code 136, "DIVISION BY ZERO" message) - native code + const allocator = testing.allocator; + + // The Roc program uses a var to prevent compile-time constant folding + const run_result = try runRoc(allocator, "test/fx/division_by_zero.roc", .{}); + defer allocator.free(run_result.stdout); + defer allocator.free(run_result.stderr); + + switch (run_result.term) { + .Exited => |code| { + if (code == 136) { + // Division by zero was caught by the SIGFPE handler (native code) + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "DIVISION BY ZERO") != null); + } else if (code == 1) { + // Division by zero was caught by the interpreter - this is the expected case + // The interpreter catches it and reports "DivisionByZero" + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "DivisionByZero") != null); + } else { + std.debug.print("Unexpected exit code: {}\n", .{code}); + std.debug.print("STDERR: {s}\n", .{run_result.stderr}); + return error.UnexpectedExitCode; + } + }, + .Signal => |sig| { + // Process was killed directly by a signal without being caught + std.debug.print("\n", .{}); + std.debug.print("Division by zero was not caught!\n", .{}); + std.debug.print("Process was killed by signal: {}\n", .{sig}); + return error.DivisionByZeroNotHandled; + }, + else => { + std.debug.print("Unexpected termination: {}\n", .{run_result.term}); + return error.UnexpectedTermination; + }, + } +} diff --git a/test/fx/division_by_zero.roc b/test/fx/division_by_zero.roc new file mode 100644 index 0000000000..bcc398fcd1 --- /dev/null +++ b/test/fx/division_by_zero.roc @@ -0,0 +1,14 @@ +app [main!] { pf: platform "./platform/main.roc" } + +import pf.Stdout + +# Use a mutable variable to prevent compile-time evaluation +main! 
= || { + # The var keyword creates a runtime variable that can't be constant-folded + var $divisor = 0 + + # This will trigger a division by zero error at runtime + result = 42 / $divisor + + Stdout.line!("Result: ${U64.to_str(result)}") +} diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index b0352f7c58..476c731f53 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -87,6 +87,47 @@ fn handleRocAccessViolation(fault_addr: usize) noreturn { } } +/// Error message to display on division by zero in a Roc program +const DIVISION_BY_ZERO_MESSAGE = + \\ + \\================================================================================ + \\DIVISION BY ZERO in this Roc program + \\================================================================================ + \\ + \\This Roc program attempted to divide by zero. + \\ + \\Check your code for places where a divisor might be zero. + \\ + \\================================================================================ + \\ + \\ +; + +/// Callback for arithmetic errors (division by zero) in a Roc program +fn handleRocArithmeticError() noreturn { + if (comptime builtin.os.tag == .windows) { + const DWORD = u32; + const HANDLE = ?*anyopaque; + const STD_ERROR_HANDLE: DWORD = @bitCast(@as(i32, -12)); + + const kernel32 = struct { + extern "kernel32" fn GetStdHandle(nStdHandle: DWORD) callconv(.winapi) HANDLE; + extern "kernel32" fn WriteFile(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: ?*DWORD, lpOverlapped: ?*anyopaque) callconv(.winapi) i32; + extern "kernel32" fn ExitProcess(uExitCode: c_uint) callconv(.winapi) noreturn; + }; + + const stderr_handle = kernel32.GetStdHandle(STD_ERROR_HANDLE); + var bytes_written: DWORD = 0; + _ = kernel32.WriteFile(stderr_handle, DIVISION_BY_ZERO_MESSAGE.ptr, DIVISION_BY_ZERO_MESSAGE.len, &bytes_written, null); + kernel32.ExitProcess(136); + } else if (comptime builtin.os.tag != .wasi) { + _ = 
posix.write(posix.STDERR_FILENO, DIVISION_BY_ZERO_MESSAGE) catch {}; + posix.exit(136); // 128 + 8 (SIGFPE) + } else { + std.process.exit(136); + } +} + /// Host environment - contains GeneralPurposeAllocator for leak detection const HostEnv = struct { gpa: std.heap.GeneralPurposeAllocator(.{}), @@ -346,9 +387,9 @@ const hosted_function_ptrs = [_]builtins.host_abi.HostedFn{ /// Platform host entrypoint fn platform_main() !void { - // Install signal handlers for stack overflow and access violations + // Install signal handlers for stack overflow, access violations, and division by zero // This allows us to display helpful error messages instead of crashing - _ = builtins.handlers.install(handleRocStackOverflow, handleRocAccessViolation); + _ = builtins.handlers.install(handleRocStackOverflow, handleRocAccessViolation, handleRocArithmeticError); var host_env = HostEnv{ .gpa = std.heap.GeneralPurposeAllocator(.{}){}, From c2d313dece1af23a2e492bdf3c5a1bd4eefb7c0e Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 11:46:51 -0500 Subject: [PATCH 54/64] Improve comptime evaluator --- src/eval/comptime_evaluator.zig | 8 +++- src/eval/test/comptime_eval_test.zig | 63 ++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/src/eval/comptime_evaluator.zig b/src/eval/comptime_evaluator.zig index 1c29f677ed..10686f1470 100644 --- a/src/eval/comptime_evaluator.zig +++ b/src/eval/comptime_evaluator.zig @@ -1514,8 +1514,12 @@ pub const ComptimeEvaluator = struct { try self.reportProblem(expect_info.message, expect_info.region, .expect_failed); }, .error_eval => |error_info| { - const error_name = @errorName(error_info.err); - try self.reportProblem(error_name, error_info.region, .error_eval); + // Provide user-friendly messages for specific errors + const error_message = switch (error_info.err) { + error.DivisionByZero => "Division by zero", + else => @errorName(error_info.err), + }; + try self.reportProblem(error_message, 
error_info.region, .error_eval); }, } } diff --git a/src/eval/test/comptime_eval_test.zig b/src/eval/test/comptime_eval_test.zig index e174a78e6e..982fece6cd 100644 --- a/src/eval/test/comptime_eval_test.zig +++ b/src/eval/test/comptime_eval_test.zig @@ -1763,3 +1763,66 @@ test "comptime eval - to_str on unbound number literal" { // Flex var defaults to Dec; Dec.to_str is provided by builtins try testing.expectEqual(@as(usize, 0), result.problems.len()); } + +// --- Division by zero tests --- + +test "comptime eval - division by zero produces error" { + const src = + \\x = 5 // 0 + ; + + var result = try parseCheckAndEvalModule(src); + defer cleanupEvalModule(&result); + + const summary = try result.evaluator.evalAll(); + + // Should evaluate 1 declaration with no crashes (it's an error, not a crash) + try testing.expectEqual(@as(u32, 1), summary.evaluated); + try testing.expectEqual(@as(u32, 0), summary.crashed); + + // Should have 1 problem reported (division by zero) + try testing.expect(result.problems.len() >= 1); + try testing.expect(errorContains(result.problems, "Division by zero")); +} + +test "comptime eval - division by zero in expression" { + const src = + \\a = 10 + \\b = 0 + \\c = a // b + ; + + var result = try parseCheckAndEvalModule(src); + defer cleanupEvalModule(&result); + + const summary = try result.evaluator.evalAll(); + + // Should evaluate 3 declarations, c will cause an error + try testing.expectEqual(@as(u32, 3), summary.evaluated); + + // Should have 1 problem reported (division by zero) + try testing.expect(result.problems.len() >= 1); + try testing.expect(errorContains(result.problems, "Division by zero")); +} + +test "comptime eval - modulo by zero produces error" { + const src = + \\x = 10 % 0 + ; + + var result = try parseCheckAndEvalModule(src); + defer cleanupEvalModule(&result); + + const summary = try result.evaluator.evalAll(); + + // Should evaluate 1 declaration + try testing.expectEqual(@as(u32, 1), summary.evaluated); + + 
// Should have 1 problem reported (division by zero for modulo) + try testing.expect(result.problems.len() >= 1); + try testing.expect(errorContains(result.problems, "Division by zero")); +} + +// Note: "division by zero does not halt other defs" test is skipped because +// the interpreter state after an eval error may not allow continuing evaluation +// of subsequent definitions that share the same evaluation context. From c2a33a8313f4e9ea644f78e0a9e15f194f167906 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 11:56:11 -0500 Subject: [PATCH 55/64] Revise stack overflow error message --- src/base/stack_overflow.zig | 27 ++------------------------- src/cli/test/fx_platform_test.zig | 8 ++++---- test/fx/platform/host.zig | 17 +---------------- 3 files changed, 7 insertions(+), 45 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index a0a55e0f39..31f2da791d 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -16,28 +16,7 @@ const handlers = @import("builtins").handlers; const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.posix else undefined; /// Error message to display on stack overflow -const STACK_OVERFLOW_MESSAGE = - \\ - \\================================================================================ - \\STACK OVERFLOW in the Roc compiler - \\================================================================================ - \\ - \\The Roc compiler ran out of stack space. This is a bug in the compiler, - \\not in your code. - \\ - \\This often happens due to: - \\ - Infinite recursion in type translation or unification - \\ - Very deeply nested expressions without tail-call optimization - \\ - Cyclic data structures without proper cycle detection - \\ - \\Please report this issue at: https://github.com/roc-lang/roc/issues - \\ - \\Include the Roc code that triggered this error if possible. 
- \\ - \\================================================================================ - \\ - \\ -; +const STACK_OVERFLOW_MESSAGE = "\nThe Roc compiler overflowed its stack memory and had to exit.\n\n"; /// Callback for stack overflow in the compiler fn handleStackOverflow() noreturn { @@ -282,13 +261,11 @@ fn verifyHandlerOutput(exited_normally: bool, exit_code: u8, termination_signal: // Exit code 139 = generic segfault (handler caught it but didn't classify as stack overflow) if (exited_normally and (exit_code == 134 or exit_code == 139)) { // Check that our handler message was printed - const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "STACK OVERFLOW") != null; + const has_stack_overflow_msg = std.mem.indexOf(u8, stderr_output, "overflowed its stack memory") != null; const has_segfault_msg = std.mem.indexOf(u8, stderr_output, "Segmentation fault") != null; - const has_roc_compiler_msg = std.mem.indexOf(u8, stderr_output, "Roc compiler") != null; // Handler should have printed EITHER stack overflow message OR segfault message try std.testing.expect(has_stack_overflow_msg or has_segfault_msg); - try std.testing.expect(has_roc_compiler_msg); } else if (!exited_normally and (termination_signal == posix.SIG.SEGV or termination_signal == posix.SIG.BUS)) { // The handler might not have caught it - this can happen on some systems // where the signal delivery is different. Just warn and skip. diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 3c9579c7d5..526f13d7dc 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1598,20 +1598,20 @@ test "fx platform runtime stack overflow" { // After stack overflow handling is implemented, we expect: // 1. The process exits with code 134 (indicating stack overflow was caught) - // 2. Stderr contains a helpful "STACK OVERFLOW" message + // 2. 
Stderr contains a helpful message about stack overflow switch (run_result.term) { .Exited => |code| { if (code == 134) { // Stack overflow was caught and handled properly // Verify the helpful error message was printed - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "STACK OVERFLOW") != null); + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "overflowed its stack memory") != null); } else if (code == 139) { // Exit code 139 = 128 + 11 (SIGSEGV) - stack overflow was NOT handled // The Roc program crashed with a segfault that wasn't caught std.debug.print("\n", .{}); std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{}); std.debug.print("Process crashed with SIGSEGV (exit code 139).\n", .{}); - std.debug.print("Expected: exit code 134 with STACK OVERFLOW message\n", .{}); + std.debug.print("Expected: exit code 134 with stack overflow message\n", .{}); return error.StackOverflowNotHandled; } else { std.debug.print("Unexpected exit code: {}\n", .{code}); @@ -1624,7 +1624,7 @@ test "fx platform runtime stack overflow" { std.debug.print("\n", .{}); std.debug.print("Stack overflow handling NOT YET IMPLEMENTED for Roc programs.\n", .{}); std.debug.print("Process was killed by signal: {}\n", .{sig}); - std.debug.print("Expected: exit code 134 with STACK OVERFLOW message\n", .{}); + std.debug.print("Expected: exit code 134 with stack overflow message\n", .{}); return error.StackOverflowNotHandled; }, else => { diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index 476c731f53..e230b0733f 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -8,22 +8,7 @@ const posix = if (builtin.os.tag != .windows and builtin.os.tag != .wasi) std.po const trace_refcount = build_options.trace_refcount; /// Error message to display on stack overflow in a Roc program -const STACK_OVERFLOW_MESSAGE = - \\ - \\================================================================================ - \\STACK 
OVERFLOW in this Roc program - \\================================================================================ - \\ - \\This Roc program ran out of stack space. This can happen with: - \\ - Infinite recursion (a function that calls itself without stopping) - \\ - Very deeply nested function calls - \\ - \\Check your code for functions that might recurse infinitely. - \\ - \\================================================================================ - \\ - \\ -; +const STACK_OVERFLOW_MESSAGE = "\nThis Roc application overflowed its stack memory and crashed.\n\n"; /// Callback for stack overflow in a Roc program fn handleRocStackOverflow() noreturn { From b2034b944bdc7ad1de5d6d862611edd4ef80e2ab Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 12:15:56 -0500 Subject: [PATCH 56/64] Revise some error messages --- src/base/stack_overflow.zig | 18 +----------------- src/cli/test/fx_platform_test.zig | 4 ++-- test/fx/platform/host.zig | 15 +-------------- 3 files changed, 4 insertions(+), 33 deletions(-) diff --git a/src/base/stack_overflow.zig b/src/base/stack_overflow.zig index 31f2da791d..20a818099f 100644 --- a/src/base/stack_overflow.zig +++ b/src/base/stack_overflow.zig @@ -47,23 +47,7 @@ fn handleStackOverflow() noreturn { } /// Error message to display on arithmetic error (division by zero, etc.) -const ARITHMETIC_ERROR_MESSAGE = - \\ - \\================================================================================ - \\ARITHMETIC ERROR in the Roc compiler - \\================================================================================ - \\ - \\The Roc compiler encountered an arithmetic error (likely division by zero). - \\This is a bug in the compiler, not in your code. - \\ - \\Please report this issue at: https://github.com/roc-lang/roc/issues - \\ - \\Include the Roc code that triggered this error if possible. 
- \\ - \\================================================================================ - \\ - \\ -; +const ARITHMETIC_ERROR_MESSAGE = "\nThe Roc compiler divided by zero and had to exit.\n\n"; /// Callback for arithmetic errors (division by zero) in the compiler fn handleArithmeticError() noreturn { diff --git a/src/cli/test/fx_platform_test.zig b/src/cli/test/fx_platform_test.zig index 526f13d7dc..e43c7aba8f 100644 --- a/src/cli/test/fx_platform_test.zig +++ b/src/cli/test/fx_platform_test.zig @@ -1640,7 +1640,7 @@ test "fx platform runtime division by zero" { // // The error can be caught by either: // 1. The Roc interpreter (exit code 1, "DivisionByZero" message) - most common - // 2. The SIGFPE signal handler (exit code 136, "DIVISION BY ZERO" message) - native code + // 2. The SIGFPE signal handler (exit code 136, "divided by zero" message) - native code const allocator = testing.allocator; // The Roc program uses a var to prevent compile-time constant folding @@ -1652,7 +1652,7 @@ test "fx platform runtime division by zero" { .Exited => |code| { if (code == 136) { // Division by zero was caught by the SIGFPE handler (native code) - try testing.expect(std.mem.indexOf(u8, run_result.stderr, "DIVISION BY ZERO") != null); + try testing.expect(std.mem.indexOf(u8, run_result.stderr, "divided by zero") != null); } else if (code == 1) { // Division by zero was caught by the interpreter - this is the expected case // The interpreter catches it and reports "DivisionByZero" diff --git a/test/fx/platform/host.zig b/test/fx/platform/host.zig index e230b0733f..f00f57abce 100644 --- a/test/fx/platform/host.zig +++ b/test/fx/platform/host.zig @@ -73,20 +73,7 @@ fn handleRocAccessViolation(fault_addr: usize) noreturn { } /// Error message to display on division by zero in a Roc program -const DIVISION_BY_ZERO_MESSAGE = - \\ - \\================================================================================ - \\DIVISION BY ZERO in this Roc program - 
\\================================================================================ - \\ - \\This Roc program attempted to divide by zero. - \\ - \\Check your code for places where a divisor might be zero. - \\ - \\================================================================================ - \\ - \\ -; +const DIVISION_BY_ZERO_MESSAGE = "\nThis Roc application divided by zero and crashed.\n\n"; /// Callback for arithmetic errors (division by zero) in a Roc program fn handleRocArithmeticError() noreturn { From 3934fca617c9f1cc88d1ade9f238521c17d681c3 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sat, 6 Dec 2025 13:07:41 -0500 Subject: [PATCH 57/64] Fix tests --- test/snapshots/repl/try_is_eq.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/test/snapshots/repl/try_is_eq.md b/test/snapshots/repl/try_is_eq.md index 195a05f80a..ac1d26f9f5 100644 --- a/test/snapshots/repl/try_is_eq.md +++ b/test/snapshots/repl/try_is_eq.md @@ -7,10 +7,16 @@ type=repl ~~~roc » Try.Ok(1) == Try.Ok(1) » Try.Ok(1) == Try.Ok(2) +» Try.Ok(1) != Try.Ok(1) +» Try.Ok(1) != Try.Ok(2) ~~~ # OUTPUT -Crash: e_closure: failed to resolve capture value +True --- -Crash: e_closure: failed to resolve capture value +False +--- +False +--- +True # PROBLEMS NIL From 949799477cf8b47eab28c0cf78fc345e760f65ee Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sun, 7 Dec 2025 13:31:58 -0500 Subject: [PATCH 58/64] Add List.count_if --- src/build/roc/Builtin.roc | 13 +++++++++++++ test/snapshots/repl/list_count_if.md | 13 +++++++++++++ test/snapshots/repl/list_count_if_all_match.md | 13 +++++++++++++ test/snapshots/repl/list_count_if_empty.md | 13 +++++++++++++ test/snapshots/repl/list_count_if_none_match.md | 13 +++++++++++++ 5 files changed, 65 insertions(+) create mode 100644 test/snapshots/repl/list_count_if.md create mode 100644 test/snapshots/repl/list_count_if_all_match.md create mode 100644 test/snapshots/repl/list_count_if_empty.md create mode 100644 
test/snapshots/repl/list_count_if_none_match.md diff --git a/src/build/roc/Builtin.roc b/src/build/roc/Builtin.roc index e77e1f7e78..b05b50a17c 100644 --- a/src/build/roc/Builtin.roc +++ b/src/build/roc/Builtin.roc @@ -119,6 +119,19 @@ Builtin :: [].{ }, ) + count_if : List(a), (a -> Bool) -> U64 + count_if = |list, predicate| + List.fold( + list, + 0, + |acc, elem| + if predicate(elem) { + acc + 1 + } else { + acc + }, + ) + fold : List(item), state, (state, item -> state) -> state fold = |list, init, step| { var $state = init diff --git a/test/snapshots/repl/list_count_if.md b/test/snapshots/repl/list_count_if.md new file mode 100644 index 0000000000..ef470bbcef --- /dev/null +++ b/test/snapshots/repl/list_count_if.md @@ -0,0 +1,13 @@ +# META +~~~ini +description=List.count_if counts elements where predicate returns true +type=repl +~~~ +# SOURCE +~~~roc +» List.count_if([1, 2, 3, 4, 5], |x| x > 2) +~~~ +# OUTPUT +3 +# PROBLEMS +NIL diff --git a/test/snapshots/repl/list_count_if_all_match.md b/test/snapshots/repl/list_count_if_all_match.md new file mode 100644 index 0000000000..18a5f8385f --- /dev/null +++ b/test/snapshots/repl/list_count_if_all_match.md @@ -0,0 +1,13 @@ +# META +~~~ini +description=List.count_if returns list length when all elements match +type=repl +~~~ +# SOURCE +~~~roc +» List.count_if([1, 2, 3, 4, 5], |x| x > 0) +~~~ +# OUTPUT +5 +# PROBLEMS +NIL diff --git a/test/snapshots/repl/list_count_if_empty.md b/test/snapshots/repl/list_count_if_empty.md new file mode 100644 index 0000000000..d49aeb8e9e --- /dev/null +++ b/test/snapshots/repl/list_count_if_empty.md @@ -0,0 +1,13 @@ +# META +~~~ini +description=List.count_if on empty list returns 0 +type=repl +~~~ +# SOURCE +~~~roc +» List.count_if([], |x| x > 2) +~~~ +# OUTPUT +0 +# PROBLEMS +NIL diff --git a/test/snapshots/repl/list_count_if_none_match.md b/test/snapshots/repl/list_count_if_none_match.md new file mode 100644 index 0000000000..875be69bce --- /dev/null +++ 
b/test/snapshots/repl/list_count_if_none_match.md @@ -0,0 +1,13 @@ +# META +~~~ini +description=List.count_if returns 0 when no elements match +type=repl +~~~ +# SOURCE +~~~roc +» List.count_if([1, 2, 3], |x| x > 10) +~~~ +# OUTPUT +0 +# PROBLEMS +NIL From c116e986ef8b9c645e2c822bd2097c80e68bd5fd Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sun, 7 Dec 2025 15:25:32 -0500 Subject: [PATCH 59/64] Fix valgrind error --- .github/workflows/ci_zig.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci_zig.yml b/.github/workflows/ci_zig.yml index 5e21453e43..577d94bc71 100644 --- a/.github/workflows/ci_zig.yml +++ b/.github/workflows/ci_zig.yml @@ -208,6 +208,7 @@ jobs: # We can re-evaluate as new version of zig/valgrind come out. if: ${{ matrix.os == 'ubuntu-22.04' }} run: | + sudo apt-get update && sudo apt-get install -y libc6-dbg sudo snap install valgrind --classic valgrind --version ./ci/custom_valgrind.sh ./zig-out/bin/snapshot --debug --verbose From 24dffc2c3fca3552427f70afa6eab15b5501aa4f Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sun, 7 Dec 2025 16:38:52 -0500 Subject: [PATCH 60/64] Fix static-ness of build --- build.zig | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/build.zig b/build.zig index 58a90759c7..7b66bdecef 100644 --- a/build.zig +++ b/build.zig @@ -2206,9 +2206,8 @@ fn addStaticLlvmOptionsToModule(mod: *std.Build.Module) !void { mod.linkSystemLibrary("z", link_static); if (mod.resolved_target.?.result.os.tag != .windows or mod.resolved_target.?.result.abi != .msvc) { - // TODO: Can this just be `mod.link_libcpp = true`? Does that make a difference? - // This means we rely on clang-or-zig-built LLVM, Clang, LLD libraries. 
- mod.linkSystemLibrary("c++", .{}); + // Use Zig's bundled static libc++ to keep the binary statically linked + mod.link_libcpp = true; } if (mod.resolved_target.?.result.os.tag == .windows) { From ed95a91257354a7b0eb8b7d0cd90d3219805a700 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sun, 7 Dec 2025 17:29:47 -0500 Subject: [PATCH 61/64] musl only --- .github/workflows/ci_zig.yml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci_zig.yml b/.github/workflows/ci_zig.yml index 577d94bc71..37775075d3 100644 --- a/.github/workflows/ci_zig.yml +++ b/.github/workflows/ci_zig.yml @@ -85,18 +85,25 @@ jobs: include: - os: macos-15-intel cpu_flag: -Dcpu=x86_64_v3 + target_flag: '' - os: macos-15 cpu_flag: '' + target_flag: '' - os: ubuntu-22.04 cpu_flag: -Dcpu=x86_64_v3 + target_flag: -Dtarget=x86_64-linux-musl - os: ubuntu-24.04-arm cpu_flag: '' + target_flag: -Dtarget=aarch64-linux-musl - os: windows-2022 cpu_flag: -Dcpu=x86_64_v3 + target_flag: '' - os: windows-2025 cpu_flag: -Dcpu=x86_64_v3 + target_flag: '' - os: windows-11-arm cpu_flag: '' + target_flag: '' steps: - name: Checkout @@ -119,7 +126,7 @@ jobs: - name: build roc + repro executables uses: ./.github/actions/flaky-retry with: - command: "zig build -Dfuzz -Dsystem-afl=false -Doptimize=ReleaseFast ${{ matrix.cpu_flag }}" + command: "zig build -Dfuzz -Dsystem-afl=false -Doptimize=ReleaseFast ${{ matrix.cpu_flag }} ${{ matrix.target_flag }}" error_string_contains: "EndOfStream" retry_count: 3 @@ -136,7 +143,7 @@ jobs: - name: Run Test Platforms (Unix) if: runner.os != 'Windows' run: | - zig build test-cli + zig build test-cli ${{ matrix.target_flag }} - name: Setup MSVC (Windows) if: runner.os == 'Windows' @@ -165,13 +172,13 @@ jobs: zig-out\bin\roc.exe check ./src/PROFILING/bench_repeated_check.roc - name: zig snapshot tests - run: zig build snapshot -- --debug + run: zig build snapshot ${{ matrix.target_flag }} -- --debug # 1) in debug mode - name: 
build and execute tests, build repro executables uses: ./.github/actions/flaky-retry with: - command: "zig build test -Dfuzz -Dsystem-afl=false" + command: "zig build test -Dfuzz -Dsystem-afl=false ${{ matrix.target_flag }}" error_string_contains: "double roundtrip bundle" retry_count: 3 @@ -179,7 +186,7 @@ jobs: - name: Build and execute tests, build repro executables. All in release mode. uses: ./.github/actions/flaky-retry with: - command: "zig build test -Doptimize=ReleaseFast -Dfuzz -Dsystem-afl=false ${{ matrix.cpu_flag }}" + command: "zig build test -Doptimize=ReleaseFast -Dfuzz -Dsystem-afl=false ${{ matrix.cpu_flag }} ${{ matrix.target_flag }}" error_string_contains: "double roundtrip bundle" retry_count: 3 @@ -249,7 +256,7 @@ jobs: run: | git clean -fdx git reset --hard HEAD - nix develop ./src/ -c zig build && zig build snapshot && zig build test + nix develop ./src/ -c zig build ${{ matrix.target_flag }} && zig build snapshot ${{ matrix.target_flag }} && zig build test ${{ matrix.target_flag }} zig-cross-compile: needs: check-once From fb742d2ecd90723d7e195fca42941ccb44392670 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Sun, 7 Dec 2025 17:34:12 -0500 Subject: [PATCH 62/64] Revert a test for now --- test/snapshots/repl/try_is_eq.md | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test/snapshots/repl/try_is_eq.md b/test/snapshots/repl/try_is_eq.md index ac1d26f9f5..195a05f80a 100644 --- a/test/snapshots/repl/try_is_eq.md +++ b/test/snapshots/repl/try_is_eq.md @@ -7,16 +7,10 @@ type=repl ~~~roc » Try.Ok(1) == Try.Ok(1) » Try.Ok(1) == Try.Ok(2) -» Try.Ok(1) != Try.Ok(1) -» Try.Ok(1) != Try.Ok(2) ~~~ # OUTPUT -True +Crash: e_closure: failed to resolve capture value --- -False ---- -False ---- -True +Crash: e_closure: failed to resolve capture value # PROBLEMS NIL From 6aaf108d47c5d769031b93f041cb5f6a0f957160 Mon Sep 17 00:00:00 2001 From: Matthieu Pizenberg Date: Mon, 8 Dec 2025 09:53:41 +0100 Subject: [PATCH 63/64] Fix 
List.len result var Signed-off-by: Matthieu Pizenberg --- src/eval/interpreter.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eval/interpreter.zig b/src/eval/interpreter.zig index 80c44ee44f..19d19c89f7 100644 --- a/src/eval/interpreter.zig +++ b/src/eval/interpreter.zig @@ -2035,7 +2035,7 @@ pub const Interpreter = struct { const len_u64: u64 = @intCast(len_usize); const result_layout = layout.Layout.int(.u64); - const result_rt_var = try self.runtime_types.fresh(); + const result_rt_var = return_rt_var orelse unreachable; var out = try self.pushRaw(result_layout, 0, result_rt_var); out.is_initialized = false; try out.setInt(@intCast(len_u64)); From 72b72790af01bc34c5c762b7e736d5a630088aee Mon Sep 17 00:00:00 2001 From: Matthieu Pizenberg Date: Mon, 8 Dec 2025 09:57:33 +0100 Subject: [PATCH 64/64] Add List.len regression test Signed-off-by: Matthieu Pizenberg --- src/eval/test/eval_test.zig | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/eval/test/eval_test.zig b/src/eval/test/eval_test.zig index bf26430839..4f741ef68a 100644 --- a/src/eval/test/eval_test.zig +++ b/src/eval/test/eval_test.zig @@ -1387,3 +1387,25 @@ test "if block with local bindings - regression" { \\else 99 , 0, .no_trace); } + +test "List.len returns proper U64 nominal type for method calls - regression" { + // Regression test for InvalidMethodReceiver when calling methods on List.len result + // Bug report: `n = List.len([]); _str = n.to_str()` crashed with InvalidMethodReceiver + // The issue was that List.len created a fresh runtime type variable instead of using + // the return_rt_var parameter, which prevented method resolution from finding the + // U64 nominal type information needed to look up .to_str() + try runExpectStr( + \\{ + \\ n = List.len([]) + \\ n.to_str() + \\} + , "0", .no_trace); + + // Also test with non-empty list + try runExpectStr( + \\{ + \\ n = List.len([1, 2, 3]) + \\ n.to_str() + \\} + , "3", .no_trace); +}