fix two bugs in the dev backend

Folkert 2023-09-12 17:35:08 +02:00
parent dfee6740dd
commit 3909443ef4
4 changed files with 92 additions and 10 deletions


@@ -4,6 +4,8 @@ const always_inline = std.builtin.CallOptions.Modifier.always_inline;
 const Monotonic = std.builtin.AtomicOrder.Monotonic;

 const DEBUG_INCDEC = false;
+const DEBUG_TESTING_ALLOC = false;
+const DEBUG_ALLOC = false;

 pub fn WithOverflow(comptime T: type) type {
     return extern struct { value: T, has_overflowed: bool };
@@ -60,19 +62,35 @@ comptime {
 }

 fn testing_roc_alloc(size: usize, _: u32) callconv(.C) ?*anyopaque {
-    return @ptrCast(?*anyopaque, std.testing.allocator.alloc(u8, size) catch unreachable);
+    const ptr = @ptrCast(?*anyopaque, std.testing.allocator.alloc(u8, size) catch unreachable);
+
+    if (DEBUG_TESTING_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("+ alloc {*}: {} bytes\n", .{ ptr, size });
+    }
+
+    return ptr;
 }

 fn testing_roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*anyopaque {
     const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
     const slice = ptr[0..old_size];
-    return @ptrCast(?*anyopaque, std.testing.allocator.realloc(slice, new_size) catch unreachable);
+
+    const new = @ptrCast(?*anyopaque, std.testing.allocator.realloc(slice, new_size) catch unreachable);
+
+    if (DEBUG_TESTING_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("- realloc {*}\n", .{new});
+    }
+
+    return new;
 }

 fn testing_roc_dealloc(c_ptr: *anyopaque, _: u32) callconv(.C) void {
     const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
+
+    if (DEBUG_TESTING_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("💀 dealloc {*}\n", .{ptr});
+    }
+
     std.testing.allocator.destroy(ptr);
 }
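
The testing allocator now traces every allocation and free behind a compile-time flag, so the instrumentation costs nothing unless DEBUG_TESTING_ALLOC is flipped on. A minimal sketch of the same pattern in Rust, with hypothetical names; the commit itself does this in Zig:

    use std::alloc::{alloc, dealloc, Layout};

    // Mirrors DEBUG_TESTING_ALLOC above: a compile-time constant, so the
    // branches below are compiled out entirely when tracing is disabled.
    const DEBUG_TESTING_ALLOC: bool = false;

    unsafe fn traced_alloc(layout: Layout) -> *mut u8 {
        let ptr = unsafe { alloc(layout) };
        if DEBUG_TESTING_ALLOC {
            eprintln!("+ alloc {:p}: {} bytes", ptr, layout.size());
        }
        ptr
    }

    unsafe fn traced_dealloc(ptr: *mut u8, layout: Layout) {
        if DEBUG_TESTING_ALLOC {
            eprintln!("💀 dealloc {:p}", ptr);
        }
        unsafe { dealloc(ptr, layout) };
    }

    fn main() {
        let layout = Layout::from_size_align(16, 8).unwrap();
        unsafe {
            let p = traced_alloc(layout);
            traced_dealloc(p, layout);
        }
    }
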
@@ -88,6 +106,9 @@ pub fn alloc(size: usize, alignment: u32) ?[*]u8 {
 }

 pub fn realloc(c_ptr: [*]u8, new_size: usize, old_size: usize, alignment: u32) [*]u8 {
+    if (DEBUG_INCDEC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("- realloc {*}\n", .{c_ptr});
+    }
     return @ptrCast([*]u8, roc_realloc(c_ptr, new_size, old_size, alignment));
 }
@@ -269,9 +290,18 @@ inline fn free_ptr_to_refcount(
 ) void {
     if (RC_TYPE == Refcount.none) return;

     const extra_bytes = std.math.max(alignment, @sizeOf(usize));
+    const allocation_ptr = @ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize));
+
+    if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("💀 free {*}\n", .{allocation_ptr});
+    }
+
     // NOTE: we don't even check whether the refcount is "infinity" here!
-    dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
+    dealloc(allocation_ptr, alignment);
+
+    if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("💀 freed {*}\n", .{allocation_ptr});
+    }
 }

 inline fn decref_ptr_to_refcount(
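
Factoring the pointer arithmetic into `allocation_ptr` also documents the heap layout: the refcount sits at the end of `extra_bytes = max(alignment, @sizeOf(usize))` of front padding, one word before the user data. The same arithmetic as a small Rust sketch (hypothetical helper, not from the commit):

    use std::mem::size_of;

    // Given a pointer to the refcount word, recover the start of the heap
    // allocation: the refcount occupies the last word of the front padding,
    // which is max(alignment, word size) bytes wide.
    fn allocation_ptr(refcount_ptr: *const u8, alignment: usize) -> *const u8 {
        let extra_bytes = alignment.max(size_of::<usize>());
        refcount_ptr.wrapping_sub(extra_bytes - size_of::<usize>())
    }

    fn main() {
        // with 16-byte alignment on a 64-bit target, the allocation starts
        // 8 bytes before the refcount word
        let fake_refcount_ptr = 0x1000 as *const u8;
        assert_eq!(allocation_ptr(fake_refcount_ptr, 16) as usize, 0x0ff8);
    }
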
@@ -326,12 +356,12 @@ pub fn isUnique(
     const isizes: [*]isize = @intToPtr([*]isize, masked_ptr);

-    if (DEBUG_INCDEC and builtin.target.cpu.arch != .wasm32) {
-        std.debug.print("| is unique {*}\n", .{&bytes[0]});
-    }
-
     const refcount = (isizes - 1)[0];

+    if (DEBUG_INCDEC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("| is unique {*}\n", .{isizes});
+    }
+
     return refcount == REFCOUNT_ONE_ISIZE;
 }
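
`isUnique` reads the refcount stored one word before the data pointer; the debug print now reports `isizes`, the masked, word-aligned pointer that is actually dereferenced, rather than `&bytes[0]`. Roughly, in Rust (the isize::MIN sentinel is an assumption, mirroring how these builtins define REFCOUNT_ONE_ISIZE):

    use std::mem::size_of;

    // "exactly one reference" sentinel, assumed to match REFCOUNT_ONE_ISIZE
    const REFCOUNT_ONE: isize = isize::MIN;

    unsafe fn is_unique(data_ptr: *const u8) -> bool {
        // the refcount lives in the word immediately before the data
        let refcount_ptr = (data_ptr as *const isize).wrapping_sub(1);
        unsafe { *refcount_ptr == REFCOUNT_ONE }
    }

    fn main() {
        // a refcount word followed by one word of payload
        let cell: [isize; 2] = [REFCOUNT_ONE, 0];
        let data_ptr = unsafe { (cell.as_ptr() as *const u8).add(size_of::<isize>()) };
        assert!(unsafe { is_unique(data_ptr) });
    }
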
@@ -398,8 +428,16 @@ pub fn allocateWithRefcount(
     const alignment = std.math.max(ptr_width, element_alignment);
     const length = alignment + data_bytes;

+    if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("+ before allocate {} {} {}\n", .{ data_bytes, element_alignment, length });
+    }
+
     var new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable;

+    if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) {
+        std.debug.print("+ allocated {*}\n", .{new_bytes});
+    }
+
     const data_ptr = new_bytes + alignment;
     const refcount_ptr = @ptrCast([*]usize, @alignCast(ptr_width, data_ptr) - ptr_width);
     refcount_ptr[0] = if (RC_TYPE == Refcount.none) REFCOUNT_MAX_ISIZE else REFCOUNT_ONE;
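
`allocateWithRefcount` reserves `alignment` extra bytes in front of the data and stores the refcount in the word immediately before the returned pointer. Restated as a tiny Rust model of the layout math (a simplification, not the committed code):

    use std::mem::size_of;

    // Returns (total allocation length, offset of the user data) for a
    // refcounted allocation: `alignment` bytes of front padding, with the
    // refcount occupying the last word of that padding.
    fn refcounted_layout(data_bytes: usize, element_alignment: usize) -> (usize, usize) {
        let ptr_width = size_of::<usize>();
        let alignment = ptr_width.max(element_alignment);
        let length = alignment + data_bytes;
        (length, alignment)
    }

    fn main() {
        // 24 bytes of data with 8-byte alignment on a 64-bit target:
        // a 32-byte allocation, data at offset 8, refcount at offset 0
        assert_eq!(refcounted_layout(24, 8), (32, 8));
    }
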


@@ -3412,6 +3412,22 @@ impl<
         let stores_tag_id_as_data =
             union_layout.stores_tag_id_as_data(self.storage_manager.target_info);

+        let largest_variant = tags
+            .iter()
+            .map(|fields| {
+                let struct_layout = self
+                    .layout_interner
+                    .insert_direct_no_semantic(LayoutRepr::Struct(fields));
+                (
+                    struct_layout,
+                    self.layout_interner.stack_size(struct_layout),
+                )
+            })
+            .max_by(|(_, a), (_, b)| a.cmp(b))
+            .unwrap()
+            .0;
+
         // construct the payload as a struct on the stack
         let data_struct_layout = self
             .layout_interner
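
This is the first of the two bug fixes: when boxing a tag union payload, the heap cell must be sized for the union's largest variant, not for whichever variant happens to be under construction, or the smaller variants under-allocate. The selection logic, reduced to a standalone Rust sketch with toy variant sizes (the real code compares interned layouts by stack size):

    // Pick the variant whose struct representation needs the most space;
    // the boxed payload must be at least that large regardless of which
    // variant is currently being built.
    fn largest_variant(variant_sizes: &[(&'static str, u32)]) -> &'static str {
        variant_sizes
            .iter()
            .max_by(|(_, a), (_, b)| a.cmp(b))
            .map(|(name, _)| *name)
            .expect("a union has at least one variant")
    }

    fn main() {
        // e.g. ZAdd carries two boxed sub-expressions, Val a single i64
        let variants = [("Val", 8), ("ZAdd", 16)];
        assert_eq!(largest_variant(&variants), "ZAdd");
    }
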
@@ -3462,12 +3478,28 @@ impl<
             fields,
         );

+        let (data_size, _) = union_layout.data_size_and_alignment(self.layout_interner);
+        let scratch_space = self.debug_symbol("scratch_space");
+        let to_offset = self
+            .storage_manager
+            .claim_stack_area(&scratch_space, data_size);
+
+        // this is a cheaty copy, because the destination may be wider than the source
+        let (from_offset, _) =
+            self.storage_manager.stack_offset_and_size(&scratch_space);
+        self.storage_manager.copy_to_stack_offset(
+            &mut self.buf,
+            data_size,
+            from_offset,
+            to_offset,
+        );
+
         // now effectively box this struct
         let untagged_pointer_symbol = self.debug_symbol("untagged_pointer");
         self.expr_box(
             untagged_pointer_symbol,
             whole_struct_symbol,
-            data_struct_layout,
+            largest_variant,
             reuse,
         );
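
Before boxing, the freshly built struct is staged through a scratch area of `data_size` bytes, the full union width, so the box reads from a region guaranteed to be wide enough; that is what the "cheaty copy" comment is about. The idea on plain byte buffers (a simplification of the stack-offset bookkeeping above):

    // Copy a payload into a scratch region sized for the whole union.
    // src may be narrower than data_size; the tail of the scratch region
    // is padding that the tag id makes unreachable.
    fn copy_into_scratch(src: &[u8], data_size: usize) -> Vec<u8> {
        let mut scratch = vec![0u8; data_size];
        scratch[..src.len()].copy_from_slice(src);
        scratch
    }

    fn main() {
        let val_payload = 3i64.to_le_bytes(); // 8 bytes, the narrower variant
        let boxed = copy_into_scratch(&val_payload, 16); // full union width
        assert_eq!(boxed.len(), 16);
    }
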


@@ -3644,12 +3644,25 @@ fn specialize_proc_help<'a>(
                 let symbol = get_specialized_name(**symbol);

+                let fresh_symbol =
+                    env.named_unique_symbol(&format!("{:?}_closure", symbol));
+
                 specialized_body = Stmt::Let(
-                    symbol,
+                    fresh_symbol,
                     expr,
                     layout,
                     env.arena.alloc(specialized_body),
                 );
+
+                // the same symbol may be used where
+                // - the closure is created
+                // - the closure is consumed
+                substitute_in_exprs(
+                    env.arena,
+                    &mut specialized_body,
+                    symbol,
+                    fresh_symbol,
+                );
             }
         }
         ClosureRepresentation::AlphabeticOrderStruct(field_layouts) => {
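
And this is the second bug fix: the closure was previously bound by `Stmt::Let` under the same symbol that its use sites refer to, which goes wrong when creation and consumption share that name; the fix binds a fresh symbol and rewrites the body. A toy rename-and-substitute pass in the same spirit (types invented for illustration):

    // A miniature expression tree; `substitute` plays the role of
    // substitute_in_exprs, rewriting every use of one symbol to another.
    #[derive(Debug, PartialEq)]
    enum Expr {
        Sym(&'static str),
        Let(&'static str, Box<Expr>, Box<Expr>),
    }

    fn substitute(expr: &mut Expr, from: &str, to: &'static str) {
        match expr {
            Expr::Sym(s) if *s == from => *s = to,
            Expr::Sym(_) => {}
            Expr::Let(_, value, body) => {
                substitute(value, from, to);
                substitute(body, from, to);
            }
        }
    }

    fn main() {
        // the closure is re-bound under a fresh name, but the body still
        // consumes it under the old one ...
        let mut stmt = Expr::Let(
            "f_closure",
            Box::new(Expr::Sym("make_closure")),
            Box::new(Expr::Sym("f")),
        );
        // ... so rewrite the uses to match the fresh binding
        substitute(&mut stmt, "f", "f_closure");
        assert_eq!(
            stmt,
            Expr::Let(
                "f_closure",
                Box::new(Expr::Sym("make_closure")),
                Box::new(Expr::Sym("f_closure")),
            )
        );
    }
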


@@ -2258,7 +2258,6 @@ fn nested_switch() {
                 _ -> e

-
     expr : Expr
     expr = ZAdd (Val 3) (ZAdd (Val 4) (Val 5))