checkpoint

Folkert 2023-09-13 17:23:13 +02:00
parent 3c8dbce72e
commit 5e4f43e1d8
12 changed files with 296 additions and 109 deletions

View file

@@ -3,9 +3,9 @@ const builtin = @import("builtin");
 const always_inline = std.builtin.CallOptions.Modifier.always_inline;
 const Monotonic = std.builtin.AtomicOrder.Monotonic;
 
-const DEBUG_INCDEC = false;
+const DEBUG_INCDEC = true;
 const DEBUG_TESTING_ALLOC = false;
-const DEBUG_ALLOC = false;
+const DEBUG_ALLOC = true;
 
 pub fn WithOverflow(comptime T: type) type {
     return extern struct { value: T, has_overflowed: bool };
@@ -360,7 +360,7 @@ pub fn isUnique(
     const refcount = (isizes - 1)[0];
 
     if (DEBUG_INCDEC and builtin.target.cpu.arch != .wasm32) {
-        std.debug.print("| is unique {*}\n", .{isizes});
+        std.debug.print("| is unique {*}\n", .{isizes - 1});
    }
 
     return refcount == REFCOUNT_ONE_ISIZE;
@@ -436,7 +436,7 @@ pub fn allocateWithRefcount(
     var new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable;
 
     if (DEBUG_ALLOC and builtin.target.cpu.arch != .wasm32) {
-        std.debug.print("+ allocated {*} ({} bytes with alignment {})\n", .{ new_bytes, data_bytes, element_alignment });
+        std.debug.print("+ allocated {*} ({} bytes with alignment {})\n", .{ new_bytes, data_bytes, alignment });
    }
 
     const data_ptr = new_bytes + alignment;

View file

@@ -128,7 +128,13 @@ impl IntWidth {
             U128 | I128 => {
                 // the C ABI defines 128-bit integers to always be 16B aligned,
                 // according to https://reviews.llvm.org/D28990#655487
-                16
+                //
+                // however, rust does not always think that this is true
+                match target_info.architecture {
+                    Architecture::X86_64 => 8,
+                    Architecture::Aarch64 | Architecture::Aarch32 | Architecture::Wasm32 => 16,
+                    Architecture::X86_32 => 8,
+                }
             }
         }
     }
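
Review note: the mismatch this hunk works around is observable directly from Rust. Historically, `align_of::<i128>()` reported 8 on x86-64 even though the SysV C ABI (and clang/gcc for `__int128`) specify 16; later Rust releases changed the language-level alignment to 16 to match. A minimal check, assuming a toolchain from around this commit's date:

    use std::mem::align_of;

    fn main() {
        // On x86-64 with Rust toolchains of this era this printed 8,
        // while C compilers used 16 for __int128.
        println!("align_of::<i128>() = {}", align_of::<i128>());
        println!("align_of::<u128>() = {}", align_of::<u128>());
    }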

View file

@@ -987,7 +987,11 @@ impl<
             }
             // Note that on windows there is only 1 general return register so we can't use this optimisation
             LayoutRepr::I128 | LayoutRepr::U128 if CC::GENERAL_RETURN_REGS.len() > 1 => {
-                let offset = self.storage_manager.claim_stack_area(dst, 16);
+                let offset = self.storage_manager.claim_stack_area_layout(
+                    self.layout_interner,
+                    *dst,
+                    Layout::U128,
+                );
                 ASM::mov_base32_reg64(&mut self.buf, offset, CC::GENERAL_RETURN_REGS[0]);
                 ASM::mov_base32_reg64(&mut self.buf, offset + 8, CC::GENERAL_RETURN_REGS[1]);
@@ -1264,9 +1268,11 @@ impl<
                 let buf = &mut self.buf;
 
-                let struct_size = self.layout_interner.stack_size(*return_layout);
-
-                let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);
+                let base_offset = self.storage_manager.claim_stack_area_layout(
+                    self.layout_interner,
+                    *dst,
+                    *return_layout,
+                );
 
                 match self.layout_interner.get_repr(*num_layout) {
                     LayoutRepr::Builtin(Int(
@@ -1453,14 +1459,16 @@ impl<
                 let buf = &mut self.buf;
 
-                let struct_size = self.layout_interner.stack_size(*return_layout);
-                let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);
-
                 match self.layout_interner.get_repr(*num_layout) {
                     LayoutRepr::Builtin(Int(
                         IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8,
                     )) => {
+                        let base_offset = self.storage_manager.claim_stack_area_layout(
+                            self.layout_interner,
+                            *dst,
+                            *return_layout,
+                        );
                         let dst_reg = self
                             .storage_manager
                             .claim_general_reg(buf, &Symbol::DEV_TMP);
@@ -1481,14 +1489,27 @@ impl<
                         self.free_symbol(&Symbol::DEV_TMP);
                         self.free_symbol(&Symbol::DEV_TMP2);
                     }
-                    LayoutRepr::Builtin(Int(
-                        IntWidth::U64 | IntWidth::U32 | IntWidth::U16 | IntWidth::U8,
-                    )) => {
-                        todo!("mulChecked for unsigned integers")
+                    LayoutRepr::Builtin(Builtin::Int(int_width)) => {
+                        self.build_fn_call(
+                            dst,
+                            bitcode::NUM_MUL_CHECKED_INT[int_width].to_string(),
+                            &[*src1, *src2],
+                            &[*num_layout, *num_layout],
+                            return_layout,
+                        );
                     }
                     LayoutRepr::Builtin(Builtin::Float(_width)) => {
                         todo!("mulChecked for floats")
                     }
+                    LayoutRepr::Builtin(Builtin::Decimal) => {
+                        self.build_fn_call(
+                            dst,
+                            bitcode::DEC_MUL_WITH_OVERFLOW.to_string(),
+                            &[*src1, *src2],
+                            &[Layout::DEC, Layout::DEC],
+                            return_layout,
+                        );
+                    }
                     x => todo!("mulChecked: layout, {:?}", x),
                 }
             }
@@ -2614,9 +2635,9 @@ impl<
         self.load_layout_stack_size(elem_layout, Symbol::DEV_TMP2);
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let lowlevel_args = [
             capacity,
@@ -2677,9 +2698,9 @@ impl<
         );
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let lowlevel_args = bumpalo::vec![
             in self.env.arena;
@@ -2751,9 +2772,9 @@ impl<
         self.load_layout_stack_size(elem_layout, Symbol::DEV_TMP2);
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let lowlevel_args = [
             list,
@@ -2863,9 +2884,9 @@ impl<
         self.load_layout_stack_size(elem_layout, Symbol::DEV_TMP3);
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let ret_fields =
             if let LayoutRepr::Struct(field_layouts) = self.layout_interner.get_repr(*ret_layout) {
@@ -2960,9 +2981,9 @@ impl<
         self.load_layout_stack_size(elem_layout, Symbol::DEV_TMP2);
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let lowlevel_args = bumpalo::vec![
             in self.env.arena;
@@ -3028,9 +3049,9 @@ impl<
         self.load_layout_stack_size(elem_layout, Symbol::DEV_TMP3);
 
         // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, *dst, *ret_layout);
 
         let lowlevel_args = [
             list,
@@ -3074,7 +3095,10 @@ impl<
     }
 
     fn create_empty_array(&mut self, sym: &Symbol) {
-        let base_offset = self.storage_manager.claim_stack_area(sym, 24);
+        let base_offset = self
+            .storage_manager
+            .claim_stack_area_with_alignment(*sym, 24, 8);
         self.storage_manager
             .with_tmp_general_reg(&mut self.buf, |_storage_manager, buf, reg| {
                 ASM::mov_reg64_imm64(buf, reg, 0);
@@ -3161,7 +3185,7 @@ impl<
        self.storage_manager.with_tmp_general_reg(
            &mut self.buf,
            |storage_manager, buf, tmp_reg| {
-                let base_offset = storage_manager.claim_stack_area(sym, 24);
+                let base_offset = storage_manager.claim_stack_area_with_alignment(*sym, 24, 8);
                ASM::mov_base32_reg64(buf, base_offset, ptr_reg);
 
                ASM::mov_reg64_imm64(buf, tmp_reg, elements.len() as i64);
@@ -3510,9 +3534,11 @@ impl<
            return;
        }
 
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(&allocation, element_width);
+        let base_offset = self.storage_manager.claim_stack_area_layout(
+            self.layout_interner,
+            allocation,
+            element_layout,
+        );
 
        let ptr_reg = self.storage_manager.claim_general_reg(&mut self.buf, &ptr);
@@ -3636,9 +3662,11 @@ impl<
            .chain(nullable_id + 1..number_of_tags);
 
        let table = self.debug_symbol("tag_id_table");
-        let table_offset = self
-            .storage_manager
-            .claim_stack_area(&table, (number_of_tags * 2) as _);
+        let table_offset = self.storage_manager.claim_stack_area_with_alignment(
+            table,
+            (number_of_tags * 2) as _,
+            8,
+        );
 
        let mut offset = table_offset;
        for i in it {
@@ -3744,7 +3772,7 @@ impl<
            UnionLayout::NonRecursive(field_layouts) => {
                let id_offset = data_size - data_alignment;
                let base_offset = self.storage_manager.claim_stack_area_with_alignment(
-                    sym,
+                    *sym,
                    data_size,
                    data_alignment,
                );
@@ -3941,7 +3969,7 @@ impl<
                let (data_size, data_alignment) =
                    union_layout.data_size_and_alignment(self.layout_interner);
                let to_offset = self.storage_manager.claim_stack_area_with_alignment(
-                    &scratch_space,
+                    scratch_space,
                    data_size,
                    data_alignment,
                );
@@ -3993,11 +4021,14 @@ impl<
                    fields,
                );
 
-                let (data_size, _) = union_layout.data_size_and_alignment(self.layout_interner);
+                let (data_size, data_alignment) =
+                    union_layout.data_size_and_alignment(self.layout_interner);
 
                let scratch_space = self.debug_symbol("scratch_space");
-                let to_offset = self
-                    .storage_manager
-                    .claim_stack_area(&scratch_space, data_size);
+                let to_offset = self.storage_manager.claim_stack_area_with_alignment(
+                    scratch_space,
+                    data_size,
+                    data_alignment,
+                );
 
                // this is a cheaty copy, because the destination may be wider than the source
                let (from_offset, _) =
@@ -4070,7 +4101,11 @@ impl<
                self.storage_manager.with_tmp_general_reg(
                    &mut self.buf,
                    |storage_manager, buf, reg| {
-                        let base_offset = storage_manager.claim_stack_area(sym, 16);
+                        let base_offset = storage_manager.claim_stack_area_layout(
+                            self.layout_interner,
+                            *sym,
+                            Layout::U128,
+                        );
 
                        let mut num_bytes = [0; 8];
                        num_bytes.copy_from_slice(&bytes[..8]);
@@ -4108,7 +4143,11 @@ impl<
                self.storage_manager.with_tmp_general_reg(
                    &mut self.buf,
                    |storage_manager, buf, reg| {
-                        let base_offset = storage_manager.claim_stack_area(sym, 16);
+                        let base_offset = storage_manager.claim_stack_area_layout(
+                            self.layout_interner,
+                            *sym,
+                            Layout::DEC,
+                        );
 
                        let mut num_bytes = [0; 8];
                        num_bytes.copy_from_slice(&bytes[..8]);
@@ -4129,7 +4168,12 @@ impl<
                self.storage_manager.with_tmp_general_reg(
                    &mut self.buf,
                    |storage_manager, buf, reg| {
-                        let base_offset = storage_manager.claim_stack_area(sym, 24);
+                        let base_offset = storage_manager.claim_stack_area_layout(
+                            self.layout_interner,
+                            *sym,
+                            Layout::STR,
+                        );
+
                        let mut bytes = [0; 24];
                        bytes[..x.len()].copy_from_slice(x.as_bytes());
                        bytes[23] = (x.len() as u8) | 0b1000_0000;
@@ -4455,7 +4499,11 @@ impl<
            (U64, U128) => {
                let src_reg = self.storage_manager.load_to_general_reg(buf, src);
 
-                let base_offset = self.storage_manager.claim_stack_area(dst, 16);
+                let base_offset = self.storage_manager.claim_stack_area_layout(
+                    self.layout_interner,
+                    *dst,
+                    Layout::U128,
+                );
 
                let tmp = Symbol::DEV_TMP;
                let tmp_reg = self.storage_manager.claim_general_reg(buf, &tmp);
@@ -4553,9 +4601,9 @@ impl<
        dst: Symbol,
    ) {
        // Setup the return location.
-        let base_offset = self
-            .storage_manager
-            .claim_stack_area(&dst, self.layout_interner.stack_size(ret_layout));
+        let base_offset =
+            self.storage_manager
+                .claim_stack_area_layout(self.layout_interner, dst, ret_layout);
 
        let tmp = self.debug_symbol("call_with_stack_return_result");
@@ -4811,7 +4859,7 @@ impl<
        tmp_reg: GeneralReg,
        offset: i32,
    ) {
-        let base_offset = storage_manager.claim_stack_area(&dst, 24);
+        let base_offset = storage_manager.claim_stack_area_with_alignment(dst, 24, 8);
 
        ASM::mov_reg64_mem64_offset32(buf, tmp_reg, ptr_reg, offset);
        ASM::mov_base32_reg64(buf, base_offset, tmp_reg);
@@ -4840,7 +4888,8 @@ impl<
            return;
        }
 
-        let base_offset = storage_manager.claim_stack_area(&dst, stack_size);
+        // 16 is a guess here. In practice this is the highest alignment we encounter in roc datastructures
+        let base_offset = storage_manager.claim_stack_area_with_alignment(dst, stack_size, 16);
 
        if size - copied >= 8 {
            for _ in (0..(size - copied)).step_by(8) {
@@ -4896,7 +4945,11 @@ impl<
                storage_manager.with_tmp_general_reg(
                    buf,
                    |storage_manager, buf, tmp_reg| {
-                        let base_offset = storage_manager.claim_stack_area(&dst, 16);
+                        let base_offset = storage_manager.claim_stack_area_layout(
+                            layout_interner,
+                            dst,
+                            Layout::U128,
+                        );
 
                        ASM::mov_reg64_mem64_offset32(buf, tmp_reg, ptr_reg, offset);
                        ASM::mov_base32_reg64(buf, base_offset, tmp_reg);
@@ -4943,7 +4996,11 @@ impl<
            Builtin::Decimal => {
                // same as 128-bit integer
                storage_manager.with_tmp_general_reg(buf, |storage_manager, buf, tmp_reg| {
-                    let base_offset = storage_manager.claim_stack_area(&dst, 16);
+                    let base_offset = storage_manager.claim_stack_area_layout(
+                        layout_interner,
+                        dst,
+                        Layout::DEC,
+                    );
 
                    ASM::mov_reg64_mem64_offset32(buf, tmp_reg, ptr_reg, offset);
                    ASM::mov_base32_reg64(buf, base_offset, tmp_reg);
@@ -5079,7 +5136,9 @@ impl<
    }
 
    let (from_offset, stack_size) = storage_manager.stack_offset_and_size(&value);
-    debug_assert!(from_offset % 8 == 0);
+
+    // on x86_64 this is too strict.
+    // debug_assert_eq!(from_offset % 8, 0);
 
    storage_manager.with_tmp_general_reg(buf, |_storage_manager, buf, tmp_reg| {
        let mut copied = 0;

View file

@@ -679,7 +679,7 @@ impl<
            self.symbol_storage_map.insert(*sym, NoData);
            return;
        }
-        let base_offset = self.claim_stack_area(sym, struct_size);
+        let base_offset = self.claim_stack_area_layout(layout_interner, *sym, *layout);
 
        let mut in_layout = *layout;
        let layout = loop {
@@ -955,7 +955,7 @@ impl<
    pub fn ensure_symbol_on_stack(&mut self, buf: &mut Vec<'a, u8>, sym: &Symbol) {
        match self.remove_storage_for_sym(sym) {
            Reg(reg_storage) => {
-                let base_offset = self.claim_stack_size(8);
+                let base_offset = self.claim_stack_size_with_alignment(8, 8);
                match reg_storage {
                    General(reg) => ASM::mov_base32_reg64(buf, base_offset, reg),
                    Float(reg) => ASM::mov_base32_freg64(buf, base_offset, reg),
@@ -1015,7 +1015,7 @@ impl<
        match self.remove_storage_for_sym(sym) {
            Reg(reg_storage) => {
                debug_assert_eq!(reg_storage, wanted_reg);
-                let base_offset = self.claim_stack_size(8);
+                let base_offset = self.claim_stack_size_with_alignment(8, 8);
                match reg_storage {
                    General(reg) => ASM::mov_base32_reg64(buf, base_offset, reg),
                    Float(reg) => ASM::mov_base32_freg64(buf, base_offset, reg),
@@ -1135,7 +1135,7 @@ impl<
    ) {
        match layout_interner.get_repr(layout) {
            single_register_layouts!() | pointer_layouts!() => {
-                let base_offset = self.claim_stack_size(8);
+                let base_offset = self.claim_stack_size_with_alignment(8, 8);
                self.symbol_storage_map.insert(
                    symbol,
                    Stack(Primitive {
@@ -1156,7 +1156,7 @@ impl<
                if stack_size == 0 {
                    self.no_data(&symbol);
                } else {
-                    self.claim_stack_area(&symbol, stack_size);
+                    self.claim_stack_area_layout(layout_interner, symbol, layout);
                }
            }
        }
@@ -1288,32 +1288,47 @@ impl<
        self.join_param_map.insert(*id, param_storage);
    }
 
-    /// claim_stack_area is the public wrapper around claim_stack_size.
-    /// It also deals with updating symbol storage.
-    /// It returns the base offset of the stack area.
-    /// It should only be used for complex data and not primitives.
-    pub fn claim_stack_area(&mut self, sym: &Symbol, size: u32) -> i32 {
-        self.claim_stack_area_with_alignment(sym, size, 8)
+    /// Claim space on the stack for a certain layout. Size and alignment are handled
+    ///
+    /// This function:
+    ///
+    /// - deals with updating symbol storage.
+    /// - should only be used for complex data and not primitives.
+    /// - returns the base offset of the stack area.
+    pub(crate) fn claim_stack_area_layout(
+        &mut self,
+        layout_interner: &STLayoutInterner<'_>,
+        sym: Symbol,
+        layout: InLayout<'_>,
+    ) -> i32 {
+        let (size, alignment) = layout_interner.stack_size_and_alignment(layout);
+        self.claim_stack_area_with_alignment(sym, size, Ord::min(alignment, 8))
    }
 
+    /// Claim space on the stack of a certain size and alignment.
+    ///
+    /// This function:
+    ///
+    /// - deals with updating symbol storage.
+    /// - should only be used for complex data and not primitives.
+    /// - returns the base offset of the stack area.
    pub fn claim_stack_area_with_alignment(
        &mut self,
-        sym: &Symbol,
+        sym: Symbol,
        size: u32,
        alignment: u32,
    ) -> i32 {
-        let base_offset = self.claim_stack_size_with_alignment(size, alignment);
+        let base_offset = self.claim_stack_size_with_alignment(size, Ord::min(alignment, 8));
        self.symbol_storage_map
-            .insert(*sym, Stack(Complex { base_offset, size }));
+            .insert(sym, Stack(Complex { base_offset, size }));
        self.allocation_map
-            .insert(*sym, Rc::new((base_offset, size)));
+            .insert(sym, Rc::new((base_offset, size)));
        base_offset
    }
 
    pub fn claim_pointer_stack_area(&mut self, sym: Symbol) -> i32 {
-        let size = 8;
-
-        let base_offset = self.claim_stack_size(size);
+        // pointers are 8 bytes wide with an alignment of 8
+        let base_offset = self.claim_stack_size_with_alignment(8, 8);
 
        self.symbol_storage_map.insert(
            sym,
@@ -1326,13 +1341,9 @@ impl<
        base_offset
    }
 
-    /// claim_stack_size claims `amount` bytes from the stack alignind to 8.
+    /// claim_stack_size claims `amount` bytes from the stack
    /// This may be free space in the stack or result in increasing the stack size.
    /// It returns base pointer relative offset of the new data.
-    fn claim_stack_size(&mut self, amount: u32) -> i32 {
-        self.claim_stack_size_with_alignment(amount, 8)
-    }
-
    fn claim_stack_size_with_alignment(&mut self, amount: u32, alignment: u32) -> i32 {
        debug_assert_ne!(amount, 0);
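
Review note: every stack claim now funnels through `claim_stack_size_with_alignment`. The core idea is standard offset rounding; a minimal standalone sketch under assumed semantics (the `StackFrame` type and its field are illustrative, not the actual StorageManager internals):

    /// Illustrative stand-in for the storage manager's stack bookkeeping.
    struct StackFrame {
        /// total bytes claimed so far, as a positive size
        stack_size: u32,
    }

    impl StackFrame {
        /// Claim `amount` bytes aligned to `alignment`, returning a negative
        /// base-pointer-relative offset, in the spirit of claim_stack_size_with_alignment.
        fn claim(&mut self, amount: u32, alignment: u32) -> i32 {
            debug_assert_ne!(amount, 0);
            debug_assert!(alignment.is_power_of_two());

            // round the current size up to the requested alignment, then grow
            let aligned = (self.stack_size + alignment - 1) & !(alignment - 1);
            self.stack_size = aligned + amount;
            -(self.stack_size as i32)
        }
    }

    fn main() {
        let mut frame = StackFrame { stack_size: 0 };
        assert_eq!(frame.claim(8, 8), -8);
        assert_eq!(frame.claim(16, 16), -32); // padded from 8 up to 16 before claiming
    }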

View file

@@ -253,7 +253,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
    #[inline(always)]
    fn load_args<'a>(
-        _buf: &mut Vec<'a, u8>,
+        buf: &mut Vec<'a, u8>,
        storage_manager: &mut StorageManager<
            'a,
            '_,
@@ -281,7 +281,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
        }
 
        for (in_layout, sym) in args.iter() {
-            state.load_arg(storage_manager, layout_interner, *sym, *in_layout);
+            state.load_arg(buf, storage_manager, layout_interner, *sym, *in_layout);
        }
    }
@@ -307,7 +307,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
        if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
            // Save space on the stack for the result we will be return.
            let base_offset =
-                storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
+                storage_manager.claim_stack_area_layout(layout_interner, *dst, *ret_layout);
            // Set the first reg to the address base + offset.
            let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
            general_i += 1;
@@ -413,7 +413,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
            }
            _ if !Self::returns_via_arg_pointer(layout_interner, layout) => {
                let size = layout_interner.stack_size(*layout);
-                let offset = storage_manager.claim_stack_area(sym, size);
+                let offset =
+                    storage_manager.claim_stack_area_layout(layout_interner, *sym, *layout);
                if size <= 8 {
                    X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
                } else if size <= 16 {
@@ -971,6 +972,7 @@ struct X64_64SystemVLoadArgs {
impl X64_64SystemVLoadArgs {
    fn load_arg<'a>(
        &mut self,
+        buf: &mut Vec<u8>,
        storage_manager: &mut X86_64StorageManager<'a, '_, X86_64SystemV>,
        layout_interner: &mut STLayoutInterner<'a>,
        sym: Symbol,
@@ -990,6 +992,7 @@ impl X64_64SystemVLoadArgs {
                self.argument_offset += stack_size as i32;
            }
            LayoutRepr::LambdaSet(lambda_set) => self.load_arg(
+                buf,
                storage_manager,
                layout_interner,
                sym,
@@ -1001,12 +1004,10 @@ impl X64_64SystemVLoadArgs {
                self.argument_offset += stack_size as i32;
            }
            LayoutRepr::Builtin(Builtin::Int(IntWidth::U128 | IntWidth::I128)) => {
-                storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
-                self.argument_offset += stack_size as i32;
+                self.load_arg_general_128bit(buf, storage_manager, sym);
            }
            LayoutRepr::Builtin(Builtin::Decimal) => {
-                storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
-                self.argument_offset += stack_size as i32;
+                self.load_arg_general_128bit(buf, storage_manager, sym);
            }
            LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
                // for now, just also store this on the stack
@@ -1036,6 +1037,33 @@ impl X64_64SystemVLoadArgs {
        }
    }
 
+    fn load_arg_general_128bit(
+        &mut self,
+        buf: &mut Vec<u8>,
+        storage_manager: &mut X86_64StorageManager<'_, '_, X86_64SystemV>,
+        sym: Symbol,
+    ) {
+        type ASM = X86_64Assembler;
+
+        let reg1 = X86_64SystemV::GENERAL_PARAM_REGS.get(self.general_i);
+        let reg2 = X86_64SystemV::GENERAL_PARAM_REGS.get(self.general_i + 1);
+
+        match (reg1, reg2) {
+            (Some(reg1), Some(reg2)) => {
+                let offset = storage_manager.claim_stack_area_with_alignment(sym, 16, 16);
+                ASM::mov_base32_reg64(buf, offset, *reg1);
+                ASM::mov_base32_reg64(buf, offset + 8, *reg2);
+                self.general_i += 2;
+            }
+            _ => {
+                storage_manager.complex_stack_arg(&sym, self.argument_offset, 16);
+                self.argument_offset += 16;
+            }
+        }
+    }
+
    fn load_arg_float(
        &mut self,
        storage_manager: &mut X86_64StorageManager<'_, '_, X86_64SystemV>,
@@ -1081,7 +1109,11 @@ impl X64_64WindowsFastCallLoadArgs {
        match X86_64WindowsFastcall::GENERAL_PARAM_REGS.get(self.general_i) {
            Some(ptr_reg) => {
                // if there is a general purpose register available, use it to store a pointer to the value
-                let base_offset = storage_manager.claim_stack_area(&sym, stack_size);
+                let base_offset = storage_manager.claim_stack_area_layout(
+                    layout_interner,
+                    sym,
+                    in_layout,
+                );
                let tmp_reg = X86_64WindowsFastcall::GENERAL_RETURN_REGS[0];
 
                copy_to_base_offset::<_, _, ASM>(
@@ -1115,8 +1147,10 @@ impl X64_64WindowsFastCallLoadArgs {
                self.argument_offset += stack_size as i32;
            }
            LayoutRepr::Builtin(Builtin::Int(IntWidth::U128 | IntWidth::I128)) => {
-                storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
-                self.argument_offset += stack_size as i32;
+                self.load_arg_general_128bit(buf, storage_manager, sym);
+            }
+            LayoutRepr::Builtin(Builtin::Decimal) => {
+                self.load_arg_general_128bit(buf, storage_manager, sym);
            }
            LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
                // for now, just also store this on the stack
@@ -1146,6 +1180,33 @@ impl X64_64WindowsFastCallLoadArgs {
        }
    }
 
+    fn load_arg_general_128bit(
+        &mut self,
+        buf: &mut Vec<u8>,
+        storage_manager: &mut X86_64StorageManager<'_, '_, X86_64WindowsFastcall>,
+        sym: Symbol,
+    ) {
+        type ASM = X86_64Assembler;
+
+        let reg1 = X86_64WindowsFastcall::GENERAL_PARAM_REGS.get(self.general_i);
+        let reg2 = X86_64WindowsFastcall::GENERAL_PARAM_REGS.get(self.general_i + 1);
+
+        match (reg1, reg2) {
+            (Some(reg1), Some(reg2)) => {
+                let offset = storage_manager.claim_stack_area_with_alignment(sym, 16, 16);
+                ASM::mov_base32_reg64(buf, offset, *reg1);
+                ASM::mov_base32_reg64(buf, offset + 8, *reg2);
+                self.general_i += 2;
+            }
+            _ => {
+                storage_manager.complex_stack_arg(&sym, self.argument_offset, 16);
+                self.argument_offset += 16;
+            }
+        }
+    }
+
    fn load_arg_float(
        &mut self,
        storage_manager: &mut X86_64StorageManager<'_, '_, X86_64WindowsFastcall>,
@@ -1360,7 +1421,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
        if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
            // Save space on the stack for the result we will be return.
            let base_offset =
-                storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
+                storage_manager.claim_stack_area_layout(layout_interner, *dst, *ret_layout);
            // Set the first reg to the address base + offset.
            let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
@@ -1464,8 +1525,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
            // For windows (and zig 0.9 changes in zig 0.10) we need to match what zig does,
            // in this case uses RAX & RDX to return the value
            LayoutRepr::I128 | LayoutRepr::U128 => {
-                let size = layout_interner.stack_size(*layout);
-                let offset = storage_manager.claim_stack_area(sym, size);
+                let offset =
+                    storage_manager.claim_stack_area_layout(layout_interner, *sym, *layout);
                X86_64Assembler::mov_base32_reg64(buf, offset, X86_64GeneralReg::RAX);
                X86_64Assembler::mov_base32_reg64(buf, offset + 0x08, X86_64GeneralReg::RDX);
            }
@@ -1474,7 +1535,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
            }
            _ if !Self::returns_via_arg_pointer(layout_interner, layout) => {
                let size = layout_interner.stack_size(*layout);
-                let offset = storage_manager.claim_stack_area(sym, size);
+                let offset =
+                    storage_manager.claim_stack_area_layout(layout_interner, *sym, *layout);
                if size <= 8 {
                    X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
                } else {
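
Review note: both new `load_arg_general_128bit` helpers encode the same calling-convention rule — a 128-bit value that fits in registers arrives as two consecutive 64-bit general-purpose registers and is spilled as two adjacent 8-byte stores (low half at `offset`, high half at `offset + 8` on little-endian x86-64). A hedged illustration of that split in plain Rust, not backend code:

    // How a u128 maps onto two 64-bit register-sized halves.
    fn split_u128(value: u128) -> (u64, u64) {
        let low = value as u64; // first register, stored at `offset`
        let high = (value >> 64) as u64; // second register, stored at `offset + 8`
        (low, high)
    }

    fn main() {
        let (low, high) = split_u128(0x0123_4567_89ab_cdef_fedc_ba98_7654_3210);
        assert_eq!(low, 0xfedc_ba98_7654_3210);
        assert_eq!(high, 0x0123_4567_89ab_cdef);
    }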

View file

@@ -455,7 +455,6 @@ fn build_object<'a, B: Backend<'a>>(
            );
        }
        AssemblyBackendMode::Binary => { /* do nothing */ }
->>>>>>> 12686f23b (repl helper codegen)
    }
 
    build_proc_symbol(

View file

@@ -3970,11 +3970,28 @@ fn mul_checked_dec() {
    assert_evals_to!(
        indoc!(
            r#"
-            Num.mulChecked 5.0dec 2.0dec == Ok 10.0dec
+            Num.mulChecked 5.0dec 2.0dec
            "#
        ),
-        true,
-        bool
+        RocResult::ok(RocDec::from_str("10.0").unwrap()),
+        RocResult<RocDec, ()>
+    );
+}
+
+#[test]
+#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
+fn mul_checked_u128() {
+    assert_evals_to!(
+        indoc!(
+            r#"
+            x : Result U128 [ Overflow ]
+            x = Num.mulChecked 5u128 2u128
+
+            x
+            "#
+        ),
+        RocResult::ok(5u128 * 2u128),
+        RocResult<u128, ()>
    );
}
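
Review note: per the `Result U128 [ Overflow ]` annotation in the new test, `Num.mulChecked` yields `Ok` on success and an error on overflow — the direct analogue of Rust's checked multiplication:

    fn main() {
        // Same contract the tests assert via RocResult:
        assert_eq!(5u128.checked_mul(2), Some(10));
        assert_eq!(u128::MAX.checked_mul(2), None); // overflow -> Err Overflow on the Roc side
    }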

View file

@@ -2061,7 +2061,7 @@ fn non_unary_union_with_lambda_set_with_imported_toplevels_issue_4733() {
            _ -> (\a -> a)
 
-        main = ((fn "*") 3) * ((fn "+") 5)
+        main = ((fn "*") 3i64) * ((fn "+") 5)
        "#
        ),
        90,

View file

@@ -216,7 +216,7 @@ pub fn helper(
        roc_bitcode::host_tempfile().expect("failed to write host builtins object to tempfile");
 
    // TODO make this an environment variable
-    if false {
+    if true {
        let file_path = std::env::temp_dir().join("app.o");
        println!("gen-test object file written to {}", file_path.display());
        std::fs::copy(&app_o_file, file_path).unwrap();

View file

@@ -425,9 +425,19 @@ fn list_contains() {
#[cfg(not(feature = "wasm"))]
#[test]
-fn list_sum() {
+fn list_sum_empty() {
    expect_success("List.sum []", "0 : Num a");
+}
+
+#[cfg(not(feature = "wasm"))]
+#[test]
+fn list_sum_num() {
    expect_success("List.sum [1, 2, 3]", "6 : Num *");
+}
+
+#[cfg(not(feature = "wasm"))]
+#[test]
+fn list_sum_frac() {
    expect_success("List.sum [1.1, 2.2, 3.3]", "6.6 : Frac *");
}
@@ -677,16 +687,28 @@ fn type_problem() {
}
 
#[test]
-fn issue_2149() {
+fn issue_2149_i8_ok() {
    expect_success(r#"Str.toI8 "127""#, "Ok 127 : Result I8 [InvalidNumStr]");
+}
+
+#[test]
+fn issue_2149_i8_err() {
    expect_success(
        r#"Str.toI8 "128""#,
        "Err InvalidNumStr : Result I8 [InvalidNumStr]",
    );
+}
+
+#[test]
+fn issue_2149_i16_ok() {
    expect_success(
        r#"Str.toI16 "32767""#,
        "Ok 32767 : Result I16 [InvalidNumStr]",
    );
+}
+
+#[test]
+fn issue_2149_i16_err() {
    expect_success(
        r#"Str.toI16 "32768""#,
        "Err InvalidNumStr : Result I16 [InvalidNumStr]",

View file

@@ -1,5 +1,5 @@
 //! Provides Rust representations of Roc data structures.
-#![cfg_attr(not(feature = "std"), no_std)]
+// #![cfg_attr(not(feature = "std"), no_std)]
 #![crate_type = "lib"]
 
 use arrayvec::ArrayString;
@@ -227,10 +227,19 @@ impl<T, E> Drop for RocResult<T, E> {
    }
}
 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct RocDec([u8; 16]);
 
+impl Debug for RocDec {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("RocDec")
+            .field(&self.0)
+            .field(&self.to_str())
+            .finish()
+    }
+}
+
impl RocDec {
    pub const MIN: Self = Self(i128::MIN.to_ne_bytes());
    pub const MAX: Self = Self(i128::MAX.to_ne_bytes());
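
Review note: the derived `Debug` only dumped RocDec's raw native-endian bytes; the manual impl adds a decoded rendering next to them. The same pattern in miniature (the `Fixed128` type here is illustrative, not part of roc_std):

    use std::fmt;

    /// Illustrative stand-in for a 16-byte fixed-point representation.
    struct Fixed128([u8; 16]);

    impl fmt::Debug for Fixed128 {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Show the raw bytes *and* a decoded view, so debug output
            // stays readable when the byte soup alone is not.
            f.debug_tuple("Fixed128")
                .field(&self.0)
                .field(&i128::from_ne_bytes(self.0))
                .finish()
        }
    }

    fn main() {
        println!("{:?}", Fixed128(10i128.to_ne_bytes()));
    }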

View file

@@ -547,6 +547,8 @@ impl<T> Drop for RocList<T> {
                    ManuallyDrop::drop(&mut *elements.as_ptr().add(index));
                }
 
+                dbg!(self.ptr_to_allocation());
+
                // Release the memory.
                roc_dealloc(self.ptr_to_allocation(), Self::alloc_alignment());
            }