first working linked list test

Folkert 2023-05-17 20:07:44 +02:00
parent 3364d03fcc
commit e33c2b3c84
6 changed files with 386 additions and 320 deletions

View file

@@ -1065,7 +1065,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
}
#[inline(always)]
fn neq_reg64_reg64_reg64(
fn neq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
dst: AArch64GeneralReg,

View file

@@ -1,6 +1,6 @@
use crate::{
single_register_floats, single_register_int_builtins, single_register_integers, Backend, Env,
Relocation,
pointer_layouts, single_register_floats, single_register_int_builtins,
single_register_integers, Backend, Env, Relocation,
};
use bumpalo::collections::{CollectIn, Vec};
use roc_builtins::bitcode::{self, FloatWidth, IntWidth};
@@ -517,7 +517,7 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
Self::eq_reg_reg_reg(buf, RegisterWidth::W64, dst, src1, src2)
}
fn neq_reg64_reg64_reg64(
fn neq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: GeneralReg,
@@ -904,27 +904,30 @@ impl<
ASM::mov_base32_reg64(&mut self.buf, offset, CC::GENERAL_RETURN_REGS[0]);
ASM::mov_base32_reg64(&mut self.buf, offset + 8, CC::GENERAL_RETURN_REGS[1]);
}
other => {
//
match other {
LayoutRepr::Boxed(_) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
ASM::mov_reg64_reg64(&mut self.buf, dst_reg, CC::GENERAL_RETURN_REGS[0]);
}
LayoutRepr::LambdaSet(lambda_set) => {
self.move_return_value(dst, &lambda_set.runtime_representation())
}
_ => {
CC::load_returned_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
ret_layout,
);
}
}
pointer_layouts!() => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
ASM::mov_reg64_reg64(&mut self.buf, dst_reg, CC::GENERAL_RETURN_REGS[0]);
}
LayoutRepr::LambdaSet(lambda_set) => {
self.move_return_value(dst, &lambda_set.runtime_representation())
}
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
CC::load_returned_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
ret_layout,
);
}
_ => {
CC::load_returned_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
ret_layout,
);
}
}
}
@@ -1558,7 +1561,7 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
ASM::neq_reg_reg_reg(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::STR => {
self.build_fn_call(
@@ -1576,7 +1579,7 @@ impl<
let width = RegisterWidth::W8; // we're comparing booleans
let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
ASM::neq_reg_reg_reg(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
}
x => todo!("NumNeq: layout, {:?}", x),
}
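The W8 width is load-bearing here: a boolean only defines the low byte of its register, so the comparison must ignore the upper 56 bits. The same idea in plain Rust (a sketch, not compiler code):

fn booleans_equal(a: u64, b: u64) -> bool {
    // compare only the low byte, as the W8 eq/neq instructions do
    (a as u8) == (b as u8)
}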
@@ -1714,25 +1717,13 @@ impl<
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg); // zero out dst reg
ASM::mov_reg32_freg32(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::neq_reg64_reg64_reg64(
buf,
RegisterWidth::W32,
dst_reg,
dst_reg,
mask_reg,
);
ASM::neq_reg_reg_reg(buf, RegisterWidth::W32, dst_reg, dst_reg, mask_reg);
}
Layout::F64 => {
ASM::mov_reg64_imm64(buf, mask_reg, 0x7ff0_0000_0000_0000);
ASM::mov_reg64_freg64(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::neq_reg64_reg64_reg64(
buf,
RegisterWidth::W64,
dst_reg,
dst_reg,
mask_reg,
);
ASM::neq_reg_reg_reg(buf, RegisterWidth::W64, dst_reg, dst_reg, mask_reg);
}
_ => unreachable!(),
}
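The 0x7ff0_0000_0000_0000 mask selects the f64 exponent bits; IEEE-754 sets all of them exactly for infinities and NaNs, so the neq against the mask yields true precisely for ordinary finite values. A plain-Rust sketch of the same test (illustrative, not the compiler's code):

fn f64_exponent_not_all_ones(x: f64) -> bool {
    const EXP_MASK: u64 = 0x7ff0_0000_0000_0000;
    // mirrors: and(dst, bits, mask); neq(dst, dst, mask)
    (x.to_bits() & EXP_MASK) != EXP_MASK
}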
@@ -2601,7 +2592,7 @@ impl<
union_layout: &UnionLayout<'a>,
) {
match union_layout {
UnionLayout::NonRecursive(tag_layouts) | UnionLayout::Recursive(tag_layouts) => {
UnionLayout::NonRecursive(tag_layouts) => {
self.storage_manager.load_field_at_index(
self.layout_interner,
sym,
@@ -2610,6 +2601,34 @@ impl<
tag_layouts[tag_id as usize],
);
}
UnionLayout::NullableUnwrapped {
nullable_id,
other_fields,
} => {
debug_assert_ne!(tag_id, *nullable_id as TagIdIntType);
let element_layout = other_fields[index as usize];
let ptr_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, structure);
let mut offset = 0;
for field in &other_fields[..index as usize] {
offset += self.layout_interner.stack_size(*field);
}
Self::ptr_read(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
ptr_reg,
offset as i32,
element_layout,
*sym,
);
}
_ => {
let union_in_layout = self
.layout_interner
@@ -2654,7 +2673,7 @@ impl<
}
// box is just a pointer on the stack
let base_offset = self.storage_manager.claim_stack_area(&sym, 8);
let base_offset = self.storage_manager.claim_pointer_stack_area(sym);
ASM::mov_base32_reg64(&mut self.buf, base_offset, ptr_reg);
}
@@ -2699,13 +2718,53 @@ impl<
}
fn get_tag_id(&mut self, sym: &Symbol, structure: &Symbol, union_layout: &UnionLayout<'a>) {
self.storage_manager.load_union_tag_id(
self.layout_interner,
&mut self.buf,
sym,
structure,
union_layout,
);
let layout_interner: &mut STLayoutInterner<'a> = self.layout_interner;
let _buf: &mut Vec<'a, u8> = &mut self.buf;
match union_layout {
UnionLayout::NonRecursive(tags) => {
self.storage_manager.load_union_tag_id_nonrecursive(
layout_interner,
&mut self.buf,
sym,
structure,
tags,
);
}
UnionLayout::NullableUnwrapped { nullable_id, .. } => {
// simple is_null check on the pointer
let tmp = Symbol::DEV_TMP5;
let reg = self.storage_manager.claim_general_reg(&mut self.buf, &tmp);
ASM::mov_reg64_imm64(&mut self.buf, reg, 0);
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, sym);
let src1_reg = reg;
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, structure);
match *nullable_id {
true => {
ASM::neq_reg_reg_reg(
&mut self.buf,
RegisterWidth::W64,
dst_reg,
src1_reg,
src2_reg,
);
}
false => {
ASM::eq_reg_reg_reg(
&mut self.buf,
RegisterWidth::W64,
dst_reg,
src1_reg,
src2_reg,
);
}
}
}
x => todo!("getting tag id of union with layout ({:?})", x),
};
}
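In plain Rust, the emitted eq/neq collapses to a single comparison of the pointer against zero (a sketch mirroring the match above; names are illustrative):

fn nullable_unwrapped_tag_id(ptr: u64, nullable_id: bool) -> u16 {
    if nullable_id {
        (ptr != 0) as u16 // the neq_reg_reg_reg arm
    } else {
        (ptr == 0) as u16 // the eq_reg_reg_reg arm
    }
}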
fn tag(
@@ -2715,14 +2774,73 @@ impl<
union_layout: &UnionLayout<'a>,
tag_id: TagIdIntType,
) {
self.storage_manager.create_union(
self.layout_interner,
&mut self.buf,
sym,
union_layout,
fields,
tag_id,
)
let target_info = self.storage_manager.target_info;
let layout_interner: &mut STLayoutInterner<'a> = self.layout_interner;
let buf: &mut Vec<'a, u8> = &mut self.buf;
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, target_info);
let id_offset = data_size - data_alignment;
let base_offset = self.storage_manager.claim_stack_area(sym, data_size);
let mut current_offset = base_offset;
let it = fields.iter().zip(field_layouts[tag_id as usize].iter());
for (field, field_layout) in it {
self.storage_manager.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
// put the tag id in the right place
self.storage_manager
.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
ASM::mov_reg64_imm64(buf, reg, tag_id as i64);
let total_id_offset = base_offset as u32 + id_offset;
debug_assert!(total_id_offset % data_alignment == 0);
// pick the right instruction based on the alignment of the tag id
if field_layouts.len() <= u8::MAX as _ {
ASM::mov_base32_reg8(buf, total_id_offset as i32, reg);
} else {
ASM::mov_base32_reg16(buf, total_id_offset as i32, reg);
}
});
}
UnionLayout::NullableUnwrapped {
nullable_id,
other_fields,
} => {
if tag_id == *nullable_id as TagIdIntType {
// step 1: make the struct
let temp_sym = Symbol::DEV_TMP5;
let layout =
layout_interner.insert_no_semantic(LayoutRepr::Struct(other_fields));
self.storage_manager.create_struct(
layout_interner,
buf,
&temp_sym,
&layout,
fields,
);
// now effectively box this struct
self.expr_box(*sym, Symbol::DEV_TMP5, layout)
} else {
// it's just a null pointer
self.load_literal_i64(sym, 0);
}
}
x => todo!("creating unions with layout: {:?}", x),
}
}
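For the linked-list test this commit enables, the NullableUnwrapped encoding built above can be pictured in plain Rust (illustrative layout, assuming the empty tag is the nullable one):

// Nil  => just a null pointer; no stack struct, no allocation (load_literal_i64 0)
// Cons => the payload struct is assembled on the stack, then boxed; the
//         union value is only the resulting pointer, no tag is stored
struct ConsCell {
    first: i64,            // element (field order is illustrative)
    rest: *const ConsCell, // null terminates the list
}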
fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>) {
@@ -2864,7 +2982,7 @@ impl<
if self.storage_manager.is_stored_primitive(sym) {
// Just load it to the correct type of reg as a stand alone value.
match repr {
single_register_integers!() => {
single_register_integers!() | pointer_layouts!() => {
self.storage_manager.load_to_specified_general_reg(
&mut self.buf,
sym,
@@ -2878,22 +2996,14 @@ impl<
CC::FLOAT_RETURN_REGS[0],
);
}
other => match other {
LayoutRepr::Boxed(_) => {
// treat like a 64-bit integer
self.storage_manager.load_to_specified_general_reg(
&mut self.buf,
sym,
CC::GENERAL_RETURN_REGS[0],
);
}
LayoutRepr::LambdaSet(lambda_set) => {
self.return_symbol(sym, &lambda_set.runtime_representation())
}
_ => {
internal_error!("All primitive values should fit in a single register");
}
},
LayoutRepr::LambdaSet(lambda_set) => {
self.return_symbol(sym, &lambda_set.runtime_representation())
}
LayoutRepr::Union(UnionLayout::NonRecursive(_))
| LayoutRepr::Builtin(_)
| LayoutRepr::Struct(_) => {
internal_error!("All primitive values should fit in a single register");
}
}
} else {
CC::return_complex_symbol(
@@ -3440,7 +3550,7 @@ impl<
}
},
LayoutRepr::Boxed(_) => {
pointer_layouts!() => {
// the same as 64-bit integer (for 64-bit targets)
let dst_reg = storage_manager.claim_general_reg(buf, &dst);
ASM::mov_reg64_mem64_offset32(buf, dst_reg, ptr_reg, offset);
@@ -3491,8 +3601,6 @@ impl<
dst,
);
}
_ => todo!("unboxing of {:?}", layout_interner.dbg(element_in_layout)),
}
}
@@ -3528,7 +3636,7 @@ impl<
let sym_reg = storage_manager.load_to_float_reg(buf, &value);
ASM::movesd_mem64_offset32_freg64(buf, ptr_reg, element_offset, sym_reg);
}
LayoutRepr::Boxed(_) => {
pointer_layouts!() => {
let sym_reg = storage_manager.load_to_general_reg(buf, &value);
ASM::mov_mem64_offset32_reg64(buf, ptr_reg, element_offset, sym_reg);
}
@@ -3702,3 +3810,17 @@ macro_rules! single_register_layouts {
single_register_integers!() | single_register_floats!()
};
}
#[macro_export]
macro_rules! pointer_layouts {
() => {
LayoutRepr::Boxed(_)
| LayoutRepr::RecursivePointer(_)
| LayoutRepr::Union(
UnionLayout::Recursive(_)
| UnionLayout::NonNullableUnwrapped(_)
| UnionLayout::NullableWrapped { .. }
| UnionLayout::NullableUnwrapped { .. },
)
};
}
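Since the macro expands to a pattern, call sites can use it directly in match arms, as the changes above do. A self-contained toy version of the same trick (toy types, not the compiler's):

enum Repr { Int, Float, Boxed, RecursivePointer }

macro_rules! pointerish {
    () => { Repr::Boxed | Repr::RecursivePointer };
}

fn fits_in_general_reg(r: &Repr) -> bool {
    match r {
        Repr::Int | pointerish!() => true,
        _ => false,
    }
}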

View file

@@ -1,7 +1,7 @@
use crate::{
generic64::{Assembler, CallConv, RegTrait},
sign_extended_int_builtins, single_register_floats, single_register_int_builtins,
single_register_integers, single_register_layouts, Env,
pointer_layouts, sign_extended_int_builtins, single_register_floats,
single_register_int_builtins, single_register_integers, single_register_layouts, Env,
};
use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
@@ -91,7 +91,7 @@ pub struct StorageManager<
phantom_cc: PhantomData<CC>,
phantom_asm: PhantomData<ASM>,
pub(crate) env: &'r Env<'a>,
target_info: TargetInfo,
pub(crate) target_info: TargetInfo,
// Data about where each symbol is stored.
symbol_storage_map: MutMap<Symbol, Storage<GeneralReg, FloatReg>>,
@@ -598,40 +598,38 @@ impl<
}
}
pub fn load_union_tag_id(
pub fn load_union_tag_id_nonrecursive(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
_buf: &mut Vec<'a, u8>,
sym: &Symbol,
structure: &Symbol,
union_layout: &UnionLayout<'a>,
tags: &[&[InLayout]],
) {
let union_layout = UnionLayout::NonRecursive(tags);
// This must be removed and reinserted for ownership and mutability reasons.
let owned_data = self.remove_allocation_for_sym(structure);
self.allocation_map
.insert(*structure, Rc::clone(&owned_data));
match union_layout {
UnionLayout::NonRecursive(_) => {
let (union_offset, _) = self.stack_offset_and_size(structure);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();
let (union_offset, _) = self.stack_offset_and_size(structure);
let size = discriminant.stack_size();
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
Stack(ReferencedPrimitive {
base_offset: union_offset + id_offset as i32,
size,
sign_extend: false, // tag ids are always unsigned
}),
);
}
x => todo!("getting tag id of union with layout ({:?})", x),
}
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();
let size = discriminant.stack_size();
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
Stack(ReferencedPrimitive {
base_offset: union_offset + id_offset as i32,
size,
sign_extend: false, // tag ids are always unsigned
}),
);
}
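The tag id of a non-recursive union sits in the last alignment-sized slot of its stack area, so reading it needs no code at all: the symbol is simply aliased to a byte range of the existing allocation. Worked numbers (illustrative, 64-bit target):

fn id_offset(data_size: u32, data_alignment: u32) -> u32 {
    // e.g. largest variant holds two I64s: payload = 16, alignment = 8,
    // data_size = 24 (payload plus an aligned tag-id slot),
    // so the tag id lives at 24 - 8 = 16 bytes past the union's base offset
    data_size - data_alignment
}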
// Loads the dst to be the latter 64 bits of a list (its length).
@@ -700,56 +698,6 @@ impl<
}
}
/// Creates a union on the stack, moving the data in fields into the union and tagging it.
pub fn create_union(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
union_layout: &UnionLayout<'a>,
fields: &'a [Symbol],
tag_id: TagIdIntType,
) {
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let base_offset = self.claim_stack_area(sym, data_size);
let mut current_offset = base_offset;
let it = fields.iter().zip(field_layouts[tag_id as usize].iter());
for (field, field_layout) in it {
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
// put the tag id in the right place
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
ASM::mov_reg64_imm64(buf, reg, tag_id as i64);
let total_id_offset = base_offset as u32 + id_offset;
debug_assert!(total_id_offset % data_alignment == 0);
// pick the right instruction based on the alignment of the tag id
if field_layouts.len() <= u8::MAX as _ {
ASM::mov_base32_reg8(buf, total_id_offset as i32, reg);
} else {
ASM::mov_base32_reg16(buf, total_id_offset as i32, reg);
}
});
}
x => todo!("creating unions with layout: {:?}", x),
}
}
/// Copies a complex symbol on the stack to the arg pointer.
pub fn copy_symbol_to_arg_pointer(
&mut self,
@@ -845,12 +793,6 @@ impl<
self.copy_to_stack_offset(buf, size, from_offset, to_offset)
}
},
LayoutRepr::Boxed(_) => {
// like a 64-bit integer
debug_assert_eq!(to_offset % 8, 0);
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg64(buf, to_offset, reg);
}
LayoutRepr::LambdaSet(lambda_set) => {
// like its runtime representation
self.copy_symbol_to_stack_offset(
@@ -861,14 +803,18 @@ impl<
&lambda_set.runtime_representation(),
)
}
_ if layout_interner.stack_size(*layout) == 0 => {}
LayoutRepr::Struct { .. } | LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert_eq!(size, layout_interner.stack_size(*layout));
self.copy_to_stack_offset(buf, size, from_offset, to_offset)
}
x => todo!("copying data to the stack with layout, {:?}", x),
LayoutRepr::RecursivePointer(_) | LayoutRepr::Boxed(_) | LayoutRepr::Union(_) => {
// like a 64-bit integer
debug_assert_eq!(to_offset % 8, 0);
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg64(buf, to_offset, reg);
}
}
}
@@ -1160,7 +1106,7 @@ impl<
layout: InLayout<'a>,
) {
match layout_interner.get(layout).repr {
single_register_layouts!() => {
single_register_layouts!() | pointer_layouts!() => {
let base_offset = self.claim_stack_size(8);
self.symbol_storage_map.insert(
symbol,
@@ -1172,20 +1118,17 @@ impl<
self.allocation_map
.insert(symbol, Rc::new((base_offset, 8)));
}
LayoutRepr::LambdaSet(lambda_set) => self.joinpoint_argument_stack_storage(
layout_interner,
symbol,
lambda_set.runtime_representation(),
),
_ => {
if let LayoutRepr::LambdaSet(lambda_set) = layout_interner.get(layout).repr {
self.joinpoint_argument_stack_storage(
layout_interner,
symbol,
lambda_set.runtime_representation(),
)
let stack_size = layout_interner.stack_size(layout);
if stack_size == 0 {
self.no_data(&symbol);
} else {
let stack_size = layout_interner.stack_size(layout);
if stack_size == 0 {
self.no_data(&symbol);
} else {
self.claim_stack_area(&symbol, stack_size);
}
self.claim_stack_area(&symbol, stack_size);
}
}
}
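The flattened branch above encodes one storage decision per joinpoint argument; summarized as a toy function (not compiler types):

enum Slot { Primitive8, NoData, Area(u32) }

fn joinpoint_slot(register_sized_or_pointer: bool, stack_size: u32) -> Slot {
    // lambda sets first recurse into their runtime representation
    if register_sized_or_pointer {
        Slot::Primitive8       // one 8-byte base-offset slot
    } else if stack_size == 0 {
        Slot::NoData           // zero-sized values get no storage
    } else {
        Slot::Area(stack_size) // anything else gets a full stack area
    }
}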
@@ -1228,7 +1171,7 @@ impl<
base_offset: i32,
) {
match layout_interner.get(layout).repr {
single_register_integers!() => {
single_register_integers!() | pointer_layouts!() => {
let reg = self.load_to_general_reg(buf, &symbol);
ASM::mov_base32_reg64(buf, base_offset, reg);
}
@@ -1236,27 +1179,21 @@ impl<
let reg = self.load_to_float_reg(buf, &symbol);
ASM::mov_base32_freg64(buf, base_offset, reg);
}
_ => match layout_interner.get(layout).repr {
LayoutRepr::LambdaSet(lambda_set) => {
self.jump_argument_stack_storage(
layout_interner,
buf,
symbol,
lambda_set.runtime_representation(),
base_offset,
);
}
LayoutRepr::Boxed(_) => {
let reg = self.load_to_general_reg(buf, &symbol);
ASM::mov_base32_reg64(buf, base_offset, reg);
}
_ => {
internal_error!(
r"cannot load non-primitive layout ({:?}) to primitive stack location",
layout_interner.dbg(layout)
)
}
},
LayoutRepr::LambdaSet(lambda_set) => {
self.jump_argument_stack_storage(
layout_interner,
buf,
symbol,
lambda_set.runtime_representation(),
base_offset,
);
}
_ => {
internal_error!(
r"cannot load non-primitive layout ({:?}) to primitive stack location",
layout_interner.dbg(layout)
)
}
}
}
@@ -1341,6 +1278,22 @@ impl<
base_offset
}
pub fn claim_pointer_stack_area(&mut self, sym: Symbol) -> i32 {
let size = 8;
let base_offset = self.claim_stack_size(size);
self.symbol_storage_map.insert(
sym,
Stack(Primitive {
base_offset,
reg: None,
}),
);
base_offset
}
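A pointer always fits a single general register, so it gets an 8-byte Primitive slot rather than a complex stack area, and later reloads cost one mov. Hedged usage sketch (stand-in variable names):

// let base_offset = storage_manager.claim_pointer_stack_area(sym); // 8 bytes
// ASM::mov_base32_reg64(buf, base_offset, ptr_reg);        // spill the pointer
// let reg = storage_manager.load_to_general_reg(buf, &sym); // reload: one mov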
/// claim_stack_size claims `amount` bytes from the stack, aligning to 8.
/// This may be free space in the stack or result in increasing the stack size.
/// It returns the base-pointer-relative offset of the new data.
@@ -1541,12 +1494,10 @@ impl<
fn is_primitive(layout_interner: &mut STLayoutInterner<'_>, layout: InLayout<'_>) -> bool {
match layout_interner.get(layout).repr {
single_register_layouts!() => true,
_ => match layout_interner.get(layout).repr {
LayoutRepr::Boxed(_) => true,
LayoutRepr::LambdaSet(lambda_set) => {
is_primitive(layout_interner, lambda_set.runtime_representation())
}
_ => false,
},
pointer_layouts!() => true,
LayoutRepr::LambdaSet(lambda_set) => {
is_primitive(layout_interner, lambda_set.runtime_representation())
}
_ => false,
}
}

View file

@@ -1,7 +1,7 @@
use crate::generic64::{storage::StorageManager, Assembler, CallConv, RegTrait};
use crate::{
single_register_floats, single_register_int_builtins, single_register_integers,
single_register_layouts, Relocation,
pointer_layouts, single_register_floats, single_register_int_builtins,
single_register_integers, single_register_layouts, Relocation,
};
use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
@@ -461,6 +461,7 @@ impl X64_64SystemVStoreArgs {
) {
match layout_interner.get(in_layout).repr {
single_register_integers!() => self.store_arg_general(buf, storage_manager, sym),
pointer_layouts!() => self.store_arg_general(buf, storage_manager, sym),
single_register_floats!() => self.store_arg_float(buf, storage_manager, sym),
LayoutRepr::I128 | LayoutRepr::U128 => {
let (offset, _) = storage_manager.stack_offset_and_size(&sym);
@@ -507,92 +508,83 @@ impl X64_64SystemVStoreArgs {
}
self.tmp_stack_offset += size as i32;
}
other => {
// look at the layout in more detail
match other {
LayoutRepr::Boxed(_) => {
// treat boxed like a 64-bit integer
self.store_arg_general(buf, storage_manager, sym)
}
LayoutRepr::LambdaSet(lambda_set) => self.store_arg(
LayoutRepr::LambdaSet(lambda_set) => self.store_arg(
buf,
storage_manager,
layout_interner,
sym,
lambda_set.runtime_representation(),
),
LayoutRepr::Struct { .. } => {
// for now, just also store this on the stack
let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
debug_assert_eq!(base_offset % 8, 0);
for i in (0..size as i32).step_by(8) {
X86_64Assembler::mov_reg64_base32(
buf,
storage_manager,
layout_interner,
sym,
lambda_set.runtime_representation(),
),
LayoutRepr::Struct { .. } => {
// for now, just also store this on the stack
let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
debug_assert_eq!(base_offset % 8, 0);
for i in (0..size as i32).step_by(8) {
X86_64Assembler::mov_reg64_base32(
buf,
Self::GENERAL_RETURN_REGS[0],
base_offset + i,
);
X86_64Assembler::mov_stack32_reg64(
buf,
self.tmp_stack_offset + i,
Self::GENERAL_RETURN_REGS[0],
);
}
self.tmp_stack_offset += size as i32;
}
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
type ASM = X86_64Assembler;
Self::GENERAL_RETURN_REGS[0],
base_offset + i,
);
X86_64Assembler::mov_stack32_reg64(
buf,
self.tmp_stack_offset + i,
Self::GENERAL_RETURN_REGS[0],
);
}
self.tmp_stack_offset += size as i32;
}
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
type ASM = X86_64Assembler;
let tmp_reg = Self::GENERAL_RETURN_REGS[0];
let stack_offset = self.tmp_stack_offset;
let tmp_reg = Self::GENERAL_RETURN_REGS[0];
let stack_offset = self.tmp_stack_offset;
let mut copied = 0;
let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
let mut copied = 0;
let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
if size - copied >= 8 {
for _ in (0..(size - copied)).step_by(8) {
ASM::mov_reg64_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg64(buf, stack_offset + copied as i32, tmp_reg);
if size - copied >= 8 {
for _ in (0..(size - copied)).step_by(8) {
ASM::mov_reg64_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg64(buf, stack_offset + copied as i32, tmp_reg);
copied += 8;
}
}
if size - copied >= 4 {
for _ in (0..(size - copied)).step_by(4) {
ASM::mov_reg32_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg32(buf, stack_offset + copied as i32, tmp_reg);
copied += 4;
}
}
if size - copied >= 2 {
for _ in (0..(size - copied)).step_by(2) {
ASM::mov_reg16_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg16(buf, stack_offset + copied as i32, tmp_reg);
copied += 2;
}
}
if size - copied >= 1 {
for _ in (0..(size - copied)).step_by(1) {
ASM::mov_reg8_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg8(buf, stack_offset + copied as i32, tmp_reg);
copied += 1;
}
}
self.tmp_stack_offset += size as i32;
}
_ => {
todo!(
"calling with arg type, {:?}",
layout_interner.dbg(in_layout)
);
copied += 8;
}
}
if size - copied >= 4 {
for _ in (0..(size - copied)).step_by(4) {
ASM::mov_reg32_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg32(buf, stack_offset + copied as i32, tmp_reg);
copied += 4;
}
}
if size - copied >= 2 {
for _ in (0..(size - copied)).step_by(2) {
ASM::mov_reg16_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg16(buf, stack_offset + copied as i32, tmp_reg);
copied += 2;
}
}
if size - copied >= 1 {
for _ in (0..(size - copied)).step_by(1) {
ASM::mov_reg8_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg8(buf, stack_offset + copied as i32, tmp_reg);
copied += 1;
}
}
self.tmp_stack_offset += size as i32;
}
_ => {
todo!(
"calling with arg type, {:?}",
layout_interner.dbg(in_layout)
);
}
}
}
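The copy loop above drains the union in progressively smaller power-of-two chunks. Its intent, modeled in plain Rust (a sketch; the real code routes every chunk through a scratch register):

fn copy_chunked(dst: &mut [u8], src: &[u8]) {
    let mut copied = 0;
    for chunk in [8usize, 4, 2, 1] {
        // take as many `chunk`-sized moves as still fit
        while src.len() - copied >= chunk {
            dst[copied..copied + chunk].copy_from_slice(&src[copied..copied + chunk]);
            copied += chunk;
        }
    }
}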
@@ -664,6 +656,7 @@ impl X64_64SystemVLoadArgs {
let stack_size = layout_interner.stack_size(in_layout);
match layout_interner.get(in_layout).repr {
single_register_integers!() => self.load_arg_general(storage_manager, sym),
pointer_layouts!() => self.load_arg_general(storage_manager, sym),
single_register_floats!() => self.load_arg_float(storage_manager, sym),
_ if stack_size == 0 => {
storage_manager.no_data(&sym);
@@ -673,38 +666,32 @@ impl X64_64SystemVLoadArgs {
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
other => match other {
LayoutRepr::Boxed(_) => {
// boxed layouts are pointers, which we treat as 64-bit integers
self.load_arg_general(storage_manager, sym)
}
LayoutRepr::LambdaSet(lambda_set) => self.load_arg(
storage_manager,
layout_interner,
sym,
lambda_set.runtime_representation(),
),
LayoutRepr::Struct { .. } => {
// for now, just also store this on the stack
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
LayoutRepr::Builtin(Builtin::Int(IntWidth::U128 | IntWidth::I128)) => {
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
// for now, just also store this on the stack
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
_ => {
todo!(
"Loading args with layout {:?}",
layout_interner.dbg(in_layout)
);
}
},
LayoutRepr::LambdaSet(lambda_set) => self.load_arg(
storage_manager,
layout_interner,
sym,
lambda_set.runtime_representation(),
),
LayoutRepr::Struct { .. } => {
// for now, just also store this on the stack
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
LayoutRepr::Builtin(Builtin::Int(IntWidth::U128 | IntWidth::I128)) => {
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => {
// for now, just also store this on the stack
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
_ => {
todo!(
"Loading args with layout {:?}",
layout_interner.dbg(in_layout)
);
}
}
}
@@ -1756,7 +1743,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
#[inline(always)]
fn neq_reg64_reg64_reg64(
fn neq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,