Mirror of https://github.com/roc-lang/roc.git, synced 2025-07-24 06:55:15 +00:00
Push layout interner further through Layout
parent ed04c2040a
commit 3b4b1838b8

34 changed files with 1279 additions and 634 deletions
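The change is mechanical but wide: every `Layout` query that may need to look through a nested layout (`stack_size`, `alignment_bytes`, `data_size_and_alignment`, `allocation_alignment_bytes`, and the private `returns_via_arg_pointer` helpers) now takes a layout interner as its first argument, threaded from the new `Env::layout_interner` field. Below is a minimal sketch of why a sizing query wants an interner at all — it uses hypothetical miniature types (`Idx`, `Interner`), not Roc's actual `STLayoutInterner` API:

// Hypothetical miniature of a layout interner; names and shapes are
// illustrative, not Roc's actual STLayoutInterner API.
#[derive(Clone, Copy)]
struct Idx(usize); // an interned layout is referenced by a plain index

enum Layout {
    U64,
    Struct(Vec<Idx>), // aggregate fields point at other layouts by index
}

struct Interner(Vec<Layout>);

impl Interner {
    fn get(&self, idx: Idx) -> &Layout {
        &self.0[idx.0]
    }
}

impl Layout {
    // Sizing an aggregate must resolve each field index, so the interner
    // has to be passed along -- the same signature change this commit
    // makes to the real stack_size.
    fn stack_size(&self, interner: &Interner) -> u32 {
        match self {
            Layout::U64 => 8,
            Layout::Struct(fields) => fields
                .iter()
                .map(|f| interner.get(*f).stack_size(interner))
                .sum(),
        }
    }
}

fn main() {
    let interner = Interner(vec![Layout::U64]);
    let pair = Layout::Struct(vec![Idx(0), Idx(0)]);
    assert_eq!(pair.stack_size(&interner), 16);
}

Under this kind of scheme a layout value stays cheap to copy because aggregate fields are plain indices; the trade-off is that any recursive query needs the table passed alongside, which is exactly the plumbing the hunks below perform.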
@@ -380,7 +380,12 @@ pub fn new_backend_64bit<
         target_info,
         env,
         interns,
-        helper_proc_gen: CodeGenHelp::new(env.arena, target_info, env.module_id),
+        helper_proc_gen: CodeGenHelp::new(
+            env.arena,
+            env.layout_interner,
+            target_info,
+            env.module_id,
+        ),
         helper_proc_symbols: bumpalo::vec![in env.arena],
         proc_name: None,
         is_self_recursive: None,
@@ -806,7 +811,7 @@ impl<

         let buf = &mut self.buf;

-        let struct_size = return_layout.stack_size(self.target_info);
+        let struct_size = return_layout.stack_size(self.env.layout_interner, self.target_info);

         let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);

@@ -1143,7 +1148,8 @@ impl<
         let index_reg = self
             .storage_manager
             .load_to_general_reg(&mut self.buf, index);
-        let ret_stack_size = ret_layout.stack_size(self.storage_manager.target_info());
+        let ret_stack_size =
+            ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
         // TODO: This can be optimized with smarter instructions.
         // Also can probably be moved into storage manager at least partly.
         self.storage_manager.with_tmp_general_reg(
@@ -1185,7 +1191,8 @@ impl<
         let elem_layout = arg_layouts[2];

         let u32_layout = &Layout::Builtin(Builtin::Int(IntWidth::U32));
-        let list_alignment = list_layout.alignment_bytes(self.storage_manager.target_info());
+        let list_alignment = list_layout
+            .alignment_bytes(self.env.layout_interner, self.storage_manager.target_info());
         self.load_literal(
             &Symbol::DEV_TMP,
             u32_layout,
@@ -1204,7 +1211,8 @@ impl<
         ASM::add_reg64_reg64_imm32(&mut self.buf, reg, CC::BASE_PTR_REG, new_elem_offset);

         // Load the elements size.
-        let elem_stack_size = elem_layout.stack_size(self.storage_manager.target_info());
+        let elem_stack_size =
+            elem_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
         self.load_literal(
             &Symbol::DEV_TMP3,
             u64_layout,
@@ -1214,7 +1222,7 @@ impl<
         // Setup the return location.
         let base_offset = self.storage_manager.claim_stack_area(
             dst,
-            ret_layout.stack_size(self.storage_manager.target_info()),
+            ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info()),
         );

         let ret_fields = if let Layout::Struct { field_layouts, .. } = ret_layout {
@@ -1231,13 +1239,19 @@ impl<

         let (out_list_offset, out_elem_offset) = if ret_fields[0] == elem_layout {
             (
-                base_offset + ret_fields[0].stack_size(self.storage_manager.target_info()) as i32,
+                base_offset
+                    + ret_fields[0]
+                        .stack_size(self.env.layout_interner, self.storage_manager.target_info())
+                        as i32,
                 base_offset,
             )
         } else {
             (
                 base_offset,
-                base_offset + ret_fields[0].stack_size(self.storage_manager.target_info()) as i32,
+                base_offset
+                    + ret_fields[0]
+                        .stack_size(self.env.layout_interner, self.storage_manager.target_info())
+                        as i32,
             )
         };

@@ -1318,10 +1332,15 @@ impl<
         // This requires at least 8 for the refcount alignment.
         let allocation_alignment = std::cmp::max(
             8,
-            elem_layout.allocation_alignment_bytes(self.storage_manager.target_info()) as u64,
+            elem_layout.allocation_alignment_bytes(
+                self.env.layout_interner,
+                self.storage_manager.target_info(),
+            ) as u64,
         );

-        let elem_size = elem_layout.stack_size(self.storage_manager.target_info()) as u64;
+        let elem_size = elem_layout
+            .stack_size(self.env.layout_interner, self.storage_manager.target_info())
+            as u64;
         let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */;
         let u64_layout = Layout::Builtin(Builtin::Int(IntWidth::U64));
         self.load_literal(
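A worked instance of the allocation formula in the hunk above: the extra `allocation_alignment` bytes reserved in front of the elements hold the refcount. The concrete numbers assume `u64` elements (stack size 8, allocation alignment 8); that choice is illustrative, not taken from the diff.

// Hypothetical numbers plugged into the list-literal allocation math above.
fn main() {
    let elem_size: u64 = 8; // stack_size of a u64 element
    let len: u64 = 3;
    let allocation_alignment: u64 = std::cmp::max(8, 8); // at least 8 for the refcount
    let allocation_size = elem_size * len + allocation_alignment;
    assert_eq!(allocation_size, 32); // 8-byte refcount slot + 24 bytes of elements
}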
@@ -86,7 +86,7 @@ pub struct StorageManager<
 > {
     phantom_cc: PhantomData<CC>,
     phantom_asm: PhantomData<ASM>,
-    env: &'a Env<'a>,
+    pub(crate) env: &'a Env<'a>,
     target_info: TargetInfo,
     // Data about where each symbol is stored.
     symbol_storage_map: MutMap<Symbol, Storage<GeneralReg, FloatReg>>,
@@ -541,12 +541,12 @@ impl<
                 let (base_offset, size) = (*base_offset, *size);
                 let mut data_offset = base_offset;
                 for layout in field_layouts.iter().take(index as usize) {
-                    let field_size = layout.stack_size(self.target_info);
+                    let field_size = layout.stack_size(self.env.layout_interner, self.target_info);
                     data_offset += field_size as i32;
                 }
                 debug_assert!(data_offset < base_offset + size as i32);
                 let layout = field_layouts[index as usize];
-                let size = layout.stack_size(self.target_info);
+                let size = layout.stack_size(self.env.layout_interner, self.target_info);
                 self.allocation_map.insert(*sym, owned_data);
                 self.symbol_storage_map.insert(
                     *sym,
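The field lookup above finds a field's offset by summing the stack sizes of all preceding fields; note the loop adds no alignment padding between them. A small sketch of the same walk, with plain sizes standing in for `layout.stack_size(interner, target_info)`:

// Minimal sketch of the offset walk in the hunk above.
fn field_offset(field_sizes: &[u32], index: usize) -> i32 {
    field_sizes[..index].iter().map(|s| *s as i32).sum()
}

fn main() {
    // Fields of sizes 8, 4, 8: the third field starts at 8 + 4 = 12,
    // since the loop packs fields back to back without padding.
    assert_eq!(field_offset(&[8, 4, 8], 2), 12);
}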
@@ -591,8 +591,8 @@ impl<
             UnionLayout::NonRecursive(_) => {
                 let (union_offset, _) = self.stack_offset_and_size(structure);

-                let (data_size, data_alignment) =
-                    union_layout.data_size_and_alignment(self.target_info);
+                let (data_size, data_alignment) = union_layout
+                    .data_size_and_alignment(self.env.layout_interner, self.target_info);
                 let id_offset = data_size - data_alignment;
                 let discriminant = union_layout.discriminant();

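For a `NonRecursive` tag union, the discriminant lives in the last alignment-sized slot of the union's data area, which is where `id_offset = data_size - data_alignment` comes from. A worked instance with hypothetical numbers (a payload needing 16 bytes at alignment 8, plus one aligned slot for the tag id):

// Illustrative numbers only; not taken from a real Roc layout.
fn main() {
    let (data_size, data_alignment) = (24u32, 8u32);
    let id_offset = data_size - data_alignment;
    assert_eq!(id_offset, 16); // tag id stored in the final 8-byte slot
}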
@@ -635,7 +635,7 @@ impl<
         layout: &Layout<'a>,
         fields: &'a [Symbol],
     ) {
-        let struct_size = layout.stack_size(self.target_info);
+        let struct_size = layout.stack_size(self.env.layout_interner, self.target_info);
         if struct_size == 0 {
             self.symbol_storage_map.insert(*sym, NoData);
             return;
@@ -646,7 +646,8 @@ impl<
             let mut current_offset = base_offset;
             for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
                 self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
-                let field_size = field_layout.stack_size(self.target_info);
+                let field_size =
+                    field_layout.stack_size(self.env.layout_interner, self.target_info);
                 current_offset += field_size as i32;
             }
         } else {
@@ -667,8 +668,8 @@ impl<
     ) {
         match union_layout {
             UnionLayout::NonRecursive(field_layouts) => {
-                let (data_size, data_alignment) =
-                    union_layout.data_size_and_alignment(self.target_info);
+                let (data_size, data_alignment) = union_layout
+                    .data_size_and_alignment(self.env.layout_interner, self.target_info);
                 let id_offset = data_size - data_alignment;
                 if data_alignment < 8 || data_alignment % 8 != 0 {
                     todo!("small/unaligned tagging");
@@ -679,7 +680,8 @@ impl<
                     fields.iter().zip(field_layouts[tag_id as usize].iter())
                 {
                     self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
-                    let field_size = field_layout.stack_size(self.target_info);
+                    let field_size =
+                        field_layout.stack_size(self.env.layout_interner, self.target_info);
                     current_offset += field_size as i32;
                 }
                 self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -733,16 +735,19 @@ impl<
                 let reg = self.load_to_float_reg(buf, sym);
                 ASM::mov_base32_freg64(buf, to_offset, reg);
             }
-            _ if layout.stack_size(self.target_info) == 0 => {}
+            _ if layout.stack_size(self.env.layout_interner, self.target_info) == 0 => {}
             // TODO: Verify this is always true.
             // The dev backend does not deal with refcounting and does not care about if data is safe to memcpy.
             // It is just temporarily storing the value due to needing to free registers.
             // Later, it will be reloaded and stored in refcounted as needed.
-            _ if layout.stack_size(self.target_info) > 8 => {
+            _ if layout.stack_size(self.env.layout_interner, self.target_info) > 8 => {
                 let (from_offset, size) = self.stack_offset_and_size(sym);
                 debug_assert!(from_offset % 8 == 0);
                 debug_assert!(size % 8 == 0);
-                debug_assert_eq!(size, layout.stack_size(self.target_info));
+                debug_assert_eq!(
+                    size,
+                    layout.stack_size(self.env.layout_interner, self.target_info)
+                );
                 self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
                     for i in (0..size as i32).step_by(8) {
                         ASM::mov_reg64_base32(buf, reg, from_offset + i);
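The `> 8` arm above spills a wide value through a scratch register in 8-byte steps; the `debug_assert!`s first guarantee both the offset and the size are 8-byte multiples. A sketch of the same copy pattern over plain byte slices instead of emitted `mov`s:

// Models the 8-byte-at-a-time stack copy from the hunk above.
fn copy_aligned(dst: &mut [u8], src: &[u8]) {
    debug_assert!(src.len() % 8 == 0 && dst.len() == src.len());
    for i in (0..src.len()).step_by(8) {
        // stands in for: mov reg, [base + from + i]; mov [base + to + i], reg
        dst[i..i + 8].copy_from_slice(&src[i..i + 8]);
    }
}

fn main() {
    let src = [1u8; 16];
    let mut dst = [0u8; 16];
    copy_aligned(&mut dst, &src);
    assert_eq!(dst, src);
}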
@@ -1016,7 +1021,7 @@ impl<
                     .insert(*symbol, Rc::new((base_offset, 8)));
             }
             _ => {
-                let stack_size = layout.stack_size(self.target_info);
+                let stack_size = layout.stack_size(self.env.layout_interner, self.target_info);
                 if stack_size == 0 {
                     self.symbol_storage_map.insert(*symbol, NoData);
                 } else {
@@ -7,7 +7,7 @@ use bumpalo::collections::Vec;
 use roc_builtins::bitcode::{FloatWidth, IntWidth};
 use roc_error_macros::internal_error;
 use roc_module::symbol::Symbol;
-use roc_mono::layout::{Builtin, Layout};
+use roc_mono::layout::{Builtin, Layout, STLayoutInterner};
 use roc_target::TargetInfo;

 const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
@@ -266,12 +266,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
         let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
         let mut general_i = 0;
         let mut float_i = 0;
-        if X86_64SystemV::returns_via_arg_pointer(ret_layout) {
+        if X86_64SystemV::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
             storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[0]);
             general_i += 1;
         }
         for (layout, sym) in args.iter() {
-            let stack_size = layout.stack_size(TARGET_INFO);
+            let stack_size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
             match layout {
                 single_register_integers!() => {
                     if general_i < Self::GENERAL_PARAM_REGS.len() {
@@ -324,10 +324,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
         let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
         let mut general_i = 0;
         let mut float_i = 0;
-        if Self::returns_via_arg_pointer(ret_layout) {
+        if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
             // Save space on the stack for the result we will be return.
-            let base_offset =
-                storage_manager.claim_stack_area(dst, ret_layout.stack_size(TARGET_INFO));
+            let base_offset = storage_manager.claim_stack_area(
+                dst,
+                ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
+            );
             // Set the first reg to the address base + offset.
             let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
             general_i += 1;
@@ -386,8 +388,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
                     tmp_stack_offset += 8;
                 }
             }
-            x if x.stack_size(TARGET_INFO) == 0 => {}
-            x if x.stack_size(TARGET_INFO) > 16 => {
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) > 16 => {
                 // TODO: Double check this.
                 // Just copy onto the stack.
                 // Use return reg as buffer because it will be empty right now.
@@ -431,8 +433,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
             single_register_layouts!() => {
                 internal_error!("single register layouts are not complex symbols");
             }
-            x if x.stack_size(TARGET_INFO) == 0 => {}
-            x if !Self::returns_via_arg_pointer(x) => {
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
+            x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
                 let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
                 debug_assert_eq!(base_offset % 8, 0);
                 if size <= 8 {
@@ -487,9 +489,9 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
             single_register_layouts!() => {
                 internal_error!("single register layouts are not complex symbols");
             }
-            x if x.stack_size(TARGET_INFO) == 0 => {}
-            x if !Self::returns_via_arg_pointer(x) => {
-                let size = layout.stack_size(TARGET_INFO);
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
+            x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
+                let size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
                 let offset = storage_manager.claim_stack_area(sym, size);
                 if size <= 8 {
                     X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -516,10 +518,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64SystemV
 }

 impl X86_64SystemV {
-    fn returns_via_arg_pointer(ret_layout: &Layout) -> bool {
+    fn returns_via_arg_pointer<'a>(
+        interner: &STLayoutInterner<'a>,
+        ret_layout: &Layout<'a>,
+    ) -> bool {
         // TODO: This will need to be more complex/extended to fully support the calling convention.
         // details here: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
-        ret_layout.stack_size(TARGET_INFO) > 16
+        ret_layout.stack_size(interner, TARGET_INFO) > 16
     }
 }

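The `> 16` threshold is the System V x86-64 rule this helper encodes: an aggregate return value wider than two 8-byte registers comes back through a hidden pointer argument rather than in `rax:rdx`. A self-contained illustration of the rule with stand-in structs (these are not Roc layouts):

// Stand-in structs sized by hand; a demonstration of the >16-byte rule,
// not code from this commit.
#[repr(C)]
struct TwoWords(u64, u64); // 16 bytes: fits in rax:rdx
#[repr(C)]
struct ThreeWords(u64, u64, u64); // 24 bytes: returned via hidden pointer

fn returns_via_arg_pointer(stack_size: u32) -> bool {
    stack_size > 16
}

fn main() {
    assert!(!returns_via_arg_pointer(std::mem::size_of::<TwoWords>() as u32));
    assert!(returns_via_arg_pointer(std::mem::size_of::<ThreeWords>() as u32));
}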
@@ -667,7 +672,10 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64WindowsFastcall
     ) {
         let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
         let mut i = 0;
-        if X86_64WindowsFastcall::returns_via_arg_pointer(ret_layout) {
+        if X86_64WindowsFastcall::returns_via_arg_pointer(
+            storage_manager.env.layout_interner,
+            ret_layout,
+        ) {
             storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[i]);
             i += 1;
         }
@@ -682,7 +690,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64WindowsFastcall
                 storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
                 i += 1;
             }
-            x if x.stack_size(TARGET_INFO) == 0 => {}
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
             x => {
                 todo!("Loading args with layout {:?}", x);
             }
@@ -717,9 +725,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64WindowsFastcall
         ret_layout: &Layout<'a>,
     ) {
         let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
-        if Self::returns_via_arg_pointer(ret_layout) {
+        if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
             // Save space on the stack for the arg we will return.
-            storage_manager.claim_stack_area(dst, ret_layout.stack_size(TARGET_INFO));
+            storage_manager.claim_stack_area(
+                dst,
+                ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
+            );
             todo!("claim first parama reg for the address");
         }
         for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
@@ -768,7 +779,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64WindowsFastcall
                     tmp_stack_offset += 8;
                 }
             }
-            x if x.stack_size(TARGET_INFO) == 0 => {}
+            x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
             x => {
                 todo!("calling with arg type, {:?}", x);
             }
@@ -809,10 +820,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64WindowsFastcall
 }

 impl X86_64WindowsFastcall {
-    fn returns_via_arg_pointer(ret_layout: &Layout) -> bool {
+    fn returns_via_arg_pointer<'a>(
+        interner: &STLayoutInterner<'a>,
+        ret_layout: &Layout<'a>,
+    ) -> bool {
         // TODO: This is not fully correct there are some exceptions for "vector" types.
         // details here: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-160#return-values
-        ret_layout.stack_size(TARGET_INFO) > 8
+        ret_layout.stack_size(interner, TARGET_INFO) > 8
     }
 }

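Note the two conventions draw the line differently: System V returns via hidden pointer above 16 bytes (two general-purpose return registers), while Windows fastcall does so above 8 bytes (a single return register, `rax`) — hence `> 16` in `X86_64SystemV::returns_via_arg_pointer` versus `> 8` here.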
@@ -14,7 +14,9 @@ use roc_mono::ir::{
     BranchInfo, CallType, Expr, JoinPointId, ListLiteralElement, Literal, Param, Proc, ProcLayout,
     SelfRecursive, Stmt,
 };
-use roc_mono::layout::{Builtin, Layout, LayoutId, LayoutIds, TagIdIntType, UnionLayout};
+use roc_mono::layout::{
+    Builtin, Layout, LayoutId, LayoutIds, STLayoutInterner, TagIdIntType, UnionLayout,
+};

 mod generic64;
 mod object_builder;
@@ -23,6 +25,7 @@ mod run_roc;

 pub struct Env<'a> {
     pub arena: &'a Bump,
+    pub layout_interner: &'a STLayoutInterner<'a>,
     pub module_id: ModuleId,
     pub exposed_to_host: MutSet<Symbol>,
     pub lazy_literals: bool,
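With `layout_interner` on `Env`, every backend entry point can reach the interner through the environment — which is also why `StorageManager::env` became `pub(crate)` in the storage hunk above, letting the calling-convention code write `storage_manager.env.layout_interner`. A sketch of the borrow pattern the new field implies, with stand-in types rather than Roc's:

// Stand-in types; the real Env borrows an STLayoutInterner<'a> owned by
// the driver for the duration of code generation.
struct Interner;

struct Env<'a> {
    layout_interner: &'a Interner,
}

fn codegen(env: &Env<'_>) {
    // Every size/alignment query borrows the interner from here.
    let _interner = env.layout_interner;
}

fn main() {
    let interner = Interner; // outlives all Envs created below
    let env = Env { layout_interner: &interner };
    codegen(&env);
}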