Push interned layouts as mut throughout the backend, and intern box layouts

Ayaz Hafiz 2022-12-28 18:51:26 -06:00
parent dd6a72fc46
commit 7ab7fdfa7b
GPG key ID: 0E2A37416A25EF58
26 changed files with 769 additions and 375 deletions
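
Every file below repeats one mechanical change: the layout interner moves out of the shared `Env` (which held it as `&'a STLayoutInterner<'a>`) and is threaded explicitly as `&mut STLayoutInterner<'a>`, so structs and functions gain a second lifetime `'r` for that mutable reference, distinct from the arena lifetime `'a`. A minimal sketch of why the two lifetimes must stay separate (toy types, not the real roc_mono API):

```rust
// `'a` is the arena lifetime of interned data; `'r` is the (shorter)
// lifetime of the mutable borrow of the interner itself.
struct Interner<'a> {
    entries: Vec<&'a str>,
}

impl<'a> Interner<'a> {
    // Interning needs `&mut self`, which is why the interner can no
    // longer sit behind a shared `&'a` reference inside `Env`.
    fn intern(&mut self, s: &'a str) -> usize {
        if let Some(i) = self.entries.iter().position(|e| *e == s) {
            return i;
        }
        self.entries.push(s);
        self.entries.len() - 1
    }
}

// `&'a mut Interner<'a>` would pin the borrow for the arena's whole
// lifetime; `&'r mut Interner<'a>` keeps it as short as the backend.
struct Backend<'a, 'r> {
    layout_interner: &'r mut Interner<'a>,
}

fn main() {
    let mut interner = Interner { entries: vec![] };
    {
        let backend = Backend { layout_interner: &mut interner };
        backend.layout_interner.intern("Box(Str)");
    } // `'r` ends here...
    assert_eq!(interner.entries.len(), 1); // ...so the interner is free again.
}
```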

View file

@@ -4,7 +4,7 @@ use bumpalo::collections::Vec;
use packed_struct::prelude::*;
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::Layout;
use roc_mono::layout::{Layout, STLayoutInterner};
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
@@ -305,15 +305,17 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_args: &'a [(Layout<'a>, Symbol)],
_ret_layout: &Layout<'a>,
) {
@@ -321,15 +323,17 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_dst: &Symbol,
_args: &[Symbol],
_arg_layouts: &[Layout<'a>],
@@ -338,30 +342,34 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
todo!("Storing args for AArch64");
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
todo!("Returning complex symbols for AArch64");
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
@@ -443,9 +451,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register signed multiplication for AArch64");
}
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
@@ -456,9 +464,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register unsigned multiplication for AArch64");
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
@@ -469,9 +477,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register signed division for AArch64");
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
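
Every AArch64 change above is signature-only: the bodies stay `todo!()`, but `load_args`, `store_args`, and the complex-symbol methods are trait methods, so adding the `layout_interner` parameter to the trait forces each implementor to match. A reduced illustration of that ripple effect (hypothetical `Interner`/`StoreArgs` names):

```rust
// Adding a parameter to a trait method is a signature change, so even a
// stubbed implementation must be updated to keep compiling.
struct Interner;

trait StoreArgs {
    // After the change, the method receives the interner explicitly...
    fn store_args(interner: &mut Interner, args: &[u32]);
}

struct AArch64;

impl StoreArgs for AArch64 {
    // ...so the stub must accept (and may ignore) it.
    fn store_args(_interner: &mut Interner, _args: &[u32]) {
        todo!("Storing args for AArch64");
    }
}

fn main() {
    // Calling it would hit the todo!, so just show that it type-checks.
    let _ = <AArch64 as StoreArgs>::store_args;
}
```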

View file

@@ -6,12 +6,12 @@ use bumpalo::collections::Vec;
use roc_builtins::bitcode::{self, FloatWidth, IntWidth};
use roc_collections::all::MutMap;
use roc_error_macros::internal_error;
use roc_module::symbol::{Interns, Symbol};
use roc_module::symbol::{Interns, ModuleId, Symbol};
use roc_mono::code_gen_help::CodeGenHelp;
use roc_mono::ir::{
BranchInfo, JoinPointId, ListLiteralElement, Literal, Param, ProcLayout, SelfRecursive, Stmt,
};
use roc_mono::layout::{Builtin, Layout, TagIdIntType, UnionLayout};
use roc_mono::layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout};
use roc_target::TargetInfo;
use std::marker::PhantomData;
@@ -69,9 +69,10 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
);
/// load_args updates the storage manager to know where every arg is stored.
fn load_args<'a>(
fn load_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
// ret_layout is needed because if it is a complex type, we pass a pointer as the first arg.
ret_layout: &Layout<'a>,
@@ -79,9 +80,10 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
/// store_args stores the args in registers and on the stack for function calling.
/// It also updates the amount of temporary stack space needed in the storage manager.
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
@@ -91,18 +93,20 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
/// return_complex_symbol returns the specified complex/non-primitive symbol.
/// It uses the layout to determine how the data should be returned.
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
);
/// load_returned_complex_symbol loads a complex symbol that was returned from a function call.
/// It uses the layout to determine how the data should be loaded into the symbol.
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
);
@@ -261,9 +265,9 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src1: GeneralReg,
src2: GeneralReg,
);
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@@ -271,18 +275,18 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@@ -354,6 +358,7 @@ pub trait RegTrait:
pub struct Backend64Bit<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
@@ -364,8 +369,9 @@ pub struct Backend64Bit<
phantom_asm: PhantomData<ASM>,
phantom_cc: PhantomData<CC>,
target_info: TargetInfo,
env: &'a Env<'a>,
interns: &'a mut Interns,
env: &'r Env<'a>,
layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
helper_proc_gen: CodeGenHelp<'a>,
helper_proc_symbols: Vec<'a, (Symbol, ProcLayout<'a>)>,
buf: Vec<'a, u8>,
@@ -380,33 +386,31 @@ pub struct Backend64Bit<
literal_map: MutMap<Symbol, (*const Literal<'a>, *const Layout<'a>)>,
join_map: MutMap<JoinPointId, Vec<'a, (u64, u64)>>,
storage_manager: StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
}
/// new creates a new backend that will output to the specific Object.
pub fn new_backend_64bit<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
>(
env: &'a Env,
env: &'r Env<'a>,
target_info: TargetInfo,
interns: &'a mut Interns,
) -> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC> {
interns: &'r mut Interns,
layout_interner: &'r mut STLayoutInterner<'a>,
) -> Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC> {
Backend64Bit {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
target_info,
env,
interns,
helper_proc_gen: CodeGenHelp::new(
env.arena,
env.layout_interner,
target_info,
env.module_id,
),
layout_interner,
helper_proc_gen: CodeGenHelp::new(env.arena, target_info, env.module_id),
helper_proc_symbols: bumpalo::vec![in env.arena],
proc_name: None,
is_self_recursive: None,
@@ -436,11 +440,12 @@ macro_rules! quadword_and_smaller {
impl<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> Backend<'a> for Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
> Backend<'a> for Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
fn env(&self) -> &Env<'a> {
self.env
@@ -448,8 +453,20 @@ impl<
fn interns(&self) -> &Interns {
self.interns
}
fn env_interns_helpers_mut(&mut self) -> (&Env<'a>, &mut Interns, &mut CodeGenHelp<'a>) {
(self.env, self.interns, &mut self.helper_proc_gen)
fn module_interns_helpers_mut(
&mut self,
) -> (
ModuleId,
&mut STLayoutInterner<'a>,
&mut Interns,
&mut CodeGenHelp<'a>,
) {
(
self.env.module_id,
self.layout_interner,
self.interns,
&mut self.helper_proc_gen,
)
}
fn helper_proc_gen_mut(&mut self) -> &mut CodeGenHelp<'a> {
&mut self.helper_proc_gen
@@ -587,7 +604,13 @@ impl<
}
fn load_args(&mut self, args: &'a [(Layout<'a>, Symbol)], ret_layout: &Layout<'a>) {
CC::load_args(&mut self.buf, &mut self.storage_manager, args, ret_layout);
CC::load_args(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
args,
ret_layout,
);
}
/// Used for generating wrappers for malloc/realloc/free
@@ -619,6 +642,7 @@ impl<
CC::store_args(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
args,
arg_layouts,
@@ -642,6 +666,7 @@ impl<
CC::load_returned_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
ret_layout,
);
@@ -732,7 +757,7 @@ impl<
// Ensure all the joinpoint parameters have storage locations.
// On jumps to the joinpoint, we will overwrite those locations as a way to "pass parameters" to the joinpoint.
self.storage_manager
.setup_joinpoint(&mut self.buf, id, parameters);
.setup_joinpoint(self.layout_interner, &mut self.buf, id, parameters);
self.join_map.insert(*id, bumpalo::vec![in self.env.arena]);
@@ -764,7 +789,7 @@ impl<
_ret_layout: &Layout<'a>,
) {
self.storage_manager
.setup_jump(&mut self.buf, id, args, arg_layouts);
.setup_jump(self.layout_interner, &mut self.buf, id, args, arg_layouts);
let jmp_location = self.buf.len();
let start_offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678);
@@ -832,7 +857,7 @@ impl<
let buf = &mut self.buf;
let struct_size = return_layout.stack_size(self.env.layout_interner, self.target_info);
let struct_size = return_layout.stack_size(self.layout_interner, self.target_info);
let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);
@@ -1170,7 +1195,7 @@ impl<
.storage_manager
.load_to_general_reg(&mut self.buf, index);
let ret_stack_size =
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info());
// TODO: This can be optimized with smarter instructions.
// Also can probably be moved into storage manager at least partly.
self.storage_manager.with_tmp_general_reg(
@@ -1212,8 +1237,8 @@ impl<
let elem_layout = arg_layouts[2];
let u32_layout = &Layout::Builtin(Builtin::Int(IntWidth::U32));
let list_alignment = list_layout
.alignment_bytes(self.env.layout_interner, self.storage_manager.target_info());
let list_alignment =
list_layout.alignment_bytes(self.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP,
u32_layout,
@@ -1233,7 +1258,7 @@ impl<
// Load the elements size.
let elem_stack_size =
elem_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP3,
u64_layout,
@@ -1243,7 +1268,7 @@ impl<
// Setup the return location.
let base_offset = self.storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info()),
ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info()),
);
let ret_fields = if let Layout::Struct { field_layouts, .. } = ret_layout {
@@ -1262,7 +1287,7 @@ impl<
(
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
.stack_size(self.layout_interner, self.storage_manager.target_info())
as i32,
base_offset,
)
@@ -1271,7 +1296,7 @@ impl<
base_offset,
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
.stack_size(self.layout_interner, self.storage_manager.target_info())
as i32,
)
};
@@ -1315,6 +1340,7 @@ impl<
// Copy from list to the output record.
self.storage_manager.copy_symbol_to_stack_offset(
self.layout_interner,
&mut self.buf,
out_list_offset,
&Symbol::DEV_TMP5,
@@ -1354,14 +1380,13 @@ impl<
let allocation_alignment = std::cmp::max(
8,
elem_layout.allocation_alignment_bytes(
self.env.layout_interner,
self.layout_interner,
self.storage_manager.target_info(),
) as u64,
);
let elem_size = elem_layout
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
as u64;
let elem_size =
elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info()) as u64;
let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */;
let u64_layout = Layout::Builtin(Builtin::Int(IntWidth::U64));
self.load_literal(
@@ -1465,8 +1490,13 @@ impl<
}
fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]) {
self.storage_manager
.create_struct(&mut self.buf, sym, layout, fields);
self.storage_manager.create_struct(
self.layout_interner,
&mut self.buf,
sym,
layout,
fields,
);
}
fn load_struct_at_index(
@@ -1476,8 +1506,13 @@ impl<
index: u64,
field_layouts: &'a [Layout<'a>],
) {
self.storage_manager
.load_field_at_index(sym, structure, index, field_layouts);
self.storage_manager.load_field_at_index(
self.layout_interner,
sym,
structure,
index,
field_layouts,
);
}
fn load_union_at_index(
@@ -1491,6 +1526,7 @@ impl<
match union_layout {
UnionLayout::NonRecursive(tag_layouts) | UnionLayout::Recursive(tag_layouts) => {
self.storage_manager.load_field_at_index(
self.layout_interner,
sym,
structure,
index,
@@ -1502,8 +1538,13 @@ impl<
}
fn get_tag_id(&mut self, sym: &Symbol, structure: &Symbol, union_layout: &UnionLayout<'a>) {
self.storage_manager
.load_union_tag_id(&mut self.buf, sym, structure, union_layout);
self.storage_manager.load_union_tag_id(
self.layout_interner,
&mut self.buf,
sym,
structure,
union_layout,
);
}
fn tag(
@@ -1513,8 +1554,14 @@ impl<
union_layout: &UnionLayout<'a>,
tag_id: TagIdIntType,
) {
self.storage_manager
.create_union(&mut self.buf, sym, union_layout, fields, tag_id)
self.storage_manager.create_union(
self.layout_interner,
&mut self.buf,
sym,
union_layout,
fields,
tag_id,
)
}
fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>) {
@@ -1611,7 +1658,13 @@ impl<
}
}
} else {
CC::return_complex_symbol(&mut self.buf, &mut self.storage_manager, sym, layout)
CC::return_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
sym,
layout,
)
}
let inst_loc = self.buf.len() as u64;
let offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678) as u64;
@@ -1687,11 +1740,12 @@ impl<
/// For example, loading a symbol for doing a computation.
impl<
'a,
'r,
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
> Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
/// Updates a jump instruction to a new offset and returns the number of bytes written.
fn update_jmp_imm32_offset(
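
Call sites in this file, such as `CC::load_args(&mut self.buf, &mut self.storage_manager, self.layout_interner, ...)`, work because the borrow checker splits one `&mut self` into disjoint field borrows, and the stored `&'r mut` interner is implicitly reborrowed for the call. A reduced sketch of that pattern (simplified stand-in types):

```rust
struct Interner {
    n: u32,
}

struct Backend<'r> {
    buf: Vec<u8>,
    interner: &'r mut Interner,
}

// Stands in for CC::load_args: takes two of the backend's parts at once.
fn use_both(buf: &mut Vec<u8>, interner: &mut Interner) {
    interner.n += 1;
    buf.push(interner.n as u8);
}

impl<'r> Backend<'r> {
    fn step(&mut self) {
        // Two mutable borrows of `self`'s fields are live at once; that is
        // fine because they are disjoint places, and `self.interner` is
        // reborrowed (`&mut *self.interner`) rather than moved out.
        use_both(&mut self.buf, self.interner);
    }
}

fn main() {
    let mut interner = Interner { n: 0 };
    let mut backend = Backend { buf: vec![], interner: &mut interner };
    backend.step();
    assert_eq!(backend.buf, [1]);
}
```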

View file

@@ -10,7 +10,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::{
ir::{JoinPointId, Param},
layout::{Builtin, Layout, TagIdIntType, UnionLayout},
layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout},
};
use roc_target::TargetInfo;
use std::cmp::max;
@@ -79,6 +79,7 @@ enum Storage<GeneralReg: RegTrait, FloatReg: RegTrait> {
#[derive(Clone)]
pub struct StorageManager<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
@@ -86,7 +87,7 @@ pub struct StorageManager<
> {
phantom_cc: PhantomData<CC>,
phantom_asm: PhantomData<ASM>,
pub(crate) env: &'a Env<'a>,
pub(crate) env: &'r Env<'a>,
target_info: TargetInfo,
// Data about where each symbol is stored.
symbol_storage_map: MutMap<Symbol, Storage<GeneralReg, FloatReg>>,
@@ -127,14 +128,15 @@ pub struct StorageManager<
pub fn new_storage_manager<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
>(
env: &'a Env,
env: &'r Env<'a>,
target_info: TargetInfo,
) -> StorageManager<'a, GeneralReg, FloatReg, ASM, CC> {
) -> StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC> {
StorageManager {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
@@ -157,11 +159,12 @@ pub fn new_storage_manager<
impl<
'a,
'r,
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> StorageManager<'a, GeneralReg, FloatReg, ASM, CC>
> StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
pub fn reset(&mut self) {
self.symbol_storage_map.clear();
@@ -526,6 +529,7 @@ impl<
/// This is lazy by default. It will not copy anything around.
pub fn load_field_at_index(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
structure: &Symbol,
index: u64,
@@ -541,12 +545,12 @@ impl<
let (base_offset, size) = (*base_offset, *size);
let mut data_offset = base_offset;
for layout in field_layouts.iter().take(index as usize) {
let field_size = layout.stack_size(self.env.layout_interner, self.target_info);
let field_size = layout.stack_size(layout_interner, self.target_info);
data_offset += field_size as i32;
}
debug_assert!(data_offset < base_offset + size as i32);
let layout = field_layouts[index as usize];
let size = layout.stack_size(self.env.layout_interner, self.target_info);
let size = layout.stack_size(layout_interner, self.target_info);
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
@@ -578,6 +582,7 @@ impl<
pub fn load_union_tag_id(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
_buf: &mut Vec<'a, u8>,
sym: &Symbol,
structure: &Symbol,
@@ -591,8 +596,8 @@ impl<
UnionLayout::NonRecursive(_) => {
let (union_offset, _) = self.stack_offset_and_size(structure);
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();
@@ -630,12 +635,13 @@ impl<
/// Creates a struct on the stack, moving the data in fields into the struct.
pub fn create_struct(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
layout: &Layout<'a>,
fields: &'a [Symbol],
) {
let struct_size = layout.stack_size(self.env.layout_interner, self.target_info);
let struct_size = layout.stack_size(layout_interner, self.target_info);
if struct_size == 0 {
self.symbol_storage_map.insert(*sym, NoData);
return;
@@ -645,21 +651,27 @@ impl<
if let Layout::Struct { field_layouts, .. } = layout {
let mut current_offset = base_offset;
for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
current_offset += field_size as i32;
}
} else {
// This is a single element struct. Just copy the single field to the stack.
debug_assert_eq!(fields.len(), 1);
self.copy_symbol_to_stack_offset(buf, base_offset, &fields[0], layout);
self.copy_symbol_to_stack_offset(layout_interner, buf, base_offset, &fields[0], layout);
}
}
/// Creates a union on the stack, moving the data in fields into the union and tagging it.
pub fn create_union(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
union_layout: &UnionLayout<'a>,
@@ -668,8 +680,8 @@ impl<
) {
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
if data_alignment < 8 || data_alignment % 8 != 0 {
todo!("small/unaligned tagging");
@@ -679,9 +691,14 @@ impl<
for (field, field_layout) in
fields.iter().zip(field_layouts[tag_id as usize].iter())
{
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
current_offset += field_size as i32;
}
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -719,6 +736,7 @@ impl<
/// Always interact with the stack using aligned 64bit movement.
pub fn copy_symbol_to_stack_offset(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
to_offset: i32,
sym: &Symbol,
@@ -735,19 +753,16 @@ impl<
let reg = self.load_to_float_reg(buf, sym);
ASM::mov_base32_freg64(buf, to_offset, reg);
}
_ if layout.stack_size(self.env.layout_interner, self.target_info) == 0 => {}
_ if layout.stack_size(layout_interner, self.target_info) == 0 => {}
// TODO: Verify this is always true.
// The dev backend does not deal with refcounting and does not care whether data is safe to memcpy.
// It is just temporarily storing the value because it needs to free registers.
// Later, it will be reloaded and stored in refcounted memory as needed.
_ if layout.stack_size(self.env.layout_interner, self.target_info) > 8 => {
_ if layout.stack_size(layout_interner, self.target_info) > 8 => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert!(from_offset % 8 == 0);
debug_assert!(size % 8 == 0);
debug_assert_eq!(
size,
layout.stack_size(self.env.layout_interner, self.target_info)
);
debug_assert_eq!(size, layout.stack_size(layout_interner, self.target_info));
self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
for i in (0..size as i32).step_by(8) {
ASM::mov_reg64_base32(buf, reg, from_offset + i);
@@ -988,6 +1003,7 @@ impl<
/// Later jumps to the join point can overwrite the stored locations to pass parameters.
pub fn setup_joinpoint(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
_buf: &mut Vec<'a, u8>,
id: &JoinPointId,
params: &'a [Param<'a>],
@@ -1021,7 +1037,7 @@ impl<
.insert(*symbol, Rc::new((base_offset, 8)));
}
_ => {
let stack_size = layout.stack_size(self.env.layout_interner, self.target_info);
let stack_size = layout.stack_size(layout_interner, self.target_info);
if stack_size == 0 {
self.symbol_storage_map.insert(*symbol, NoData);
} else {
@@ -1038,6 +1054,7 @@ impl<
/// This enables the jump to correctly pass arguments to the joinpoint.
pub fn setup_jump(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
id: &JoinPointId,
args: &[Symbol],
@@ -1065,7 +1082,13 @@ impl<
// Maybe we want a more memcpy like method to directly get called here.
// That would also be capable of asserting the size.
// Maybe copy stack to stack or something.
self.copy_symbol_to_stack_offset(buf, *base_offset, sym, layout);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
*base_offset,
sym,
layout,
);
}
Stack(Primitive {
base_offset,
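
The large-value branch of `copy_symbol_to_stack_offset` above moves data in aligned 64-bit chunks through a temporary general-purpose register (the `step_by(8)` loop, with `debug_assert`s keeping offsets and sizes 8-byte aligned). A reduced model of that copy, with a byte buffer standing in for the stack and an `[u8; 8]` for the register:

```rust
// Mirrors the shape of the emitted mov_reg64_base32 / mov_base32_reg64
// pairs: one aligned 8-byte transfer per iteration.
fn copy_aligned(stack: &mut [u8], from: usize, to: usize, size: usize) {
    debug_assert!(from % 8 == 0 && to % 8 == 0 && size % 8 == 0);
    for i in (0..size).step_by(8) {
        let reg: [u8; 8] = stack[from + i..from + i + 8].try_into().unwrap();
        stack[to + i..to + i + 8].copy_from_slice(&reg);
    }
}

fn main() {
    let mut stack = vec![0u8; 32];
    stack[0..8].copy_from_slice(&42u64.to_le_bytes());
    copy_aligned(&mut stack, 0, 16, 8);
    assert_eq!(u64::from_le_bytes(stack[16..24].try_into().unwrap()), 42);
}
```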

View file

@@ -251,27 +251,29 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut general_i = 0;
let mut float_i = 0;
if X86_64SystemV::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if X86_64SystemV::returns_via_arg_pointer(layout_interner, ret_layout) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[0]);
general_i += 1;
}
for (layout, sym) in args.iter() {
let stack_size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
let stack_size = layout.stack_size(layout_interner, TARGET_INFO);
match layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
@@ -307,15 +309,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
@@ -324,12 +328,10 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
let mut general_i = 0;
let mut float_i = 0;
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the result we will return.
let base_offset = storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
let base_offset = storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
// Set the first reg to the address base + offset.
let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
general_i += 1;
@@ -388,8 +390,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
tmp_stack_offset += 8;
}
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) > 16 => {
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) > 16 => {
// TODO: Double check this.
// Just copy onto the stack.
// Use return reg as buffer because it will be empty right now.
@@ -417,15 +419,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
storage_manager.update_fn_call_stack_size(tmp_stack_offset as u32);
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
) {
@@ -433,8 +437,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
debug_assert_eq!(base_offset % 8, 0);
if size <= 8 {
@@ -473,15 +477,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
) {
@@ -489,9 +495,9 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
let size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let size = layout.stack_size(layout_interner, TARGET_INFO);
let offset = storage_manager.claim_stack_area(sym, size);
if size <= 8 {
X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -658,24 +664,23 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut i = 0;
if X86_64WindowsFastcall::returns_via_arg_pointer(
storage_manager.env.layout_interner,
ret_layout,
) {
if X86_64WindowsFastcall::returns_via_arg_pointer(layout_interner, ret_layout) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[i]);
i += 1;
}
@@ -690,7 +695,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
i += 1;
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("Loading args with layout {:?}", x);
}
@@ -710,27 +715,27 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the arg we will return.
storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
todo!("claim first parama reg for the address");
}
for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
@@ -779,7 +784,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
tmp_stack_offset += 8;
}
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("calling with arg type, {:?}", x);
}
@@ -788,30 +793,34 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.update_fn_call_stack_size(tmp_stack_offset as u32);
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
todo!("Returning complex symbols for X86_64");
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
@@ -1029,9 +1038,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
imul_reg64_reg64(buf, dst, src2);
}
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
@@ -1113,9 +1122,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
@@ -1133,9 +1142,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RAX);
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
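
The System V `load_args`/`store_args` above follow one assignment loop: integer-like layouts take general parameter registers, floats take float registers, everything else spills to stack offsets, and `returns_via_arg_pointer` decides whether a hidden result pointer consumes the first general register (the fastcall version is similar but shares one index across both register classes). A hedged sketch of that loop, with a simplified `Class` enum standing in for the real layout classification:

```rust
enum Class {
    Int,
    Float,
}

#[derive(Debug, PartialEq)]
enum Loc {
    GeneralReg(usize),
    FloatReg(usize),
    Stack(i32),
}

fn assign(args: &[Class], returns_via_arg_pointer: bool) -> Vec<Loc> {
    const GENERAL_PARAM_REGS: usize = 6; // rdi, rsi, rdx, rcx, r8, r9
    const FLOAT_PARAM_REGS: usize = 8; // xmm0..=xmm7
    let mut general_i = 0;
    let mut float_i = 0;
    let mut arg_offset = 16; // pushed return address + saved base pointer
    if returns_via_arg_pointer {
        general_i += 1; // hidden pointer to the return value takes rdi
    }
    args.iter()
        .map(|class| match class {
            Class::Int if general_i < GENERAL_PARAM_REGS => {
                general_i += 1;
                Loc::GeneralReg(general_i - 1)
            }
            Class::Float if float_i < FLOAT_PARAM_REGS => {
                float_i += 1;
                Loc::FloatReg(float_i - 1)
            }
            _ => {
                let loc = Loc::Stack(arg_offset);
                arg_offset += 8;
                loc
            }
        })
        .collect()
}

fn main() {
    let locs = assign(&[Class::Int, Class::Float, Class::Int], true);
    assert_eq!(locs, [Loc::GeneralReg(1), Loc::FloatReg(0), Loc::GeneralReg(2)]);
}
```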

View file

@@ -28,7 +28,6 @@ mod run_roc;
pub struct Env<'a> {
pub arena: &'a Bump,
pub layout_interner: &'a STLayoutInterner<'a>,
pub module_id: ModuleId,
pub exposed_to_host: MutSet<Symbol>,
pub lazy_literals: bool,
@@ -68,7 +67,14 @@ trait Backend<'a> {
// This method is suboptimal, but it seems to be the only way to make rust understand
// that all of these values can be mutable at the same time. By returning them together,
// rust understands that they are part of a single use of mutable self.
fn env_interns_helpers_mut(&mut self) -> (&Env<'a>, &mut Interns, &mut CodeGenHelp<'a>);
fn module_interns_helpers_mut(
&mut self,
) -> (
ModuleId,
&mut STLayoutInterner<'a>,
&mut Interns,
&mut CodeGenHelp<'a>,
);
fn symbol_to_string(&self, symbol: Symbol, layout_id: LayoutId) -> String {
layout_id.to_symbol_string(symbol, self.interns())
@@ -155,11 +161,17 @@ trait Backend<'a> {
// If this layout requires a new RC proc, we get enough info to create a linker symbol
// for it. Here we don't create linker symbols at this time, but in Wasm backend, we do.
let (rc_stmt, new_specializations) = {
let (env, interns, rc_proc_gen) = self.env_interns_helpers_mut();
let module_id = env.module_id;
let (module_id, layout_interner, interns, rc_proc_gen) =
self.module_interns_helpers_mut();
let ident_ids = interns.all_ident_ids.get_mut(&module_id).unwrap();
rc_proc_gen.expand_refcount_stmt(ident_ids, layout, modify, following)
rc_proc_gen.expand_refcount_stmt(
ident_ids,
layout_interner,
layout,
modify,
following,
)
};
for spec in new_specializations.into_iter() {
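
`module_interns_helpers_mut` exists for exactly the reason the comment above gives: returning all the mutable parts in one tuple is how a single `&mut self` is split into pieces that stay usable simultaneously. A reduced illustration (simplified field types):

```rust
struct Backend {
    layout_interner: Vec<u32>,
    interns: Vec<String>,
    helper_proc_gen: Vec<u8>,
}

impl Backend {
    // One method, one `&mut self`, three disjoint `&mut` results.
    fn parts_mut(&mut self) -> (&mut Vec<u32>, &mut Vec<String>, &mut Vec<u8>) {
        (
            &mut self.layout_interner,
            &mut self.interns,
            &mut self.helper_proc_gen,
        )
    }
}

fn main() {
    let mut b = Backend {
        layout_interner: vec![],
        interns: vec![],
        helper_proc_gen: vec![],
    };
    // Two separate getter calls could not both be live here, because each
    // would take its own `&mut b`; one tuple-returning call can.
    let (interner, interns, helpers) = b.parts_mut();
    interner.push(1);
    interns.push("ident".to_string());
    helpers.push(0);
}
```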

View file

@@ -12,7 +12,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol;
use roc_module::symbol::Interns;
use roc_mono::ir::{Proc, ProcLayout};
use roc_mono::layout::LayoutIds;
use roc_mono::layout::{LayoutIds, STLayoutInterner};
use roc_target::TargetInfo;
use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Triple};
@@ -22,9 +22,10 @@ use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Tripl
/// build_module is the high level builder/delegator.
/// It takes the request to build a module and output the object file for the module.
pub fn build_module<'a>(
env: &'a Env,
interns: &'a mut Interns,
pub fn build_module<'a, 'r>(
env: &'r Env<'a>,
interns: &'r mut Interns,
layout_interner: &'r mut STLayoutInterner<'a>,
target: &Triple,
procedures: MutMap<(symbol::Symbol, ProcLayout<'a>), Proc<'a>>,
) -> Object<'a> {
@@ -39,7 +40,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
>(env, TargetInfo::default_x86_64(), interns);
>(env, TargetInfo::default_x86_64(), interns, layout_interner);
build_object(
procedures,
backend,
@@ -56,7 +57,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
>(env, TargetInfo::default_x86_64(), interns);
>(env, TargetInfo::default_x86_64(), interns, layout_interner);
build_object(
procedures,
backend,
@@ -72,12 +73,13 @@ pub fn build_module<'a>(
binary_format: TargetBF::Elf,
..
} if cfg!(feature = "target-aarch64") => {
let backend = new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns);
let backend =
new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns, layout_interner);
build_object(
procedures,
backend,
@@ -89,12 +91,13 @@ pub fn build_module<'a>(
binary_format: TargetBF::Macho,
..
} if cfg!(feature = "target-aarch64") => {
let backend = new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns);
let backend =
new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns, layout_interner);
build_object(
procedures,
backend,
@@ -245,11 +248,11 @@ fn build_object<'a, B: Backend<'a>>(
let helper_procs = {
let module_id = backend.env().module_id;
let (env, interns, helper_proc_gen) = backend.env_interns_helpers_mut();
let (module_id, _interner, interns, helper_proc_gen) = backend.module_interns_helpers_mut();
let ident_ids = interns.all_ident_ids.get_mut(&module_id).unwrap();
let helper_procs = helper_proc_gen.take_procs();
env.module_id.register_debug_idents(ident_ids);
module_id.register_debug_idents(ident_ids);
helper_procs
};
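
`build_module`'s new signature keeps the two lifetimes apart: the returned `Object<'a>` borrows only the arena, so the `&'r mut STLayoutInterner<'a>` borrow ends when the call returns and the caller keeps full use of the interner. A toy sketch of that shape (stand-in `Interner`/`Object` types, not the real APIs):

```rust
struct Interner<'a> {
    entries: Vec<&'a str>,
}

struct Object<'a> {
    symbols: Vec<&'a str>,
}

// Takes the interner mutably for the duration of the call only; the
// output is tied to 'a, not to the mutable borrow.
fn build<'a>(interner: &mut Interner<'a>) -> Object<'a> {
    interner.entries.push("main");
    Object {
        symbols: interner.entries.clone(),
    }
}

fn main() {
    let mut interner = Interner { entries: vec![] };
    let obj = build(&mut interner); // the 'r-style borrow starts and ends here
    interner.entries.push("next"); // the interner is usable again afterwards
    assert_eq!(obj.symbols, ["main"]);
}
```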