Push interned layouts as mut throughout the backend, and intern box layouts

This commit is contained in:
Ayaz Hafiz 2022-12-28 18:51:26 -06:00
parent dd6a72fc46
commit 7ab7fdfa7b
No known key found for this signature in database
GPG key ID: 0E2A37416A25EF58
26 changed files with 769 additions and 375 deletions

2
Cargo.lock generated
View file

@ -3273,6 +3273,7 @@ dependencies = [
"morphic_lib",
"roc_collections",
"roc_debug_flags",
"roc_intern",
"roc_module",
"roc_mono",
]
@ -3646,6 +3647,7 @@ dependencies = [
"roc_collections",
"roc_debug_flags",
"roc_error_macros",
"roc_intern",
"roc_module",
"roc_mono",
"roc_region",

View file

@ -8,6 +8,7 @@ version = "0.0.1"
[dependencies]
morphic_lib = {path = "../../vendor/morphic_lib"}
roc_collections = {path = "../collections"}
roc_intern = {path = "../intern"}
roc_module = {path = "../module"}
roc_mono = {path = "../mono"}
roc_debug_flags = {path = "../debug_flags"}

View file

@ -6,6 +6,7 @@ use morphic_lib::{
TypeDefBuilder, TypeId, TypeName, UpdateModeVar, ValueId,
};
use roc_collections::all::{MutMap, MutSet};
use roc_intern::Interner;
use roc_module::low_level::LowLevel;
use roc_module::symbol::Symbol;
@ -1730,6 +1731,7 @@ fn layout_spec_help<'a>(
}
Boxed(inner_layout) => {
let inner_layout = interner.get(*inner_layout);
let inner_type =
layout_spec_help(env, builder, interner, inner_layout, when_recursive)?;
let cell_type = builder.add_heap_cell_type();

View file

@ -470,7 +470,7 @@ fn gen_from_mono_module_dev_wasm32<'a>(
module_id,
procedures,
mut interns,
layout_interner,
mut layout_interner,
..
} = loaded;
@ -483,7 +483,6 @@ fn gen_from_mono_module_dev_wasm32<'a>(
let env = roc_gen_wasm::Env {
arena,
layout_interner: &layout_interner,
module_id,
exposed_to_host,
stack_bytes: wasm_dev_stack_bytes.unwrap_or(roc_gen_wasm::Env::DEFAULT_STACK_BYTES),
@ -505,8 +504,13 @@ fn gen_from_mono_module_dev_wasm32<'a>(
)
});
let final_binary_bytes =
roc_gen_wasm::build_app_binary(&env, &mut interns, host_module, procedures);
let final_binary_bytes = roc_gen_wasm::build_app_binary(
&env,
&mut layout_interner,
&mut interns,
host_module,
procedures,
);
let code_gen = code_gen_start.elapsed();
@ -536,20 +540,20 @@ fn gen_from_mono_module_dev_assembly<'a>(
procedures,
mut interns,
exposed_to_host,
layout_interner,
mut layout_interner,
..
} = loaded;
let env = roc_gen_dev::Env {
arena,
layout_interner: &layout_interner,
module_id,
exposed_to_host: exposed_to_host.values.keys().copied().collect(),
lazy_literals,
generate_allocators,
};
let module_object = roc_gen_dev::build_module(&env, &mut interns, target, procedures);
let module_object =
roc_gen_dev::build_module(&env, &mut interns, &mut layout_interner, target, procedures);
let code_gen = code_gen_start.elapsed();

View file

@ -4,7 +4,7 @@ use bumpalo::collections::Vec;
use packed_struct::prelude::*;
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::Layout;
use roc_mono::layout::{Layout, STLayoutInterner};
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
@ -305,15 +305,17 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_args: &'a [(Layout<'a>, Symbol)],
_ret_layout: &Layout<'a>,
) {
@ -321,15 +323,17 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_dst: &Symbol,
_args: &[Symbol],
_arg_layouts: &[Layout<'a>],
@ -338,30 +342,34 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
todo!("Storing args for AArch64");
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
todo!("Returning complex symbols for AArch64");
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
AArch64GeneralReg,
AArch64FloatReg,
AArch64Assembler,
AArch64Call,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
@ -443,9 +451,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register signed multiplication for AArch64");
}
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
@ -456,9 +464,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register unsigned multiplication for AArch64");
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
@ -469,9 +477,9 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register signed division for AArch64");
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,

View file

@ -6,12 +6,12 @@ use bumpalo::collections::Vec;
use roc_builtins::bitcode::{self, FloatWidth, IntWidth};
use roc_collections::all::MutMap;
use roc_error_macros::internal_error;
use roc_module::symbol::{Interns, Symbol};
use roc_module::symbol::{Interns, ModuleId, Symbol};
use roc_mono::code_gen_help::CodeGenHelp;
use roc_mono::ir::{
BranchInfo, JoinPointId, ListLiteralElement, Literal, Param, ProcLayout, SelfRecursive, Stmt,
};
use roc_mono::layout::{Builtin, Layout, TagIdIntType, UnionLayout};
use roc_mono::layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout};
use roc_target::TargetInfo;
use std::marker::PhantomData;
@ -69,9 +69,10 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
);
/// load_args updates the storage manager to know where every arg is stored.
fn load_args<'a>(
fn load_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
// ret_layout is needed because if it is a complex type, we pass a pointer as the first arg.
ret_layout: &Layout<'a>,
@ -79,9 +80,10 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
/// store_args stores the args in registers and on the stack for function calling.
/// It also updates the amount of temporary stack space needed in the storage manager.
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
@ -91,18 +93,20 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<Gene
/// return_complex_symbol returns the specified complex/non-primitive symbol.
/// It uses the layout to determine how the data should be returned.
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
);
/// load_returned_complex_symbol loads a complex symbol that was returned from a function call.
/// It uses the layout to determine how the data should be loaded into the symbol.
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, Self>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
);
@ -261,9 +265,9 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src1: GeneralReg,
src2: GeneralReg,
);
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@ -271,18 +275,18 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@ -354,6 +358,7 @@ pub trait RegTrait:
pub struct Backend64Bit<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
@ -364,8 +369,9 @@ pub struct Backend64Bit<
phantom_asm: PhantomData<ASM>,
phantom_cc: PhantomData<CC>,
target_info: TargetInfo,
env: &'a Env<'a>,
interns: &'a mut Interns,
env: &'r Env<'a>,
layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
helper_proc_gen: CodeGenHelp<'a>,
helper_proc_symbols: Vec<'a, (Symbol, ProcLayout<'a>)>,
buf: Vec<'a, u8>,
@ -380,33 +386,31 @@ pub struct Backend64Bit<
literal_map: MutMap<Symbol, (*const Literal<'a>, *const Layout<'a>)>,
join_map: MutMap<JoinPointId, Vec<'a, (u64, u64)>>,
storage_manager: StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
storage_manager: StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
}
/// new creates a new backend that will output to the specific Object.
pub fn new_backend_64bit<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
>(
env: &'a Env,
env: &'r Env<'a>,
target_info: TargetInfo,
interns: &'a mut Interns,
) -> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC> {
interns: &'r mut Interns,
layout_interner: &'r mut STLayoutInterner<'a>,
) -> Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC> {
Backend64Bit {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
target_info,
env,
interns,
helper_proc_gen: CodeGenHelp::new(
env.arena,
env.layout_interner,
target_info,
env.module_id,
),
layout_interner,
helper_proc_gen: CodeGenHelp::new(env.arena, target_info, env.module_id),
helper_proc_symbols: bumpalo::vec![in env.arena],
proc_name: None,
is_self_recursive: None,
@ -436,11 +440,12 @@ macro_rules! quadword_and_smaller {
impl<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> Backend<'a> for Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
> Backend<'a> for Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
fn env(&self) -> &Env<'a> {
self.env
@ -448,8 +453,20 @@ impl<
fn interns(&self) -> &Interns {
self.interns
}
fn env_interns_helpers_mut(&mut self) -> (&Env<'a>, &mut Interns, &mut CodeGenHelp<'a>) {
(self.env, self.interns, &mut self.helper_proc_gen)
fn module_interns_helpers_mut(
&mut self,
) -> (
ModuleId,
&mut STLayoutInterner<'a>,
&mut Interns,
&mut CodeGenHelp<'a>,
) {
(
self.env.module_id,
self.layout_interner,
self.interns,
&mut self.helper_proc_gen,
)
}
fn helper_proc_gen_mut(&mut self) -> &mut CodeGenHelp<'a> {
&mut self.helper_proc_gen
@ -587,7 +604,13 @@ impl<
}
fn load_args(&mut self, args: &'a [(Layout<'a>, Symbol)], ret_layout: &Layout<'a>) {
CC::load_args(&mut self.buf, &mut self.storage_manager, args, ret_layout);
CC::load_args(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
args,
ret_layout,
);
}
/// Used for generating wrappers for malloc/realloc/free
@ -619,6 +642,7 @@ impl<
CC::store_args(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
args,
arg_layouts,
@ -642,6 +666,7 @@ impl<
CC::load_returned_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
dst,
ret_layout,
);
@ -732,7 +757,7 @@ impl<
// Ensure all the joinpoint parameters have storage locations.
// On jumps to the joinpoint, we will overwrite those locations as a way to "pass parameters" to the joinpoint.
self.storage_manager
.setup_joinpoint(&mut self.buf, id, parameters);
.setup_joinpoint(self.layout_interner, &mut self.buf, id, parameters);
self.join_map.insert(*id, bumpalo::vec![in self.env.arena]);
@ -764,7 +789,7 @@ impl<
_ret_layout: &Layout<'a>,
) {
self.storage_manager
.setup_jump(&mut self.buf, id, args, arg_layouts);
.setup_jump(self.layout_interner, &mut self.buf, id, args, arg_layouts);
let jmp_location = self.buf.len();
let start_offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678);
@ -832,7 +857,7 @@ impl<
let buf = &mut self.buf;
let struct_size = return_layout.stack_size(self.env.layout_interner, self.target_info);
let struct_size = return_layout.stack_size(self.layout_interner, self.target_info);
let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);
@ -1170,7 +1195,7 @@ impl<
.storage_manager
.load_to_general_reg(&mut self.buf, index);
let ret_stack_size =
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info());
// TODO: This can be optimized with smarter instructions.
// Also can probably be moved into storage manager at least partly.
self.storage_manager.with_tmp_general_reg(
@ -1212,8 +1237,8 @@ impl<
let elem_layout = arg_layouts[2];
let u32_layout = &Layout::Builtin(Builtin::Int(IntWidth::U32));
let list_alignment = list_layout
.alignment_bytes(self.env.layout_interner, self.storage_manager.target_info());
let list_alignment =
list_layout.alignment_bytes(self.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP,
u32_layout,
@ -1233,7 +1258,7 @@ impl<
// Load the elements size.
let elem_stack_size =
elem_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP3,
u64_layout,
@ -1243,7 +1268,7 @@ impl<
// Setup the return location.
let base_offset = self.storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info()),
ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info()),
);
let ret_fields = if let Layout::Struct { field_layouts, .. } = ret_layout {
@ -1262,7 +1287,7 @@ impl<
(
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
.stack_size(self.layout_interner, self.storage_manager.target_info())
as i32,
base_offset,
)
@ -1271,7 +1296,7 @@ impl<
base_offset,
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
.stack_size(self.layout_interner, self.storage_manager.target_info())
as i32,
)
};
@ -1315,6 +1340,7 @@ impl<
// Copy from list to the output record.
self.storage_manager.copy_symbol_to_stack_offset(
self.layout_interner,
&mut self.buf,
out_list_offset,
&Symbol::DEV_TMP5,
@ -1354,14 +1380,13 @@ impl<
let allocation_alignment = std::cmp::max(
8,
elem_layout.allocation_alignment_bytes(
self.env.layout_interner,
self.layout_interner,
self.storage_manager.target_info(),
) as u64,
);
let elem_size = elem_layout
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
as u64;
let elem_size =
elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info()) as u64;
let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */;
let u64_layout = Layout::Builtin(Builtin::Int(IntWidth::U64));
self.load_literal(
@ -1465,8 +1490,13 @@ impl<
}
fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]) {
self.storage_manager
.create_struct(&mut self.buf, sym, layout, fields);
self.storage_manager.create_struct(
self.layout_interner,
&mut self.buf,
sym,
layout,
fields,
);
}
fn load_struct_at_index(
@ -1476,8 +1506,13 @@ impl<
index: u64,
field_layouts: &'a [Layout<'a>],
) {
self.storage_manager
.load_field_at_index(sym, structure, index, field_layouts);
self.storage_manager.load_field_at_index(
self.layout_interner,
sym,
structure,
index,
field_layouts,
);
}
fn load_union_at_index(
@ -1491,6 +1526,7 @@ impl<
match union_layout {
UnionLayout::NonRecursive(tag_layouts) | UnionLayout::Recursive(tag_layouts) => {
self.storage_manager.load_field_at_index(
self.layout_interner,
sym,
structure,
index,
@ -1502,8 +1538,13 @@ impl<
}
fn get_tag_id(&mut self, sym: &Symbol, structure: &Symbol, union_layout: &UnionLayout<'a>) {
self.storage_manager
.load_union_tag_id(&mut self.buf, sym, structure, union_layout);
self.storage_manager.load_union_tag_id(
self.layout_interner,
&mut self.buf,
sym,
structure,
union_layout,
);
}
fn tag(
@ -1513,8 +1554,14 @@ impl<
union_layout: &UnionLayout<'a>,
tag_id: TagIdIntType,
) {
self.storage_manager
.create_union(&mut self.buf, sym, union_layout, fields, tag_id)
self.storage_manager.create_union(
self.layout_interner,
&mut self.buf,
sym,
union_layout,
fields,
tag_id,
)
}
fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>) {
@ -1611,7 +1658,13 @@ impl<
}
}
} else {
CC::return_complex_symbol(&mut self.buf, &mut self.storage_manager, sym, layout)
CC::return_complex_symbol(
&mut self.buf,
&mut self.storage_manager,
self.layout_interner,
sym,
layout,
)
}
let inst_loc = self.buf.len() as u64;
let offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678) as u64;
@ -1687,11 +1740,12 @@ impl<
/// For example, loading a symbol for doing a computation.
impl<
'a,
'r,
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
> Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
/// Updates a jump instruction to a new offset and returns the number of bytes written.
fn update_jmp_imm32_offset(

View file

@ -10,7 +10,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::{
ir::{JoinPointId, Param},
layout::{Builtin, Layout, TagIdIntType, UnionLayout},
layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout},
};
use roc_target::TargetInfo;
use std::cmp::max;
@ -79,6 +79,7 @@ enum Storage<GeneralReg: RegTrait, FloatReg: RegTrait> {
#[derive(Clone)]
pub struct StorageManager<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
@ -86,7 +87,7 @@ pub struct StorageManager<
> {
phantom_cc: PhantomData<CC>,
phantom_asm: PhantomData<ASM>,
pub(crate) env: &'a Env<'a>,
pub(crate) env: &'r Env<'a>,
target_info: TargetInfo,
// Data about where each symbol is stored.
symbol_storage_map: MutMap<Symbol, Storage<GeneralReg, FloatReg>>,
@ -127,14 +128,15 @@ pub struct StorageManager<
pub fn new_storage_manager<
'a,
'r,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
>(
env: &'a Env,
env: &'r Env<'a>,
target_info: TargetInfo,
) -> StorageManager<'a, GeneralReg, FloatReg, ASM, CC> {
) -> StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC> {
StorageManager {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
@ -157,11 +159,12 @@ pub fn new_storage_manager<
impl<
'a,
'r,
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>,
> StorageManager<'a, GeneralReg, FloatReg, ASM, CC>
> StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>
{
pub fn reset(&mut self) {
self.symbol_storage_map.clear();
@ -526,6 +529,7 @@ impl<
/// This is lazy by default. It will not copy anything around.
pub fn load_field_at_index(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
structure: &Symbol,
index: u64,
@ -541,12 +545,12 @@ impl<
let (base_offset, size) = (*base_offset, *size);
let mut data_offset = base_offset;
for layout in field_layouts.iter().take(index as usize) {
let field_size = layout.stack_size(self.env.layout_interner, self.target_info);
let field_size = layout.stack_size(layout_interner, self.target_info);
data_offset += field_size as i32;
}
debug_assert!(data_offset < base_offset + size as i32);
let layout = field_layouts[index as usize];
let size = layout.stack_size(self.env.layout_interner, self.target_info);
let size = layout.stack_size(layout_interner, self.target_info);
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
@ -578,6 +582,7 @@ impl<
pub fn load_union_tag_id(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
_buf: &mut Vec<'a, u8>,
sym: &Symbol,
structure: &Symbol,
@ -591,8 +596,8 @@ impl<
UnionLayout::NonRecursive(_) => {
let (union_offset, _) = self.stack_offset_and_size(structure);
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();
@ -630,12 +635,13 @@ impl<
/// Creates a struct on the stack, moving the data in fields into the struct.
pub fn create_struct(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
layout: &Layout<'a>,
fields: &'a [Symbol],
) {
let struct_size = layout.stack_size(self.env.layout_interner, self.target_info);
let struct_size = layout.stack_size(layout_interner, self.target_info);
if struct_size == 0 {
self.symbol_storage_map.insert(*sym, NoData);
return;
@ -645,21 +651,27 @@ impl<
if let Layout::Struct { field_layouts, .. } = layout {
let mut current_offset = base_offset;
for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
current_offset += field_size as i32;
}
} else {
// This is a single element struct. Just copy the single field to the stack.
debug_assert_eq!(fields.len(), 1);
self.copy_symbol_to_stack_offset(buf, base_offset, &fields[0], layout);
self.copy_symbol_to_stack_offset(layout_interner, buf, base_offset, &fields[0], layout);
}
}
/// Creates a union on the stack, moving the data in fields into the union and tagging it.
pub fn create_union(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
union_layout: &UnionLayout<'a>,
@ -668,8 +680,8 @@ impl<
) {
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
if data_alignment < 8 || data_alignment % 8 != 0 {
todo!("small/unaligned tagging");
@ -679,9 +691,14 @@ impl<
for (field, field_layout) in
fields.iter().zip(field_layouts[tag_id as usize].iter())
{
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
current_offset,
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
current_offset += field_size as i32;
}
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@ -719,6 +736,7 @@ impl<
/// Always interact with the stack using aligned 64bit movement.
pub fn copy_symbol_to_stack_offset(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
to_offset: i32,
sym: &Symbol,
@ -735,19 +753,16 @@ impl<
let reg = self.load_to_float_reg(buf, sym);
ASM::mov_base32_freg64(buf, to_offset, reg);
}
_ if layout.stack_size(self.env.layout_interner, self.target_info) == 0 => {}
_ if layout.stack_size(layout_interner, self.target_info) == 0 => {}
// TODO: Verify this is always true.
// The dev backend does not deal with refcounting and does not care about if data is safe to memcpy.
// It is just temporarily storing the value due to needing to free registers.
// Later, it will be reloaded and stored in refcounted as needed.
_ if layout.stack_size(self.env.layout_interner, self.target_info) > 8 => {
_ if layout.stack_size(layout_interner, self.target_info) > 8 => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert!(from_offset % 8 == 0);
debug_assert!(size % 8 == 0);
debug_assert_eq!(
size,
layout.stack_size(self.env.layout_interner, self.target_info)
);
debug_assert_eq!(size, layout.stack_size(layout_interner, self.target_info));
self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
for i in (0..size as i32).step_by(8) {
ASM::mov_reg64_base32(buf, reg, from_offset + i);
@ -988,6 +1003,7 @@ impl<
/// Later jumps to the join point can overwrite the stored locations to pass parameters.
pub fn setup_joinpoint(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
_buf: &mut Vec<'a, u8>,
id: &JoinPointId,
params: &'a [Param<'a>],
@ -1021,7 +1037,7 @@ impl<
.insert(*symbol, Rc::new((base_offset, 8)));
}
_ => {
let stack_size = layout.stack_size(self.env.layout_interner, self.target_info);
let stack_size = layout.stack_size(layout_interner, self.target_info);
if stack_size == 0 {
self.symbol_storage_map.insert(*symbol, NoData);
} else {
@ -1038,6 +1054,7 @@ impl<
/// This enables the jump to correctly pass arguments to the joinpoint.
pub fn setup_jump(
&mut self,
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
id: &JoinPointId,
args: &[Symbol],
@ -1065,7 +1082,13 @@ impl<
// Maybe we want a more memcpy like method to directly get called here.
// That would also be capable of asserting the size.
// Maybe copy stack to stack or something.
self.copy_symbol_to_stack_offset(buf, *base_offset, sym, layout);
self.copy_symbol_to_stack_offset(
layout_interner,
buf,
*base_offset,
sym,
layout,
);
}
Stack(Primitive {
base_offset,

View file

@ -251,27 +251,29 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut general_i = 0;
let mut float_i = 0;
if X86_64SystemV::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if X86_64SystemV::returns_via_arg_pointer(layout_interner, ret_layout) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[0]);
general_i += 1;
}
for (layout, sym) in args.iter() {
let stack_size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
let stack_size = layout.stack_size(layout_interner, TARGET_INFO);
match layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
@ -307,15 +309,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
@ -324,12 +328,10 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
let mut general_i = 0;
let mut float_i = 0;
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the result we will be returning.
let base_offset = storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
let base_offset = storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
// Set the first reg to the address base + offset.
let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
general_i += 1;
@ -388,8 +390,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
tmp_stack_offset += 8;
}
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) > 16 => {
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) > 16 => {
// TODO: Double check this.
// Just copy onto the stack.
// Use return reg as buffer because it will be empty right now.
@ -417,15 +419,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
storage_manager.update_fn_call_stack_size(tmp_stack_offset as u32);
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
) {
@ -433,8 +437,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
debug_assert_eq!(base_offset % 8, 0);
if size <= 8 {
@ -473,15 +477,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
) {
@ -489,9 +495,9 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
let size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let size = layout.stack_size(layout_interner, TARGET_INFO);
let offset = storage_manager.claim_stack_area(sym, size);
if size <= 8 {
X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@ -658,24 +664,23 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
#[inline(always)]
fn load_args<'a>(
fn load_args<'a, 'r>(
_buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut i = 0;
if X86_64WindowsFastcall::returns_via_arg_pointer(
storage_manager.env.layout_interner,
ret_layout,
) {
if X86_64WindowsFastcall::returns_via_arg_pointer(layout_interner, ret_layout) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[i]);
i += 1;
}
@ -690,7 +695,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
i += 1;
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("Loading args with layout {:?}", x);
}
@ -710,27 +715,27 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
#[inline(always)]
fn store_args<'a>(
fn store_args<'a, 'r>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the arg we will return.
storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
todo!("claim first parama reg for the address");
}
for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
@ -779,7 +784,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
tmp_stack_offset += 8;
}
}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("calling with arg type, {:?}", x);
}
@ -788,30 +793,34 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.update_fn_call_stack_size(tmp_stack_offset as u32);
}
fn return_complex_symbol<'a>(
fn return_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
todo!("Returning complex symbols for X86_64");
}
fn load_returned_complex_symbol<'a>(
fn load_returned_complex_symbol<'a, 'r>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<
'a,
'r,
X86_64GeneralReg,
X86_64FloatReg,
X86_64Assembler,
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
) {
@ -1029,9 +1038,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
imul_reg64_reg64(buf, dst, src2);
}
fn umul_reg64_reg64_reg64<'a, ASM, CC>(
fn umul_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
@ -1113,9 +1122,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
fn idiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
@ -1133,9 +1142,9 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RAX);
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
fn udiv_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,

View file

@ -28,7 +28,6 @@ mod run_roc;
pub struct Env<'a> {
pub arena: &'a Bump,
pub layout_interner: &'a STLayoutInterner<'a>,
pub module_id: ModuleId,
pub exposed_to_host: MutSet<Symbol>,
pub lazy_literals: bool,
@ -68,7 +67,14 @@ trait Backend<'a> {
// This method is suboptimal, but it seems to be the only way to make rust understand
// that all of these values can be mutable at the same time. By returning them together,
// rust understands that they are part of a single use of mutable self.
fn env_interns_helpers_mut(&mut self) -> (&Env<'a>, &mut Interns, &mut CodeGenHelp<'a>);
fn module_interns_helpers_mut(
&mut self,
) -> (
ModuleId,
&mut STLayoutInterner<'a>,
&mut Interns,
&mut CodeGenHelp<'a>,
);
fn symbol_to_string(&self, symbol: Symbol, layout_id: LayoutId) -> String {
layout_id.to_symbol_string(symbol, self.interns())
@ -155,11 +161,17 @@ trait Backend<'a> {
// If this layout requires a new RC proc, we get enough info to create a linker symbol
// for it. Here we don't create linker symbols at this time, but in Wasm backend, we do.
let (rc_stmt, new_specializations) = {
let (env, interns, rc_proc_gen) = self.env_interns_helpers_mut();
let module_id = env.module_id;
let (module_id, layout_interner, interns, rc_proc_gen) =
self.module_interns_helpers_mut();
let ident_ids = interns.all_ident_ids.get_mut(&module_id).unwrap();
rc_proc_gen.expand_refcount_stmt(ident_ids, layout, modify, following)
rc_proc_gen.expand_refcount_stmt(
ident_ids,
layout_interner,
layout,
modify,
following,
)
};
for spec in new_specializations.into_iter() {

View file

@ -12,7 +12,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol;
use roc_module::symbol::Interns;
use roc_mono::ir::{Proc, ProcLayout};
use roc_mono::layout::LayoutIds;
use roc_mono::layout::{LayoutIds, STLayoutInterner};
use roc_target::TargetInfo;
use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Triple};
@ -22,9 +22,10 @@ use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Tripl
/// build_module is the high level builder/delegator.
/// It takes the request to build a module and output the object file for the module.
pub fn build_module<'a>(
env: &'a Env,
interns: &'a mut Interns,
pub fn build_module<'a, 'r>(
env: &'r Env<'a>,
interns: &'r mut Interns,
layout_interner: &'r mut STLayoutInterner<'a>,
target: &Triple,
procedures: MutMap<(symbol::Symbol, ProcLayout<'a>), Proc<'a>>,
) -> Object<'a> {
@ -39,7 +40,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
>(env, TargetInfo::default_x86_64(), interns);
>(env, TargetInfo::default_x86_64(), interns, layout_interner);
build_object(
procedures,
backend,
@ -56,7 +57,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
>(env, TargetInfo::default_x86_64(), interns);
>(env, TargetInfo::default_x86_64(), interns, layout_interner);
build_object(
procedures,
backend,
@ -72,12 +73,13 @@ pub fn build_module<'a>(
binary_format: TargetBF::Elf,
..
} if cfg!(feature = "target-aarch64") => {
let backend = new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns);
let backend =
new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns, layout_interner);
build_object(
procedures,
backend,
@ -89,12 +91,13 @@ pub fn build_module<'a>(
binary_format: TargetBF::Macho,
..
} if cfg!(feature = "target-aarch64") => {
let backend = new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns);
let backend =
new_backend_64bit::<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
>(env, TargetInfo::default_aarch64(), interns, layout_interner);
build_object(
procedures,
backend,
@ -245,11 +248,11 @@ fn build_object<'a, B: Backend<'a>>(
let helper_procs = {
let module_id = backend.env().module_id;
let (env, interns, helper_proc_gen) = backend.env_interns_helpers_mut();
let (module_id, _interner, interns, helper_proc_gen) = backend.module_interns_helpers_mut();
let ident_ids = interns.all_ident_ids.get_mut(&module_id).unwrap();
let helper_procs = helper_proc_gen.take_procs();
env.module_id.register_debug_idents(ident_ids);
module_id.register_debug_idents(ident_ids);
helper_procs
};

View file

@ -9,6 +9,7 @@ edition = "2021"
[dependencies]
roc_alias_analysis = { path = "../alias_analysis" }
roc_collections = { path = "../collections" }
roc_intern = { path = "../intern" }
roc_module = { path = "../module" }
roc_builtins = { path = "../builtins" }
roc_error_macros = { path = "../../error_macros" }

View file

@ -12,8 +12,9 @@ use inkwell::values::{
use inkwell::{AddressSpace, FloatPredicate, IntPredicate};
use roc_builtins::bitcode;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_intern::Interner;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, LayoutIds, UnionLayout};
use roc_mono::layout::{Builtin, InLayout, Layout, LayoutIds, UnionLayout};
use super::build::{load_roc_value, use_roc_value, BuilderExt};
use super::convert::argument_type_from_union_layout;
@ -182,7 +183,7 @@ fn build_eq<'a, 'ctx, 'env>(
layout_ids,
when_recursive,
lhs_layout,
inner_layout,
*inner_layout,
lhs_val,
rhs_val,
),
@ -371,7 +372,7 @@ fn build_neq<'a, 'ctx, 'env>(
layout_ids,
when_recursive,
lhs_layout,
inner_layout,
*inner_layout,
lhs_val,
rhs_val,
)
@ -1283,7 +1284,7 @@ fn build_box_eq<'a, 'ctx, 'env>(
layout_ids: &mut LayoutIds<'a>,
when_recursive: WhenRecursive<'a>,
box_layout: &Layout<'a>,
inner_layout: &Layout<'a>,
inner_layout: InLayout<'a>,
tag1: BasicValueEnum<'ctx>,
tag2: BasicValueEnum<'ctx>,
) -> BasicValueEnum<'ctx> {
@ -1336,7 +1337,7 @@ fn build_box_eq_help<'a, 'ctx, 'env>(
layout_ids: &mut LayoutIds<'a>,
when_recursive: WhenRecursive<'a>,
parent: FunctionValue<'ctx>,
inner_layout: &Layout<'a>,
inner_layout: InLayout<'a>,
) {
let ctx = env.context;
let builder = env.builder;
@ -1400,6 +1401,8 @@ fn build_box_eq_help<'a, 'ctx, 'env>(
let box1 = box1.into_pointer_value();
let box2 = box2.into_pointer_value();
let inner_layout = env.layout_interner.get(inner_layout);
let value1 = load_roc_value(env, *inner_layout, box1, "load_box1");
let value2 = load_roc_value(env, *inner_layout, box2, "load_box2");

View file

@ -5,6 +5,7 @@ use inkwell::types::{BasicType, BasicTypeEnum, FloatType, IntType, StructType};
use inkwell::values::StructValue;
use inkwell::AddressSpace;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_intern::Interner;
use roc_mono::layout::{round_up_to_alignment, Builtin, Layout, STLayoutInterner, UnionLayout};
use roc_target::TargetInfo;
@ -38,6 +39,7 @@ pub fn basic_type_from_layout<'a, 'ctx, 'env>(
basic_type_from_layout(env, &lambda_set.runtime_representation(env.layout_interner))
}
Boxed(inner_layout) => {
let inner_layout = env.layout_interner.get(*inner_layout);
let inner_type = basic_type_from_layout(env, inner_layout);
inner_type.ptr_type(AddressSpace::Generic).into()

View file

@ -9,6 +9,7 @@ use inkwell::types::{BasicMetadataTypeEnum, BasicType, BasicTypeEnum};
use inkwell::values::{BasicValueEnum, FunctionValue, IntValue, PointerValue};
use inkwell::AddressSpace;
use roc_builtins::bitcode;
use roc_intern::Interner;
use roc_module::symbol::Symbol;
use roc_mono::ir::LookupType;
use roc_mono::layout::{Builtin, Layout, LayoutIds, UnionLayout};
@ -348,6 +349,7 @@ fn build_clone<'a, 'ctx, 'env>(
build_copy(env, ptr, cursors.offset, cursors.extra_offset.into());
let source = value.into_pointer_value();
let inner_layout = env.layout_interner.get(inner_layout);
let value = load_roc_value(env, *inner_layout, source, "inner");
let inner_width = env.ptr_int().const_int(

View file

@ -17,7 +17,7 @@ use inkwell::values::{
use inkwell::{AddressSpace, IntPredicate};
use roc_module::symbol::Interns;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, LayoutIds, STLayoutInterner, UnionLayout};
use roc_mono::layout::{Builtin, InLayout, Layout, LayoutIds, STLayoutInterner, UnionLayout};
use super::build::{cast_if_necessary_for_opaque_recursive_pointers, load_roc_value, FunctionSpec};
use super::convert::{argument_type_from_layout, argument_type_from_union_layout};
@ -531,7 +531,7 @@ fn modify_refcount_layout_build_function<'a, 'ctx, 'env>(
}
Boxed(inner) => {
let function = modify_refcount_boxed(env, layout_ids, mode, inner);
let function = modify_refcount_boxed(env, layout_ids, mode, *inner);
Some(function)
}
@ -851,7 +851,7 @@ fn modify_refcount_boxed<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout_ids: &mut LayoutIds<'a>,
mode: Mode,
inner_layout: &'a Layout<'a>,
inner_layout: InLayout<'a>,
) -> FunctionValue<'ctx> {
let block = env.builder.get_insert_block().expect("to be in a function");
let di_location = env.builder.get_current_debug_location().unwrap();
@ -889,7 +889,7 @@ fn modify_refcount_boxed<'a, 'ctx, 'env>(
fn modify_refcount_box_help<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
mode: Mode,
inner_layout: &Layout<'a>,
inner_layout: InLayout<'a>,
fn_val: FunctionValue<'ctx>,
) {
let builder = env.builder;

View file

@ -4,6 +4,7 @@ use bumpalo::collections::{String, Vec};
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_collections::all::MutMap;
use roc_error_macros::internal_error;
use roc_intern::Interner;
use roc_module::low_level::{LowLevel, LowLevelWrapperType};
use roc_module::symbol::{Interns, Symbol};
use roc_mono::code_gen_help::{CodeGenHelp, HelperOp, REFCOUNT_MAX};
@ -11,7 +12,7 @@ use roc_mono::ir::{
BranchInfo, CallType, CrashTag, Expr, JoinPointId, ListLiteralElement, Literal, ModifyRc,
Param, Proc, ProcLayout, Stmt,
};
use roc_mono::layout::{Builtin, Layout, LayoutIds, TagIdIntType, UnionLayout};
use roc_mono::layout::{Builtin, Layout, LayoutIds, STLayoutInterner, TagIdIntType, UnionLayout};
use roc_std::RocDec;
use roc_wasm_module::linking::{DataSymbol, WasmObjectSymbol};
@ -48,9 +49,10 @@ pub struct ProcLookupData<'a> {
pub source: ProcSource,
}
pub struct WasmBackend<'a> {
pub env: &'a Env<'a>,
interns: &'a mut Interns,
pub struct WasmBackend<'a, 'r> {
pub env: &'r Env<'a>,
pub(crate) layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
// Module-level data
module: WasmModule<'a>,
@ -72,11 +74,12 @@ pub struct WasmBackend<'a> {
joinpoint_label_map: MutMap<JoinPointId, (u32, Vec<'a, StoredValue>)>,
}
impl<'a> WasmBackend<'a> {
impl<'a, 'r> WasmBackend<'a, 'r> {
#[allow(clippy::too_many_arguments)]
pub fn new(
env: &'a Env<'a>,
interns: &'a mut Interns,
env: &'r Env<'a>,
layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
layout_ids: LayoutIds<'a>,
proc_lookup: Vec<'a, ProcLookupData<'a>>,
host_to_app_map: Vec<'a, (&'a str, u32)>,
@ -114,6 +117,7 @@ impl<'a> WasmBackend<'a> {
WasmBackend {
env,
layout_interner,
interns,
// Module-level data
@ -403,7 +407,7 @@ impl<'a> WasmBackend<'a> {
fn start_proc(&mut self, proc: &Proc<'a>) {
use ReturnMethod::*;
let ret_layout = WasmLayout::new(self.env.layout_interner, &proc.ret_layout);
let ret_layout = WasmLayout::new(self.layout_interner, &proc.ret_layout);
let ret_type = match ret_layout.return_method(CallConv::C) {
Primitive(ty, _) => Some(ty),
@ -422,7 +426,7 @@ impl<'a> WasmBackend<'a> {
self.start_block();
self.storage.allocate_args(
self.env.layout_interner,
self.layout_interner,
proc.args,
&mut self.code_builder,
self.env.arena,
@ -500,7 +504,9 @@ impl<'a> WasmBackend<'a> {
// Our convention is that the last arg of the wrapper is the heap return pointer
let heap_return_ptr_id = LocalId(wrapper_arg_layouts.len() as u32 - 1);
let inner_ret_layout = match wrapper_arg_layouts.last() {
Some(Layout::Boxed(inner)) => WasmLayout::new(self.env.layout_interner, inner),
Some(Layout::Boxed(inner)) => {
WasmLayout::new(self.layout_interner, self.layout_interner.get(*inner))
}
x => internal_error!("Higher-order wrapper: invalid return layout {:?}", x),
};
@ -531,10 +537,10 @@ impl<'a> WasmBackend<'a> {
}
let inner_layout = match wrapper_arg {
Layout::Boxed(inner) => inner,
Layout::Boxed(inner) => self.layout_interner.get(*inner),
x => internal_error!("Expected a Boxed layout, got {:?}", x),
};
if inner_layout.stack_size(self.env.layout_interner, TARGET_INFO) == 0 {
if inner_layout.stack_size(self.layout_interner, TARGET_INFO) == 0 {
continue;
}
@ -546,7 +552,7 @@ impl<'a> WasmBackend<'a> {
// If the inner function has closure data, it's the last arg of the inner fn
let closure_data_layout = wrapper_arg_layouts[0];
if closure_data_layout.stack_size(self.env.layout_interner, TARGET_INFO) > 0 {
if closure_data_layout.stack_size(self.layout_interner, TARGET_INFO) > 0 {
// The closure data exists, and will have been passed in to the wrapper as a
// one-element struct.
let inner_closure_data_layout = match closure_data_layout {
@ -621,13 +627,13 @@ impl<'a> WasmBackend<'a> {
let value_layout = wrapper_proc_layout.arguments[1];
let mut n_inner_args = 2;
if closure_data_layout.stack_size(self.env.layout_interner, TARGET_INFO) > 0 {
if closure_data_layout.stack_size(self.layout_interner, TARGET_INFO) > 0 {
self.code_builder.get_local(LocalId(0));
n_inner_args += 1;
}
let inner_layout = match value_layout {
Layout::Boxed(inner) => inner,
Layout::Boxed(inner) => self.layout_interner.get(inner),
x => internal_error!("Expected a Boxed layout, got {:?}", x),
};
self.code_builder.get_local(LocalId(1));
@ -770,7 +776,7 @@ impl<'a> WasmBackend<'a> {
) {
let sym_storage = self
.storage
.allocate_var(self.env.layout_interner, *layout, sym, kind);
.allocate_var(self.layout_interner, *layout, sym, kind);
self.expr(sym, expr, layout, &sym_storage);
@ -848,7 +854,7 @@ impl<'a> WasmBackend<'a> {
let is_bool = matches!(cond_layout, Layout::Builtin(Builtin::Bool));
let cond_type =
WasmLayout::new(self.env.layout_interner, cond_layout).arg_types(CallConv::C)[0];
WasmLayout::new(self.layout_interner, cond_layout).arg_types(CallConv::C)[0];
// then, we jump whenever the value under scrutiny is equal to the value of a branch
for (i, (value, _, _)) in branches.iter().enumerate() {
@ -910,7 +916,7 @@ impl<'a> WasmBackend<'a> {
let mut jp_param_storages = Vec::with_capacity_in(parameters.len(), self.env.arena);
for parameter in parameters.iter() {
let mut param_storage = self.storage.allocate_var(
self.env.layout_interner,
self.layout_interner,
parameter.layout,
parameter.symbol,
StoredVarKind::Variable,
@ -967,15 +973,19 @@ impl<'a> WasmBackend<'a> {
.get_mut(&self.env.module_id)
.unwrap();
let (rc_stmt, new_specializations) = self
.helper_proc_gen
.expand_refcount_stmt(ident_ids, layout, modify, following);
let (rc_stmt, new_specializations) = self.helper_proc_gen.expand_refcount_stmt(
ident_ids,
self.layout_interner,
layout,
modify,
following,
);
if false {
self.register_symbol_debug_names();
println!(
"## rc_stmt:\n{}\n{:?}",
rc_stmt.to_pretty(self.env.layout_interner, 200, true),
rc_stmt.to_pretty(self.layout_interner, 200, true),
rc_stmt
);
}
@ -991,7 +1001,7 @@ impl<'a> WasmBackend<'a> {
pub fn stmt_internal_error(&mut self, msg: &'a str) {
let msg_sym = self.create_symbol("panic_str");
let msg_storage = self.storage.allocate_var(
self.env.layout_interner,
self.layout_interner,
Layout::Builtin(Builtin::Str),
msg_sym,
StoredVarKind::Variable,
@ -1264,7 +1274,7 @@ impl<'a> WasmBackend<'a> {
ret_layout,
} => {
let name = foreign_symbol.as_str();
let wasm_layout = WasmLayout::new(self.env.layout_interner, ret_layout);
let wasm_layout = WasmLayout::new(self.layout_interner, ret_layout);
let (num_wasm_args, has_return_val, ret_zig_packed_struct) =
self.storage.load_symbols_for_call(
self.env.arena,
@ -1289,7 +1299,7 @@ impl<'a> WasmBackend<'a> {
ret_layout: &Layout<'a>,
ret_storage: &StoredValue,
) {
let wasm_layout = WasmLayout::new(self.env.layout_interner, ret_layout);
let wasm_layout = WasmLayout::new(self.layout_interner, ret_layout);
// If this function is just a lowlevel wrapper, then inline it
if let LowLevelWrapperType::CanBeReplacedBy(lowlevel) =
@ -1391,7 +1401,7 @@ impl<'a> WasmBackend<'a> {
// Get an IR expression for the call to the specialized procedure
let (specialized_call_expr, new_specializations) = self
.helper_proc_gen
.call_specialized_equals(ident_ids, arg_layout, arguments);
.call_specialized_equals(ident_ids, self.layout_interner, arg_layout, arguments);
// If any new specializations were created, register their symbol data
for (spec_sym, spec_layout) in new_specializations.into_iter() {
@ -1446,7 +1456,7 @@ impl<'a> WasmBackend<'a> {
}
Layout::LambdaSet(lambdaset) => self.expr_struct(
sym,
&lambdaset.runtime_representation(self.env.layout_interner),
&lambdaset.runtime_representation(self.layout_interner),
storage,
fields,
),
@ -1497,7 +1507,7 @@ impl<'a> WasmBackend<'a> {
}
};
for field in field_layouts.iter().take(index as usize) {
offset += field.stack_size(self.env.layout_interner, TARGET_INFO);
offset += field.stack_size(self.layout_interner, TARGET_INFO);
}
self.storage
.copy_value_from_memory(&mut self.code_builder, sym, from_addr_val, offset);
@ -1515,12 +1525,12 @@ impl<'a> WasmBackend<'a> {
elems: &'a [ListLiteralElement<'a>],
) {
if let StoredValue::StackMemory { location, .. } = storage {
let size = elem_layout.stack_size(self.env.layout_interner, TARGET_INFO)
* (elems.len() as u32);
let size =
elem_layout.stack_size(self.layout_interner, TARGET_INFO) * (elems.len() as u32);
// Allocate heap space and store its address in a local variable
let heap_local_id = self.storage.create_anonymous_local(PTR_TYPE);
let heap_alignment = elem_layout.alignment_bytes(self.env.layout_interner, TARGET_INFO);
let heap_alignment = elem_layout.alignment_bytes(self.layout_interner, TARGET_INFO);
self.allocate_with_refcount(Some(size), heap_alignment, 1);
self.code_builder.set_local(heap_local_id);
@ -1618,7 +1628,7 @@ impl<'a> WasmBackend<'a> {
let stores_tag_id_as_data = union_layout.stores_tag_id_as_data(TARGET_INFO);
let stores_tag_id_in_pointer = union_layout.stores_tag_id_in_pointer(TARGET_INFO);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(self.env.layout_interner, TARGET_INFO);
union_layout.data_size_and_alignment(self.layout_interner, TARGET_INFO);
// We're going to use the pointer many times, so put it in a local variable
let stored_with_local =
@ -1672,7 +1682,7 @@ impl<'a> WasmBackend<'a> {
if stores_tag_id_as_data {
let id_offset = data_offset
+ union_layout
.tag_id_offset(self.env.layout_interner, TARGET_INFO)
.tag_id_offset(self.layout_interner, TARGET_INFO)
.unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
@ -1757,7 +1767,7 @@ impl<'a> WasmBackend<'a> {
if union_layout.stores_tag_id_as_data(TARGET_INFO) {
let id_offset = union_layout
.tag_id_offset(self.env.layout_interner, TARGET_INFO)
.tag_id_offset(self.layout_interner, TARGET_INFO)
.unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
@ -1818,7 +1828,7 @@ impl<'a> WasmBackend<'a> {
let field_offset: u32 = field_layouts
.iter()
.take(index as usize)
.map(|field_layout| field_layout.stack_size(self.env.layout_interner, TARGET_INFO))
.map(|field_layout| field_layout.stack_size(self.layout_interner, TARGET_INFO))
.sum();
// Get pointer and offset to the tag's data
@ -1881,11 +1891,11 @@ impl<'a> WasmBackend<'a> {
// allocate heap memory and load its data address onto the value stack
let arg_layout = match layout {
Layout::Boxed(arg) => *arg,
Layout::Boxed(arg) => self.layout_interner.get(*arg),
_ => internal_error!("ExprBox should always produce a Boxed layout"),
};
let (size, alignment) =
arg_layout.stack_size_and_alignment(self.env.layout_interner, TARGET_INFO);
arg_layout.stack_size_and_alignment(self.layout_interner, TARGET_INFO);
self.allocate_with_refcount(Some(size), alignment, 1);
// store the pointer value from the value stack into the local variable
@ -1984,7 +1994,7 @@ impl<'a> WasmBackend<'a> {
let layout = self.storage.symbol_layouts[&argument];
let (specialized_call_expr, new_specializations) = self
.helper_proc_gen
.call_reset_refcount(ident_ids, layout, argument);
.call_reset_refcount(ident_ids, self.layout_interner, layout, argument);
// If any new specializations were created, register their symbol data
for (spec_sym, spec_layout) in new_specializations.into_iter() {
@ -2009,9 +2019,9 @@ impl<'a> WasmBackend<'a> {
.get_mut(&self.env.module_id)
.unwrap();
let (proc_symbol, new_specializations) = self
.helper_proc_gen
.gen_refcount_proc(ident_ids, layout, op);
let (proc_symbol, new_specializations) =
self.helper_proc_gen
.gen_refcount_proc(ident_ids, self.layout_interner, layout, op);
// If any new specializations were created, register their symbol data
for (spec_sym, spec_layout) in new_specializations.into_iter() {

View file

@ -43,7 +43,6 @@ pub const STACK_POINTER_NAME: &str = "__stack_pointer";
pub struct Env<'a> {
pub arena: &'a Bump,
pub layout_interner: &'a STLayoutInterner<'a>,
pub module_id: ModuleId,
pub exposed_to_host: MutSet<Symbol>,
pub stack_bytes: u32,
@ -65,13 +64,15 @@ pub fn parse_host<'a>(arena: &'a Bump, host_bytes: &[u8]) -> Result<WasmModule<'
/// interns names of functions and variables (as memory-efficient interned strings)
/// host_module parsed module from a Wasm object file containing all of the non-Roc code
/// procedures Roc code in monomorphized intermediate representation
pub fn build_app_binary<'a>(
env: &'a Env<'a>,
interns: &'a mut Interns,
pub fn build_app_binary<'a, 'r>(
env: &'r Env<'a>,
layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
host_module: WasmModule<'a>,
procedures: MutMap<(Symbol, ProcLayout<'a>), Proc<'a>>,
) -> std::vec::Vec<u8> {
let (mut wasm_module, called_fns, _) = build_app_module(env, interns, host_module, procedures);
let (mut wasm_module, called_fns, _) =
build_app_module(env, layout_interner, interns, host_module, procedures);
wasm_module.eliminate_dead_code(env.arena, called_fns);
@ -84,9 +85,10 @@ pub fn build_app_binary<'a>(
/// Shared by all consumers of gen_wasm: roc_build, roc_repl_wasm, and test_gen
/// (roc_repl_wasm and test_gen will add more generated code for a wrapper function
/// that defines a common interface to `main`, independent of return type.)
pub fn build_app_module<'a>(
env: &'a Env<'a>,
interns: &'a mut Interns,
pub fn build_app_module<'a, 'r>(
env: &'r Env<'a>,
layout_interner: &'r mut STLayoutInterner<'a>,
interns: &'r mut Interns,
host_module: WasmModule<'a>,
procedures: MutMap<(Symbol, ProcLayout<'a>), Proc<'a>>,
) -> (WasmModule<'a>, BitVec<usize>, u32) {
@ -126,24 +128,20 @@ pub fn build_app_module<'a>(
let mut backend = WasmBackend::new(
env,
layout_interner,
interns,
layout_ids,
proc_lookup,
host_to_app_map,
host_module,
fn_index_offset,
CodeGenHelp::new(
env.arena,
env.layout_interner,
TargetInfo::default_wasm32(),
env.module_id,
),
CodeGenHelp::new(env.arena, TargetInfo::default_wasm32(), env.module_id),
);
if DEBUG_SETTINGS.user_procs_ir {
println!("## procs");
for proc in procs.iter() {
println!("{}", proc.to_pretty(env.layout_interner, 200, true));
println!("{}", proc.to_pretty(backend.layout_interner, 200, true));
// println!("{:?}", proc);
}
}
@ -161,7 +159,7 @@ pub fn build_app_module<'a>(
if DEBUG_SETTINGS.helper_procs_ir {
println!("## helper_procs");
for proc in helper_procs.iter() {
println!("{}", proc.to_pretty(env.layout_interner, 200, true));
println!("{}", proc.to_pretty(backend.layout_interner, 200, true));
// println!("{:#?}", proc);
}
}

View file

@ -2,6 +2,7 @@ use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_builtins::bitcode::{self, FloatWidth, IntWidth};
use roc_error_macros::internal_error;
use roc_intern::Interner;
use roc_module::low_level::LowLevel;
use roc_module::symbol::Symbol;
use roc_mono::code_gen_help::HelperOp;
@ -34,7 +35,7 @@ enum CodeGenNumType {
}
impl CodeGenNumType {
pub fn for_symbol(backend: &WasmBackend<'_>, symbol: Symbol) -> Self {
pub fn for_symbol(backend: &WasmBackend<'_, '_>, symbol: Symbol) -> Self {
Self::from(backend.storage.get(&symbol))
}
}
@ -124,7 +125,7 @@ fn layout_is_signed_int(layout: &Layout) -> bool {
}
}
fn symbol_is_signed_int(backend: &WasmBackend<'_>, symbol: Symbol) -> bool {
fn symbol_is_signed_int(backend: &WasmBackend<'_, '_>, symbol: Symbol) -> bool {
layout_is_signed_int(&backend.storage.symbol_layouts[&symbol])
}
@ -141,18 +142,18 @@ impl<'a> LowLevelCall<'a> {
/// For numerical ops, this just pushes the arguments to the Wasm VM's value stack
/// It implements the calling convention used by Zig for both numbers and structs
/// Result is the type signature of the call
fn load_args(&self, backend: &mut WasmBackend<'a>) -> (usize, bool, bool) {
fn load_args(&self, backend: &mut WasmBackend<'a, '_>) -> (usize, bool, bool) {
backend.storage.load_symbols_for_call(
backend.env.arena,
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
)
}
fn load_args_and_call_zig(&self, backend: &mut WasmBackend<'a>, name: &'a str) {
fn load_args_and_call_zig(&self, backend: &mut WasmBackend<'a, '_>, name: &'a str) {
let (num_wasm_args, has_return_val, ret_zig_packed_struct) = self.load_args(backend);
backend.call_host_fn_after_loading_args(name, num_wasm_args, has_return_val);
@ -182,7 +183,7 @@ impl<'a> LowLevelCall<'a> {
/// This may seem like deliberately introducing an error!
/// But we want all targets to behave the same, and hash algos rely on wrapping.
/// Discussion: https://github.com/roc-lang/roc/pull/2117#discussion_r760723063
fn wrap_small_int(&self, backend: &mut WasmBackend<'a>, int_width: IntWidth) {
fn wrap_small_int(&self, backend: &mut WasmBackend<'a, '_>, int_width: IntWidth) {
let bits = 8 * int_width.stack_size() as i32;
let shift = 32 - bits;
if shift <= 0 {
@ -200,7 +201,7 @@ impl<'a> LowLevelCall<'a> {
}
/// Main entrypoint from WasmBackend
pub fn generate(&self, backend: &mut WasmBackend<'a>) {
pub fn generate(&self, backend: &mut WasmBackend<'a, '_>) {
use CodeGenNumType::*;
use LowLevel::*;
@ -281,7 +282,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
backend.code_builder.i32_const(UPDATE_MODE_IMMUTABLE);
@ -360,7 +361,7 @@ impl<'a> LowLevelCall<'a> {
.load_symbols(&mut backend.code_builder, &[index]);
let elem_size = self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO);
.stack_size(backend.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_size as i32);
backend.code_builder.i32_mul(); // index*size
@ -418,7 +419,7 @@ impl<'a> LowLevelCall<'a> {
} if value_layout == *list_elem => {
let list_offset = 0;
let elem_offset = Layout::Builtin(Builtin::List(list_elem))
.stack_size(backend.env.layout_interner, TARGET_INFO);
.stack_size(backend.layout_interner, TARGET_INFO);
(list_offset, elem_offset, value_layout)
}
Layout::Struct {
@ -426,7 +427,7 @@ impl<'a> LowLevelCall<'a> {
..
} if value_layout == *list_elem => {
let list_offset =
value_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
value_layout.stack_size(backend.layout_interner, TARGET_INFO);
let elem_offset = 0;
(list_offset, elem_offset, value_layout)
}
@ -434,7 +435,7 @@ impl<'a> LowLevelCall<'a> {
};
let (elem_width, elem_alignment) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Ensure the new element is stored in memory so we can pass a pointer to Zig
let (new_elem_local, new_elem_offset, _) =
@ -484,7 +485,7 @@ impl<'a> LowLevelCall<'a> {
let capacity: Symbol = self.arguments[0];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Zig arguments Wasm types
// (return pointer) i32
@ -515,14 +516,14 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
// Load monomorphization constants
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_align as i32);
backend.code_builder.i32_const(elem_width as i32);
@ -537,7 +538,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (spare_local, spare_offset, _) = ensure_symbol_is_in_memory(
backend,
spare,
@ -559,7 +560,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -585,7 +586,7 @@ impl<'a> LowLevelCall<'a> {
let elem: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let elem_width = elem_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
let elem_width = elem_layout.stack_size(backend.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@ -601,7 +602,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -623,7 +624,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@ -640,7 +641,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -665,7 +666,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@ -690,7 +691,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -710,7 +711,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@ -735,7 +736,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -756,7 +757,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Zig arguments Wasm types
// (return pointer) i32
@ -773,7 +774,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -1631,7 +1632,7 @@ impl<'a> LowLevelCall<'a> {
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO)
.stack_size(backend.layout_interner, TARGET_INFO)
as i32;
if bit_width < 32 && !symbol_is_signed_int(backend, num) {
// Sign-extend the number by shifting left and right again
@ -1680,7 +1681,7 @@ impl<'a> LowLevelCall<'a> {
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO);
.stack_size(backend.layout_interner, TARGET_INFO);
if bit_width < 32 && symbol_is_signed_int(backend, num) {
let mask = (1 << bit_width) - 1;
@ -1872,11 +1873,11 @@ impl<'a> LowLevelCall<'a> {
/// Equality and inequality
/// These can operate on any data type (except functions) so they're more complex than other operators.
fn eq_or_neq(&self, backend: &mut WasmBackend<'a>) {
fn eq_or_neq(&self, backend: &mut WasmBackend<'a, '_>) {
let arg_layout = backend.storage.symbol_layouts[&self.arguments[0]]
.runtime_representation(backend.env.layout_interner);
.runtime_representation(backend.layout_interner);
let other_arg_layout = backend.storage.symbol_layouts[&self.arguments[1]]
.runtime_representation(backend.env.layout_interner);
.runtime_representation(backend.layout_interner);
debug_assert!(
arg_layout == other_arg_layout,
"Cannot do `==` comparison on different types: {:?} vs {:?}",
@ -1941,7 +1942,7 @@ impl<'a> LowLevelCall<'a> {
}
}
fn eq_or_neq_number(&self, backend: &mut WasmBackend<'a>) {
fn eq_or_neq_number(&self, backend: &mut WasmBackend<'a, '_>) {
use StoredValue::*;
match backend.storage.get(&self.arguments[0]).to_owned() {
@ -1986,7 +1987,7 @@ impl<'a> LowLevelCall<'a> {
/// Takes care of loading the arguments
fn eq_num128(
&self,
backend: &mut WasmBackend<'a>,
backend: &mut WasmBackend<'a, '_>,
format: StackMemoryFormat,
locations: [StackMemoryLocation; 2],
) {
@ -2004,7 +2005,7 @@ impl<'a> LowLevelCall<'a> {
/// Check that two 128-bit numbers contain the same bytes
/// Loads *half* an argument at a time
/// (Don't call "load arguments" or "load symbols" helpers before this, it'll just waste instructions)
fn eq_num128_bytes(backend: &mut WasmBackend<'a>, locations: [StackMemoryLocation; 2]) {
fn eq_num128_bytes(backend: &mut WasmBackend<'a, '_>, locations: [StackMemoryLocation; 2]) {
let (local0, offset0) = locations[0].local_and_offset(backend.storage.stack_frame_pointer);
let (local1, offset1) = locations[1].local_and_offset(backend.storage.stack_frame_pointer);
@ -2026,7 +2027,7 @@ impl<'a> LowLevelCall<'a> {
backend.code_builder.i32_and();
}
fn num_to_str(&self, backend: &mut WasmBackend<'a>) {
fn num_to_str(&self, backend: &mut WasmBackend<'a, '_>) {
let arg_layout = backend.storage.symbol_layouts[&self.arguments[0]];
match arg_layout {
Layout::Builtin(Builtin::Int(width)) => {
@ -2051,7 +2052,7 @@ impl<'a> LowLevelCall<'a> {
}
/// Helper for NumIsFinite op, and also part of Eq/NotEq
fn num_is_finite(backend: &mut WasmBackend<'_>, argument: Symbol) {
fn num_is_finite(backend: &mut WasmBackend<'_, '_>, argument: Symbol) {
use StoredValue::*;
let stored = backend.storage.get(&argument).to_owned();
match stored {
@ -2094,7 +2095,7 @@ fn num_is_finite(backend: &mut WasmBackend<'_>, argument: Symbol) {
}
pub fn call_higher_order_lowlevel<'a>(
backend: &mut WasmBackend<'a>,
backend: &mut WasmBackend<'a, '_>,
return_sym: Symbol,
return_layout: &Layout<'a>,
higher_order: &'a HigherOrderLowLevel<'a>,
@ -2131,12 +2132,9 @@ pub fn call_higher_order_lowlevel<'a>(
let (closure_data_layout, closure_data_exists) =
match backend.storage.symbol_layouts[captured_environment] {
Layout::LambdaSet(lambda_set) => {
if lambda_set
.is_represented(backend.env.layout_interner)
.is_some()
{
if lambda_set.is_represented(backend.layout_interner).is_some() {
(
lambda_set.runtime_representation(backend.env.layout_interner),
lambda_set.runtime_representation(backend.layout_interner),
true,
)
} else {
@ -2162,7 +2160,7 @@ pub fn call_higher_order_lowlevel<'a>(
// make sure that the wrapping struct is available in stack memory, so we can hand out a
// pointer to it.
let wrapped_storage = backend.storage.allocate_var(
backend.env.layout_interner,
backend.layout_interner,
wrapped_captures_layout,
wrapped_closure_data_sym,
crate::storage::StoredVarKind::Variable,
@ -2226,17 +2224,19 @@ pub fn call_higher_order_lowlevel<'a>(
argument_layouts.len()
};
let boxed_closure_arg_layouts =
argument_layouts.iter().take(n_non_closure_args).map(|lay| {
let lay_in = backend.layout_interner.insert(lay);
Layout::Boxed(lay_in)
});
wrapper_arg_layouts.push(wrapped_captures_layout);
wrapper_arg_layouts.extend(
argument_layouts
.iter()
.take(n_non_closure_args)
.map(Layout::Boxed),
);
wrapper_arg_layouts.extend(boxed_closure_arg_layouts);
match helper_proc_source {
ProcSource::HigherOrderMapper(_) => {
// Our convention for mappers is that they write to the heap via the last argument
let result_layout = backend.layout_interner.insert(result_layout);
wrapper_arg_layouts.push(Layout::Boxed(result_layout));
ProcLayout {
arguments: wrapper_arg_layouts.into_bump_slice(),
@ -2326,7 +2326,7 @@ pub fn call_higher_order_lowlevel<'a>(
ListSortWith { xs } => {
let elem_layout = unwrap_list_elem_layout(backend.storage.symbol_layouts[xs]);
let (element_width, alignment) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@ -2371,7 +2371,7 @@ fn unwrap_list_elem_layout(list_layout: Layout<'_>) -> &Layout<'_> {
#[allow(clippy::too_many_arguments)]
fn list_map_n<'a>(
zig_fn_name: &'static str,
backend: &mut WasmBackend<'a>,
backend: &mut WasmBackend<'a, '_>,
arg_symbols: &[Symbol],
return_sym: Symbol,
return_layout: Layout<'a>,
@ -2390,7 +2390,7 @@ fn list_map_n<'a>(
let elem_ret = unwrap_list_elem_layout(return_layout);
let (elem_ret_size, elem_ret_align) =
elem_ret.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
elem_ret.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@ -2411,7 +2411,7 @@ fn list_map_n<'a>(
cb.i32_const(owns_captured_environment as i32);
cb.i32_const(elem_ret_align as i32);
for el in arg_elem_layouts.iter() {
cb.i32_const(el.stack_size(backend.env.layout_interner, TARGET_INFO) as i32);
cb.i32_const(el.stack_size(backend.layout_interner, TARGET_INFO) as i32);
}
cb.i32_const(elem_ret_size as i32);
@ -2438,7 +2438,7 @@ fn list_map_n<'a>(
}
fn ensure_symbol_is_in_memory<'a>(
backend: &mut WasmBackend<'a>,
backend: &mut WasmBackend<'a, '_>,
symbol: Symbol,
layout: Layout<'a>,
arena: &'a Bump,
@ -2451,7 +2451,7 @@ fn ensure_symbol_is_in_memory<'a>(
}
_ => {
let (width, alignment) =
layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (frame_ptr, offset) = backend
.storage
.allocate_anonymous_stack_memory(width, alignment);

View file

@ -1,11 +1,12 @@
use bumpalo::collections::vec::Vec;
use roc_intern::Interner;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{IdentIds, Symbol};
use crate::ir::{
BranchInfo, Call, CallType, Expr, JoinPointId, Literal, Param, Stmt, UpdateModeId,
};
use crate::layout::{Builtin, Layout, TagIdIntType, UnionLayout};
use crate::layout::{Builtin, InLayout, Layout, STLayoutInterner, TagIdIntType, UnionLayout};
use super::{let_lowlevel, CodeGenHelp, Context, LAYOUT_BOOL};
@ -16,6 +17,7 @@ pub fn eq_generic<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
) -> Stmt<'a> {
let main_body = match layout {
@ -28,10 +30,18 @@ pub fn eq_generic<'a>(
Layout::Builtin(Builtin::Str) => {
unreachable!("No generated helper proc for `==` on Str. Use Zig function.")
}
Layout::Builtin(Builtin::List(elem_layout)) => eq_list(root, ident_ids, ctx, elem_layout),
Layout::Struct { field_layouts, .. } => eq_struct(root, ident_ids, ctx, field_layouts),
Layout::Union(union_layout) => eq_tag_union(root, ident_ids, ctx, union_layout),
Layout::Boxed(inner_layout) => eq_boxed(root, ident_ids, ctx, inner_layout),
Layout::Builtin(Builtin::List(elem_layout)) => {
eq_list(root, ident_ids, ctx, layout_interner, elem_layout)
}
Layout::Struct { field_layouts, .. } => {
eq_struct(root, ident_ids, ctx, layout_interner, field_layouts)
}
Layout::Union(union_layout) => {
eq_tag_union(root, ident_ids, ctx, layout_interner, union_layout)
}
Layout::Boxed(inner_layout) => {
eq_boxed(root, ident_ids, ctx, layout_interner, inner_layout)
}
Layout::LambdaSet(_) => unreachable!("`==` is not defined on functions"),
Layout::RecursivePointer => {
unreachable!(
@ -129,6 +139,7 @@ fn eq_struct<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
field_layouts: &'a [Layout<'a>],
) -> Stmt<'a> {
let mut else_stmt = Stmt::Ret(Symbol::BOOL_TRUE);
@ -153,6 +164,7 @@ fn eq_struct<'a>(
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
*layout,
root.arena.alloc([field1_sym, field2_sym]),
)
@ -181,6 +193,7 @@ fn eq_tag_union<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
) -> Stmt<'a> {
use UnionLayout::*;
@ -191,13 +204,37 @@ fn eq_tag_union<'a>(
}
let body = match union_layout {
NonRecursive(tags) => eq_tag_union_help(root, ident_ids, ctx, union_layout, tags, None),
NonRecursive(tags) => eq_tag_union_help(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tags,
None,
),
Recursive(tags) => eq_tag_union_help(root, ident_ids, ctx, union_layout, tags, None),
Recursive(tags) => eq_tag_union_help(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tags,
None,
),
NonNullableUnwrapped(field_layouts) => {
let tags = root.arena.alloc([field_layouts]);
eq_tag_union_help(root, ident_ids, ctx, union_layout, tags, None)
eq_tag_union_help(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tags,
None,
)
}
NullableWrapped {
@ -207,6 +244,7 @@ fn eq_tag_union<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
other_tags,
Some(nullable_id),
@ -219,6 +257,7 @@ fn eq_tag_union<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
root.arena.alloc([other_fields]),
Some(nullable_id as TagIdIntType),
@ -234,6 +273,7 @@ fn eq_tag_union_help<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
tag_layouts: &'a [&'a [Layout<'a>]],
nullable_id: Option<TagIdIntType>,
@ -314,6 +354,7 @@ fn eq_tag_union_help<'a>(
root,
ident_ids,
ctx,
layout_interner,
tailrec_loop,
union_layout,
field_layouts,
@ -335,6 +376,7 @@ fn eq_tag_union_help<'a>(
root,
ident_ids,
ctx,
layout_interner,
tailrec_loop,
union_layout,
tag_layouts.last().unwrap(),
@ -395,6 +437,7 @@ fn eq_tag_fields<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
tailrec_loop: JoinPointId,
union_layout: UnionLayout<'a>,
field_layouts: &'a [Layout<'a>],
@ -482,6 +525,7 @@ fn eq_tag_fields<'a>(
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
*layout,
root.arena.alloc([field1_sym, field2_sym]),
)
@ -530,8 +574,11 @@ fn eq_boxed<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
inner_layout: &'a Layout<'a>,
layout_interner: &mut STLayoutInterner<'a>,
inner_layout: InLayout<'a>,
) -> Stmt<'a> {
let inner_layout = layout_interner.get(inner_layout);
let a = root.create_symbol(ident_ids, "a");
let b = root.create_symbol(ident_ids, "b");
let result = root.create_symbol(ident_ids, "result");
@ -539,7 +586,13 @@ fn eq_boxed<'a>(
let a_expr = Expr::ExprUnbox { symbol: ARG_1 };
let b_expr = Expr::ExprUnbox { symbol: ARG_2 };
let eq_call_expr = root
.call_specialized_op(ident_ids, ctx, *inner_layout, root.arena.alloc([a, b]))
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
*inner_layout,
root.arena.alloc([a, b]),
)
.unwrap();
Stmt::Let(
@ -576,6 +629,7 @@ fn eq_list<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
elem_layout: &Layout<'a>,
) -> Stmt<'a> {
use LowLevel::*;
@ -629,7 +683,7 @@ fn eq_list<'a>(
// let size = literal int
let size = root.create_symbol(ident_ids, "size");
let size_expr = Expr::Literal(Literal::Int(
(elem_layout.stack_size(root.layout_interner, root.target_info) as i128).to_ne_bytes(),
(elem_layout.stack_size(layout_interner, root.target_info) as i128).to_ne_bytes(),
));
let size_stmt = |next| Stmt::Let(size, size_expr, layout_isize, next);
@ -703,7 +757,7 @@ fn eq_list<'a>(
let eq_elems = root.create_symbol(ident_ids, "eq_elems");
let eq_elems_args = root.arena.alloc([elem1, elem2]);
let eq_elems_expr = root
.call_specialized_op(ident_ids, ctx, *elem_layout, eq_elems_args)
.call_specialized_op(ident_ids, ctx, layout_interner, *elem_layout, eq_elems_args)
.unwrap();
let eq_elems_stmt = |next| Stmt::Let(eq_elems, eq_elems_expr, LAYOUT_BOOL, next);

View file

@ -1,5 +1,6 @@
use bumpalo::collections::vec::Vec;
use bumpalo::Bump;
use roc_intern::Interner;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{IdentIds, ModuleId, Symbol};
use roc_target::TargetInfo;
@ -73,7 +74,6 @@ pub struct Context<'a> {
///
pub struct CodeGenHelp<'a> {
arena: &'a Bump,
layout_interner: &'a STLayoutInterner<'a>,
home: ModuleId,
target_info: TargetInfo,
layout_isize: Layout<'a>,
@ -83,12 +83,7 @@ pub struct CodeGenHelp<'a> {
}
impl<'a> CodeGenHelp<'a> {
pub fn new(
arena: &'a Bump,
layout_interner: &'a STLayoutInterner<'a>,
target_info: TargetInfo,
home: ModuleId,
) -> Self {
pub fn new(arena: &'a Bump, target_info: TargetInfo, home: ModuleId) -> Self {
let layout_isize = Layout::isize(target_info);
// Refcount is a boxed isize. TODO: use the new Box layout when dev backends support it
@ -96,7 +91,6 @@ impl<'a> CodeGenHelp<'a> {
CodeGenHelp {
arena,
layout_interner,
home,
target_info,
layout_isize,
@ -125,11 +119,12 @@ impl<'a> CodeGenHelp<'a> {
pub fn expand_refcount_stmt(
&mut self,
ident_ids: &mut IdentIds,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
modify: &ModifyRc,
following: &'a Stmt<'a>,
) -> (&'a Stmt<'a>, Vec<'a, (Symbol, ProcLayout<'a>)>) {
if !refcount::is_rc_implemented_yet(self.layout_interner, &layout) {
if !refcount::is_rc_implemented_yet(layout_interner, &layout) {
// Just a warning, so we can decouple backend development from refcounting development.
// When we are closer to completion, we can change it to a panic.
println!(
@ -154,13 +149,22 @@ impl<'a> CodeGenHelp<'a> {
op,
};
let rc_stmt = refcount::refcount_stmt(self, ident_ids, &mut ctx, layout, modify, following);
let rc_stmt = refcount::refcount_stmt(
self,
ident_ids,
&mut ctx,
layout_interner,
layout,
modify,
following,
);
(rc_stmt, ctx.new_linker_data)
}
pub fn call_reset_refcount(
&mut self,
ident_ids: &mut IdentIds,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
argument: Symbol,
) -> (Expr<'a>, Vec<'a, (Symbol, ProcLayout<'a>)>) {
@ -170,7 +174,7 @@ impl<'a> CodeGenHelp<'a> {
op: HelperOp::Reset,
};
let proc_name = self.find_or_create_proc(ident_ids, &mut ctx, layout);
let proc_name = self.find_or_create_proc(ident_ids, &mut ctx, layout_interner, layout);
let arguments = self.arena.alloc([argument]);
let ret_layout = self.arena.alloc(layout);
@ -194,6 +198,7 @@ impl<'a> CodeGenHelp<'a> {
pub fn gen_refcount_proc(
&mut self,
ident_ids: &mut IdentIds,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
op: HelperOp,
) -> (Symbol, Vec<'a, (Symbol, ProcLayout<'a>)>) {
@ -203,7 +208,7 @@ impl<'a> CodeGenHelp<'a> {
op,
};
let proc_name = self.find_or_create_proc(ident_ids, &mut ctx, layout);
let proc_name = self.find_or_create_proc(ident_ids, &mut ctx, layout_interner, layout);
(proc_name, ctx.new_linker_data)
}
@ -213,6 +218,7 @@ impl<'a> CodeGenHelp<'a> {
pub fn call_specialized_equals(
&mut self,
ident_ids: &mut IdentIds,
layout_interner: &mut STLayoutInterner<'a>,
layout: &Layout<'a>,
arguments: &'a [Symbol],
) -> (Expr<'a>, Vec<'a, (Symbol, ProcLayout<'a>)>) {
@ -223,7 +229,7 @@ impl<'a> CodeGenHelp<'a> {
};
let expr = self
.call_specialized_op(ident_ids, &mut ctx, *layout, arguments)
.call_specialized_op(ident_ids, &mut ctx, layout_interner, *layout, arguments)
.unwrap();
(expr, ctx.new_linker_data)
@ -239,6 +245,7 @@ impl<'a> CodeGenHelp<'a> {
&mut self,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
called_layout: Layout<'a>,
arguments: &'a [Symbol],
) -> Option<Expr<'a>> {
@ -255,10 +262,10 @@ impl<'a> CodeGenHelp<'a> {
};
if layout_needs_helper_proc(&layout, ctx.op) {
let proc_name = self.find_or_create_proc(ident_ids, ctx, layout);
let proc_name = self.find_or_create_proc(ident_ids, ctx, layout_interner, layout);
let (ret_layout, arg_layouts): (&'a Layout<'a>, &'a [Layout<'a>]) = {
let arg = self.replace_rec_ptr(ctx, layout);
let arg = self.replace_rec_ptr(ctx, layout_interner, layout);
match ctx.op {
Dec | DecRef(_) => (&LAYOUT_UNIT, self.arena.alloc([arg])),
Reset => (self.arena.alloc(layout), self.arena.alloc([layout])),
@ -293,11 +300,12 @@ impl<'a> CodeGenHelp<'a> {
&mut self,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
orig_layout: Layout<'a>,
) -> Symbol {
use HelperOp::*;
let layout = self.replace_rec_ptr(ctx, orig_layout);
let layout = self.replace_rec_ptr(ctx, layout_interner, orig_layout);
let found = self
.specializations
@ -325,15 +333,29 @@ impl<'a> CodeGenHelp<'a> {
let (ret_layout, body) = match ctx.op {
Inc | Dec | DecRef(_) => (
LAYOUT_UNIT,
refcount::refcount_generic(self, ident_ids, ctx, layout, Symbol::ARG_1),
refcount::refcount_generic(
self,
ident_ids,
ctx,
layout_interner,
layout,
Symbol::ARG_1,
),
),
Reset => (
layout,
refcount::refcount_reset_proc_body(self, ident_ids, ctx, layout, Symbol::ARG_1),
refcount::refcount_reset_proc_body(
self,
ident_ids,
ctx,
layout_interner,
layout,
Symbol::ARG_1,
),
),
Eq => (
LAYOUT_BOOL,
equality::eq_generic(self, ident_ids, ctx, layout),
equality::eq_generic(self, ident_ids, ctx, layout_interner, layout),
),
};
@ -415,10 +437,16 @@ impl<'a> CodeGenHelp<'a> {
// For example if a program uses `RoseTree a : [Tree a (List (RoseTree a))]`
// then it could have both `RoseTree I64` and `RoseTree Str`. In this case it
// needs *two* specializations for `List(RecursivePointer)`, not just one.
fn replace_rec_ptr(&self, ctx: &Context<'a>, layout: Layout<'a>) -> Layout<'a> {
fn replace_rec_ptr(
&mut self,
ctx: &Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
) -> Layout<'a> {
match layout {
Layout::Builtin(Builtin::List(v)) => Layout::Builtin(Builtin::List(
self.arena.alloc(self.replace_rec_ptr(ctx, *v)),
self.arena
.alloc(self.replace_rec_ptr(ctx, layout_interner, *v)),
)),
Layout::Builtin(_) => layout,
@ -427,9 +455,12 @@ impl<'a> CodeGenHelp<'a> {
field_layouts,
field_order_hash,
} => {
let new_fields_iter = field_layouts.iter().map(|f| self.replace_rec_ptr(ctx, *f));
let mut new_field_layouts = Vec::with_capacity_in(field_layouts.len(), self.arena);
for f in field_layouts.iter() {
new_field_layouts.push(self.replace_rec_ptr(ctx, layout_interner, *f));
}
Layout::Struct {
field_layouts: self.arena.alloc_slice_fill_iter(new_fields_iter),
field_layouts: new_field_layouts.into_bump_slice(),
field_order_hash,
}
}
@ -439,7 +470,7 @@ impl<'a> CodeGenHelp<'a> {
for fields in tags {
let mut new_fields = Vec::with_capacity_in(fields.len(), self.arena);
for field in fields.iter() {
new_fields.push(self.replace_rec_ptr(ctx, *field))
new_fields.push(self.replace_rec_ptr(ctx, layout_interner, *field))
}
new_tags.push(new_fields.into_bump_slice());
}
@ -453,12 +484,19 @@ impl<'a> CodeGenHelp<'a> {
}
Layout::Boxed(inner) => {
Layout::Boxed(self.arena.alloc(self.replace_rec_ptr(ctx, *inner)))
let inner = layout_interner.get(inner);
let inner = self
.arena
.alloc(self.replace_rec_ptr(ctx, layout_interner, *inner));
let inner = layout_interner.insert(inner);
Layout::Boxed(inner)
}
Layout::LambdaSet(lambda_set) => {
self.replace_rec_ptr(ctx, lambda_set.runtime_representation(self.layout_interner))
}
Layout::LambdaSet(lambda_set) => self.replace_rec_ptr(
ctx,
layout_interner,
lambda_set.runtime_representation(layout_interner),
),
// This line is the whole point of the function
Layout::RecursivePointer => Layout::Union(ctx.recursive_union.unwrap()),

View file

@ -9,7 +9,7 @@ use crate::code_gen_help::let_lowlevel;
use crate::ir::{
BranchInfo, Call, CallType, Expr, JoinPointId, Literal, ModifyRc, Param, Stmt, UpdateModeId,
};
use crate::layout::{Builtin, Layout, TagIdIntType, UnionLayout};
use crate::layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout};
use super::{CodeGenHelp, Context, HelperOp};
@ -24,6 +24,7 @@ pub fn refcount_stmt<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
modify: &ModifyRc,
following: &'a Stmt<'a>,
@ -45,6 +46,7 @@ pub fn refcount_stmt<'a>(
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
layout,
arena.alloc([*structure, amount_sym]),
)
@ -58,7 +60,13 @@ pub fn refcount_stmt<'a>(
// Call helper proc, passing the Roc structure
let call_result_empty = root.create_symbol(ident_ids, "call_result_empty");
let call_expr = root
.call_specialized_op(ident_ids, ctx, layout, arena.alloc([*structure]))
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
layout,
arena.alloc([*structure]),
)
.unwrap();
let call_stmt = Stmt::Let(call_result_empty, call_expr, LAYOUT_UNIT, following);
arena.alloc(call_stmt)
@ -69,7 +77,15 @@ pub fn refcount_stmt<'a>(
// Str has no children, so we might as well do what we normally do and call the helper.
Layout::Builtin(Builtin::Str) => {
ctx.op = HelperOp::Dec;
refcount_stmt(root, ident_ids, ctx, layout, modify, following)
refcount_stmt(
root,
ident_ids,
ctx,
layout_interner,
layout,
modify,
following,
)
}
// Struct and non-recursive Unions are stack-only, so DecRef is a no-op
@ -80,7 +96,14 @@ pub fn refcount_stmt<'a>(
// and replace any return statements with jumps to the `following` statement.
_ => match ctx.op {
HelperOp::DecRef(jp_decref) => {
let rc_stmt = refcount_generic(root, ident_ids, ctx, layout, *structure);
let rc_stmt = refcount_generic(
root,
ident_ids,
ctx,
layout_interner,
layout,
*structure,
);
let join = Stmt::Join {
id: jp_decref,
parameters: &[],
@ -100,10 +123,11 @@ pub fn refcount_generic<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
structure: Symbol,
) -> Stmt<'a> {
debug_assert!(is_rc_implemented_yet(root.layout_interner, &layout));
debug_assert!(is_rc_implemented_yet(layout_interner, &layout));
match layout {
Layout::Builtin(Builtin::Int(_) | Builtin::Float(_) | Builtin::Bool | Builtin::Decimal) => {
@ -112,24 +136,56 @@ pub fn refcount_generic<'a>(
rc_return_stmt(root, ident_ids, ctx)
}
Layout::Builtin(Builtin::Str) => refcount_str(root, ident_ids, ctx),
Layout::Builtin(Builtin::List(elem_layout)) => {
refcount_list(root, ident_ids, ctx, &layout, elem_layout, structure)
}
Layout::Struct { field_layouts, .. } => {
refcount_struct(root, ident_ids, ctx, field_layouts, structure)
}
Layout::Union(union_layout) => {
refcount_union(root, ident_ids, ctx, union_layout, structure)
}
Layout::Builtin(Builtin::List(elem_layout)) => refcount_list(
root,
ident_ids,
ctx,
layout_interner,
&layout,
elem_layout,
structure,
),
Layout::Struct { field_layouts, .. } => refcount_struct(
root,
ident_ids,
ctx,
layout_interner,
field_layouts,
structure,
),
Layout::Union(union_layout) => refcount_union(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
structure,
),
Layout::LambdaSet(lambda_set) => {
let runtime_layout = lambda_set.runtime_representation(root.layout_interner);
refcount_generic(root, ident_ids, ctx, runtime_layout, structure)
let runtime_layout = lambda_set.runtime_representation(layout_interner);
refcount_generic(
root,
ident_ids,
ctx,
layout_interner,
runtime_layout,
structure,
)
}
Layout::RecursivePointer => unreachable!(
"We should never call a refcounting helper on a RecursivePointer layout directly"
),
Layout::Boxed(inner_layout) => {
refcount_boxed(root, ident_ids, ctx, &layout, inner_layout, structure)
let inner_layout = layout_interner.get(inner_layout);
refcount_boxed(
root,
ident_ids,
ctx,
layout_interner,
&layout,
inner_layout,
structure,
)
}
}
}
@ -138,6 +194,7 @@ pub fn refcount_reset_proc_body<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: Layout<'a>,
structure: Symbol,
) -> Stmt<'a> {
@ -206,8 +263,7 @@ pub fn refcount_reset_proc_body<'a>(
let alloc_addr_stmt = {
let alignment = root.create_symbol(ident_ids, "alignment");
let alignment_expr = Expr::Literal(Literal::Int(
(layout.alignment_bytes(root.layout_interner, root.target_info) as i128)
.to_ne_bytes(),
(layout.alignment_bytes(layout_interner, root.target_info) as i128).to_ne_bytes(),
));
let alloc_addr = root.create_symbol(ident_ids, "alloc_addr");
let alloc_addr_expr = Expr::Call(Call {
@ -241,6 +297,7 @@ pub fn refcount_reset_proc_body<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tag_layouts,
null_id,
@ -260,7 +317,13 @@ pub fn refcount_reset_proc_body<'a>(
let else_stmt = {
let decrement_unit = root.create_symbol(ident_ids, "decrement_unit");
let decrement_expr = root
.call_specialized_op(ident_ids, ctx, layout, root.arena.alloc([structure]))
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
layout,
root.arena.alloc([structure]),
)
.unwrap();
let decrement_stmt = |next| Stmt::Let(decrement_unit, decrement_expr, LAYOUT_UNIT, next);
@ -694,6 +757,7 @@ fn refcount_list<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: &Layout,
elem_layout: &'a Layout,
structure: Symbol,
@ -743,7 +807,7 @@ fn refcount_list<'a>(
//
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let alignment = layout.alignment_bytes(layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_list = modify_refcount(
@ -769,6 +833,7 @@ fn refcount_list<'a>(
root,
ident_ids,
ctx,
layout_interner,
elem_layout,
LAYOUT_UNIT,
box_union_layout,
@ -819,6 +884,7 @@ fn refcount_list_elems<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
elem_layout: &Layout<'a>,
ret_layout: Layout<'a>,
box_union_layout: UnionLayout<'a>,
@ -841,7 +907,7 @@ fn refcount_list_elems<'a>(
// let size = literal int
let elem_size = root.create_symbol(ident_ids, "elem_size");
let elem_size_expr = Expr::Literal(Literal::Int(
(elem_layout.stack_size(root.layout_interner, root.target_info) as i128).to_ne_bytes(),
(elem_layout.stack_size(layout_interner, root.target_info) as i128).to_ne_bytes(),
));
let elem_size_stmt = |next| Stmt::Let(elem_size, elem_size_expr, layout_isize, next);
@ -901,7 +967,7 @@ fn refcount_list_elems<'a>(
let mod_elem_unit = root.create_symbol(ident_ids, "mod_elem_unit");
let mod_elem_args = refcount_args(root, ctx, elem);
let mod_elem_expr = root
.call_specialized_op(ident_ids, ctx, *elem_layout, mod_elem_args)
.call_specialized_op(ident_ids, ctx, layout_interner, *elem_layout, mod_elem_args)
.unwrap();
let mod_elem_stmt = |next| Stmt::Let(mod_elem_unit, mod_elem_expr, LAYOUT_UNIT, next);
@ -984,13 +1050,14 @@ fn refcount_struct<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
field_layouts: &'a [Layout<'a>],
structure: Symbol,
) -> Stmt<'a> {
let mut stmt = rc_return_stmt(root, ident_ids, ctx);
for (i, field_layout) in field_layouts.iter().enumerate().rev() {
if field_layout.contains_refcounted(root.layout_interner) {
if field_layout.contains_refcounted(layout_interner) {
let field_val = root.create_symbol(ident_ids, &format!("field_val_{}", i));
let field_val_expr = Expr::StructAtIndex {
index: i as u64,
@ -1002,7 +1069,7 @@ fn refcount_struct<'a>(
let mod_unit = root.create_symbol(ident_ids, &format!("mod_field_{}", i));
let mod_args = refcount_args(root, ctx, field_val);
let mod_expr = root
.call_specialized_op(ident_ids, ctx, *field_layout, mod_args)
.call_specialized_op(ident_ids, ctx, layout_interner, *field_layout, mod_args)
.unwrap();
let mod_stmt = |next| Stmt::Let(mod_unit, mod_expr, LAYOUT_UNIT, next);
@ -1023,6 +1090,7 @@ fn refcount_union<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union: UnionLayout<'a>,
structure: Symbol,
) -> Stmt<'a> {
@ -1034,14 +1102,41 @@ fn refcount_union<'a>(
}
let body = match union {
NonRecursive(tags) => refcount_union_nonrec(root, ident_ids, ctx, union, tags, structure),
NonRecursive(tags) => refcount_union_nonrec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
structure,
),
Recursive(tags) => {
let (is_tailrec, tail_idx) = root.union_tail_recursion_fields(union);
if is_tailrec && !ctx.op.is_decref() {
refcount_union_tailrec(root, ident_ids, ctx, union, tags, None, tail_idx, structure)
refcount_union_tailrec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
None,
tail_idx,
structure,
)
} else {
refcount_union_rec(root, ident_ids, ctx, union, tags, None, structure)
refcount_union_rec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
None,
structure,
)
}
}
@ -1051,7 +1146,16 @@ fn refcount_union<'a>(
// a direct RecursionPointer is only possible if there's at least one non-recursive variant.
// This nesting makes it harder to do tail recursion, so we just don't.
let tags = root.arena.alloc([field_layouts]);
refcount_union_rec(root, ident_ids, ctx, union, tags, None, structure)
refcount_union_rec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
None,
structure,
)
}
NullableWrapped {
@ -1062,10 +1166,27 @@ fn refcount_union<'a>(
let (is_tailrec, tail_idx) = root.union_tail_recursion_fields(union);
if is_tailrec && !ctx.op.is_decref() {
refcount_union_tailrec(
root, ident_ids, ctx, union, tags, null_id, tail_idx, structure,
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
null_id,
tail_idx,
structure,
)
} else {
refcount_union_rec(root, ident_ids, ctx, union, tags, null_id, structure)
refcount_union_rec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
null_id,
structure,
)
}
}
@ -1078,10 +1199,27 @@ fn refcount_union<'a>(
let (is_tailrec, tail_idx) = root.union_tail_recursion_fields(union);
if is_tailrec && !ctx.op.is_decref() {
refcount_union_tailrec(
root, ident_ids, ctx, union, tags, null_id, tail_idx, structure,
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
null_id,
tail_idx,
structure,
)
} else {
refcount_union_rec(root, ident_ids, ctx, union, tags, null_id, structure)
refcount_union_rec(
root,
ident_ids,
ctx,
layout_interner,
union,
tags,
null_id,
structure,
)
}
}
};
@ -1095,6 +1233,7 @@ fn refcount_union_nonrec<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
tag_layouts: &'a [&'a [Layout<'a>]],
structure: Symbol,
@ -1120,6 +1259,7 @@ fn refcount_union_nonrec<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tag_layouts,
None,
@ -1140,6 +1280,7 @@ fn refcount_union_contents<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
tag_layouts: &'a [&'a [Layout<'a>]],
null_id: Option<TagIdIntType>,
@ -1173,6 +1314,7 @@ fn refcount_union_contents<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
field_layouts,
structure,
@ -1207,6 +1349,7 @@ fn refcount_union_rec<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
tag_layouts: &'a [&'a [Layout<'a>]],
null_id: Option<TagIdIntType>,
@ -1231,7 +1374,7 @@ fn refcount_union_rec<'a>(
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment =
Layout::Union(union_layout).alignment_bytes(root.layout_interner, root.target_info);
Layout::Union(union_layout).alignment_bytes(layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_structure_stmt = modify_refcount(
root,
@ -1259,6 +1402,7 @@ fn refcount_union_rec<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
tag_layouts,
null_id,
@ -1285,6 +1429,7 @@ fn refcount_union_tailrec<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
tag_layouts: &'a [&'a [Layout<'a>]],
null_id: Option<TagIdIntType>,
@ -1339,7 +1484,7 @@ fn refcount_union_tailrec<'a>(
)
};
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let alignment = layout.alignment_bytes(layout_interner, root.target_info);
let modify_structure_stmt = modify_refcount(
root,
ident_ids,
@ -1427,6 +1572,7 @@ fn refcount_union_tailrec<'a>(
root,
ident_ids,
ctx,
layout_interner,
union_layout,
non_tailrec_fields,
current,
@ -1488,6 +1634,7 @@ fn refcount_tag_fields<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
field_layouts: &'a [Layout<'a>],
structure: Symbol,
@ -1497,7 +1644,7 @@ fn refcount_tag_fields<'a>(
let mut stmt = following;
for (i, field_layout) in field_layouts.iter().enumerate().rev() {
if field_layout.contains_refcounted(root.layout_interner) {
if field_layout.contains_refcounted(layout_interner) {
let field_val = root.create_symbol(ident_ids, &format!("field_{}_{}", tag_id, i));
let field_val_expr = Expr::UnionAtIndex {
union_layout,
@ -1510,7 +1657,7 @@ fn refcount_tag_fields<'a>(
let mod_unit = root.create_symbol(ident_ids, &format!("mod_field_{}_{}", tag_id, i));
let mod_args = refcount_args(root, ctx, field_val);
let mod_expr = root
.call_specialized_op(ident_ids, ctx, *field_layout, mod_args)
.call_specialized_op(ident_ids, ctx, layout_interner, *field_layout, mod_args)
.unwrap();
let mod_stmt = |next| Stmt::Let(mod_unit, mod_expr, LAYOUT_UNIT, next);
@ -1531,6 +1678,7 @@ fn refcount_boxed<'a>(
root: &mut CodeGenHelp<'a>,
ident_ids: &mut IdentIds,
ctx: &mut Context<'a>,
layout_interner: &mut STLayoutInterner<'a>,
layout: &Layout,
inner_layout: &'a Layout,
outer: Symbol,
@ -1544,7 +1692,7 @@ fn refcount_boxed<'a>(
//
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let alignment = layout.alignment_bytes(layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_outer = modify_refcount(
root,
@ -1571,7 +1719,13 @@ fn refcount_boxed<'a>(
let mod_inner_unit = root.create_symbol(ident_ids, "mod_inner_unit");
let mod_inner_args = refcount_args(root, ctx, inner);
let mod_inner_expr = root
.call_specialized_op(ident_ids, ctx, *inner_layout, mod_inner_args)
.call_specialized_op(
ident_ids,
ctx,
layout_interner,
*inner_layout,
mod_inner_args,
)
.unwrap();
Stmt::Let(

View file

@ -434,11 +434,12 @@ impl<'a, 'r> Ctx<'a, 'r> {
None
}
&Expr::ExprBox { symbol } => self.with_sym_layout(symbol, |ctx, _def_line, layout| {
Some(Layout::Boxed(ctx.alloc(layout)))
let inner = ctx.interner.insert(ctx.alloc(layout));
Some(Layout::Boxed(inner))
}),
&Expr::ExprUnbox { symbol } => {
self.with_sym_layout(symbol, |ctx, def_line, layout| match ctx.resolve(layout) {
Layout::Boxed(inner) => Some(*inner),
Layout::Boxed(inner) => Some(*ctx.interner.get(inner)),
_ => {
ctx.problem(ProblemKind::UnboxNotABox { symbol, def_line });
None
@ -671,7 +672,10 @@ fn resolve_recursive_layout<'a>(
layout
}
},
Layout::Boxed(inner) => Layout::Boxed(arena.alloc(go!(*inner))),
Layout::Boxed(inner) => {
let inner = go!(*interner.get(inner));
Layout::Boxed(interner.insert(arena.alloc(inner)))
}
Layout::Struct {
field_order_hash,
field_layouts,

View file

@ -315,6 +315,10 @@ impl<'a> LayoutCache<'a> {
self.interner.get(interned)
}
pub fn put_in(&mut self, layout: &'a Layout<'a>) -> InLayout<'a> {
self.interner.insert(layout)
}
#[cfg(debug_assertions)]
pub fn statistics(&self) -> (CacheStatistics, CacheStatistics) {
(self.stats, self.raw_function_stats)
@ -678,7 +682,7 @@ pub enum Layout<'a> {
field_order_hash: FieldOrderHash,
field_layouts: &'a [Layout<'a>],
},
Boxed(&'a Layout<'a>),
Boxed(InLayout<'a>),
Union(UnionLayout<'a>),
LambdaSet(LambdaSet<'a>),
RecursivePointer,
@ -2494,7 +2498,9 @@ impl<'a> Layout<'a> {
.runtime_representation(interner)
.allocation_alignment_bytes(interner, target_info),
Layout::RecursivePointer => unreachable!("should be looked up to get an actual layout"),
Layout::Boxed(inner) => inner.allocation_alignment_bytes(interner, target_info),
Layout::Boxed(inner) => interner
.get(*inner)
.allocation_alignment_bytes(interner, target_info),
}
}
@ -2612,7 +2618,7 @@ impl<'a> Layout<'a> {
RecursivePointer => alloc.text("*self"),
Boxed(inner) => alloc
.text("Boxed(")
.append(inner.to_doc(alloc, interner, parens))
.append(interner.get(inner).to_doc(alloc, interner, parens))
.append(")"),
}
}
@ -3024,8 +3030,9 @@ fn layout_from_flat_type<'a>(
let inner_var = args[0];
let inner_layout = cached!(Layout::from_var(env, inner_var), criteria);
let inner_layout = env.cache.put_in(env.arena.alloc(inner_layout));
Cacheable(Ok(Layout::Boxed(env.arena.alloc(inner_layout))), criteria)
Cacheable(Ok(Layout::Boxed(inner_layout)), criteria)
}
_ => {
panic!(

View file

@ -1455,6 +1455,7 @@ fn add_tag_union<'a>(
}
}
Layout::Boxed(elem_layout) => {
let elem_layout = env.layout_cache.get_in(elem_layout);
let (tag_name, payload_fields) =
single_tag_payload_fields(union_tags, subs, &[*elem_layout], env, types);

View file

@ -1,5 +1,6 @@
use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_intern::Interner;
use roc_types::types::AliasKind;
use std::cmp::{max_by_key, min_by_key};
@ -854,6 +855,7 @@ fn addr_to_ast<'a, M: ReplAppMemory>(
let inner_var = env.subs[inner_var_index];
let addr_of_inner = mem.deref_usize(addr);
let inner_layout = env.layout_cache.interner.get(*inner_layout);
let inner_expr = addr_to_ast(
env,
mem,

View file

@ -208,7 +208,7 @@ pub async fn entrypoint_from_js(src: String) -> Result<String, String> {
mut interns,
mut subs,
exposed_to_host,
layout_interner,
mut layout_interner,
..
} = mono;
@ -234,7 +234,6 @@ pub async fn entrypoint_from_js(src: String) -> Result<String, String> {
let app_module_bytes = {
let env = roc_gen_wasm::Env {
arena,
layout_interner: &layout_interner,
module_id,
stack_bytes: roc_gen_wasm::Env::DEFAULT_STACK_BYTES,
exposed_to_host: exposed_to_host
@ -248,6 +247,7 @@ pub async fn entrypoint_from_js(src: String) -> Result<String, String> {
let host_module = roc_gen_wasm::parse_host(env.arena, PRE_LINKED_BINARY).unwrap();
roc_gen_wasm::build_app_module(
&env,
&mut layout_interner,
&mut interns, // NOTE: must drop this mutable ref before jit_to_ast
host_module,
procedures,