From 6859c2e15ce9c4665c5e606579f635ef158e2530 Mon Sep 17 00:00:00 2001 From: Ayaz Hafiz Date: Tue, 3 Jan 2023 20:35:09 -0600 Subject: [PATCH] Update dev backend --- crates/compiler/alias_analysis/src/lib.rs | 172 ++- .../compiler/gen_dev/src/generic64/aarch64.rs | 14 +- crates/compiler/gen_dev/src/generic64/mod.rs | 1052 +++++++++++++++-- .../compiler/gen_dev/src/generic64/storage.rs | 53 +- .../compiler/gen_dev/src/generic64/x86_64.rs | 79 +- crates/compiler/gen_dev/src/lib.rs | 127 +- crates/compiler/mono/src/layout.rs | 8 +- crates/compiler/mono/src/layout/intern.rs | 5 + 8 files changed, 1154 insertions(+), 356 deletions(-) diff --git a/crates/compiler/alias_analysis/src/lib.rs b/crates/compiler/alias_analysis/src/lib.rs index e115abedbd..3d672a317b 100644 --- a/crates/compiler/alias_analysis/src/lib.rs +++ b/crates/compiler/alias_analysis/src/lib.rs @@ -14,7 +14,7 @@ use roc_mono::ir::{ Literal, ModifyRc, OptLevel, Proc, ProcLayout, SingleEntryPoint, Stmt, }; use roc_mono::layout::{ - Builtin, FieldOrderHash, Layout, LayoutInterner, Niche, RawFunctionLayout, STLayoutInterner, + Builtin, InLayout, Layout, LayoutInterner, Niche, RawFunctionLayout, STLayoutInterner, UnionLayout, }; @@ -31,7 +31,7 @@ pub fn func_name_bytes(proc: &Proc) -> [u8; SIZE] { proc.name.name(), proc.args.iter().map(|x| x.0), proc.name.niche(), - &proc.ret_layout, + proc.ret_layout, ); bytes } @@ -75,10 +75,10 @@ pub fn func_name_bytes_help<'a, I>( symbol: Symbol, argument_layouts: I, niche: Niche<'a>, - return_layout: &Layout<'a>, + return_layout: InLayout<'a>, ) -> [u8; SIZE] where - I: IntoIterator>, + I: IntoIterator>, { let mut name_bytes = [0u8; SIZE]; @@ -189,13 +189,13 @@ where RawFunctionLayout::Function(_, _, _) => { let it = top_level.arguments.iter().copied(); let bytes = - func_name_bytes_help(*symbol, it, Niche::NONE, &top_level.result); + func_name_bytes_help(*symbol, it, Niche::NONE, top_level.result); host_exposed_functions.push((bytes, top_level.arguments)); } RawFunctionLayout::ZeroArgumentThunk(_) => { let bytes = - func_name_bytes_help(*symbol, [], Niche::NONE, &top_level.result); + func_name_bytes_help(*symbol, [], Niche::NONE, top_level.result); host_exposed_functions.push((bytes, top_level.arguments)); } @@ -229,11 +229,11 @@ where entry_point_symbol, entry_point_layout.arguments.iter().copied(), Niche::NONE, - &entry_point_layout.result, + entry_point_layout.result, ); let roc_main = FuncName(&roc_main_bytes); - let mut env = Env::new(arena); + let mut env = Env::new(); let entry_point_function = build_entry_point( &mut env, @@ -252,10 +252,7 @@ where // construct a big pattern match picking one of the expects at random let layout: ProcLayout<'a> = ProcLayout { arguments: &[], - result: Layout::Struct { - field_order_hash: FieldOrderHash::from_ordered_fields(&[]), - field_layouts: &[], - }, + result: Layout::UNIT, niche: Niche::NONE, }; @@ -263,13 +260,13 @@ where .iter() .map(|symbol| { ( - func_name_bytes_help(*symbol, [], Niche::NONE, &layout.result), + func_name_bytes_help(*symbol, [], Niche::NONE, layout.result), [].as_slice(), ) }) .collect(); - let mut env = Env::new(arena); + let mut env = Env::new(); let entry_point_function = build_entry_point(&mut env, interner, layout, None, &host_exposed)?; @@ -286,7 +283,7 @@ where let mut builder = TypeDefBuilder::new(); - let mut env = Env::new(arena); + let mut env = Env::new(); let variant_types = recursive_variant_types(&mut env, &mut builder, interner, &union_layout)?; @@ -351,10 +348,10 @@ fn terrible_hack(builder: &mut FuncDefBuilder, 
block: BlockId, type_id: TypeId) fn build_entry_point<'a>( env: &mut Env<'a>, - interner: &STLayoutInterner<'a>, + interner: &mut STLayoutInterner<'a>, layout: roc_mono::ir::ProcLayout<'a>, entry_point_function: Option, - host_exposed_functions: &[([u8; SIZE], &'a [Layout<'a>])], + host_exposed_functions: &[([u8; SIZE], &'a [InLayout<'a>])], ) -> Result { let mut builder = FuncDefBuilder::new(); let outer_block = builder.add_block(); @@ -400,11 +397,12 @@ fn build_entry_point<'a>( let block = builder.add_block(); + let struct_layout = interner.insert(Layout::struct_no_name_order(layouts)); let type_id = layout_spec( env, &mut builder, interner, - env.arena.alloc(Layout::struct_no_name_order(layouts)), + struct_layout, &WhenRecursive::Unreachable, )?; @@ -439,7 +437,7 @@ fn proc_spec<'a>( proc: &Proc<'a>, ) -> Result<(FuncDef, MutSet>)> { let mut builder = FuncDefBuilder::new(); - let mut env = Env::new(arena); + let mut env = Env::new(); let block = builder.add_block(); @@ -457,25 +455,26 @@ fn proc_spec<'a>( interner, &mut env, block, - &proc.ret_layout, + proc.ret_layout, &proc.body, )?; let root = BlockExpr(block, value_id); + let args_struct_layout = interner.insert(Layout::struct_no_name_order( + argument_layouts.into_bump_slice(), + )); let arg_type_id = layout_spec( &mut env, &mut builder, interner, - arena.alloc(Layout::struct_no_name_order( - argument_layouts.into_bump_slice(), - )), + args_struct_layout, &WhenRecursive::Unreachable, )?; let ret_type_id = layout_spec( &mut env, &mut builder, interner, - &proc.ret_layout, + proc.ret_layout, &WhenRecursive::Unreachable, )?; @@ -485,16 +484,14 @@ fn proc_spec<'a>( } struct Env<'a> { - arena: &'a Bump, symbols: MutMap, join_points: MutMap, type_names: MutSet>, } impl<'a> Env<'a> { - fn new(arena: &'a Bump) -> Self { + fn new() -> Self { Self { - arena, symbols: Default::default(), join_points: Default::default(), type_names: Default::default(), @@ -535,14 +532,14 @@ fn stmt_spec<'a>( interner: &mut STLayoutInterner<'a>, env: &mut Env<'a>, block: BlockId, - layout: &Layout<'a>, + layout: InLayout<'a>, stmt: &Stmt<'a>, ) -> Result { use Stmt::*; match stmt { Let(symbol, expr, expr_layout, mut continuation) => { - let value_id = expr_spec(builder, interner, env, block, expr_layout, expr)?; + let value_id = expr_spec(builder, interner, env, block, *expr_layout, expr)?; env.symbols.insert(*symbol, value_id); let mut queue = vec![symbol]; @@ -550,7 +547,8 @@ fn stmt_spec<'a>( loop { match continuation { Let(symbol, expr, expr_layout, c) => { - let value_id = expr_spec(builder, interner, env, block, expr_layout, expr)?; + let value_id = + expr_spec(builder, interner, env, block, *expr_layout, expr)?; env.symbols.insert(*symbol, value_id); queue.push(symbol); @@ -620,7 +618,7 @@ fn stmt_spec<'a>( env, builder, interner, - &p.layout, + p.layout, &WhenRecursive::Unreachable, )?); } @@ -717,13 +715,13 @@ fn build_recursive_tuple_type<'a>( env: &mut Env<'a>, builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, - layouts: &[Layout<'a>], + layouts: &[InLayout<'a>], when_recursive: &WhenRecursive, ) -> Result { let mut field_types = Vec::new(); for field in layouts.iter() { - let type_id = layout_spec_help(env, builder, interner, field, when_recursive)?; + let type_id = layout_spec_help(env, builder, interner, *field, when_recursive)?; field_types.push(type_id); } @@ -734,13 +732,13 @@ fn build_tuple_type<'a>( env: &mut Env<'a>, builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, - layouts: &[Layout<'a>], + layouts: 
&[InLayout<'a>], when_recursive: &WhenRecursive, ) -> Result { let mut field_types = Vec::new(); for field in layouts.iter() { - field_types.push(layout_spec(env, builder, interner, field, when_recursive)?); + field_types.push(layout_spec(env, builder, interner, *field, when_recursive)?); } builder.add_tuple_type(&field_types) @@ -777,7 +775,7 @@ fn call_spec<'a>( interner: &mut STLayoutInterner<'a>, env: &mut Env<'a>, block: BlockId, - layout: &Layout<'a>, + layout: InLayout<'a>, call: &Call<'a>, ) -> Result { use CallType::*; @@ -795,7 +793,7 @@ fn call_spec<'a>( let arg_value_id = build_tuple_value(builder, env, block, call.arguments)?; let args_it = arg_layouts.iter().copied(); let captures_niche = name.niche(); - let bytes = func_name_bytes_help(name.name(), args_it, captures_niche, ret_layout); + let bytes = func_name_bytes_help(name.name(), args_it, captures_niche, *ret_layout); let name = FuncName(&bytes); let module = MOD_APP; builder.add_call(block, spec_var, module, name, arg_value_id) @@ -814,7 +812,7 @@ fn call_spec<'a>( env, builder, interner, - ret_layout, + *ret_layout, &WhenRecursive::Unreachable, )?; @@ -851,7 +849,7 @@ fn call_spec<'a>( passed_function.name.name(), args_it, captures_niche, - &passed_function.return_layout, + passed_function.return_layout, ); let name = FuncName(&bytes); let module = MOD_APP; @@ -891,18 +889,17 @@ fn call_spec<'a>( env, builder, interner, - return_layout, + *return_layout, &WhenRecursive::Unreachable, )?; - let return_layout = interner.insert(*return_layout); - - let state_layout = Layout::Builtin(Builtin::List(return_layout)); + let state_layout = + interner.insert(Layout::Builtin(Builtin::List(*return_layout))); let state_type = layout_spec( env, builder, interner, - &state_layout, + state_layout, &WhenRecursive::Unreachable, )?; @@ -928,14 +925,14 @@ fn call_spec<'a>( with_new_heap_cell(builder, block, bag) }; - let arg0_layout = interner.insert(argument_layouts[0]); + let arg0_layout = argument_layouts[0]; - let state_layout = Layout::Builtin(Builtin::List(arg0_layout)); + let state_layout = interner.insert(Layout::Builtin(Builtin::List(arg0_layout))); let state_type = layout_spec( env, builder, interner, - &state_layout, + state_layout, &WhenRecursive::Unreachable, )?; let init_state = list; @@ -965,18 +962,17 @@ fn call_spec<'a>( env, builder, interner, - return_layout, + *return_layout, &WhenRecursive::Unreachable, )?; - let return_layout = interner.insert(*return_layout); - - let state_layout = Layout::Builtin(Builtin::List(return_layout)); + let state_layout = + interner.insert(Layout::Builtin(Builtin::List(*return_layout))); let state_type = layout_spec( env, builder, interner, - &state_layout, + state_layout, &WhenRecursive::Unreachable, )?; @@ -1012,18 +1008,17 @@ fn call_spec<'a>( env, builder, interner, - return_layout, + *return_layout, &WhenRecursive::Unreachable, )?; - let return_layout = interner.insert(*return_layout); - - let state_layout = Layout::Builtin(Builtin::List(return_layout)); + let state_layout = + interner.insert(Layout::Builtin(Builtin::List(*return_layout))); let state_type = layout_spec( env, builder, interner, - &state_layout, + state_layout, &WhenRecursive::Unreachable, )?; @@ -1065,18 +1060,17 @@ fn call_spec<'a>( env, builder, interner, - return_layout, + *return_layout, &WhenRecursive::Unreachable, )?; - let return_layout = interner.insert(*return_layout); - - let state_layout = Layout::Builtin(Builtin::List(return_layout)); + let state_layout = + 
interner.insert(Layout::Builtin(Builtin::List(*return_layout))); let state_type = layout_spec( env, builder, interner, - &state_layout, + state_layout, &WhenRecursive::Unreachable, )?; @@ -1126,7 +1120,7 @@ fn lowlevel_spec<'a>( interner: &STLayoutInterner<'a>, env: &mut Env<'a>, block: BlockId, - layout: &Layout<'a>, + layout: InLayout<'a>, op: &LowLevel, update_mode: roc_mono::ir::UpdateModeId, arguments: &[Symbol], @@ -1206,21 +1200,21 @@ fn lowlevel_spec<'a>( let new_list = with_new_heap_cell(builder, block, bag)?; // depending on the types, the list or value will come first in the struct - let fields = match layout { + let fields = match interner.get(layout) { Layout::Struct { field_layouts, .. } => field_layouts, _ => unreachable!(), }; - match fields { - [Layout::Builtin(Builtin::List(_)), Layout::Builtin(Builtin::List(_))] => { + match (interner.get(fields[0]), interner.get(fields[1])) { + (Layout::Builtin(Builtin::List(_)), Layout::Builtin(Builtin::List(_))) => { // field name is the tie breaker, list is first in // { list : List a, value : a } builder.add_make_tuple(block, &[new_list, old_value]) } - [Layout::Builtin(Builtin::List(_)), _] => { + (Layout::Builtin(Builtin::List(_)), _) => { builder.add_make_tuple(block, &[new_list, old_value]) } - [_, Layout::Builtin(Builtin::List(_))] => { + (_, Layout::Builtin(Builtin::List(_))) => { builder.add_make_tuple(block, &[old_value, new_list]) } _ => unreachable!(), @@ -1239,14 +1233,13 @@ fn lowlevel_spec<'a>( ListWithCapacity => { // essentially an empty list, capacity is not relevant for morphic - match layout { + match interner.get(layout) { Layout::Builtin(Builtin::List(element_layout)) => { - let element_layout = interner.get(*element_layout); let type_id = layout_spec( env, builder, interner, - &element_layout, + element_layout, &WhenRecursive::Unreachable, )?; new_list(builder, block, type_id) @@ -1304,7 +1297,7 @@ fn recursive_tag_variant<'a>( builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, union_layout: &UnionLayout, - fields: &[Layout<'a>], + fields: &[InLayout<'a>], ) -> Result { let when_recursive = WhenRecursive::Loop(*union_layout); @@ -1413,7 +1406,7 @@ fn expr_spec<'a>( interner: &mut STLayoutInterner<'a>, env: &mut Env<'a>, block: BlockId, - layout: &Layout<'a>, + layout: InLayout<'a>, expr: &Expr<'a>, ) -> Result { use Expr::*; @@ -1551,7 +1544,7 @@ fn expr_spec<'a>( env, builder, interner, - elem_layout, + *elem_layout, &WhenRecursive::Unreachable, )?; @@ -1578,14 +1571,13 @@ fn expr_spec<'a>( } } - EmptyArray => match layout { + EmptyArray => match interner.get(layout) { Layout::Builtin(Builtin::List(element_layout)) => { - let element_layout = interner.get(*element_layout); let type_id = layout_spec( env, builder, interner, - &element_layout, + element_layout, &WhenRecursive::Unreachable, )?; new_list(builder, block, type_id) @@ -1598,12 +1590,12 @@ fn expr_spec<'a>( } => { let tag_value_id = env.symbols[symbol]; - let union_layout = match layout { + let union_layout = match interner.get(layout) { Layout::Union(ul) => ul, _ => unreachable!(), }; - let type_name_bytes = recursive_tag_union_name_bytes(union_layout).as_bytes(); + let type_name_bytes = recursive_tag_union_name_bytes(&union_layout).as_bytes(); let type_name = TypeName(&type_name_bytes); // unwrap the named wrapper @@ -1651,7 +1643,7 @@ fn layout_spec<'a>( env: &mut Env<'a>, builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, - layout: &Layout<'a>, + layout: InLayout<'a>, when_recursive: &WhenRecursive, ) -> Result { 
layout_spec_help(env, builder, interner, layout, when_recursive) @@ -1661,7 +1653,7 @@ fn non_recursive_variant_types<'a>( env: &mut Env<'a>, builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, - tags: &[&[Layout<'a>]], + tags: &[&[InLayout<'a>]], // If there is a recursive pointer latent within this layout, coming from a containing layout. when_recursive: &WhenRecursive, ) -> Result> { @@ -1684,13 +1676,13 @@ fn layout_spec_help<'a>( env: &mut Env<'a>, builder: &mut impl TypeContext, interner: &STLayoutInterner<'a>, - layout: &Layout<'a>, + layout: InLayout<'a>, when_recursive: &WhenRecursive, ) -> Result { use Layout::*; - match layout { - Builtin(builtin) => builtin_spec(env, builder, interner, builtin, when_recursive), + match interner.get(layout) { + Builtin(builtin) => builtin_spec(env, builder, interner, &builtin, when_recursive), Struct { field_layouts, .. } => { build_recursive_tuple_type(env, builder, interner, field_layouts, when_recursive) } @@ -1698,7 +1690,7 @@ fn layout_spec_help<'a>( env, builder, interner, - &lambda_set.runtime_representation(interner), + lambda_set.runtime_representation(), when_recursive, ), Union(union_layout) => { @@ -1718,10 +1710,10 @@ fn layout_spec_help<'a>( | UnionLayout::NullableUnwrapped { .. } | UnionLayout::NullableWrapped { .. } | UnionLayout::NonNullableUnwrapped(_) => { - let type_name_bytes = recursive_tag_union_name_bytes(union_layout).as_bytes(); + let type_name_bytes = recursive_tag_union_name_bytes(&union_layout).as_bytes(); let type_name = TypeName(&type_name_bytes); - env.type_names.insert(*union_layout); + env.type_names.insert(union_layout); Ok(builder.add_named_type(MOD_APP, type_name)) } @@ -1729,9 +1721,8 @@ fn layout_spec_help<'a>( } Boxed(inner_layout) => { - let inner_layout = interner.get(*inner_layout); let inner_type = - layout_spec_help(env, builder, interner, &inner_layout, when_recursive)?; + layout_spec_help(env, builder, interner, inner_layout, when_recursive)?; let cell_type = builder.add_heap_cell_type(); builder.add_tuple_type(&[cell_type, inner_type]) @@ -1770,9 +1761,8 @@ fn builtin_spec<'a>( Decimal | Float(_) => builder.add_tuple_type(&[]), Str => str_type(builder), List(element_layout) => { - let element_layout = interner.get(*element_layout); let element_type = - layout_spec_help(env, builder, interner, &element_layout, when_recursive)?; + layout_spec_help(env, builder, interner, *element_layout, when_recursive)?; let cell = builder.add_heap_cell_type(); let bag = builder.add_bag_type(element_type)?; diff --git a/crates/compiler/gen_dev/src/generic64/aarch64.rs b/crates/compiler/gen_dev/src/generic64/aarch64.rs index 8124dbb8a1..eb98070607 100644 --- a/crates/compiler/gen_dev/src/generic64/aarch64.rs +++ b/crates/compiler/gen_dev/src/generic64/aarch64.rs @@ -4,7 +4,7 @@ use bumpalo::collections::Vec; use packed_struct::prelude::*; use roc_error_macros::internal_error; use roc_module::symbol::Symbol; -use roc_mono::layout::{Layout, STLayoutInterner}; +use roc_mono::layout::{InLayout, STLayoutInterner}; #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] #[allow(dead_code)] @@ -316,8 +316,8 @@ impl CallConv for AArch64C AArch64Call, >, _layout_interner: &mut STLayoutInterner<'a>, - _args: &'a [(Layout<'a>, Symbol)], - _ret_layout: &Layout<'a>, + _args: &'a [(InLayout<'a>, Symbol)], + _ret_layout: &InLayout<'a>, ) { todo!("Loading args for AArch64"); } @@ -336,8 +336,8 @@ impl CallConv for AArch64C _layout_interner: &mut STLayoutInterner<'a>, _dst: &Symbol, _args: &[Symbol], - 
_arg_layouts: &[Layout<'a>], - _ret_layout: &Layout<'a>, + _arg_layouts: &[InLayout<'a>], + _ret_layout: &InLayout<'a>, ) { todo!("Storing args for AArch64"); } @@ -354,7 +354,7 @@ impl CallConv for AArch64C >, _layout_interner: &mut STLayoutInterner<'a>, _sym: &Symbol, - _layout: &Layout<'a>, + _layout: &InLayout<'a>, ) { todo!("Returning complex symbols for AArch64"); } @@ -371,7 +371,7 @@ impl CallConv for AArch64C >, _layout_interner: &mut STLayoutInterner<'a>, _sym: &Symbol, - _layout: &Layout<'a>, + _layout: &InLayout<'a>, ) { todo!("Loading returned complex symbols for AArch64"); } diff --git a/crates/compiler/gen_dev/src/generic64/mod.rs b/crates/compiler/gen_dev/src/generic64/mod.rs index a7e8c1544e..683df08408 100644 --- a/crates/compiler/gen_dev/src/generic64/mod.rs +++ b/crates/compiler/gen_dev/src/generic64/mod.rs @@ -11,7 +11,9 @@ use roc_mono::code_gen_help::CodeGenHelp; use roc_mono::ir::{ BranchInfo, JoinPointId, ListLiteralElement, Literal, Param, ProcLayout, SelfRecursive, Stmt, }; -use roc_mono::layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout}; +use roc_mono::layout::{ + Builtin, InLayout, Layout, LayoutInterner, STLayoutInterner, TagIdIntType, UnionLayout, +}; use roc_target::TargetInfo; use std::marker::PhantomData; @@ -73,9 +75,9 @@ pub trait CallConv, storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, Self>, layout_interner: &mut STLayoutInterner<'a>, - args: &'a [(Layout<'a>, Symbol)], + args: &'a [(InLayout<'a>, Symbol)], // ret_layout is needed because if it is a complex type, we pass a pointer as the first arg. - ret_layout: &Layout<'a>, + ret_layout: &InLayout<'a>, ); /// store_args stores the args in registers and on the stack for function calling. @@ -86,9 +88,9 @@ pub trait CallConv, dst: &Symbol, args: &[Symbol], - arg_layouts: &[Layout<'a>], + arg_layouts: &[InLayout<'a>], // ret_layout is needed because if it is a complex type, we pass a pointer as the first arg. - ret_layout: &Layout<'a>, + ret_layout: &InLayout<'a>, ); /// return_complex_symbol returns the specified complex/non-primative symbol. @@ -98,7 +100,7 @@ pub trait CallConv, layout_interner: &mut STLayoutInterner<'a>, sym: &Symbol, - layout: &Layout<'a>, + layout: &InLayout<'a>, ); /// load_returned_complex_symbol loads a complex symbol that was returned from a function call. @@ -108,7 +110,7 @@ pub trait CallConv, layout_interner: &mut STLayoutInterner<'a>, sym: &Symbol, - layout: &Layout<'a>, + layout: &InLayout<'a>, ); } @@ -368,7 +370,6 @@ pub struct Backend64Bit< // They are likely to be small enough that it is faster to use a vec and linearly scan it or keep it sorted and binary search. 
phantom_asm: PhantomData, phantom_cc: PhantomData, - target_info: TargetInfo, env: &'r Env<'a>, layout_interner: &'r mut STLayoutInterner<'a>, interns: &'r mut Interns, @@ -380,10 +381,10 @@ pub struct Backend64Bit< is_self_recursive: Option, last_seen_map: MutMap>, - layout_map: MutMap>, + layout_map: MutMap>, free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>, - literal_map: MutMap, *const Layout<'a>)>, + literal_map: MutMap, *const InLayout<'a>)>, join_map: MutMap>, storage_manager: StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>, @@ -406,7 +407,6 @@ pub fn new_backend_64bit< Backend64Bit { phantom_asm: PhantomData, phantom_cc: PhantomData, - target_info, env, interns, layout_interner, @@ -453,6 +453,9 @@ impl< fn interns(&self) -> &Interns { self.interns } + fn interner(&self) -> &STLayoutInterner<'a> { + &self.layout_interner + } fn module_interns_helpers_mut( &mut self, ) -> ( @@ -489,7 +492,7 @@ impl< self.storage_manager.reset(); } - fn literal_map(&mut self) -> &mut MutMap, *const Layout<'a>)> { + fn literal_map(&mut self) -> &mut MutMap, *const InLayout<'a>)> { &mut self.literal_map } @@ -497,7 +500,7 @@ impl< &mut self.last_seen_map } - fn layout_map(&mut self) -> &mut MutMap> { + fn layout_map(&mut self) -> &mut MutMap> { &mut self.layout_map } @@ -603,7 +606,7 @@ impl< (out, out_relocs) } - fn load_args(&mut self, args: &'a [(Layout<'a>, Symbol)], ret_layout: &Layout<'a>) { + fn load_args(&mut self, args: &'a [(InLayout<'a>, Symbol)], ret_layout: &InLayout<'a>) { CC::load_args( &mut self.buf, &mut self.storage_manager, @@ -626,8 +629,8 @@ impl< dst: &Symbol, fn_name: String, args: &[Symbol], - arg_layouts: &[Layout<'a>], - ret_layout: &Layout<'a>, + arg_layouts: &[InLayout<'a>], + ret_layout: &InLayout<'a>, ) { if let Some(SelfRecursive::SelfRecursive(id)) = self.is_self_recursive { if &fn_name == self.proc_name.as_ref().unwrap() && self.join_map.contains_key(&id) { @@ -653,7 +656,7 @@ impl< ASM::call(&mut self.buf, &mut self.relocs, fn_name); // move return value to dst. - match ret_layout { + match *ret_layout { single_register_integers!() => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); ASM::mov_reg64_reg64(&mut self.buf, dst_reg, CC::GENERAL_RETURN_REGS[0]); @@ -677,10 +680,10 @@ impl< fn build_switch( &mut self, cond_symbol: &Symbol, - _cond_layout: &Layout<'a>, // cond_layout must be a integer due to potential jump table optimizations. + _cond_layout: &InLayout<'a>, // cond_layout must be a integer due to potential jump table optimizations. branches: &'a [(u64, BranchInfo<'a>, Stmt<'a>)], default_branch: &(BranchInfo<'a>, &'a Stmt<'a>), - ret_layout: &Layout<'a>, + ret_layout: &InLayout<'a>, ) { // Switches are a little complex due to keeping track of jumps. // In general I am trying to not have to loop over things multiple times or waste memory. @@ -748,7 +751,7 @@ impl< parameters: &'a [Param<'a>], body: &'a Stmt<'a>, remainder: &'a Stmt<'a>, - ret_layout: &Layout<'a>, + ret_layout: &InLayout<'a>, ) { // Free everything to the stack to make sure they don't get messed up when looping back to this point. // TODO: look into a nicer solution. 
@@ -785,8 +788,8 @@ impl< &mut self, id: &JoinPointId, args: &[Symbol], - arg_layouts: &[Layout<'a>], - _ret_layout: &Layout<'a>, + arg_layouts: &[InLayout<'a>], + _ret_layout: &InLayout<'a>, ) { self.storage_manager .setup_jump(self.layout_interner, &mut self.buf, id, args, arg_layouts); @@ -801,8 +804,8 @@ impl< } } - fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>) { - match layout { + fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>) { + match self.interner().get(*layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src_reg = self.storage_manager.load_to_general_reg(&mut self.buf, src); @@ -817,8 +820,8 @@ impl< } } - fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>) { - match layout { + fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) { + match self.layout_interner.get(*layout) { Layout::Builtin(Builtin::Int(quadword_and_smaller!())) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self @@ -850,18 +853,18 @@ impl< dst: &Symbol, src1: &Symbol, src2: &Symbol, - num_layout: &Layout<'a>, - return_layout: &Layout<'a>, + num_layout: &InLayout<'a>, + return_layout: &InLayout<'a>, ) { use Builtin::Int; let buf = &mut self.buf; - let struct_size = return_layout.stack_size(self.layout_interner, self.target_info); + let struct_size = self.layout_interner.stack_size(*return_layout); let base_offset = self.storage_manager.claim_stack_area(dst, struct_size); - match num_layout { + match self.layout_interner.get(*num_layout) { Layout::Builtin(Int(IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8)) => { let dst_reg = self .storage_manager @@ -896,10 +899,10 @@ impl< } } - fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>) { + fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) { use Builtin::Int; - match layout { + match self.layout_interner.get(*layout) { Layout::Builtin(Int(IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8)) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self @@ -943,8 +946,8 @@ impl< } } - fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>) { - match layout { + fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) { + match self.layout_interner.get(*layout) { Layout::Builtin(Builtin::Int( IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8, )) => { @@ -999,8 +1002,8 @@ impl< } } - fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>) { - match layout { + fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>) { + match self.layout_interner.get(*layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src_reg = self.storage_manager.load_to_general_reg(&mut self.buf, src); @@ -1010,8 +1013,8 @@ impl< } } - fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>) { - match layout { + fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) { + match self.layout_interner.get(*layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) 
=> { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self @@ -1026,9 +1029,9 @@ impl< } } - fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>) { - match arg_layout { - Layout::Builtin(single_register_int_builtins!()) => { + fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) { + match *arg_layout { + single_register_int_builtins!() => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self .storage_manager @@ -1042,8 +1045,8 @@ impl< } } - fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>) { - match arg_layout { + fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) { + match self.layout_interner.get(*arg_layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self @@ -1063,9 +1066,9 @@ impl< dst: &Symbol, src1: &Symbol, src2: &Symbol, - arg_layout: &Layout<'a>, + arg_layout: &InLayout<'a>, ) { - match arg_layout { + match self.layout_interner.get(*arg_layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self @@ -1084,11 +1087,14 @@ impl< &mut self, dst: &Symbol, src: &Symbol, - arg_layout: &Layout<'a>, - ret_layout: &Layout<'a>, + arg_layout: &InLayout<'a>, + ret_layout: &InLayout<'a>, ) { let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst); - match (arg_layout, ret_layout) { + match ( + self.layout_interner.get(*arg_layout), + self.layout_interner.get(*ret_layout), + ) { ( Layout::Builtin(Builtin::Int(IntWidth::I32 | IntWidth::I64)), Layout::Builtin(Builtin::Float(FloatWidth::F64)), @@ -1140,10 +1146,10 @@ impl< dst: &Symbol, src1: &Symbol, src2: &Symbol, - arg_layout: &Layout<'a>, + arg_layout: &InLayout<'a>, ) { - match arg_layout { - Layout::Builtin(single_register_int_builtins!()) => { + match *arg_layout { + single_register_int_builtins!() => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self .storage_manager @@ -1162,10 +1168,10 @@ impl< dst: &Symbol, src1: &Symbol, src2: &Symbol, - arg_layout: &Layout<'a>, + arg_layout: &InLayout<'a>, ) { - match arg_layout { - Layout::Builtin(single_register_int_builtins!()) => { + match *arg_layout { + single_register_int_builtins!() => { let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst); let src1_reg = self .storage_manager @@ -1188,14 +1194,13 @@ impl< dst: &Symbol, list: &Symbol, index: &Symbol, - ret_layout: &Layout<'a>, + ret_layout: &InLayout<'a>, ) { let (base_offset, _) = self.storage_manager.stack_offset_and_size(list); let index_reg = self .storage_manager .load_to_general_reg(&mut self.buf, index); - let ret_stack_size = - ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info()); + let ret_stack_size = self.layout_interner.stack_size(*ret_layout); // TODO: This can be optimized with smarter instructions. // Also can probably be moved into storage manager at least partly. 
self.storage_manager.with_tmp_general_reg( @@ -1206,7 +1211,7 @@ impl< ASM::mov_reg64_imm64(buf, tmp, ret_stack_size as i64); ASM::imul_reg64_reg64_reg64(buf, tmp, tmp, index_reg); ASM::add_reg64_reg64_reg64(buf, tmp, tmp, list_ptr); - match ret_layout { + match *ret_layout { single_register_integers!() if ret_stack_size == 8 => { let dst_reg = storage_manager.claim_general_reg(buf, dst); ASM::mov_reg64_mem64_offset32(buf, dst_reg, tmp, 0); @@ -1222,8 +1227,8 @@ impl< &mut self, dst: &Symbol, args: &'a [Symbol], - arg_layouts: &[Layout<'a>], - ret_layout: &Layout<'a>, + arg_layouts: &[InLayout<'a>], + ret_layout: &InLayout<'a>, ) { // We want to delegate to the zig builtin, but it takes some extra parameters. // Firstly, it takes the alignment of the list. @@ -1236,19 +1241,18 @@ impl< let elem = args[2]; let elem_layout = arg_layouts[2]; - let u32_layout = &Layout::Builtin(Builtin::Int(IntWidth::U32)); - let list_alignment = - list_layout.alignment_bytes(self.layout_interner, self.storage_manager.target_info()); + let u32_layout = Layout::U32; + let list_alignment = self.layout_interner.alignment_bytes(list_layout); self.load_literal( &Symbol::DEV_TMP, - u32_layout, + &u32_layout, &Literal::Int((list_alignment as i128).to_ne_bytes()), ); // Have to pass the input element by pointer, so put it on the stack and load it's address. self.storage_manager .ensure_symbol_on_stack(&mut self.buf, &elem); - let u64_layout = &Layout::Builtin(Builtin::Int(IntWidth::U64)); + let u64_layout = Layout::U64; let (new_elem_offset, _) = self.storage_manager.stack_offset_and_size(&elem); // Load address of output element into register. let reg = self @@ -1257,47 +1261,40 @@ impl< ASM::add_reg64_reg64_imm32(&mut self.buf, reg, CC::BASE_PTR_REG, new_elem_offset); // Load the elements size. - let elem_stack_size = - elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info()); + let elem_stack_size = self.layout_interner.stack_size(elem_layout); self.load_literal( &Symbol::DEV_TMP3, - u64_layout, + &u64_layout, &Literal::Int((elem_stack_size as i128).to_ne_bytes()), ); // Setup the return location. - let base_offset = self.storage_manager.claim_stack_area( - dst, - ret_layout.stack_size(self.layout_interner, self.storage_manager.target_info()), - ); + let base_offset = self + .storage_manager + .claim_stack_area(dst, self.layout_interner.stack_size(*ret_layout)); - let ret_fields = if let Layout::Struct { field_layouts, .. } = ret_layout { - field_layouts - } else { - internal_error!( - "Expected replace to return a struct instead found: {:?}", - ret_layout - ) - }; + let ret_fields = + if let Layout::Struct { field_layouts, .. } = self.layout_interner.get(*ret_layout) { + field_layouts + } else { + internal_error!( + "Expected replace to return a struct instead found: {:?}", + ret_layout + ) + }; // Only return list and old element. 
debug_assert_eq!(ret_fields.len(), 2); let (out_list_offset, out_elem_offset) = if ret_fields[0] == elem_layout { ( - base_offset - + ret_fields[0] - .stack_size(self.layout_interner, self.storage_manager.target_info()) - as i32, + base_offset + self.layout_interner.stack_size(ret_fields[0]) as i32, base_offset, ) } else { ( base_offset, - base_offset - + ret_fields[0] - .stack_size(self.layout_interner, self.storage_manager.target_info()) - as i32, + base_offset + self.layout_interner.stack_size(ret_fields[0]) as i32, ) }; @@ -1319,11 +1316,11 @@ impl< let lowlevel_arg_layouts = bumpalo::vec![ in self.env.arena; list_layout, - *u32_layout, + u32_layout, index_layout, - *u64_layout, - *u64_layout, - *u64_layout, + u64_layout, + u64_layout, + u64_layout, ]; self.build_fn_call( @@ -1372,29 +1369,26 @@ impl< fn create_array( &mut self, sym: &Symbol, - elem_layout: &Layout<'a>, + elem_layout: &InLayout<'a>, elems: &'a [ListLiteralElement<'a>], ) { // Allocate // This requires at least 8 for the refcount alignment. let allocation_alignment = std::cmp::max( 8, - elem_layout.allocation_alignment_bytes( - self.layout_interner, - self.storage_manager.target_info(), - ) as u64, + self.layout_interner + .allocation_alignment_bytes(*elem_layout) as u64, ); - let elem_size = - elem_layout.stack_size(self.layout_interner, self.storage_manager.target_info()) as u64; + let elem_size = self.layout_interner.stack_size(*elem_layout) as u64; let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */; - let u64_layout = Layout::Builtin(Builtin::Int(IntWidth::U64)); + let u64_layout = Layout::U64; self.load_literal( &Symbol::DEV_TMP, &u64_layout, &Literal::Int((allocation_size as i128).to_ne_bytes()), ); - let u32_layout = Layout::Builtin(Builtin::Int(IntWidth::U32)); + let u32_layout = Layout::U32; self.load_literal( &Symbol::DEV_TMP2, &u32_layout, @@ -1443,7 +1437,7 @@ impl< } }; // TODO: Expand to all types. - match elem_layout { + match self.layout_interner.get(*elem_layout) { Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => { let sym_reg = self .storage_manager @@ -1489,7 +1483,7 @@ impl< self.free_symbol(&Symbol::DEV_TMP3); } - fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]) { + fn create_struct(&mut self, sym: &Symbol, layout: &InLayout<'a>, fields: &'a [Symbol]) { self.storage_manager.create_struct( self.layout_interner, &mut self.buf, @@ -1504,7 +1498,7 @@ impl< sym: &Symbol, structure: &Symbol, index: u64, - field_layouts: &'a [Layout<'a>], + field_layouts: &'a [InLayout<'a>], ) { self.storage_manager.load_field_at_index( self.layout_interner, @@ -1564,8 +1558,8 @@ impl< ) } - fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>) { - match (lit, layout) { + fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>) { + match (lit, self.layout_interner.get(*layout)) { ( Literal::Int(x), Layout::Builtin(Builtin::Int( @@ -1635,10 +1629,10 @@ impl< self.storage_manager.free_symbol(sym); } - fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>) { + fn return_symbol(&mut self, sym: &Symbol, layout: &InLayout<'a>) { if self.storage_manager.is_stored_primitive(sym) { // Just load it to the correct type of reg as a stand alone value. 
- match layout { + match *layout { single_register_integers!() => { self.storage_manager.load_to_specified_general_reg( &mut self.buf, @@ -1734,6 +1728,822 @@ impl< } } } + + fn symbol_to_string(&self, symbol: Symbol, layout_id: roc_mono::layout::LayoutId) -> String { + layout_id.to_symbol_string(symbol, self.interns()) + } + + fn defined_in_app_module(&self, symbol: Symbol) -> bool { + symbol + .module_string(self.interns()) + .starts_with(roc_module::ident::ModuleName::APP) + } + + fn build_proc( + &mut self, + proc: roc_mono::ir::Proc<'a>, + layout_ids: &mut roc_mono::layout::LayoutIds<'a>, + ) -> (Vec, Vec, Vec<'a, (Symbol, String)>) { + let layout_id = layout_ids.get(proc.name.name(), &proc.ret_layout); + let proc_name = self.symbol_to_string(proc.name.name(), layout_id); + self.reset(proc_name, proc.is_self_recursive); + self.load_args(proc.args, &proc.ret_layout); + for (layout, sym) in proc.args { + self.set_layout_map(*sym, layout); + } + self.scan_ast(&proc.body); + self.create_free_map(); + self.build_stmt(&proc.body, &proc.ret_layout); + let mut helper_proc_names = bumpalo::vec![in self.env().arena]; + helper_proc_names.reserve(self.helper_proc_symbols().len()); + for (rc_proc_sym, rc_proc_layout) in self.helper_proc_symbols() { + let name = layout_ids + .get_toplevel(*rc_proc_sym, rc_proc_layout) + .to_symbol_string(*rc_proc_sym, self.interns()); + + helper_proc_names.push((*rc_proc_sym, name)); + } + let (bytes, relocs) = self.finalize(); + (bytes, relocs, helper_proc_names) + } + + fn build_stmt(&mut self, stmt: &Stmt<'a>, ret_layout: &roc_mono::layout::InLayout<'a>) { + match stmt { + Stmt::Let(sym, expr, layout, following) => { + self.build_expr(sym, expr, layout); + self.set_layout_map(*sym, layout); + self.free_symbols(stmt); + self.build_stmt(following, ret_layout); + } + Stmt::Ret(sym) => { + self.load_literal_symbols(&[*sym]); + self.return_symbol(sym, ret_layout); + self.free_symbols(stmt); + } + Stmt::Refcounting(modify, following) => { + let sym = modify.get_symbol(); + let layout = *self.layout_map().get(&sym).unwrap(); + + // Expand the Refcounting statement into more detailed IR with a function call + // If this layout requires a new RC proc, we get enough info to create a linker symbol + // for it. Here we don't create linker symbols at this time, but in Wasm backend, we do. 
+                let (rc_stmt, new_specializations) = {
+                    let (module_id, layout_interner, interns, rc_proc_gen) =
+                        self.module_interns_helpers_mut();
+                    let ident_ids = interns.all_ident_ids.get_mut(&module_id).unwrap();
+
+                    rc_proc_gen.expand_refcount_stmt(
+                        ident_ids,
+                        layout_interner,
+                        layout,
+                        modify,
+                        following,
+                    )
+                };
+
+                for spec in new_specializations.into_iter() {
+                    self.helper_proc_symbols_mut().push(spec);
+                }
+
+                self.build_stmt(rc_stmt, ret_layout)
+            }
+            Stmt::Switch {
+                cond_symbol,
+                cond_layout,
+                branches,
+                default_branch,
+                ret_layout,
+            } => {
+                self.load_literal_symbols(&[*cond_symbol]);
+                self.build_switch(
+                    cond_symbol,
+                    cond_layout,
+                    branches,
+                    default_branch,
+                    ret_layout,
+                );
+                self.free_symbols(stmt);
+            }
+            Stmt::Join {
+                id,
+                parameters,
+                body,
+                remainder,
+            } => {
+                for param in parameters.iter() {
+                    self.set_layout_map(param.symbol, &param.layout);
+                }
+                self.build_join(id, parameters, body, remainder, ret_layout);
+                self.free_symbols(stmt);
+            }
+            Stmt::Jump(id, args) => {
+                let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
+                    bumpalo::vec![in self.env().arena];
+                arg_layouts.reserve(args.len());
+                let layout_map = self.layout_map();
+                for arg in *args {
+                    if let Some(layout) = layout_map.get(arg) {
+                        arg_layouts.push(*layout);
+                    } else {
+                        internal_error!("the argument, {:?}, has no known layout", arg);
+                    }
+                }
+                self.build_jump(id, args, arg_layouts.into_bump_slice(), ret_layout);
+                self.free_symbols(stmt);
+            }
+            x => todo!("the statement, {:?}", x),
+        }
+    }
+
+    fn build_expr(
+        &mut self,
+        sym: &Symbol,
+        expr: &roc_mono::ir::Expr<'a>,
+        layout: &roc_mono::layout::InLayout<'a>,
+    ) {
+        match expr {
+            roc_mono::ir::Expr::Literal(lit) => {
+                if self.env().lazy_literals {
+                    self.literal_map().insert(*sym, (lit, layout));
+                } else {
+                    self.load_literal(sym, layout, lit);
+                }
+            }
+            roc_mono::ir::Expr::Call(roc_mono::ir::Call {
+                call_type,
+                arguments,
+            }) => {
+                match call_type {
+                    roc_mono::ir::CallType::ByName {
+                        name: func_sym,
+                        arg_layouts,
+                        ret_layout,
+                        ..
+                    } => {
+                        if let roc_module::low_level::LowLevelWrapperType::CanBeReplacedBy(
+                            lowlevel,
+                        ) =
+                            roc_module::low_level::LowLevelWrapperType::from_symbol(func_sym.name())
+                        {
+                            self.build_run_low_level(
+                                sym,
+                                &lowlevel,
+                                arguments,
+                                arg_layouts,
+                                ret_layout,
+                            )
+                        } else if self.defined_in_app_module(func_sym.name()) {
+                            let layout_id =
+                                roc_mono::layout::LayoutIds::default().get(func_sym.name(), layout);
+                            let fn_name = self.symbol_to_string(func_sym.name(), layout_id);
+                            // Now that the arguments are needed, load them if they are literals.
+                            self.load_literal_symbols(arguments);
+                            self.build_fn_call(sym, fn_name, arguments, arg_layouts, ret_layout)
+                        } else {
+                            self.build_builtin(
+                                sym,
+                                func_sym.name(),
+                                arguments,
+                                arg_layouts,
+                                ret_layout,
+                            )
+                        }
+                    }
+
+                    roc_mono::ir::CallType::LowLevel { op: lowlevel, .. } => {
+                        let mut arg_layouts: bumpalo::collections::Vec<
+                            roc_mono::layout::InLayout<'a>,
+                        > = bumpalo::vec![in self.env().arena];
+                        arg_layouts.reserve(arguments.len());
+                        let layout_map = self.layout_map();
+                        for arg in *arguments {
+                            if let Some(layout) = layout_map.get(arg) {
+                                arg_layouts.push(*layout);
+                            } else {
+                                internal_error!("the argument, {:?}, has no known layout", arg);
+                            }
+                        }
+                        self.build_run_low_level(
+                            sym,
+                            lowlevel,
+                            arguments,
+                            arg_layouts.into_bump_slice(),
+                            layout,
+                        )
+                    }
+                    x => todo!("the call type, {:?}", x),
+                }
+            }
+            roc_mono::ir::Expr::EmptyArray => {
+                self.create_empty_array(sym);
+            }
+            roc_mono::ir::Expr::Array { elem_layout, elems } => {
+                let mut syms = bumpalo::vec![in self.env().arena];
+                for sym in elems.iter().filter_map(|x| match x {
+                    ListLiteralElement::Symbol(sym) => Some(sym),
+                    _ => None,
+                }) {
+                    syms.push(*sym);
+                }
+                // TODO: This could be a huge waste.
+                // We probably want to call this within create_array, one element at a time.
+                self.load_literal_symbols(syms.into_bump_slice());
+                self.create_array(sym, elem_layout, elems);
+            }
+            roc_mono::ir::Expr::Struct(fields) => {
+                self.load_literal_symbols(fields);
+                self.create_struct(sym, layout, fields);
+            }
+            roc_mono::ir::Expr::StructAtIndex {
+                index,
+                field_layouts,
+                structure,
+            } => {
+                self.load_struct_at_index(sym, structure, *index, field_layouts);
+            }
+            roc_mono::ir::Expr::UnionAtIndex {
+                structure,
+                tag_id,
+                union_layout,
+                index,
+            } => {
+                self.load_union_at_index(sym, structure, *tag_id, *index, union_layout);
+            }
+            roc_mono::ir::Expr::GetTagId {
+                structure,
+                union_layout,
+            } => {
+                self.get_tag_id(sym, structure, union_layout);
+            }
+            roc_mono::ir::Expr::Tag {
+                tag_layout,
+                tag_id,
+                arguments,
+                ..
+            } => {
+                self.load_literal_symbols(arguments);
+                self.tag(sym, arguments, tag_layout, *tag_id);
+            }
+            x => todo!("the expression, {:?}", x),
+        }
+    }
+
+    fn build_run_low_level(
+        &mut self,
+        sym: &Symbol,
+        lowlevel: &roc_module::low_level::LowLevel,
+        args: &'a [Symbol],
+        arg_layouts: &[roc_mono::layout::InLayout<'a>],
+        ret_layout: &roc_mono::layout::InLayout<'a>,
+    ) {
+        // Now that the arguments are needed, load them if they are literals.
+        self.load_literal_symbols(args);
+        match lowlevel {
+            roc_module::low_level::LowLevel::NumAbs => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "NumAbs: expected to have exactly one argument"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumAbs: expected to have the same argument and return layout"
+                );
+                self.build_num_abs(sym, &args[0], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumAdd => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumAdd: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumAdd: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumAdd: expected to have the same argument and return layout"
+                );
+                self.build_num_add(sym, &args[0], &args[1], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumAddChecked => {
+                self.build_num_add_checked(sym, &args[0], &args[1], &arg_layouts[0], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumAcos => self.build_fn_call(
+                sym,
+                bitcode::NUM_ACOS[FloatWidth::F64].to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::NumAsin => self.build_fn_call(
+                sym,
+                bitcode::NUM_ASIN[FloatWidth::F64].to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::NumAtan => self.build_fn_call(
+                sym,
+                bitcode::NUM_ATAN[FloatWidth::F64].to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::NumMul => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumMul: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumMul: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumMul: expected to have the same argument and return layout"
+                );
+                self.build_num_mul(sym, &args[0], &args[1], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumDivTruncUnchecked
+            | roc_module::low_level::LowLevel::NumDivFrac => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumDiv: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumDiv: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumDiv: expected to have the same argument and return layout"
+                );
+                self.build_num_div(sym, &args[0], &args[1], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumNeg => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "NumNeg: expected to have exactly one argument"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumNeg: expected to have the same argument and return layout"
+                );
+                self.build_num_neg(sym, &args[0], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumPowInt => self.build_fn_call(
+                sym,
+                bitcode::NUM_POW_INT[IntWidth::I64].to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::NumSub => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumSub: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumSub: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], *ret_layout,
+                    "NumSub: expected to have the same argument and return layout"
+                );
+                self.build_num_sub(sym, &args[0], &args[1], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumBitwiseAnd => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_and(sym, &args[0], &args[1], int_width)
+                } else {
+                    internal_error!("bitwise and on a non-integer")
+                }
+            }
+            roc_module::low_level::LowLevel::NumBitwiseOr => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_or(sym, &args[0], &args[1], int_width)
+                } else {
+                    internal_error!("bitwise or on a non-integer")
+                }
+            }
+            roc_module::low_level::LowLevel::NumBitwiseXor => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_xor(sym, &args[0], &args[1], int_width)
+                } else {
+                    internal_error!("bitwise xor on a non-integer")
+                }
+            }
+            roc_module::low_level::LowLevel::Eq => {
+                debug_assert_eq!(2, args.len(), "Eq: expected to have exactly two arguments");
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "Eq: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "Eq: expected to have return layout of type Bool"
+                );
+                self.build_eq(sym, &args[0], &args[1], &arg_layouts[0])
+            }
+            roc_module::low_level::LowLevel::NotEq => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NotEq: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NotEq: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "NotEq: expected to have return layout of type Bool"
+                );
+                self.build_neq(sym, &args[0], &args[1], &arg_layouts[0])
+            }
+            roc_module::low_level::LowLevel::NumLt => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumLt: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumLt: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "NumLt: expected to have return layout of type Bool"
+                );
+                self.build_num_lt(sym, &args[0], &args[1], &arg_layouts[0])
+            }
+            roc_module::low_level::LowLevel::NumToFrac => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "NumToFrac: expected to have exactly one argument"
+                );
+
+                debug_assert!(
+                    matches!(*ret_layout, Layout::F32 | Layout::F64),
+                    "NumToFrac: expected to have return layout of type Float"
+                );
+                self.build_num_to_frac(sym, &args[0], &arg_layouts[0], ret_layout)
+            }
+            roc_module::low_level::LowLevel::NumLte => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumLte: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumLte: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "NumLte: expected to have return layout of type Bool"
+                );
+                self.build_num_lte(sym, &args[0], &args[1], &arg_layouts[0])
+            }
+            roc_module::low_level::LowLevel::NumGte => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "NumGte: expected to have exactly two arguments"
+                );
+                debug_assert_eq!(
+                    arg_layouts[0], arg_layouts[1],
+                    "NumGte: expected all arguments to have the same layout"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "NumGte: expected to have return layout of type Bool"
+                );
+                self.build_num_gte(sym, &args[0], &args[1], &arg_layouts[0])
+            }
+            roc_module::low_level::LowLevel::NumRound => self.build_fn_call(
+                sym,
+                bitcode::NUM_ROUND_F64[IntWidth::I64].to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::ListLen => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "ListLen: expected to have exactly one argument"
+                );
+                self.build_list_len(sym, &args[0])
+            }
+            roc_module::low_level::LowLevel::ListGetUnsafe => {
+                debug_assert_eq!(
+                    2,
+                    args.len(),
+                    "ListGetUnsafe: expected to have exactly two arguments"
+                );
+                self.build_list_get_unsafe(sym, &args[0], &args[1], ret_layout)
+            }
+            roc_module::low_level::LowLevel::ListReplaceUnsafe => {
+                debug_assert_eq!(
+                    3,
+                    args.len(),
+                    "ListReplaceUnsafe: expected to have exactly three arguments"
+                );
+                self.build_list_replace_unsafe(sym, args, arg_layouts, ret_layout)
+            }
+            roc_module::low_level::LowLevel::StrConcat => self.build_fn_call(
+                sym,
+                bitcode::STR_CONCAT.to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::PtrCast => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "PtrCast: expected to have exactly one argument"
+                );
+                self.build_ptr_cast(sym, &args[0])
+            }
+            roc_module::low_level::LowLevel::RefCountDec => self.build_fn_call(
+                sym,
+                bitcode::UTILS_DECREF.to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            roc_module::low_level::LowLevel::RefCountInc => self.build_fn_call(
+                sym,
+                bitcode::UTILS_INCREF.to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
+            x => todo!("low level, {:?}", x),
+        }
+    }
+
+    fn build_builtin(
+        &mut self,
+        sym: &Symbol,
+        func_sym: Symbol,
+        args: &'a [Symbol],
+        arg_layouts: &[roc_mono::layout::InLayout<'a>],
+        ret_layout: &roc_mono::layout::InLayout<'a>,
+    ) {
+        self.load_literal_symbols(args);
+        match func_sym {
+            Symbol::NUM_IS_ZERO => {
+                debug_assert_eq!(
+                    1,
+                    args.len(),
+                    "NumIsZero: expected to have exactly one argument"
+                );
+                debug_assert_eq!(
+                    Layout::BOOL,
+                    *ret_layout,
+                    "NumIsZero: expected to have return layout of type Bool"
+                );
+
+                self.load_literal(
+                    &Symbol::DEV_TMP,
+                    &arg_layouts[0],
+                    &Literal::Int(0i128.to_ne_bytes()),
+                );
+                self.build_eq(sym, &args[0], &Symbol::DEV_TMP, &arg_layouts[0]);
+                self.free_symbol(&Symbol::DEV_TMP)
+            }
+            Symbol::LIST_GET | Symbol::LIST_SET | Symbol::LIST_REPLACE => {
+                // TODO: This is probably simple enough to be worth inlining.
+                let layout_id = roc_mono::layout::LayoutIds::default().get(func_sym, ret_layout);
+                let fn_name = self.symbol_to_string(func_sym, layout_id);
+                // Now that the arguments are needed, load them if they are literals.
+                self.load_literal_symbols(args);
+                self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
+            }
+            Symbol::NUM_ADD_CHECKED => {
+                let layout_id = roc_mono::layout::LayoutIds::default().get(func_sym, ret_layout);
+                let fn_name = self.symbol_to_string(func_sym, layout_id);
+                // Now that the arguments are needed, load them if they are literals.
+                self.load_literal_symbols(args);
+                self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
+            }
+            Symbol::BOOL_TRUE => {
+                let bool_layout = Layout::BOOL;
+                self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(true));
+                self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
+            }
+            Symbol::BOOL_FALSE => {
+                let bool_layout = Layout::BOOL;
+                self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(false));
+                self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
+            }
+            _ => todo!("the function, {:?}", func_sym),
+        }
+    }
+
+    fn load_literal_symbols(&mut self, syms: &[Symbol]) {
+        if self.env().lazy_literals {
+            for sym in syms {
+                if let Some((lit, layout)) = self.literal_map().remove(sym) {
+                    // This operation is always safe but complicates lifetimes.
+                    // The map is reset when building a procedure and then used for that single procedure.
+                    // Since the lifetime is shorter than the entire backend, we need to use a pointer.
+ let (lit, layout) = unsafe { (*lit, *layout) }; + self.load_literal(sym, &layout, &lit); + } + } + } + } + + fn free_symbols(&mut self, stmt: &Stmt<'a>) { + if let Some(syms) = self.free_map().remove(&(stmt as *const Stmt<'a>)) { + for sym in syms { + // println!("Freeing symbol: {:?}", sym); + self.free_symbol(&sym); + } + } + } + + fn set_last_seen(&mut self, sym: Symbol, stmt: &Stmt<'a>) { + self.last_seen_map().insert(sym, stmt); + } + + fn set_layout_map(&mut self, sym: Symbol, layout: &roc_mono::layout::InLayout<'a>) { + if let Some(old_layout) = self.layout_map().insert(sym, *layout) { + // Layout map already contains the symbol. We should never need to overwrite. + // If the layout is not the same, that is a bug. + if &old_layout != layout { + internal_error!( + "Overwriting layout for symbol, {:?}: got {:?}, want {:?}", + sym, + layout, + old_layout + ) + } + } + } + + fn create_free_map(&mut self) { + let mut free_map = MutMap::default(); + let arena = self.env().arena; + for (sym, stmt) in self.last_seen_map() { + let vals = free_map + .entry(*stmt) + .or_insert_with(|| bumpalo::vec![in arena]); + vals.push(*sym); + } + self.set_free_map(free_map); + } + + fn scan_ast(&mut self, stmt: &Stmt<'a>) { + // Join map keeps track of join point parameters so that we can keep them around while they still might be jumped to. + let mut join_map: MutMap]> = MutMap::default(); + match stmt { + Stmt::Let(sym, expr, _, following) => { + self.set_last_seen(*sym, stmt); + match expr { + roc_mono::ir::Expr::Literal(_) => {} + + roc_mono::ir::Expr::Call(call) => self.scan_ast_call(call, stmt), + + roc_mono::ir::Expr::Tag { arguments, .. } => { + for sym in *arguments { + self.set_last_seen(*sym, stmt); + } + } + roc_mono::ir::Expr::ExprBox { symbol } => { + self.set_last_seen(*symbol, stmt); + } + roc_mono::ir::Expr::ExprUnbox { symbol } => { + self.set_last_seen(*symbol, stmt); + } + roc_mono::ir::Expr::Struct(syms) => { + for sym in *syms { + self.set_last_seen(*sym, stmt); + } + } + roc_mono::ir::Expr::StructAtIndex { structure, .. } => { + self.set_last_seen(*structure, stmt); + } + roc_mono::ir::Expr::GetTagId { structure, .. } => { + self.set_last_seen(*structure, stmt); + } + roc_mono::ir::Expr::UnionAtIndex { structure, .. } => { + self.set_last_seen(*structure, stmt); + } + roc_mono::ir::Expr::Array { elems, .. } => { + for elem in *elems { + if let ListLiteralElement::Symbol(sym) = elem { + self.set_last_seen(*sym, stmt); + } + } + } + roc_mono::ir::Expr::Reuse { + symbol, arguments, .. + } => { + self.set_last_seen(*symbol, stmt); + for sym in *arguments { + self.set_last_seen(*sym, stmt); + } + } + roc_mono::ir::Expr::Reset { symbol, .. } => { + self.set_last_seen(*symbol, stmt); + } + roc_mono::ir::Expr::EmptyArray => {} + roc_mono::ir::Expr::RuntimeErrorFunction(_) => {} + } + self.scan_ast(following); + } + + Stmt::Switch { + cond_symbol, + branches, + default_branch, + .. + } => { + self.set_last_seen(*cond_symbol, stmt); + for (_, _, branch) in *branches { + self.scan_ast(branch); + } + self.scan_ast(default_branch.1); + } + Stmt::Ret(sym) => { + self.set_last_seen(*sym, stmt); + } + Stmt::Refcounting(modify, following) => { + let sym = modify.get_symbol(); + + self.set_last_seen(sym, stmt); + self.scan_ast(following); + } + Stmt::Join { + parameters, + body: continuation, + remainder, + id: JoinPointId(sym), + .. 
+            } => {
+                self.set_last_seen(*sym, stmt);
+                join_map.insert(JoinPointId(*sym), parameters);
+                for param in *parameters {
+                    self.set_last_seen(param.symbol, stmt);
+                }
+                self.scan_ast(remainder);
+                self.scan_ast(continuation);
+            }
+            Stmt::Jump(JoinPointId(sym), symbols) => {
+                if let Some(parameters) = join_map.get(&JoinPointId(*sym)) {
+                    // Keep the parameters around. They will be overwritten when jumping.
+                    for param in *parameters {
+                        self.set_last_seen(param.symbol, stmt);
+                    }
+                }
+                for sym in *symbols {
+                    self.set_last_seen(*sym, stmt);
+                }
+            }
+
+            Stmt::Dbg { .. } => todo!("dbg is not implemented in the dev backend"),
+            Stmt::Expect { .. } => todo!("expect is not implemented in the dev backend"),
+            Stmt::ExpectFx { .. } => todo!("expect-fx is not implemented in the dev backend"),
+
+            Stmt::Crash(..) => todo!("crash is not implemented in the dev backend"),
+        }
+    }
+
+    fn scan_ast_call(&mut self, call: &roc_mono::ir::Call, stmt: &roc_mono::ir::Stmt<'a>) {
+        let roc_mono::ir::Call {
+            call_type,
+            arguments,
+        } = call;
+
+        for sym in *arguments {
+            self.set_last_seen(*sym, stmt);
+        }
+
+        match call_type {
+            roc_mono::ir::CallType::ByName { .. } => {}
+            roc_mono::ir::CallType::LowLevel { .. } => {}
+            roc_mono::ir::CallType::HigherOrder { .. } => {}
+            roc_mono::ir::CallType::Foreign { .. } => {}
+        }
+    }
 }
 
 /// This impl block is for ir related instructions that need backend specific information.
@@ -1767,44 +2577,42 @@ impl<
 #[macro_export]
 macro_rules! sign_extended_int_builtins {
     () => {
-        Builtin::Int(IntWidth::I8 | IntWidth::I16 | IntWidth::I32 | IntWidth::I64 | IntWidth::I128)
+        Layout::I8 | Layout::I16 | Layout::I32 | Layout::I64 | Layout::I128
     };
 }
 
 #[macro_export]
 macro_rules! zero_extended_int_builtins {
     () => {
-        Builtin::Int(IntWidth::U8 | IntWidth::U16 | IntWidth::U32 | IntWidth::U64 | IntWidth::U128)
+        Layout::U8 | Layout::U16 | Layout::U32 | Layout::U64 | Layout::U128
     };
 }
 
 #[macro_export]
 macro_rules! single_register_int_builtins {
     () => {
-        Builtin::Int(
-            IntWidth::I8
-                | IntWidth::I16
-                | IntWidth::I32
-                | IntWidth::I64
-                | IntWidth::U8
-                | IntWidth::U16
-                | IntWidth::U32
-                | IntWidth::U64,
-        )
+        Layout::I8
+            | Layout::I16
+            | Layout::I32
+            | Layout::I64
+            | Layout::U8
+            | Layout::U16
+            | Layout::U32
+            | Layout::U64
     };
 }
 
 #[macro_export]
 macro_rules! single_register_integers {
     () => {
-        Layout::Builtin(Builtin::Bool | single_register_int_builtins!()) | Layout::RecursivePointer
+        Layout::BOOL | single_register_int_builtins!() | Layout::RECURSIVE_PTR
    };
 }
 
 #[macro_export]
 macro_rules! single_register_floats {
     () => {
-        Layout::Builtin(Builtin::Float(FloatWidth::F32 | FloatWidth::F64))
+        Layout::F32 | Layout::F64
     };
 }
diff --git a/crates/compiler/gen_dev/src/generic64/storage.rs b/crates/compiler/gen_dev/src/generic64/storage.rs
index 7a7dd76aad..4a3090b0dd 100644
--- a/crates/compiler/gen_dev/src/generic64/storage.rs
+++ b/crates/compiler/gen_dev/src/generic64/storage.rs
@@ -11,7 +11,9 @@ use roc_module::symbol::Symbol;
 use roc_mono::{
     borrow::Ownership,
     ir::{JoinPointId, Param},
-    layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout},
+    layout::{
+        Builtin, InLayout, Layout, LayoutInterner, STLayoutInterner, TagIdIntType, UnionLayout,
+    },
 };
 use roc_target::TargetInfo;
 use std::cmp::max;
@@ -186,10 +188,6 @@ impl<
         self.fn_call_stack_size = 0;
     }
 
-    pub fn target_info(&self) -> TargetInfo {
-        self.target_info
-    }
-
     pub fn stack_size(&self) -> u32 {
         self.stack_size
     }
@@ -534,7 +532,7 @@ impl<
         sym: &Symbol,
         structure: &Symbol,
         index: u64,
-        field_layouts: &'a [Layout<'a>],
+        field_layouts: &'a [InLayout<'a>],
     ) {
         debug_assert!(index < field_layouts.len() as u64);
         // This must be removed and reinserted for ownership and mutability reasons.
@@ -546,23 +544,20 @@ impl<
         let (base_offset, size) = (*base_offset, *size);
         let mut data_offset = base_offset;
         for layout in field_layouts.iter().take(index as usize) {
-            let field_size = layout.stack_size(layout_interner, self.target_info);
+            let field_size = layout_interner.stack_size(*layout);
             data_offset += field_size as i32;
         }
         debug_assert!(data_offset < base_offset + size as i32);
         let layout = field_layouts[index as usize];
-        let size = layout.stack_size(layout_interner, self.target_info);
+        let size = layout_interner.stack_size(layout);
         self.allocation_map.insert(*sym, owned_data);
         self.symbol_storage_map.insert(
             *sym,
-            Stack(if is_primitive(&layout) {
+            Stack(if is_primitive(layout) {
                 ReferencedPrimitive {
                     base_offset: data_offset,
                     size,
-                    sign_extend: matches!(
-                        layout,
-                        Layout::Builtin(sign_extended_int_builtins!())
-                    ),
+                    sign_extend: matches!(layout, sign_extended_int_builtins!()),
                 }
             } else {
                 Complex {
@@ -639,17 +634,17 @@ impl<
         layout_interner: &mut STLayoutInterner<'a>,
         buf: &mut Vec<'a, u8>,
         sym: &Symbol,
-        layout: &Layout<'a>,
+        layout: &InLayout<'a>,
         fields: &'a [Symbol],
     ) {
-        let struct_size = layout.stack_size(layout_interner, self.target_info);
+        let struct_size = layout_interner.stack_size(*layout);
         if struct_size == 0 {
             self.symbol_storage_map.insert(*sym, NoData);
             return;
         }
         let base_offset = self.claim_stack_area(sym, struct_size);
 
-        if let Layout::Struct { field_layouts, .. } = layout {
+        if let Layout::Struct { field_layouts, .. } = layout_interner.get(*layout) {
             let mut current_offset = base_offset;
             for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
                 self.copy_symbol_to_stack_offset(
@@ -659,7 +654,7 @@ impl<
                     field,
                     field_layout,
                 );
-                let field_size = field_layout.stack_size(layout_interner, self.target_info);
+                let field_size = layout_interner.stack_size(*field_layout);
                 current_offset += field_size as i32;
             }
         } else {
@@ -699,7 +694,7 @@ impl<
                 field,
                 field_layout,
             );
-            let field_size = field_layout.stack_size(layout_interner, self.target_info);
+            let field_size = layout_interner.stack_size(*field_layout);
             current_offset += field_size as i32;
         }
         self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -717,7 +712,7 @@ impl<
        &mut self,
         buf: &mut Vec<'a, u8>,
         sym: &Symbol,
-        _layout: &Layout<'a>,
+        _layout: &InLayout<'a>,
     ) {
         let ret_reg = self.load_to_general_reg(buf, &Symbol::RET_POINTER);
         let (base_offset, size) = self.stack_offset_and_size(sym);
@@ -741,9 +736,9 @@ impl<
         buf: &mut Vec<'a, u8>,
         to_offset: i32,
         sym: &Symbol,
-        layout: &Layout<'a>,
+        layout: &InLayout<'a>,
     ) {
-        match layout {
+        match layout_interner.get(*layout) {
             Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {
                 debug_assert_eq!(to_offset % 8, 0);
                 let reg = self.load_to_general_reg(buf, sym);
@@ -754,16 +749,16 @@ impl<
                 let reg = self.load_to_float_reg(buf, sym);
                 ASM::mov_base32_freg64(buf, to_offset, reg);
             }
-            _ if layout.stack_size(layout_interner, self.target_info) == 0 => {}
+            _ if layout_interner.stack_size(*layout) == 0 => {}
             // TODO: Verify this is always true.
             // The dev backend does not deal with refcounting and does not care about if data is safe to memcpy.
             // It is just temporarily storing the value due to needing to free registers.
             // Later, it will be reloaded and stored in refcounted as needed.
-            _ if layout.stack_size(layout_interner, self.target_info) > 8 => {
+            _ if layout_interner.stack_size(*layout) > 8 => {
                 let (from_offset, size) = self.stack_offset_and_size(sym);
                 debug_assert!(from_offset % 8 == 0);
                 debug_assert!(size % 8 == 0);
-                debug_assert_eq!(size, layout.stack_size(layout_interner, self.target_info));
+                debug_assert_eq!(size, layout_interner.stack_size(*layout));
                 self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
                     for i in (0..size as i32).step_by(8) {
                         ASM::mov_reg64_base32(buf, reg, from_offset + i);
@@ -1024,7 +1019,7 @@ impl<
         }
         // Claim a location for every join point parameter to be loaded at.
         // Put everything on the stack for simplicity.
-        match layout {
+        match *layout {
            single_register_layouts!() => {
                 let base_offset = self.claim_stack_size(8);
                 self.symbol_storage_map.insert(
@@ -1038,7 +1033,7 @@ impl<
                     .insert(*symbol, Rc::new((base_offset, 8)));
             }
             _ => {
-                let stack_size = layout.stack_size(layout_interner, self.target_info);
+                let stack_size = layout_interner.stack_size(*layout);
                 if stack_size == 0 {
                     self.symbol_storage_map.insert(*symbol, NoData);
                 } else {
@@ -1059,7 +1054,7 @@ impl<
         buf: &mut Vec<'a, u8>,
         id: &JoinPointId,
         args: &[Symbol],
-        arg_layouts: &[Layout<'a>],
+        arg_layouts: &[InLayout<'a>],
     ) {
         // TODO: remove was use here and for current_storage to deal with borrow checker.
         // See if we can do this better.
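
The storage hunks above all follow the same mechanical shape: sizing queries move off of the Layout value (which had to be handed an interner and a TargetInfo at every call site) and onto the layout interner itself, with an interned InLayout handle standing in for the layout. The following is a minimal sketch of that pattern under invented names (this InLayout/Interner pair is a simplified stand-in, not the compiler's actual definitions):

use std::marker::PhantomData;

// A cheap, Copy handle: an index into the interner's arena of layouts.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct InLayout<'a>(usize, PhantomData<&'a ()>);

pub enum Layout {
    Bool,
    U64,
    F64,
}

pub struct Interner<'a> {
    layouts: Vec<Layout>,
    _marker: PhantomData<&'a ()>,
}

impl<'a> Interner<'a> {
    pub fn insert(&mut self, layout: Layout) -> InLayout<'a> {
        self.layouts.push(layout);
        InLayout(self.layouts.len() - 1, PhantomData)
    }

    // Resolve a handle back to the layout it names.
    pub fn get(&self, handle: InLayout<'a>) -> &Layout {
        &self.layouts[handle.0]
    }

    // Size queries live on the interner, so call sites no longer thread a
    // TargetInfo through: `interner.stack_size(layout)` replaces the old
    // `layout.stack_size(interner, target_info)` shape seen in the "-" lines.
    pub fn stack_size(&self, handle: InLayout<'a>) -> u32 {
        match self.get(handle) {
            Layout::Bool => 1,
            Layout::U64 | Layout::F64 => 8,
        }
    }
}

Because the handle is Copy and one word wide, it can also replace `&Layout<'a>` in maps and signatures throughout the backend, which is what most of the remaining hunks in this patch do.
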
@@ -1094,7 +1089,7 @@ impl<
             Stack(Primitive {
                 base_offset,
                 reg: None,
-            }) => match layout {
+            }) => match *layout {
                 single_register_integers!() => {
                     let reg = self.load_to_general_reg(buf, sym);
                     ASM::mov_base32_reg64(buf, *base_offset, reg);
@@ -1336,6 +1331,6 @@ impl<
     }
 }
 
-fn is_primitive(layout: &Layout<'_>) -> bool {
+fn is_primitive(layout: InLayout<'_>) -> bool {
     matches!(layout, single_register_layouts!())
 }
diff --git a/crates/compiler/gen_dev/src/generic64/x86_64.rs b/crates/compiler/gen_dev/src/generic64/x86_64.rs
index 41c059c8ce..0956483071 100644
--- a/crates/compiler/gen_dev/src/generic64/x86_64.rs
+++ b/crates/compiler/gen_dev/src/generic64/x86_64.rs
@@ -4,13 +4,9 @@ use crate::{
     single_register_layouts, Relocation,
 };
 use bumpalo::collections::Vec;
-use roc_builtins::bitcode::{FloatWidth, IntWidth};
 use roc_error_macros::internal_error;
 use roc_module::symbol::Symbol;
-use roc_mono::layout::{Builtin, Layout, STLayoutInterner};
-use roc_target::TargetInfo;
-
-const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
+use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner};
 
 // Not sure exactly how I want to represent registers.
 // If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
@@ -262,8 +258,8 @@ impl CallConv for X86_64Syste
             X86_64SystemV,
         >,
         layout_interner: &mut STLayoutInterner<'a>,
-        args: &'a [(Layout<'a>, Symbol)],
-        ret_layout: &Layout<'a>,
+        args: &'a [(InLayout<'a>, Symbol)],
+        ret_layout: &InLayout<'a>,
     ) {
         let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
         let mut general_i = 0;
@@ -273,8 +269,8 @@ impl CallConv for X86_64Syste
             general_i += 1;
         }
         for (layout, sym) in args.iter() {
-            let stack_size = layout.stack_size(layout_interner, TARGET_INFO);
-            match layout {
+            let stack_size = layout_interner.stack_size(*layout);
+            match *layout {
                 single_register_integers!() => {
                     if general_i < Self::GENERAL_PARAM_REGS.len() {
                         storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[general_i]);
@@ -322,16 +318,16 @@ impl CallConv for X86_64Syste
         layout_interner: &mut STLayoutInterner<'a>,
         dst: &Symbol,
         args: &[Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     ) {
         let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
         let mut general_i = 0;
         let mut float_i = 0;
         if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
             // Save space on the stack for the result we will return.
-            let base_offset = storage_manager
-                .claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
+            let base_offset =
+                storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
             // Set the first reg to the address base + offset.
             let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
             general_i += 1;
@@ -343,7 +339,7 @@ impl CallConv for X86_64Syste
             );
         }
         for (sym, layout) in args.iter().zip(arg_layouts.iter()) {
-            match layout {
+            match *layout {
                 single_register_integers!() => {
                     if general_i < Self::GENERAL_PARAM_REGS.len() {
                         storage_manager.load_to_specified_general_reg(
@@ -390,8 +386,8 @@ impl CallConv for X86_64Syste
                         tmp_stack_offset += 8;
                     }
                 }
-                x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
-                x if x.stack_size(layout_interner, TARGET_INFO) > 16 => {
+                x if layout_interner.stack_size(x) == 0 => {}
+                x if layout_interner.stack_size(x) > 16 => {
                     // TODO: Double check this.
                     // Just copy onto the stack.
                    // Use return reg as buffer because it will be empty right now.
@@ -431,14 +427,14 @@ impl CallConv for X86_64Syste
         >,
         layout_interner: &mut STLayoutInterner<'a>,
         sym: &Symbol,
-        layout: &Layout<'a>,
+        layout: &InLayout<'a>,
     ) {
-        match layout {
+        match *layout {
             single_register_layouts!() => {
                 internal_error!("single register layouts are not complex symbols");
             }
-            x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
-            x if !Self::returns_via_arg_pointer(layout_interner, x) => {
+            x if layout_interner.stack_size(x) == 0 => {}
+            x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
                 let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
                 debug_assert_eq!(base_offset % 8, 0);
                 if size <= 8 {
@@ -489,15 +485,15 @@ impl CallConv for X86_64Syste
         >,
         layout_interner: &mut STLayoutInterner<'a>,
         sym: &Symbol,
-        layout: &Layout<'a>,
+        layout: &InLayout<'a>,
     ) {
-        match layout {
+        match *layout {
             single_register_layouts!() => {
                 internal_error!("single register layouts are not complex symbols");
             }
-            x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
-            x if !Self::returns_via_arg_pointer(layout_interner, x) => {
-                let size = layout.stack_size(layout_interner, TARGET_INFO);
+            x if layout_interner.stack_size(x) == 0 => {}
+            x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
+                let size = layout_interner.stack_size(*layout);
                 let offset = storage_manager.claim_stack_area(sym, size);
                 if size <= 8 {
                     X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -526,11 +522,11 @@ impl CallConv for X86_64Syste
 impl X86_64SystemV {
     fn returns_via_arg_pointer<'a>(
         interner: &STLayoutInterner<'a>,
-        ret_layout: &Layout<'a>,
+        ret_layout: &InLayout<'a>,
     ) -> bool {
         // TODO: This will need to be more complex/extended to fully support the calling convention.
         // details here: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
-        ret_layout.stack_size(interner, TARGET_INFO) > 16
+        interner.stack_size(*ret_layout) > 16
     }
 }
@@ -675,8 +671,8 @@ impl CallConv for X86_64Windo
             X86_64WindowsFastcall,
         >,
         layout_interner: &mut STLayoutInterner<'a>,
-        args: &'a [(Layout<'a>, Symbol)],
-        ret_layout: &Layout<'a>,
+        args: &'a [(InLayout<'a>, Symbol)],
+        ret_layout: &InLayout<'a>,
     ) {
         let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
         let mut i = 0;
@@ -686,7 +682,7 @@ impl CallConv for X86_64Windo
         }
         for (layout, sym) in args.iter() {
             if i < Self::GENERAL_PARAM_REGS.len() {
-                match layout {
+                match *layout {
                     single_register_integers!() => {
                         storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[i]);
                         i += 1;
                     }
                     single_register_floats!() => {
                         storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
                         i += 1;
                     }
-                    x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
+                    x if layout_interner.stack_size(x) == 0 => {}
                     x => {
                         todo!("Loading args with layout {:?}", x);
                     }
                 }
             } else {
-                match layout {
+                match *layout {
                     single_register_layouts!() => {
                         storage_manager.primitive_stack_arg(sym, arg_offset);
                         arg_offset += 8;
                     }
@@ -728,18 +724,17 @@ impl CallConv for X86_64Windo
         layout_interner: &mut STLayoutInterner<'a>,
         dst: &Symbol,
         args: &[Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     ) {
         let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
         if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
             // Save space on the stack for the arg we will return.
-            storage_manager
-                .claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
+            storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
             todo!("claim first param reg for the address");
         }
         for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
-            match layout {
+            match *layout {
                 single_register_integers!() => {
                     if i < Self::GENERAL_PARAM_REGS.len() {
                         storage_manager.load_to_specified_general_reg(
@@ -784,7 +779,7 @@ impl CallConv for X86_64Windo
                         tmp_stack_offset += 8;
                     }
                 }
-                x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
+                x if layout_interner.stack_size(x) == 0 => {}
                 x => {
                     todo!("calling with arg type, {:?}", x);
                 }
@@ -805,7 +800,7 @@ impl CallConv for X86_64Windo
         >,
         _layout_interner: &mut STLayoutInterner<'a>,
         _sym: &Symbol,
-        _layout: &Layout<'a>,
+        _layout: &InLayout<'a>,
     ) {
         todo!("Returning complex symbols for X86_64");
     }
@@ -822,7 +817,7 @@ impl CallConv for X86_64Windo
         >,
         _layout_interner: &mut STLayoutInterner<'a>,
         _sym: &Symbol,
-        _layout: &Layout<'a>,
+        _layout: &InLayout<'a>,
     ) {
         todo!("Loading returned complex symbols for X86_64");
     }
@@ -831,11 +826,11 @@ impl CallConv for X86_64Windo
 impl X86_64WindowsFastcall {
     fn returns_via_arg_pointer<'a>(
         interner: &STLayoutInterner<'a>,
-        ret_layout: &Layout<'a>,
+        ret_layout: &InLayout<'a>,
     ) -> bool {
         // TODO: This is not fully correct; there are some exceptions for "vector" types.
         // details here: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-160#return-values
-        ret_layout.stack_size(interner, TARGET_INFO) > 8
+        interner.stack_size(*ret_layout) > 8
     }
 }
diff --git a/crates/compiler/gen_dev/src/lib.rs b/crates/compiler/gen_dev/src/lib.rs
index 38936a1be3..f1e608f145 100644
--- a/crates/compiler/gen_dev/src/lib.rs
+++ b/crates/compiler/gen_dev/src/lib.rs
@@ -18,7 +18,8 @@ use roc_mono::ir::{
     SelfRecursive, Stmt,
 };
 use roc_mono::layout::{
-    Builtin, Layout, LayoutId, LayoutIds, STLayoutInterner, TagIdIntType, UnionLayout,
+    Builtin, InLayout, Layout, LayoutId, LayoutIds, LayoutInterner, STLayoutInterner, TagIdIntType,
+    UnionLayout,
 };
 
 mod generic64;
@@ -63,6 +64,7 @@ pub enum Relocation {
 trait Backend<'a> {
     fn env(&self) -> &Env<'a>;
     fn interns(&self) -> &Interns;
+    fn interner(&self) -> &STLayoutInterner<'a>;
 
     // This method is suboptimal, but it seems to be the only way to make rust understand
     // that all of these values can be mutable at the same time. By returning them together,
@@ -104,7 +106,7 @@ trait Backend<'a> {
 
     // load_args is used to let the backend know what the args are.
     // The backend should track these args so it can use them as needed.
-    fn load_args(&mut self, args: &'a [(Layout<'a>, Symbol)], ret_layout: &Layout<'a>);
+    fn load_args(&mut self, args: &'a [(InLayout<'a>, Symbol)], ret_layout: &InLayout<'a>);
 
     /// Used for generating wrappers for malloc/realloc/free
     fn build_wrapped_jmp(&mut self) -> (&'a [u8], u64);
@@ -140,7 +142,7 @@ trait Backend<'a> {
     }
 
     /// build_stmt builds a statement and outputs it at the end of the buffer.
-    fn build_stmt(&mut self, stmt: &Stmt<'a>, ret_layout: &Layout<'a>) {
+    fn build_stmt(&mut self, stmt: &Stmt<'a>, ret_layout: &InLayout<'a>) {
         match stmt {
             Stmt::Let(sym, expr, layout, following) => {
                 self.build_expr(sym, expr, layout);
@@ -210,7 +212,7 @@ trait Backend<'a> {
                 self.free_symbols(stmt);
             }
             Stmt::Jump(id, args) => {
-                let mut arg_layouts: bumpalo::collections::Vec<Layout<'a>> =
+                let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
                     bumpalo::vec![in self.env().arena];
                 arg_layouts.reserve(args.len());
                 let layout_map = self.layout_map();
@@ -231,10 +233,10 @@ trait Backend<'a> {
     fn build_switch(
         &mut self,
         cond_symbol: &Symbol,
-        cond_layout: &Layout<'a>,
+        cond_layout: &InLayout<'a>,
         branches: &'a [(u64, BranchInfo<'a>, Stmt<'a>)],
         default_branch: &(BranchInfo<'a>, &'a Stmt<'a>),
-        ret_layout: &Layout<'a>,
+        ret_layout: &InLayout<'a>,
     );
 
     // build_join generates instructions for a join statement.
     fn build_join(
@@ -244,7 +246,7 @@ trait Backend<'a> {
         parameters: &'a [Param<'a>],
         body: &'a Stmt<'a>,
         remainder: &'a Stmt<'a>,
-        ret_layout: &Layout<'a>,
+        ret_layout: &InLayout<'a>,
     );
 
     // build_jump generates instructions for a jump statement.
     fn build_jump(
@@ -252,13 +254,13 @@ trait Backend<'a> {
         &mut self,
         id: &JoinPointId,
         args: &[Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     );
 
     /// build_expr builds the expressions for the specified symbol.
     /// The builder must keep track of the symbol because it may be referred to later.
-    fn build_expr(&mut self, sym: &Symbol, expr: &Expr<'a>, layout: &Layout<'a>) {
+    fn build_expr(&mut self, sym: &Symbol, expr: &Expr<'a>, layout: &InLayout<'a>) {
         match expr {
             Expr::Literal(lit) => {
                 if self.env().lazy_literals {
@@ -306,7 +308,7 @@ trait Backend<'a> {
             }
 
             CallType::LowLevel { op: lowlevel, .. } => {
-                let mut arg_layouts: bumpalo::collections::Vec<Layout<'a>> =
+                let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
                     bumpalo::vec![in self.env().arena];
                 arg_layouts.reserve(arguments.len());
                 let layout_map = self.layout_map();
@@ -389,8 +391,8 @@ trait Backend<'a> {
         sym: &Symbol,
         lowlevel: &LowLevel,
         args: &'a [Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     ) {
         // Now that the arguments are needed, load them if they are literals.
         self.load_literal_symbols(args);
@@ -515,22 +517,22 @@ trait Backend<'a> {
                 self.build_num_sub(sym, &args[0], &args[1], ret_layout)
             }
             LowLevel::NumBitwiseAnd => {
-                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
-                    self.build_int_bitwise_and(sym, &args[0], &args[1], *int_width)
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_and(sym, &args[0], &args[1], int_width)
                 } else {
                     internal_error!("bitwise and on a non-integer")
                 }
             }
             LowLevel::NumBitwiseOr => {
-                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
-                    self.build_int_bitwise_or(sym, &args[0], &args[1], *int_width)
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_or(sym, &args[0], &args[1], int_width)
                 } else {
                     internal_error!("bitwise or on a non-integer")
                 }
             }
             LowLevel::NumBitwiseXor => {
-                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
-                    self.build_int_bitwise_xor(sym, &args[0], &args[1], *int_width)
+                if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
+                    self.build_int_bitwise_xor(sym, &args[0], &args[1], int_width)
                 } else {
                     internal_error!("bitwise xor on a non-integer")
                 }
@@ -542,7 +544,7 @@ trait Backend<'a> {
                     "Eq: expected all arguments to have the same layout"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "Eq: expected to have return layout of type Bool"
                 );
@@ -559,7 +561,7 @@ trait Backend<'a> {
                     "NotEq: expected all arguments to have the same layout"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "NotEq: expected to have return layout of type Bool"
                 );
@@ -576,7 +578,7 @@ trait Backend<'a> {
                     "NumLt: expected all arguments to have the same layout"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "NumLt: expected to have return layout of type Bool"
                 );
@@ -590,10 +592,7 @@ trait Backend<'a> {
                 );
 
                 debug_assert!(
-                    matches!(
-                        *ret_layout,
-                        Layout::Builtin(Builtin::Float(FloatWidth::F32 | FloatWidth::F64)),
-                    ),
+                    matches!(*ret_layout, Layout::F32 | Layout::F64),
                     "NumToFrac: expected to have return layout of type Float"
                 );
                 self.build_num_to_frac(sym, &args[0], &arg_layouts[0], ret_layout)
@@ -609,7 +608,7 @@ trait Backend<'a> {
                     "NumLte: expected all arguments to have the same layout"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "NumLte: expected to have return layout of type Bool"
                 );
@@ -626,7 +625,7 @@ trait Backend<'a> {
                     "NumGte: expected all arguments to have the same layout"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "NumGte: expected to have return layout of type Bool"
                 );
@@ -703,8 +702,8 @@ trait Backend<'a> {
         sym: &Symbol,
         func_sym: Symbol,
         args: &'a [Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     ) {
         self.load_literal_symbols(args);
         match func_sym {
@@ -715,7 +714,7 @@ trait Backend<'a> {
                     "NumIsZero: expected to have exactly one argument"
                 );
                 debug_assert_eq!(
-                    Layout::Builtin(Builtin::Bool),
+                    Layout::BOOL,
                     *ret_layout,
                     "NumIsZero: expected to have return layout of type Bool"
                 );
@@ -744,12 +743,12 @@ trait Backend<'a> {
                 self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
             }
             Symbol::BOOL_TRUE => {
-                let bool_layout = Layout::Builtin(Builtin::Bool);
+                let bool_layout = Layout::BOOL;
                 self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(true));
                 self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
             }
             Symbol::BOOL_FALSE => {
-                let bool_layout = Layout::Builtin(Builtin::Bool);
+                let bool_layout = Layout::BOOL;
                 self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(false));
                 self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
             }
@@ -764,15 +763,15 @@ trait Backend<'a> {
         dst: &Symbol,
         fn_name: String,
         args: &[Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     );
 
     /// build_num_abs stores the absolute value of src into dst.
-    fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>);
+    fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
 
     /// build_num_add stores the sum of src1 and src2 into dst.
-    fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
+    fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
 
     /// build_num_add_checked stores the sum of src1 and src2 into dst.
     fn build_num_add_checked(
@@ -780,21 +779,21 @@ trait Backend<'a> {
         dst: &Symbol,
         src1: &Symbol,
         src2: &Symbol,
-        num_layout: &Layout<'a>,
-        return_layout: &Layout<'a>,
+        num_layout: &InLayout<'a>,
+        return_layout: &InLayout<'a>,
     );
 
     /// build_num_mul stores `src1 * src2` into dst.
-    fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
+    fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
 
     /// build_num_div stores `src1 / src2` into dst.
-    fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
+    fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
 
     /// build_num_neg stores the negated value of src into dst.
-    fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>);
+    fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
 
     /// build_num_sub stores the `src1 - src2` difference into dst.
-    fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
+    fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
 
     /// stores `src1 & src2` into dst.
     fn build_int_bitwise_and(
@@ -824,21 +823,27 @@ trait Backend<'a> {
     );
 
     /// build_eq stores the result of `src1 == src2` into dst.
-    fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
+    fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>);
 
     /// build_neq stores the result of `src1 != src2` into dst.
-    fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
+    fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>);
 
     /// build_num_lt stores the result of `src1 < src2` into dst.
-    fn build_num_lt(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
+    fn build_num_lt(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        arg_layout: &InLayout<'a>,
+    );
 
     /// build_num_to_frac converts a Num to a Frac.
     fn build_num_to_frac(
         &mut self,
         dst: &Symbol,
         src: &Symbol,
-        arg_layout: &Layout<'a>,
-        ret_layout: &Layout<'a>,
+        arg_layout: &InLayout<'a>,
+        ret_layout: &InLayout<'a>,
     );
 
     /// build_num_lte stores the result of `src1 <= src2` into dst.
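
The build_builtin hunk above leans on the same name-mangling step the rest of the backend uses: a (symbol, layout) pair is turned into a LayoutId via LayoutIds::get, and that id is folded into the emitted function name, so two monomorphizations of the same builtin get distinct symbols in the object file. A hypothetical, much-simplified sketch of that mapping (the real LayoutIds lives in mono/layout.rs and returns a LayoutId struct; the types and name format here are invented for illustration):

use std::collections::HashMap;

#[derive(Default)]
struct LayoutIds {
    // symbol -> (layout -> id); each symbol numbers its layouts independently.
    by_symbol: HashMap<u64, HashMap<u32, u32>>,
}

impl LayoutIds {
    // Same (symbol, layout) always yields the same id; a new layout for the
    // same symbol gets the next free id.
    fn get(&mut self, symbol: u64, layout: u32) -> u32 {
        let per_symbol = self.by_symbol.entry(symbol).or_default();
        let next = per_symbol.len() as u32;
        *per_symbol.entry(layout).or_insert(next)
    }
}

fn symbol_to_string(symbol: u64, layout_id: u32) -> String {
    // e.g. "proc_42_1": stable across calls with the same inputs.
    format!("proc_{}_{}", symbol, layout_id)
}

With InLayout being a small Copy index, it can serve directly as the hash key here, which is exactly why this patch switches LayoutIds from `Layout<'a>` keys to `InLayout<'a>` keys (see the mono/layout.rs hunks below).
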
@@ -847,7 +852,7 @@ trait Backend<'a> {
         dst: &Symbol,
         src1: &Symbol,
         src2: &Symbol,
-        arg_layout: &Layout<'a>,
+        arg_layout: &InLayout<'a>,
     );
 
     /// build_num_gte stores the result of `src1 >= src2` into dst.
@@ -856,7 +861,7 @@ trait Backend<'a> {
         dst: &Symbol,
         src1: &Symbol,
         src2: &Symbol,
-        arg_layout: &Layout<'a>,
+        arg_layout: &InLayout<'a>,
     );
 
     /// build_list_len returns the length of a list.
@@ -868,7 +873,7 @@ trait Backend<'a> {
         dst: &Symbol,
         list: &Symbol,
         index: &Symbol,
-        ret_layout: &Layout<'a>,
+        ret_layout: &InLayout<'a>,
     );
 
     /// build_list_replace_unsafe returns the old element and the new list, with the new element inserted.
     fn build_list_replace_unsafe(
@@ -876,15 +881,15 @@ trait Backend<'a> {
         &mut self,
         dst: &Symbol,
         args: &'a [Symbol],
-        arg_layouts: &[Layout<'a>],
-        ret_layout: &Layout<'a>,
+        arg_layouts: &[InLayout<'a>],
+        ret_layout: &InLayout<'a>,
     );
 
     /// build_ptr_cast loads the pointer to the reference count of src into dst.
     fn build_ptr_cast(&mut self, dst: &Symbol, src: &Symbol);
 
     /// literal_map gets the map from symbol to literal and layout, used for lazy loading and literal folding.
-    fn literal_map(&mut self) -> &mut MutMap<Symbol, (*const Literal<'a>, *const Layout<'a>)>;
+    fn literal_map(&mut self) -> &mut MutMap<Symbol, (*const Literal<'a>, *const InLayout<'a>)>;
 
     fn load_literal_symbols(&mut self, syms: &[Symbol]) {
         if self.env().lazy_literals {
@@ -901,7 +906,7 @@ trait Backend<'a> {
     }
 
     /// load_literal sets a symbol to be equal to a literal.
-    fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>);
+    fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>);
 
     /// create_empty_array creates an empty array with nullptr, zero length, and zero capacity.
     fn create_empty_array(&mut self, sym: &Symbol);
@@ -910,12 +915,12 @@ trait Backend<'a> {
     fn create_array(
         &mut self,
         sym: &Symbol,
-        elem_layout: &Layout<'a>,
+        elem_layout: &InLayout<'a>,
         elems: &'a [ListLiteralElement<'a>],
     );
 
     /// create_struct creates a struct with the elements specified loaded into it as data.
-    fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]);
+    fn create_struct(&mut self, sym: &Symbol, layout: &InLayout<'a>, fields: &'a [Symbol]);
 
     /// load_struct_at_index loads into `sym` the value at `index` in `structure`.
     fn load_struct_at_index(
@@ -923,7 +928,7 @@ trait Backend<'a> {
         sym: &Symbol,
         structure: &Symbol,
         index: u64,
-        field_layouts: &'a [Layout<'a>],
+        field_layouts: &'a [InLayout<'a>],
     );
 
     /// load_union_at_index loads into `sym` the value at `index` for `tag_id`.
@@ -949,7 +954,7 @@ trait Backend<'a> {
     );
 
     /// return_symbol moves a symbol to the correct return location for the backend and adds a jump to the end of the function.
-    fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>);
+    fn return_symbol(&mut self, sym: &Symbol, layout: &InLayout<'a>);
 
     /// free_symbols will free all symbols for the given statement.
     fn free_symbols(&mut self, stmt: &Stmt<'a>) {
@@ -973,7 +978,7 @@ trait Backend<'a> {
     fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>>;
 
     /// set_layout_map sets the layout for a specific symbol.
-    fn set_layout_map(&mut self, sym: Symbol, layout: &Layout<'a>) {
+    fn set_layout_map(&mut self, sym: Symbol, layout: &InLayout<'a>) {
         if let Some(old_layout) = self.layout_map().insert(sym, *layout) {
             // Layout map already contains the symbol. We should never need to overwrite.
             // If the layout is not the same, that is a bug.
@@ -989,7 +994,7 @@ trait Backend<'a> {
     }
 
     /// layout_map gets the map from symbol to layout.
-    fn layout_map(&mut self) -> &mut MutMap<Symbol, Layout<'a>>;
+    fn layout_map(&mut self) -> &mut MutMap<Symbol, InLayout<'a>>;
 
     fn create_free_map(&mut self) {
         let mut free_map = MutMap::default();
diff --git a/crates/compiler/mono/src/layout.rs b/crates/compiler/mono/src/layout.rs
index 41fbcdd134..46534e07f0 100644
--- a/crates/compiler/mono/src/layout.rs
+++ b/crates/compiler/mono/src/layout.rs
@@ -4234,14 +4234,14 @@ impl LayoutId {
 }
 
 struct IdsByLayout<'a> {
-    by_id: MutMap<Layout<'a>, u32>,
+    by_id: MutMap<InLayout<'a>, u32>,
     toplevels_by_id: MutMap<crate::ir::ProcLayout<'a>, u32>,
     next_id: u32,
 }
 
 impl<'a> IdsByLayout<'a> {
     #[inline(always)]
-    fn insert_layout(&mut self, layout: Layout<'a>) -> LayoutId {
+    fn insert_layout(&mut self, layout: InLayout<'a>) -> LayoutId {
         match self.by_id.entry(layout) {
             Entry::Vacant(vacant) => {
                 let answer = self.next_id;
@@ -4255,7 +4255,7 @@ impl<'a> IdsByLayout<'a> {
     }
 
     #[inline(always)]
-    fn singleton_layout(layout: Layout<'a>) -> (Self, LayoutId) {
+    fn singleton_layout(layout: InLayout<'a>) -> (Self, LayoutId) {
         let mut by_id = HashMap::with_capacity_and_hasher(1, default_hasher());
         by_id.insert(layout, 1);
 
@@ -4306,7 +4306,7 @@ impl<'a> LayoutIds<'a> {
     /// Returns a LayoutId which is unique for the given symbol and layout.
     /// If given the same symbol and same layout, returns the same LayoutId.
     #[inline(always)]
-    pub fn get<'b>(&mut self, symbol: Symbol, layout: &'b Layout<'a>) -> LayoutId {
+    pub fn get<'b>(&mut self, symbol: Symbol, layout: &'b InLayout<'a>) -> LayoutId {
         match self.by_symbol.entry(symbol) {
             Entry::Vacant(vacant) => {
                 let (ids_by_layout, layout_id) = IdsByLayout::singleton_layout(*layout);
diff --git a/crates/compiler/mono/src/layout/intern.rs b/crates/compiler/mono/src/layout/intern.rs
index d0b3ffecaf..83ce412af9 100644
--- a/crates/compiler/mono/src/layout/intern.rs
+++ b/crates/compiler/mono/src/layout/intern.rs
@@ -126,6 +126,11 @@ pub trait LayoutInterner<'a>: Sized {
         self.get(layout).alignment_bytes(self, self.target_info())
     }
 
+    fn allocation_alignment_bytes(&self, layout: InLayout<'a>) -> u32 {
+        self.get(layout)
+            .allocation_alignment_bytes(self, self.target_info())
+    }
+
     fn stack_size(&self, layout: InLayout<'a>) -> u32 {
         self.get(layout).stack_size(self, self.target_info())
     }
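
The intern.rs hunk extends a family of convenience default methods on the LayoutInterner trait: each one resolves an InLayout handle and forwards to the underlying layout value together with the interner's stored target information. A sketch of that delegation pattern with simplified, invented types (the real trait threads the interner itself into the layout methods as well):

#[derive(Clone, Copy)]
pub struct TargetInfo {
    pub ptr_width: u32,
}

pub struct Layout;

impl Layout {
    pub fn alignment_bytes(&self, target: TargetInfo) -> u32 {
        target.ptr_width
    }

    pub fn allocation_alignment_bytes(&self, target: TargetInfo) -> u32 {
        // Assumption for this sketch: heap allocations are at least
        // pointer-aligned (e.g. to leave room for a refcount slot).
        self.alignment_bytes(target).max(target.ptr_width)
    }
}

pub trait LayoutInterner {
    fn get(&self, handle: u32) -> &Layout;
    fn target_info(&self) -> TargetInfo;

    // Default methods mirror the ones this hunk adds: resolve, then forward
    // with the stored target info, so callers only ever pass a handle.
    fn alignment_bytes(&self, handle: u32) -> u32 {
        self.get(handle).alignment_bytes(self.target_info())
    }

    fn allocation_alignment_bytes(&self, handle: u32) -> u32 {
        self.get(handle).allocation_alignment_bytes(self.target_info())
    }
}

Keeping these as trait default methods means every interner implementation (single-threaded or otherwise) gets the full query surface for free, and call sites across gen_dev stay uniform.
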