Push layout interner further through Layout

Ayaz Hafiz 2022-08-31 14:14:34 -05:00
parent ed04c2040a
commit 3b4b1838b8
34 changed files with 1279 additions and 634 deletions
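Note on the pattern: every hunk below makes the same mechanical change. Layout queries that could previously be answered from a `Layout` value (plus a `TargetInfo`) now also take a reference to the layout interner, because some layouts, most notably lambda sets, are stored as interned indices that must be resolved before their size or representation is known. A minimal sketch of the idea, with simplified stand-in types rather than the real `roc_mono::layout` API:

```rust
use std::collections::HashMap;

// Stand-in for roc_mono's STLayoutInterner: each distinct layout is stored
// once and referenced by index (hypothetical, simplified types throughout).
#[derive(Default)]
struct Interner {
    store: Vec<Layout>,
    seen: HashMap<Layout, usize>,
}

#[derive(Clone, PartialEq, Eq, Hash)]
enum Layout {
    U64,
    Struct(Vec<Layout>),
    // A lambda set records only the interned index of its runtime
    // representation, so resolving it requires the interner.
    LambdaSet(usize),
}

impl Interner {
    // Return the existing index for `layout`, or store it under a new one.
    fn insert(&mut self, layout: Layout) -> usize {
        if let Some(&index) = self.seen.get(&layout) {
            return index;
        }
        let index = self.store.len();
        self.seen.insert(layout.clone(), index);
        self.store.push(layout);
        index
    }

    fn get(&self, index: usize) -> &Layout {
        &self.store[index]
    }
}

impl Layout {
    // Before this commit the real stack_size needed no interner; here it is
    // threaded in so the LambdaSet arm can chase the interned index.
    fn stack_size(&self, interner: &Interner) -> u32 {
        match self {
            Layout::U64 => 8,
            Layout::Struct(fields) => fields.iter().map(|f| f.stack_size(interner)).sum(),
            Layout::LambdaSet(repr) => interner.get(*repr).stack_size(interner),
        }
    }
}

fn main() {
    let mut interner = Interner::default();
    let repr = interner.insert(Layout::Struct(vec![Layout::U64, Layout::U64]));
    let closure = Layout::LambdaSet(repr);
    // Every caller, however deeply nested, must forward the interner.
    assert_eq!(closure.stack_size(&interner), 16);
}
```

Once queries have this shape they cannot be called without the interner, which is why the diff threads `interner` through `spec_program`, `stmt_spec`, `expr_spec`, and every backend `Env` below.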

Cargo.lock (generated)

@@ -3685,6 +3685,7 @@ dependencies = [
"roc_can",
"roc_collections",
"roc_error_macros",
"roc_intern",
"roc_module",
"roc_mono",
"roc_parse",
@@ -3727,6 +3728,7 @@ dependencies = [
"roc_builtins",
"roc_collections",
"roc_error_macros",
"roc_intern",
"roc_module",
"roc_mono",
"roc_std",


@@ -12,7 +12,9 @@ use roc_mono::ir::{
Call, CallType, Expr, HigherOrderLowLevel, HostExposedLayouts, ListLiteralElement, Literal,
ModifyRc, OptLevel, Proc, Stmt,
};
use roc_mono::layout::{Builtin, CapturesNiche, Layout, RawFunctionLayout, UnionLayout};
use roc_mono::layout::{
Builtin, CapturesNiche, Layout, RawFunctionLayout, STLayoutInterner, UnionLayout,
};
// just using one module for now
pub const MOD_APP: ModName = ModName(b"UserApp");
@@ -130,6 +132,7 @@ fn bytes_as_ascii(bytes: &[u8]) -> String {
}
pub fn spec_program<'a, I>(
interner: &STLayoutInterner,
opt_level: OptLevel,
opt_entry_point: Option<roc_mono::ir::EntryPoint<'a>>,
procs: I,
@@ -214,7 +217,7 @@
);
}
let (spec, type_names) = proc_spec(proc)?;
let (spec, type_names) = proc_spec(interner, proc)?;
type_definitions.extend(type_names);
@@ -231,8 +234,12 @@
);
let roc_main = FuncName(&roc_main_bytes);
let entry_point_function =
build_entry_point(entry_point.layout, roc_main, &host_exposed_functions)?;
let entry_point_function = build_entry_point(
interner,
entry_point.layout,
roc_main,
&host_exposed_functions,
)?;
let entry_point_name = FuncName(ENTRY_POINT_NAME);
m.add_func(entry_point_name, entry_point_function)?;
}
@@ -243,7 +250,7 @@ where
let mut builder = TypeDefBuilder::new();
let variant_types = recursive_variant_types(&mut builder, &union_layout)?;
let variant_types = recursive_variant_types(&mut builder, interner, &union_layout)?;
let root_type = if let UnionLayout::NonNullableUnwrapped(_) = union_layout {
debug_assert_eq!(variant_types.len(), 1);
variant_types[0]
@@ -301,6 +308,7 @@ fn terrible_hack(builder: &mut FuncDefBuilder, block: BlockId, type_id: TypeId)
}
fn build_entry_point(
interner: &STLayoutInterner,
layout: roc_mono::ir::ProcLayout,
func_name: FuncName,
host_exposed_functions: &[([u8; SIZE], &[Layout])],
@@ -314,8 +322,12 @@ fn build_entry_point(
let block = builder.add_block();
// to the modelling language, the arguments appear out of thin air
let argument_type =
build_tuple_type(&mut builder, layout.arguments, &WhenRecursive::Unreachable)?;
let argument_type = build_tuple_type(
&mut builder,
interner,
layout.arguments,
&WhenRecursive::Unreachable,
)?;
// does not make any assumptions about the input
// let argument = builder.add_unknown_with(block, &[], argument_type)?;
@@ -346,6 +358,7 @@ fn build_entry_point(
let type_id = layout_spec(
&mut builder,
interner,
&Layout::struct_no_name_order(layouts),
&WhenRecursive::Unreachable,
)?;
@@ -371,7 +384,10 @@
Ok(spec)
}
fn proc_spec<'a>(proc: &Proc<'a>) -> Result<(FuncDef, MutSet<UnionLayout<'a>>)> {
fn proc_spec<'a>(
interner: &STLayoutInterner<'a>,
proc: &Proc<'a>,
) -> Result<(FuncDef, MutSet<UnionLayout<'a>>)> {
let mut builder = FuncDefBuilder::new();
let mut env = Env::default();
@@ -386,15 +402,28 @@ fn proc_spec<'a>(proc: &Proc<'a>) -> Result<(FuncDef, MutSet<UnionLayout<'a>>)>
argument_layouts.push(*layout);
}
let value_id = stmt_spec(&mut builder, &mut env, block, &proc.ret_layout, &proc.body)?;
let value_id = stmt_spec(
&mut builder,
interner,
&mut env,
block,
&proc.ret_layout,
&proc.body,
)?;
let root = BlockExpr(block, value_id);
let arg_type_id = layout_spec(
&mut builder,
interner,
&Layout::struct_no_name_order(&argument_layouts),
&WhenRecursive::Unreachable,
)?;
let ret_type_id = layout_spec(&mut builder, &proc.ret_layout, &WhenRecursive::Unreachable)?;
let ret_type_id = layout_spec(
&mut builder,
interner,
&proc.ret_layout,
&WhenRecursive::Unreachable,
)?;
let spec = builder.build(arg_type_id, ret_type_id, root)?;
@@ -410,6 +439,7 @@ struct Env<'a> {
fn stmt_spec<'a>(
builder: &mut FuncDefBuilder,
interner: &STLayoutInterner<'a>,
env: &mut Env<'a>,
block: BlockId,
layout: &Layout,
@@ -419,20 +449,20 @@ fn stmt_spec<'a>(
match stmt {
Let(symbol, expr, expr_layout, mut continuation) => {
let value_id = expr_spec(builder, env, block, expr_layout, expr)?;
let value_id = expr_spec(builder, interner, env, block, expr_layout, expr)?;
env.symbols.insert(*symbol, value_id);
let mut queue = vec![symbol];
while let Let(symbol, expr, expr_layout, c) = continuation {
let value_id = expr_spec(builder, env, block, expr_layout, expr)?;
let value_id = expr_spec(builder, interner, env, block, expr_layout, expr)?;
env.symbols.insert(*symbol, value_id);
queue.push(symbol);
continuation = c;
}
let result = stmt_spec(builder, env, block, layout, continuation)?;
let result = stmt_spec(builder, interner, env, block, layout, continuation)?;
for symbol in queue {
env.symbols.remove(symbol);
@@ -456,14 +486,14 @@ fn stmt_spec<'a>(
for branch in it {
let block = builder.add_block();
let value_id = stmt_spec(builder, env, block, layout, branch)?;
let value_id = stmt_spec(builder, interner, env, block, layout, branch)?;
cases.push(BlockExpr(block, value_id));
}
builder.add_choice(block, &cases)
}
Expect { remainder, .. } => stmt_spec(builder, env, block, layout, remainder),
ExpectFx { remainder, .. } => stmt_spec(builder, env, block, layout, remainder),
Expect { remainder, .. } => stmt_spec(builder, interner, env, block, layout, remainder),
ExpectFx { remainder, .. } => stmt_spec(builder, interner, env, block, layout, remainder),
Ret(symbol) => Ok(env.symbols[symbol]),
Refcounting(modify_rc, continuation) => match modify_rc {
ModifyRc::Inc(symbol, _) => {
@@ -473,7 +503,7 @@ fn stmt_spec<'a>(
// and a bit more permissive in its type
builder.add_recursive_touch(block, argument)?;
stmt_spec(builder, env, block, layout, continuation)
stmt_spec(builder, interner, env, block, layout, continuation)
}
ModifyRc::Dec(symbol) => {
@@ -481,14 +511,14 @@ fn stmt_spec<'a>(
builder.add_recursive_touch(block, argument)?;
stmt_spec(builder, env, block, layout, continuation)
stmt_spec(builder, interner, env, block, layout, continuation)
}
ModifyRc::DecRef(symbol) => {
let argument = env.symbols[symbol];
builder.add_recursive_touch(block, argument)?;
stmt_spec(builder, env, block, layout, continuation)
stmt_spec(builder, interner, env, block, layout, continuation)
}
},
Join {
@@ -502,12 +532,13 @@ fn stmt_spec<'a>(
for p in parameters.iter() {
type_ids.push(layout_spec(
builder,
interner,
&p.layout,
&WhenRecursive::Unreachable,
)?);
}
let ret_type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let ret_type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
let jp_arg_type_id = builder.add_tuple_type(&type_ids)?;
@@ -522,7 +553,7 @@ fn stmt_spec<'a>(
// first, with the current variable bindings, process the remainder
let cont_block = builder.add_block();
let cont_value_id = stmt_spec(builder, env, cont_block, layout, remainder)?;
let cont_value_id = stmt_spec(builder, interner, env, cont_block, layout, remainder)?;
// only then introduce variables bound by the jump point, and process its body
let join_body_sub_block = {
@@ -536,7 +567,8 @@ fn stmt_spec<'a>(
env.symbols.insert(p.symbol, value_id);
}
let jp_body_value_id = stmt_spec(builder, env, jp_body_block, layout, body)?;
let jp_body_value_id =
stmt_spec(builder, interner, env, jp_body_block, layout, body)?;
BlockExpr(jp_body_block, jp_body_value_id)
};
@@ -547,14 +579,14 @@ fn stmt_spec<'a>(
builder.add_sub_block(block, BlockExpr(cont_block, cont_value_id))
}
Jump(id, symbols) => {
let ret_type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let ret_type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
let argument = build_tuple_value(builder, env, block, symbols)?;
let jpid = env.join_points[id];
builder.add_jump(block, jpid, argument, ret_type_id)
}
RuntimeError(_) => {
let type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
builder.add_terminate(block, type_id)
}
@@ -591,13 +623,14 @@ enum WhenRecursive<'a> {
fn build_recursive_tuple_type(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
layouts: &[Layout],
when_recursive: &WhenRecursive,
) -> Result<TypeId> {
let mut field_types = Vec::new();
for field in layouts.iter() {
let type_id = layout_spec_help(builder, field, when_recursive)?;
let type_id = layout_spec_help(builder, interner, field, when_recursive)?;
field_types.push(type_id);
}
@@ -606,13 +639,14 @@ fn build_recursive_tuple_type(
fn build_tuple_type(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
layouts: &[Layout],
when_recursive: &WhenRecursive,
) -> Result<TypeId> {
let mut field_types = Vec::new();
for field in layouts.iter() {
field_types.push(layout_spec(builder, field, when_recursive)?);
field_types.push(layout_spec(builder, interner, field, when_recursive)?);
}
builder.add_tuple_type(&field_types)
@@ -646,6 +680,7 @@ fn add_loop(
fn call_spec(
builder: &mut FuncDefBuilder,
interner: &STLayoutInterner,
env: &Env,
block: BlockId,
layout: &Layout,
@@ -681,12 +716,14 @@ fn call_spec(
.map(|symbol| env.symbols[symbol])
.collect();
let result_type = layout_spec(builder, ret_layout, &WhenRecursive::Unreachable)?;
let result_type =
layout_spec(builder, interner, ret_layout, &WhenRecursive::Unreachable)?;
builder.add_unknown_with(block, &arguments, result_type)
}
LowLevel { op, update_mode } => lowlevel_spec(
builder,
interner,
env,
block,
layout,
@@ -751,12 +788,20 @@ fn call_spec(
list_append(builder, block, update_mode_var, state, new_element)
};
let output_element_type =
layout_spec(builder, return_layout, &WhenRecursive::Unreachable)?;
let output_element_type = layout_spec(
builder,
interner,
return_layout,
&WhenRecursive::Unreachable,
)?;
let state_layout = Layout::Builtin(Builtin::List(return_layout));
let state_type =
layout_spec(builder, &state_layout, &WhenRecursive::Unreachable)?;
let state_type = layout_spec(
builder,
interner,
&state_layout,
&WhenRecursive::Unreachable,
)?;
let init_state = new_list(builder, block, output_element_type)?;
@@ -781,8 +826,12 @@ fn call_spec(
};
let state_layout = Layout::Builtin(Builtin::List(&argument_layouts[0]));
let state_type =
layout_spec(builder, &state_layout, &WhenRecursive::Unreachable)?;
let state_type = layout_spec(
builder,
interner,
&state_layout,
&WhenRecursive::Unreachable,
)?;
let init_state = list;
add_loop(builder, block, state_type, init_state, loop_body)
@@ -806,12 +855,20 @@ fn call_spec(
list_append(builder, block, update_mode_var, state, new_element)
};
let output_element_type =
layout_spec(builder, return_layout, &WhenRecursive::Unreachable)?;
let output_element_type = layout_spec(
builder,
interner,
return_layout,
&WhenRecursive::Unreachable,
)?;
let state_layout = Layout::Builtin(Builtin::List(return_layout));
let state_type =
layout_spec(builder, &state_layout, &WhenRecursive::Unreachable)?;
let state_type = layout_spec(
builder,
interner,
&state_layout,
&WhenRecursive::Unreachable,
)?;
let init_state = new_list(builder, block, output_element_type)?;
@@ -841,12 +898,20 @@ fn call_spec(
list_append(builder, block, update_mode_var, state, new_element)
};
let output_element_type =
layout_spec(builder, return_layout, &WhenRecursive::Unreachable)?;
let output_element_type = layout_spec(
builder,
interner,
return_layout,
&WhenRecursive::Unreachable,
)?;
let state_layout = Layout::Builtin(Builtin::List(return_layout));
let state_type =
layout_spec(builder, &state_layout, &WhenRecursive::Unreachable)?;
let state_type = layout_spec(
builder,
interner,
&state_layout,
&WhenRecursive::Unreachable,
)?;
let init_state = new_list(builder, block, output_element_type)?;
@@ -882,12 +947,20 @@ fn call_spec(
list_append(builder, block, update_mode_var, state, new_element)
};
let output_element_type =
layout_spec(builder, return_layout, &WhenRecursive::Unreachable)?;
let output_element_type = layout_spec(
builder,
interner,
return_layout,
&WhenRecursive::Unreachable,
)?;
let state_layout = Layout::Builtin(Builtin::List(return_layout));
let state_type =
layout_spec(builder, &state_layout, &WhenRecursive::Unreachable)?;
let state_type = layout_spec(
builder,
interner,
&state_layout,
&WhenRecursive::Unreachable,
)?;
let init_state = new_list(builder, block, output_element_type)?;
@@ -931,6 +1004,7 @@ fn list_clone(
fn lowlevel_spec(
builder: &mut FuncDefBuilder,
interner: &STLayoutInterner,
env: &Env,
block: BlockId,
layout: &Layout,
@@ -940,7 +1014,7 @@ fn lowlevel_spec(
) -> Result<ValueId> {
use LowLevel::*;
let type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
let mode = update_mode.to_bytes();
let update_mode_var = UpdateModeVar(&mode);
@@ -1048,8 +1122,12 @@ fn lowlevel_spec(
match layout {
Layout::Builtin(Builtin::List(element_layout)) => {
let type_id =
layout_spec(builder, element_layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(
builder,
interner,
element_layout,
&WhenRecursive::Unreachable,
)?;
new_list(builder, block, type_id)
}
_ => unreachable!("empty array does not have a list layout"),
@@ -1092,7 +1170,7 @@ fn lowlevel_spec(
// TODO overly pessimistic
let arguments: Vec<_> = arguments.iter().map(|symbol| env.symbols[symbol]).collect();
let result_type = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let result_type = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
builder.add_unknown_with(block, &arguments, result_type)
}
@@ -1101,16 +1179,18 @@ fn lowlevel_spec(
fn recursive_tag_variant(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
union_layout: &UnionLayout,
fields: &[Layout],
) -> Result<TypeId> {
let when_recursive = WhenRecursive::Loop(*union_layout);
build_recursive_tuple_type(builder, fields, &when_recursive)
build_recursive_tuple_type(builder, interner, fields, &when_recursive)
}
fn recursive_variant_types(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
union_layout: &UnionLayout,
) -> Result<Vec<TypeId>> {
use UnionLayout::*;
@@ -1125,11 +1205,16 @@ fn recursive_variant_types(
result = Vec::with_capacity(tags.len());
for tag in tags.iter() {
result.push(recursive_tag_variant(builder, union_layout, tag)?);
result.push(recursive_tag_variant(builder, interner, union_layout, tag)?);
}
}
NonNullableUnwrapped(fields) => {
result = vec![recursive_tag_variant(builder, union_layout, fields)?];
result = vec![recursive_tag_variant(
builder,
interner,
union_layout,
fields,
)?];
}
NullableWrapped {
nullable_id,
@@ -1140,21 +1225,21 @@ fn recursive_variant_types(
let cutoff = *nullable_id as usize;
for tag in tags[..cutoff].iter() {
result.push(recursive_tag_variant(builder, union_layout, tag)?);
result.push(recursive_tag_variant(builder, interner, union_layout, tag)?);
}
result.push(recursive_tag_variant(builder, union_layout, &[])?);
result.push(recursive_tag_variant(builder, interner, union_layout, &[])?);
for tag in tags[cutoff..].iter() {
result.push(recursive_tag_variant(builder, union_layout, tag)?);
result.push(recursive_tag_variant(builder, interner, union_layout, tag)?);
}
}
NullableUnwrapped {
nullable_id,
other_fields: fields,
} => {
let unit = recursive_tag_variant(builder, union_layout, &[])?;
let other_type = recursive_tag_variant(builder, union_layout, fields)?;
let unit = recursive_tag_variant(builder, interner, union_layout, &[])?;
let other_type = recursive_tag_variant(builder, interner, union_layout, fields)?;
if *nullable_id {
// nullable_id == 1
@@ -1176,6 +1261,7 @@ fn worst_case_type(context: &mut impl TypeContext) -> Result<TypeId> {
fn expr_spec<'a>(
builder: &mut FuncDefBuilder,
interner: &STLayoutInterner,
env: &mut Env<'a>,
block: BlockId,
layout: &Layout<'a>,
@@ -1185,7 +1271,7 @@ fn expr_spec<'a>(
match expr {
Literal(literal) => literal_spec(builder, block, literal),
Call(call) => call_spec(builder, env, block, layout, call),
Call(call) => call_spec(builder, interner, env, block, layout, call),
Reuse {
tag_layout,
tag_id,
@@ -1201,8 +1287,12 @@ fn expr_spec<'a>(
let value_id = match tag_layout {
UnionLayout::NonRecursive(tags) => {
let variant_types =
non_recursive_variant_types(builder, tags, &WhenRecursive::Unreachable)?;
let variant_types = non_recursive_variant_types(
builder,
interner,
tags,
&WhenRecursive::Unreachable,
)?;
let value_id = build_tuple_value(builder, env, block, arguments)?;
return builder.add_make_union(block, &variant_types, *tag_id as u32, value_id);
}
@@ -1221,7 +1311,7 @@ fn expr_spec<'a>(
UnionLayout::NullableUnwrapped { .. } => data_id,
};
let variant_types = recursive_variant_types(builder, tag_layout)?;
let variant_types = recursive_variant_types(builder, interner, tag_layout)?;
let union_id =
builder.add_make_union(block, &variant_types, *tag_id as u32, value_id)?;
@@ -1307,7 +1397,7 @@ fn expr_spec<'a>(
builder.add_get_tuple_field(block, value_id, *index as u32)
}
Array { elem_layout, elems } => {
let type_id = layout_spec(builder, elem_layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(builder, interner, elem_layout, &WhenRecursive::Unreachable)?;
let list = new_list(builder, block, type_id)?;
@@ -1334,19 +1424,24 @@ fn expr_spec<'a>(
EmptyArray => match layout {
Layout::Builtin(Builtin::List(element_layout)) => {
let type_id = layout_spec(builder, element_layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(
builder,
interner,
element_layout,
&WhenRecursive::Unreachable,
)?;
new_list(builder, block, type_id)
}
_ => unreachable!("empty array does not have a list layout"),
},
Reset { symbol, .. } => {
let type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
let value_id = env.symbols[symbol];
builder.add_unknown_with(block, &[value_id], type_id)
}
RuntimeErrorFunction(_) => {
let type_id = layout_spec(builder, layout, &WhenRecursive::Unreachable)?;
let type_id = layout_spec(builder, interner, layout, &WhenRecursive::Unreachable)?;
builder.add_terminate(block, type_id)
}
@@ -1375,14 +1470,16 @@ fn literal_spec(
fn layout_spec(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
layout: &Layout,
when_recursive: &WhenRecursive,
) -> Result<TypeId> {
layout_spec_help(builder, layout, when_recursive)
layout_spec_help(builder, interner, layout, when_recursive)
}
fn non_recursive_variant_types(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
tags: &[&[Layout]],
// If there is a recursive pointer latent within this layout, coming from a containing layout.
when_recursive: &WhenRecursive,
@@ -1390,7 +1487,7 @@ fn non_recursive_variant_types(
let mut result = Vec::with_capacity(tags.len());
for tag in tags.iter() {
result.push(build_tuple_type(builder, tag, when_recursive)?);
result.push(build_tuple_type(builder, interner, tag, when_recursive)?);
}
Ok(result)
@@ -1398,19 +1495,21 @@ fn non_recursive_variant_types(
fn layout_spec_help(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
layout: &Layout,
when_recursive: &WhenRecursive,
) -> Result<TypeId> {
use Layout::*;
match layout {
Builtin(builtin) => builtin_spec(builder, builtin, when_recursive),
Builtin(builtin) => builtin_spec(builder, interner, builtin, when_recursive),
Struct { field_layouts, .. } => {
build_recursive_tuple_type(builder, field_layouts, when_recursive)
build_recursive_tuple_type(builder, interner, field_layouts, when_recursive)
}
LambdaSet(lambda_set) => layout_spec_help(
builder,
&lambda_set.runtime_representation(),
interner,
&lambda_set.runtime_representation(interner),
when_recursive,
),
Union(union_layout) => {
@@ -1422,7 +1521,8 @@ fn layout_spec_help(
builder.add_tuple_type(&[])
}
UnionLayout::NonRecursive(tags) => {
let variant_types = non_recursive_variant_types(builder, tags, when_recursive)?;
let variant_types =
non_recursive_variant_types(builder, interner, tags, when_recursive)?;
builder.add_union_type(&variant_types)
}
UnionLayout::Recursive(_)
@@ -1438,7 +1538,7 @@ fn layout_spec_help(
}
Boxed(inner_layout) => {
let inner_type = layout_spec_help(builder, inner_layout, when_recursive)?;
let inner_type = layout_spec_help(builder, interner, inner_layout, when_recursive)?;
let cell_type = builder.add_heap_cell_type();
builder.add_tuple_type(&[cell_type, inner_type])
@@ -1465,6 +1565,7 @@ fn layout_spec_help(
fn builtin_spec(
builder: &mut impl TypeContext,
interner: &STLayoutInterner,
builtin: &Builtin,
when_recursive: &WhenRecursive,
) -> Result<TypeId> {
@@ -1475,7 +1576,7 @@ fn builtin_spec(
Decimal | Float(_) => builder.add_tuple_type(&[]),
Str => str_type(builder),
List(element_layout) => {
let element_type = layout_spec_help(builder, element_layout, when_recursive)?;
let element_type = layout_spec_help(builder, interner, element_layout, when_recursive)?;
let cell = builder.add_heap_cell_type();
let bag = builder.add_bag_type(element_type)?;


@@ -548,11 +548,13 @@ fn gen_from_mono_module_dev_assembly(
procedures,
mut interns,
exposed_to_host,
layout_interner,
..
} = loaded;
let env = roc_gen_dev::Env {
arena,
layout_interner: &layout_interner,
module_id,
exposed_to_host: exposed_to_host.values.keys().copied().collect(),
lazy_literals,


@@ -8,6 +8,7 @@ edition = "2021"
[dependencies]
roc_collections = { path = "../collections" }
roc_intern = { path = "../intern" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_problem = { path = "../problem" }


@@ -380,7 +380,12 @@ pub fn new_backend_64bit<
target_info,
env,
interns,
helper_proc_gen: CodeGenHelp::new(env.arena, target_info, env.module_id),
helper_proc_gen: CodeGenHelp::new(
env.arena,
env.layout_interner,
target_info,
env.module_id,
),
helper_proc_symbols: bumpalo::vec![in env.arena],
proc_name: None,
is_self_recursive: None,
@@ -806,7 +811,7 @@ impl<
let buf = &mut self.buf;
let struct_size = return_layout.stack_size(self.target_info);
let struct_size = return_layout.stack_size(self.env.layout_interner, self.target_info);
let base_offset = self.storage_manager.claim_stack_area(dst, struct_size);
@@ -1143,7 +1148,8 @@ impl<
let index_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, index);
let ret_stack_size = ret_layout.stack_size(self.storage_manager.target_info());
let ret_stack_size =
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
// TODO: This can be optimized with smarter instructions.
// Also can probably be moved into storage manager at least partly.
self.storage_manager.with_tmp_general_reg(
@@ -1185,7 +1191,8 @@ impl<
let elem_layout = arg_layouts[2];
let u32_layout = &Layout::Builtin(Builtin::Int(IntWidth::U32));
let list_alignment = list_layout.alignment_bytes(self.storage_manager.target_info());
let list_alignment = list_layout
.alignment_bytes(self.env.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP,
u32_layout,
@@ -1204,7 +1211,8 @@ impl<
ASM::add_reg64_reg64_imm32(&mut self.buf, reg, CC::BASE_PTR_REG, new_elem_offset);
// Load the elements size.
let elem_stack_size = elem_layout.stack_size(self.storage_manager.target_info());
let elem_stack_size =
elem_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info());
self.load_literal(
&Symbol::DEV_TMP3,
u64_layout,
@@ -1214,7 +1222,7 @@ impl<
// Setup the return location.
let base_offset = self.storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(self.storage_manager.target_info()),
ret_layout.stack_size(self.env.layout_interner, self.storage_manager.target_info()),
);
let ret_fields = if let Layout::Struct { field_layouts, .. } = ret_layout {
@@ -1231,13 +1239,19 @@ impl<
let (out_list_offset, out_elem_offset) = if ret_fields[0] == elem_layout {
(
base_offset + ret_fields[0].stack_size(self.storage_manager.target_info()) as i32,
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
as i32,
base_offset,
)
} else {
(
base_offset,
base_offset + ret_fields[0].stack_size(self.storage_manager.target_info()) as i32,
base_offset
+ ret_fields[0]
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
as i32,
)
};
@@ -1318,10 +1332,15 @@ impl<
// This requires at least 8 for the refcount alignment.
let allocation_alignment = std::cmp::max(
8,
elem_layout.allocation_alignment_bytes(self.storage_manager.target_info()) as u64,
elem_layout.allocation_alignment_bytes(
self.env.layout_interner,
self.storage_manager.target_info(),
) as u64,
);
let elem_size = elem_layout.stack_size(self.storage_manager.target_info()) as u64;
let elem_size = elem_layout
.stack_size(self.env.layout_interner, self.storage_manager.target_info())
as u64;
let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */;
let u64_layout = Layout::Builtin(Builtin::Int(IntWidth::U64));
self.load_literal(


@@ -86,7 +86,7 @@ pub struct StorageManager<
> {
phantom_cc: PhantomData<CC>,
phantom_asm: PhantomData<ASM>,
env: &'a Env<'a>,
pub(crate) env: &'a Env<'a>,
target_info: TargetInfo,
// Data about where each symbol is stored.
symbol_storage_map: MutMap<Symbol, Storage<GeneralReg, FloatReg>>,
@@ -541,12 +541,12 @@ impl<
let (base_offset, size) = (*base_offset, *size);
let mut data_offset = base_offset;
for layout in field_layouts.iter().take(index as usize) {
let field_size = layout.stack_size(self.target_info);
let field_size = layout.stack_size(self.env.layout_interner, self.target_info);
data_offset += field_size as i32;
}
debug_assert!(data_offset < base_offset + size as i32);
let layout = field_layouts[index as usize];
let size = layout.stack_size(self.target_info);
let size = layout.stack_size(self.env.layout_interner, self.target_info);
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
@@ -591,8 +591,8 @@ impl<
UnionLayout::NonRecursive(_) => {
let (union_offset, _) = self.stack_offset_and_size(structure);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(self.target_info);
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();
@@ -635,7 +635,7 @@ impl<
layout: &Layout<'a>,
fields: &'a [Symbol],
) {
let struct_size = layout.stack_size(self.target_info);
let struct_size = layout.stack_size(self.env.layout_interner, self.target_info);
if struct_size == 0 {
self.symbol_storage_map.insert(*sym, NoData);
return;
@@ -646,7 +646,8 @@ impl<
let mut current_offset = base_offset;
for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size = field_layout.stack_size(self.target_info);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
current_offset += field_size as i32;
}
} else {
@@ -667,8 +668,8 @@ impl<
) {
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(self.target_info);
let (data_size, data_alignment) = union_layout
.data_size_and_alignment(self.env.layout_interner, self.target_info);
let id_offset = data_size - data_alignment;
if data_alignment < 8 || data_alignment % 8 != 0 {
todo!("small/unaligned tagging");
@@ -679,7 +680,8 @@ impl<
fields.iter().zip(field_layouts[tag_id as usize].iter())
{
self.copy_symbol_to_stack_offset(buf, current_offset, field, field_layout);
let field_size = field_layout.stack_size(self.target_info);
let field_size =
field_layout.stack_size(self.env.layout_interner, self.target_info);
current_offset += field_size as i32;
}
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -733,16 +735,19 @@ impl<
let reg = self.load_to_float_reg(buf, sym);
ASM::mov_base32_freg64(buf, to_offset, reg);
}
_ if layout.stack_size(self.target_info) == 0 => {}
_ if layout.stack_size(self.env.layout_interner, self.target_info) == 0 => {}
// TODO: Verify this is always true.
// The dev backend does not deal with refcounting and does not care whether data is safe to memcpy.
// It is just temporarily storing the value due to needing to free registers.
// Later, it will be reloaded and stored in refcounted as needed.
_ if layout.stack_size(self.target_info) > 8 => {
_ if layout.stack_size(self.env.layout_interner, self.target_info) > 8 => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert!(from_offset % 8 == 0);
debug_assert!(size % 8 == 0);
debug_assert_eq!(size, layout.stack_size(self.target_info));
debug_assert_eq!(
size,
layout.stack_size(self.env.layout_interner, self.target_info)
);
self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
for i in (0..size as i32).step_by(8) {
ASM::mov_reg64_base32(buf, reg, from_offset + i);
@@ -1016,7 +1021,7 @@ impl<
.insert(*symbol, Rc::new((base_offset, 8)));
}
_ => {
let stack_size = layout.stack_size(self.target_info);
let stack_size = layout.stack_size(self.env.layout_interner, self.target_info);
if stack_size == 0 {
self.symbol_storage_map.insert(*symbol, NoData);
} else {


@@ -7,7 +7,7 @@ use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout};
use roc_mono::layout::{Builtin, Layout, STLayoutInterner};
use roc_target::TargetInfo;
const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
@@ -266,12 +266,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut general_i = 0;
let mut float_i = 0;
if X86_64SystemV::returns_via_arg_pointer(ret_layout) {
if X86_64SystemV::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[0]);
general_i += 1;
}
for (layout, sym) in args.iter() {
let stack_size = layout.stack_size(TARGET_INFO);
let stack_size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
match layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
@@ -324,10 +324,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
let mut general_i = 0;
let mut float_i = 0;
if Self::returns_via_arg_pointer(ret_layout) {
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
// Save space on the stack for the result we will return.
let base_offset =
storage_manager.claim_stack_area(dst, ret_layout.stack_size(TARGET_INFO));
let base_offset = storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
// Set the first reg to the address base + offset.
let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
general_i += 1;
@@ -386,8 +388,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
tmp_stack_offset += 8;
}
}
x if x.stack_size(TARGET_INFO) == 0 => {}
x if x.stack_size(TARGET_INFO) > 16 => {
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) > 16 => {
// TODO: Double check this.
// Just copy onto the stack.
// Use return reg as buffer because it will be empty right now.
@@ -431,8 +433,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(x) => {
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
debug_assert_eq!(base_offset % 8, 0);
if size <= 8 {
@@ -487,9 +489,9 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(x) => {
let size = layout.stack_size(TARGET_INFO);
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(storage_manager.env.layout_interner, x) => {
let size = layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO);
let offset = storage_manager.claim_stack_area(sym, size);
if size <= 8 {
X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -516,10 +518,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
}
impl X86_64SystemV {
fn returns_via_arg_pointer(ret_layout: &Layout) -> bool {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
) -> bool {
// TODO: This will need to be more complex/extended to fully support the calling convention.
// details here: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
ret_layout.stack_size(TARGET_INFO) > 16
ret_layout.stack_size(interner, TARGET_INFO) > 16
}
}
@@ -667,7 +672,10 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut i = 0;
if X86_64WindowsFastcall::returns_via_arg_pointer(ret_layout) {
if X86_64WindowsFastcall::returns_via_arg_pointer(
storage_manager.env.layout_interner,
ret_layout,
) {
storage_manager.ret_pointer_arg(Self::GENERAL_PARAM_REGS[i]);
i += 1;
}
@@ -682,7 +690,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
i += 1;
}
x if x.stack_size(TARGET_INFO) == 0 => {}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("Loading args with layout {:?}", x);
}
@@ -717,9 +725,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
ret_layout: &Layout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
if Self::returns_via_arg_pointer(ret_layout) {
if Self::returns_via_arg_pointer(storage_manager.env.layout_interner, ret_layout) {
// Save space on the stack for the arg we will return.
storage_manager.claim_stack_area(dst, ret_layout.stack_size(TARGET_INFO));
storage_manager.claim_stack_area(
dst,
ret_layout.stack_size(storage_manager.env.layout_interner, TARGET_INFO),
);
todo!("claim first parama reg for the address");
}
for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
@@ -768,7 +779,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
tmp_stack_offset += 8;
}
}
x if x.stack_size(TARGET_INFO) == 0 => {}
x if x.stack_size(storage_manager.env.layout_interner, TARGET_INFO) == 0 => {}
x => {
todo!("calling with arg type, {:?}", x);
}
@@ -809,10 +820,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
impl X86_64WindowsFastcall {
fn returns_via_arg_pointer(ret_layout: &Layout) -> bool {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
) -> bool {
// TODO: This is not fully correct; there are some exceptions for "vector" types.
// details here: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-160#return-values
ret_layout.stack_size(TARGET_INFO) > 8
ret_layout.stack_size(interner, TARGET_INFO) > 8
}
}

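The backend hunks above all follow from the same constraint: ABI decisions such as `returns_via_arg_pointer` are made from a layout's stack size, and that size can no longer be computed without resolving interned layouts. A hedged sketch of the dependency (illustrative stand-in types, not the real `gen_dev` code):

```rust
// Interned index -> resolved stack size in bytes (hypothetical stand-in).
struct Interner(Vec<u32>);

enum Layout {
    Immediate(u32),  // size is known from the layout itself
    Interned(usize), // size must be looked up through the interner
}

impl Layout {
    fn stack_size(&self, interner: &Interner) -> u32 {
        match self {
            Layout::Immediate(size) => *size,
            Layout::Interned(index) => interner.0[*index],
        }
    }
}

// The System V-style rule from the hunk above: results wider than 16 bytes
// are returned through a pointer passed in the first argument register.
fn returns_via_arg_pointer(interner: &Interner, ret_layout: &Layout) -> bool {
    ret_layout.stack_size(interner) > 16
}

fn main() {
    let interner = Interner(vec![24]);
    assert!(returns_via_arg_pointer(&interner, &Layout::Interned(0)));
    assert!(!returns_via_arg_pointer(&interner, &Layout::Immediate(8)));
}
```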

@@ -14,7 +14,9 @@ use roc_mono::ir::{
BranchInfo, CallType, Expr, JoinPointId, ListLiteralElement, Literal, Param, Proc, ProcLayout,
SelfRecursive, Stmt,
};
use roc_mono::layout::{Builtin, Layout, LayoutId, LayoutIds, TagIdIntType, UnionLayout};
use roc_mono::layout::{
Builtin, Layout, LayoutId, LayoutIds, STLayoutInterner, TagIdIntType, UnionLayout,
};
mod generic64;
mod object_builder;
@@ -23,6 +25,7 @@ mod run_roc;
pub struct Env<'a> {
pub arena: &'a Bump,
pub layout_interner: &'a STLayoutInterner<'a>,
pub module_id: ModuleId,
pub exposed_to_host: MutSet<Symbol>,
pub lazy_literals: bool,


@@ -247,7 +247,7 @@ fn build_transform_caller_help<'a, 'ctx, 'env>(
for (argument_ptr, layout) in arguments.iter().zip(argument_layouts) {
let basic_type = basic_type_from_layout(env, layout).ptr_type(AddressSpace::Generic);
let argument = if layout.is_passed_by_reference(env.target_info) {
let argument = if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
env.builder
.build_pointer_cast(
argument_ptr.into_pointer_value(),
@@ -271,7 +271,7 @@ fn build_transform_caller_help<'a, 'ctx, 'env>(
closure_data_layout
.is_represented(env.layout_interner)
.is_some(),
closure_data_layout.runtime_representation(),
closure_data_layout.runtime_representation(env.layout_interner),
) {
(false, _) => {
// the function doesn't expect a closure argument, nothing to add
@@ -404,7 +404,7 @@ fn build_rc_wrapper<'a, 'ctx, 'env>(
let value_type = basic_type_from_layout(env, layout).ptr_type(AddressSpace::Generic);
let value = if layout.is_passed_by_reference(env.target_info) {
let value = if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
env.builder
.build_pointer_cast(value_ptr, value_type, "cast_ptr_to_tag_build_rc_wrapper")
.into()
@@ -592,7 +592,8 @@ pub fn build_compare_wrapper<'a, 'ctx, 'env>(
let default = [value1.into(), value2.into()];
let arguments_cast = match closure_data_layout.runtime_representation() {
let arguments_cast =
match closure_data_layout.runtime_representation(env.layout_interner) {
Layout::Struct {
field_layouts: &[], ..
} => {


@@ -312,7 +312,7 @@ impl<'a, 'ctx, 'env> Env<'a, 'ctx, 'env> {
}
pub fn alignment_intvalue(&self, element_layout: &Layout<'a>) -> BasicValueEnum<'ctx> {
let alignment = element_layout.alignment_bytes(self.target_info);
let alignment = element_layout.alignment_bytes(self.layout_interner, self.target_info);
let alignment_iv = self.alignment_const(alignment);
alignment_iv.into()
@@ -881,7 +881,9 @@ fn promote_to_wasm_test_wrapper<'a, 'ctx, 'env>(
let roc_main_fn_result = call_roc_function(env, roc_main_fn, &top_level.result, &[]);
// For consistency, we always return with a heap-allocated value
let (size, alignment) = top_level.result.stack_size_and_alignment(env.target_info);
let (size, alignment) = top_level
.result
.stack_size_and_alignment(env.layout_interner, env.target_info);
let number_of_bytes = env.ptr_int().const_int(size as _, false);
let void_ptr = env.call_alloc(number_of_bytes, alignment);
@@ -1250,8 +1252,8 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
let allocation = reserve_with_refcount_help(
env,
basic_type,
layout.stack_size(env.target_info),
layout.alignment_bytes(env.target_info),
layout.stack_size(env.layout_interner, env.target_info),
layout.alignment_bytes(env.layout_interner, env.target_info),
);
store_roc_value(env, *layout, allocation, value);
@@ -1329,7 +1331,7 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
let (value, layout) = load_symbol_and_layout(scope, structure);
let layout = if let Layout::LambdaSet(lambda_set) = layout {
lambda_set.runtime_representation()
lambda_set.runtime_representation(env.layout_interner)
} else {
*layout
};
@@ -1601,7 +1603,7 @@ fn build_tag_field_value<'a, 'ctx, 'env>(
env.context.i64_type().ptr_type(AddressSpace::Generic),
"cast_recursive_pointer",
)
} else if tag_field_layout.is_passed_by_reference(env.target_info) {
} else if tag_field_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
debug_assert!(value.is_pointer_value());
// NOTE: we rely on this being passed to `store_roc_value` so that
@@ -1662,7 +1664,7 @@ fn build_struct<'a, 'ctx, 'env>(
if !field_layout.is_dropped_because_empty() {
field_types.push(basic_type_from_layout(env, field_layout));
if field_layout.is_passed_by_reference(env.target_info) {
if field_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let field_value = env
.builder
.build_load(field_expr.into_pointer_value(), "load_tag_to_put_in_struct");
@@ -1698,7 +1700,12 @@ fn build_tag<'a, 'ctx, 'env>(
let data = build_struct(env, scope, arguments);
let roc_union = RocUnion::tagged_from_slices(env.context, tags, env.target_info);
let roc_union = RocUnion::tagged_from_slices(
env.layout_interner,
env.context,
tags,
env.target_info,
);
let value = roc_union.as_struct_value(env, data, Some(tag_id as _));
let alloca = create_entry_block_alloca(
@@ -1789,8 +1796,12 @@ fn build_tag<'a, 'ctx, 'env>(
nullable_id,
other_fields,
} => {
let roc_union =
RocUnion::untagged_from_slices(env.context, &[other_fields], env.target_info);
let roc_union = RocUnion::untagged_from_slices(
env.layout_interner,
env.context,
&[other_fields],
env.target_info,
);
if tag_id == *nullable_id as _ {
let output_type = roc_union.struct_type().ptr_type(AddressSpace::Generic);
@@ -2105,8 +2116,8 @@ pub fn reserve_with_refcount<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout: &Layout<'a>,
) -> PointerValue<'ctx> {
let stack_size = layout.stack_size(env.target_info);
let alignment_bytes = layout.alignment_bytes(env.target_info);
let stack_size = layout.stack_size(env.layout_interner, env.target_info);
let alignment_bytes = layout.alignment_bytes(env.layout_interner, env.target_info);
let basic_type = basic_type_from_layout(env, layout);
@@ -2121,9 +2132,9 @@ fn reserve_with_refcount_union_as_block_of_memory<'a, 'ctx, 'env>(
let ptr_bytes = env.target_info;
let roc_union = if union_layout.stores_tag_id_as_data(ptr_bytes) {
RocUnion::tagged_from_slices(env.context, fields, env.target_info)
RocUnion::tagged_from_slices(env.layout_interner, env.context, fields, env.target_info)
} else {
RocUnion::untagged_from_slices(env.context, fields, env.target_info)
RocUnion::untagged_from_slices(env.layout_interner, env.context, fields, env.target_info)
};
reserve_with_refcount_help(
@@ -2212,10 +2223,10 @@ fn list_literal<'a, 'ctx, 'env>(
// if element_type.is_int_type() {
if false {
let element_type = element_type.into_int_type();
let element_width = element_layout.stack_size(env.target_info);
let element_width = element_layout.stack_size(env.layout_interner, env.target_info);
let size = list_length * element_width as usize;
let alignment = element_layout
.alignment_bytes(env.target_info)
.alignment_bytes(env.layout_interner, env.target_info)
.max(env.target_info.ptr_width() as u32);
let mut is_all_constant = true;
@@ -2352,7 +2363,7 @@ pub fn load_roc_value<'a, 'ctx, 'env>(
source: PointerValue<'ctx>,
name: &str,
) -> BasicValueEnum<'ctx> {
if layout.is_passed_by_reference(env.target_info) {
if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let alloca = entry_block_alloca_zerofill(env, basic_type_from_layout(env, &layout), name);
store_roc_value(env, layout, alloca, source.into());
@@ -2369,7 +2380,7 @@ pub fn use_roc_value<'a, 'ctx, 'env>(
source: BasicValueEnum<'ctx>,
name: &str,
) -> BasicValueEnum<'ctx> {
if layout.is_passed_by_reference(env.target_info) {
if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let alloca = entry_block_alloca_zerofill(env, basic_type_from_layout(env, &layout), name);
env.builder.build_store(alloca, source);
@@ -2400,15 +2411,16 @@ pub fn store_roc_value<'a, 'ctx, 'env>(
destination: PointerValue<'ctx>,
value: BasicValueEnum<'ctx>,
) {
if layout.is_passed_by_reference(env.target_info) {
if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
debug_assert!(value.is_pointer_value());
let align_bytes = layout.alignment_bytes(env.target_info);
let align_bytes = layout.alignment_bytes(env.layout_interner, env.target_info);
if align_bytes > 0 {
let size = env
.ptr_int()
.const_int(layout.stack_size(env.target_info) as u64, false);
let size = env.ptr_int().const_int(
layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
env.builder
.build_memcpy(
@@ -2503,8 +2515,9 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
// store_roc_value(env, *layout, out_parameter.into_pointer_value(), value);
let destination = out_parameter.into_pointer_value();
if layout.is_passed_by_reference(env.target_info) {
let align_bytes = layout.alignment_bytes(env.target_info);
if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let align_bytes =
layout.alignment_bytes(env.layout_interner, env.target_info);
if align_bytes > 0 {
debug_assert!(
@@ -2535,7 +2548,7 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
// Hence, we explicitly memcpy source to destination, and rely on
// LLVM optimizing away any inefficiencies.
let target_info = env.target_info;
let width = layout.stack_size(target_info);
let width = layout.stack_size(env.layout_interner, target_info);
let size = env.ptr_int().const_int(width as _, false);
env.builder
@@ -2615,7 +2628,10 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
for param in parameters.iter() {
let basic_type = basic_type_from_layout(env, &param.layout);
let phi_type = if param.layout.is_passed_by_reference(env.target_info) {
let phi_type = if param
.layout
.is_passed_by_reference(env.layout_interner, env.target_info)
{
basic_type.ptr_type(AddressSpace::Generic).into()
} else {
basic_type
@@ -2698,7 +2714,7 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
let (value, layout) = load_symbol_and_layout(scope, symbol);
let layout = *layout;
if layout.contains_refcounted() {
if layout.contains_refcounted(env.layout_interner) {
increment_refcount_layout(
env,
parent,
@@ -2714,7 +2730,7 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
Dec(symbol) => {
let (value, layout) = load_symbol_and_layout(scope, symbol);
if layout.contains_refcounted() {
if layout.contains_refcounted(env.layout_interner) {
decrement_refcount_layout(env, parent, layout_ids, value, layout);
}
@@ -2727,7 +2743,8 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
Layout::Builtin(Builtin::Str) => todo!(),
Layout::Builtin(Builtin::List(element_layout)) => {
debug_assert!(value.is_struct_value());
let alignment = element_layout.alignment_bytes(env.target_info);
let alignment = element_layout
.alignment_bytes(env.layout_interner, env.target_info);
build_list::decref(env, value.into_struct_value(), alignment);
}
@@ -4140,7 +4157,7 @@ fn make_good_roc_result<'a, 'ctx, 'env>(
.build_insert_value(v1, context.i64_type().const_zero(), 0, "set_no_error")
.unwrap();
let v3 = if return_layout.is_passed_by_reference(env.target_info) {
let v3 = if return_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let loaded = env.builder.build_load(
return_value.into_pointer_value(),
"load_call_result_passed_by_ptr",
@@ -4403,7 +4420,9 @@ fn build_procedures_help<'a, 'ctx, 'env>(
let it = procedures.iter().map(|x| x.1);
let solutions = match roc_alias_analysis::spec_program(opt_level, opt_entry_point, it) {
let solutions =
match roc_alias_analysis::spec_program(env.layout_interner, opt_level, opt_entry_point, it)
{
Err(e) => panic!("Error in alias analysis: {}", e),
Ok(solutions) => solutions,
};
@@ -4672,7 +4691,8 @@ fn build_closure_caller<'a, 'ctx, 'env>(
}
let closure_argument_type = {
let basic_type = basic_type_from_layout(env, &lambda_set.runtime_representation());
let basic_type =
basic_type_from_layout(env, &lambda_set.runtime_representation(env.layout_interner));
basic_type.ptr_type(AddressSpace::Generic)
};
@@ -4719,10 +4739,12 @@ fn build_closure_caller<'a, 'ctx, 'env>(
// NOTE this may be incorrect in the long run
// here we load any argument that is a pointer
let closure_layout = lambda_set.runtime_representation();
let closure_layout = lambda_set.runtime_representation(env.layout_interner);
let layouts_it = arguments.iter().chain(std::iter::once(&closure_layout));
for (param, layout) in evaluator_arguments.iter_mut().zip(layouts_it) {
if param.is_pointer_value() && !layout.is_passed_by_reference(env.target_info) {
if param.is_pointer_value()
&& !layout.is_passed_by_reference(env.layout_interner, env.target_info)
{
*param = builder.build_load(param.into_pointer_value(), "load_param");
}
}
@@ -4740,13 +4762,14 @@ fn build_closure_caller<'a, 'ctx, 'env>(
} else {
let call_result = call_roc_function(env, evaluator, return_layout, &evaluator_arguments);
if return_layout.is_passed_by_reference(env.target_info) {
let align_bytes = return_layout.alignment_bytes(env.target_info);
if return_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
let align_bytes = return_layout.alignment_bytes(env.layout_interner, env.target_info);
if align_bytes > 0 {
let size = env
.ptr_int()
.const_int(return_layout.stack_size(env.target_info) as u64, false);
let size = env.ptr_int().const_int(
return_layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
env.builder
.build_memcpy(
@@ -4773,7 +4796,7 @@ fn build_closure_caller<'a, 'ctx, 'env>(
env,
def_name,
alias_symbol,
lambda_set.runtime_representation(),
lambda_set.runtime_representation(env.layout_interner),
);
}
@@ -5043,7 +5066,7 @@ pub fn call_roc_function<'a, 'ctx, 'env>(
debug_assert_eq!(roc_function.get_call_conventions(), FAST_CALL_CONV);
call.set_call_convention(FAST_CALL_CONV);
if result_layout.is_passed_by_reference(env.target_info) {
if result_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
result_alloca.into()
} else {
env.builder
@@ -5125,8 +5148,11 @@ fn roc_function_call<'a, 'ctx, 'env>(
.as_global_value()
.as_pointer_value();
let inc_closure_data =
build_inc_n_wrapper(env, layout_ids, &lambda_set.runtime_representation())
let inc_closure_data = build_inc_n_wrapper(
env,
layout_ids,
&lambda_set.runtime_representation(env.layout_interner),
)
.as_global_value()
.as_pointer_value();
@@ -6467,7 +6493,7 @@ fn to_cc_type<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout: &Layout<'a>,
) -> BasicTypeEnum<'ctx> {
match layout.runtime_representation() {
match layout.runtime_representation(env.layout_interner) {
Layout::Builtin(builtin) => to_cc_type_builtin(env, &builtin),
layout => {
// TODO this is almost certainly incorrect for bigger structs
@@ -6509,7 +6535,11 @@ enum RocReturn {
}
impl RocReturn {
fn roc_return_by_pointer(target_info: TargetInfo, layout: Layout) -> bool {
fn roc_return_by_pointer(
interner: &STLayoutInterner,
target_info: TargetInfo,
layout: Layout,
) -> bool {
match layout {
Layout::Builtin(builtin) => {
use Builtin::*;
@@ -6524,15 +6554,17 @@ impl RocReturn {
}
}
Layout::Union(UnionLayout::NonRecursive(_)) => true,
Layout::LambdaSet(lambda_set) => {
RocReturn::roc_return_by_pointer(target_info, lambda_set.runtime_representation())
}
Layout::LambdaSet(lambda_set) => RocReturn::roc_return_by_pointer(
interner,
target_info,
lambda_set.runtime_representation(interner),
),
_ => false,
}
}
fn from_layout<'a, 'ctx, 'env>(env: &Env<'a, 'ctx, 'env>, layout: &Layout<'a>) -> Self {
if Self::roc_return_by_pointer(env.target_info, *layout) {
if Self::roc_return_by_pointer(env.layout_interner, env.target_info, *layout) {
RocReturn::ByPointer
} else {
RocReturn::Return
@@ -6670,7 +6702,7 @@ impl<'ctx> FunctionSpec<'ctx> {
/// According to the C ABI, how should we return a value with the given layout?
pub fn to_cc_return<'a, 'ctx, 'env>(env: &Env<'a, 'ctx, 'env>, layout: &Layout<'a>) -> CCReturn {
let return_size = layout.stack_size(env.target_info);
let return_size = layout.stack_size(env.layout_interner, env.target_info);
let pass_result_by_pointer = return_size > 2 * env.target_info.ptr_width() as u32;
if return_size == 0 {


@@ -82,7 +82,10 @@ pub(crate) fn layout_width<'a, 'ctx, 'env>(
layout: &Layout<'a>,
) -> BasicValueEnum<'ctx> {
env.ptr_int()
.const_int(layout.stack_size(env.target_info) as u64, false)
.const_int(
layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
)
.into()
}
@@ -317,7 +320,7 @@ pub(crate) fn list_replace_unsafe<'a, 'ctx, 'env>(
// the list has the same alignment as a usize / ptr. The element comes first in the struct if
// its alignment is bigger than that of a list.
let element_align = element_layout.alignment_bytes(env.target_info);
let element_align = element_layout.alignment_bytes(env.layout_interner, env.target_info);
let element_first = element_align > env.target_info.ptr_width() as u32;
let fields = if element_first {
@@ -715,13 +718,13 @@ pub(crate) fn allocate_list<'a, 'ctx, 'env>(
let builder = env.builder;
let len_type = env.ptr_int();
let elem_bytes = elem_layout.stack_size(env.target_info) as u64;
let elem_bytes = elem_layout.stack_size(env.layout_interner, env.target_info) as u64;
let bytes_per_element = len_type.const_int(elem_bytes, false);
let number_of_data_bytes =
builder.build_int_mul(bytes_per_element, number_of_elements, "data_length");
let basic_type = basic_type_from_layout(env, elem_layout);
let alignment_bytes = elem_layout.alignment_bytes(env.target_info);
let alignment_bytes = elem_layout.alignment_bytes(env.layout_interner, env.target_info);
allocate_with_refcount_help(env, basic_type, alignment_bytes, number_of_data_bytes)
}


@@ -5,7 +5,7 @@ use inkwell::types::{BasicType, BasicTypeEnum, FloatType, IntType, StructType};
use inkwell::values::StructValue;
use inkwell::AddressSpace;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_mono::layout::{round_up_to_alignment, Builtin, Layout, UnionLayout};
use roc_mono::layout::{round_up_to_alignment, Builtin, Layout, STLayoutInterner, UnionLayout};
use roc_target::TargetInfo;
fn basic_type_from_record<'a, 'ctx, 'env>(
@@ -34,7 +34,9 @@ pub fn basic_type_from_layout<'a, 'ctx, 'env>(
field_layouts: sorted_fields,
..
} => basic_type_from_record(env, sorted_fields),
LambdaSet(lambda_set) => basic_type_from_layout(env, &lambda_set.runtime_representation()),
LambdaSet(lambda_set) => {
basic_type_from_layout(env, &lambda_set.runtime_representation(env.layout_interner))
}
Boxed(inner_layout) => {
let inner_type = basic_type_from_layout(env, inner_layout);
@@ -60,7 +62,7 @@ pub fn basic_type_from_union_layout<'a, 'ctx, 'env>(
match union_layout {
NonRecursive(tags) => {
//
RocUnion::tagged_from_slices(env.context, tags, env.target_info)
RocUnion::tagged_from_slices(env.layout_interner, env.context, tags, env.target_info)
.struct_type()
.into()
}
@@ -69,29 +71,45 @@
other_tags: tags, ..
} => {
if union_layout.stores_tag_id_as_data(env.target_info) {
RocUnion::tagged_from_slices(env.context, tags, env.target_info)
RocUnion::tagged_from_slices(
env.layout_interner,
env.context,
tags,
env.target_info,
)
.struct_type()
.ptr_type(AddressSpace::Generic)
.into()
} else {
RocUnion::untagged_from_slices(env.context, tags, env.target_info)
RocUnion::untagged_from_slices(
env.layout_interner,
env.context,
tags,
env.target_info,
)
.struct_type()
.ptr_type(AddressSpace::Generic)
.into()
}
}
NullableUnwrapped { other_fields, .. } => {
RocUnion::untagged_from_slices(env.context, &[other_fields], env.target_info)
NullableUnwrapped { other_fields, .. } => RocUnion::untagged_from_slices(
env.layout_interner,
env.context,
&[other_fields],
env.target_info,
)
.struct_type()
.ptr_type(AddressSpace::Generic)
.into()
}
NonNullableUnwrapped(fields) => {
RocUnion::untagged_from_slices(env.context, &[fields], env.target_info)
.into(),
NonNullableUnwrapped(fields) => RocUnion::untagged_from_slices(
env.layout_interner,
env.context,
&[fields],
env.target_info,
)
.struct_type()
.ptr_type(AddressSpace::Generic)
.into()
}
.into(),
}
}
@@ -132,13 +150,13 @@ pub fn argument_type_from_layout<'a, 'ctx, 'env>(
match layout {
LambdaSet(lambda_set) => {
argument_type_from_layout(env, &lambda_set.runtime_representation())
argument_type_from_layout(env, &lambda_set.runtime_representation(env.layout_interner))
}
Union(union_layout) => argument_type_from_union_layout(env, union_layout),
Builtin(_) => {
let base = basic_type_from_layout(env, layout);
if layout.is_passed_by_reference(env.target_info) {
if layout.is_passed_by_reference(env.layout_interner, env.target_info) {
base.ptr_type(AddressSpace::Generic).into()
} else {
base
@@ -275,6 +293,7 @@ impl<'ctx> RocUnion<'ctx> {
}
pub fn tagged_from_slices(
interner: &STLayoutInterner,
context: &'ctx Context,
layouts: &[&[Layout<'_>]],
target_info: TargetInfo,
@@ -285,18 +304,19 @@
};
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(layouts, target_info);
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
Self::new(context, target_info, data_align, data_width, Some(tag_type))
}
pub fn untagged_from_slices(
interner: &STLayoutInterner,
context: &'ctx Context,
layouts: &[&[Layout<'_>]],
target_info: TargetInfo,
) -> Self {
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(layouts, target_info);
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
Self::new(context, target_info, data_align, data_width, None)
}
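
`tagged_from_slices` and `untagged_from_slices` need the interner because a union's data width and alignment are the maximum over every tag's payload, and computing a payload's footprint may require resolving interned field layouts. A standalone sketch of that max-over-tags computation, using plain (size, align) pairs in place of real `Layout`s:

// Sketch of what `stack_size_and_alignment_slices` computes
// (toy input; the real roc version works over `Layout` slices).
fn size_and_align_slices(
    tags: &[&[(u32, u32)]], // (size, align) per field, per tag payload
) -> (u32, u32) {
    let mut width = 0;
    let mut align = 1;
    for fields in tags {
        let mut tag_width = 0;
        for &(size, field_align) in *fields {
            // round the running width up to the field's alignment
            tag_width = (tag_width + field_align - 1) / field_align * field_align + size;
            align = align.max(field_align);
        }
        width = width.max(tag_width);
    }
    (width, align)
}

fn main() {
    // two variants: {u8} and {u64, u8}
    let tags: &[&[(u32, u32)]] = &[&[(1, 1)], &[(8, 8), (1, 1)]];
    assert_eq!(size_and_align_slices(tags), (9, 8));
}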

View file

@ -137,9 +137,10 @@ pub(crate) fn clone_to_shared_memory<'a, 'ctx, 'env>(
let (value, layout) = load_symbol_and_layout(scope, lookup);
let stack_size = env
.ptr_int()
.const_int(layout.stack_size(env.target_info) as u64, false);
let stack_size = env.ptr_int().const_int(
layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
let mut extra_offset = env.builder.build_int_add(offset, stack_size, "offset");
@ -221,7 +222,7 @@ fn build_clone<'a, 'ctx, 'env>(
Layout::LambdaSet(_) => unreachable!("cannot compare closures"),
Layout::Union(union_layout) => {
if layout.safe_to_memcpy() {
if layout.safe_to_memcpy(env.layout_interner) {
let ptr = unsafe {
env.builder
.build_in_bounds_gep(ptr, &[cursors.offset], "at_current_offset")
@ -255,9 +256,10 @@ fn build_clone<'a, 'ctx, 'env>(
let source = value.into_pointer_value();
let value = load_roc_value(env, *inner_layout, source, "inner");
let inner_width = env
.ptr_int()
.const_int(inner_layout.stack_size(env.target_info) as u64, false);
let inner_width = env.ptr_int().const_int(
inner_layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
let new_extra = env
.builder
@ -318,7 +320,7 @@ fn build_clone_struct<'a, 'ctx, 'env>(
) -> IntValue<'ctx> {
let layout = Layout::struct_no_name_order(field_layouts);
if layout.safe_to_memcpy() {
if layout.safe_to_memcpy(env.layout_interner) {
build_copy(env, ptr, cursors.offset, value)
} else {
let mut cursors = cursors;
@ -343,9 +345,10 @@ fn build_clone_struct<'a, 'ctx, 'env>(
when_recursive,
);
let field_width = env
.ptr_int()
.const_int(field_layout.stack_size(env.target_info) as u64, false);
let field_width = env.ptr_int().const_int(
field_layout.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
cursors.extra_offset = new_extra;
cursors.offset = env
@ -576,7 +579,8 @@ fn build_clone_tag_help<'a, 'ctx, 'env>(
let data = env.builder.build_load(data_ptr, "load_data");
let (width, _) = union_layout.data_size_and_alignment(env.target_info);
let (width, _) =
union_layout.data_size_and_alignment(env.layout_interner, env.target_info);
let cursors = Cursors {
offset: extra_offset,
@ -618,7 +622,8 @@ fn build_clone_tag_help<'a, 'ctx, 'env>(
let layout = Layout::struct_no_name_order(fields);
let basic_type = basic_type_from_layout(env, &layout);
let (width, _) = union_layout.data_size_and_alignment(env.target_info);
let (width, _) =
union_layout.data_size_and_alignment(env.layout_interner, env.target_info);
let cursors = Cursors {
offset: extra_offset,
@ -686,7 +691,8 @@ fn build_clone_tag_help<'a, 'ctx, 'env>(
let layout = Layout::struct_no_name_order(fields);
let basic_type = basic_type_from_layout(env, &layout);
let (width, _) = union_layout.data_size_and_alignment(env.target_info);
let (width, _) =
union_layout.data_size_and_alignment(env.layout_interner, env.target_info);
let cursors = Cursors {
offset: extra_offset,
@ -776,8 +782,10 @@ fn build_clone_tag_help<'a, 'ctx, 'env>(
offset: extra_offset,
extra_offset: env.builder.build_int_add(
extra_offset,
env.ptr_int()
.const_int(layout.stack_size(env.target_info) as _, false),
env.ptr_int().const_int(
layout.stack_size(env.layout_interner, env.target_info) as _,
false,
),
"new_offset",
),
};
@ -907,12 +915,13 @@ fn build_clone_builtin<'a, 'ctx, 'env>(
offset = build_copy(env, ptr, offset, len.into());
offset = build_copy(env, ptr, offset, len.into());
let (element_width, _element_align) = elem.stack_size_and_alignment(env.target_info);
let (element_width, _element_align) =
elem.stack_size_and_alignment(env.layout_interner, env.target_info);
let element_width = env.ptr_int().const_int(element_width as _, false);
let elements_width = bd.build_int_mul(element_width, len, "elements_width");
if elem.safe_to_memcpy() {
if elem.safe_to_memcpy(env.layout_interner) {
// NOTE we are not actually sure the dest is properly aligned
let dest = pointer_at_offset(bd, ptr, offset);
let src = bd.build_pointer_cast(
@ -936,9 +945,10 @@ fn build_clone_builtin<'a, 'ctx, 'env>(
// if the element has any pointers, we clone them to this offset
let rest_offset = bd.build_alloca(env.ptr_int(), "rest_offset");
let element_stack_size = env
.ptr_int()
.const_int(elem.stack_size(env.target_info) as u64, false);
let element_stack_size = env.ptr_int().const_int(
elem.stack_size(env.layout_interner, env.target_info) as u64,
false,
);
let rest_start_offset = bd.build_int_add(
cursors.extra_offset,
bd.build_int_mul(len, element_stack_size, "elements_width"),
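
`build_clone` walks a value and copies it piece by piece into shared memory, advancing an offset cursor by each piece's stack size; that size query is what now threads `env.layout_interner`. A toy model of the cursor discipline (plain byte buffer instead of LLVM values):

// Each copy lands at `offset`, and the cursor advances by the
// value's stack size, mirroring the `build_copy` calls above.
fn build_copy(buffer: &mut Vec<u8>, offset: usize, bytes: &[u8]) -> usize {
    buffer[offset..offset + bytes.len()].copy_from_slice(bytes);
    offset + bytes.len()
}

fn main() {
    let mut buffer = vec![0u8; 12];
    let mut offset = 0;
    offset = build_copy(&mut buffer, offset, &42u64.to_le_bytes()); // stack size 8
    offset = build_copy(&mut buffer, offset, &7u32.to_le_bytes()); // stack size 4
    assert_eq!(offset, 12);
    assert_eq!(&buffer[0..8], 42u64.to_le_bytes().as_slice());
}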

View file

@ -16,7 +16,7 @@ use inkwell::values::{
use inkwell::{AddressSpace, IntPredicate};
use roc_module::symbol::Interns;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, LayoutIds, UnionLayout};
use roc_mono::layout::{Builtin, Layout, LayoutIds, STLayoutInterner, UnionLayout};
use super::build::{load_roc_value, FunctionSpec};
use super::convert::{argument_type_from_layout, argument_type_from_union_layout};
@ -124,7 +124,7 @@ impl<'ctx> PointerToRefcount<'ctx> {
pub fn decrement<'a, 'env>(&self, env: &Env<'a, 'ctx, 'env>, layout: &Layout<'a>) {
let alignment = layout
.allocation_alignment_bytes(env.target_info)
.allocation_alignment_bytes(env.layout_interner, env.target_info)
.max(env.target_info.ptr_width() as u32);
let context = env.context;
@ -332,7 +332,7 @@ fn modify_refcount_struct_help<'a, 'ctx, 'env>(
let wrapper_struct = arg_val.into_struct_value();
for (i, field_layout) in layouts.iter().enumerate() {
if field_layout.contains_refcounted() {
if field_layout.contains_refcounted(env.layout_interner) {
let raw_value = env
.builder
.build_extract_value(wrapper_struct, i as u32, "decrement_struct_field")
@ -613,7 +613,7 @@ fn modify_refcount_layout_build_function<'a, 'ctx, 'env>(
layout_ids,
mode,
when_recursive,
&lambda_set.runtime_representation(),
&lambda_set.runtime_representation(env.layout_interner),
),
}
}
@ -717,7 +717,7 @@ fn modify_refcount_list_help<'a, 'ctx, 'env>(
builder.position_at_end(modification_block);
if element_layout.contains_refcounted() {
if element_layout.contains_refcounted(env.layout_interner) {
let ptr_type = basic_type_from_layout(env, element_layout).ptr_type(AddressSpace::Generic);
let (len, ptr) = load_list(env.builder, original_wrapper, ptr_type);
@ -818,7 +818,9 @@ fn modify_refcount_str_help<'a, 'ctx, 'env>(
let parent = fn_val;
let arg_val = if Layout::Builtin(Builtin::Str).is_passed_by_reference(env.target_info) {
let arg_val = if Layout::Builtin(Builtin::Str)
.is_passed_by_reference(env.layout_interner, env.target_info)
{
env.builder
.build_load(arg_val.into_pointer_value(), "load_str_to_stack")
} else {
@ -1178,10 +1180,10 @@ enum DecOrReuse {
Reuse,
}
fn fields_need_no_refcounting(field_layouts: &[Layout]) -> bool {
fn fields_need_no_refcounting(interner: &STLayoutInterner, field_layouts: &[Layout]) -> bool {
!field_layouts
.iter()
.any(|x| x.is_refcounted() || x.contains_refcounted())
.any(|x| x.is_refcounted() || x.contains_refcounted(interner))
}
#[allow(clippy::too_many_arguments)]
@ -1211,7 +1213,7 @@ fn build_rec_union_recursive_decrement<'a, 'ctx, 'env>(
for (tag_id, field_layouts) in tags.iter().enumerate() {
// if none of the fields are or contain anything refcounted, just move on
if fields_need_no_refcounting(field_layouts) {
if fields_need_no_refcounting(env.layout_interner, field_layouts) {
continue;
}
@ -1256,7 +1258,7 @@ fn build_rec_union_recursive_decrement<'a, 'ctx, 'env>(
let recursive_field_ptr = cast_basic_basic(env.builder, ptr_as_i64_ptr, union_type);
deferred_rec.push(recursive_field_ptr);
} else if field_layout.contains_refcounted() {
} else if field_layout.contains_refcounted(env.layout_interner) {
let elem_pointer = env
.builder
.build_struct_gep(struct_ptr, i as u32, "gep_recursive_pointer")
@ -1620,7 +1622,7 @@ fn modify_refcount_union_help<'a, 'ctx, 'env>(
// if none of the fields are or contain anything refcounted, just move on
if !field_layouts
.iter()
.any(|x| x.is_refcounted() || x.contains_refcounted())
.any(|x| x.is_refcounted() || x.contains_refcounted(env.layout_interner))
{
continue;
}
@ -1678,13 +1680,14 @@ fn modify_refcount_union_help<'a, 'ctx, 'env>(
recursive_ptr_field_value,
&Layout::RecursivePointer,
)
} else if field_layout.contains_refcounted() {
} else if field_layout.contains_refcounted(env.layout_interner) {
let field_ptr = env
.builder
.build_struct_gep(cast_tag_data_pointer, i as u32, "modify_tag_field")
.unwrap();
let field_value = if field_layout.is_passed_by_reference(env.target_info) {
let field_value =
if field_layout.is_passed_by_reference(env.layout_interner, env.target_info) {
field_ptr.into()
} else {
env.builder.build_load(field_ptr, "field_value")
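
`contains_refcounted` is a transitive check, so once field layouts can be interned ids it has to chase them through the interner; that is why every call site in this file gains an `env.layout_interner` argument. A minimal sketch of the transitive check, with toy types standing in for the real roc `Layout`:

#[derive(Clone, Copy)]
struct Interned(usize);

enum Layout {
    Int,
    List(Interned),
    Struct(Vec<Interned>),
}

struct Interner {
    store: Vec<Layout>,
}

impl Interner {
    fn get(&self, id: Interned) -> &Layout {
        &self.store[id.0]
    }
}

// The check recurses through interned ids, hence the interner parameter.
fn contains_refcounted(interner: &Interner, layout: &Layout) -> bool {
    match layout {
        Layout::Int => false,
        Layout::List(_) => true, // lists are refcounted
        Layout::Struct(fields) => fields
            .iter()
            .any(|f| contains_refcounted(interner, interner.get(*f))),
    }
}

fn main() {
    let interner = Interner {
        store: vec![Layout::Int, Layout::List(Interned(0))],
    };
    let plain = Layout::Struct(vec![Interned(0)]);
    let listy = Layout::Struct(vec![Interned(1)]);
    assert!(!contains_refcounted(&interner, &plain));
    assert!(contains_refcounted(&interner, &listy));
}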

View file

@ -10,6 +10,7 @@ bitvec = "1"
bumpalo = { version = "3.8.0", features = ["collections"] }
roc_builtins = { path = "../builtins" }
roc_collections = { path = "../collections" }
roc_intern = { path = "../intern" }
roc_module = { path = "../module" }
roc_mono = { path = "../mono" }
roc_target = { path = "../roc_target" }

View file

@ -400,7 +400,7 @@ impl<'a> WasmBackend<'a> {
fn start_proc(&mut self, proc: &Proc<'a>) {
use ReturnMethod::*;
let ret_layout = WasmLayout::new(&proc.ret_layout);
let ret_layout = WasmLayout::new(self.env.layout_interner, &proc.ret_layout);
let ret_type = match ret_layout.return_method(CallConv::C) {
Primitive(ty, _) => Some(ty),
@ -418,8 +418,12 @@ impl<'a> WasmBackend<'a> {
// We never use the `return` instruction. Instead, we break from this block.
self.start_block();
self.storage
.allocate_args(proc.args, &mut self.code_builder, self.env.arena);
self.storage.allocate_args(
self.env.layout_interner,
proc.args,
&mut self.code_builder,
self.env.arena,
);
if let Some(ty) = ret_type {
let ret_var = self.storage.create_anonymous_local(ty);
@ -493,7 +497,7 @@ impl<'a> WasmBackend<'a> {
// Our convention is that the last arg of the wrapper is the heap return pointer
let heap_return_ptr_id = LocalId(wrapper_arg_layouts.len() as u32 - 1);
let inner_ret_layout = match wrapper_arg_layouts.last() {
Some(Layout::Boxed(inner)) => WasmLayout::new(inner),
Some(Layout::Boxed(inner)) => WasmLayout::new(self.env.layout_interner, inner),
x => internal_error!("Higher-order wrapper: invalid return layout {:?}", x),
};
@ -527,7 +531,7 @@ impl<'a> WasmBackend<'a> {
Layout::Boxed(inner) => inner,
x => internal_error!("Expected a Boxed layout, got {:?}", x),
};
if inner_layout.stack_size(TARGET_INFO) == 0 {
if inner_layout.stack_size(self.env.layout_interner, TARGET_INFO) == 0 {
continue;
}
@ -539,7 +543,7 @@ impl<'a> WasmBackend<'a> {
// If the inner function has closure data, it's the last arg of the inner fn
let closure_data_layout = wrapper_arg_layouts[0];
if closure_data_layout.stack_size(TARGET_INFO) > 0 {
if closure_data_layout.stack_size(self.env.layout_interner, TARGET_INFO) > 0 {
// The closure data exists, and will have been passed in to the wrapper as a
// one-element struct.
let inner_closure_data_layout = match closure_data_layout {
@ -614,7 +618,7 @@ impl<'a> WasmBackend<'a> {
let value_layout = wrapper_proc_layout.arguments[1];
let mut n_inner_args = 2;
if closure_data_layout.stack_size(TARGET_INFO) > 0 {
if closure_data_layout.stack_size(self.env.layout_interner, TARGET_INFO) > 0 {
self.code_builder.get_local(LocalId(0));
n_inner_args += 1;
}
@ -760,7 +764,9 @@ impl<'a> WasmBackend<'a> {
expr: &Expr<'a>,
kind: StoredVarKind,
) {
let sym_storage = self.storage.allocate_var(*layout, sym, kind);
let sym_storage = self
.storage
.allocate_var(self.env.layout_interner, *layout, sym, kind);
self.expr(sym, expr, layout, &sym_storage);
@ -837,7 +843,8 @@ impl<'a> WasmBackend<'a> {
}
let is_bool = matches!(cond_layout, Layout::Builtin(Builtin::Bool));
let cond_type = WasmLayout::new(cond_layout).arg_types(CallConv::C)[0];
let cond_type =
WasmLayout::new(self.env.layout_interner, cond_layout).arg_types(CallConv::C)[0];
// then, we jump whenever the value under scrutiny is equal to the value of a branch
for (i, (value, _, _)) in branches.iter().enumerate() {
@ -899,6 +906,7 @@ impl<'a> WasmBackend<'a> {
let mut jp_param_storages = Vec::with_capacity_in(parameters.len(), self.env.arena);
for parameter in parameters.iter() {
let mut param_storage = self.storage.allocate_var(
self.env.layout_interner,
parameter.layout,
parameter.symbol,
StoredVarKind::Variable,
@ -961,7 +969,11 @@ impl<'a> WasmBackend<'a> {
if false {
self.register_symbol_debug_names();
println!("## rc_stmt:\n{}\n{:?}", rc_stmt.to_pretty(200), rc_stmt);
println!(
"## rc_stmt:\n{}\n{:?}",
rc_stmt.to_pretty(self.env.layout_interner, 200),
rc_stmt
);
}
// If any new specializations were created, register their symbol data
@ -1233,7 +1245,7 @@ impl<'a> WasmBackend<'a> {
ret_layout,
} => {
let name = foreign_symbol.as_str();
let wasm_layout = WasmLayout::new(ret_layout);
let wasm_layout = WasmLayout::new(self.env.layout_interner, ret_layout);
let (num_wasm_args, has_return_val, ret_zig_packed_struct) =
self.storage.load_symbols_for_call(
self.env.arena,
@ -1258,7 +1270,7 @@ impl<'a> WasmBackend<'a> {
ret_layout: &Layout<'a>,
ret_storage: &StoredValue,
) {
let wasm_layout = WasmLayout::new(ret_layout);
let wasm_layout = WasmLayout::new(self.env.layout_interner, ret_layout);
// If this function is just a lowlevel wrapper, then inline it
if let LowLevelWrapperType::CanBeReplacedBy(lowlevel) =
@ -1414,9 +1426,12 @@ impl<'a> WasmBackend<'a> {
}
};
}
Layout::LambdaSet(lambdaset) => {
self.expr_struct(sym, &lambdaset.runtime_representation(), storage, fields)
}
Layout::LambdaSet(lambdaset) => self.expr_struct(
sym,
&lambdaset.runtime_representation(self.env.layout_interner),
storage,
fields,
),
_ => {
if !fields.is_empty() {
// Struct expression but not Struct layout => single element. Copy it.
@ -1464,7 +1479,7 @@ impl<'a> WasmBackend<'a> {
}
};
for field in field_layouts.iter().take(index as usize) {
offset += field.stack_size(TARGET_INFO);
offset += field.stack_size(self.env.layout_interner, TARGET_INFO);
}
self.storage
.copy_value_from_memory(&mut self.code_builder, sym, from_addr_val, offset);
@ -1482,11 +1497,12 @@ impl<'a> WasmBackend<'a> {
elems: &'a [ListLiteralElement<'a>],
) {
if let StoredValue::StackMemory { location, .. } = storage {
let size = elem_layout.stack_size(TARGET_INFO) * (elems.len() as u32);
let size = elem_layout.stack_size(self.env.layout_interner, TARGET_INFO)
* (elems.len() as u32);
// Allocate heap space and store its address in a local variable
let heap_local_id = self.storage.create_anonymous_local(PTR_TYPE);
let heap_alignment = elem_layout.alignment_bytes(TARGET_INFO);
let heap_alignment = elem_layout.alignment_bytes(self.env.layout_interner, TARGET_INFO);
self.allocate_with_refcount(Some(size), heap_alignment, 1);
self.code_builder.set_local(heap_local_id);
@ -1583,7 +1599,8 @@ impl<'a> WasmBackend<'a> {
let stores_tag_id_as_data = union_layout.stores_tag_id_as_data(TARGET_INFO);
let stores_tag_id_in_pointer = union_layout.stores_tag_id_in_pointer(TARGET_INFO);
let (data_size, data_alignment) = union_layout.data_size_and_alignment(TARGET_INFO);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(self.env.layout_interner, TARGET_INFO);
// We're going to use the pointer many times, so put it in a local variable
let stored_with_local =
@ -1635,7 +1652,10 @@ impl<'a> WasmBackend<'a> {
// Store the tag ID (if any)
if stores_tag_id_as_data {
let id_offset = data_offset + union_layout.tag_id_offset(TARGET_INFO).unwrap();
let id_offset = data_offset
+ union_layout
.tag_id_offset(self.env.layout_interner, TARGET_INFO)
.unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
let id_align = Align::from(id_align);
@ -1718,7 +1738,9 @@ impl<'a> WasmBackend<'a> {
};
if union_layout.stores_tag_id_as_data(TARGET_INFO) {
let id_offset = union_layout.tag_id_offset(TARGET_INFO).unwrap();
let id_offset = union_layout
.tag_id_offset(self.env.layout_interner, TARGET_INFO)
.unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
let id_align = Align::from(id_align);
@ -1778,7 +1800,7 @@ impl<'a> WasmBackend<'a> {
let field_offset: u32 = field_layouts
.iter()
.take(index as usize)
.map(|field_layout| field_layout.stack_size(TARGET_INFO))
.map(|field_layout| field_layout.stack_size(self.env.layout_interner, TARGET_INFO))
.sum();
// Get pointer and offset to the tag's data
@ -1844,7 +1866,8 @@ impl<'a> WasmBackend<'a> {
Layout::Boxed(arg) => *arg,
_ => internal_error!("ExprBox should always produce a Boxed layout"),
};
let (size, alignment) = arg_layout.stack_size_and_alignment(TARGET_INFO);
let (size, alignment) =
arg_layout.stack_size_and_alignment(self.env.layout_interner, TARGET_INFO);
self.allocate_with_refcount(Some(size), alignment, 1);
// store the pointer value from the value stack into the local variable

View file

@ -1,5 +1,5 @@
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_mono::layout::{Layout, UnionLayout};
use roc_mono::layout::{Layout, STLayoutInterner, UnionLayout};
use crate::wasm_module::ValueType;
use crate::{PTR_SIZE, PTR_TYPE, TARGET_INFO};
@ -41,12 +41,12 @@ pub enum WasmLayout {
}
impl WasmLayout {
pub fn new(layout: &Layout) -> Self {
pub fn new<'a>(interner: &STLayoutInterner<'a>, layout: &Layout<'a>) -> Self {
use roc_mono::layout::Builtin::*;
use UnionLayout::*;
use ValueType::*;
let (size, alignment_bytes) = layout.stack_size_and_alignment(TARGET_INFO);
let (size, alignment_bytes) = layout.stack_size_and_alignment(interner, TARGET_INFO);
match layout {
Layout::Builtin(Int(int_width)) => {
@ -85,7 +85,9 @@ impl WasmLayout {
format: StackMemoryFormat::Decimal,
},
Layout::LambdaSet(lambda_set) => WasmLayout::new(&lambda_set.runtime_representation()),
Layout::LambdaSet(lambda_set) => {
WasmLayout::new(interner, &lambda_set.runtime_representation(interner))
}
Layout::Builtin(Str | List(_))
| Layout::Struct { .. }
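
`WasmLayout::new` classifies a Roc layout into a Wasm primitive or stack memory, and the decision starts from the layout's (size, alignment) pair, which is now an interner-aware query. A toy version of the classification step, assuming size and alignment have already been computed:

#[derive(Debug, PartialEq)]
enum WasmLayout {
    Primitive(&'static str),
    StackMemory { size: u32, alignment_bytes: u32 },
}

// Small scalars map onto Wasm value types; everything else
// lives in stack memory, keeping its size and alignment.
fn classify(size: u32, alignment_bytes: u32, is_float: bool) -> WasmLayout {
    match (size, is_float) {
        (1..=4, false) => WasmLayout::Primitive("i32"),
        (8, false) => WasmLayout::Primitive("i64"),
        (4, true) => WasmLayout::Primitive("f32"),
        (8, true) => WasmLayout::Primitive("f64"),
        _ => WasmLayout::StackMemory { size, alignment_bytes },
    }
}

fn main() {
    assert_eq!(classify(8, 8, false), WasmLayout::Primitive("i64"));
    assert_eq!(
        classify(16, 8, false),
        WasmLayout::StackMemory { size: 16, alignment_bytes: 8 }
    );
}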

View file

@ -132,13 +132,18 @@ pub fn build_app_module<'a>(
host_to_app_map,
host_module,
fn_index_offset,
CodeGenHelp::new(env.arena, TargetInfo::default_wasm32(), env.module_id),
CodeGenHelp::new(
env.arena,
env.layout_interner,
TargetInfo::default_wasm32(),
env.module_id,
),
);
if DEBUG_SETTINGS.user_procs_ir {
println!("## procs");
for proc in procs.iter() {
println!("{}", proc.to_pretty(200));
println!("{}", proc.to_pretty(env.layout_interner, 200));
// println!("{:?}", proc);
}
}
@ -156,7 +161,7 @@ pub fn build_app_module<'a>(
if DEBUG_SETTINGS.helper_procs_ir {
println!("## helper_procs");
for proc in helper_procs.iter() {
println!("{}", proc.to_pretty(200));
println!("{}", proc.to_pretty(env.layout_interner, 200));
// println!("{:#?}", proc);
}
}

View file

@ -149,7 +149,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
)
}
@ -283,7 +283,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
backend.code_builder.i32_const(UPDATE_MODE_IMMUTABLE);
@ -358,7 +358,9 @@ impl<'a> LowLevelCall<'a> {
backend
.storage
.load_symbols(&mut backend.code_builder, &[index]);
let elem_size = self.ret_layout.stack_size(TARGET_INFO);
let elem_size = self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_size as i32);
backend.code_builder.i32_mul(); // index*size
@ -415,15 +417,16 @@ impl<'a> LowLevelCall<'a> {
..
} if value_layout == *list_elem => {
let list_offset = 0;
let elem_offset =
Layout::Builtin(Builtin::List(list_elem)).stack_size(TARGET_INFO);
let elem_offset = Layout::Builtin(Builtin::List(list_elem))
.stack_size(backend.env.layout_interner, TARGET_INFO);
(list_offset, elem_offset, value_layout)
}
Layout::Struct {
field_layouts: &[value_layout, Layout::Builtin(Builtin::List(list_elem))],
..
} if value_layout == *list_elem => {
let list_offset = value_layout.stack_size(TARGET_INFO);
let list_offset =
value_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
let elem_offset = 0;
(list_offset, elem_offset, value_layout)
}
@ -431,7 +434,7 @@ impl<'a> LowLevelCall<'a> {
};
let (elem_width, elem_alignment) =
elem_layout.stack_size_and_alignment(TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
// Ensure the new element is stored in memory so we can pass a pointer to Zig
let (new_elem_local, new_elem_offset, _) =
@ -480,7 +483,8 @@ impl<'a> LowLevelCall<'a> {
let capacity: Symbol = self.arguments[0];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
// Zig arguments Wasm types
// (return pointer) i32
@ -511,13 +515,14 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
// Load monomorphization constants
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_align as i32);
backend.code_builder.i32_const(elem_width as i32);
@ -531,7 +536,8 @@ impl<'a> LowLevelCall<'a> {
let spare: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
let (spare_local, spare_offset, _) = ensure_symbol_is_in_memory(
backend,
spare,
@ -553,7 +559,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -579,7 +585,7 @@ impl<'a> LowLevelCall<'a> {
let elem: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let elem_width = elem_layout.stack_size(TARGET_INFO);
let elem_width = elem_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@ -595,7 +601,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -616,7 +622,8 @@ impl<'a> LowLevelCall<'a> {
let elem: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@ -633,7 +640,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -657,7 +664,8 @@ impl<'a> LowLevelCall<'a> {
let len: Symbol = self.arguments[2];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@ -682,7 +690,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -701,7 +709,8 @@ impl<'a> LowLevelCall<'a> {
let drop_index: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@ -726,7 +735,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -746,7 +755,8 @@ impl<'a> LowLevelCall<'a> {
let index_2: Symbol = self.arguments[2];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
// Zig arguments Wasm types
// (return pointer) i32
@ -763,7 +773,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
&WasmLayout::new(&self.ret_layout),
&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@ -1625,7 +1635,10 @@ impl<'a> LowLevelCall<'a> {
// In most languages this operation is for signed numbers, but Roc defines it on all integers.
// So the argument is implicitly converted to signed before the shift operator.
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self.ret_layout.stack_size(TARGET_INFO) as i32;
let bit_width = 8 * self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO)
as i32;
if bit_width < 32 && !symbol_is_signed_int(backend, num) {
// Sign-extend the number by shifting left and right again
backend
@ -1670,7 +1683,9 @@ impl<'a> LowLevelCall<'a> {
// In most languages this operation is for unsigned numbers, but Roc defines it on all integers.
// So the argument is implicitly converted to unsigned before the shift operator.
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self.ret_layout.stack_size(TARGET_INFO);
let bit_width = 8 * self
.ret_layout
.stack_size(backend.env.layout_interner, TARGET_INFO);
if bit_width < 32 && symbol_is_signed_int(backend, bits) {
let mask = (1 << bit_width) - 1;
@ -1861,10 +1876,10 @@ impl<'a> LowLevelCall<'a> {
/// Equality and inequality
/// These can operate on any data type (except functions) so they're more complex than other operators.
fn eq_or_neq(&self, backend: &mut WasmBackend<'a>) {
let arg_layout =
backend.storage.symbol_layouts[&self.arguments[0]].runtime_representation();
let other_arg_layout =
backend.storage.symbol_layouts[&self.arguments[1]].runtime_representation();
let arg_layout = backend.storage.symbol_layouts[&self.arguments[0]]
.runtime_representation(backend.env.layout_interner);
let other_arg_layout = backend.storage.symbol_layouts[&self.arguments[1]]
.runtime_representation(backend.env.layout_interner);
debug_assert!(
arg_layout == other_arg_layout,
"Cannot do `==` comparison on different types: {:?} vs {:?}",
@ -2140,7 +2155,10 @@ pub fn call_higher_order_lowlevel<'a>(
.is_represented(backend.env.layout_interner)
.is_some()
{
(lambda_set.runtime_representation(), true)
(
lambda_set.runtime_representation(backend.env.layout_interner),
true,
)
} else {
// Closure data is a lambda set, which *itself* has no closure data!
// The higher-order wrapper doesn't need to pass this down, that's
@ -2164,6 +2182,7 @@ pub fn call_higher_order_lowlevel<'a>(
// make sure that the wrapping struct is available in stack memory, so we can hand out a
// pointer to it.
let wrapped_storage = backend.storage.allocate_var(
backend.env.layout_interner,
wrapped_captures_layout,
wrapped_closure_data_sym,
crate::storage::StoredVarKind::Variable,
@ -2326,7 +2345,8 @@ pub fn call_higher_order_lowlevel<'a>(
ListSortWith { xs } => {
let elem_layout = unwrap_list_elem_layout(backend.storage.symbol_layouts[xs]);
let (element_width, alignment) = elem_layout.stack_size_and_alignment(TARGET_INFO);
let (element_width, alignment) =
elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@ -2389,7 +2409,8 @@ fn list_map_n<'a>(
);
let elem_ret = unwrap_list_elem_layout(return_layout);
let (elem_ret_size, elem_ret_align) = elem_ret.stack_size_and_alignment(TARGET_INFO);
let (elem_ret_size, elem_ret_align) =
elem_ret.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@ -2410,7 +2431,7 @@ fn list_map_n<'a>(
cb.i32_const(owns_captured_environment as i32);
cb.i32_const(elem_ret_align as i32);
for el in arg_elem_layouts.iter() {
cb.i32_const(el.stack_size(TARGET_INFO) as i32);
cb.i32_const(el.stack_size(backend.env.layout_interner, TARGET_INFO) as i32);
}
cb.i32_const(elem_ret_size as i32);
@ -2449,7 +2470,8 @@ fn ensure_symbol_is_in_memory<'a>(
(local, offset, layout)
}
_ => {
let (width, alignment) = layout.stack_size_and_alignment(TARGET_INFO);
let (width, alignment) =
layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
let (frame_ptr, offset) = backend
.storage
.allocate_anonymous_stack_memory(width, alignment);

View file

@ -4,7 +4,7 @@ use bumpalo::Bump;
use roc_collections::all::MutMap;
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::Layout;
use roc_mono::layout::{Layout, STLayoutInterner};
use crate::layout::{CallConv, ReturnMethod, StackMemoryFormat, WasmLayout};
use crate::wasm_module::{Align, CodeBuilder, LocalId, ValueType, VmSymbolState};
@ -168,11 +168,12 @@ impl<'a> Storage<'a> {
/// They are allocated a certain offset and size in the stack frame.
pub fn allocate_var(
&mut self,
interner: &STLayoutInterner<'a>,
layout: Layout<'a>,
symbol: Symbol,
kind: StoredVarKind,
) -> StoredValue {
let wasm_layout = WasmLayout::new(&layout);
let wasm_layout = WasmLayout::new(interner, &layout);
self.symbol_layouts.insert(symbol, layout);
let storage = match wasm_layout {
@ -217,6 +218,7 @@ impl<'a> Storage<'a> {
/// stack frame, because it's a lot easier to keep track of the data flow.
pub fn allocate_args(
&mut self,
interner: &STLayoutInterner<'a>,
args: &[(Layout<'a>, Symbol)],
code_builder: &mut CodeBuilder,
arena: &'a Bump,
@ -226,7 +228,7 @@ impl<'a> Storage<'a> {
for (layout, symbol) in args {
self.symbol_layouts.insert(*symbol, *layout);
let wasm_layout = WasmLayout::new(layout);
let wasm_layout = WasmLayout::new(interner, layout);
let local_index = self.arg_types.len() as u32;
let storage = match wasm_layout {

View file

@ -6,6 +6,7 @@ The user needs to analyse the Wasm module's memory to decode the result.
use bumpalo::{collections::Vec, Bump};
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_intern::Interner;
use roc_mono::layout::{Builtin, Layout, UnionLayout};
use roc_target::TargetInfo;
@ -36,13 +37,14 @@ pub trait Wasm32Result {
/// Layout-driven wrapper generation
pub fn insert_wrapper_for_layout<'a>(
arena: &'a Bump,
interner: &impl Interner<'a, Layout<'a>>,
module: &mut WasmModule<'a>,
wrapper_name: &'static str,
main_fn_index: u32,
layout: &Layout<'a>,
) {
let mut stack_data_structure = || {
let size = layout.stack_size(TargetInfo::default_wasm32());
let size = layout.stack_size(interner, TargetInfo::default_wasm32());
if size == 0 {
<() as Wasm32Result>::insert_wrapper(arena, module, wrapper_name, main_fn_index);
} else {

View file

@ -182,6 +182,14 @@ impl<'a, K: Hash + Eq> Interner<'a, K> for ThreadLocalInterner<'a, K> {
}
impl<'a, K> SingleThreadedInterner<'a, K> {
/// Creates a new single threaded interner with the given capacity.
pub fn with_capacity(cap: usize) -> Self {
Self {
map: BumpMap::with_capacity_and_hasher(cap, default_hasher()),
vec: Vec::with_capacity(cap),
}
}
/// Promotes the [SingleThreadedInterner] back to a [GlobalInterner].
///
/// You should *only* use this if you need to go from a single-threaded to a concurrent context,
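
The new `with_capacity` constructor pre-sizes both halves of the interner: the hash map used for deduplication and the vector that maps ids back to values. A self-contained miniature of that map-plus-vec design (illustrative names; the real `SingleThreadedInterner` is arena-backed and pairs with a global interner):

use std::collections::HashMap;

struct TinyInterner<K> {
    map: HashMap<K, usize>,
    vec: Vec<K>,
}

impl<K: std::hash::Hash + Eq + Clone> TinyInterner<K> {
    fn with_capacity(cap: usize) -> Self {
        Self {
            map: HashMap::with_capacity(cap),
            vec: Vec::with_capacity(cap),
        }
    }

    // Duplicate keys intern to the same id.
    fn insert(&mut self, k: K) -> usize {
        if let Some(&id) = self.map.get(&k) {
            return id;
        }
        let id = self.vec.len();
        self.vec.push(k.clone());
        self.map.insert(k, id);
        id
    }

    fn get(&self, id: usize) -> &K {
        &self.vec[id]
    }
}

fn main() {
    let mut interner = TinyInterner::with_capacity(8);
    let a = interner.insert("i64");
    let b = interner.insert("i64");
    assert_eq!(a, b);
    assert_eq!(interner.get(a), &"i64");
}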

View file

@ -34,7 +34,9 @@ use roc_mono::ir::{
CapturedSymbols, ExternalSpecializations, PartialProc, Proc, ProcLayout, Procs, ProcsBase,
UpdateModeIds,
};
use roc_mono::layout::{CapturesNiche, LambdaName, Layout, LayoutCache, LayoutProblem};
use roc_mono::layout::{
CapturesNiche, LambdaName, Layout, LayoutCache, LayoutProblem, STLayoutInterner,
};
use roc_parse::ast::{self, Defs, ExtractSpaces, Spaced, StrLiteral, TypeAnnotation};
use roc_parse::header::{ExposedName, ImportsEntry, PackageEntry, PlatformHeader, To, TypedIdent};
use roc_parse::header::{HeaderFor, ModuleNameEnum, PackageName};
@ -850,6 +852,9 @@ enum Msg<'a> {
/// all modules are now monomorphized, we are done
FinishedAllSpecialization {
subs: Subs,
/// The layout interner after all passes in mono are done.
/// DO NOT use the one on state; that is left in an empty state after specialization is complete!
layout_interner: STLayoutInterner<'a>,
exposed_to_host: ExposedToHost,
},
@ -1610,12 +1615,14 @@ fn state_thread_step<'a>(
}
Msg::FinishedAllSpecialization {
subs,
layout_interner,
exposed_to_host,
} => {
// We're done! There should be no more messages pending.
debug_assert!(msg_rx.is_empty());
let monomorphized = finish_specialization(state, subs, exposed_to_host)?;
let monomorphized =
finish_specialization(state, subs, layout_interner, exposed_to_host)?;
Ok(ControlFlow::Break(LoadResult::Monomorphized(monomorphized)))
}
@ -1991,12 +1998,12 @@ fn start_tasks<'a>(
}
macro_rules! debug_print_ir {
($state:expr, $flag:path) => {
($state:expr, $interner:expr, $flag:path) => {
dbg_do!($flag, {
let procs_string = $state
.procedures
.values()
.map(|proc| proc.to_pretty(200))
.map(|proc| proc.to_pretty($interner, 200))
.collect::<Vec<_>>();
let result = procs_string.join("\n");
@ -2690,9 +2697,18 @@ fn update<'a>(
}
}
let layout_interner = {
let mut taken = GlobalInterner::with_capacity(0);
std::mem::swap(&mut state.layout_interner, &mut taken);
taken
};
let layout_interner = layout_interner
.unwrap()
.expect("outstanding references to global layout interener, but we just drained all layout caches");
log!("specializations complete from {:?}", module_id);
debug_print_ir!(state, ROC_PRINT_IR_AFTER_SPECIALIZATION);
debug_print_ir!(state, &layout_interner, ROC_PRINT_IR_AFTER_SPECIALIZATION);
let ident_ids = state.constrained_ident_ids.get_mut(&module_id).unwrap();
@ -2704,17 +2720,18 @@ fn update<'a>(
&mut state.procedures,
);
debug_print_ir!(state, ROC_PRINT_IR_AFTER_RESET_REUSE);
debug_print_ir!(state, &layout_interner, ROC_PRINT_IR_AFTER_RESET_REUSE);
Proc::insert_refcount_operations(
arena,
&layout_interner,
module_id,
ident_ids,
&mut update_mode_ids,
&mut state.procedures,
);
debug_print_ir!(state, ROC_PRINT_IR_AFTER_REFCOUNT);
debug_print_ir!(state, &layout_interner, ROC_PRINT_IR_AFTER_REFCOUNT);
// This is not safe with the new non-recursive RC updates that we do for tag unions
//
@ -2732,7 +2749,7 @@ fn update<'a>(
msg_tx
.send(Msg::FinishedAllSpecialization {
subs,
// TODO thread through mono problems
layout_interner,
exposed_to_host: state.exposed_to_host.clone(),
})
.map_err(|_| LoadingProblem::MsgChannelDied)?;
@ -2855,11 +2872,12 @@ fn log_layout_stats(module_id: ModuleId, layout_cache: &LayoutCache) {
);
}
fn finish_specialization(
state: State,
fn finish_specialization<'a>(
state: State<'a>,
subs: Subs,
layout_interner: STLayoutInterner<'a>,
exposed_to_host: ExposedToHost,
) -> Result<MonomorphizedModule, LoadingProblem> {
) -> Result<MonomorphizedModule<'a>, LoadingProblem<'a>> {
if false {
println!(
"total Type clones: {} ",
@ -2895,7 +2913,6 @@ fn finish_specialization(
platform_path,
platform_data,
exec_mode,
layout_interner,
..
} = state;
@ -2907,9 +2924,6 @@ fn finish_specialization(
..
} = module_cache;
let layout_interner = GlobalInterner::unwrap(layout_interner)
.expect("Outstanding references to the global layout interner");
let sources: MutMap<ModuleId, (PathBuf, Box<str>)> = sources
.into_iter()
.map(|(id, (path, src))| (id, (path, src.into())))
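
The hand-off in `finish_specialization` follows the usual shared-ownership rule: the global interner can only be reclaimed as a single-threaded value once every worker's layout cache has dropped its reference, which is exactly what the `expect` message asserts. A toy model of that rule using `Arc::try_unwrap`:

use std::sync::Arc;

fn main() {
    let global = Arc::new(vec!["i64", "str"]);
    let worker_ref = Arc::clone(&global);
    // While a worker still holds a reference, unwrapping fails:
    let global = match Arc::try_unwrap(global) {
        Ok(owned) => owned,
        Err(shared) => {
            drop(worker_ref); // all layout caches are drained first
            Arc::try_unwrap(shared)
                .expect("outstanding references to global layout interner")
        }
    };
    assert_eq!(global.len(), 2);
}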

View file

@ -629,7 +629,7 @@ fn eq_list<'a>(
// let size = literal int
let size = root.create_symbol(ident_ids, "size");
let size_expr = Expr::Literal(Literal::Int(
(elem_layout.stack_size(root.target_info) as i128).to_ne_bytes(),
(elem_layout.stack_size(root.layout_interner, root.target_info) as i128).to_ne_bytes(),
));
let size_stmt = |next| Stmt::Let(size, size_expr, layout_isize, next);

View file

@ -8,7 +8,7 @@ use crate::ir::{
Call, CallSpecId, CallType, Expr, HostExposedLayouts, JoinPointId, ModifyRc, Proc, ProcLayout,
SelfRecursive, Stmt, UpdateModeId,
};
use crate::layout::{Builtin, CapturesNiche, LambdaName, Layout, UnionLayout};
use crate::layout::{Builtin, CapturesNiche, LambdaName, Layout, STLayoutInterner, UnionLayout};
mod equality;
mod refcount;
@ -73,6 +73,7 @@ pub struct Context<'a> {
///
pub struct CodeGenHelp<'a> {
arena: &'a Bump,
layout_interner: &'a STLayoutInterner<'a>,
home: ModuleId,
target_info: TargetInfo,
layout_isize: Layout<'a>,
@ -82,7 +83,12 @@ pub struct CodeGenHelp<'a> {
}
impl<'a> CodeGenHelp<'a> {
pub fn new(arena: &'a Bump, target_info: TargetInfo, home: ModuleId) -> Self {
pub fn new(
arena: &'a Bump,
layout_interner: &'a STLayoutInterner<'a>,
target_info: TargetInfo,
home: ModuleId,
) -> Self {
let layout_isize = Layout::isize(target_info);
// Refcount is a boxed isize. TODO: use the new Box layout when dev backends support it
@ -90,6 +96,7 @@ impl<'a> CodeGenHelp<'a> {
CodeGenHelp {
arena,
layout_interner,
home,
target_info,
layout_isize,
@ -122,7 +129,7 @@ impl<'a> CodeGenHelp<'a> {
modify: &ModifyRc,
following: &'a Stmt<'a>,
) -> (&'a Stmt<'a>, Vec<'a, (Symbol, ProcLayout<'a>)>) {
if !refcount::is_rc_implemented_yet(&layout) {
if !refcount::is_rc_implemented_yet(self.layout_interner, &layout) {
// Just a warning, so we can decouple backend development from refcounting development.
// When we are closer to completion, we can change it to a panic.
println!(
@ -450,7 +457,7 @@ impl<'a> CodeGenHelp<'a> {
}
Layout::LambdaSet(lambda_set) => {
self.replace_rec_ptr(ctx, lambda_set.runtime_representation())
self.replace_rec_ptr(ctx, lambda_set.runtime_representation(self.layout_interner))
}
// This line is the whole point of the function

View file

@ -1,5 +1,6 @@
use bumpalo::collections::vec::Vec;
use roc_builtins::bitcode::IntWidth;
use roc_intern::Interner;
use roc_module::low_level::{LowLevel, LowLevel::*};
use roc_module::symbol::{IdentIds, Symbol};
use roc_target::PtrWidth;
@ -102,7 +103,7 @@ pub fn refcount_generic<'a>(
layout: Layout<'a>,
structure: Symbol,
) -> Stmt<'a> {
debug_assert!(is_rc_implemented_yet(&layout));
debug_assert!(is_rc_implemented_yet(root.layout_interner, &layout));
match layout {
Layout::Builtin(Builtin::Int(_) | Builtin::Float(_) | Builtin::Bool | Builtin::Decimal) => {
@ -121,7 +122,7 @@ pub fn refcount_generic<'a>(
refcount_union(root, ident_ids, ctx, union_layout, structure)
}
Layout::LambdaSet(lambda_set) => {
let runtime_layout = lambda_set.runtime_representation();
let runtime_layout = lambda_set.runtime_representation(root.layout_interner);
refcount_generic(root, ident_ids, ctx, runtime_layout, structure)
}
Layout::RecursivePointer => unreachable!(
@ -205,7 +206,8 @@ pub fn refcount_reset_proc_body<'a>(
let alloc_addr_stmt = {
let alignment = root.create_symbol(ident_ids, "alignment");
let alignment_expr = Expr::Literal(Literal::Int(
(layout.alignment_bytes(root.target_info) as i128).to_ne_bytes(),
(layout.alignment_bytes(root.layout_interner, root.target_info) as i128)
.to_ne_bytes(),
));
let alloc_addr = root.create_symbol(ident_ids, "alloc_addr");
let alloc_addr_expr = Expr::Call(Call {
@ -351,30 +353,37 @@ pub fn refcount_reset_proc_body<'a>(
// Check if refcounting is implemented yet. In the long term, this will be deleted.
// In the short term, it helps us to skip refcounting and let it leak, so we can make
// progress incrementally. Kept in sync with generate_procs using assertions.
pub fn is_rc_implemented_yet(layout: &Layout) -> bool {
pub fn is_rc_implemented_yet<'a, I>(interner: &I, layout: &Layout<'a>) -> bool
where
I: Interner<'a, Layout<'a>>,
{
use UnionLayout::*;
match layout {
Layout::Builtin(Builtin::List(elem_layout)) => is_rc_implemented_yet(elem_layout),
Layout::Builtin(Builtin::List(elem_layout)) => is_rc_implemented_yet(interner, elem_layout),
Layout::Builtin(_) => true,
Layout::Struct { field_layouts, .. } => field_layouts.iter().all(is_rc_implemented_yet),
Layout::Struct { field_layouts, .. } => field_layouts
.iter()
.all(|l| is_rc_implemented_yet(interner, l)),
Layout::Union(union_layout) => match union_layout {
NonRecursive(tags) => tags
.iter()
.all(|fields| fields.iter().all(is_rc_implemented_yet)),
.all(|fields| fields.iter().all(|l| is_rc_implemented_yet(interner, l))),
Recursive(tags) => tags
.iter()
.all(|fields| fields.iter().all(is_rc_implemented_yet)),
NonNullableUnwrapped(fields) => fields.iter().all(is_rc_implemented_yet),
.all(|fields| fields.iter().all(|l| is_rc_implemented_yet(interner, l))),
NonNullableUnwrapped(fields) => {
fields.iter().all(|l| is_rc_implemented_yet(interner, l))
}
NullableWrapped { other_tags, .. } => other_tags
.iter()
.all(|fields| fields.iter().all(is_rc_implemented_yet)),
NullableUnwrapped { other_fields, .. } => {
other_fields.iter().all(is_rc_implemented_yet)
}
.all(|fields| fields.iter().all(|l| is_rc_implemented_yet(interner, l))),
NullableUnwrapped { other_fields, .. } => other_fields
.iter()
.all(|l| is_rc_implemented_yet(interner, l)),
},
Layout::LambdaSet(lambda_set) => {
is_rc_implemented_yet(&lambda_set.runtime_representation())
is_rc_implemented_yet(interner, &lambda_set.runtime_representation(interner))
}
Layout::RecursivePointer => true,
Layout::Boxed(_) => true,
@ -734,7 +743,7 @@ fn refcount_list<'a>(
//
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment = layout.alignment_bytes(root.target_info);
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_list = modify_refcount(
@ -832,7 +841,7 @@ fn refcount_list_elems<'a>(
// let size = literal int
let elem_size = root.create_symbol(ident_ids, "elem_size");
let elem_size_expr = Expr::Literal(Literal::Int(
(elem_layout.stack_size(root.target_info) as i128).to_ne_bytes(),
(elem_layout.stack_size(root.layout_interner, root.target_info) as i128).to_ne_bytes(),
));
let elem_size_stmt = |next| Stmt::Let(elem_size, elem_size_expr, layout_isize, next);
@ -981,7 +990,7 @@ fn refcount_struct<'a>(
let mut stmt = rc_return_stmt(root, ident_ids, ctx);
for (i, field_layout) in field_layouts.iter().enumerate().rev() {
if field_layout.contains_refcounted() {
if field_layout.contains_refcounted(root.layout_interner) {
let field_val = root.create_symbol(ident_ids, &format!("field_val_{}", i));
let field_val_expr = Expr::StructAtIndex {
index: i as u64,
@ -1221,7 +1230,8 @@ fn refcount_union_rec<'a>(
let rc_structure_stmt = {
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment = Layout::Union(union_layout).alignment_bytes(root.target_info);
let alignment =
Layout::Union(union_layout).alignment_bytes(root.layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_structure_stmt = modify_refcount(
root,
@ -1329,7 +1339,7 @@ fn refcount_union_tailrec<'a>(
)
};
let alignment = layout.alignment_bytes(root.target_info);
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let modify_structure_stmt = modify_refcount(
root,
ident_ids,
@ -1487,7 +1497,7 @@ fn refcount_tag_fields<'a>(
let mut stmt = following;
for (i, field_layout) in field_layouts.iter().enumerate().rev() {
if field_layout.contains_refcounted() {
if field_layout.contains_refcounted(root.layout_interner) {
let field_val = root.create_symbol(ident_ids, &format!("field_{}_{}", tag_id, i));
let field_val_expr = Expr::UnionAtIndex {
union_layout,
@ -1534,7 +1544,7 @@ fn refcount_boxed<'a>(
//
let rc_ptr = root.create_symbol(ident_ids, "rc_ptr");
let alignment = layout.alignment_bytes(root.target_info);
let alignment = layout.alignment_bytes(root.layout_interner, root.target_info);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
let modify_outer = modify_refcount(
root,
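
`is_rc_implemented_yet` takes `I: Interner<'a, Layout<'a>>` rather than a concrete interner type, so both the single-threaded and thread-local variants satisfy the bound. A minimal sketch of that trait-bound shape (toy trait, not the real `roc_intern` API):

trait Interner<K> {
    fn get(&self, id: usize) -> &K;
}

struct SingleThreaded<K>(Vec<K>);

impl<K> Interner<K> for SingleThreaded<K> {
    fn get(&self, id: usize) -> &K {
        &self.0[id]
    }
}

// Generic over any interner implementation, like the real check.
fn is_implemented<I: Interner<&'static str>>(interner: &I, id: usize) -> bool {
    // pretend only "list" layouts are unimplemented
    interner.get(id) != &"list"
}

fn main() {
    let interner = SingleThreaded(vec!["int", "list"]);
    assert!(is_implemented(&interner, 0));
    assert!(!is_implemented(&interner, 1));
}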

View file

@ -3,7 +3,7 @@ use crate::ir::{
CallType, Expr, HigherOrderLowLevel, JoinPointId, ModifyRc, Param, Proc, ProcLayout, Stmt,
UpdateModeIds,
};
use crate::layout::Layout;
use crate::layout::{Layout, STLayoutInterner};
use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_collections::all::{MutMap, MutSet};
@ -238,8 +238,9 @@ pub type LiveVarSet = MutSet<Symbol>;
pub type JPLiveVarMap = MutMap<JoinPointId, LiveVarSet>;
#[derive(Clone, Debug)]
struct Context<'a> {
struct Context<'a, 'i> {
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
vars: VarMap,
jp_live_vars: JPLiveVarMap, // map: join point => live variables
param_map: &'a ParamMap<'a>,
@ -313,8 +314,12 @@ fn consume_expr(m: &VarMap, e: &Expr<'_>) -> bool {
}
}
impl<'a> Context<'a> {
pub fn new(arena: &'a Bump, param_map: &'a ParamMap<'a>) -> Self {
impl<'a, 'i> Context<'a, 'i> {
pub fn new(
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
param_map: &'a ParamMap<'a>,
) -> Self {
let mut vars = MutMap::default();
for symbol in param_map.iter_symbols() {
@ -331,6 +336,7 @@ impl<'a> Context<'a> {
Self {
arena,
layout_interner,
vars,
jp_live_vars: MutMap::default(),
param_map,
@ -532,7 +538,7 @@ impl<'a> Context<'a> {
}
#[allow(clippy::too_many_arguments)]
fn visit_call<'i>(
fn visit_call(
&self,
codegen: &mut CodegenTools<'i>,
z: Symbol,
@ -602,7 +608,7 @@ impl<'a> Context<'a> {
}
#[allow(clippy::too_many_arguments)]
fn visit_higher_order_lowlevel<'i>(
fn visit_higher_order_lowlevel(
&self,
codegen: &mut CodegenTools<'i>,
z: Symbol,
@ -831,7 +837,7 @@ impl<'a> Context<'a> {
}
#[allow(clippy::many_single_char_names)]
fn visit_variable_declaration<'i>(
fn visit_variable_declaration(
&self,
codegen: &mut CodegenTools<'i>,
z: Symbol,
@ -955,7 +961,7 @@ impl<'a> Context<'a> {
reset: bool,
) -> Self {
// should we perform incs and decs on this value?
let reference = layout.contains_refcounted();
let reference = layout.contains_refcounted(self.layout_interner);
let info = VarInfo {
reference,
@ -976,7 +982,7 @@ impl<'a> Context<'a> {
for p in ps.iter() {
let info = VarInfo {
reference: p.layout.contains_refcounted(),
reference: p.layout.contains_refcounted(self.layout_interner),
consume: !p.borrow,
persistent: false,
reset: false,
@ -999,7 +1005,10 @@ impl<'a> Context<'a> {
b_live_vars: &LiveVarSet,
) -> &'a Stmt<'a> {
for p in ps.iter() {
if !p.borrow && p.layout.contains_refcounted() && !b_live_vars.contains(&p.symbol) {
if !p.borrow
&& p.layout.contains_refcounted(self.layout_interner)
&& !b_live_vars.contains(&p.symbol)
{
b = self.add_dec(p.symbol, b)
}
}
@ -1022,7 +1031,7 @@ impl<'a> Context<'a> {
b
}
fn visit_stmt<'i>(
fn visit_stmt(
&self,
codegen: &mut CodegenTools<'i>,
stmt: &'a Stmt<'a>,
@ -1425,13 +1434,14 @@ struct CodegenTools<'i> {
pub fn visit_procs<'a, 'i>(
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
home: ModuleId,
ident_ids: &'i mut IdentIds,
update_mode_ids: &'i mut UpdateModeIds,
param_map: &'a ParamMap<'a>,
procs: &mut MutMap<(Symbol, ProcLayout<'a>), Proc<'a>>,
) {
let ctx = Context::new(arena, param_map);
let ctx = Context::new(arena, layout_interner, param_map);
let mut codegen = CodegenTools {
home,
@ -1448,7 +1458,7 @@ fn visit_proc<'a, 'i>(
arena: &'a Bump,
codegen: &mut CodegenTools<'i>,
param_map: &'a ParamMap<'a>,
ctx: &Context<'a>,
ctx: &Context<'a, 'i>,
proc: &mut Proc<'a>,
layout: ProcLayout<'a>,
) {
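
`Context<'a, 'i>` keeps two lifetimes apart: `'a` for arena-allocated IR that outlives the pass, and `'i` for the borrowed interner that only needs to live as long as the pass itself. A compiling miniature of that split:

struct Interner {
    names: Vec<String>,
}

// 'a: long-lived arena data; 'i: the interner borrow for this pass.
struct Context<'a, 'i> {
    arena_data: &'a [u32],
    interner: &'i Interner,
}

impl<'a, 'i> Context<'a, 'i> {
    fn describe(&self, id: usize) -> String {
        format!("{} x{}", self.interner.names[id], self.arena_data.len())
    }
}

fn main() {
    let data = [1, 2, 3];
    let interner = Interner { names: vec!["i64".into()] };
    let ctx = Context { arena_data: &data, interner: &interner };
    assert_eq!(ctx.describe(0), "i64 x3");
}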

View file

@ -2,8 +2,8 @@
use crate::layout::{
self, Builtin, CapturesNiche, ClosureCallOptions, ClosureRepresentation, EnumDispatch,
LambdaName, LambdaSet, Layout, LayoutCache, LayoutProblem, RawFunctionLayout, TagIdIntType,
UnionLayout, WrappedVariant,
LambdaName, LambdaSet, Layout, LayoutCache, LayoutProblem, RawFunctionLayout, STLayoutInterner,
TagIdIntType, UnionLayout, WrappedVariant,
};
use bumpalo::collections::{CollectIn, Vec};
use bumpalo::Bump;
@ -22,6 +22,7 @@ use roc_debug_flags::{
use roc_derive::SharedDerivedModule;
use roc_error_macros::{internal_error, todo_abilities};
use roc_exhaustive::{Ctor, CtorName, RenderAs, TagId};
use roc_intern::Interner;
use roc_late_solve::{resolve_ability_specialization, AbilitiesView, Resolved, UnificationFailed};
use roc_module::ident::{ForeignSymbol, Lowercase, TagName};
use roc_module::low_level::LowLevel;
@ -338,18 +339,26 @@ pub enum Parens {
}
impl<'a> Proc<'a> {
pub fn to_doc<'b, D, A>(&'b self, alloc: &'b D, _parens: Parens) -> DocBuilder<'b, D, A>
pub fn to_doc<'b, D, A, I>(
&'b self,
alloc: &'b D,
interner: &'b I,
_parens: Parens,
) -> DocBuilder<'b, D, A>
where
D: DocAllocator<'b, A>,
D::Doc: Clone,
A: Clone,
I: Interner<'a, Layout<'a>>,
{
let args_doc = self.args.iter().map(|(layout, symbol)| {
let arg_doc = symbol_to_doc(alloc, *symbol);
if pretty_print_ir_symbols() {
arg_doc
.append(alloc.reflow(": "))
.append(layout.to_doc(alloc, Parens::NotNeeded))
arg_doc.append(alloc.reflow(": ")).append(layout.to_doc(
alloc,
interner,
Parens::NotNeeded,
))
} else {
arg_doc
}
@ -360,7 +369,7 @@ impl<'a> Proc<'a> {
.text("procedure : ")
.append(symbol_to_doc(alloc, self.name.name()))
.append(" ")
.append(self.ret_layout.to_doc(alloc, Parens::NotNeeded))
.append(self.ret_layout.to_doc(alloc, interner, Parens::NotNeeded))
.append(alloc.hardline())
.append(alloc.text("procedure = "))
.append(symbol_to_doc(alloc, self.name.name()))
@ -368,7 +377,7 @@ impl<'a> Proc<'a> {
.append(alloc.intersperse(args_doc, ", "))
.append("):")
.append(alloc.hardline())
.append(self.body.to_doc(alloc).indent(4))
.append(self.body.to_doc(alloc, interner).indent(4))
} else {
alloc
.text("procedure ")
@ -377,14 +386,17 @@ impl<'a> Proc<'a> {
.append(alloc.intersperse(args_doc, ", "))
.append("):")
.append(alloc.hardline())
.append(self.body.to_doc(alloc).indent(4))
.append(self.body.to_doc(alloc, interner).indent(4))
}
}
pub fn to_pretty(&self, width: usize) -> String {
pub fn to_pretty<I>(&self, interner: &I, width: usize) -> String
where
I: Interner<'a, Layout<'a>>,
{
let allocator = BoxAllocator;
let mut w = std::vec::Vec::new();
self.to_doc::<_, ()>(&allocator, Parens::NotNeeded)
self.to_doc::<_, (), _>(&allocator, interner, Parens::NotNeeded)
.1
.render(width, &mut w)
.unwrap();
@ -394,6 +406,7 @@ impl<'a> Proc<'a> {
pub fn insert_refcount_operations<'i>(
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
home: ModuleId,
ident_ids: &'i mut IdentIds,
update_mode_ids: &'i mut UpdateModeIds,
@ -403,6 +416,7 @@ impl<'a> Proc<'a> {
crate::inc_dec::visit_procs(
arena,
layout_interner,
home,
ident_ids,
update_mode_ids,
@ -2081,11 +2095,12 @@ impl<'a> Stmt<'a> {
from_can(env, var, can_expr, procs, layout_cache)
}
pub fn to_doc<'b, D, A>(&'b self, alloc: &'b D) -> DocBuilder<'b, D, A>
pub fn to_doc<'b, D, A, I>(&'b self, alloc: &'b D, interner: &I) -> DocBuilder<'b, D, A>
where
D: DocAllocator<'b, A>,
D::Doc: Clone,
A: Clone,
I: Interner<'a, Layout<'a>>,
{
use Stmt::*;
@ -2094,17 +2109,17 @@ impl<'a> Stmt<'a> {
.text("let ")
.append(symbol_to_doc(alloc, *symbol))
.append(" : ")
.append(layout.to_doc(alloc, Parens::NotNeeded))
.append(layout.to_doc(alloc, interner, Parens::NotNeeded))
.append(" = ")
.append(expr.to_doc(alloc))
.append(";")
.append(alloc.hardline())
.append(cont.to_doc(alloc)),
.append(cont.to_doc(alloc, interner)),
Refcounting(modify, cont) => modify
.to_doc(alloc)
.append(alloc.hardline())
.append(cont.to_doc(alloc)),
.append(cont.to_doc(alloc, interner)),
Expect {
condition,
@ -2115,7 +2130,7 @@ impl<'a> Stmt<'a> {
.append(symbol_to_doc(alloc, *condition))
.append(";")
.append(alloc.hardline())
.append(remainder.to_doc(alloc)),
.append(remainder.to_doc(alloc, interner)),
ExpectFx {
condition,
@ -2126,7 +2141,7 @@ impl<'a> Stmt<'a> {
.append(symbol_to_doc(alloc, *condition))
.append(";")
.append(alloc.hardline())
.append(remainder.to_doc(alloc)),
.append(remainder.to_doc(alloc, interner)),
Ret(symbol) => alloc
.text("ret ")
@ -2148,19 +2163,19 @@ impl<'a> Stmt<'a> {
.append(" then")
.append(info.to_doc(alloc))
.append(alloc.hardline())
.append(pass.to_doc(alloc).indent(4))
.append(pass.to_doc(alloc, interner).indent(4))
.append(alloc.hardline())
.append(alloc.text("else"))
.append(default_branch.0.to_doc(alloc))
.append(alloc.hardline())
.append(fail.to_doc(alloc).indent(4))
.append(fail.to_doc(alloc, interner).indent(4))
}
_ => {
let default_doc = alloc
.text("default:")
.append(alloc.hardline())
.append(default_branch.1.to_doc(alloc).indent(4))
.append(default_branch.1.to_doc(alloc, interner).indent(4))
.indent(4);
let branches_docs = branches
@ -2169,7 +2184,7 @@ impl<'a> Stmt<'a> {
alloc
.text(format!("case {}:", tag))
.append(alloc.hardline())
.append(expr.to_doc(alloc).indent(4))
.append(expr.to_doc(alloc, interner).indent(4))
.indent(4)
})
.chain(std::iter::once(default_doc));
@ -2206,9 +2221,9 @@ impl<'a> Stmt<'a> {
.append(" ".repeat(parameters.len().min(1)))
.append(alloc.intersperse(it, alloc.space()))
.append(":"),
continuation.to_doc(alloc).indent(4),
continuation.to_doc(alloc, interner).indent(4),
alloc.text("in"),
remainder.to_doc(alloc),
remainder.to_doc(alloc, interner),
],
alloc.hardline(),
)
@ -2226,10 +2241,13 @@ impl<'a> Stmt<'a> {
}
}
pub fn to_pretty(&self, width: usize) -> String {
pub fn to_pretty<I>(&self, interner: &I, width: usize) -> String
where
I: Interner<'a, Layout<'a>>,
{
let allocator = BoxAllocator;
let mut w = std::vec::Vec::new();
self.to_doc::<_, ()>(&allocator)
self.to_doc::<_, (), _>(&allocator, interner)
.1
.render(width, &mut w)
.unwrap();
@ -3311,8 +3329,10 @@ fn specialize_proc_help<'a>(
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
let size1 =
layout1.alignment_bytes(&layout_cache.interner, ptr_bytes);
let size2 =
layout2.alignment_bytes(&layout_cache.interner, ptr_bytes);
size2.cmp(&size1)
});
@ -3351,8 +3371,10 @@ fn specialize_proc_help<'a>(
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
let size1 =
layout1.alignment_bytes(&layout_cache.interner, ptr_bytes);
let size2 =
layout2.alignment_bytes(&layout_cache.interner, ptr_bytes);
size2.cmp(&size1)
});
@ -4595,6 +4617,7 @@ pub fn with_hole<'a>(
find_lambda_name(env, layout_cache, lambda_set, name, &[]);
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
&[],
@ -4647,6 +4670,7 @@ pub fn with_hole<'a>(
find_lambda_name(env, layout_cache, lambda_set, name, &[]);
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
&[],
@ -4891,6 +4915,7 @@ pub fn with_hole<'a>(
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
symbols.iter().copied(),
@ -5438,12 +5463,17 @@ where
.expect("layout problem for capture")
})
.collect_in::<Vec<_>>(env.arena);
lambda_set.find_lambda_name(function_name, &this_function_captures_layouts)
lambda_set.find_lambda_name(
&layout_cache.interner,
function_name,
&this_function_captures_layouts,
)
}
#[allow(clippy::too_many_arguments)]
fn construct_closure_data<'a, I>(
env: &mut Env<'a, '_>,
layout_cache: &LayoutCache<'a>,
lambda_set: LambdaSet<'a>,
name: LambdaName<'a>,
symbols: I,
@ -5474,8 +5504,8 @@ where
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
let size1 = layout1.alignment_bytes(&layout_cache.interner, ptr_bytes);
let size2 = layout2.alignment_bytes(&layout_cache.interner, ptr_bytes);
size2.cmp(&size1)
});
@ -5504,8 +5534,8 @@ where
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
let size1 = layout1.alignment_bytes(&layout_cache.interner, ptr_bytes);
let size2 = layout2.alignment_bytes(&layout_cache.interner, ptr_bytes);
size2.cmp(&size1)
});
@ -5517,7 +5547,7 @@ where
debug_assert_eq!(
Layout::struct_no_name_order(field_layouts),
lambda_set.runtime_representation()
lambda_set.runtime_representation(&layout_cache.interner)
);
let expr = Expr::Struct(symbols);
@ -5885,7 +5915,15 @@ fn tag_union_to_function<'a>(
let lambda_name =
find_lambda_name(env, layout_cache, lambda_set, proc_symbol, &[]);
debug_assert!(lambda_name.no_captures());
construct_closure_data(env, lambda_set, lambda_name, &[], assigned, hole)
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
&[],
assigned,
hole,
)
}
RawFunctionLayout::ZeroArgumentThunk(_) => unreachable!(),
}
@ -5932,7 +5970,7 @@ fn sorted_field_symbols<'a>(
}
};
let alignment = layout.alignment_bytes(env.target_info);
let alignment = layout.alignment_bytes(&layout_cache.interner, env.target_info);
let symbol = possible_reuse_symbol_or_specialize(env, procs, layout_cache, &arg.value, var);
field_symbols_temp.push((alignment, symbol, ((var, arg), &*env.arena.alloc(symbol))));
@ -7616,6 +7654,7 @@ fn specialize_symbol<'a>(
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
symbols.iter().copied(),
@ -7665,6 +7704,7 @@ fn specialize_symbol<'a>(
construct_closure_data(
env,
layout_cache,
lambda_set,
lambda_name,
&[],
@ -8065,7 +8105,15 @@ fn call_by_name_help<'a>(
let captured = &[];
debug_assert!(proc_name.no_captures());
construct_closure_data(env, lambda_set, proc_name, captured, assigned, hole)
construct_closure_data(
env,
layout_cache,
lambda_set,
proc_name,
captured,
assigned,
hole,
)
} else {
debug_assert_eq!(
argument_layouts.len(),
@ -8450,6 +8498,7 @@ fn call_specialized_proc<'a>(
let result = construct_closure_data(
env,
layout_cache,
lambda_set,
proc_name,
symbols.iter().copied(),
@ -8722,12 +8771,12 @@ fn from_can_pattern_help<'a>(
arguments.sort_by(|arg1, arg2| {
let size1 = layout_cache
.from_var(env.arena, arg1.0, env.subs)
.map(|x| x.alignment_bytes(env.target_info))
.map(|x| x.alignment_bytes(&layout_cache.interner, env.target_info))
.unwrap_or(0);
let size2 = layout_cache
.from_var(env.arena, arg2.0, env.subs)
.map(|x| x.alignment_bytes(env.target_info))
.map(|x| x.alignment_bytes(&layout_cache.interner, env.target_info))
.unwrap_or(0);
size2.cmp(&size1)
@ -8766,8 +8815,10 @@ fn from_can_pattern_help<'a>(
let layout2 =
layout_cache.from_var(env.arena, arg2.0, env.subs).unwrap();
let size1 = layout1.alignment_bytes(env.target_info);
let size2 = layout2.alignment_bytes(env.target_info);
let size1 =
layout1.alignment_bytes(&layout_cache.interner, env.target_info);
let size2 =
layout2.alignment_bytes(&layout_cache.interner, env.target_info);
size2.cmp(&size1)
});


@ -106,7 +106,7 @@ pub struct LayoutCache<'a> {
cache: std::vec::Vec<CacheLayer<LayoutResult<'a>>>,
raw_function_cache: std::vec::Vec<CacheLayer<RawFunctionLayoutResult<'a>>>,
pub(crate) interner: LayoutInterner<'a>,
pub interner: LayoutInterner<'a>,
/// Statistics on the usage of the layout cache.
#[cfg(debug_assertions)]
@ -720,21 +720,31 @@ pub enum UnionLayout<'a> {
}
impl<'a> UnionLayout<'a> {
pub fn to_doc<D, A>(self, alloc: &'a D, _parens: Parens) -> DocBuilder<'a, D, A>
pub fn to_doc<'b, D, A, I>(
self,
alloc: &'b D,
interner: &I,
_parens: Parens,
) -> DocBuilder<'b, D, A>
where
D: DocAllocator<'a, A>,
D: DocAllocator<'b, A>,
D::Doc: Clone,
A: Clone,
I: Interner<'a, Layout<'a>>,
{
use UnionLayout::*;
match self {
NonRecursive(tags) => {
let tags_doc = tags.iter().map(|fields| {
alloc.text("C ").append(alloc.intersperse(
fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
alloc.text("C ").append(
alloc.intersperse(
fields
.iter()
.map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
" ",
))
),
)
});
alloc
@ -744,10 +754,14 @@ impl<'a> UnionLayout<'a> {
}
Recursive(tags) => {
let tags_doc = tags.iter().map(|fields| {
alloc.text("C ").append(alloc.intersperse(
fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
alloc.text("C ").append(
alloc.intersperse(
fields
.iter()
.map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
" ",
))
),
)
});
alloc
.text("[<r>")
@ -755,10 +769,14 @@ impl<'a> UnionLayout<'a> {
.append(alloc.text("]"))
}
NonNullableUnwrapped(fields) => {
let fields_doc = alloc.text("C ").append(alloc.intersperse(
fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
let fields_doc = alloc.text("C ").append(
alloc.intersperse(
fields
.iter()
.map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
" ",
));
),
);
alloc
.text("[<rnnu>")
.append(fields_doc)
@ -772,7 +790,7 @@ impl<'a> UnionLayout<'a> {
alloc.intersperse(
other_fields
.iter()
.map(|x| x.to_doc(alloc, Parens::InTypeParam)),
.map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
" ",
),
);
@ -927,27 +945,39 @@ impl<'a> UnionLayout<'a> {
}
}
fn tags_alignment_bytes(tags: &[&[Layout]], target_info: TargetInfo) -> u32 {
fn tags_alignment_bytes<I>(
interner: &I,
tags: &[&'a [Layout<'a>]],
target_info: TargetInfo,
) -> u32
where
I: Interner<'a, Layout<'a>>,
{
tags.iter()
.map(|field_layouts| {
Layout::struct_no_name_order(field_layouts).alignment_bytes(target_info)
Layout::struct_no_name_order(field_layouts).alignment_bytes(interner, target_info)
})
.max()
.unwrap_or(0)
}
pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
let allocation = match self {
UnionLayout::NonRecursive(tags) => Self::tags_alignment_bytes(tags, target_info),
UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(tags, target_info),
UnionLayout::NonRecursive(tags) => {
Self::tags_alignment_bytes(interner, tags, target_info)
}
UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags, target_info),
UnionLayout::NonNullableUnwrapped(field_layouts) => {
Layout::struct_no_name_order(field_layouts).alignment_bytes(target_info)
Layout::struct_no_name_order(field_layouts).alignment_bytes(interner, target_info)
}
UnionLayout::NullableWrapped { other_tags, .. } => {
Self::tags_alignment_bytes(other_tags, target_info)
Self::tags_alignment_bytes(interner, other_tags, target_info)
}
UnionLayout::NullableUnwrapped { other_fields, .. } => {
Layout::struct_no_name_order(other_fields).alignment_bytes(target_info)
Layout::struct_no_name_order(other_fields).alignment_bytes(interner, target_info)
}
};
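A standalone sketch of what `tags_alignment_bytes` computes: the allocation alignment of a tag union is the maximum alignment over every tag's payload struct (field alignments are assumed numbers here; the real code derives them through the interner):

    fn main() {
        let tags: &[&[u32]] = &[&[8, 1], &[4, 4], &[1]];
        let alignment = tags
            .iter()
            .map(|fields| fields.iter().copied().max().unwrap_or(0))
            .max()
            .unwrap_or(0);
        assert_eq!(alignment, 8);
    }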
@ -956,8 +986,12 @@ impl<'a> UnionLayout<'a> {
}
/// Size of the data in memory, whether it's stack or heap (for non-null tag ids)
pub fn data_size_and_alignment(&self, target_info: TargetInfo) -> (u32, u32) {
let (data_width, data_align) = self.data_size_and_alignment_help_match(target_info);
pub fn data_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
where
I: Interner<'a, Layout<'a>>,
{
let (data_width, data_align) =
self.data_size_and_alignment_help_match(interner, target_info);
if self.stores_tag_id_as_data(target_info) {
use Discriminant::*;
@ -985,53 +1019,83 @@ impl<'a> UnionLayout<'a> {
/// Size of the data before the tag_id, if it exists.
/// Returns None if the tag_id is not stored as data in the layout.
pub fn data_size_without_tag_id(&self, target_info: TargetInfo) -> Option<u32> {
pub fn data_size_without_tag_id<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
where
I: Interner<'a, Layout<'a>>,
{
if !self.stores_tag_id_as_data(target_info) {
return None;
};
Some(self.data_size_and_alignment_help_match(target_info).0)
Some(
self.data_size_and_alignment_help_match(interner, target_info)
.0,
)
}
fn data_size_and_alignment_help_match(&self, target_info: TargetInfo) -> (u32, u32) {
fn data_size_and_alignment_help_match<I>(
&self,
interner: &I,
target_info: TargetInfo,
) -> (u32, u32)
where
I: Interner<'a, Layout<'a>>,
{
match self {
Self::NonRecursive(tags) => Layout::stack_size_and_alignment_slices(tags, target_info),
Self::Recursive(tags) => Layout::stack_size_and_alignment_slices(tags, target_info),
Self::NonRecursive(tags) => {
Layout::stack_size_and_alignment_slices(interner, tags, target_info)
}
Self::Recursive(tags) => {
Layout::stack_size_and_alignment_slices(interner, tags, target_info)
}
Self::NonNullableUnwrapped(fields) => {
Layout::stack_size_and_alignment_slices(&[fields], target_info)
Layout::stack_size_and_alignment_slices(interner, &[fields], target_info)
}
Self::NullableWrapped { other_tags, .. } => {
Layout::stack_size_and_alignment_slices(other_tags, target_info)
Layout::stack_size_and_alignment_slices(interner, other_tags, target_info)
}
Self::NullableUnwrapped { other_fields, .. } => {
Layout::stack_size_and_alignment_slices(&[other_fields], target_info)
Layout::stack_size_and_alignment_slices(interner, &[other_fields], target_info)
}
}
}
pub fn tag_id_offset(&self, target_info: TargetInfo) -> Option<u32> {
pub fn tag_id_offset<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
where
I: Interner<'a, Layout<'a>>,
{
match self {
UnionLayout::NonRecursive(tags)
| UnionLayout::Recursive(tags)
| UnionLayout::NullableWrapped {
other_tags: tags, ..
} => Some(Self::tag_id_offset_help(tags, target_info)),
} => Some(Self::tag_id_offset_help(interner, tags, target_info)),
UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => None,
}
}
fn tag_id_offset_help(layouts: &[&[Layout]], target_info: TargetInfo) -> u32 {
fn tag_id_offset_help<I>(
interner: &I,
layouts: &[&[Layout<'a>]],
target_info: TargetInfo,
) -> u32
where
I: Interner<'a, Layout<'a>>,
{
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(layouts, target_info);
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
round_up_to_alignment(data_width, data_align)
}
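The offset arithmetic in `tag_id_offset_help`, in isolation: the tag id is stored after the payload, rounded up to the payload's alignment. A minimal sketch assuming the usual round-up-to-multiple semantics for `round_up_to_alignment` (alignment >= 1):

    fn round_up_to_alignment(width: u32, alignment: u32) -> u32 {
        match width % alignment {
            0 => width,
            rem => width + alignment - rem,
        }
    }

    fn main() {
        // A 5-byte payload with 4-byte alignment stores its tag id at offset 8.
        assert_eq!(round_up_to_alignment(5, 4), 8);
    }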
/// Very important to use this when doing a memcpy!
fn stack_size_without_alignment(&self, target_info: TargetInfo) -> u32 {
fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
match self {
UnionLayout::NonRecursive(_) => {
let (width, align) = self.data_size_and_alignment(target_info);
let (width, align) = self.data_size_and_alignment(interner, target_info);
round_up_to_alignment(width, align)
}
UnionLayout::Recursive(_)
@ -1238,7 +1302,10 @@ pub enum ClosureCallOptions<'a> {
}
impl<'a> LambdaSet<'a> {
pub fn runtime_representation(&self) -> Layout<'a> {
pub fn runtime_representation<I>(&self, _interner: &I) -> Layout<'a>
where
I: Interner<'a, Layout<'a>>,
{
*self.representation
}
@ -1300,19 +1367,24 @@ impl<'a> LambdaSet<'a> {
}
/// Finds an alias name for a possible-multimorphic lambda variant in the lambda set.
pub fn find_lambda_name(
pub fn find_lambda_name<I>(
&self,
interner: &I,
function_symbol: Symbol,
captures_layouts: &[Layout],
) -> LambdaName<'a> {
) -> LambdaName<'a>
where
I: Interner<'a, Layout<'a>>,
{
debug_assert!(self.contains(function_symbol), "function symbol not in set");
let comparator = |other_name: Symbol, other_captures_layouts: &[Layout]| {
other_name == function_symbol
&& other_captures_layouts
.iter()
.zip(captures_layouts)
.all(|(other_layout, layout)| self.capture_layouts_eq(other_layout, layout))
&& other_captures_layouts.iter().zip(captures_layouts).all(
|(other_layout, layout)| {
self.capture_layouts_eq(interner, other_layout, layout)
},
)
};
let (name, layouts) = self
@ -1336,13 +1408,16 @@ impl<'a> LambdaSet<'a> {
/// Checks if two captured layouts are equivalent under the current lambda set.
/// Resolves recursive pointers to the layout of the lambda set.
fn capture_layouts_eq(&self, left: &Layout, right: &Layout) -> bool {
fn capture_layouts_eq<I>(&self, interner: &I, left: &Layout, right: &Layout) -> bool
where
I: Interner<'a, Layout<'a>>,
{
if left == right {
return true;
}
let left = if left == &Layout::RecursivePointer {
let runtime_repr = self.runtime_representation();
let runtime_repr = self.runtime_representation(interner);
debug_assert!(matches!(
runtime_repr,
Layout::Union(UnionLayout::Recursive(_) | UnionLayout::NullableUnwrapped { .. })
@ -1353,7 +1428,7 @@ impl<'a> LambdaSet<'a> {
};
let right = if right == &Layout::RecursivePointer {
let runtime_repr = self.runtime_representation();
let runtime_repr = self.runtime_representation(interner);
debug_assert!(matches!(
runtime_repr,
Layout::Union(UnionLayout::Recursive(_) | UnionLayout::NullableUnwrapped { .. })
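A toy model of this equivalence check: a `RecursivePointer` on either side is first replaced by the lambda set's runtime representation, then the two sides are compared structurally (stand-in types and assumed semantics, not the real roc_mono layouts):

    #[derive(PartialEq, Clone, Copy)]
    enum Capture {
        U64,
        RecursivePointer,
        RecUnion, // stands in for the recursive union representation
    }

    fn capture_layouts_eq(repr: Capture, left: Capture, right: Capture) -> bool {
        let resolve = |l: Capture| if l == Capture::RecursivePointer { repr } else { l };
        resolve(left) == resolve(right)
    }

    fn main() {
        let repr = Capture::RecUnion;
        assert!(capture_layouts_eq(repr, Capture::RecursivePointer, Capture::RecUnion));
        assert!(!capture_layouts_eq(repr, Capture::U64, Capture::RecUnion));
    }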
@ -1706,18 +1781,30 @@ impl<'a> LambdaSet<'a> {
}
}
pub fn stack_size(&self, target_info: TargetInfo) -> u32 {
self.representation.stack_size(target_info)
pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
self.representation.stack_size(interner, target_info)
}
pub fn contains_refcounted(&self) -> bool {
self.representation.contains_refcounted()
pub fn contains_refcounted<I>(&self, interner: &I) -> bool
where
I: Interner<'a, Layout<'a>>,
{
self.representation.contains_refcounted(interner)
}
pub fn safe_to_memcpy(&self) -> bool {
self.representation.safe_to_memcpy()
pub fn safe_to_memcpy<I>(&self, interner: &I) -> bool
where
I: Interner<'a, Layout<'a>>,
{
self.representation.safe_to_memcpy(interner)
}
pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
self.representation.alignment_bytes(target_info)
pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
self.representation.alignment_bytes(interner, target_info)
}
}
@ -2199,21 +2286,26 @@ impl<'a> Layout<'a> {
})
}
pub fn safe_to_memcpy(&self) -> bool {
pub fn safe_to_memcpy<I>(&self, interner: &I) -> bool
where
I: Interner<'a, Layout<'a>>,
{
use Layout::*;
match self {
Builtin(builtin) => builtin.safe_to_memcpy(),
Struct { field_layouts, .. } => field_layouts
.iter()
.all(|field_layout| field_layout.safe_to_memcpy()),
.all(|field_layout| field_layout.safe_to_memcpy(interner)),
Union(variant) => {
use UnionLayout::*;
match variant {
NonRecursive(tags) => tags
NonRecursive(tags) => tags.iter().all(|tag_layout| {
tag_layout
.iter()
.all(|tag_layout| tag_layout.iter().all(|field| field.safe_to_memcpy())),
.all(|field| field.safe_to_memcpy(interner))
}),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
@ -2223,7 +2315,9 @@ impl<'a> Layout<'a> {
}
}
}
LambdaSet(lambda_set) => lambda_set.runtime_representation().safe_to_memcpy(),
LambdaSet(lambda_set) => lambda_set
.runtime_representation(interner)
.safe_to_memcpy(interner),
Boxed(_) | RecursivePointer => {
// We cannot memcpy pointers, because then we would have the same pointer in multiple places!
false
@ -2273,7 +2367,10 @@ impl<'a> Layout<'a> {
}
}
pub fn is_passed_by_reference(&self, target_info: TargetInfo) -> bool {
pub fn is_passed_by_reference<I>(&self, interner: &I, target_info: TargetInfo) -> bool
where
I: Interner<'a, Layout<'a>>,
{
match self {
Layout::Builtin(builtin) => {
use Builtin::*;
@ -2291,29 +2388,38 @@ impl<'a> Layout<'a> {
}
Layout::Union(UnionLayout::NonRecursive(_)) => true,
Layout::LambdaSet(lambda_set) => lambda_set
.runtime_representation()
.is_passed_by_reference(target_info),
.runtime_representation(interner)
.is_passed_by_reference(interner, target_info),
_ => false,
}
}
pub fn stack_size(&self, target_info: TargetInfo) -> u32 {
let width = self.stack_size_without_alignment(target_info);
let alignment = self.alignment_bytes(target_info);
pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
let width = self.stack_size_without_alignment(interner, target_info);
let alignment = self.alignment_bytes(interner, target_info);
round_up_to_alignment(width, alignment)
}
pub fn stack_size_and_alignment(&self, target_info: TargetInfo) -> (u32, u32) {
let width = self.stack_size_without_alignment(target_info);
let alignment = self.alignment_bytes(target_info);
pub fn stack_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
where
I: Interner<'a, Layout<'a>>,
{
let width = self.stack_size_without_alignment(interner, target_info);
let alignment = self.alignment_bytes(interner, target_info);
let size = round_up_to_alignment(width, alignment);
(size, alignment)
}
/// Very important to use this when doing a memcpy!
pub fn stack_size_without_alignment(&self, target_info: TargetInfo) -> u32 {
pub fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
use Layout::*;
match self {
@ -2322,25 +2428,28 @@ impl<'a> Layout<'a> {
let mut sum = 0;
for field_layout in *field_layouts {
sum += field_layout.stack_size(target_info);
sum += field_layout.stack_size(interner, target_info);
}
sum
}
Union(variant) => variant.stack_size_without_alignment(target_info),
Union(variant) => variant.stack_size_without_alignment(interner, target_info),
LambdaSet(lambda_set) => lambda_set
.runtime_representation()
.stack_size_without_alignment(target_info),
.runtime_representation(interner)
.stack_size_without_alignment(interner, target_info),
RecursivePointer => target_info.ptr_width() as u32,
Boxed(_) => target_info.ptr_width() as u32,
}
}
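All of these signatures follow one shape: any query that may need to look through an interned layout now receives the interner. A minimal self-contained toy of that shape (these are stand-ins, not the real `Interner`/`Layout` from roc_mono, and padding/alignment is ignored):

    trait Interner {
        fn get(&self, key: usize) -> &Layout;
    }

    enum Layout {
        U8,
        U64,
        Struct(Vec<Layout>),
        Interned(usize), // resolved through the interner
    }

    impl Layout {
        fn stack_size<I: Interner>(&self, interner: &I) -> u32 {
            match self {
                Layout::U8 => 1,
                Layout::U64 => 8,
                Layout::Struct(fields) => fields.iter().map(|f| f.stack_size(interner)).sum(),
                Layout::Interned(key) => interner.get(*key).stack_size(interner),
            }
        }
    }

    struct VecInterner(Vec<Layout>);

    impl Interner for VecInterner {
        fn get(&self, key: usize) -> &Layout {
            &self.0[key]
        }
    }

    fn main() {
        let interner = VecInterner(vec![Layout::U64]);
        let layout = Layout::Struct(vec![Layout::U8, Layout::Interned(0)]);
        assert_eq!(layout.stack_size(&interner), 9);
    }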
pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
match self {
Layout::Struct { field_layouts, .. } => field_layouts
.iter()
.map(|x| x.alignment_bytes(target_info))
.map(|x| x.alignment_bytes(interner, target_info))
.max()
.unwrap_or(0),
@ -2354,7 +2463,7 @@ impl<'a> Layout<'a> {
.flat_map(|layouts| {
layouts
.iter()
.map(|layout| layout.alignment_bytes(target_info))
.map(|layout| layout.alignment_bytes(interner, target_info))
})
.max();
@ -2377,40 +2486,50 @@ impl<'a> Layout<'a> {
}
}
Layout::LambdaSet(lambda_set) => lambda_set
.runtime_representation()
.alignment_bytes(target_info),
.runtime_representation(interner)
.alignment_bytes(interner, target_info),
Layout::Builtin(builtin) => builtin.alignment_bytes(target_info),
Layout::RecursivePointer => target_info.ptr_width() as u32,
Layout::Boxed(_) => target_info.ptr_width() as u32,
}
}
pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
let ptr_width = target_info.ptr_width() as u32;
match self {
Layout::Builtin(builtin) => builtin.allocation_alignment_bytes(target_info),
Layout::Struct { .. } => self.alignment_bytes(target_info).max(ptr_width),
Layout::Union(union_layout) => union_layout.allocation_alignment_bytes(target_info),
Layout::Builtin(builtin) => builtin.allocation_alignment_bytes(interner, target_info),
Layout::Struct { .. } => self.alignment_bytes(interner, target_info).max(ptr_width),
Layout::Union(union_layout) => {
union_layout.allocation_alignment_bytes(interner, target_info)
}
Layout::LambdaSet(lambda_set) => lambda_set
.runtime_representation()
.allocation_alignment_bytes(target_info),
.runtime_representation(interner)
.allocation_alignment_bytes(interner, target_info),
Layout::RecursivePointer => unreachable!("should be looked up to get an actual layout"),
Layout::Boxed(inner) => inner.allocation_alignment_bytes(target_info),
Layout::Boxed(inner) => inner.allocation_alignment_bytes(interner, target_info),
}
}
pub fn stack_size_and_alignment_slices(
pub fn stack_size_and_alignment_slices<I>(
interner: &I,
slices: &[&[Self]],
target_info: TargetInfo,
) -> (u32, u32) {
) -> (u32, u32)
where
I: Interner<'a, Layout<'a>>,
{
let mut data_align = 1;
let mut data_width = 0;
for tag in slices {
let mut total = 0;
for layout in tag.iter() {
let (stack_size, alignment) = layout.stack_size_and_alignment(target_info);
let (stack_size, alignment) =
layout.stack_size_and_alignment(interner, target_info);
total += stack_size;
data_align = data_align.max(alignment);
}
@ -2443,12 +2562,17 @@ impl<'a> Layout<'a> {
/// Even if a value (say, a record) is not itself reference counted,
/// it may contain values/fields that are. Therefore when this record
/// goes out of scope, the refcount on those values/fields must be decremented.
pub fn contains_refcounted(&self) -> bool {
pub fn contains_refcounted<I>(&self, interner: &I) -> bool
where
I: Interner<'a, Layout<'a>>,
{
use Layout::*;
match self {
Builtin(builtin) => builtin.is_refcounted(),
Struct { field_layouts, .. } => field_layouts.iter().any(|f| f.contains_refcounted()),
Struct { field_layouts, .. } => field_layouts
.iter()
.any(|f| f.contains_refcounted(interner)),
Union(variant) => {
use UnionLayout::*;
@ -2456,43 +2580,55 @@ impl<'a> Layout<'a> {
NonRecursive(fields) => fields
.iter()
.flat_map(|ls| ls.iter())
.any(|f| f.contains_refcounted()),
.any(|f| f.contains_refcounted(interner)),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => true,
}
}
LambdaSet(lambda_set) => lambda_set.runtime_representation().contains_refcounted(),
LambdaSet(lambda_set) => lambda_set
.runtime_representation(interner)
.contains_refcounted(interner),
RecursivePointer => true,
Boxed(_) => true,
}
}
pub fn to_doc<D, A>(self, alloc: &'a D, parens: Parens) -> DocBuilder<'a, D, A>
pub fn to_doc<'b, D, A, I>(
self,
alloc: &'b D,
interner: &I,
parens: Parens,
) -> DocBuilder<'b, D, A>
where
D: DocAllocator<'a, A>,
D: DocAllocator<'b, A>,
D::Doc: Clone,
A: Clone,
I: Interner<'a, Layout<'a>>,
{
use Layout::*;
match self {
Builtin(builtin) => builtin.to_doc(alloc, parens),
Builtin(builtin) => builtin.to_doc(alloc, interner, parens),
Struct { field_layouts, .. } => {
let fields_doc = field_layouts.iter().map(|x| x.to_doc(alloc, parens));
let fields_doc = field_layouts
.iter()
.map(|x| x.to_doc(alloc, interner, parens));
alloc
.text("{")
.append(alloc.intersperse(fields_doc, ", "))
.append(alloc.text("}"))
}
Union(union_layout) => union_layout.to_doc(alloc, parens),
LambdaSet(lambda_set) => lambda_set.runtime_representation().to_doc(alloc, parens),
Union(union_layout) => union_layout.to_doc(alloc, interner, parens),
LambdaSet(lambda_set) => lambda_set
.runtime_representation(interner)
.to_doc(alloc, interner, parens),
RecursivePointer => alloc.text("*self"),
Boxed(inner) => alloc
.text("Boxed(")
.append(inner.to_doc(alloc, parens))
.append(inner.to_doc(alloc, interner, parens))
.append(")"),
}
}
@ -2509,9 +2645,12 @@ impl<'a> Layout<'a> {
}
}
pub fn runtime_representation(&self) -> Self {
pub fn runtime_representation<I>(&self, interner: &I) -> Self
where
I: Interner<'a, Layout<'a>>,
{
match self {
Layout::LambdaSet(lambda_set) => lambda_set.runtime_representation(),
Layout::LambdaSet(lambda_set) => lambda_set.runtime_representation(interner),
other => *other,
}
}
@ -2700,11 +2839,17 @@ impl<'a> Builtin<'a> {
}
}
pub fn to_doc<D, A>(self, alloc: &'a D, _parens: Parens) -> DocBuilder<'a, D, A>
pub fn to_doc<'b, D, A, I>(
self,
alloc: &'b D,
interner: &I,
_parens: Parens,
) -> DocBuilder<'b, D, A>
where
D: DocAllocator<'a, A>,
D: DocAllocator<'b, A>,
D::Doc: Clone,
A: Clone,
I: Interner<'a, Layout<'a>>,
{
use Builtin::*;
@ -2740,18 +2885,23 @@ impl<'a> Builtin<'a> {
Decimal => alloc.text("Decimal"),
Str => alloc.text("Str"),
List(layout) => alloc
List(layout) => {
alloc
.text("List ")
.append(layout.to_doc(alloc, Parens::InTypeParam)),
.append(layout.to_doc(alloc, interner, Parens::InTypeParam))
}
}
}
pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
where
I: Interner<'a, Layout<'a>>,
{
let ptr_width = target_info.ptr_width() as u32;
let allocation = match self {
Builtin::Str => ptr_width,
Builtin::List(e) => e.alignment_bytes(target_info).max(ptr_width),
Builtin::List(e) => e.alignment_bytes(interner, target_info).max(ptr_width),
// The following are usually not heap-allocated, but they might be when inside a Box.
Builtin::Int(int_width) => int_width.alignment_bytes(target_info).max(ptr_width),
Builtin::Float(float_width) => float_width.alignment_bytes(target_info).max(ptr_width),
@ -2947,7 +3097,14 @@ fn layout_from_flat_type<'a>(
}
sortables.sort_by(|(label1, layout1), (label2, layout2)| {
cmp_fields(label1, layout1, label2, layout2, target_info)
cmp_fields(
&env.cache.interner,
label1,
layout1,
label2,
layout2,
target_info,
)
});
let ordered_field_names =
@ -3044,9 +3201,14 @@ fn sort_record_fields_help<'a>(
sorted_fields.sort_by(
|(label1, _, res_layout1), (label2, _, res_layout2)| match res_layout1 {
Ok(layout1) | Err(layout1) => match res_layout2 {
Ok(layout2) | Err(layout2) => {
cmp_fields(label1, layout1, label2, layout2, target_info)
}
Ok(layout2) | Err(layout2) => cmp_fields(
&env.cache.interner,
label1,
layout1,
label2,
layout2,
target_info,
),
},
},
);
@ -3323,8 +3485,8 @@ where
}
layouts.sort_by(|layout1, layout2| {
let size1 = layout1.alignment_bytes(env.target_info);
let size2 = layout2.alignment_bytes(env.target_info);
let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
size2.cmp(&size1)
});
@ -3373,8 +3535,8 @@ where
}
arg_layouts.sort_by(|layout1, layout2| {
let size1 = layout1.alignment_bytes(env.target_info);
let size2 = layout2.alignment_bytes(env.target_info);
let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
size2.cmp(&size1)
});
@ -3477,8 +3639,8 @@ where
}
layouts.sort_by(|layout1, layout2| {
let size1 = layout1.alignment_bytes(env.target_info);
let size2 = layout2.alignment_bytes(env.target_info);
let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
size2.cmp(&size1)
});
@ -3569,8 +3731,8 @@ where
}
arg_layouts.sort_by(|layout1, layout2| {
let size1 = layout1.alignment_bytes(env.target_info);
let size2 = layout2.alignment_bytes(env.target_info);
let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
size2.cmp(&size1)
});
@ -3786,8 +3948,8 @@ where
}
tag_layout.sort_by(|layout1, layout2| {
let size1 = layout1.alignment_bytes(target_info);
let size2 = layout2.alignment_bytes(target_info);
let size1 = layout1.alignment_bytes(&env.cache.interner, target_info);
let size2 = layout2.alignment_bytes(&env.cache.interner, target_info);
size2.cmp(&size1)
});
@ -4086,15 +4248,19 @@ impl<'a> LayoutIds<'a> {
/// This is called by both code gen and glue, so that
/// their field orderings agree.
#[inline(always)]
pub fn cmp_fields<L: Ord>(
pub fn cmp_fields<'a, L: Ord, I>(
interner: &I,
label1: &L,
layout1: &Layout<'_>,
layout1: &Layout<'a>,
label2: &L,
layout2: &Layout<'_>,
layout2: &Layout<'a>,
target_info: TargetInfo,
) -> Ordering {
let size1 = layout1.alignment_bytes(target_info);
let size2 = layout2.alignment_bytes(target_info);
) -> Ordering
where
I: Interner<'a, Layout<'a>>,
{
let size1 = layout1.alignment_bytes(interner, target_info);
let size2 = layout2.alignment_bytes(interner, target_info);
size2.cmp(&size1).then(label1.cmp(label2))
}
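The comparator in isolation: larger alignment first, ties broken by field label, which is what keeps code gen and glue agreeing on field order. A runnable toy with assumed alignments:

    fn main() {
        let mut fields = vec![("y", 8u32), ("x", 8), ("z", 1), ("w", 4)];
        fields.sort_by(|(l1, s1), (l2, s2)| s2.cmp(s1).then(l1.cmp(l2)));
        let order: Vec<_> = fields.iter().map(|f| f.0).collect();
        assert_eq!(order, ["x", "y", "w", "z"]);
    }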
@ -4105,6 +4271,8 @@ mod test {
#[test]
fn width_and_alignment_union_empty_struct() {
let interner = SingleThreadedInterner::with_capacity(4);
let lambda_set = LambdaSet {
set: &[(Symbol::LIST_MAP, &[])],
representation: &Layout::UNIT,
@ -4117,12 +4285,14 @@ mod test {
let layout = Layout::Union(UnionLayout::NonRecursive(&tt));
let target_info = TargetInfo::default_x86_64();
assert_eq!(layout.stack_size(target_info), 1);
assert_eq!(layout.alignment_bytes(target_info), 1);
assert_eq!(layout.stack_size(&interner, target_info), 1);
assert_eq!(layout.alignment_bytes(&interner, target_info), 1);
}
#[test]
fn memcpy_size_result_u32_unit() {
let interner = SingleThreadedInterner::with_capacity(4);
let ok_tag = &[Layout::Builtin(Builtin::Int(IntWidth::U32))];
let err_tag = &[Layout::UNIT];
let tags = [ok_tag as &[_], err_tag as &[_]];
@ -4130,12 +4300,16 @@ mod test {
let layout = Layout::Union(union_layout);
let target_info = TargetInfo::default_x86_64();
assert_eq!(layout.stack_size_without_alignment(target_info), 8);
assert_eq!(
layout.stack_size_without_alignment(&interner, target_info),
8
);
}
#[test]
fn void_stack_size() {
let interner = SingleThreadedInterner::with_capacity(4);
let target_info = TargetInfo::default_x86_64();
assert_eq!(Layout::VOID.stack_size(target_info), 0);
assert_eq!(Layout::VOID.stack_size(&interner, target_info), 0);
}
}


@ -10,6 +10,7 @@ use crate::helpers::wasm::assert_evals_to;
#[cfg(test)]
use indoc::indoc;
use roc_mono::layout::STLayoutInterner;
#[cfg(all(test, any(feature = "gen-llvm", feature = "gen-wasm")))]
use roc_std::{RocList, RocStr, U128};
@ -18,14 +19,16 @@ fn width_and_alignment_u8_u8() {
use roc_mono::layout::Layout;
use roc_mono::layout::UnionLayout;
let interner = STLayoutInterner::with_capacity(4);
let t = &[Layout::u8()] as &[_];
let tt = [t, t];
let layout = Layout::Union(UnionLayout::NonRecursive(&tt));
let target_info = roc_target::TargetInfo::default_x86_64();
assert_eq!(layout.alignment_bytes(target_info), 1);
assert_eq!(layout.stack_size(target_info), 2);
assert_eq!(layout.alignment_bytes(&interner, target_info), 1);
assert_eq!(layout.stack_size(&interner, target_info), 2);
}
#[test]


@ -22,6 +22,7 @@ use roc_load::Threading;
use roc_module::symbol::Symbol;
use roc_mono::ir::Proc;
use roc_mono::ir::ProcLayout;
use roc_mono::layout::STLayoutInterner;
const TARGET_INFO: roc_target::TargetInfo = roc_target::TargetInfo::default_x86_64();
@ -122,6 +123,7 @@ fn compiles_to_ir(test_name: &str, src: &str) {
module_id: home,
procedures,
exposed_to_host,
layout_interner,
..
} = loaded;
@ -138,13 +140,14 @@ fn compiles_to_ir(test_name: &str, src: &str) {
let main_fn_symbol = exposed_to_host.values.keys().copied().next().unwrap();
verify_procedures(test_name, procedures, main_fn_symbol);
verify_procedures(test_name, layout_interner, procedures, main_fn_symbol);
}
#[cfg(debug_assertions)]
fn verify_procedures(
fn verify_procedures<'a>(
test_name: &str,
procedures: MutMap<(Symbol, ProcLayout<'_>), Proc<'_>>,
interner: STLayoutInterner<'a>,
procedures: MutMap<(Symbol, ProcLayout<'a>), Proc<'a>>,
main_fn_symbol: Symbol,
) {
let index = procedures
@ -154,7 +157,7 @@ fn verify_procedures(
let mut procs_string = procedures
.values()
.map(|proc| proc.to_pretty(200))
.map(|proc| proc.to_pretty(&interner, 200))
.collect::<Vec<_>>();
let main_fn = procs_string.swap_remove(index);
@ -169,6 +172,7 @@ fn verify_procedures(
std::fs::write(&path, result).unwrap();
use std::process::Command;
let is_tracked = Command::new("git")
.args(&["ls-files", "--error-unmatch", &path])
.output()


@ -325,7 +325,13 @@ impl Types {
}
}
pub fn add_named(&mut self, name: String, typ: RocType, layout: Layout<'_>) -> TypeId {
pub fn add_named<'a>(
&mut self,
interner: &LayoutInterner<'a>,
name: String,
typ: RocType,
layout: Layout<'a>,
) -> TypeId {
if let Some(existing_type_id) = self.types_by_name.get(&name) {
let existing_type = self.get_type(*existing_type_id);
@ -339,7 +345,7 @@ impl Types {
);
}
} else {
let id = self.add_anonymous(typ, layout);
let id = self.add_anonymous(interner, typ, layout);
self.types_by_name.insert(name, id);
@ -347,15 +353,21 @@ impl Types {
}
}
pub fn add_anonymous(&mut self, typ: RocType, layout: Layout<'_>) -> TypeId {
pub fn add_anonymous<'a>(
&mut self,
interner: &LayoutInterner<'a>,
typ: RocType,
layout: Layout<'a>,
) -> TypeId {
let id = TypeId(self.types.len());
assert!(id.0 <= TypeId::MAX.0);
self.types.push(typ);
self.sizes
.push(layout.stack_size_without_alignment(self.target));
self.aligns.push(layout.alignment_bytes(self.target));
.push(layout.stack_size_without_alignment(interner, self.target));
self.aligns
.push(layout.alignment_bytes(interner, self.target));
id
}
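A hypothetical call site for the new signature (it mirrors the real ones below): size and alignment are now computed once, at registration time, through the interner:

    let id = types.add_anonymous(&env.layout_cache.interner, RocType::Bool, layout);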
@ -774,6 +786,7 @@ fn add_type_help<'a>(
let name = format!("TODO_roc_function_{:?}", closure_var);
let fn_type_id = types.add_named(
&env.layout_cache.interner,
name.clone(),
RocType::Function {
name,
@ -795,9 +808,11 @@ fn add_type_help<'a>(
todo!()
}
Content::Structure(FlatType::Erroneous(_)) => todo!(),
Content::Structure(FlatType::EmptyRecord) => types.add_anonymous(RocType::Unit, layout),
Content::Structure(FlatType::EmptyRecord) => {
types.add_anonymous(&env.layout_cache.interner, RocType::Unit, layout)
}
Content::Structure(FlatType::EmptyTagUnion) => {
types.add_anonymous(RocType::EmptyTagUnion, layout)
types.add_anonymous(&env.layout_cache.interner, RocType::EmptyTagUnion, layout)
}
Content::Alias(name, alias_vars, real_var, _) => {
if name.is_builtin() {
@ -820,7 +835,7 @@ fn add_type_help<'a>(
}
}
types.add_anonymous(RocType::Bool, layout)
types.add_anonymous(&env.layout_cache.interner, RocType::Bool, layout)
}
Layout::Union(union_layout) if *name == Symbol::RESULT_RESULT => {
match union_layout {
@ -841,8 +856,11 @@ fn add_type_help<'a>(
env.layout_cache.from_var(env.arena, err_var, subs).unwrap();
let err_id = add_type_help(env, err_layout, err_var, None, types);
let type_id =
types.add_anonymous(RocType::RocResult(ok_id, err_id), layout);
let type_id = types.add_anonymous(
&env.layout_cache.interner,
RocType::RocResult(ok_id, err_id),
layout,
);
types.depends(type_id, ok_id);
types.depends(type_id, err_id);
@ -870,7 +888,11 @@ fn add_type_help<'a>(
Content::RangedNumber(_) => todo!(),
Content::Error => todo!(),
Content::RecursionVar { structure, .. } => {
let type_id = types.add_anonymous(RocType::RecursivePointer(TypeId::PENDING), layout);
let type_id = types.add_anonymous(
&env.layout_cache.interner,
RocType::RecursivePointer(TypeId::PENDING),
layout,
);
// These should be different Variables, but the same layout!
debug_assert_eq!(
@ -896,7 +918,7 @@ fn add_builtin_type<'a>(
var: Variable,
opt_name: Option<Symbol>,
types: &mut Types,
layout: Layout<'_>,
layout: Layout<'a>,
) -> TypeId {
use Content::*;
use FlatType::*;
@ -905,31 +927,87 @@ fn add_builtin_type<'a>(
match (builtin, builtin_type) {
(Builtin::Int(width), _) => match width {
U8 => types.add_anonymous(RocType::Num(RocNum::U8), layout),
U16 => types.add_anonymous(RocType::Num(RocNum::U16), layout),
U32 => types.add_anonymous(RocType::Num(RocNum::U32), layout),
U64 => types.add_anonymous(RocType::Num(RocNum::U64), layout),
U128 => types.add_anonymous(RocType::Num(RocNum::U128), layout),
I8 => types.add_anonymous(RocType::Num(RocNum::I8), layout),
I16 => types.add_anonymous(RocType::Num(RocNum::I16), layout),
I32 => types.add_anonymous(RocType::Num(RocNum::I32), layout),
I64 => types.add_anonymous(RocType::Num(RocNum::I64), layout),
I128 => types.add_anonymous(RocType::Num(RocNum::I128), layout),
U8 => types.add_anonymous(&env.layout_cache.interner, RocType::Num(RocNum::U8), layout),
U16 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::U16),
layout,
),
U32 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::U32),
layout,
),
U64 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::U64),
layout,
),
U128 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::U128),
layout,
),
I8 => types.add_anonymous(&env.layout_cache.interner, RocType::Num(RocNum::I8), layout),
I16 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::I16),
layout,
),
I32 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::I32),
layout,
),
I64 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::I64),
layout,
),
I128 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::I128),
layout,
),
},
(Builtin::Float(width), _) => match width {
F32 => types.add_anonymous(RocType::Num(RocNum::F32), layout),
F64 => types.add_anonymous(RocType::Num(RocNum::F64), layout),
F128 => types.add_anonymous(RocType::Num(RocNum::F128), layout),
F32 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::F32),
layout,
),
F64 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::F64),
layout,
),
F128 => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::F128),
layout,
),
},
(Builtin::Decimal, _) => types.add_anonymous(RocType::Num(RocNum::Dec), layout),
(Builtin::Bool, _) => types.add_anonymous(RocType::Bool, layout),
(Builtin::Str, _) => types.add_anonymous(RocType::RocStr, layout),
(Builtin::Decimal, _) => types.add_anonymous(
&env.layout_cache.interner,
RocType::Num(RocNum::Dec),
layout,
),
(Builtin::Bool, _) => {
types.add_anonymous(&env.layout_cache.interner, RocType::Bool, layout)
}
(Builtin::Str, _) => {
types.add_anonymous(&env.layout_cache.interner, RocType::RocStr, layout)
}
(Builtin::List(elem_layout), Structure(Apply(Symbol::LIST_LIST, args))) => {
let args = env.subs.get_subs_slice(*args);
debug_assert_eq!(args.len(), 1);
let elem_id = add_type_help(env, *elem_layout, args[0], opt_name, types);
let list_id = types.add_anonymous(RocType::RocList(elem_id), layout);
let list_id = types.add_anonymous(
&env.layout_cache.interner,
RocType::RocList(elem_id),
layout,
);
types.depends(list_id, elem_id);
@ -971,7 +1049,7 @@ fn add_builtin_type<'a>(
let key_id = add_type_help(env, field_layouts[0], key_var, opt_name, types);
let val_id = add_type_help(env, field_layouts[1], val_var, opt_name, types);
let dict_id = types.add_anonymous(RocType::RocDict(key_id, val_id), layout);
let dict_id = types.add_anonymous(
    &env.layout_cache.interner,
    RocType::RocDict(key_id, val_id),
    layout,
);
types.depends(dict_id, key_id);
types.depends(dict_id, val_id);
@ -1007,7 +1085,7 @@ fn add_builtin_type<'a>(
debug_assert_eq!(field_layouts.len(), 2);
let elem_id = add_type_help(env, field_layouts[0], elem_var, opt_name, types);
let set_id = types.add_anonymous(RocType::RocSet(elem_id), layout);
let set_id = types.add_anonymous(
    &env.layout_cache.interner,
    RocType::RocSet(elem_id),
    layout,
);
types.depends(set_id, elem_id);
@ -1029,12 +1107,12 @@ fn add_builtin_type<'a>(
}
}
fn add_struct<I, L, F>(
env: &mut Env<'_>,
fn add_struct<'a, I, L, F>(
env: &mut Env<'a>,
name: String,
fields: I,
types: &mut Types,
layout: Layout<'_>,
layout: Layout<'a>,
to_type: F,
) -> TypeId
where
@ -1059,6 +1137,7 @@ where
sortables.sort_by(|(label1, _, layout1), (label2, _, layout2)| {
cmp_fields(
&env.layout_cache.interner,
label1,
layout1,
label2,
@ -1076,7 +1155,12 @@ where
})
.collect::<Vec<(L, TypeId)>>();
types.add_named(name.clone(), to_type(name, fields), layout)
types.add_named(
&env.layout_cache.interner,
name.clone(),
to_type(name, fields),
layout,
)
}
fn add_tag_union<'a>(
@ -1108,7 +1192,9 @@ fn add_tag_union<'a>(
let discriminant_size = Discriminant::from_number_of_tags(tags.len())
.stack_size()
.max(1);
let discriminant_offset = union_layout.tag_id_offset(env.target).unwrap();
let discriminant_offset = union_layout
.tag_id_offset(&env.layout_cache.interner, env.target)
.unwrap();
RocTagUnion::NonRecursive {
name: name.clone(),
@ -1124,7 +1210,9 @@ fn add_tag_union<'a>(
union_tags_to_types(&name, union_tags, subs, env, types, layout, true);
let discriminant_size =
Discriminant::from_number_of_tags(tags.len()).stack_size();
let discriminant_offset = union_layout.tag_id_offset(env.target).unwrap();
let discriminant_offset = union_layout
.tag_id_offset(&env.layout_cache.interner, env.target)
.unwrap();
RocTagUnion::Recursive {
name: name.clone(),
@ -1163,7 +1251,9 @@ fn add_tag_union<'a>(
union_tags_to_types(&name, union_tags, subs, env, types, layout, true);
let discriminant_size =
Discriminant::from_number_of_tags(other_tags.len()).stack_size();
let discriminant_offset = union_layout.tag_id_offset(env.target).unwrap();
let discriminant_offset = union_layout
.tag_id_offset(&env.layout_cache.interner, env.target)
.unwrap();
// nullable_id refers to the index of the tag that is represented at runtime as NULL.
// For example, in `FingerTree a : [Empty, Single a, More (Some a) (FingerTree (Tuple a)) (Some a)]`,
@ -1269,7 +1359,7 @@ fn add_tag_union<'a>(
};
let typ = RocType::TagUnion(tag_union_type);
let type_id = types.add_named(name, typ, layout);
let type_id = types.add_named(&env.layout_cache.interner, name, typ, layout);
if let Some(rec_var) = rec_root {
env.known_recursive_types.insert(rec_var, type_id);
@ -1295,13 +1385,13 @@ fn add_int_enumeration(
}
}
fn union_tags_to_types(
fn union_tags_to_types<'a>(
name: &str,
union_tags: &UnionLabels<TagName>,
subs: &Subs,
env: &mut Env,
env: &mut Env<'a>,
types: &mut Types,
layout: Layout,
layout: Layout<'a>,
is_recursive: bool,
) -> Vec<(String, Option<TypeId>)> {
let mut tags: Vec<(String, Vec<Variable>)> = union_tags
@ -1348,12 +1438,12 @@ fn single_tag_payload_fields<'a>(
(tag_name, payload_fields)
}
fn tags_to_types(
fn tags_to_types<'a>(
name: &str,
tags: Vec<(String, Vec<Variable>)>,
env: &mut Env,
env: &mut Env<'a>,
types: &mut Types,
layout: Layout,
layout: Layout<'a>,
is_recursive: bool,
) -> Vec<(String, Option<TypeId>)> {
tags.into_iter()


@ -240,11 +240,11 @@ fn expr_of_tag<'a, 'env, M: ReplAppMemory>(
fn tag_id_from_data<'a, 'env, M: ReplAppMemory>(
env: &Env<'a, 'env>,
mem: &M,
union_layout: UnionLayout,
union_layout: UnionLayout<'a>,
data_addr: usize,
) -> i64 {
let offset = union_layout
.data_size_without_tag_id(env.target_info)
.data_size_without_tag_id(&env.layout_cache.interner, env.target_info)
.unwrap();
let tag_id_addr = data_addr + offset as usize;
@ -264,7 +264,7 @@ fn tag_id_from_data<'a, 'env, M: ReplAppMemory>(
fn tag_id_from_recursive_ptr<'a, M: ReplAppMemory>(
env: &Env<'a, '_>,
mem: &M,
union_layout: UnionLayout,
union_layout: UnionLayout<'a>,
rec_addr: usize,
) -> (i64, usize) {
let tag_in_ptr = union_layout.stores_tag_id_in_pointer(env.target_info);
@ -364,7 +364,11 @@ fn jit_to_ast_help<'a, A: ReplApp<'a>>(
))
}
Layout::Struct { field_layouts, .. } => {
let target_info = env.target_info;
let fields = [Layout::u64(), *layout];
let layout = Layout::struct_no_name_order(env.arena.alloc(fields));
let result_stack_size = layout.stack_size(&env.layout_cache.interner, env.target_info);
let struct_addr_to_ast = |mem: &'a A::Memory, addr: usize| match raw_content {
Content::Structure(FlatType::Record(fields, _)) => {
Ok(struct_to_ast(env, mem, addr, *fields))
@ -410,11 +414,6 @@ fn jit_to_ast_help<'a, A: ReplApp<'a>>(
}
};
let fields = [Layout::u64(), *layout];
let layout = Layout::struct_no_name_order(&fields);
let result_stack_size = layout.stack_size(target_info);
app.call_function_dynamic_size(
main_fn_name,
result_stack_size as usize,
@ -422,7 +421,7 @@ fn jit_to_ast_help<'a, A: ReplApp<'a>>(
)
}
Layout::Union(UnionLayout::NonRecursive(_)) => {
let size = layout.stack_size(env.target_info);
let size = layout.stack_size(&env.layout_cache.interner, env.target_info);
Ok(app.call_function_dynamic_size(
main_fn_name,
size as usize,
@ -442,7 +441,7 @@ fn jit_to_ast_help<'a, A: ReplApp<'a>>(
| Layout::Union(UnionLayout::NonNullableUnwrapped(_))
| Layout::Union(UnionLayout::NullableUnwrapped { .. })
| Layout::Union(UnionLayout::NullableWrapped { .. }) => {
let size = layout.stack_size(env.target_info);
let size = layout.stack_size(&env.layout_cache.interner, env.target_info);
Ok(app.call_function_dynamic_size(
main_fn_name,
size as usize,
@ -463,7 +462,7 @@ fn jit_to_ast_help<'a, A: ReplApp<'a>>(
}
Layout::LambdaSet(_) => Ok(OPAQUE_FUNCTION),
Layout::Boxed(_) => {
let size = layout.stack_size(env.target_info);
let size = layout.stack_size(&env.layout_cache.interner, env.target_info);
Ok(app.call_function_dynamic_size(
main_fn_name,
size as usize,
@ -866,7 +865,7 @@ fn list_to_ast<'a, M: ReplAppMemory>(
let arena = env.arena;
let mut output = Vec::with_capacity_in(len, arena);
let elem_size = elem_layout.stack_size(env.target_info) as usize;
let elem_size = elem_layout.stack_size(&env.layout_cache.interner, env.target_info) as usize;
for index in 0..len {
let offset_bytes = index * elem_size;
@ -948,7 +947,7 @@ where
output.push(&*arena.alloc(loc_expr));
// Advance the field pointer to the next field.
field_addr += layout.stack_size(env.target_info) as usize;
field_addr += layout.stack_size(&env.layout_cache.interner, env.target_info) as usize;
}
output
@ -1041,7 +1040,8 @@ fn struct_to_ast<'a, 'env, M: ReplAppMemory>(
output.push(loc_field);
// Advance the field pointer to the next field.
field_addr += field_layout.stack_size(env.target_info) as usize;
field_addr +=
field_layout.stack_size(&env.layout_cache.interner, env.target_info) as usize;
}
let output = output.into_bump_slice();
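The traversal pattern used here, in isolation: walk a struct's payload by advancing an address by each field's stack size (sizes assumed; the real ones come from `stack_size(&env.layout_cache.interner, env.target_info)`, and real layouts may include padding):

    fn main() {
        let field_sizes = [8usize, 4, 4]; // assumed per-field stack sizes
        let mut field_addr = 0x2000usize;
        let mut addrs = Vec::new();
        for size in field_sizes {
            addrs.push(field_addr);
            field_addr += size; // advance to the next field
        }
        assert_eq!(addrs, [0x2000, 0x2008, 0x200c]);
    }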


@ -235,6 +235,7 @@ pub async fn entrypoint_from_js(src: String) -> Result<String, String> {
wasm32_result::insert_wrapper_for_layout(
arena,
&layout_interner,
&mut module,
WRAPPER_NAME,
main_fn_index,