Push interned layouts as mut throughout the backend, and intern box layouts

Ayaz Hafiz 2022-12-28 18:51:26 -06:00
parent dd6a72fc46
commit 7ab7fdfa7b
26 changed files with 769 additions and 375 deletions
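The gist of the change, as a minimal self-contained sketch (the types below are stand-ins, not roc's real `WasmBackend`, `STLayoutInterner`, or `InLayout`): the backend now carries a mutable borrow of the layout interner under a second lifetime, which is why every `WasmBackend<'a>` in the diff becomes `WasmBackend<'a, '_>` and every `backend.env.layout_interner` becomes `backend.layout_interner`; and `Layout::Boxed` now holds an interned layout id rather than an arena reference.

```rust
// Stand-in types only; roc's real definitions are richer.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct InLayout(u32); // an interned layout id

#[derive(Clone, PartialEq, Debug)]
enum Layout {
    I64,
    Boxed(InLayout), // previously `Boxed(&'a Layout<'a>)`, an arena reference
}

#[derive(Default)]
struct LayoutInterner {
    store: Vec<Layout>,
}

impl LayoutInterner {
    // Deduplicating insert: equal layouts map to the same id.
    fn insert(&mut self, layout: Layout) -> InLayout {
        if let Some(i) = self.store.iter().position(|l| *l == layout) {
            return InLayout(i as u32);
        }
        self.store.push(layout);
        InLayout(self.store.len() as u32 - 1)
    }
}

// The second lifetime 'r is the mutable borrow of the interner; it is why
// signatures change from `WasmBackend<'a>` to `WasmBackend<'a, '_>` below.
struct WasmBackend<'a, 'r> {
    layout_interner: &'r mut LayoutInterner,
    _arena: std::marker::PhantomData<&'a ()>,
}

fn box_layout(backend: &mut WasmBackend<'_, '_>, lay: Layout) -> Layout {
    // call sites now say `backend.layout_interner`, not `backend.env.layout_interner`
    Layout::Boxed(backend.layout_interner.insert(lay))
}

fn main() {
    let mut interner = LayoutInterner::default();
    let mut backend = WasmBackend {
        layout_interner: &mut interner,
        _arena: std::marker::PhantomData,
    };
    assert_eq!(box_layout(&mut backend, Layout::I64), Layout::Boxed(InLayout(0)));
}
```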


@@ -2,6 +2,7 @@ use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_builtins::bitcode::{self, FloatWidth, IntWidth};
use roc_error_macros::internal_error;
+use roc_intern::Interner;
use roc_module::low_level::LowLevel;
use roc_module::symbol::Symbol;
use roc_mono::code_gen_help::HelperOp;
@@ -34,7 +35,7 @@ enum CodeGenNumType {
}
impl CodeGenNumType {
-pub fn for_symbol(backend: &WasmBackend<'_>, symbol: Symbol) -> Self {
+pub fn for_symbol(backend: &WasmBackend<'_, '_>, symbol: Symbol) -> Self {
Self::from(backend.storage.get(&symbol))
}
}
@@ -124,7 +125,7 @@ fn layout_is_signed_int(layout: &Layout) -> bool {
}
}
-fn symbol_is_signed_int(backend: &WasmBackend<'_>, symbol: Symbol) -> bool {
+fn symbol_is_signed_int(backend: &WasmBackend<'_, '_>, symbol: Symbol) -> bool {
layout_is_signed_int(&backend.storage.symbol_layouts[&symbol])
}
@@ -141,18 +142,18 @@ impl<'a> LowLevelCall<'a> {
/// For numerical ops, this just pushes the arguments to the Wasm VM's value stack
/// It implements the calling convention used by Zig for both numbers and structs
/// Result is the type signature of the call
-fn load_args(&self, backend: &mut WasmBackend<'a>) -> (usize, bool, bool) {
+fn load_args(&self, backend: &mut WasmBackend<'a, '_>) -> (usize, bool, bool) {
backend.storage.load_symbols_for_call(
backend.env.arena,
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
)
}
-fn load_args_and_call_zig(&self, backend: &mut WasmBackend<'a>, name: &'a str) {
+fn load_args_and_call_zig(&self, backend: &mut WasmBackend<'a, '_>, name: &'a str) {
let (num_wasm_args, has_return_val, ret_zig_packed_struct) = self.load_args(backend);
backend.call_host_fn_after_loading_args(name, num_wasm_args, has_return_val);
@@ -182,7 +183,7 @@ impl<'a> LowLevelCall<'a> {
/// This may seem like deliberately introducing an error!
/// But we want all targets to behave the same, and hash algos rely on wrapping.
/// Discussion: https://github.com/roc-lang/roc/pull/2117#discussion_r760723063
-fn wrap_small_int(&self, backend: &mut WasmBackend<'a>, int_width: IntWidth) {
+fn wrap_small_int(&self, backend: &mut WasmBackend<'a, '_>, int_width: IntWidth) {
let bits = 8 * int_width.stack_size() as i32;
let shift = 32 - bits;
if shift <= 0 {
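A worked instance of the wrapping described above (numbers are illustrative, not from the commit):

```rust
// A u8 addition whose raw i32 result exceeds 8 bits, wrapped by shifting
// the high bits out and back, as `wrap_small_int` does.
fn wrap_u8_in_i32(raw: i32) -> i32 {
    let bits = 8;
    let shift = 32 - bits;
    // logical right shift, as Wasm's i32.shr_u; signed ints use i32.shr_s
    ((raw << shift) as u32 >> shift) as i32
}

fn main() {
    let raw = 200 + 100; // the Wasm VM computes 300 in an i32
    assert_eq!(wrap_u8_in_i32(raw), 44); // 300 mod 256, i.e. u8 wrapping
    assert_eq!(200u8.wrapping_add(100), 44);
}
```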
@@ -200,7 +201,7 @@ impl<'a> LowLevelCall<'a> {
}
/// Main entrypoint from WasmBackend
-pub fn generate(&self, backend: &mut WasmBackend<'a>) {
+pub fn generate(&self, backend: &mut WasmBackend<'a, '_>) {
use CodeGenNumType::*;
use LowLevel::*;
@@ -281,7 +282,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
backend.code_builder.i32_const(UPDATE_MODE_IMMUTABLE);
@@ -360,7 +361,7 @@ impl<'a> LowLevelCall<'a> {
.load_symbols(&mut backend.code_builder, &[index]);
let elem_size = self
.ret_layout
-.stack_size(backend.env.layout_interner, TARGET_INFO);
+.stack_size(backend.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_size as i32);
backend.code_builder.i32_mul(); // index*size
@@ -418,7 +419,7 @@ impl<'a> LowLevelCall<'a> {
} if value_layout == *list_elem => {
let list_offset = 0;
let elem_offset = Layout::Builtin(Builtin::List(list_elem))
-.stack_size(backend.env.layout_interner, TARGET_INFO);
+.stack_size(backend.layout_interner, TARGET_INFO);
(list_offset, elem_offset, value_layout)
}
Layout::Struct {
@@ -426,7 +427,7 @@ impl<'a> LowLevelCall<'a> {
..
} if value_layout == *list_elem => {
let list_offset =
-value_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
+value_layout.stack_size(backend.layout_interner, TARGET_INFO);
let elem_offset = 0;
(list_offset, elem_offset, value_layout)
}
@@ -434,7 +435,7 @@ impl<'a> LowLevelCall<'a> {
};
let (elem_width, elem_alignment) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Ensure the new element is stored in memory so we can pass a pointer to Zig
let (new_elem_local, new_elem_offset, _) =
@@ -484,7 +485,7 @@ impl<'a> LowLevelCall<'a> {
let capacity: Symbol = self.arguments[0];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Zig arguments          Wasm types
// (return pointer)       i32
@@ -515,14 +516,14 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
self.arguments,
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
// Load monomorphization constants
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
backend.code_builder.i32_const(elem_align as i32);
backend.code_builder.i32_const(elem_width as i32);
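For a concrete element type, the two "monomorphization constants" pushed above look like this (sizes assumed for a 32-bit wasm target, not quoted from roc's layout tables):

```rust
// Returns (alignment, width) in bytes, in the order pushed above.
fn monomorphization_constants(elem: &str) -> (i32, i32) {
    match elem {
        "U8" => (1, 1),  // i32.const 1; i32.const 1
        "U64" => (8, 8), // i32.const 8; i32.const 8
        "F32" => (4, 4),
        other => unimplemented!("element type {other}"),
    }
}
```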
@@ -537,7 +538,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (spare_local, spare_offset, _) = ensure_symbol_is_in_memory(
backend,
spare,
@@ -559,7 +560,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -585,7 +586,7 @@ impl<'a> LowLevelCall<'a> {
let elem: Symbol = self.arguments[1];
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
-let elem_width = elem_layout.stack_size(backend.env.layout_interner, TARGET_INFO);
+let elem_width = elem_layout.stack_size(backend.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@@ -601,7 +602,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -623,7 +624,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (elem_local, elem_offset, _) =
ensure_symbol_is_in_memory(backend, elem, *elem_layout, backend.env.arena);
@@ -640,7 +641,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -665,7 +666,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@@ -690,7 +691,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -710,7 +711,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// The refcount function receives a pointer to an element in the list
// This is the same as a Struct containing the element
@@ -735,7 +736,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -756,7 +757,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout);
let (elem_width, elem_align) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
// Zig arguments          Wasm types
// (return pointer)       i32
@@ -773,7 +774,7 @@ impl<'a> LowLevelCall<'a> {
&mut backend.code_builder,
&[list],
self.ret_symbol,
-&WasmLayout::new(backend.env.layout_interner, &self.ret_layout),
+&WasmLayout::new(backend.layout_interner, &self.ret_layout),
CallConv::Zig,
);
@@ -1631,7 +1632,7 @@ impl<'a> LowLevelCall<'a> {
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout
-.stack_size(backend.env.layout_interner, TARGET_INFO)
+.stack_size(backend.layout_interner, TARGET_INFO)
as i32;
if bit_width < 32 && !symbol_is_signed_int(backend, num) {
// Sign-extend the number by shifting left and right again
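The shift trick the comment describes, written out in plain Rust:

```rust
// Widening an i8 that lives in an i32: push its sign bit to the top with a
// left shift, then drag it back down with an arithmetic right shift.
fn sign_extend_i8_in_i32(raw: i32) -> i32 {
    let shift = 32 - 8;
    (raw << shift) >> shift // >> on i32 is arithmetic, like Wasm's i32.shr_s
}

fn main() {
    assert_eq!(sign_extend_i8_in_i32(0xFF), -1); // 0xFF is -1 as an i8
    assert_eq!(sign_extend_i8_in_i32(0x7F), 127);
}
```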
@@ -1680,7 +1681,7 @@ impl<'a> LowLevelCall<'a> {
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout
-.stack_size(backend.env.layout_interner, TARGET_INFO);
+.stack_size(backend.layout_interner, TARGET_INFO);
if bit_width < 32 && symbol_is_signed_int(backend, num) {
let mask = (1 << bit_width) - 1;
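And the complementary masking case, sketched the same way:

```rust
// The mask built above zeroes everything outside the small int's bit width,
// recovering the unsigned bit pattern of a sign-extended value.
fn unsigned_bits_of_i8(num: i32) -> i32 {
    let bit_width = 8;
    let mask = (1 << bit_width) - 1; // 0xFF
    num & mask
}

fn main() {
    assert_eq!(unsigned_bits_of_i8(-1), 255); // -1i8 and 255u8 share one byte
}
```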
@@ -1872,11 +1873,11 @@ impl<'a> LowLevelCall<'a> {
/// Equality and inequality
/// These can operate on any data type (except functions) so they're more complex than other operators.
-fn eq_or_neq(&self, backend: &mut WasmBackend<'a>) {
+fn eq_or_neq(&self, backend: &mut WasmBackend<'a, '_>) {
let arg_layout = backend.storage.symbol_layouts[&self.arguments[0]]
-.runtime_representation(backend.env.layout_interner);
+.runtime_representation(backend.layout_interner);
let other_arg_layout = backend.storage.symbol_layouts[&self.arguments[1]]
-.runtime_representation(backend.env.layout_interner);
+.runtime_representation(backend.layout_interner);
debug_assert!(
arg_layout == other_arg_layout,
"Cannot do `==` comparison on different types: {:?} vs {:?}",
@@ -1941,7 +1942,7 @@ impl<'a> LowLevelCall<'a> {
}
}
-fn eq_or_neq_number(&self, backend: &mut WasmBackend<'a>) {
+fn eq_or_neq_number(&self, backend: &mut WasmBackend<'a, '_>) {
use StoredValue::*;
match backend.storage.get(&self.arguments[0]).to_owned() {
@@ -1986,7 +1987,7 @@ impl<'a> LowLevelCall<'a> {
/// Takes care of loading the arguments
fn eq_num128(
&self,
-backend: &mut WasmBackend<'a>,
+backend: &mut WasmBackend<'a, '_>,
format: StackMemoryFormat,
locations: [StackMemoryLocation; 2],
) {
@@ -2004,7 +2005,7 @@ impl<'a> LowLevelCall<'a> {
/// Check that two 128-bit numbers contain the same bytes
/// Loads *half* an argument at a time
/// (Don't call "load arguments" or "load symbols" helpers before this, it'll just waste instructions)
-fn eq_num128_bytes(backend: &mut WasmBackend<'a>, locations: [StackMemoryLocation; 2]) {
+fn eq_num128_bytes(backend: &mut WasmBackend<'a, '_>, locations: [StackMemoryLocation; 2]) {
let (local0, offset0) = locations[0].local_and_offset(backend.storage.stack_frame_pointer);
let (local1, offset1) = locations[1].local_and_offset(backend.storage.stack_frame_pointer);
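The same comparison expressed in plain Rust (a sketch of the strategy, not the backend's code):

```rust
// A 128-bit value is compared as two 64-bit halves (two i64.eq), then the
// two boolean results are combined (one i32.and).
fn eq_u128_by_halves(a: u128, b: u128) -> bool {
    let eq_lo = (a as u64) == (b as u64);
    let eq_hi = ((a >> 64) as u64) == ((b >> 64) as u64);
    eq_lo & eq_hi
}

fn main() {
    assert!(eq_u128_by_halves(u128::MAX, u128::MAX));
    assert!(!eq_u128_by_halves(1, 1u128 << 64));
}
```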
@@ -2026,7 +2027,7 @@ impl<'a> LowLevelCall<'a> {
backend.code_builder.i32_and();
}
-fn num_to_str(&self, backend: &mut WasmBackend<'a>) {
+fn num_to_str(&self, backend: &mut WasmBackend<'a, '_>) {
let arg_layout = backend.storage.symbol_layouts[&self.arguments[0]];
match arg_layout {
Layout::Builtin(Builtin::Int(width)) => {
@@ -2051,7 +2052,7 @@ impl<'a> LowLevelCall<'a> {
}
/// Helper for NumIsFinite op, and also part of Eq/NotEq
-fn num_is_finite(backend: &mut WasmBackend<'_>, argument: Symbol) {
+fn num_is_finite(backend: &mut WasmBackend<'_, '_>, argument: Symbol) {
use StoredValue::*;
let stored = backend.storage.get(&argument).to_owned();
match stored {
@@ -2094,7 +2095,7 @@ fn num_is_finite(backend: &mut WasmBackend<'_>, argument: Symbol) {
}
pub fn call_higher_order_lowlevel<'a>(
-backend: &mut WasmBackend<'a>,
+backend: &mut WasmBackend<'a, '_>,
return_sym: Symbol,
return_layout: &Layout<'a>,
higher_order: &'a HigherOrderLowLevel<'a>,
@@ -2131,12 +2132,9 @@ pub fn call_higher_order_lowlevel<'a>(
let (closure_data_layout, closure_data_exists) =
match backend.storage.symbol_layouts[captured_environment] {
Layout::LambdaSet(lambda_set) => {
-if lambda_set
-.is_represented(backend.env.layout_interner)
-.is_some()
-{
+if lambda_set.is_represented(backend.layout_interner).is_some() {
(
-lambda_set.runtime_representation(backend.env.layout_interner),
+lambda_set.runtime_representation(backend.layout_interner),
true,
)
} else {
@@ -2162,7 +2160,7 @@ pub fn call_higher_order_lowlevel<'a>(
// make sure that the wrapping struct is available in stack memory, so we can hand out a
// pointer to it.
let wrapped_storage = backend.storage.allocate_var(
-backend.env.layout_interner,
+backend.layout_interner,
wrapped_captures_layout,
wrapped_closure_data_sym,
crate::storage::StoredVarKind::Variable,
@@ -2226,17 +2224,19 @@ pub fn call_higher_order_lowlevel<'a>(
argument_layouts.len()
};
+let boxed_closure_arg_layouts =
+argument_layouts.iter().take(n_non_closure_args).map(|lay| {
+let lay_in = backend.layout_interner.insert(lay);
+Layout::Boxed(lay_in)
+});
wrapper_arg_layouts.push(wrapped_captures_layout);
-wrapper_arg_layouts.extend(
-argument_layouts
-.iter()
-.take(n_non_closure_args)
-.map(Layout::Boxed),
-);
+wrapper_arg_layouts.extend(boxed_closure_arg_layouts);
match helper_proc_source {
ProcSource::HigherOrderMapper(_) => {
// Our convention for mappers is that they write to the heap via the last argument
+let result_layout = backend.layout_interner.insert(result_layout);
wrapper_arg_layouts.push(Layout::Boxed(result_layout));
ProcLayout {
arguments: wrapper_arg_layouts.into_bump_slice(),
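The hunk above is the "intern box layouts" half of the commit. Restated as a hypothetical helper (not roc's code), reusing the stand-in `Layout` and `LayoutInterner` types from the sketch at the top:

```rust
// Non-closure arguments are interned and passed boxed; mappers get one
// extra boxed slot through which they write their result to the heap.
fn push_wrapper_args(
    interner: &mut LayoutInterner,
    wrapper_args: &mut Vec<Layout>,
    argument_layouts: &[Layout],
    n_non_closure_args: usize,
    result_layout: Layout,
) {
    wrapper_args.extend(
        argument_layouts
            .iter()
            .take(n_non_closure_args)
            .map(|lay| Layout::Boxed(interner.insert(lay.clone()))),
    );
    // mapper convention: the result is written via the last (boxed) argument
    wrapper_args.push(Layout::Boxed(interner.insert(result_layout)));
}
```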
@@ -2326,7 +2326,7 @@ pub fn call_higher_order_lowlevel<'a>(
ListSortWith { xs } => {
let elem_layout = unwrap_list_elem_layout(backend.storage.symbol_layouts[xs]);
let (element_width, alignment) =
-elem_layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@@ -2371,7 +2371,7 @@ fn unwrap_list_elem_layout(list_layout: Layout<'_>) -> &Layout<'_> {
#[allow(clippy::too_many_arguments)]
fn list_map_n<'a>(
zig_fn_name: &'static str,
-backend: &mut WasmBackend<'a>,
+backend: &mut WasmBackend<'a, '_>,
arg_symbols: &[Symbol],
return_sym: Symbol,
return_layout: Layout<'a>,
@@ -2390,7 +2390,7 @@ fn list_map_n<'a>(
let elem_ret = unwrap_list_elem_layout(return_layout);
let (elem_ret_size, elem_ret_align) =
-elem_ret.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+elem_ret.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let cb = &mut backend.code_builder;
@@ -2411,7 +2411,7 @@ fn list_map_n<'a>(
cb.i32_const(owns_captured_environment as i32);
cb.i32_const(elem_ret_align as i32);
for el in arg_elem_layouts.iter() {
-cb.i32_const(el.stack_size(backend.env.layout_interner, TARGET_INFO) as i32);
+cb.i32_const(el.stack_size(backend.layout_interner, TARGET_INFO) as i32);
}
cb.i32_const(elem_ret_size as i32);
@@ -2438,7 +2438,7 @@ fn list_map_n<'a>(
}
fn ensure_symbol_is_in_memory<'a>(
-backend: &mut WasmBackend<'a>,
+backend: &mut WasmBackend<'a, '_>,
symbol: Symbol,
layout: Layout<'a>,
arena: &'a Bump,
@@ -2451,7 +2451,7 @@ fn ensure_symbol_is_in_memory<'a>(
}
_ => {
let (width, alignment) =
-layout.stack_size_and_alignment(backend.env.layout_interner, TARGET_INFO);
+layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
let (frame_ptr, offset) = backend
.storage
.allocate_anonymous_stack_memory(width, alignment);