Merge branch 'trunk' into wasm_arrays

rvcas 2021-12-16 21:07:12 -05:00
commit c14b4b81e0
82 changed files with 1947 additions and 1315 deletions

View file

@@ -1,12 +1,13 @@
use bumpalo::{self, collections::Vec};
use code_builder::Align;
use roc_builtins::bitcode::IntWidth;
use roc_collections::all::MutMap;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::gen_refcount::{RefcountProcGenerator, REFCOUNT_MAX};
use roc_mono::ir::{CallType, Expr, JoinPointId, Literal, Proc, Stmt};
use roc_mono::layout::{Builtin, Layout, LayoutIds};
use roc_mono::layout::{Builtin, Layout, LayoutIds, TagIdIntType, UnionLayout};
use roc_reporting::internal_error;
use crate::layout::{CallConv, ReturnMethod, WasmLayout};
@@ -58,6 +59,8 @@ pub struct WasmBackend<'a> {
/// how many blocks deep are we (used for jumps)
block_depth: u32,
joinpoint_label_map: MutMap<JoinPointId, (u32, Vec<'a, StoredValue>)>,
debug_current_proc_index: usize,
}
impl<'a> WasmBackend<'a> {
@@ -149,6 +152,8 @@ impl<'a> WasmBackend<'a> {
code_builder: CodeBuilder::new(arena),
storage: Storage::new(arena),
symbol_layouts: MutMap::default(),
debug_current_proc_index: 0,
}
}
@@ -203,6 +208,7 @@ impl<'a> WasmBackend<'a> {
pub fn build_proc(&mut self, proc: &Proc<'a>) {
// println!("\ngenerating procedure {:?}\n", proc.name);
self.debug_current_proc_index += 1;
self.start_proc(proc);
@@ -293,18 +299,12 @@ impl<'a> WasmBackend<'a> {
self.build_expr(sym, expr, layout, &sym_storage);
// For primitives, we record that this symbol is at the top of the VM stack
// (For other values, we wrote to memory and there's nothing on the VM stack)
if let WasmLayout::Primitive(value_type, size) = wasm_layout {
let vm_state = self.code_builder.set_top_symbol(*sym);
self.storage.symbol_storage_map.insert(
*sym,
StoredValue::VirtualMachineStack {
vm_state,
value_type,
size,
},
);
// If this value is stored in the VM stack, we need code_builder to track it
// (since every instruction can change the VM stack)
if let Some(StoredValue::VirtualMachineStack { vm_state, .. }) =
self.storage.symbol_storage_map.get_mut(sym)
{
*vm_state = self.code_builder.set_top_symbol(*sym);
}
self.symbol_layouts.insert(*sym, *layout);
@@ -529,7 +529,7 @@ impl<'a> WasmBackend<'a> {
}));
}
self.build_stmt(&rc_stmt, ret_layout);
self.build_stmt(rc_stmt, ret_layout);
}
x => todo!("statement {:?}", x),
@@ -656,10 +655,9 @@ impl<'a> WasmBackend<'a> {
let (local_id, offset) =
location.local_and_offset(self.storage.stack_frame_pointer);
// This is a minor cheat. We only need the first two 32 bit
// chunks here. We fill both chunks with zeros, so we
// can simplify things to a single group of 64 bit operations instead of
// doing the below twice for 32 bits.
// This is a minor cheat.
// What we want to write to stack memory is { elements: null, length: 0 }
// But instead of two 32-bit stores, we can do a single 64-bit store.
self.code_builder.get_local(local_id);
self.code_builder.i64_const(0);
self.code_builder.i64_store(Align::Bytes4, offset);
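// A minimal standalone sketch (not part of this commit; ListHeader and its
// field types are illustrative, with u32 standing in for wasm32 pointers) of
// why one 64-bit store can zero both 32-bit fields at once: the two fields
// are adjacent in memory, so a single 8-byte write covers the whole header.
#[repr(C)]
struct ListHeader {
    elements: u32, // pointer-sized on wasm32
    length: u32,
}

fn main() {
    let mut header = ListHeader { elements: 0xdead_beef, length: 99 };
    // One 8-byte store of zero, like the single i64.store above
    unsafe { (&mut header as *mut ListHeader as *mut u64).write_unaligned(0) };
    assert_eq!(header.elements, 0); // elements: null
    assert_eq!(header.length, 0); // length: 0
}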
@@ -668,10 +667,281 @@
}
}
x => todo!("Expression {:?}", x),
Expr::Tag {
tag_layout: union_layout,
tag_id,
arguments,
..
} => self.build_tag(union_layout, *tag_id, arguments, *sym, storage),
Expr::GetTagId {
structure,
union_layout,
} => self.build_get_tag_id(*structure, union_layout),
Expr::UnionAtIndex {
structure,
tag_id,
union_layout,
index,
} => self.build_union_at_index(*structure, *tag_id, union_layout, *index, *sym),
_ => todo!("Expression `{}`", expr.to_pretty(100)),
}
}
fn build_tag(
&mut self,
union_layout: &UnionLayout<'a>,
tag_id: TagIdIntType,
arguments: &'a [Symbol],
symbol: Symbol,
stored: &StoredValue,
) {
if union_layout.tag_is_null(tag_id) {
self.code_builder.i32_const(0);
return;
}
let stores_tag_id_as_data = union_layout.stores_tag_id_as_data(PTR_SIZE);
let stores_tag_id_in_pointer = union_layout.stores_tag_id_in_pointer(PTR_SIZE);
let (data_size, data_alignment) = union_layout.data_size_and_alignment(PTR_SIZE);
// We're going to use the pointer many times, so put it in a local variable
let stored_with_local =
self.storage
.ensure_value_has_local(&mut self.code_builder, symbol, stored.to_owned());
let (local_id, data_offset) = match stored_with_local {
StoredValue::StackMemory { location, .. } => {
location.local_and_offset(self.storage.stack_frame_pointer)
}
StoredValue::Local { local_id, .. } => {
// Tag is stored as a pointer to the heap. Call the allocator to get a memory address.
self.allocate_with_refcount(Some(data_size), data_alignment, 1);
self.code_builder.set_local(local_id);
(local_id, 0)
}
StoredValue::VirtualMachineStack { .. } => {
internal_error!("{:?} should have a local variable", symbol)
}
};
// Write the field values to memory
let mut field_offset = data_offset;
for field_symbol in arguments.iter() {
field_offset += self.storage.copy_value_to_memory(
&mut self.code_builder,
local_id,
field_offset,
*field_symbol,
);
}
// Store the tag ID (if any)
if stores_tag_id_as_data {
let id_offset = data_offset + data_size - data_alignment;
let id_align = Align::from(data_alignment);
self.code_builder.get_local(local_id);
match id_align {
Align::Bytes1 => {
self.code_builder.i32_const(tag_id as i32);
self.code_builder.i32_store8(id_align, id_offset);
}
Align::Bytes2 => {
self.code_builder.i32_const(tag_id as i32);
self.code_builder.i32_store16(id_align, id_offset);
}
Align::Bytes4 => {
self.code_builder.i32_const(tag_id as i32);
self.code_builder.i32_store(id_align, id_offset);
}
Align::Bytes8 => {
self.code_builder.i64_const(tag_id as i64);
self.code_builder.i64_store(id_align, id_offset);
}
}
} else if stores_tag_id_in_pointer {
self.code_builder.get_local(local_id);
self.code_builder.i32_const(tag_id as i32);
self.code_builder.i32_or();
self.code_builder.set_local(local_id);
}
}
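// A standalone sketch (helper names are illustrative, not from this commit)
// of the pointer-tagging trick the stores_tag_id_in_pointer branch uses: a
// heap pointer aligned to at least 4 bytes has its low 2 bits clear, so a
// tag id in 0..=3 can be OR'ed into them and masked back out later.
fn tag_pointer(ptr: u32, tag_id: u32) -> u32 {
    debug_assert!(ptr % 4 == 0 && tag_id < 4);
    ptr | tag_id // get_local; i32_const(tag_id); i32_or; set_local
}

fn untag_pointer(tagged: u32) -> u32 {
    tagged & !3 // i32_const(-4); i32_and, as in build_union_at_index
}

fn main() {
    let tagged = tag_pointer(0x1000, 2);
    assert_eq!(tagged & 3, 2); // i32_const(3); i32_and, as in build_get_tag_id
    assert_eq!(untag_pointer(tagged), 0x1000);
}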
fn build_get_tag_id(&mut self, structure: Symbol, union_layout: &UnionLayout<'a>) {
use UnionLayout::*;
let mut need_to_close_block = false;
match union_layout {
NonRecursive(_) => {}
Recursive(_) => {}
NonNullableUnwrapped(_) => {
self.code_builder.i32_const(0);
return;
}
NullableWrapped { nullable_id, .. } => {
self.storage
.load_symbols(&mut self.code_builder, &[structure]);
self.code_builder.i32_eqz();
self.code_builder.if_(BlockType::Value(ValueType::I32));
self.code_builder.i32_const(*nullable_id as i32);
self.code_builder.else_();
need_to_close_block = true;
}
NullableUnwrapped { nullable_id, .. } => {
self.storage
.load_symbols(&mut self.code_builder, &[structure]);
self.code_builder.i32_eqz();
self.code_builder.if_(BlockType::Value(ValueType::I32));
self.code_builder.i32_const(*nullable_id as i32);
self.code_builder.else_();
self.code_builder.i32_const(!(*nullable_id) as i32);
self.code_builder.end();
}
};
if union_layout.stores_tag_id_as_data(PTR_SIZE) {
let (data_size, data_alignment) = union_layout.data_size_and_alignment(PTR_SIZE);
let id_offset = data_size - data_alignment;
let id_align = Align::from(data_alignment);
self.storage
.load_symbols(&mut self.code_builder, &[structure]);
match union_layout.tag_id_builtin() {
Builtin::Bool | Builtin::Int(IntWidth::U8) => {
self.code_builder.i32_load8_u(id_align, id_offset)
}
Builtin::Int(IntWidth::U16) => self.code_builder.i32_load16_u(id_align, id_offset),
Builtin::Int(IntWidth::U32) => self.code_builder.i32_load(id_align, id_offset),
Builtin::Int(IntWidth::U64) => self.code_builder.i64_load(id_align, id_offset),
x => internal_error!("Unexpected layout for tag union id {:?}", x),
}
} else if union_layout.stores_tag_id_in_pointer(PTR_SIZE) {
self.storage
.load_symbols(&mut self.code_builder, &[structure]);
self.code_builder.i32_const(3);
self.code_builder.i32_and();
}
if need_to_close_block {
self.code_builder.end();
}
}
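// A sketch of the NullableWrapped control flow above, assuming the non-null
// tags store their id in the pointer's low bits (the function and values are
// illustrative): a null pointer means the nullable tag; anything else has
// its id read from the pointer itself.
fn get_tag_id(ptr: u32, nullable_id: u32) -> u32 {
    if ptr == 0 {
        nullable_id // i32_eqz; if_; i32_const(nullable_id)
    } else {
        ptr & 3 // else_; i32_const(3); i32_and; end
    }
}

fn main() {
    assert_eq!(get_tag_id(0, 2), 2); // null pointer: the nullable tag
    assert_eq!(get_tag_id(0x1001, 2), 1); // tag id 1 in the low bits
}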
fn build_union_at_index(
&mut self,
structure: Symbol,
tag_id: TagIdIntType,
union_layout: &UnionLayout<'a>,
index: u64,
symbol: Symbol,
) {
use UnionLayout::*;
debug_assert!(!union_layout.tag_is_null(tag_id));
let tag_index = tag_id as usize;
let field_layouts = match union_layout {
NonRecursive(tags) => tags[tag_index],
Recursive(tags) => tags[tag_index],
NonNullableUnwrapped(layouts) => *layouts,
NullableWrapped { other_tags, .. } => other_tags[tag_index],
NullableUnwrapped { other_fields, .. } => *other_fields,
};
let field_offset: u32 = field_layouts
.iter()
.take(index as usize)
.map(|field_layout| field_layout.stack_size(PTR_SIZE))
.sum();
// Get pointer and offset to the tag's data
let structure_storage = self.storage.get(&structure).to_owned();
let stored_with_local = self.storage.ensure_value_has_local(
&mut self.code_builder,
structure,
structure_storage,
);
let (tag_local_id, tag_offset) = match stored_with_local {
StoredValue::StackMemory { location, .. } => {
location.local_and_offset(self.storage.stack_frame_pointer)
}
StoredValue::Local { local_id, .. } => (local_id, 0),
StoredValue::VirtualMachineStack { .. } => {
internal_error!("{:?} should have a local variable", structure)
}
};
let stores_tag_id_in_pointer = union_layout.stores_tag_id_in_pointer(PTR_SIZE);
let from_ptr = if stores_tag_id_in_pointer {
let ptr = self.storage.create_anonymous_local(ValueType::I32);
self.code_builder.get_local(tag_local_id);
self.code_builder.i32_const(-4); // 11111111...1100
self.code_builder.i32_and();
self.code_builder.set_local(ptr);
ptr
} else {
tag_local_id
};
let from_offset = tag_offset + field_offset;
self.storage
.copy_value_from_memory(&mut self.code_builder, symbol, from_ptr, from_offset);
}
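// The field_offset computation above as a standalone sketch (the sizes are
// illustrative): a field's offset is the sum of the stack sizes of all the
// fields that precede it.
fn field_offset(field_sizes: &[u32], index: usize) -> u32 {
    field_sizes.iter().take(index).sum()
}

fn main() {
    // e.g. a payload of (i64, i32, i32): field 2 starts at 8 + 4 = 12 bytes
    assert_eq!(field_offset(&[8, 4, 4], 2), 12);
}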
/// Allocate heap space and write an initial refcount
/// If the data size is known at compile time, pass it in comptime_data_size.
/// If size is only known at runtime, push *data* size to the VM stack first.
/// Leaves the *data* address on the VM stack
fn allocate_with_refcount(
&mut self,
comptime_data_size: Option<u32>,
alignment_bytes: u32,
initial_refcount: u32,
) {
// Add extra bytes for the refcount
let extra_bytes = alignment_bytes.max(PTR_SIZE);
if let Some(data_size) = comptime_data_size {
// Data size known at compile time and passed as an argument
self.code_builder
.i32_const((data_size + extra_bytes) as i32);
} else {
// Data size known only at runtime and is on top of VM stack
self.code_builder.i32_const(extra_bytes as i32);
self.code_builder.i32_add();
}
// Provide a constant for the alignment argument
self.code_builder.i32_const(alignment_bytes as i32);
// Call the foreign function. (Zig and C calling conventions are the same for this signature)
let param_types = bumpalo::vec![in self.env.arena; ValueType::I32, ValueType::I32];
let ret_type = Some(ValueType::I32);
self.call_zig_builtin("roc_alloc", param_types, ret_type);
// Save the allocation address to a temporary local variable
let local_id = self.storage.create_anonymous_local(ValueType::I32);
self.code_builder.set_local(local_id);
// Write the initial refcount
let refcount_offset = extra_bytes - PTR_SIZE;
let encoded_refcount = (initial_refcount as i32) - 1 + i32::MIN;
self.code_builder.get_local(local_id);
self.code_builder.i32_const(encoded_refcount);
self.code_builder.i32_store(Align::Bytes4, refcount_offset);
// Put the data address on the VM stack
self.code_builder.get_local(local_id);
self.code_builder.i32_const(extra_bytes as i32);
self.code_builder.i32_add();
}
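// The address and refcount arithmetic above as a standalone sketch (the
// allocation address is illustrative): the refcount sits in the extra bytes
// just below the data, and is stored biased so that a refcount of 1 encodes
// as i32::MIN.
fn encode_refcount(initial: u32) -> i32 {
    (initial as i32) - 1 + i32::MIN
}

fn main() {
    const PTR_SIZE: u32 = 4;
    let alignment_bytes: u32 = 8;
    let extra_bytes = alignment_bytes.max(PTR_SIZE);
    let alloc_addr: u32 = 0x1000; // returned by roc_alloc (illustrative)
    let data_addr = alloc_addr + extra_bytes; // left on the VM stack
    let refcount_addr = alloc_addr + (extra_bytes - PTR_SIZE);
    assert_eq!(data_addr - refcount_addr, PTR_SIZE); // refcount just below data
    assert_eq!(encode_refcount(1), i32::MIN);
    assert_eq!(encode_refcount(2), i32::MIN + 1);
}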
fn build_low_level(
&mut self,
lowlevel: LowLevel,
@@ -730,14 +1000,11 @@ impl<'a> WasmBackend<'a> {
};
}
StoredValue::StackMemory { location, .. } => match lit {
Literal::Decimal(decimal) => {
StoredValue::StackMemory { location, .. } => {
let mut write128 = |lower_bits, upper_bits| {
let (local_id, offset) =
location.local_and_offset(self.storage.stack_frame_pointer);
let lower_bits = decimal.0 as i64;
let upper_bits = (decimal.0 >> 64) as i64;
self.code_builder.get_local(local_id);
self.code_builder.i64_const(lower_bits);
self.code_builder.i64_store(Align::Bytes8, offset);
@@ -745,39 +1012,56 @@ impl<'a> WasmBackend<'a> {
self.code_builder.get_local(local_id);
self.code_builder.i64_const(upper_bits);
self.code_builder.i64_store(Align::Bytes8, offset + 8);
};
match lit {
Literal::Decimal(decimal) => {
let lower_bits = (decimal.0 & 0xffff_ffff_ffff_ffff) as i64;
let upper_bits = (decimal.0 >> 64) as i64;
write128(lower_bits, upper_bits);
}
Literal::Int(x) => {
let lower_bits = (*x & 0xffff_ffff_ffff_ffff) as i64;
let upper_bits = (*x >> 64) as i64;
write128(lower_bits, upper_bits);
}
Literal::Float(_) => {
// Also not implemented in LLVM backend (nor in Rust!)
todo!("f128 type");
}
Literal::Str(string) => {
let (local_id, offset) =
location.local_and_offset(self.storage.stack_frame_pointer);
let len = string.len();
if len < 8 {
let mut stack_mem_bytes = [0; 8];
stack_mem_bytes[0..len].clone_from_slice(string.as_bytes());
stack_mem_bytes[7] = 0x80 | (len as u8);
let str_as_int = i64::from_le_bytes(stack_mem_bytes);
// Write all 8 bytes at once using an i64
// Str is normally two i32's, but in this special case, we can get away with fewer instructions
self.code_builder.get_local(local_id);
self.code_builder.i64_const(str_as_int);
self.code_builder.i64_store(Align::Bytes4, offset);
} else {
let (linker_sym_index, elements_addr) =
self.lookup_string_constant(string, sym, layout);
self.code_builder.get_local(local_id);
self.code_builder
.i32_const_mem_addr(elements_addr, linker_sym_index);
self.code_builder.i32_store(Align::Bytes4, offset);
self.code_builder.get_local(local_id);
self.code_builder.i32_const(string.len() as i32);
self.code_builder.i32_store(Align::Bytes4, offset + 4);
};
}
_ => not_supported_error(),
}
Literal::Str(string) => {
let (local_id, offset) =
location.local_and_offset(self.storage.stack_frame_pointer);
let len = string.len();
if len < 8 {
let mut stack_mem_bytes = [0; 8];
stack_mem_bytes[0..len].clone_from_slice(string.as_bytes());
stack_mem_bytes[7] = 0x80 | (len as u8);
let str_as_int = i64::from_le_bytes(stack_mem_bytes);
// Write all 8 bytes at once using an i64
// Str is normally two i32's, but in this special case, we can get away with fewer instructions
self.code_builder.get_local(local_id);
self.code_builder.i64_const(str_as_int);
self.code_builder.i64_store(Align::Bytes4, offset);
} else {
let (linker_sym_index, elements_addr) =
self.lookup_string_constant(string, sym, layout);
self.code_builder.get_local(local_id);
self.code_builder
.i32_const_mem_addr(elements_addr, linker_sym_index);
self.code_builder.i32_store(Align::Bytes4, offset);
self.code_builder.get_local(local_id);
self.code_builder.i32_const(string.len() as i32);
self.code_builder.i32_store(Align::Bytes4, offset + 4);
};
}
_ => not_supported_error(),
},
}
_ => not_supported_error(),
};
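// The small-string encoding used above, as a standalone sketch: strings
// shorter than 8 bytes are stored inline, with 0x80 | len in the final byte,
// so the whole Str fits in a single i64 store.
fn encode_small_str(s: &str) -> i64 {
    let len = s.len();
    assert!(len < 8);
    let mut bytes = [0u8; 8];
    bytes[..len].copy_from_slice(s.as_bytes());
    bytes[7] = 0x80 | (len as u8); // high bit marks "small"; low bits hold len
    i64::from_le_bytes(bytes)
}

fn main() {
    let bytes = encode_small_str("hi").to_le_bytes();
    assert_eq!(&bytes[..2], b"hi"); // contents stored inline
    assert_eq!(bytes[7], 0x82); // 0x80 | 2
}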
@@ -849,14 +1133,14 @@ impl<'a> WasmBackend<'a> {
// Not passing it as an argument because I'm trying to match Backend method signatures
let storage = self.storage.get(sym).to_owned();
if let Layout::Struct(field_layouts) = layout {
if matches!(layout, Layout::Struct(_)) {
match storage {
StoredValue::StackMemory { location, size, .. } => {
if size > 0 {
let (local_id, struct_offset) =
location.local_and_offset(self.storage.stack_frame_pointer);
let mut field_offset = struct_offset;
for (field, _) in fields.iter().zip(field_layouts.iter()) {
for field in fields.iter() {
field_offset += self.storage.copy_value_to_memory(
&mut self.code_builder,
local_id,
@@ -934,4 +1218,18 @@ impl<'a> WasmBackend<'a> {
self.code_builder
.call(fn_index, linker_symbol_index, num_wasm_args, has_return_val);
}
/// Debug utility
///
/// if self._debug_current_proc_is("#UserApp_foo_1") {
/// self.code_builder._debug_assert_i32(0x1234);
/// }
fn _debug_current_proc_is(&self, linker_name: &'static str) -> bool {
let (_, linker_sym_index) = self.proc_symbols[self.debug_current_proc_index];
let sym_info = &self.linker_symbols[linker_sym_index as usize];
match sym_info {
SymInfo::Function(WasmObjectSymbol::Defined { name, .. }) => name == linker_name,
_ => false,
}
}
}

View file

@@ -26,7 +26,7 @@ const PTR_TYPE: ValueType = ValueType::I32;
pub const STACK_POINTER_GLOBAL_ID: u32 = 0;
pub const FRAME_ALIGNMENT_BYTES: i32 = 16;
pub const MEMORY_NAME: &str = "memory";
pub const BUILTINS_IMPORT_MODULE_NAME: &str = "builtins";
pub const BUILTINS_IMPORT_MODULE_NAME: &str = "env";
pub const STACK_POINTER_NAME: &str = "__stack_pointer";
pub struct Env<'a> {
@@ -176,20 +176,23 @@ pub fn copy_memory(code_builder: &mut CodeBuilder, config: CopyMemoryConfig) {
}
/// Round up to alignment_bytes (which must be a power of 2)
pub fn round_up_to_alignment(unaligned: i32, alignment_bytes: i32) -> i32 {
if alignment_bytes <= 1 {
return unaligned;
}
if alignment_bytes.count_ones() != 1 {
internal_error!(
"Cannot align to {} bytes. Not a power of 2.",
alignment_bytes
);
}
let mut aligned = unaligned;
aligned += alignment_bytes - 1; // if lower bits are non-zero, push it over the next boundary
aligned &= -alignment_bytes; // mask with a flag that has upper bits 1, lower bits 0
aligned
#[macro_export]
macro_rules! round_up_to_alignment {
($unaligned: expr, $alignment_bytes: expr) => {
if $alignment_bytes <= 1 {
$unaligned
} else if $alignment_bytes.count_ones() != 1 {
panic!(
"Cannot align to {} bytes. Not a power of 2.",
$alignment_bytes
);
} else {
let mut aligned = $unaligned;
aligned += $alignment_bytes - 1; // if lower bits are non-zero, push it over the next boundary
aligned &= !$alignment_bytes + 1; // mask with a flag that has upper bits 1, lower bits 0
aligned
}
};
}
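// The macro's bit trick as a plain function, shown on concrete values:
// adding alignment - 1 pushes any value with nonzero low bits past the next
// boundary, and !alignment + 1 (two's-complement negation) is a mask with
// zeros in exactly the low bits.
fn round_up(unaligned: i32, alignment_bytes: i32) -> i32 {
    (unaligned + alignment_bytes - 1) & (!alignment_bytes + 1)
}

fn main() {
    assert_eq!(round_up(13, 8), 16);
    assert_eq!(round_up(16, 8), 16); // already-aligned values are unchanged
    assert_eq!(!8i32 + 1, -8); // the mask is just -alignment
}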
pub fn debug_panic<E: std::fmt::Debug>(error: E) {

View file

@@ -5,7 +5,7 @@ use roc_reporting::internal_error;
use crate::layout::{StackMemoryFormat::*, WasmLayout};
use crate::storage::{Storage, StoredValue};
use crate::wasm_module::{CodeBuilder, ValueType::*};
use crate::wasm_module::{Align, CodeBuilder, ValueType::*};
pub enum LowlevelBuildResult {
Done,
@@ -17,7 +17,7 @@ pub fn decode_low_level<'a>(
code_builder: &mut CodeBuilder<'a>,
storage: &mut Storage<'a>,
lowlevel: LowLevel,
args: &'a [Symbol],
args: &[Symbol],
ret_layout: &WasmLayout,
) -> LowlevelBuildResult {
use LowlevelBuildResult::*;
@@ -81,8 +81,6 @@ pub fn decode_low_level<'a>(
WasmLayout::Primitive(value_type, size) => match value_type {
I32 => {
code_builder.i32_add();
// TODO: is *deliberate* wrapping really in the spirit of things?
// The point of choosing NumAddWrap is to go fast by skipping checks, but we're making it slower.
wrap_i32(code_builder, *size);
}
I64 => code_builder.i64_add(),
@@ -347,27 +345,57 @@ pub fn decode_low_level<'a>(
},
WasmLayout::StackMemory { .. } => return NotImplemented,
},
NumIsFinite => match ret_layout {
WasmLayout::Primitive(value_type, _) => match value_type {
I32 => code_builder.i32_const(1),
I64 => code_builder.i32_const(1),
F32 => {
code_builder.i32_reinterpret_f32();
code_builder.i32_const(0x7f800000);
code_builder.i32_and();
code_builder.i32_const(0x7f800000);
code_builder.i32_ne();
NumIsFinite => {
use StoredValue::*;
match storage.get(&args[0]) {
VirtualMachineStack { value_type, .. } | Local { value_type, .. } => {
match value_type {
I32 => code_builder.i32_const(1),
I64 => code_builder.i32_const(1),
F32 => {
code_builder.i32_reinterpret_f32();
code_builder.i32_const(0x7f80_0000);
code_builder.i32_and();
code_builder.i32_const(0x7f80_0000);
code_builder.i32_ne();
}
F64 => {
code_builder.i64_reinterpret_f64();
code_builder.i64_const(0x7ff0_0000_0000_0000);
code_builder.i64_and();
code_builder.i64_const(0x7ff0_0000_0000_0000);
code_builder.i64_ne();
}
}
}
F64 => {
code_builder.i64_reinterpret_f64();
code_builder.i64_const(0x7ff0000000000000);
code_builder.i64_and();
code_builder.i64_const(0x7ff0000000000000);
code_builder.i64_ne();
StackMemory {
format, location, ..
} => {
let (local_id, offset) = location.local_and_offset(storage.stack_frame_pointer);
match format {
Int128 => code_builder.i32_const(1),
Float128 => {
code_builder.get_local(local_id);
code_builder.i64_load(Align::Bytes4, offset + 8);
code_builder.i64_const(0x7fff_0000_0000_0000);
code_builder.i64_and();
code_builder.i64_const(0x7fff_0000_0000_0000);
code_builder.i64_ne();
}
Decimal => {
code_builder.get_local(local_id);
code_builder.i64_load(Align::Bytes4, offset + 8);
code_builder.i64_const(0x7100_0000_0000_0000);
code_builder.i64_and();
code_builder.i64_const(0x7100_0000_0000_0000);
code_builder.i64_ne();
}
DataStructure => return NotImplemented,
}
}
},
WasmLayout::StackMemory { .. } => return NotImplemented,
},
}
}
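// The finiteness test those instruction sequences implement, sketched on the
// host: an IEEE 754 float is finite iff its exponent bits are not all ones
// (an all-ones exponent means infinity or NaN).
fn is_finite_f32(x: f32) -> bool {
    // i32.reinterpret_f32; i32.and with the exponent mask; i32.ne
    (x.to_bits() & 0x7f80_0000) != 0x7f80_0000
}

fn is_finite_f64(x: f64) -> bool {
    (x.to_bits() & 0x7ff0_0000_0000_0000) != 0x7ff0_0000_0000_0000
}

fn main() {
    assert!(is_finite_f32(1.5) && !is_finite_f32(f32::INFINITY));
    assert!(!is_finite_f64(f64::NAN) && is_finite_f64(1e308));
}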
NumAtan => {
let width = float_width_from_layout(ret_layout);
return BuiltinCall(&bitcode::NUM_ATAN[width]);
@@ -468,16 +496,79 @@ pub fn decode_low_level<'a>(
WasmLayout::StackMemory { .. } => return NotImplemented,
}
}
Eq => match storage.get(&args[0]) {
StoredValue::VirtualMachineStack { value_type, .. }
| StoredValue::Local { value_type, .. } => match value_type {
I32 => code_builder.i32_eq(),
I64 => code_builder.i64_eq(),
F32 => code_builder.f32_eq(),
F64 => code_builder.f64_eq(),
},
StoredValue::StackMemory { .. } => return NotImplemented,
},
Eq => {
use StoredValue::*;
match storage.get(&args[0]).to_owned() {
VirtualMachineStack { value_type, .. } | Local { value_type, .. } => {
match value_type {
I32 => code_builder.i32_eq(),
I64 => code_builder.i64_eq(),
F32 => code_builder.f32_eq(),
F64 => code_builder.f64_eq(),
}
}
StackMemory {
format,
location: location0,
..
} => {
if let StackMemory {
location: location1,
..
} = storage.get(&args[1]).to_owned()
{
let stack_frame_pointer = storage.stack_frame_pointer;
let compare_bytes = |code_builder: &mut CodeBuilder| {
let (local0, offset0) = location0.local_and_offset(stack_frame_pointer);
let (local1, offset1) = location1.local_and_offset(stack_frame_pointer);
code_builder.get_local(local0);
code_builder.i64_load(Align::Bytes8, offset0);
code_builder.get_local(local1);
code_builder.i64_load(Align::Bytes8, offset1);
code_builder.i64_eq();
code_builder.get_local(local0);
code_builder.i64_load(Align::Bytes8, offset0 + 8);
code_builder.get_local(local1);
code_builder.i64_load(Align::Bytes8, offset1 + 8);
code_builder.i64_eq();
code_builder.i32_and();
};
match format {
Decimal => {
// Both args are finite
let first = [args[0]];
let second = [args[1]];
decode_low_level(
code_builder,
storage,
LowLevel::NumIsFinite,
&first,
ret_layout,
);
decode_low_level(
code_builder,
storage,
LowLevel::NumIsFinite,
&second,
ret_layout,
);
code_builder.i32_and();
// AND they have the same bytes
compare_bytes(code_builder);
code_builder.i32_and();
}
Int128 => compare_bytes(code_builder),
Float128 | DataStructure => return NotImplemented,
}
}
}
}
}
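// The 128-bit comparison the compare_bytes closure emits, as a standalone
// sketch: each half is compared as an i64 and the two results are AND'ed.
fn i128_eq(a: i128, b: i128) -> bool {
    let (a_lo, a_hi) = (a as u64, (a >> 64) as u64);
    let (b_lo, b_hi) = (b as u64, (b >> 64) as u64);
    (a_lo == b_lo) & (a_hi == b_hi) // i64_eq; i64_eq; i32_and
}

fn main() {
    assert!(i128_eq(1 << 100, 1 << 100));
    assert!(!i128_eq(1 << 100, 1)); // differs only in the upper half
}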
NotEq => match storage.get(&args[0]) {
StoredValue::VirtualMachineStack { value_type, .. }
| StoredValue::Local { value_type, .. } => match value_type {
@@ -486,7 +577,10 @@ pub fn decode_low_level<'a>(
F32 => code_builder.f32_ne(),
F64 => code_builder.f64_ne(),
},
StoredValue::StackMemory { .. } => return NotImplemented,
StoredValue::StackMemory { .. } => {
decode_low_level(code_builder, storage, LowLevel::Eq, args, ret_layout);
code_builder.i32_eqz();
}
},
And => code_builder.i32_and(),
Or => code_builder.i32_or(),
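// NotEq on stack memory (above) reuses Eq and inverts the result; i32.eqz
// maps 0 to 1 and everything else to 0, sketched here on the host.
fn main() {
    let eq_result = 1i32; // what Eq left on the VM stack
    assert_eq!((eq_result == 0) as i32, 0); // i32_eqz: equal, so NotEq is 0
}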

View file

@@ -108,11 +108,17 @@ impl<'a> Storage<'a> {
self.stack_frame_size = 0;
}
/// Internal use only. If you think you want it externally, you really want `allocate`
/// Internal use only. See `allocate` or `create_anonymous_local`
fn get_next_local_id(&self) -> LocalId {
LocalId((self.arg_types.len() + self.local_types.len()) as u32)
}
pub fn create_anonymous_local(&mut self, value_type: ValueType) -> LocalId {
let id = self.get_next_local_id();
self.local_types.push(value_type);
id
}
/// Allocate storage for a Roc value
///
/// Wasm primitives (i32, i64, f32, f64) are allocated "storage" on the VM stack.
@@ -172,7 +178,7 @@ impl<'a> Storage<'a> {
}
let offset =
round_up_to_alignment(self.stack_frame_size, *alignment_bytes as i32);
round_up_to_alignment!(self.stack_frame_size, *alignment_bytes as i32);
self.stack_frame_size = offset + (*size as i32);

View file

@@ -85,7 +85,9 @@ impl std::fmt::Debug for VmBlock<'_> {
}
}
/// Wasm memory alignment. (Rust representation matches Wasm encoding)
/// Wasm memory alignment for load/store instructions.
/// Rust representation matches Wasm encoding.
/// It's an error to specify alignment higher than the "natural" alignment of the instruction
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum Align {
@@ -93,10 +95,6 @@ pub enum Align {
Bytes2 = 1,
Bytes4 = 2,
Bytes8 = 3,
Bytes16 = 4,
Bytes32 = 5,
Bytes64 = 6,
// ... we can add more if we need them ...
}
impl From<u32> for Align {
@@ -105,11 +103,13 @@ impl From<u32> for Align {
1 => Align::Bytes1,
2 => Align::Bytes2,
4 => Align::Bytes4,
8 => Align::Bytes8,
16 => Align::Bytes16,
32 => Align::Bytes32,
64 => Align::Bytes64,
_ => internal_error!("{:?}-byte alignment not supported", x),
_ => {
if x.count_ones() == 1 {
Align::Bytes8 // Max value supported by any Wasm instruction
} else {
internal_error!("Cannot align to {} bytes", x);
}
}
}
}
}
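// The conversion this From<u32> impl performs, as a standalone sketch: exact
// powers of two map to their log2 (the Wasm alignment encoding), clamped to
// Bytes8, the largest alignment any Wasm load/store instruction uses.
fn align_encoding(bytes: u32) -> u8 {
    assert_eq!(bytes.count_ones(), 1, "Cannot align to {} bytes", bytes);
    (bytes.trailing_zeros() as u8).min(3)
}

fn main() {
    assert_eq!(align_encoding(4), 2); // Align::Bytes4 = 2
    assert_eq!(align_encoding(8), 3); // Align::Bytes8 = 3
    assert_eq!(align_encoding(64), 3); // larger powers clamp to Bytes8
}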
@@ -445,7 +445,7 @@ impl<'a> CodeBuilder<'a> {
if frame_size != 0 {
if let Some(frame_ptr_id) = frame_pointer {
let aligned_size = round_up_to_alignment(frame_size, FRAME_ALIGNMENT_BYTES);
let aligned_size = round_up_to_alignment!(frame_size, FRAME_ALIGNMENT_BYTES);
self.build_stack_frame_push(aligned_size, frame_ptr_id);
self.build_stack_frame_pop(aligned_size, frame_ptr_id);
}
@@ -901,4 +901,17 @@ impl<'a> CodeBuilder<'a> {
instruction_no_args!(i64_reinterpret_f64, I64REINTERPRETF64, 1, true);
instruction_no_args!(f32_reinterpret_i32, F32REINTERPRETI32, 1, true);
instruction_no_args!(f64_reinterpret_i64, F64REINTERPRETI64, 1, true);
/// Generate a debug assertion for an expected i32 value
pub fn _debug_assert_i32(&mut self, expected: i32) {
self.i32_const(expected);
self.i32_eq();
self.i32_eqz();
self.if_(BlockType::NoResult);
self.unreachable_(); // Tell Wasm runtime to throw an exception
self.end();
// It matches. Restore the original value to the VM stack and continue the program.
// We know it matched the expected value, so just use that!
self.i32_const(expected);
}
}