Update dev backend

Ayaz Hafiz 2023-01-03 20:35:09 -06:00
parent 45aa9768f7
commit 6859c2e15c
8 changed files with 1154 additions and 356 deletions
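
The pattern running through every hunk below: the backend stops passing Layout<'a> values around and instead passes InLayout<'a> handles that index into an STLayoutInterner<'a>, so size queries move from the layout value onto the interner (layout.stack_size(layout_interner, self.target_info) becomes layout_interner.stack_size(*layout)) and structural matches first resolve the handle with layout_interner.get(*layout). The following is a minimal, self-contained sketch of that shape; ToyLayout, ToyInterner, and InLayoutIdx are hypothetical stand-ins for illustration, not the roc_mono API.

// Toy illustration of the interned-layout pattern in this commit; the names
// below (ToyLayout, ToyInterner, InLayoutIdx) are hypothetical stand-ins,
// not the actual roc_mono types.

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct InLayoutIdx(usize); // a cheap Copy handle, playing the role of InLayout<'a>

#[derive(Clone, Debug)]
enum ToyLayout {
    Bool,
    I64,
    Struct(Vec<InLayoutIdx>), // fields are themselves interned handles
}

struct ToyInterner {
    layouts: Vec<ToyLayout>,
}

impl ToyInterner {
    fn insert(&mut self, layout: ToyLayout) -> InLayoutIdx {
        self.layouts.push(layout);
        InLayoutIdx(self.layouts.len() - 1)
    }

    // Resolve a handle back to the full layout, like layout_interner.get(*layout).
    fn get(&self, idx: InLayoutIdx) -> &ToyLayout {
        &self.layouts[idx.0]
    }

    // Size queries now live on the interner, like layout_interner.stack_size(*layout),
    // because the handle alone carries no structural information.
    fn stack_size(&self, idx: InLayoutIdx) -> u32 {
        match self.get(idx) {
            ToyLayout::Bool => 1,
            ToyLayout::I64 => 8,
            ToyLayout::Struct(fields) => fields.iter().map(|f| self.stack_size(*f)).sum(),
        }
    }
}

fn main() {
    let mut interner = ToyInterner { layouts: Vec::new() };
    let bool_idx = interner.insert(ToyLayout::Bool);
    let i64_idx = interner.insert(ToyLayout::I64);
    let struct_idx = interner.insert(ToyLayout::Struct(vec![bool_idx, i64_idx]));

    // Old shape: struct_layout.stack_size(&interner, target_info).
    // New shape: ask the interner directly with the handle.
    assert_eq!(interner.stack_size(struct_idx), 9);

    // Matching on the underlying layout goes through get(), mirroring
    // "if let Layout::Struct { field_layouts, .. } = layout_interner.get(*layout)".
    if let ToyLayout::Struct(fields) = interner.get(struct_idx) {
        println!("struct with {} fields", fields.len());
    }
}

This is also why the hunks below can drop the now-unneeded target_info() accessor in the storage manager and the TARGET_INFO constant in the x86_64 module: size questions go to the interner without a separate TargetInfo argument.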


@@ -4,7 +4,7 @@ use bumpalo::collections::Vec;
use packed_struct::prelude::*;
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Layout, STLayoutInterner};
use roc_mono::layout::{InLayout, STLayoutInterner};
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
@@ -316,8 +316,8 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
AArch64Call,
>,
_layout_interner: &mut STLayoutInterner<'a>,
_args: &'a [(Layout<'a>, Symbol)],
_ret_layout: &Layout<'a>,
_args: &'a [(InLayout<'a>, Symbol)],
_ret_layout: &InLayout<'a>,
) {
todo!("Loading args for AArch64");
}
@@ -336,8 +336,8 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
_layout_interner: &mut STLayoutInterner<'a>,
_dst: &Symbol,
_args: &[Symbol],
_arg_layouts: &[Layout<'a>],
_ret_layout: &Layout<'a>,
_arg_layouts: &[InLayout<'a>],
_ret_layout: &InLayout<'a>,
) {
todo!("Storing args for AArch64");
}
@@ -354,7 +354,7 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Returning complex symbols for AArch64");
}
@@ -371,7 +371,7 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Loading returned complex symbols for AArch64");
}

File diff suppressed because it is too large


@@ -11,7 +11,9 @@ use roc_module::symbol::Symbol;
use roc_mono::{
borrow::Ownership,
ir::{JoinPointId, Param},
layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout},
layout::{
Builtin, InLayout, Layout, LayoutInterner, STLayoutInterner, TagIdIntType, UnionLayout,
},
};
use roc_target::TargetInfo;
use std::cmp::max;
@@ -186,10 +188,6 @@ impl<
self.fn_call_stack_size = 0;
}
pub fn target_info(&self) -> TargetInfo {
self.target_info
}
pub fn stack_size(&self) -> u32 {
self.stack_size
}
@@ -534,7 +532,7 @@ impl<
sym: &Symbol,
structure: &Symbol,
index: u64,
field_layouts: &'a [Layout<'a>],
field_layouts: &'a [InLayout<'a>],
) {
debug_assert!(index < field_layouts.len() as u64);
// This must be removed and reinserted for ownership and mutability reasons.
@@ -546,23 +544,20 @@ impl<
let (base_offset, size) = (*base_offset, *size);
let mut data_offset = base_offset;
for layout in field_layouts.iter().take(index as usize) {
let field_size = layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*layout);
data_offset += field_size as i32;
}
debug_assert!(data_offset < base_offset + size as i32);
let layout = field_layouts[index as usize];
let size = layout.stack_size(layout_interner, self.target_info);
let size = layout_interner.stack_size(layout);
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
Stack(if is_primitive(&layout) {
Stack(if is_primitive(layout) {
ReferencedPrimitive {
base_offset: data_offset,
size,
sign_extend: matches!(
layout,
Layout::Builtin(sign_extended_int_builtins!())
),
sign_extend: matches!(layout, sign_extended_int_builtins!()),
}
} else {
Complex {
@@ -639,17 +634,17 @@ impl<
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
fields: &'a [Symbol],
) {
let struct_size = layout.stack_size(layout_interner, self.target_info);
let struct_size = layout_interner.stack_size(*layout);
if struct_size == 0 {
self.symbol_storage_map.insert(*sym, NoData);
return;
}
let base_offset = self.claim_stack_area(sym, struct_size);
if let Layout::Struct { field_layouts, .. } = layout {
if let Layout::Struct { field_layouts, .. } = layout_interner.get(*layout) {
let mut current_offset = base_offset;
for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
self.copy_symbol_to_stack_offset(
@@ -659,7 +654,7 @@ impl<
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
} else {
@@ -699,7 +694,7 @@ impl<
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -717,7 +712,7 @@ impl<
&mut self,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
let ret_reg = self.load_to_general_reg(buf, &Symbol::RET_POINTER);
let (base_offset, size) = self.stack_offset_and_size(sym);
@@ -741,9 +736,9 @@ impl<
buf: &mut Vec<'a, u8>,
to_offset: i32,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match layout_interner.get(*layout) {
Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {
debug_assert_eq!(to_offset % 8, 0);
let reg = self.load_to_general_reg(buf, sym);
@@ -754,16 +749,16 @@ impl<
let reg = self.load_to_float_reg(buf, sym);
ASM::mov_base32_freg64(buf, to_offset, reg);
}
_ if layout.stack_size(layout_interner, self.target_info) == 0 => {}
_ if layout_interner.stack_size(*layout) == 0 => {}
// TODO: Verify this is always true.
// The dev backend does not deal with refcounting and does not care about if data is safe to memcpy.
// It is just temporarily storing the value due to needing to free registers.
// Later, it will be reloaded and stored in refcounted as needed.
_ if layout.stack_size(layout_interner, self.target_info) > 8 => {
_ if layout_interner.stack_size(*layout) > 8 => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert!(from_offset % 8 == 0);
debug_assert!(size % 8 == 0);
debug_assert_eq!(size, layout.stack_size(layout_interner, self.target_info));
debug_assert_eq!(size, layout_interner.stack_size(*layout));
self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
for i in (0..size as i32).step_by(8) {
ASM::mov_reg64_base32(buf, reg, from_offset + i);
@@ -1024,7 +1019,7 @@ impl<
}
// Claim a location for every join point parameter to be loaded at.
// Put everything on the stack for simplicity.
match layout {
match *layout {
single_register_layouts!() => {
let base_offset = self.claim_stack_size(8);
self.symbol_storage_map.insert(
@@ -1038,7 +1033,7 @@ impl<
.insert(*symbol, Rc::new((base_offset, 8)));
}
_ => {
let stack_size = layout.stack_size(layout_interner, self.target_info);
let stack_size = layout_interner.stack_size(*layout);
if stack_size == 0 {
self.symbol_storage_map.insert(*symbol, NoData);
} else {
@@ -1059,7 +1054,7 @@ impl<
buf: &mut Vec<'a, u8>,
id: &JoinPointId,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
arg_layouts: &[InLayout<'a>],
) {
// TODO: remove was use here and for current_storage to deal with borrow checker.
// See if we can do this better.
@@ -1094,7 +1089,7 @@ impl<
Stack(Primitive {
base_offset,
reg: None,
}) => match layout {
}) => match *layout {
single_register_integers!() => {
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg64(buf, *base_offset, reg);
@@ -1336,6 +1331,6 @@ impl<
}
}
fn is_primitive(layout: &Layout<'_>) -> bool {
fn is_primitive(layout: InLayout<'_>) -> bool {
matches!(layout, single_register_layouts!())
}
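
The storage manager still computes a field's offset the way it did before this change, by summing the stack sizes of the fields that precede it; only the source of each size moves onto the interner. A tiny standalone illustration of that offset walk follows (hypothetical names, not the real storage manager):

// Hypothetical stand-in for the offset computation in load_struct_at_index and
// create_struct above: a field's offset is the running sum of the stack sizes
// of the fields that precede it (no extra padding is inserted here).
fn field_offset(field_sizes: &[u32], index: usize) -> u32 {
    field_sizes.iter().take(index).sum()
}

fn main() {
    // A struct laid out as (u64, u8, u32): the third field starts at 8 + 1 = 9.
    let sizes = [8, 1, 4];
    assert_eq!(field_offset(&sizes, 0), 0);
    assert_eq!(field_offset(&sizes, 2), 9);
}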


@@ -4,13 +4,9 @@ use crate::{
single_register_layouts, Relocation,
};
use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, STLayoutInterner};
use roc_target::TargetInfo;
const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner};
// Not sure exactly how I want to represent registers.
// If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
@@ -262,8 +258,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
args: &'a [(InLayout<'a>, Symbol)],
ret_layout: &InLayout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut general_i = 0;
@@ -273,8 +269,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
general_i += 1;
}
for (layout, sym) in args.iter() {
let stack_size = layout.stack_size(layout_interner, TARGET_INFO);
match layout {
let stack_size = layout_interner.stack_size(*layout);
match *layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[general_i]);
@@ -322,16 +318,16 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
let mut general_i = 0;
let mut float_i = 0;
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the result we will be return.
let base_offset = storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
let base_offset =
storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
// Set the first reg to the address base + offset.
let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
general_i += 1;
@@ -343,7 +339,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
);
}
for (sym, layout) in args.iter().zip(arg_layouts.iter()) {
match layout {
match *layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.load_to_specified_general_reg(
@@ -390,8 +386,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
tmp_stack_offset += 8;
}
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) > 16 => {
x if layout_interner.stack_size(x) == 0 => {}
x if layout_interner.stack_size(x) > 16 => {
// TODO: Double check this.
// Just copy onto the stack.
// Use return reg as buffer because it will be empty right now.
@@ -431,14 +427,14 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match *layout {
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
x if layout_interner.stack_size(x) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
debug_assert_eq!(base_offset % 8, 0);
if size <= 8 {
@@ -489,15 +485,15 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match *layout {
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let size = layout.stack_size(layout_interner, TARGET_INFO);
x if layout_interner.stack_size(x) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
let size = layout_interner.stack_size(*layout);
let offset = storage_manager.claim_stack_area(sym, size);
if size <= 8 {
X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -526,11 +522,11 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
impl X86_64SystemV {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
) -> bool {
// TODO: This will need to be more complex/extended to fully support the calling convention.
// details here: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
ret_layout.stack_size(interner, TARGET_INFO) > 16
interner.stack_size(*ret_layout) > 16
}
}
@@ -675,8 +671,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
args: &'a [(InLayout<'a>, Symbol)],
ret_layout: &InLayout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut i = 0;
@@ -686,7 +682,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
for (layout, sym) in args.iter() {
if i < Self::GENERAL_PARAM_REGS.len() {
match layout {
match *layout {
single_register_integers!() => {
storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[i]);
i += 1;
@@ -695,13 +691,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
i += 1;
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if layout_interner.stack_size(x) == 0 => {}
x => {
todo!("Loading args with layout {:?}", x);
}
}
} else {
match layout {
match *layout {
single_register_layouts!() => {
storage_manager.primitive_stack_arg(sym, arg_offset);
arg_offset += 8;
@@ -728,18 +724,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the arg we will return.
storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
todo!("claim first parama reg for the address");
}
for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
match layout {
match *layout {
single_register_integers!() => {
if i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.load_to_specified_general_reg(
@@ -784,7 +779,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
tmp_stack_offset += 8;
}
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if layout_interner.stack_size(x) == 0 => {}
x => {
todo!("calling with arg type, {:?}", x);
}
@@ -805,7 +800,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Returning complex symbols for X86_64");
}
@@ -822,7 +817,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Loading returned complex symbols for X86_64");
}
@@ -831,11 +826,11 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
impl X86_64WindowsFastcall {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
) -> bool {
// TODO: This is not fully correct there are some exceptions for "vector" types.
// details here: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-160#return-values
ret_layout.stack_size(interner, TARGET_INFO) > 8
interner.stack_size(*ret_layout) > 8
}
}
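
Both x86-64 calling conventions keep their size-threshold heuristic for deciding when a result is returned through a hidden pointer argument; only the way the size is obtained changes. A simplified, standalone sketch of that decision follows (the thresholds come from the two returns_via_arg_pointer functions above; the real ABIs have further exceptions, as the TODO comments note, and the names here are hypothetical):

// Simplified sketch of the "return via hidden pointer?" decision.
// Thresholds mirror the code above: System V spills results larger than
// 16 bytes, Windows fastcall spills results larger than 8 bytes. Real ABIs
// have more special cases (e.g. vector types), as the source TODOs call out.

#[derive(Copy, Clone)]
enum CallConvKind {
    SystemV,
    WindowsFastcall,
}

fn returns_via_arg_pointer(conv: CallConvKind, ret_stack_size: u32) -> bool {
    match conv {
        CallConvKind::SystemV => ret_stack_size > 16,
        CallConvKind::WindowsFastcall => ret_stack_size > 8,
    }
}

fn main() {
    // A 24-byte result goes through a pointer under both conventions;
    // a 12-byte result only under Windows fastcall.
    assert!(returns_via_arg_pointer(CallConvKind::SystemV, 24));
    assert!(!returns_via_arg_pointer(CallConvKind::SystemV, 12));
    assert!(returns_via_arg_pointer(CallConvKind::WindowsFastcall, 12));
}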


@@ -18,7 +18,8 @@ use roc_mono::ir::{
SelfRecursive, Stmt,
};
use roc_mono::layout::{
Builtin, Layout, LayoutId, LayoutIds, STLayoutInterner, TagIdIntType, UnionLayout,
Builtin, InLayout, Layout, LayoutId, LayoutIds, LayoutInterner, STLayoutInterner, TagIdIntType,
UnionLayout,
};
mod generic64;
@@ -63,6 +64,7 @@ pub enum Relocation {
trait Backend<'a> {
fn env(&self) -> &Env<'a>;
fn interns(&self) -> &Interns;
fn interner(&self) -> &STLayoutInterner<'a>;
// This method is suboptimal, but it seems to be the only way to make rust understand
// that all of these values can be mutable at the same time. By returning them together,
@@ -104,7 +106,7 @@ trait Backend<'a> {
// load_args is used to let the backend know what the args are.
// The backend should track these args so it can use them as needed.
fn load_args(&mut self, args: &'a [(Layout<'a>, Symbol)], ret_layout: &Layout<'a>);
fn load_args(&mut self, args: &'a [(InLayout<'a>, Symbol)], ret_layout: &InLayout<'a>);
/// Used for generating wrappers for malloc/realloc/free
fn build_wrapped_jmp(&mut self) -> (&'a [u8], u64);
@@ -140,7 +142,7 @@ trait Backend<'a> {
}
/// build_stmt builds a statement and outputs at the end of the buffer.
fn build_stmt(&mut self, stmt: &Stmt<'a>, ret_layout: &Layout<'a>) {
fn build_stmt(&mut self, stmt: &Stmt<'a>, ret_layout: &InLayout<'a>) {
match stmt {
Stmt::Let(sym, expr, layout, following) => {
self.build_expr(sym, expr, layout);
@@ -210,7 +212,7 @@ trait Backend<'a> {
self.free_symbols(stmt);
}
Stmt::Jump(id, args) => {
let mut arg_layouts: bumpalo::collections::Vec<Layout<'a>> =
let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
bumpalo::vec![in self.env().arena];
arg_layouts.reserve(args.len());
let layout_map = self.layout_map();
@@ -231,10 +233,10 @@ trait Backend<'a> {
fn build_switch(
&mut self,
cond_symbol: &Symbol,
cond_layout: &Layout<'a>,
cond_layout: &InLayout<'a>,
branches: &'a [(u64, BranchInfo<'a>, Stmt<'a>)],
default_branch: &(BranchInfo<'a>, &'a Stmt<'a>),
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
);
// build_join generates a instructions for a join statement.
@@ -244,7 +246,7 @@ trait Backend<'a> {
parameters: &'a [Param<'a>],
body: &'a Stmt<'a>,
remainder: &'a Stmt<'a>,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
);
// build_jump generates a instructions for a jump statement.
@@ -252,13 +254,13 @@ trait Backend<'a> {
&mut self,
id: &JoinPointId,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
);
/// build_expr builds the expressions for the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_expr(&mut self, sym: &Symbol, expr: &Expr<'a>, layout: &Layout<'a>) {
fn build_expr(&mut self, sym: &Symbol, expr: &Expr<'a>, layout: &InLayout<'a>) {
match expr {
Expr::Literal(lit) => {
if self.env().lazy_literals {
@@ -306,7 +308,7 @@ trait Backend<'a> {
}
CallType::LowLevel { op: lowlevel, .. } => {
let mut arg_layouts: bumpalo::collections::Vec<Layout<'a>> =
let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
bumpalo::vec![in self.env().arena];
arg_layouts.reserve(arguments.len());
let layout_map = self.layout_map();
@@ -389,8 +391,8 @@ trait Backend<'a> {
sym: &Symbol,
lowlevel: &LowLevel,
args: &'a [Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
// Now that the arguments are needed, load them if they are literals.
self.load_literal_symbols(args);
@@ -515,22 +517,22 @@ trait Backend<'a> {
self.build_num_sub(sym, &args[0], &args[1], ret_layout)
}
LowLevel::NumBitwiseAnd => {
if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
self.build_int_bitwise_and(sym, &args[0], &args[1], *int_width)
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_bitwise_and(sym, &args[0], &args[1], int_width)
} else {
internal_error!("bitwise and on a non-integer")
}
}
LowLevel::NumBitwiseOr => {
if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
self.build_int_bitwise_or(sym, &args[0], &args[1], *int_width)
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_bitwise_or(sym, &args[0], &args[1], int_width)
} else {
internal_error!("bitwise or on a non-integer")
}
}
LowLevel::NumBitwiseXor => {
if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
self.build_int_bitwise_xor(sym, &args[0], &args[1], *int_width)
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_bitwise_xor(sym, &args[0], &args[1], int_width)
} else {
internal_error!("bitwise xor on a non-integer")
}
@@ -542,7 +544,7 @@ trait Backend<'a> {
"Eq: expected all arguments of to have the same layout"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"Eq: expected to have return layout of type Bool"
);
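
Assertions like debug_assert_eq!(Layout::Builtin(Builtin::Bool), *ret_layout) become debug_assert_eq!(Layout::BOOL, *ret_layout): Layout::BOOL here is presumably a pre-interned InLayout constant, so the check is a cheap handle comparison rather than a structural one. A toy version of that idea (hypothetical names, not the roc_mono API):

// Toy version of pre-interned constants: well-known layouts live at fixed,
// reserved slots, so comparing handles is enough to check "is this Bool?".
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct InLayoutIdx(usize);

const BOOL: InLayoutIdx = InLayoutIdx(0);
const F64: InLayoutIdx = InLayoutIdx(1);

fn check_ret_is_bool(ret_layout: InLayoutIdx) {
    // Mirrors debug_assert_eq!(Layout::BOOL, *ret_layout) in the hunks here:
    // no interner lookup is needed to compare against a known layout.
    debug_assert_eq!(BOOL, ret_layout);
}

fn main() {
    check_ret_is_bool(BOOL);
    assert_ne!(BOOL, F64);
}
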
@@ -559,7 +561,7 @@ trait Backend<'a> {
"NotEq: expected all arguments of to have the same layout"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"NotEq: expected to have return layout of type Bool"
);
@@ -576,7 +578,7 @@ trait Backend<'a> {
"NumLt: expected all arguments of to have the same layout"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"NumLt: expected to have return layout of type Bool"
);
@@ -590,10 +592,7 @@ trait Backend<'a> {
);
debug_assert!(
matches!(
*ret_layout,
Layout::Builtin(Builtin::Float(FloatWidth::F32 | FloatWidth::F64)),
),
matches!(*ret_layout, Layout::F32 | Layout::F64),
"NumToFrac: expected to have return layout of type Float"
);
self.build_num_to_frac(sym, &args[0], &arg_layouts[0], ret_layout)
@@ -609,7 +608,7 @@ trait Backend<'a> {
"NumLte: expected all arguments of to have the same layout"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"NumLte: expected to have return layout of type Bool"
);
@@ -626,7 +625,7 @@ trait Backend<'a> {
"NumGte: expected all arguments of to have the same layout"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"NumGte: expected to have return layout of type Bool"
);
@@ -703,8 +702,8 @@ trait Backend<'a> {
sym: &Symbol,
func_sym: Symbol,
args: &'a [Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
self.load_literal_symbols(args);
match func_sym {
@@ -715,7 +714,7 @@ trait Backend<'a> {
"NumIsZero: expected to have exactly one argument"
);
debug_assert_eq!(
Layout::Builtin(Builtin::Bool),
Layout::BOOL,
*ret_layout,
"NumIsZero: expected to have return layout of type Bool"
);
@@ -744,12 +743,12 @@ trait Backend<'a> {
self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
}
Symbol::BOOL_TRUE => {
let bool_layout = Layout::Builtin(Builtin::Bool);
let bool_layout = Layout::BOOL;
self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(true));
self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
}
Symbol::BOOL_FALSE => {
let bool_layout = Layout::Builtin(Builtin::Bool);
let bool_layout = Layout::BOOL;
self.load_literal(&Symbol::DEV_TMP, &bool_layout, &Literal::Bool(false));
self.return_symbol(&Symbol::DEV_TMP, &bool_layout);
}
@@ -764,15 +763,15 @@ trait Backend<'a> {
dst: &Symbol,
fn_name: String,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
);
/// build_num_abs stores the absolute value of src into dst.
fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>);
fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
/// build_num_add stores the sum of src1 and src2 into dst.
fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// build_num_add_checked stores the sum of src1 and src2 into dst.
fn build_num_add_checked(
@@ -780,21 +779,21 @@ trait Backend<'a> {
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
num_layout: &Layout<'a>,
return_layout: &Layout<'a>,
num_layout: &InLayout<'a>,
return_layout: &InLayout<'a>,
);
/// build_num_mul stores `src1 * src2` into dst.
fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// build_num_mul stores `src1 / src2` into dst.
fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// build_num_neg stores the negated value of src into dst.
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>);
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
/// build_num_sub stores the `src1 - src2` difference into dst.
fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// stores the `src1 & src2` into dst.
fn build_int_bitwise_and(
@@ -824,21 +823,27 @@ trait Backend<'a> {
);
/// build_eq stores the result of `src1 == src2` into dst.
fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>);
/// build_neq stores the result of `src1 != src2` into dst.
fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>);
/// build_num_lt stores the result of `src1 < src2` into dst.
fn build_num_lt(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
fn build_num_lt(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
arg_layout: &InLayout<'a>,
);
/// build_num_to_frac convert Number to Frac
fn build_num_to_frac(
&mut self,
dst: &Symbol,
src: &Symbol,
arg_layout: &Layout<'a>,
ret_layout: &Layout<'a>,
arg_layout: &InLayout<'a>,
ret_layout: &InLayout<'a>,
);
/// build_num_lte stores the result of `src1 <= src2` into dst.
@@ -847,7 +852,7 @@ trait Backend<'a> {
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
arg_layout: &Layout<'a>,
arg_layout: &InLayout<'a>,
);
/// build_num_gte stores the result of `src1 >= src2` into dst.
@@ -856,7 +861,7 @@ trait Backend<'a> {
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
arg_layout: &Layout<'a>,
arg_layout: &InLayout<'a>,
);
/// build_list_len returns the length of a list.
@@ -868,7 +873,7 @@ trait Backend<'a> {
dst: &Symbol,
list: &Symbol,
index: &Symbol,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
);
/// build_list_replace_unsafe returns the old element and new list with the list having the new element inserted.
@@ -876,15 +881,15 @@ trait Backend<'a> {
&mut self,
dst: &Symbol,
args: &'a [Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
);
/// build_refcount_getptr loads the pointer to the reference count of src into dst.
fn build_ptr_cast(&mut self, dst: &Symbol, src: &Symbol);
/// literal_map gets the map from symbol to literal and layout, used for lazy loading and literal folding.
fn literal_map(&mut self) -> &mut MutMap<Symbol, (*const Literal<'a>, *const Layout<'a>)>;
fn literal_map(&mut self) -> &mut MutMap<Symbol, (*const Literal<'a>, *const InLayout<'a>)>;
fn load_literal_symbols(&mut self, syms: &[Symbol]) {
if self.env().lazy_literals {
@@ -901,7 +906,7 @@ trait Backend<'a> {
}
/// load_literal sets a symbol to be equal to a literal.
fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>);
fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>);
/// create_empty_array creates an empty array with nullptr, zero length, and zero capacity.
fn create_empty_array(&mut self, sym: &Symbol);
@@ -910,12 +915,12 @@ trait Backend<'a> {
fn create_array(
&mut self,
sym: &Symbol,
elem_layout: &Layout<'a>,
elem_layout: &InLayout<'a>,
elems: &'a [ListLiteralElement<'a>],
);
/// create_struct creates a struct with the elements specified loaded into it as data.
fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]);
fn create_struct(&mut self, sym: &Symbol, layout: &InLayout<'a>, fields: &'a [Symbol]);
/// load_struct_at_index loads into `sym` the value at `index` in `structure`.
fn load_struct_at_index(
@@ -923,7 +928,7 @@ trait Backend<'a> {
sym: &Symbol,
structure: &Symbol,
index: u64,
field_layouts: &'a [Layout<'a>],
field_layouts: &'a [InLayout<'a>],
);
/// load_union_at_index loads into `sym` the value at `index` for `tag_id`.
@@ -949,7 +954,7 @@ trait Backend<'a> {
);
/// return_symbol moves a symbol to the correct return location for the backend and adds a jump to the end of the function.
fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>);
fn return_symbol(&mut self, sym: &Symbol, layout: &InLayout<'a>);
/// free_symbols will free all symbols for the given statement.
fn free_symbols(&mut self, stmt: &Stmt<'a>) {
@@ -973,7 +978,7 @@ trait Backend<'a> {
fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>>;
/// set_layout_map sets the layout for a specific symbol.
fn set_layout_map(&mut self, sym: Symbol, layout: &Layout<'a>) {
fn set_layout_map(&mut self, sym: Symbol, layout: &InLayout<'a>) {
if let Some(old_layout) = self.layout_map().insert(sym, *layout) {
// Layout map already contains the symbol. We should never need to overwrite.
// If the layout is not the same, that is a bug.
@@ -989,7 +994,7 @@ trait Backend<'a> {
}
/// layout_map gets the map from symbol to layout.
fn layout_map(&mut self) -> &mut MutMap<Symbol, Layout<'a>>;
fn layout_map(&mut self) -> &mut MutMap<Symbol, InLayout<'a>>;
fn create_free_map(&mut self) {
let mut free_map = MutMap::default();