Update dev backend

Ayaz Hafiz 2023-01-03 20:35:09 -06:00
parent 45aa9768f7
commit 6859c2e15c
No known key found for this signature in database
GPG key ID: 0E2A37416A25EF58
8 changed files with 1154 additions and 356 deletions
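
Every hunk in this commit follows the same migration: the dev backend stops passing `Layout<'a>` values and instead passes interned `InLayout<'a>` keys, resolving structure and sizes through the `LayoutInterner` trait. Call sites change from `layout.stack_size(layout_interner, target_info)` to `layout_interner.stack_size(*layout)`, which is also why the `target_info()` accessor and the x86-64 `TARGET_INFO` constant disappear below. The following is a minimal sketch of the interning pattern, using toy stand-in types (`ToyInterner`, a simplified `Layout`) rather than roc_mono's real definitions:

```rust
use std::marker::PhantomData;

// Illustrative stand-ins for roc_mono's types; the variants and fields
// here are assumptions for the sketch, not the real definitions.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Layout {
    U64,
    F64,
    Struct { field_count: usize }, // real layouts store field layouts, not a count
}

// A cheap, Copy index into the interner, playing the role of `InLayout<'a>`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct InLayout<'a>(usize, PhantomData<&'a ()>);

// Stands in for `STLayoutInterner<'a>`.
struct ToyInterner<'a> {
    layouts: Vec<Layout>,
    _marker: PhantomData<&'a ()>,
}

impl<'a> ToyInterner<'a> {
    fn new() -> Self {
        ToyInterner { layouts: Vec::new(), _marker: PhantomData }
    }

    // Intern a layout, returning a key that is cheap to copy and compare.
    fn insert(&mut self, layout: Layout) -> InLayout<'a> {
        // A real interner deduplicates with a hash map; a linear scan
        // is enough for a sketch.
        match self.layouts.iter().position(|l| *l == layout) {
            Some(i) => InLayout(i, PhantomData),
            None => {
                self.layouts.push(layout);
                InLayout(self.layouts.len() - 1, PhantomData)
            }
        }
    }

    // Resolve a key back to its layout, like `layout_interner.get(*layout)`.
    fn get(&self, key: InLayout<'a>) -> Layout {
        self.layouts[key.0]
    }

    // Size queries now live on the interner, so callers no longer thread
    // a `TargetInfo` through every call site.
    fn stack_size(&self, key: InLayout<'a>) -> u32 {
        match self.get(key) {
            Layout::U64 | Layout::F64 => 8,
            Layout::Struct { field_count } => 8 * field_count as u32,
        }
    }
}

fn main() {
    let mut interner = ToyInterner::new();
    let int = interner.insert(Layout::U64);
    let pair = interner.insert(Layout::Struct { field_count: 2 });
    // Keys are Copy, which is why the diff can switch signatures from
    // `&Layout<'a>` to `InLayout<'a>` and match on `*layout` directly.
    assert_eq!(interner.stack_size(int), 8);
    assert_eq!(interner.stack_size(pair), 16);
}
```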


@@ -4,7 +4,7 @@ use bumpalo::collections::Vec;
use packed_struct::prelude::*;
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Layout, STLayoutInterner};
use roc_mono::layout::{InLayout, STLayoutInterner};
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
@@ -316,8 +316,8 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
AArch64Call,
>,
_layout_interner: &mut STLayoutInterner<'a>,
_args: &'a [(Layout<'a>, Symbol)],
_ret_layout: &Layout<'a>,
_args: &'a [(InLayout<'a>, Symbol)],
_ret_layout: &InLayout<'a>,
) {
todo!("Loading args for AArch64");
}
@@ -336,8 +336,8 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
_layout_interner: &mut STLayoutInterner<'a>,
_dst: &Symbol,
_args: &[Symbol],
_arg_layouts: &[Layout<'a>],
_ret_layout: &Layout<'a>,
_arg_layouts: &[InLayout<'a>],
_ret_layout: &InLayout<'a>,
) {
todo!("Storing args for AArch64");
}
@@ -354,7 +354,7 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Returning complex symbols for AArch64");
}
@@ -371,7 +371,7 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg, AArch64Assembler> for AArch64C
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Loading returned complex symbols for AArch64");
}
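
The AArch64 backend is still all `todo!` stubs, but its signatures must track the shared `CallConv` trait, which now threads the interner and interned keys through every hook. A rough sketch of that signature pattern, with simplified stand-in types rather than the real trait:

```rust
// Simplified stand-ins; the real trait carries many more parameters.
struct Interner; // plays the role of STLayoutInterner<'a>
struct Symbol(u32);
#[derive(Copy, Clone)]
struct InLayout<'a>(usize, std::marker::PhantomData<&'a ()>);

trait CallConvSketch {
    // Mirrors the shape of `load_args` after the change: interned keys
    // (`InLayout`) plus a mutable interner to resolve them.
    fn load_args<'a>(
        interner: &mut Interner,
        args: &'a [(InLayout<'a>, Symbol)],
        ret_layout: &InLayout<'a>,
    );
}

struct AArch64Call;

impl CallConvSketch for AArch64Call {
    fn load_args<'a>(
        _interner: &mut Interner,
        _args: &'a [(InLayout<'a>, Symbol)],
        _ret_layout: &InLayout<'a>,
    ) {
        // Behavior is unchanged in the commit; only the types became keys.
        todo!("Loading args for AArch64");
    }
}

fn main() {
    // Calling the stub would panic with the todo message, so this sketch
    // only checks that the signatures line up.
}
```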

File diff suppressed because it is too large


@@ -11,7 +11,9 @@ use roc_module::symbol::Symbol;
use roc_mono::{
borrow::Ownership,
ir::{JoinPointId, Param},
layout::{Builtin, Layout, STLayoutInterner, TagIdIntType, UnionLayout},
layout::{
Builtin, InLayout, Layout, LayoutInterner, STLayoutInterner, TagIdIntType, UnionLayout,
},
};
use roc_target::TargetInfo;
use std::cmp::max;
@@ -186,10 +188,6 @@ impl<
self.fn_call_stack_size = 0;
}
pub fn target_info(&self) -> TargetInfo {
self.target_info
}
pub fn stack_size(&self) -> u32 {
self.stack_size
}
@@ -534,7 +532,7 @@ impl<
sym: &Symbol,
structure: &Symbol,
index: u64,
field_layouts: &'a [Layout<'a>],
field_layouts: &'a [InLayout<'a>],
) {
debug_assert!(index < field_layouts.len() as u64);
// This must be removed and reinserted for ownership and mutability reasons.
@@ -546,23 +544,20 @@ impl<
let (base_offset, size) = (*base_offset, *size);
let mut data_offset = base_offset;
for layout in field_layouts.iter().take(index as usize) {
let field_size = layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*layout);
data_offset += field_size as i32;
}
debug_assert!(data_offset < base_offset + size as i32);
let layout = field_layouts[index as usize];
let size = layout.stack_size(layout_interner, self.target_info);
let size = layout_interner.stack_size(layout);
self.allocation_map.insert(*sym, owned_data);
self.symbol_storage_map.insert(
*sym,
Stack(if is_primitive(&layout) {
Stack(if is_primitive(layout) {
ReferencedPrimitive {
base_offset: data_offset,
size,
sign_extend: matches!(
layout,
Layout::Builtin(sign_extended_int_builtins!())
),
sign_extend: matches!(layout, sign_extended_int_builtins!()),
}
} else {
Complex {
@@ -639,17 +634,17 @@ impl<
layout_interner: &mut STLayoutInterner<'a>,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
fields: &'a [Symbol],
) {
let struct_size = layout.stack_size(layout_interner, self.target_info);
let struct_size = layout_interner.stack_size(*layout);
if struct_size == 0 {
self.symbol_storage_map.insert(*sym, NoData);
return;
}
let base_offset = self.claim_stack_area(sym, struct_size);
if let Layout::Struct { field_layouts, .. } = layout {
if let Layout::Struct { field_layouts, .. } = layout_interner.get(*layout) {
let mut current_offset = base_offset;
for (field, field_layout) in fields.iter().zip(field_layouts.iter()) {
self.copy_symbol_to_stack_offset(
@@ -659,7 +654,7 @@ impl<
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
} else {
@@ -699,7 +694,7 @@ impl<
field,
field_layout,
);
let field_size = field_layout.stack_size(layout_interner, self.target_info);
let field_size = layout_interner.stack_size(*field_layout);
current_offset += field_size as i32;
}
self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
@@ -717,7 +712,7 @@ impl<
&mut self,
buf: &mut Vec<'a, u8>,
sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
let ret_reg = self.load_to_general_reg(buf, &Symbol::RET_POINTER);
let (base_offset, size) = self.stack_offset_and_size(sym);
@@ -741,9 +736,9 @@ impl<
buf: &mut Vec<'a, u8>,
to_offset: i32,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match layout_interner.get(*layout) {
Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {
debug_assert_eq!(to_offset % 8, 0);
let reg = self.load_to_general_reg(buf, sym);
@@ -754,16 +749,16 @@ impl<
let reg = self.load_to_float_reg(buf, sym);
ASM::mov_base32_freg64(buf, to_offset, reg);
}
_ if layout.stack_size(layout_interner, self.target_info) == 0 => {}
_ if layout_interner.stack_size(*layout) == 0 => {}
// TODO: Verify this is always true.
// The dev backend does not deal with refcounting and does not care whether data is safe to memcpy.
// It is just temporarily storing the value due to needing to free registers.
// Later, it will be reloaded and refcounted as needed.
_ if layout.stack_size(layout_interner, self.target_info) > 8 => {
_ if layout_interner.stack_size(*layout) > 8 => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert!(from_offset % 8 == 0);
debug_assert!(size % 8 == 0);
debug_assert_eq!(size, layout.stack_size(layout_interner, self.target_info));
debug_assert_eq!(size, layout_interner.stack_size(*layout));
self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
for i in (0..size as i32).step_by(8) {
ASM::mov_reg64_base32(buf, reg, from_offset + i);
@@ -1024,7 +1019,7 @@ impl<
}
// Claim a location for every join point parameter to be loaded at.
// Put everything on the stack for simplicity.
match layout {
match *layout {
single_register_layouts!() => {
let base_offset = self.claim_stack_size(8);
self.symbol_storage_map.insert(
@@ -1038,7 +1033,7 @@ impl<
.insert(*symbol, Rc::new((base_offset, 8)));
}
_ => {
let stack_size = layout.stack_size(layout_interner, self.target_info);
let stack_size = layout_interner.stack_size(*layout);
if stack_size == 0 {
self.symbol_storage_map.insert(*symbol, NoData);
} else {
@@ -1059,7 +1054,7 @@ impl<
buf: &mut Vec<'a, u8>,
id: &JoinPointId,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
arg_layouts: &[InLayout<'a>],
) {
// TODO: remove the use of `was` here and for current_storage to deal with the borrow checker.
// See if we can do this better.
@@ -1094,7 +1089,7 @@ impl<
Stack(Primitive {
base_offset,
reg: None,
}) => match layout {
}) => match *layout {
single_register_integers!() => {
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg64(buf, *base_offset, reg);
@@ -1336,6 +1331,6 @@ impl<
}
}
fn is_primitive(layout: &Layout<'_>) -> bool {
fn is_primitive(layout: InLayout<'_>) -> bool {
matches!(layout, single_register_layouts!())
}
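
Two access patterns recur in the storage manager above: checks that only need the key, like `is_primitive`, which now takes `InLayout<'_>` by value because keys are Copy and matches it against the `single_register_layouts!()` macro, and structural code that must resolve the key first, as in `match layout_interner.get(*layout)` before destructuring `Layout::Struct`. A small sketch of both, again with toy stand-ins for the real interner and macros:

```rust
// Toy stand-ins; the real code uses roc_mono's interner and the
// `single_register_layouts!()` macro instead of an explicit match.
#[derive(Copy, Clone, PartialEq)]
enum Layout {
    U64,
    F64,
    Struct { field_sizes: &'static [u32] },
}

#[derive(Copy, Clone)]
struct InLayout(usize);

struct ToyInterner {
    layouts: Vec<Layout>,
}

impl ToyInterner {
    fn get(&self, key: InLayout) -> Layout {
        self.layouts[key.0]
    }
}

// Like the new `is_primitive(layout: InLayout<'_>)`: the key is taken by
// value. (The real check matches the key against well-known interned
// layouts; this sketch resolves it through the interner for clarity.)
fn is_primitive(interner: &ToyInterner, layout: InLayout) -> bool {
    matches!(interner.get(layout), Layout::U64 | Layout::F64)
}

// Like `load_field_at_index` above: a field's offset is the sum of the
// stack sizes of the fields before it, which requires resolving the key.
fn struct_field_offset(interner: &ToyInterner, layout: InLayout, index: usize) -> u32 {
    match interner.get(layout) {
        Layout::Struct { field_sizes } => field_sizes[..index].iter().sum(),
        _ => panic!("not a struct layout"),
    }
}

fn main() {
    let interner = ToyInterner {
        layouts: vec![Layout::U64, Layout::Struct { field_sizes: &[8, 8, 4] }],
    };
    assert!(is_primitive(&interner, InLayout(0)));
    // Offset of field 2 is 8 + 8, mirroring the loop over
    // `field_layouts.iter().take(index)` in load_field_at_index.
    assert_eq!(struct_field_offset(&interner, InLayout(1), 2), 16);
}
```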


@@ -4,13 +4,9 @@ use crate::{
single_register_layouts, Relocation,
};
use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, STLayoutInterner};
use roc_target::TargetInfo;
const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner};
// Not sure exactly how I want to represent registers.
// If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
@@ -262,8 +258,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
X86_64SystemV,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
args: &'a [(InLayout<'a>, Symbol)],
ret_layout: &InLayout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut general_i = 0;
@@ -273,8 +269,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
general_i += 1;
}
for (layout, sym) in args.iter() {
let stack_size = layout.stack_size(layout_interner, TARGET_INFO);
match layout {
let stack_size = layout_interner.stack_size(*layout);
match *layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[general_i]);
@@ -322,16 +318,16 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
let mut general_i = 0;
let mut float_i = 0;
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the result we will return.
let base_offset = storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
let base_offset =
storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
// Set the first reg to the address base + offset.
let ret_reg = Self::GENERAL_PARAM_REGS[general_i];
general_i += 1;
@@ -343,7 +339,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
);
}
for (sym, layout) in args.iter().zip(arg_layouts.iter()) {
match layout {
match *layout {
single_register_integers!() => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.load_to_specified_general_reg(
@@ -390,8 +386,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
tmp_stack_offset += 8;
}
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if x.stack_size(layout_interner, TARGET_INFO) > 16 => {
x if layout_interner.stack_size(x) == 0 => {}
x if layout_interner.stack_size(x) > 16 => {
// TODO: Double check this.
// Just copy onto the stack.
// Use return reg as buffer because it will be empty right now.
@@ -431,14 +427,14 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match *layout {
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
x if layout_interner.stack_size(x) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
let (base_offset, size) = storage_manager.stack_offset_and_size(sym);
debug_assert_eq!(base_offset % 8, 0);
if size <= 8 {
@@ -489,15 +485,15 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
>,
layout_interner: &mut STLayoutInterner<'a>,
sym: &Symbol,
layout: &Layout<'a>,
layout: &InLayout<'a>,
) {
match layout {
match *layout {
single_register_layouts!() => {
internal_error!("single register layouts are not complex symbols");
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, x) => {
let size = layout.stack_size(layout_interner, TARGET_INFO);
x if layout_interner.stack_size(x) == 0 => {}
x if !Self::returns_via_arg_pointer(layout_interner, &x) => {
let size = layout_interner.stack_size(*layout);
let offset = storage_manager.claim_stack_area(sym, size);
if size <= 8 {
X86_64Assembler::mov_base32_reg64(buf, offset, Self::GENERAL_RETURN_REGS[0]);
@@ -526,11 +522,11 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
impl X86_64SystemV {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
) -> bool {
// TODO: This will need to be more complex/extended to fully support the calling convention.
// details here: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
ret_layout.stack_size(interner, TARGET_INFO) > 16
interner.stack_size(*ret_layout) > 16
}
}
@@ -675,8 +671,8 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
X86_64WindowsFastcall,
>,
layout_interner: &mut STLayoutInterner<'a>,
args: &'a [(Layout<'a>, Symbol)],
ret_layout: &Layout<'a>,
args: &'a [(InLayout<'a>, Symbol)],
ret_layout: &InLayout<'a>,
) {
let mut arg_offset = Self::SHADOW_SPACE_SIZE as i32 + 16; // 16 is the size of the pushed return address and base pointer.
let mut i = 0;
@@ -686,7 +682,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
}
for (layout, sym) in args.iter() {
if i < Self::GENERAL_PARAM_REGS.len() {
match layout {
match *layout {
single_register_integers!() => {
storage_manager.general_reg_arg(sym, Self::GENERAL_PARAM_REGS[i]);
i += 1;
@@ -695,13 +691,13 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
storage_manager.float_reg_arg(sym, Self::FLOAT_PARAM_REGS[i]);
i += 1;
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if layout_interner.stack_size(x) == 0 => {}
x => {
todo!("Loading args with layout {:?}", x);
}
}
} else {
match layout {
match *layout {
single_register_layouts!() => {
storage_manager.primitive_stack_arg(sym, arg_offset);
arg_offset += 8;
@@ -728,18 +724,17 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
layout_interner: &mut STLayoutInterner<'a>,
dst: &Symbol,
args: &[Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
arg_layouts: &[InLayout<'a>],
ret_layout: &InLayout<'a>,
) {
let mut tmp_stack_offset = Self::SHADOW_SPACE_SIZE as i32;
if Self::returns_via_arg_pointer(layout_interner, ret_layout) {
// Save space on the stack for the arg we will return.
storage_manager
.claim_stack_area(dst, ret_layout.stack_size(layout_interner, TARGET_INFO));
storage_manager.claim_stack_area(dst, layout_interner.stack_size(*ret_layout));
todo!("claim first parama reg for the address");
}
for (i, (sym, layout)) in args.iter().zip(arg_layouts.iter()).enumerate() {
match layout {
match *layout {
single_register_integers!() => {
if i < Self::GENERAL_PARAM_REGS.len() {
storage_manager.load_to_specified_general_reg(
@@ -784,7 +779,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
tmp_stack_offset += 8;
}
}
x if x.stack_size(layout_interner, TARGET_INFO) == 0 => {}
x if layout_interner.stack_size(x) == 0 => {}
x => {
todo!("calling with arg type, {:?}", x);
}
@@ -805,7 +800,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Returning complex symbols for X86_64");
}
@@ -822,7 +817,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
>,
_layout_interner: &mut STLayoutInterner<'a>,
_sym: &Symbol,
_layout: &Layout<'a>,
_layout: &InLayout<'a>,
) {
todo!("Loading returned complex symbols for X86_64");
}
@@ -831,11 +826,11 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Windo
impl X86_64WindowsFastcall {
fn returns_via_arg_pointer<'a>(
interner: &STLayoutInterner<'a>,
ret_layout: &Layout<'a>,
ret_layout: &InLayout<'a>,
) -> bool {
// TODO: This is not fully correct; there are some exceptions for "vector" types.
// details here: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-160#return-values
ret_layout.stack_size(interner, TARGET_INFO) > 8
interner.stack_size(*ret_layout) > 8
}
}
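
Both `returns_via_arg_pointer` helpers reduce to a size threshold on the interned return layout: System V returns aggregates larger than 16 bytes through memory, while Windows fastcall uses a caller-provided pointer for anything over 8 bytes (both TODOs in the diff note that the full ABIs have more cases). A minimal sketch of that decision, independent of the real types:

```rust
// Which calling convention we are lowering for; a simplified stand-in.
enum CallConv {
    SystemV,
    WindowsFastcall,
}

// Mirrors the two `returns_via_arg_pointer` helpers in the diff: the
// decision depends only on the layout's stack size, which now comes from
// the interner (`interner.stack_size(*ret_layout)`) rather than from
// `Layout::stack_size` with a threaded TargetInfo.
fn returns_via_arg_pointer(conv: &CallConv, stack_size: u32) -> bool {
    match conv {
        // x86-64 System V: aggregates larger than 16 bytes (two
        // eightbytes) are returned through memory.
        CallConv::SystemV => stack_size > 16,
        // x64 Windows fastcall: anything that does not fit in RAX
        // (8 bytes) is returned via a caller-provided pointer.
        CallConv::WindowsFastcall => stack_size > 8,
    }
}

fn main() {
    // A 16-byte struct fits System V's register return but not Windows'.
    assert!(!returns_via_arg_pointer(&CallConv::SystemV, 16));
    assert!(returns_via_arg_pointer(&CallConv::WindowsFastcall, 16));
}
```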