use allocate_with_refcount in the dev backend

commit c79ae98d16 (parent acd446f6bd)
Author: Folkert
Date: 2023-02-06 22:56:38 +01:00
Signature: GPG key ID 1F17F6FFD112B97C (no known key found for this signature in database)

2 changed files with 50 additions and 51 deletions


@@ -792,7 +792,14 @@ pub fn rebuild_host(
         // For surgical linking, just copy the dynamically linked rust app.
         let mut exe_path = cargo_out_dir.join("host");
         exe_path.set_extension(executable_extension);
-        std::fs::copy(&exe_path, &host_dest).unwrap();
+        if let Err(e) = std::fs::copy(&exe_path, &host_dest) {
+            panic!(
+                "unable to copy {} => {}: {:?}\n\nIs the file used by another invocation of roc?",
+                exe_path.display(),
+                host_dest.display(),
+                e,
+            );
+        }
     } else {
         // Cargo hosts depend on a c wrapper for the api. Compile host.c as well.
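The change above swaps a bare `.unwrap()` for a panic that reports the source path, the destination path, and the underlying `io::Error`, plus a hint about the likely cause (the file being held open by another `roc` invocation). A minimal standalone sketch of the same pattern, assuming a hypothetical `copy_or_panic` helper (the paths in `main` are illustrative, not part of the commit):

```rust
use std::path::Path;

/// Hypothetical helper mirroring the error reporting added above: on failure,
/// show both paths and the io::Error instead of panicking via `.unwrap()`.
fn copy_or_panic(src: &Path, dest: &Path) {
    if let Err(e) = std::fs::copy(src, dest) {
        panic!(
            "unable to copy {} => {}: {:?}\n\nIs the file used by another invocation of roc?",
            src.display(),
            dest.display(),
            e,
        );
    }
}

fn main() {
    // Create a file so the copy succeeds when run as-is.
    std::fs::write("host.txt", b"example").unwrap();
    copy_or_panic(Path::new("host.txt"), Path::new("host-copy.txt"));
}
```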


@@ -25,7 +25,6 @@ pub(crate) mod x86_64;
 use storage::{RegStorage, StorageManager};
 
-const REFCOUNT_ONE: u64 = i64::MIN as u64;
 
 // TODO: on all number functions double check and deal with over/underflow.
 pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<GeneralReg, FloatReg>>:
@@ -1779,63 +1778,41 @@ impl<
     fn create_array(
         &mut self,
         sym: &Symbol,
-        elem_layout: &InLayout<'a>,
-        elems: &'a [ListLiteralElement<'a>],
+        element_layout: &InLayout<'a>,
+        elements: &'a [ListLiteralElement<'a>],
     ) {
-        // Allocate
-        // This requires at least 8 for the refcount alignment.
-        let allocation_alignment = std::cmp::max(
-            8,
-            self.layout_interner
-                .allocation_alignment_bytes(*elem_layout) as u64,
-        );
+        let element_width = self.layout_interner.stack_size(*element_layout) as u64;
 
-        let elem_size = self.layout_interner.stack_size(*elem_layout) as u64;
-        let allocation_size = elem_size * elems.len() as u64 + allocation_alignment /* add space for refcount */;
-        let u64_layout = Layout::U64;
+        // load the total size of the data we want to store (excludes refcount)
+        let data_bytes_symbol = Symbol::DEV_TMP;
+        let data_bytes = element_width * elements.len() as u64;
         self.load_literal(
-            &Symbol::DEV_TMP,
-            &u64_layout,
-            &Literal::Int((allocation_size as i128).to_ne_bytes()),
+            &data_bytes_symbol,
+            &Layout::U64,
+            &Literal::Int((data_bytes as i128).to_ne_bytes()),
         );
 
         // Load allocation alignment (u32)
-        let u32_layout = Layout::U32;
-        self.load_literal(
-            &Symbol::DEV_TMP2,
-            &u32_layout,
-            &Literal::Int((allocation_alignment as i128).to_ne_bytes()),
-        );
+        let element_alignment_symbol = Symbol::DEV_TMP2;
+        self.load_layout_alignment(Layout::U32, element_alignment_symbol);
 
-        self.build_fn_call(
-            &Symbol::DEV_TMP3,
-            "roc_alloc".to_string(),
-            &[Symbol::DEV_TMP, Symbol::DEV_TMP2],
-            &[u64_layout, u32_layout],
-            &u64_layout,
-        );
-        self.free_symbol(&Symbol::DEV_TMP);
-        self.free_symbol(&Symbol::DEV_TMP2);
+        self.allocate_with_refcount(
+            Symbol::DEV_TMP3,
+            data_bytes_symbol,
+            element_alignment_symbol,
+        );
+
+        self.free_symbol(&data_bytes_symbol);
+        self.free_symbol(&element_alignment_symbol);
 
-        // Fill pointer with elems
+        // The pointer already points to the first element
         let ptr_reg = self
             .storage_manager
             .load_to_general_reg(&mut self.buf, &Symbol::DEV_TMP3);
 
-        // Point to first element of array.
-        ASM::add_reg64_reg64_imm32(&mut self.buf, ptr_reg, ptr_reg, allocation_alignment as i32);
-
-        // fill refcount at -8.
-        self.storage_manager.with_tmp_general_reg(
-            &mut self.buf,
-            |_storage_manager, buf, tmp_reg| {
-                ASM::mov_reg64_imm64(buf, tmp_reg, REFCOUNT_ONE as i64);
-                ASM::mov_mem64_offset32_reg64(buf, ptr_reg, -8, tmp_reg);
-            },
-        );
-
         // Copy everything into output array.
         let mut elem_offset = 0;
-        for elem in elems {
+        for elem in elements {
             // TODO: this could be a lot faster when loading large lists
             // if we move matching on the element layout to outside this loop.
             // It also greatly bloats the code here.
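The net effect of this hunk: instead of computing a padded size, calling `roc_alloc`, advancing the pointer past the alignment slot, and storing `REFCOUNT_ONE` at `ptr - 8` by hand, the backend asks the builtins for a refcounted allocation and gets back a pointer that already addresses the first element. A runnable model of that contract in plain Rust (an illustration of the layout, not the actual Zig builtin; the allocation is leaked for brevity):

```rust
use std::alloc::{alloc, Layout};

/// Model of the contract `create_array` now relies on: return a pointer to
/// the first element, with an 8-byte refcount stored immediately before it —
/// exactly what the removed hand-written code used to set up.
fn allocate_with_refcount_model(data_bytes: usize, element_alignment: usize) -> *mut u8 {
    // The refcount slot needs at least 8 bytes and must keep the data aligned.
    let prefix = element_alignment.max(8);
    let layout = Layout::from_size_align(prefix + data_bytes, prefix).expect("valid layout");
    unsafe {
        let base = alloc(layout);
        assert!(!base.is_null(), "allocation failed");
        let data = base.add(prefix);
        // A refcount of one, in Roc's encoding (see the removed REFCOUNT_ONE).
        data.cast::<i64>().sub(1).write(i64::MIN);
        data
    }
}

fn main() {
    let ptr = allocate_with_refcount_model(3 * 8, 8);
    // The refcount sits at ptr - 8; elements are written from ptr onward.
    unsafe { assert_eq!(ptr.cast::<i64>().sub(1).read(), i64::MIN) };
}
```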
@@ -1844,24 +1821,24 @@ impl<
             let elem_sym = match elem {
                 ListLiteralElement::Symbol(sym) => sym,
                 ListLiteralElement::Literal(lit) => {
-                    self.load_literal(&Symbol::DEV_TMP, elem_layout, lit);
+                    self.load_literal(&Symbol::DEV_TMP, element_layout, lit);
                     &Symbol::DEV_TMP
                 }
             };
             // TODO: Expand to all types.
-            match self.layout_interner.get(*elem_layout) {
+            match self.layout_interner.get(*element_layout) {
                 Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64) | Builtin::Bool) => {
                     let sym_reg = self
                         .storage_manager
                         .load_to_general_reg(&mut self.buf, elem_sym);
                     ASM::mov_mem64_offset32_reg64(&mut self.buf, ptr_reg, elem_offset, sym_reg);
                 }
-                _ if elem_size == 0 => {}
-                _ if elem_size > 8 => {
+                _ if element_width == 0 => {}
+                _ if element_width > 8 => {
                     let (from_offset, size) = self.storage_manager.stack_offset_and_size(elem_sym);
                     debug_assert!(from_offset % 8 == 0);
                     debug_assert!(size % 8 == 0);
-                    debug_assert_eq!(size as u64, elem_size);
+                    debug_assert_eq!(size as u64, element_width);
                     self.storage_manager.with_tmp_general_reg(
                         &mut self.buf,
                         |_storage_manager, buf, tmp_reg| {
@@ -1874,7 +1851,7 @@ impl<
                 }
                 x => todo!("copying data to list with layout, {:?}", x),
             }
-            elem_offset += elem_size as i32;
+            elem_offset += element_width as i32;
             if elem_sym == &Symbol::DEV_TMP {
                 self.free_symbol(elem_sym);
             }
@@ -1887,7 +1864,7 @@ impl<
                 let base_offset = storage_manager.claim_stack_area(sym, 24);
                 ASM::mov_base32_reg64(buf, base_offset, ptr_reg);
-                ASM::mov_reg64_imm64(buf, tmp_reg, elems.len() as i64);
+                ASM::mov_reg64_imm64(buf, tmp_reg, elements.len() as i64);
                 ASM::mov_base32_reg64(buf, base_offset + 8, tmp_reg);
                 ASM::mov_base32_reg64(buf, base_offset + 16, tmp_reg);
             },
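As the final `create_array` hunk shows, the resulting list value is a three-word header on the stack: the element pointer at offset 0, the length at 8, and the capacity at 16 (the same `tmp_reg` holding `elements.len()` is stored to both), which is why 24 bytes are claimed. An illustrative picture of that header on a 64-bit target (this struct is not a type in the compiler):

```rust
/// Illustrative layout of the 24-byte stack area `create_array` claims.
#[repr(C)]
struct ListHeader {
    elements: u64, // offset 0: pointer to the first element
    length: u64,   // offset 8: elements.len()
    capacity: u64, // offset 16: also elements.len() for a fresh literal
}

fn main() {
    assert_eq!(std::mem::size_of::<ListHeader>(), 24);
    assert_eq!(std::mem::offset_of!(ListHeader, length), 8);
    assert_eq!(std::mem::offset_of!(ListHeader, capacity), 16);
}
```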
@@ -2274,6 +2251,21 @@ impl<
     CC: CallConv<GeneralReg, FloatReg, ASM>,
 > Backend64Bit<'a, 'r, GeneralReg, FloatReg, ASM, CC>
 {
+    fn allocate_with_refcount(
+        &mut self,
+        dst: Symbol,
+        data_bytes: Symbol,
+        element_alignment: Symbol,
+    ) {
+        self.build_fn_call(
+            &dst,
+            bitcode::UTILS_ALLOCATE_WITH_REFCOUNT.to_string(),
+            &[data_bytes, element_alignment],
+            &[Layout::U64, Layout::U32],
+            &Layout::U64,
+        );
+    }
+
     /// Updates a jump instruction to a new offset and returns the number of bytes written.
     fn update_jmp_imm32_offset(
         &mut self,