Mirror of https://github.com/roc-lang/roc.git, synced 2025-08-03 03:42:17 +00:00
centralize stack movement
This commit is contained in:
parent 8ec3ab0963, commit 6a84d6ee83

6 changed files with 160 additions and 17 deletions
@@ -609,6 +609,18 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         }
     }
+    #[inline(always)]
+    fn mov_reg32_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
+        todo!()
+    }
+    #[inline(always)]
+    fn mov_reg16_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
+        todo!()
+    }
+    #[inline(always)]
+    fn mov_reg8_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _offset: i32) {
+        todo!()
+    }
     #[inline(always)]
     fn mov_base32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FloatReg) {
         todo!("saving floating point reg to base offset for AArch64");
     }
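Both AArch64 hunks stub the new width-specific moves with `todo!()`. As a rough sketch only (my assumption, not code from this commit): a 32-bit load from the frame pointer x29 could be encoded with LDR (word-scaled unsigned offset) for small positive offsets and LDUR (signed unscaled offset) for small negative ones.

```rust
// Hypothetical sketch of filling in the `mov_reg32_base32` stub for AArch64.
// Not the project's code: register numbers are plain u32 and the buffer is a
// std Vec; the real backend uses AArch64GeneralReg and bumpalo's Vec.
fn mov_reg32_base32_sketch(buf: &mut Vec<u8>, dst: u32, offset: i32) {
    const FP: u32 = 29; // x29, the AArch64 frame pointer

    let inst: u32 = if (0..=16380).contains(&offset) && offset % 4 == 0 {
        // LDR Wt, [Xn, #imm]: 12-bit unsigned immediate, scaled by 4
        0xB940_0000 | ((offset as u32 / 4) << 10) | (FP << 5) | dst
    } else if (-256..256).contains(&offset) {
        // LDUR Wt, [Xn, #simm9]: 9-bit signed unscaled immediate
        0xB840_0000 | (((offset as u32) & 0x1FF) << 12) | (FP << 5) | dst
    } else {
        todo!("offsets outside the immediate ranges need a scratch register")
    };
    buf.extend_from_slice(&inst.to_le_bytes()); // instructions are emitted little-endian
}
```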
@@ -624,6 +636,19 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         }
     }
+
+    #[inline(always)]
+    fn mov_base32_reg32(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64GeneralReg) {
+        todo!()
+    }
+    #[inline(always)]
+    fn mov_base32_reg16(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64GeneralReg) {
+        todo!()
+    }
+    #[inline(always)]
+    fn mov_base32_reg8(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64GeneralReg) {
+        todo!()
+    }

     #[inline(always)]
     fn mov_reg64_mem64_offset32(
         buf: &mut Vec<'_, u8>,
@@ -236,9 +236,18 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {

     // base32 is similar to stack based instructions but they reference the base/frame pointer.
     fn mov_freg64_base32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
+
     fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
+    fn mov_reg32_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
+    fn mov_reg16_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
+    fn mov_reg8_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);

     fn mov_base32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
+
     fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
+    fn mov_base32_reg32(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
+    fn mov_base32_reg16(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
+    fn mov_base32_reg8(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
+
     // move from memory (a pointer) to register
     fn mov_reg64_mem64_offset32(
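The trait comment is the key design note: base32 moves address memory relative to the base/frame pointer, in the mov_<dst>_<src> naming scheme, and this commit fills out the 32-, 16-, and 8-bit widths in both directions. A hypothetical pair of calls (ASM, buf, and reg are stand-ins for a concrete Assembler impl, an output buffer, and a general register) illustrates the symmetry:

```rust
// Hypothetical usage, not from the commit: spill and reload a 32-bit value
// living at [frame_pointer - 16].
ASM::mov_base32_reg32(buf, -16, reg); // store: 4 bytes from `reg` to base - 16
ASM::mov_reg32_base32(buf, reg, -16); // load:  4 bytes from base - 16 into `reg`
```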
@@ -739,16 +739,55 @@ impl<
         layout: &InLayout<'a>,
     ) {
         match layout_interner.get(*layout) {
-            Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {
-                debug_assert_eq!(to_offset % 8, 0);
-                let reg = self.load_to_general_reg(buf, sym);
-                ASM::mov_base32_reg64(buf, to_offset, reg);
-            }
-            Layout::Builtin(Builtin::Float(FloatWidth::F64)) => {
-                debug_assert_eq!(to_offset % 8, 0);
-                let reg = self.load_to_float_reg(buf, sym);
-                ASM::mov_base32_freg64(buf, to_offset, reg);
-            }
+            Layout::Builtin(builtin) => match builtin {
+                Builtin::Int(int_width) => match int_width {
+                    IntWidth::I128 | IntWidth::U128 => {
+                        // can we treat this as 2 u64's?
+                        todo!()
+                    }
+                    IntWidth::I64 | IntWidth::U64 => {
+                        debug_assert_eq!(to_offset % 8, 0);
+                        let reg = self.load_to_general_reg(buf, sym);
+                        ASM::mov_base32_reg64(buf, to_offset, reg);
+                    }
+                    IntWidth::I32 | IntWidth::U32 => {
+                        debug_assert_eq!(to_offset % 4, 0);
+                        let reg = self.load_to_general_reg(buf, sym);
+                        ASM::mov_base32_reg32(buf, to_offset, reg);
+                    }
+                    IntWidth::I16 | IntWidth::U16 => {
+                        debug_assert_eq!(to_offset % 2, 0);
+                        let reg = self.load_to_general_reg(buf, sym);
+                        ASM::mov_base32_reg16(buf, to_offset, reg);
+                    }
+                    IntWidth::I8 | IntWidth::U8 => {
+                        let reg = self.load_to_general_reg(buf, sym);
+                        ASM::mov_base32_reg8(buf, to_offset, reg);
+                    }
+                },
+
+                Builtin::Float(float_width) => match float_width {
+                    FloatWidth::F64 => {
+                        debug_assert_eq!(to_offset % 8, 0);
+                        let reg = self.load_to_float_reg(buf, sym);
+                        ASM::mov_base32_freg64(buf, to_offset, reg);
+                    }
+                    FloatWidth::F32 => todo!(),
+                },
+                Builtin::Bool => {
+                    // same as 8-bit integer
+                    let reg = self.load_to_general_reg(buf, sym);
+                    ASM::mov_base32_reg8(buf, to_offset, reg);
+                }
+                Builtin::Decimal => todo!(),
+                Builtin::Str | Builtin::List(_) => {
+                    let (from_offset, size) = self.stack_offset_and_size(sym);
+                    debug_assert!(from_offset % 8 == 0);
+                    debug_assert!(size % 8 == 0);
+                    debug_assert_eq!(size, layout_interner.stack_size(*layout));
+                    self.copy_symbol_to_stack_offset_help(buf, size, from_offset, to_offset)
+                }
+            },
             _ if layout_interner.stack_size(*layout) == 0 => {}
             // TODO: Verify this is always true.
             // The dev backend does not deal with refcounting and does not care about if data is safe to memcpy.
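The I128/U128 arm asks "can we treat this as 2 u64's?". For a value already on the stack, the answer is plausibly yes: a 16-byte chunk round-trips exactly through two 8-byte moves, which is what the 8-byte loop in copy_symbol_to_stack_offset_help (next hunk) would perform. A runnable illustration of that claim, mine and not part of the commit:

```rust
// Demonstrates that a 128-bit value copies faithfully as two 64-bit chunks,
// mirroring what two mov_reg64_base32/mov_base32_reg64 pairs would do.
fn main() {
    let src: u128 = 0x1122_3344_5566_7788_99AA_BBCC_DDEE_FF00;
    let from = src.to_le_bytes();
    let mut to = [0u8; 16];

    for i in (0..16).step_by(8) {
        // one 64-bit "register" move per iteration
        let chunk = u64::from_le_bytes(from[i..i + 8].try_into().unwrap());
        to[i..i + 8].copy_from_slice(&chunk.to_le_bytes());
    }

    assert_eq!(u128::from_le_bytes(to), src);
}
```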
@@ -759,17 +798,53 @@ impl<
             debug_assert!(from_offset % 8 == 0);
             debug_assert!(size % 8 == 0);
             debug_assert_eq!(size, layout_interner.stack_size(*layout));
-            self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
-                for i in (0..size as i32).step_by(8) {
-                    ASM::mov_reg64_base32(buf, reg, from_offset + i);
-                    ASM::mov_base32_reg64(buf, to_offset + i, reg);
-                }
-            });
+            self.copy_symbol_to_stack_offset_help(buf, size, from_offset, to_offset)
         }
         x => todo!("copying data to the stack with layout, {:?}", x),
     }
 }

+    pub fn copy_symbol_to_stack_offset_help(
+        &mut self,
+        buf: &mut Vec<'a, u8>,
+        size: u32,
+        from_offset: i32,
+        to_offset: i32,
+    ) {
+        let mut copied = 0;
+        let size = size as i32;
+
+        self.with_tmp_general_reg(buf, |_storage_manager, buf, reg| {
+            for _ in (0..(size - copied)).step_by(8) {
+                ASM::mov_reg64_base32(buf, reg, from_offset + copied);
+                ASM::mov_base32_reg64(buf, to_offset + copied, reg);
+
+                copied += 8;
+            }
+
+            for _ in (0..(size - copied)).step_by(4) {
+                ASM::mov_reg32_base32(buf, reg, from_offset + copied);
+                ASM::mov_base32_reg32(buf, to_offset + copied, reg);
+
+                copied += 4;
+            }
+
+            for _ in (0..(size - copied)).step_by(2) {
+                ASM::mov_reg16_base32(buf, reg, from_offset + copied);
+                ASM::mov_base32_reg16(buf, to_offset + copied, reg);
+
+                copied += 2;
+            }
+
+            for _ in (0..(size - copied)).step_by(1) {
+                ASM::mov_reg8_base32(buf, reg, from_offset + copied);
+                ASM::mov_base32_reg8(buf, to_offset + copied, reg);
+
+                copied += 1;
+            }
+        });
+    }
+
     #[allow(dead_code)]
     /// Ensures that a register is free. If it is not free, data will be moved to make it free.
     pub fn ensure_reg_free(
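This helper is the "centralize stack movement" of the commit title: every stack-to-stack copy is broken into greedy chunks, 8 bytes while they fit, then 4, 2, and 1. One subtlety worth noting: `for _ in (0..(size - copied)).step_by(w)` iterates ceil((size - copied) / w) times, so for sizes that are not a multiple of 8 the first loop would overshoot; the call sites in this commit all assert size % 8 == 0, where the loops are exact. The intended decomposition is easier to see in this standalone sketch (mine, using explicit `while` conditions):

```rust
// Greedy chunk decomposition behind copy_symbol_to_stack_offset_help:
// every copy is broken into descending power-of-two chunk widths.
fn chunk_sizes(size: i32) -> Vec<i32> {
    let mut copied = 0;
    let mut chunks = Vec::new();
    for width in [8, 4, 2, 1] {
        while size - copied >= width {
            chunks.push(width); // one reg-sized load/store pair per chunk
            copied += width;
        }
    }
    chunks
}

fn main() {
    assert_eq!(chunk_sizes(24), vec![8, 8, 8]); // the size % 8 == 0 case used here
    assert_eq!(chunk_sizes(15), vec![8, 4, 2, 1]); // 15 = 8 + 4 + 2 + 1
    assert_eq!(chunk_sizes(3), vec![2, 1]);
}
```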
@@ -1271,18 +1271,45 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
     fn mov_freg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, offset: i32) {
         movsd_freg64_base64_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
     }

     #[inline(always)]
     fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
         mov_reg64_base64_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
     }
+    #[inline(always)]
+    fn mov_reg32_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
+        mov_reg32_base32_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
+    }
+    #[inline(always)]
+    fn mov_reg16_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
+        mov_reg16_base16_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
+    }
+    #[inline(always)]
+    fn mov_reg8_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
+        mov_reg8_base8_offset32(buf, dst, X86_64GeneralReg::RBP, offset)
+    }

     #[inline(always)]
     fn mov_base32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64FloatReg) {
         movsd_base64_offset32_freg64(buf, X86_64GeneralReg::RBP, offset, src)
     }

     #[inline(always)]
     fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
         mov_base64_offset32_reg64(buf, X86_64GeneralReg::RBP, offset, src)
     }
+    #[inline(always)]
+    fn mov_base32_reg32(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
+        mov_base32_offset32_reg32(buf, X86_64GeneralReg::RBP, offset, src)
+    }
+    #[inline(always)]
+    fn mov_base32_reg16(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
+        mov_base16_offset32_reg16(buf, X86_64GeneralReg::RBP, offset, src)
+    }
+    #[inline(always)]
+    fn mov_base32_reg8(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
+        mov_base8_offset32_reg8(buf, X86_64GeneralReg::RBP, offset, src)
+    }
+
+    #[inline(always)]
+    fn mov_reg64_mem64_offset32(
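On x86-64 every base32 method bottoms out in an RBP-relative move with a 32-bit displacement. As a sketch of the encoding work those helpers do (my illustration, not the repo's mov_reg32_base32_offset32): MOV r32, r/m32 is opcode 0x8B, and a ModRM byte with mod=10 and rm=101 selects [RBP + disp32].

```rust
// Hypothetical emitter for `mov r32, [rbp + disp32]`; registers are numbered
// 0..=15 in hardware order (0 = EAX, 8 = R8D, ...).
fn emit_mov_r32_rbp_disp32(buf: &mut Vec<u8>, reg: u8, offset: i32) {
    debug_assert!(reg < 16);
    if reg >= 8 {
        buf.push(0x44); // REX.R: extends the ModRM reg field for R8D..R15D
    }
    buf.push(0x8B); // MOV r32, r/m32
    buf.push(0x80 | ((reg & 0b111) << 3) | 0b101); // mod=10, reg, rm=101 (RBP)
    buf.extend_from_slice(&offset.to_le_bytes()); // disp32, little-endian
}
```

For example, emit_mov_r32_rbp_disp32(&mut buf, 0, -16) yields 8B 85 F0 FF FF FF, i.e. mov eax, [rbp - 0x10].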
@@ -819,6 +819,13 @@ trait Backend<'a> {
                 arg_layouts,
                 ret_layout,
             ),
+            LowLevel::StrFromUtf8Range => self.build_fn_call(
+                sym,
+                bitcode::STR_FROM_UTF8_RANGE.to_string(),
+                args,
+                arg_layouts,
+                ret_layout,
+            ),
             LowLevel::PtrCast => {
                 debug_assert_eq!(
                     1,
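This hunk follows the backend's standard fallback: a low-level op the dev backend does not implement natively is routed through build_fn_call to the matching builtin in the bitcode module. Wiring up another op would presumably look the same; the op and constant below are placeholders, not real entries:

```rust
// Hypothetical dispatch arm in the same style; `SomeNewOp` and `SOME_NEW_OP`
// are placeholders, not actual LowLevel or bitcode names.
LowLevel::SomeNewOp => self.build_fn_call(
    sym,
    bitcode::SOME_NEW_OP.to_string(), // name of the builtin function to call
    args,
    arg_layouts,
    ret_layout,
),
```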
@@ -577,7 +577,7 @@ fn str_starts_with_false_small_str() {
 }

 #[test]
-#[cfg(any(feature = "gen-llvm"))]
+#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
 fn str_from_utf8_pass_single_ascii() {
     assert_evals_to!(
         indoc!(