Mirror of https://github.com/roc-lang/roc.git, synced 2025-09-26 21:39:07 +00:00
Merge remote-tracking branch 'origin/main' into Frame-Limited
This commit is contained in commit a3cea59bb6.
63 changed files with 3552 additions and 846 deletions
File diff suppressed because it is too large
@@ -77,4 +77,24 @@ macro_rules! disassembler_test {
             }
         }
     }};
+    ($assemble_fn: expr, $format_fn: expr, $iter:expr, $iter2:expr, $iter3:expr, $iter4:expr) => {{
+        use $crate::generic64::disassembler_test_macro::merge_instructions_without_line_numbers;
+        let arena = bumpalo::Bump::new();
+        let (mut buf, cs) = setup_capstone_and_arena(&arena);
+        for i in $iter.iter() {
+            for i2 in $iter2.iter() {
+                for i3 in $iter3.iter() {
+                    for i4 in $iter4.iter() {
+                        buf.clear();
+                        $assemble_fn(&mut buf, *i, *i2, *i3, *i4);
+                        let instructions = cs.disasm_all(&buf, 0).unwrap();
+                        assert_eq!(
+                            $format_fn(*i, *i2, *i3, *i4),
+                            merge_instructions_without_line_numbers(instructions)
+                        );
+                    }
+                }
+            }
+        }
+    }};
 }
@@ -27,6 +27,14 @@ use storage::{RegStorage, StorageManager};
 
 // TODO: on all number functions double check and deal with over/underflow.
 
+#[derive(Debug, Clone, Copy)]
+pub enum RegisterWidth {
+    W8,
+    W16,
+    W32,
+    W64,
+}
+
 pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<GeneralReg, FloatReg>>:
     Sized + Copy
 {
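Not part of the diff: a minimal sketch, assuming only the enum introduced above, of the operand size each `RegisterWidth` variant stands for. The width-aware assembler methods later in this diff use the variant to pick between 8-, 16-, 32- and 64-bit instruction encodings.

```rust
// Illustration only (not in this commit): the operand size each variant selects.
impl RegisterWidth {
    pub fn size_in_bits(self) -> u8 {
        match self {
            RegisterWidth::W8 => 8,
            RegisterWidth::W16 => 16,
            RegisterWidth::W32 => 32,
            RegisterWidth::W64 => 64,
        }
    }
}
```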
@@ -390,6 +398,7 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
 
     fn eq_reg64_reg64_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: GeneralReg,
         src1: GeneralReg,
         src2: GeneralReg,
@@ -397,20 +406,25 @@
 
     fn neq_reg64_reg64_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: GeneralReg,
         src1: GeneralReg,
         src2: GeneralReg,
     );
 
-    fn ilt_reg64_reg64_reg64(
+    fn signed_compare_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        operation: CompareOperation,
         dst: GeneralReg,
        src1: GeneralReg,
         src2: GeneralReg,
     );
 
-    fn ult_reg64_reg64_reg64(
+    fn unsigned_compare_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        operation: CompareOperation,
         dst: GeneralReg,
         src1: GeneralReg,
         src2: GeneralReg,
@@ -425,20 +439,6 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
         operation: CompareOperation,
     );
-
-    fn igt_reg64_reg64_reg64(
-        buf: &mut Vec<'_, u8>,
-        dst: GeneralReg,
-        src1: GeneralReg,
-        src2: GeneralReg,
-    );
-
-    fn ugt_reg64_reg64_reg64(
-        buf: &mut Vec<'_, u8>,
-        dst: GeneralReg,
-        src1: GeneralReg,
-        src2: GeneralReg,
-    );
 
     fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
 
     fn to_float_freg64_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
@@ -769,6 +769,10 @@ impl<
         // Call function and generate reloc.
         ASM::call(&mut self.buf, &mut self.relocs, fn_name);
 
+        self.move_return_value(dst, ret_layout)
+    }
+
+    fn move_return_value(&mut self, dst: &Symbol, ret_layout: &InLayout<'a>) {
         // move return value to dst.
         match *ret_layout {
             single_register_integers!() => {
@@ -786,6 +790,9 @@ impl<
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
                 ASM::mov_reg64_reg64(&mut self.buf, dst_reg, CC::GENERAL_RETURN_REGS[0]);
             }
+            Layout::LambdaSet(lambda_set) => {
+                self.move_return_value(dst, &lambda_set.runtime_representation())
+            }
             _ => {
                 CC::load_returned_complex_symbol(
                     &mut self.buf,
@@ -815,7 +822,11 @@ impl<
             .storage_manager
             .load_to_general_reg(&mut self.buf, cond_symbol);
 
+        // this state is updated destructively in the branches. We don't want the branches to
+        // influence each other, so we must clone here.
         let mut base_storage = self.storage_manager.clone();
+        let base_literal_map = self.literal_map.clone();
+
         let mut max_branch_stack_size = 0;
         let mut ret_jumps = bumpalo::vec![in self.env.arena];
         let mut tmp = bumpalo::vec![in self.env.arena];
@@ -829,6 +840,7 @@ impl<
 
             // Build all statements in this branch. Using storage as from before any branch.
             self.storage_manager = base_storage.clone();
+            self.literal_map = base_literal_map.clone();
            self.build_stmt(stmt, ret_layout);
 
             // Build unconditional jump to the end of this switch.
@@ -851,6 +863,7 @@ impl<
             base_storage.update_fn_call_stack_size(self.storage_manager.fn_call_stack_size());
         }
         self.storage_manager = base_storage;
+        self.literal_map = base_literal_map;
         self.storage_manager
             .update_stack_size(max_branch_stack_size);
         let (_branch_info, stmt) = default_branch;
@@ -1191,6 +1204,14 @@ impl<
     fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) {
         match *arg_layout {
             single_register_int_builtins!() | Layout::BOOL => {
+                let width = match *arg_layout {
+                    Layout::BOOL | Layout::I8 | Layout::U8 => RegisterWidth::W8,
+                    Layout::I16 | Layout::U16 => RegisterWidth::W16,
+                    Layout::U32 | Layout::I32 => RegisterWidth::W32,
+                    Layout::I64 | Layout::U64 => RegisterWidth::W64,
+                    _ => unreachable!(),
+                };
+
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
                 let src1_reg = self
                     .storage_manager
@@ -1198,7 +1219,7 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::eq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::eq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
             }
             Layout::STR => {
                 // use a zig call
@@ -1208,7 +1229,17 @@ impl<
                     &[*src1, *src2],
                     &[Layout::STR, Layout::STR],
                     &Layout::BOOL,
-                )
+                );
+
+                // mask the result; we pass booleans around as 64-bit values, but branch on 0x0 and 0x1.
+                // Zig gives back values where not all of the upper bits are zero, so we must clear them ourselves
+                let tmp = &Symbol::DEV_TMP;
+                let tmp_reg = self.storage_manager.claim_general_reg(&mut self.buf, tmp);
+                ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, true as i64);
+
+                let width = RegisterWidth::W8; // we're comparing booleans
+                let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
+                ASM::eq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
             }
             x => todo!("NumEq: layout, {:?}", x),
         }
@@ -1217,6 +1248,14 @@ impl<
     fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) {
         match *arg_layout {
             single_register_int_builtins!() | Layout::BOOL => {
+                let width = match *arg_layout {
+                    Layout::BOOL | Layout::I8 | Layout::U8 => RegisterWidth::W8,
+                    Layout::I16 | Layout::U16 => RegisterWidth::W16,
+                    Layout::U32 | Layout::I32 => RegisterWidth::W32,
+                    Layout::I64 | Layout::U64 => RegisterWidth::W64,
+                    _ => unreachable!(),
+                };
+
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
                 let src1_reg = self
                     .storage_manager
@@ -1224,7 +1263,7 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::neq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
             }
             Layout::STR => {
                 self.build_fn_call(
@@ -1238,10 +1277,11 @@ impl<
                 // negate the result
                 let tmp = &Symbol::DEV_TMP;
                 let tmp_reg = self.storage_manager.claim_general_reg(&mut self.buf, tmp);
-                ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, 164);
+                ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, true as i64);
 
+                let width = RegisterWidth::W8; // we're comparing booleans
                 let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
-                ASM::neq_reg64_reg64_reg64(&mut self.buf, dst_reg, dst_reg, tmp_reg);
+                ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
             }
             x => todo!("NumNeq: layout, {:?}", x),
         }
@@ -1280,7 +1320,14 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::ilt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::signed_compare_reg64(
+                    &mut self.buf,
+                    RegisterWidth::W64,
+                    CompareOperation::LessThan,
+                    dst_reg,
+                    src1_reg,
+                    src2_reg,
+                );
             }
             Layout::Builtin(Builtin::Int(IntWidth::U64)) => {
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1290,7 +1337,14 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::ult_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::unsigned_compare_reg64(
+                    &mut self.buf,
+                    RegisterWidth::W64,
+                    CompareOperation::LessThan,
+                    dst_reg,
+                    src1_reg,
+                    src2_reg,
+                );
             }
             Layout::Builtin(Builtin::Float(width)) => {
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1326,7 +1380,14 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::igt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::signed_compare_reg64(
+                    &mut self.buf,
+                    RegisterWidth::W64,
+                    CompareOperation::GreaterThan,
+                    dst_reg,
+                    src1_reg,
+                    src2_reg,
+                );
             }
             Layout::Builtin(Builtin::Int(IntWidth::U64)) => {
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1336,7 +1397,14 @@ impl<
                 let src2_reg = self
                     .storage_manager
                     .load_to_general_reg(&mut self.buf, src2);
-                ASM::ugt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+                ASM::unsigned_compare_reg64(
+                    &mut self.buf,
+                    RegisterWidth::W64,
+                    CompareOperation::GreaterThan,
+                    dst_reg,
+                    src1_reg,
+                    src2_reg,
+                );
             }
             Layout::Builtin(Builtin::Float(width)) => {
                 let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -2216,7 +2284,13 @@ impl<
     }
 
     fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>) {
-        match (lit, self.layout_interner.get(*layout)) {
+        let layout = self.layout_interner.get(*layout);
+
+        if let Layout::LambdaSet(lambda_set) = layout {
+            return self.load_literal(sym, &lambda_set.runtime_representation(), lit);
+        }
+
+        match (lit, layout) {
             (
                 Literal::Int(x),
                 Layout::Builtin(Builtin::Int(
@@ -2263,8 +2337,7 @@ impl<
             }
             (Literal::Bool(x), Layout::Builtin(Builtin::Bool)) => {
                 let reg = self.storage_manager.claim_general_reg(&mut self.buf, sym);
-                let val = [*x as u8; 16];
-                ASM::mov_reg64_imm64(&mut self.buf, reg, i128::from_ne_bytes(val) as i64);
+                ASM::mov_reg64_imm64(&mut self.buf, reg, *x as i64);
             }
             (Literal::Float(x), Layout::Builtin(Builtin::Float(FloatWidth::F64))) => {
                 let reg = self.storage_manager.claim_float_reg(&mut self.buf, sym);
@@ -2371,6 +2444,9 @@ impl<
                     CC::GENERAL_RETURN_REGS[0],
                 );
             }
+            Layout::LambdaSet(lambda_set) => {
+                self.return_symbol(sym, &lambda_set.runtime_representation())
+            }
             _ => {
                 internal_error!("All primitive values should fit in a single register");
             }
@@ -554,7 +554,10 @@ impl<
             let field_size = layout_interner.stack_size(*layout);
             data_offset += field_size as i32;
         }
-        debug_assert!(data_offset < base_offset + size as i32);
+
+        // check that the record completely contains the field
+        debug_assert!(data_offset <= base_offset + size as i32,);
+
         let layout = field_layouts[index as usize];
         let size = layout_interner.stack_size(layout);
         self.allocation_map.insert(*sym, owned_data);
@@ -686,14 +689,11 @@ impl<
                 let (data_size, data_alignment) =
                     union_layout.data_size_and_alignment(layout_interner, self.target_info);
                 let id_offset = data_size - data_alignment;
-                if data_alignment < 8 || data_alignment % 8 != 0 {
-                    todo!("small/unaligned tagging");
-                }
                 let base_offset = self.claim_stack_area(sym, data_size);
                 let mut current_offset = base_offset;
-                for (field, field_layout) in
-                    fields.iter().zip(field_layouts[tag_id as usize].iter())
-                {
+
+                let it = fields.iter().zip(field_layouts[tag_id as usize].iter());
+                for (field, field_layout) in it {
                     self.copy_symbol_to_stack_offset(
                         layout_interner,
                         buf,
@@ -704,10 +704,20 @@ impl<
                     let field_size = layout_interner.stack_size(*field_layout);
                     current_offset += field_size as i32;
                 }
+
+                // put the tag id in the right place
                 self.with_tmp_general_reg(buf, |_symbol_storage, buf, reg| {
                     ASM::mov_reg64_imm64(buf, reg, tag_id as i64);
-                    debug_assert!((base_offset + id_offset as i32) % 8 == 0);
-                    ASM::mov_base32_reg64(buf, base_offset + id_offset as i32, reg);
+
+                    let total_id_offset = base_offset as u32 + id_offset;
+                    debug_assert!(total_id_offset % data_alignment == 0);
+
+                    // pick the right instruction based on the alignment of the tag id
+                    if field_layouts.len() <= u8::MAX as _ {
+                        ASM::mov_base32_reg8(buf, total_id_offset as i32, reg);
+                    } else {
+                        ASM::mov_base32_reg16(buf, total_id_offset as i32, reg);
+                    }
                 });
             }
             x => todo!("creating unions with layout: {:?}", x),
@@ -785,9 +795,22 @@ impl<
                     FloatWidth::F32 => todo!(),
                 },
                 Builtin::Bool => {
-                    // same as 8-bit integer
-                    let reg = self.load_to_general_reg(buf, sym);
-                    ASM::mov_base32_reg8(buf, to_offset, reg);
+                    // same as 8-bit integer, but we special-case true/false because these symbols
+                    // are thunks and literal values
+                    match *sym {
+                        Symbol::BOOL_FALSE => {
+                            let reg = self.claim_general_reg(buf, sym);
+                            ASM::mov_reg64_imm64(buf, reg, false as i64)
+                        }
+                        Symbol::BOOL_TRUE => {
+                            let reg = self.claim_general_reg(buf, sym);
+                            ASM::mov_reg64_imm64(buf, reg, true as i64)
+                        }
+                        _ => {
+                            let reg = self.load_to_general_reg(buf, sym);
+                            ASM::mov_base32_reg8(buf, to_offset, reg);
+                        }
+                    }
                 }
                 Builtin::Decimal => todo!(),
                 Builtin::Str | Builtin::List(_) => {
@@ -1166,9 +1189,9 @@ impl<
             Some(storages) => storages,
             None => internal_error!("Jump: unknown point specified to jump to: {:?}", id),
         };
-        for ((sym, layout), wanted_storage) in
-            args.iter().zip(arg_layouts).zip(param_storage.iter())
-        {
+
+        let it = args.iter().zip(arg_layouts).zip(param_storage.iter());
+        for ((sym, layout), wanted_storage) in it {
             // Note: it is possible that the storage we want to move to is in use by one of the args we want to pass.
             if self.get_storage_for_sym(sym) == wanted_storage {
                 continue;
@@ -7,9 +7,9 @@ use bumpalo::collections::Vec;
 use roc_builtins::bitcode::FloatWidth;
 use roc_error_macros::internal_error;
 use roc_module::symbol::Symbol;
-use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner};
+use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner, UnionLayout};
 
-use super::CompareOperation;
+use super::{CompareOperation, RegisterWidth};
 
 // Not sure exactly how I want to represent registers.
 // If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
@@ -511,6 +511,24 @@ impl X64_64SystemVStoreArgs {
                 }
                 self.tmp_stack_offset += size as i32;
             }
+            Layout::Union(UnionLayout::NonRecursive(_)) => {
+                // for now, just also store this on the stack
+                let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
+                debug_assert_eq!(base_offset % 8, 0);
+                for i in (0..size as i32).step_by(8) {
+                    X86_64Assembler::mov_reg64_base32(
+                        buf,
+                        Self::GENERAL_RETURN_REGS[0],
+                        base_offset + i,
+                    );
+                    X86_64Assembler::mov_stack32_reg64(
+                        buf,
+                        self.tmp_stack_offset + i,
+                        Self::GENERAL_RETURN_REGS[0],
+                    );
+                }
+                self.tmp_stack_offset += size as i32;
+            }
             _ => {
                 todo!("calling with arg type, {:?}", layout_interner.dbg(other));
             }
@@ -615,6 +633,11 @@ impl X64_64SystemVLoadArgs {
                 storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
                 self.argument_offset += stack_size as i32;
             }
+            Layout::Union(UnionLayout::NonRecursive(_)) => {
+                // for now, just also store this on the stack
+                storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
+                self.argument_offset += stack_size as i32;
+            }
             _ => {
                 todo!("Loading args with layout {:?}", layout_interner.dbg(other));
             }
@@ -1554,45 +1577,71 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
     #[inline(always)]
     fn eq_reg64_reg64_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: X86_64GeneralReg,
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
+        cmp_reg64_reg64(buf, register_width, src1, src2);
         sete_reg64(buf, dst);
     }
 
     #[inline(always)]
     fn neq_reg64_reg64_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
         dst: X86_64GeneralReg,
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
+        cmp_reg64_reg64(buf, register_width, src1, src2);
         setne_reg64(buf, dst);
     }
 
     #[inline(always)]
-    fn ilt_reg64_reg64_reg64(
+    fn signed_compare_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        operation: CompareOperation,
         dst: X86_64GeneralReg,
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
-        setl_reg64(buf, dst);
+        match operation {
+            CompareOperation::LessThan => {
+                cmp_reg64_reg64(buf, register_width, src1, src2);
+                setl_reg64(buf, dst);
+            }
+            CompareOperation::LessThanOrEqual => todo!(),
+            CompareOperation::GreaterThan => {
+                cmp_reg64_reg64(buf, register_width, src1, src2);
+                setg_reg64(buf, dst);
+            }
+            CompareOperation::GreaterThanOrEqual => todo!(),
+        }
     }
 
     #[inline(always)]
-    fn ult_reg64_reg64_reg64(
+    fn unsigned_compare_reg64(
         buf: &mut Vec<'_, u8>,
+        register_width: RegisterWidth,
+        operation: CompareOperation,
         dst: X86_64GeneralReg,
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
-        setb_reg64(buf, dst);
+        match operation {
+            CompareOperation::LessThan => {
+                cmp_reg64_reg64(buf, register_width, src1, src2);
+                setb_reg64(buf, dst);
+            }
+            CompareOperation::LessThanOrEqual => todo!(),
+            CompareOperation::GreaterThan => {
+                cmp_reg64_reg64(buf, register_width, src1, src2);
+                seta_reg64(buf, dst);
+            }
+
+            CompareOperation::GreaterThanOrEqual => todo!(),
+        }
     }
 
     #[inline(always)]
@@ -1622,28 +1671,6 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
         };
     }
 
-    #[inline(always)]
-    fn igt_reg64_reg64_reg64(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        src1: X86_64GeneralReg,
-        src2: X86_64GeneralReg,
-    ) {
-        cmp_reg64_reg64(buf, src1, src2);
-        setg_reg64(buf, dst);
-    }
-
-    #[inline(always)]
-    fn ugt_reg64_reg64_reg64(
-        buf: &mut Vec<'_, u8>,
-        dst: X86_64GeneralReg,
-        src1: X86_64GeneralReg,
-        src2: X86_64GeneralReg,
-    ) {
-        cmp_reg64_reg64(buf, src1, src2);
-        seta_reg64(buf, dst);
-    }
-
     #[inline(always)]
     fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64GeneralReg) {
         cvtsi2ss_freg64_reg64(buf, dst, src);
@@ -1671,7 +1698,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
+        cmp_reg64_reg64(buf, RegisterWidth::W64, src1, src2);
         setle_reg64(buf, dst);
     }
 
@@ -1682,7 +1709,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
         src1: X86_64GeneralReg,
         src2: X86_64GeneralReg,
     ) {
-        cmp_reg64_reg64(buf, src1, src2);
+        cmp_reg64_reg64(buf, RegisterWidth::W64, src1, src2);
         setge_reg64(buf, dst);
     }
 
@@ -1847,6 +1874,50 @@ fn add_reg_extension<T: RegTrait>(reg: T, byte: u8) -> u8 {
     }
 }
 
+#[inline(always)]
+fn binop_reg16_reg16(
+    op_code: u8,
+    buf: &mut Vec<'_, u8>,
+    dst: X86_64GeneralReg,
+    src: X86_64GeneralReg,
+) {
+    let dst_high = dst as u8 > 7;
+    let dst_mod = dst as u8 % 8;
+    let src_high = src as u8 > 7;
+    let src_mod = (src as u8 % 8) << 3;
+
+    if dst_high || src_high {
+        let rex = add_rm_extension(dst, REX);
+        let rex = add_reg_extension(src, rex);
+
+        buf.extend([0x66, rex, op_code, 0xC0 | dst_mod | src_mod])
+    } else {
+        buf.extend([0x66, op_code, 0xC0 | dst_mod | src_mod]);
+    }
+}
+
+#[inline(always)]
+fn binop_reg32_reg32(
+    op_code: u8,
+    buf: &mut Vec<'_, u8>,
+    dst: X86_64GeneralReg,
+    src: X86_64GeneralReg,
+) {
+    let dst_high = dst as u8 > 7;
+    let dst_mod = dst as u8 % 8;
+    let src_high = src as u8 > 7;
+    let src_mod = (src as u8 % 8) << 3;
+
+    if dst_high || src_high {
+        let rex = add_rm_extension(dst, REX);
+        let rex = add_reg_extension(src, rex);
+
+        buf.extend([rex, op_code, 0xC0 | dst_mod | src_mod])
+    } else {
+        buf.extend([op_code, 0xC0 | dst_mod | src_mod]);
+    }
+}
+
 #[inline(always)]
 fn binop_reg64_reg64(
     op_code: u8,
@@ -2119,8 +2190,18 @@ fn cmp_reg64_imm32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, imm: i32) {
 
 /// `CMP r/m64,r64` -> Compare r64 to r/m64.
 #[inline(always)]
-fn cmp_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
-    binop_reg64_reg64(0x39, buf, dst, src);
+fn cmp_reg64_reg64(
+    buf: &mut Vec<'_, u8>,
+    register_width: RegisterWidth,
+    dst: X86_64GeneralReg,
+    src: X86_64GeneralReg,
+) {
+    match register_width {
+        RegisterWidth::W8 => binop_reg64_reg64(0x38, buf, dst, src),
+        RegisterWidth::W16 => binop_reg16_reg16(0x39, buf, dst, src),
+        RegisterWidth::W32 => binop_reg32_reg32(0x39, buf, dst, src),
+        RegisterWidth::W64 => binop_reg64_reg64(0x39, buf, dst, src),
+    }
 }
 
 #[inline(always)]
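For orientation (not part of the diff): the width dispatch above leans on standard x86-64 encoding rules. Opcode 0x38 is the 8-bit form of CMP and 0x39 the 16/32/64-bit form; a 0x66 prefix narrows 0x39 to 16-bit operands, and a REX.W prefix widens it to 64-bit. A standalone sketch of the canonical byte patterns for unextended registers (RAX as the r/m operand, RCX as the reg operand) — the exact bytes the helpers above emit may differ slightly for REX-extended registers:

```rust
// Sketch only: canonical CMP r/m, r encodings, not necessarily the exact bytes this backend emits.
// ModRM 0xC8 = 0b11_001_000: register-direct addressing, reg = RCX, r/m = RAX.
fn main() {
    let cmp_al_cl: &[u8] = &[0x38, 0xC8]; // 8-bit opcode
    let cmp_ax_cx: &[u8] = &[0x66, 0x39, 0xC8]; // 0x66 operand-size prefix -> 16-bit
    let cmp_eax_ecx: &[u8] = &[0x39, 0xC8]; // default operand size -> 32-bit
    let cmp_rax_rcx: &[u8] = &[0x48, 0x39, 0xC8]; // REX.W prefix -> 64-bit

    for bytes in [cmp_al_cl, cmp_ax_cx, cmp_eax_ecx, cmp_rax_rcx] {
        println!("{bytes:02X?}");
    }
}
```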
@@ -2419,13 +2500,6 @@ fn mov_base8_offset32_reg8(
     buf.extend(offset.to_le_bytes());
 }
 
-enum RegisterWidth {
-    W8,
-    W16,
-    W32,
-    W64,
-}
-
 #[inline(always)]
 fn mov_reg_base_offset32(
     buf: &mut Vec<'_, u8>,
@@ -3671,4 +3745,51 @@ mod tests {
             ALL_FLOAT_REGS
         );
     }
+
+    #[test]
+    fn test_int_cmp() {
+        disassembler_test!(
+            cmp_reg64_reg64,
+            |_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
+                "cmp {}, {}",
+                dst.low_8bits_string(),
+                src.low_8bits_string()
+            ),
+            [RegisterWidth::W8],
+            ALL_GENERAL_REGS,
+            ALL_GENERAL_REGS
+        );
+
+        disassembler_test!(
+            cmp_reg64_reg64,
+            |_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
+                "cmp {}, {}",
+                dbg!(dst.low_16bits_string()),
+                dbg!(src.low_16bits_string())
+            ),
+            [RegisterWidth::W16],
+            ALL_GENERAL_REGS,
+            ALL_GENERAL_REGS
+        );
+
+        disassembler_test!(
+            cmp_reg64_reg64,
+            |_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
+                "cmp {}, {}",
+                dbg!(dst.low_32bits_string()),
+                dbg!(src.low_32bits_string())
+            ),
+            [RegisterWidth::W32],
+            ALL_GENERAL_REGS,
+            ALL_GENERAL_REGS
+        );
+
+        disassembler_test!(
+            cmp_reg64_reg64,
+            |_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!("cmp {dst}, {src}",),
+            [RegisterWidth::W64],
+            ALL_GENERAL_REGS,
+            ALL_GENERAL_REGS
+        );
+    }
 }
@@ -18,7 +18,7 @@ use roc_mono::ir::{
     SelfRecursive, Stmt,
 };
 use roc_mono::layout::{
-    Builtin, InLayout, Layout, LayoutId, LayoutIds, LayoutInterner, STLayoutInterner, TagIdIntType,
+    Builtin, InLayout, Layout, LayoutIds, LayoutInterner, STLayoutInterner, TagIdIntType,
     UnionLayout,
 };
 use roc_mono::list_element_layout;
@@ -79,8 +79,40 @@ trait Backend<'a> {
         &mut CodeGenHelp<'a>,
     );
 
-    fn symbol_to_string(&self, symbol: Symbol, layout_id: LayoutId) -> String {
-        layout_id.to_symbol_string(symbol, self.interns())
+    fn function_symbol_to_string<'b, I>(
+        &self,
+        symbol: Symbol,
+        arguments: I,
+        _lambda_set: Option<InLayout>,
+        result: InLayout,
+    ) -> String
+    where
+        I: Iterator<Item = InLayout<'b>>,
+    {
+        use std::hash::{BuildHasher, Hash, Hasher};
+
+        // NOTE: due to randomness, this will not be consistent between runs
+        let mut state = roc_collections::all::BuildHasher::default().build_hasher();
+        for a in arguments {
+            a.hash(&mut state);
+        }
+
+        // lambda set should not matter; it should already be added as an argument
+        // lambda_set.hash(&mut state);
+
+        result.hash(&mut state);
+
+        let interns = self.interns();
+        let ident_string = symbol.as_str(interns);
+        let module_string = interns.module_ids.get_name(symbol.module_id()).unwrap();
+
+        // the functions from the generates #help module (refcounting, equality) is always suffixed
+        // with 1. That is fine, they are always unique anyway.
+        if ident_string.contains("#help") {
+            format!("{}_{}_1", module_string, ident_string)
+        } else {
+            format!("{}_{}_{}", module_string, ident_string, state.finish())
+        }
     }
 
     fn defined_in_app_module(&self, symbol: Symbol) -> bool {
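A rough sketch (not from this commit) of the naming scheme this introduces: the argument and return layouts are hashed into the suffix, so two specializations of the same Roc function get distinct linker symbols, while generated `#help` procs keep a fixed `_1` suffix. The sketch substitutes `std`'s `DefaultHasher` and plain strings for `roc_collections` and interned layouts.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical stand-in for function_symbol_to_string, for illustration only.
fn specialized_name(module: &str, ident: &str, arg_layouts: &[&str], ret: &str) -> String {
    let mut state = DefaultHasher::new();
    for a in arg_layouts {
        a.hash(&mut state);
    }
    ret.hash(&mut state);

    if ident.contains("#help") {
        format!("{module}_{ident}_1")
    } else {
        format!("{module}_{ident}_{}", state.finish())
    }
}

fn main() {
    // Two specializations of the same ident produce different symbol names.
    println!("{}", specialized_name("Num", "add", &["I64", "I64"], "I64"));
    println!("{}", specialized_name("Num", "add", &["F64", "F64"], "F64"));
}
```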
@@ -119,8 +151,13 @@ trait Backend<'a> {
         proc: Proc<'a>,
         layout_ids: &mut LayoutIds<'a>,
     ) -> (Vec<u8>, Vec<Relocation>, Vec<'a, (Symbol, String)>) {
-        let layout_id = layout_ids.get(proc.name.name(), &proc.ret_layout);
-        let proc_name = self.symbol_to_string(proc.name.name(), layout_id);
+        let proc_name = self.function_symbol_to_string(
+            proc.name.name(),
+            proc.args.iter().map(|t| t.0),
+            proc.closure_data_layout,
+            proc.ret_layout,
+        );
+
         self.reset(proc_name, proc.is_self_recursive);
         self.load_args(proc.args, &proc.ret_layout);
         for (layout, sym) in proc.args {
@@ -213,6 +250,7 @@ trait Backend<'a> {
                 self.free_symbols(stmt);
             }
             Stmt::Jump(id, args) => {
+                self.load_literal_symbols(args);
                 let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
                     bumpalo::vec![in self.env().arena];
                 arg_layouts.reserve(args.len());
@@ -303,8 +341,12 @@ trait Backend<'a> {
                     );
                 }
 
-                let layout_id = LayoutIds::default().get(func_sym.name(), layout);
-                let fn_name = self.symbol_to_string(func_sym.name(), layout_id);
+                let fn_name = self.function_symbol_to_string(
+                    func_sym.name(),
+                    arg_layouts.iter().copied(),
+                    None,
+                    *ret_layout,
+                );
 
                 // Now that the arguments are needed, load them if they are literals.
                 self.load_literal_symbols(arguments);
@@ -1080,8 +1122,12 @@ trait Backend<'a> {
             }
             Symbol::LIST_GET | Symbol::LIST_SET | Symbol::LIST_REPLACE | Symbol::LIST_APPEND => {
                 // TODO: This is probably simple enough to be worth inlining.
-                let layout_id = LayoutIds::default().get(func_sym, ret_layout);
-                let fn_name = self.symbol_to_string(func_sym, layout_id);
+                let fn_name = self.function_symbol_to_string(
+                    func_sym,
+                    arg_layouts.iter().copied(),
+                    None,
+                    *ret_layout,
+                );
                 // Now that the arguments are needed, load them if they are literals.
                 self.load_literal_symbols(args);
                 self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
@@ -1100,8 +1146,12 @@ trait Backend<'a> {
             }
             Symbol::STR_IS_VALID_SCALAR => {
                 // just call the function
-                let layout_id = LayoutIds::default().get(func_sym, ret_layout);
-                let fn_name = self.symbol_to_string(func_sym, layout_id);
+                let fn_name = self.function_symbol_to_string(
+                    func_sym,
+                    arg_layouts.iter().copied(),
+                    None,
+                    *ret_layout,
+                );
                 // Now that the arguments are needed, load them if they are literals.
                 self.load_literal_symbols(args);
                 self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
@@ -1110,8 +1160,12 @@ trait Backend<'a> {
                 eprintln!("maybe {other:?} should have a custom implementation?");
 
                 // just call the function
-                let layout_id = LayoutIds::default().get(func_sym, ret_layout);
-                let fn_name = self.symbol_to_string(func_sym, layout_id);
+                let fn_name = self.function_symbol_to_string(
+                    func_sym,
+                    arg_layouts.iter().copied(),
+                    None,
+                    *ret_layout,
+                );
                 // Now that the arguments are needed, load them if they are literals.
                 self.load_literal_symbols(args);
                 self.build_fn_call(sym, fn_name, args, arg_layouts, ret_layout)
@@ -1130,6 +1184,9 @@ trait Backend<'a> {
         ret_layout: &InLayout<'a>,
     );
 
+    /// Move a returned value into `dst`
+    fn move_return_value(&mut self, dst: &Symbol, ret_layout: &InLayout<'a>);
+
     /// build_num_abs stores the absolute value of src into dst.
     fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
 
@@ -261,8 +261,13 @@ fn build_object<'a, B: Backend<'a>>(
 
     // Names and linker data for helpers
     for ((sym, layout), proc) in helper_symbols_and_layouts.into_iter().zip(helper_procs) {
-        let layout_id = layout_ids.get_toplevel(sym, &layout);
-        let fn_name = backend.symbol_to_string(sym, layout_id);
+        let fn_name = backend.function_symbol_to_string(
+            sym,
+            layout.arguments.iter().copied(),
+            None,
+            layout.result,
+        );
+
         if let Some(proc_id) = output.symbol_id(fn_name.as_bytes()) {
             if let SymbolSection::Section(section_id) = output.symbol(proc_id).section {
                 helper_names_symbols_procs.push((fn_name, section_id, proc_id, proc));
@@ -327,8 +332,12 @@ fn build_proc_symbol<'a, B: Backend<'a>>(
     layout: ProcLayout<'a>,
     proc: Proc<'a>,
 ) {
-    let layout_id = layout_ids.get_toplevel(sym, &layout);
-    let base_name = backend.symbol_to_string(sym, layout_id);
+    let base_name = backend.function_symbol_to_string(
+        sym,
+        layout.arguments.iter().copied(),
+        None,
+        layout.result,
+    );
 
     let fn_name = if backend.env().exposed_to_host.contains(&sym) {
         layout_ids
@@ -459,6 +468,7 @@ fn build_proc<'a, B: Backend<'a>>(
             }
         }
     }
+
     if let Some(sym_id) = output.symbol_id(name.as_bytes()) {
         write::Relocation {
             offset: offset + proc_offset,