Merge branch 'main' into more-dev-aarch64-instructions

Fix merge conflict with fdffcc8b36:
- Add unused _register_width arguments
- Delete the old per-operation comparison functions,
  add empty signed_compare_reg64 and unsigned_compare_reg64 stubs
commit f9e31b828b
Ajai Nelson, 2023-04-09 13:01:51 -04:00
501 changed files with 32139 additions and 16180 deletions
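
In short, the resolution keeps the comparison refactor from main: the four
per-operation trait methods (ilt_/ult_/igt_/ugt_reg64_reg64_reg64) collapse
into two methods that take the comparison and the operand width as
parameters. A minimal sketch of the resulting Assembler trait surface,
copied from the signatures in the diff below (trait bounds elided):

    fn signed_compare_reg64(
        buf: &mut Vec<'_, u8>,
        register_width: RegisterWidth,
        operation: CompareOperation,
        dst: GeneralReg,
        src1: GeneralReg,
        src2: GeneralReg,
    );

    fn unsigned_compare_reg64(
        buf: &mut Vec<'_, u8>,
        register_width: RegisterWidth,
        operation: CompareOperation,
        dst: GeneralReg,
        src1: GeneralReg,
        src2: GeneralReg,
    );

x86_64 gets real implementations for LessThan and GreaterThan below (the
OrEqual variants remain todo!()); the AArch64 versions are todo!() stubs
throughout, which is what "empty" means in the message above.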


@@ -7,7 +7,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{InLayout, STLayoutInterner};
use super::CompareOperation;
use super::{CompareOperation, RegisterWidth};
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
@@ -876,6 +876,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
#[inline(always)]
fn eq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
@@ -887,6 +888,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
#[inline(always)]
fn neq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
@@ -895,28 +897,6 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
cset_reg64_cond(buf, dst, ConditionCode::NE);
}
#[inline(always)]
fn ilt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::LT);
}
#[inline(always)]
fn ult_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::CCLO);
}
#[inline(always)]
fn cmp_freg_freg_reg64(
_buf: &mut Vec<'_, u8>,
@@ -929,28 +909,6 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("registers float comparison for AArch64");
}
#[inline(always)]
fn igt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::GT);
}
#[inline(always)]
fn ugt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::HI);
}
#[inline(always)]
fn to_float_freg64_reg64(
_buf: &mut Vec<'_, u8>,
@@ -1091,6 +1049,28 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
fn sqrt_freg32_freg32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _src: AArch64FloatReg) {
todo!("sqrt")
}
fn signed_compare_reg64(
_buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
_operation: CompareOperation,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) {
todo!("signed compare")
}
fn unsigned_compare_reg64(
_buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
_operation: CompareOperation,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) {
todo!("unsigned compare")
}
}
impl AArch64Assembler {}
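
The deleted helpers above show the pattern the new stubs will presumably
grow into: a cmp to set the flags, then a cset on the matching condition
code. A hypothetical fill-in along those lines, assuming cmp_reg64_reg64
and cset_reg64_cond keep their current signatures (what _register_width
should do to the AArch64 encoding is left as open here as it is in the
stubs):

    fn signed_compare_reg64(
        buf: &mut Vec<'_, u8>,
        _register_width: RegisterWidth,
        operation: CompareOperation,
        dst: AArch64GeneralReg,
        src1: AArch64GeneralReg,
        src2: AArch64GeneralReg,
    ) {
        // Condition codes taken from the deleted ilt/igt helpers above.
        let cond = match operation {
            CompareOperation::LessThan => ConditionCode::LT,
            CompareOperation::GreaterThan => ConditionCode::GT,
            _ => todo!("signed <= and >= on AArch64"),
        };
        cmp_reg64_reg64(buf, src1, src2);
        cset_reg64_cond(buf, dst, cond);
    }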


@@ -27,6 +27,14 @@ use storage::{RegStorage, StorageManager};
// TODO: on all number functions double check and deal with over/underflow.
#[derive(Debug, Clone, Copy)]
pub enum RegisterWidth {
W8,
W16,
W32,
W64,
}
pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<GeneralReg, FloatReg>>:
Sized + Copy
{
@@ -390,6 +398,7 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
fn eq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@@ -397,20 +406,25 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
fn neq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn ilt_reg64_reg64_reg64(
fn signed_compare_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
operation: CompareOperation,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn ult_reg64_reg64_reg64(
fn unsigned_compare_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
operation: CompareOperation,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
@@ -425,20 +439,6 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
operation: CompareOperation,
);
fn igt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn ugt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
fn to_float_freg64_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
@@ -769,6 +769,10 @@ impl<
// Call function and generate reloc.
ASM::call(&mut self.buf, &mut self.relocs, fn_name);
self.move_return_value(dst, ret_layout)
}
fn move_return_value(&mut self, dst: &Symbol, ret_layout: &InLayout<'a>) {
// move return value to dst.
match *ret_layout {
single_register_integers!() => {
@@ -786,6 +790,9 @@ impl<
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
ASM::mov_reg64_reg64(&mut self.buf, dst_reg, CC::GENERAL_RETURN_REGS[0]);
}
Layout::LambdaSet(lambda_set) => {
self.move_return_value(dst, &lambda_set.runtime_representation())
}
_ => {
CC::load_returned_complex_symbol(
&mut self.buf,
@@ -1191,6 +1198,14 @@ impl<
fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) {
match *arg_layout {
single_register_int_builtins!() | Layout::BOOL => {
let width = match *arg_layout {
Layout::BOOL | Layout::I8 | Layout::U8 => RegisterWidth::W8,
Layout::I16 | Layout::U16 => RegisterWidth::W16,
Layout::U32 | Layout::I32 => RegisterWidth::W32,
Layout::I64 | Layout::U64 => RegisterWidth::W64,
_ => unreachable!(),
};
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
@@ -1198,7 +1213,7 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::eq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::eq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
}
Layout::STR => {
// use a zig call
@@ -1208,7 +1223,17 @@ impl<
&[*src1, *src2],
&[Layout::STR, Layout::STR],
&Layout::BOOL,
)
);
// mask the result; we pass booleans around as 64-bit values, but branch on 0x0 and 0x1.
// Zig gives back values where not all of the upper bits are zero, so we must clear them ourselves
let tmp = &Symbol::DEV_TMP;
let tmp_reg = self.storage_manager.claim_general_reg(&mut self.buf, tmp);
ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, true as i64);
let width = RegisterWidth::W8; // we're comparing booleans
let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
ASM::eq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
}
x => todo!("NumEq: layout, {:?}", x),
}
@@ -1217,6 +1242,14 @@ impl<
fn build_neq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>) {
match *arg_layout {
single_register_int_builtins!() | Layout::BOOL => {
let width = match *arg_layout {
Layout::BOOL | Layout::I8 | Layout::U8 => RegisterWidth::W8,
Layout::I16 | Layout::U16 => RegisterWidth::W16,
Layout::U32 | Layout::I32 => RegisterWidth::W32,
Layout::I64 | Layout::U64 => RegisterWidth::W64,
_ => unreachable!(),
};
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
@@ -1224,7 +1257,7 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::neq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
}
Layout::STR => {
self.build_fn_call(
@@ -1238,10 +1271,11 @@ impl<
// negate the result
let tmp = &Symbol::DEV_TMP;
let tmp_reg = self.storage_manager.claim_general_reg(&mut self.buf, tmp);
ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, 164);
ASM::mov_reg64_imm64(&mut self.buf, tmp_reg, true as i64);
let width = RegisterWidth::W8; // we're comparing booleans
let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
ASM::neq_reg64_reg64_reg64(&mut self.buf, dst_reg, dst_reg, tmp_reg);
ASM::neq_reg64_reg64_reg64(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
}
x => todo!("NumNeq: layout, {:?}", x),
}
@@ -1280,7 +1314,14 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::ilt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::signed_compare_reg64(
&mut self.buf,
RegisterWidth::W64,
CompareOperation::LessThan,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Int(IntWidth::U64)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1290,7 +1331,14 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::ult_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::unsigned_compare_reg64(
&mut self.buf,
RegisterWidth::W64,
CompareOperation::LessThan,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Float(width)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1326,7 +1374,14 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::igt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::signed_compare_reg64(
&mut self.buf,
RegisterWidth::W64,
CompareOperation::GreaterThan,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Int(IntWidth::U64)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -1336,7 +1391,14 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::ugt_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
ASM::unsigned_compare_reg64(
&mut self.buf,
RegisterWidth::W64,
CompareOperation::GreaterThan,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Float(width)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
@@ -2216,7 +2278,13 @@ impl<
}
fn load_literal(&mut self, sym: &Symbol, layout: &InLayout<'a>, lit: &Literal<'a>) {
match (lit, self.layout_interner.get(*layout)) {
let layout = self.layout_interner.get(*layout);
if let Layout::LambdaSet(lambda_set) = layout {
return self.load_literal(sym, &lambda_set.runtime_representation(), lit);
}
match (lit, layout) {
(
Literal::Int(x),
Layout::Builtin(Builtin::Int(
@@ -2263,8 +2331,7 @@ impl<
}
(Literal::Bool(x), Layout::Builtin(Builtin::Bool)) => {
let reg = self.storage_manager.claim_general_reg(&mut self.buf, sym);
let val = [*x as u8; 16];
ASM::mov_reg64_imm64(&mut self.buf, reg, i128::from_ne_bytes(val) as i64);
ASM::mov_reg64_imm64(&mut self.buf, reg, *x as i64);
}
(Literal::Float(x), Layout::Builtin(Builtin::Float(FloatWidth::F64))) => {
let reg = self.storage_manager.claim_float_reg(&mut self.buf, sym);
@@ -2371,6 +2438,9 @@ impl<
CC::GENERAL_RETURN_REGS[0],
);
}
Layout::LambdaSet(lambda_set) => {
self.return_symbol(sym, &lambda_set.runtime_representation())
}
_ => {
internal_error!("All primitive values should fit in a single register");
}
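
One note on these changes: build_eq and build_neq now repeat the same
layout-to-width match. A hypothetical helper (not part of this commit) that
both call sites could share, with arms copied from the diff above:

    // Map an integer/bool layout to the register width its comparison uses.
    fn int_register_width(layout: InLayout<'_>) -> RegisterWidth {
        match layout {
            Layout::BOOL | Layout::I8 | Layout::U8 => RegisterWidth::W8,
            Layout::I16 | Layout::U16 => RegisterWidth::W16,
            Layout::U32 | Layout::I32 => RegisterWidth::W32,
            Layout::I64 | Layout::U64 => RegisterWidth::W64,
            _ => unreachable!("only single-register int builtins reach here"),
        }
    }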


@@ -785,9 +785,22 @@ impl<
FloatWidth::F32 => todo!(),
},
Builtin::Bool => {
// same as 8-bit integer
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg8(buf, to_offset, reg);
// same as 8-bit integer, but we special-case true/false because these symbols
// are thunks and literal values
match *sym {
Symbol::BOOL_FALSE => {
let reg = self.claim_general_reg(buf, sym);
ASM::mov_reg64_imm64(buf, reg, false as i64)
}
Symbol::BOOL_TRUE => {
let reg = self.claim_general_reg(buf, sym);
ASM::mov_reg64_imm64(buf, reg, true as i64)
}
_ => {
let reg = self.load_to_general_reg(buf, sym);
ASM::mov_base32_reg8(buf, to_offset, reg);
}
}
}
Builtin::Decimal => todo!(),
Builtin::Str | Builtin::List(_) => {
@@ -1166,9 +1179,9 @@ impl<
Some(storages) => storages,
None => internal_error!("Jump: unknown point specified to jump to: {:?}", id),
};
for ((sym, layout), wanted_storage) in
args.iter().zip(arg_layouts).zip(param_storage.iter())
{
let it = args.iter().zip(arg_layouts).zip(param_storage.iter());
for ((sym, layout), wanted_storage) in it {
// Note: it is possible that the storage we want to move to is in use by one of the args we want to pass.
if self.get_storage_for_sym(sym) == wanted_storage {
continue;
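
On the Bool special case above: Bool.true and Bool.false reach the storage
manager as the dedicated symbols BOOL_TRUE / BOOL_FALSE (thunks as well as
literal values, per the comment), so there may be no existing storage to
load them from. A condensed, hypothetical view of what the two new arms
materialize into a freshly claimed register:

    // Not part of the diff; illustrates the two special-cased symbols only.
    let imm: i64 = match *sym {
        Symbol::BOOL_TRUE => true as i64,   // 1
        Symbol::BOOL_FALSE => false as i64, // 0
        _ => unreachable!("other symbols take the load_to_general_reg path"),
    };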


@@ -9,7 +9,7 @@ use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{InLayout, Layout, LayoutInterner, STLayoutInterner};
use super::CompareOperation;
use super::{CompareOperation, RegisterWidth};
// Not sure exactly how I want to represent registers.
// If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
@@ -1554,45 +1554,71 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
#[inline(always)]
fn eq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cmp_reg64_reg64(buf, register_width, src1, src2);
sete_reg64(buf, dst);
}
#[inline(always)]
fn neq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cmp_reg64_reg64(buf, register_width, src1, src2);
setne_reg64(buf, dst);
}
#[inline(always)]
fn ilt_reg64_reg64_reg64(
fn signed_compare_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
operation: CompareOperation,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
setl_reg64(buf, dst);
match operation {
CompareOperation::LessThan => {
cmp_reg64_reg64(buf, register_width, src1, src2);
setl_reg64(buf, dst);
}
CompareOperation::LessThanOrEqual => todo!(),
CompareOperation::GreaterThan => {
cmp_reg64_reg64(buf, register_width, src1, src2);
setg_reg64(buf, dst);
}
CompareOperation::GreaterThanOrEqual => todo!(),
}
}
#[inline(always)]
fn ult_reg64_reg64_reg64(
fn unsigned_compare_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
operation: CompareOperation,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
setb_reg64(buf, dst);
match operation {
CompareOperation::LessThan => {
cmp_reg64_reg64(buf, register_width, src1, src2);
setb_reg64(buf, dst);
}
CompareOperation::LessThanOrEqual => todo!(),
CompareOperation::GreaterThan => {
cmp_reg64_reg64(buf, register_width, src1, src2);
seta_reg64(buf, dst);
}
CompareOperation::GreaterThanOrEqual => todo!(),
}
}
#[inline(always)]
@@ -1622,28 +1648,6 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
};
}
#[inline(always)]
fn igt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
setg_reg64(buf, dst);
}
#[inline(always)]
fn ugt_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
seta_reg64(buf, dst);
}
#[inline(always)]
fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64GeneralReg) {
cvtsi2ss_freg64_reg64(buf, dst, src);
@@ -1671,7 +1675,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cmp_reg64_reg64(buf, RegisterWidth::W64, src1, src2);
setle_reg64(buf, dst);
}
@@ -1682,7 +1686,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
cmp_reg64_reg64(buf, src1, src2);
cmp_reg64_reg64(buf, RegisterWidth::W64, src1, src2);
setge_reg64(buf, dst);
}
@@ -1847,6 +1851,50 @@ fn add_reg_extension<T: RegTrait>(reg: T, byte: u8) -> u8 {
}
}
#[inline(always)]
fn binop_reg16_reg16(
op_code: u8,
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = (src as u8 % 8) << 3;
if dst_high || src_high {
let rex = add_rm_extension(dst, REX);
let rex = add_reg_extension(src, rex);
buf.extend([0x66, rex, op_code, 0xC0 | dst_mod | src_mod])
} else {
buf.extend([0x66, op_code, 0xC0 | dst_mod | src_mod]);
}
}
#[inline(always)]
fn binop_reg32_reg32(
op_code: u8,
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = (src as u8 % 8) << 3;
if dst_high || src_high {
let rex = add_rm_extension(dst, REX);
let rex = add_reg_extension(src, rex);
buf.extend([rex, op_code, 0xC0 | dst_mod | src_mod])
} else {
buf.extend([op_code, 0xC0 | dst_mod | src_mod]);
}
}
#[inline(always)]
fn binop_reg64_reg64(
op_code: u8,
@@ -2119,8 +2167,18 @@ fn cmp_reg64_imm32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, imm: i32) {
/// `CMP r/m64,r64` -> Compare r64 to r/m64.
#[inline(always)]
fn cmp_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
binop_reg64_reg64(0x39, buf, dst, src);
fn cmp_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
match register_width {
RegisterWidth::W8 => binop_reg64_reg64(0x38, buf, dst, src),
RegisterWidth::W16 => binop_reg16_reg16(0x39, buf, dst, src),
RegisterWidth::W32 => binop_reg32_reg32(0x39, buf, dst, src),
RegisterWidth::W64 => binop_reg64_reg64(0x39, buf, dst, src),
}
}
#[inline(always)]
@@ -2419,13 +2477,6 @@ fn mov_base8_offset32_reg8(
buf.extend(offset.to_le_bytes());
}
enum RegisterWidth {
W8,
W16,
W32,
W64,
}
#[inline(always)]
fn mov_reg_base_offset32(
buf: &mut Vec<'_, u8>,
@@ -3671,4 +3722,51 @@ mod tests {
ALL_FLOAT_REGS
);
}
#[test]
fn test_int_cmp() {
disassembler_test!(
cmp_reg64_reg64,
|_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
"cmp {}, {}",
dst.low_8bits_string(),
src.low_8bits_string()
),
[RegisterWidth::W8],
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);
disassembler_test!(
cmp_reg64_reg64,
|_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
"cmp {}, {}",
dbg!(dst.low_16bits_string()),
dbg!(src.low_16bits_string())
),
[RegisterWidth::W16],
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);
disassembler_test!(
cmp_reg64_reg64,
|_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!(
"cmp {}, {}",
dbg!(dst.low_32bits_string()),
dbg!(src.low_32bits_string())
),
[RegisterWidth::W32],
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);
disassembler_test!(
cmp_reg64_reg64,
|_, dst: X86_64GeneralReg, src: X86_64GeneralReg| format!("cmp {dst}, {src}",),
[RegisterWidth::W64],
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);
}
}
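
For reference, the width dispatch in the new cmp_reg64_reg64 maps directly
onto x86-64's operand-size machinery: opcode 0x38 is CMP r/m8,r8, opcode
0x39 is the 16/32/64-bit form, the 0x66 prefix emitted by binop_reg16_reg16
selects 16-bit operands, and REX.W selects 64-bit ones. A worked example of
the bytes emitted for dst = RAX, src = RCX (neither register needs a REX
extension), assuming binop_reg64_reg64 always emits REX.W as its name
suggests:

    // ModRM = 0xC0 | dst_mod | src_mod = 0xC0 | 0 | (1 << 3) = 0xC8
    // RegisterWidth::W8  -> 48 38 C8   cmp al,  cl   (REX.W is ignored by 8-bit opcodes)
    // RegisterWidth::W16 -> 66 39 C8   cmp ax,  cx   (0x66 operand-size override)
    // RegisterWidth::W32 -> 39 C8      cmp eax, ecx  (32-bit is the default operand size)
    // RegisterWidth::W64 -> 48 39 C8   cmp rax, rcx  (REX.W promotes to 64 bits)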


@@ -213,6 +213,7 @@ trait Backend<'a> {
self.free_symbols(stmt);
}
Stmt::Jump(id, args) => {
self.load_literal_symbols(args);
let mut arg_layouts: bumpalo::collections::Vec<InLayout<'a>> =
bumpalo::vec![in self.env().arena];
arg_layouts.reserve(args.len());
@@ -1130,6 +1131,9 @@ trait Backend<'a> {
ret_layout: &InLayout<'a>,
);
/// Move a returned value into `dst`
fn move_return_value(&mut self, dst: &Symbol, ret_layout: &InLayout<'a>);
/// build_num_abs stores the absolute value of src into dst.
fn build_num_abs(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
@@ -1501,6 +1505,7 @@ trait Backend<'a> {
self.set_last_seen(*sym, stmt);
match expr {
Expr::Literal(_) => {}
Expr::NullPointer => {}
Expr::Call(call) => self.scan_ast_call(call, stmt),