many extra gen_primitives tests for gen-dev

Folkert 2023-06-01 16:13:08 +02:00
parent 0e43510103
commit 515d60a138
7 changed files with 263 additions and 118 deletions

View file

@@ -668,12 +668,17 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
}
#[inline(always)]
fn jne_reg64_imm64_imm32(
buf: &mut Vec<'_, u8>,
fn jne_reg64_imm64_imm32<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, '_, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
reg: AArch64GeneralReg,
imm: u64,
offset: i32,
) -> usize {
) -> usize
where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
if imm < (1 << 12) {
cmp_reg64_imm12(buf, reg, imm as u16);
} else {
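Note on the AArch64 version: the new storage-manager argument is unused here (hence the _storage_manager underscore); it exists so the x86-64 implementation further down can borrow a scratch register. The imm < (1 << 12) fast path reflects that AArch64's CMP (immediate) encoding only holds a 12-bit unsigned value; larger immediates take the else branch, which is truncated in this hunk. A minimal sketch of that range check, with a hypothetical helper name:

    // Hypothetical helper, not part of the diff: mirrors the `imm < (1 << 12)`
    // test above. AArch64's CMP (immediate) encodes a 12-bit unsigned
    // immediate (0..=4095); anything larger needs a register operand.
    fn fits_cmp_imm12(imm: u64) -> bool {
        imm < (1 << 12)
    }

    fn main() {
        assert!(fits_cmp_imm12(4095)); // largest directly encodable value
        assert!(!fits_cmp_imm12(4096)); // must go through a register instead
    }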
@@ -1081,6 +1086,28 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
cset_reg64_cond(buf, dst, ConditionCode::NE);
}
fn eq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64FloatReg,
src2: AArch64FloatReg,
width: FloatWidth,
) {
fcmp_freg_freg(buf, width, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::EQ);
}
fn neq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src1: AArch64FloatReg,
src2: AArch64FloatReg,
width: FloatWidth,
) {
fcmp_freg_freg(buf, width, src1, src2);
cset_reg64_cond(buf, dst, ConditionCode::NE);
}
#[inline(always)]
fn cmp_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
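The two new AArch64 helpers above share one pattern: fcmp_freg_freg compares the float registers and sets the NZCV flags, then cset_reg64_cond writes 0 or 1 into dst for the chosen condition. On an unordered (NaN) comparison, fcmp leaves Z clear, so EQ produces 0 and NE produces 1, matching IEEE 754 semantics. A plain-Rust model of what the two emitted sequences compute, for illustration only:

    // Illustration: the semantics of `fcmp; cset EQ` and `fcmp; cset NE`.
    fn eq_f64(a: f64, b: f64) -> u64 {
        (a == b) as u64 // 0 for any NaN operand, like cset with EQ
    }

    fn neq_f64(a: f64, b: f64) -> u64 {
        (a != b) as u64 // 1 for any NaN operand, like cset with NE
    }

    fn main() {
        assert_eq!(eq_f64(1.5, 1.5), 1);
        assert_eq!(eq_f64(f64::NAN, f64::NAN), 0);
        assert_eq!(neq_f64(f64::NAN, f64::NAN), 1);
    }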

View file

@@ -250,12 +250,16 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
/// Jumps by an offset of `offset` bytes if `reg` is not equal to `imm`.
/// It should always generate the same number of bytes to enable replacement if offset changes.
/// It returns the base offset to calculate the jump from (generally the instruction after the jump).
fn jne_reg64_imm64_imm32(
buf: &mut Vec<'_, u8>,
fn jne_reg64_imm64_imm32<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, GeneralReg, FloatReg, ASM, CC>,
reg: GeneralReg,
imm: u64,
offset: i32,
) -> usize;
) -> usize
where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn mov_freg32_imm32(
buf: &mut Vec<'_, u8>,
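Threading the storage manager through this trait method is what forces the new generics: StorageManager is itself parameterized over the assembler and calling convention, so jne_reg64_imm64_imm32 has to name ASM and CC, and every implementation repeats the same where-bounds. A stripped-down sketch of that bound structure (types simplified; the real StorageManager also carries lifetimes and the register types):

    use std::marker::PhantomData;

    // Simplified stand-ins for the real types.
    struct StorageManager<ASM, CC>(PhantomData<(ASM, CC)>);

    trait CallConv<G, F, A> {}

    trait Assembler<G, F>: Sized {
        // Accepting a StorageManager means the method itself must be
        // generic over ASM and CC, with matching bounds.
        fn jne<ASM, CC>(buf: &mut Vec<u8>, storage: &mut StorageManager<ASM, CC>) -> usize
        where
            ASM: Assembler<G, F>,
            CC: CallConv<G, F, ASM>;
    }

    fn main() {}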
@@ -545,6 +549,22 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src2: GeneralReg,
);
fn eq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: FloatReg,
src2: FloatReg,
width: FloatWidth,
);
fn neq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: FloatReg,
src2: FloatReg,
width: FloatWidth,
);
fn cmp_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
@@ -964,7 +984,13 @@ impl<
// Create jump to next branch if cond_sym not equal to value.
// Since we don't know the offset yet, set it to 0 and overwrite later.
let jne_location = self.buf.len();
let start_offset = ASM::jne_reg64_imm64_imm32(&mut self.buf, cond_reg, *val, 0);
let start_offset = ASM::jne_reg64_imm64_imm32(
&mut self.buf,
&mut self.storage_manager,
cond_reg,
*val,
0,
);
// Build all statements in this branch, using the storage state from before any branch.
self.storage_manager = base_storage.clone();
@@ -980,7 +1006,13 @@ impl<
// Overwrite the original jne with the correct offset.
let end_offset = self.buf.len();
let jne_offset = end_offset - start_offset;
ASM::jne_reg64_imm64_imm32(&mut tmp, cond_reg, *val, jne_offset as i32);
ASM::jne_reg64_imm64_imm32(
&mut tmp,
&mut self.storage_manager,
cond_reg,
*val,
jne_offset as i32,
);
for (i, byte) in tmp.iter().enumerate() {
self.buf[jne_location + i] = *byte;
}
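This call site shows why the trait doc insists on a fixed byte count: the jump is first emitted with a placeholder offset of 0 and its position recorded, and once the branch body is built, the same instruction is regenerated into a scratch buffer with the real offset and copied over the placeholder byte for byte. A self-contained sketch of the pattern, using a toy fixed-width encoder rather than the real assembler:

    // Toy fixed-width "jne": 1 opcode byte + 4 offset bytes, always 5 bytes.
    fn emit_jne(buf: &mut Vec<u8>, offset: i32) -> usize {
        buf.push(0x75);
        buf.extend_from_slice(&offset.to_le_bytes());
        buf.len() // base offset: the position just after the jump
    }

    fn main() {
        let mut buf = Vec::new();
        let jne_location = buf.len();
        let start_offset = emit_jne(&mut buf, 0); // offset not known yet
        buf.extend_from_slice(&[0x90; 7]); // ...branch body...
        let jne_offset = (buf.len() - start_offset) as i32;
        let mut tmp = Vec::new();
        emit_jne(&mut tmp, jne_offset); // same length, real offset
        buf[jne_location..jne_location + tmp.len()].copy_from_slice(&tmp);
        assert_eq!(&buf[1..5], &7i32.to_le_bytes());
    }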
@@ -1485,8 +1517,22 @@ impl<
self.storage_manager.free_symbol(&tmp1_symbol);
self.storage_manager.free_symbol(&tmp2_symbol);
}
LayoutRepr::F32 => todo!("NumEq: layout, {:?}", self.layout_interner.dbg(Layout::F32)),
LayoutRepr::F64 => todo!("NumEq: layout, {:?}", self.layout_interner.dbg(Layout::F64)),
LayoutRepr::F32 | LayoutRepr::F64 => {
let float_width = if repr == LayoutRepr::F32 {
FloatWidth::F32
} else {
FloatWidth::F64
};
let buf = &mut self.buf;
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
let src_reg1 = self.storage_manager.load_to_float_reg(buf, src1);
let src_reg2 = self.storage_manager.load_to_float_reg(buf, src2);
ASM::eq_freg_freg_reg64(&mut self.buf, dst_reg, src_reg1, src_reg2, float_width)
}
LayoutRepr::DEC => todo!("NumEq: layout, {:?}", self.layout_interner.dbg(Layout::DEC)),
LayoutRepr::STR => {
// use a zig call
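The F32 | F64 arm above replaces two todo!s: both operands are loaded into float registers, dst claims a general-purpose register, and eq_freg_freg_reg64 materializes the boolean into it. A rough model of what the emitted code computes, with a hypothetical num_eq helper (the f32 truncation only stands in for operands having been loaded at 32-bit width):

    // Illustration only, not backend code.
    enum FloatWidth { F32, F64 }

    fn num_eq(width: FloatWidth, a: f64, b: f64) -> u64 {
        match width {
            FloatWidth::F32 => ((a as f32) == (b as f32)) as u64,
            FloatWidth::F64 => (a == b) as u64,
        }
    }

    fn main() {
        assert_eq!(num_eq(FloatWidth::F64, 0.1, 0.1), 1);
        assert_eq!(num_eq(FloatWidth::F64, f64::NAN, f64::NAN), 0);
    }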
@@ -3765,7 +3811,8 @@ impl<
// jump to where the pointer is valid, because it is already valid if non-zero
let jmp_start_index = self.buf.len();
let jmp_end_index = ASM::jne_reg64_imm64_imm32(&mut self.buf, src_reg, 0x0, 0);
let jmp_end_index =
ASM::jne_reg64_imm64_imm32(&mut self.buf, &mut self.storage_manager, src_reg, 0x0, 0);
self.free_symbol(&dst);
@@ -3788,6 +3835,7 @@ impl<
let destination_index = self.buf.len();
ASM::jne_reg64_imm64_imm32(
&mut tmp,
&mut self.storage_manager,
src_reg,
0x0,
(destination_index - jmp_end_index) as i32,
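Here the same jne helper doubles as a null check: comparing src_reg against 0x0 and jumping when they differ skips ahead whenever the pointer is already non-null. The patched offset is destination_index - jmp_end_index because the value returned from the first emission is, per the trait doc, the position just after the jump, and relative jumps count from there. The arithmetic, with illustrative positions:

    fn main() {
        let jmp_end_index: usize = 25; // returned by the first emission: the
                                       // byte right after the jump instruction
        let destination_index: usize = 60; // start of the "pointer is valid" code

        // Relative jumps are measured from the end of the jump instruction.
        let offset = (destination_index - jmp_end_index) as i32;
        assert_eq!(offset, 35);
    }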

View file

@@ -1432,18 +1432,29 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
#[inline(always)]
fn jne_reg64_imm64_imm32(
buf: &mut Vec<'_, u8>,
fn jne_reg64_imm64_imm32<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
reg: X86_64GeneralReg,
imm: u64,
offset: i32,
) -> usize {
) -> usize
where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
buf.reserve(13);
if imm > i32::MAX as u64 {
todo!("comparison with values greater than i32::max");
storage_manager.with_tmp_general_reg(buf, |_, buf, tmp| {
mov_reg64_imm64(buf, tmp, imm as _);
cmp_reg64_reg64(buf, RegisterWidth::W64, reg, tmp);
})
} else {
cmp_reg64_imm32(buf, reg, imm as i32);
}
cmp_reg64_imm32(buf, reg, imm as i32);
jne_imm32(buf, offset);
buf.len()
}
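This replaces the x86-64 todo! for large immediates: CMP r64, imm32 sign-extends its 32-bit immediate, so values above i32::MAX cannot be encoded directly. The new code borrows a scratch register from the storage manager, materializes the full 64-bit value with mov_reg64_imm64, and compares register to register; this is why the storage manager had to be threaded through the trait at all. For a fixed imm the same path is always taken, so the byte count stays deterministic and offset patching still works. The encodability test, mirrored in a standalone sketch:

    // Mirrors the `imm > i32::MAX as u64` test above: CMP r64, imm32
    // sign-extends, so only values that fit in i32 avoid the scratch register.
    fn fits_cmp_imm32(imm: u64) -> bool {
        imm <= i32::MAX as u64
    }

    fn main() {
        assert!(fits_cmp_imm32(2_147_483_647)); // i32::MAX: direct CMP
        assert!(!fits_cmp_imm32(2_147_483_648)); // mov_reg64_imm64 + CMP
    }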
@@ -1796,6 +1807,36 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
}
fn eq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
width: FloatWidth,
) {
match width {
FloatWidth::F32 => cmp_freg32_freg32(buf, src1, src2),
FloatWidth::F64 => cmp_freg64_freg64(buf, src1, src2),
}
sete_reg64(buf, dst);
}
fn neq_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
width: FloatWidth,
) {
match width {
FloatWidth::F32 => cmp_freg32_freg32(buf, src1, src2),
FloatWidth::F64 => cmp_freg64_freg64(buf, src1, src2),
}
setne_reg64(buf, dst);
}
#[inline(always)]
fn cmp_freg_freg_reg64(
buf: &mut Vec<'_, u8>,
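The x86-64 float helpers above compare with cmp_freg32_freg32 / cmp_freg64_freg64 and then materialize the flag with sete / setne. One hedged caveat, since those helpers' encodings are not shown in this diff: if they lower to ucomiss / ucomisd, an unordered (NaN) comparison sets ZF = 1, so a bare sete would report NaN == NaN as true; fully IEEE-correct equality on x86 typically also consults the parity flag. The AArch64 fcmp / cset pair above does not have this pitfall. What a ZF-only check computes, modeled in Rust:

    // Illustration: ucomis* sets ZF when operands are equal OR unordered.
    fn zf_after_ucomis(a: f64, b: f64) -> bool {
        a == b || a.is_nan() || b.is_nan()
    }

    fn main() {
        // A bare ZF test calls NaN "equal", unlike IEEE equality:
        assert!(zf_after_ucomis(f64::NAN, f64::NAN));
        assert!(f64::NAN != f64::NAN);
    }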

View file

@@ -1121,14 +1121,17 @@ trait Backend<'a> {
}
LowLevel::Eq => {
debug_assert_eq!(2, args.len(), "Eq: expected to have exactly two arguments");
let a = Layout::runtime_representation_in(arg_layouts[0], self.interner());
let b = Layout::runtime_representation_in(arg_layouts[1], self.interner());
debug_assert!(
self.interner().eq_repr(arg_layouts[0], arg_layouts[1],),
"Eq: expected all arguments of to have the same layout"
);
debug_assert!(
self.interner().eq_repr(Layout::BOOL, *ret_layout,),
"Eq: expected to have return layout of type Bool"
self.interner().eq_repr(a, b),
"Eq: expected all arguments to have the same layout, but {} != {}",
self.interner().dbg(a),
self.interner().dbg(b),
);
self.build_eq(sym, &args[0], &args[1], &arg_layouts[0])
}
LowLevel::NotEq => {
@@ -1137,9 +1140,15 @@ trait Backend<'a> {
args.len(),
"NotEq: expected to have exactly two argument"
);
let a = Layout::runtime_representation_in(arg_layouts[0], self.interner());
let b = Layout::runtime_representation_in(arg_layouts[1], self.interner());
debug_assert!(
self.interner().eq_repr(arg_layouts[0], arg_layouts[1],),
"NotEq: expected all arguments of to have the same layout"
self.interner().eq_repr(a, b),
"NotEq: expected all arguments to have the same layout, but {} != {}",
self.interner().dbg(a),
self.interner().dbg(b),
);
debug_assert!(
self.interner().eq_repr(Layout::BOOL, *ret_layout,),
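Both the Eq and NotEq assertions now compare runtime representations instead of raw layouts: Layout::runtime_representation_in normalizes layouts that differ only nominally down to the representation the generated code actually sees, and the message prints both sides via dbg rather than a fixed string. A sketch of why the normalization matters, with hypothetical stand-in types (the real code works on interned layouts):

    // Hypothetical stand-ins, not the real compiler types.
    #[derive(Debug, PartialEq)]
    enum Repr { U64 }

    enum Layout { U64, Newtype(&'static Layout) }

    // Models runtime_representation_in: unwrap down to the runtime repr.
    fn runtime_representation(layout: &Layout) -> Repr {
        match layout {
            Layout::U64 => Repr::U64,
            Layout::Newtype(inner) => runtime_representation(inner),
        }
    }

    fn main() {
        let plain = Layout::U64;
        let wrapped = Layout::Newtype(&Layout::U64);
        // Different layouts, same runtime representation:
        assert_eq!(runtime_representation(&plain), runtime_representation(&wrapped));
    }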
@@ -1546,6 +1555,21 @@ trait Backend<'a> {
args.len(),
"RefCountGetPtr: expected to have exactly one argument"
);
debug_assert_eq!(
self.interner().stack_size_and_alignment(arg_layouts[0]),
(8, 8),
"cannot pointer cast from source: {}",
self.interner().dbg(arg_layouts[0])
);
debug_assert_eq!(
self.interner().stack_size_and_alignment(*ret_layout),
(8, 8),
"cannot pointer cast to target: {}",
self.interner().dbg(*ret_layout)
);
self.build_ptr_cast(sym, &args[0])
}
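The new RefCountGetPtr assertions pin down build_ptr_cast's precondition: the argument and the result must both have the stack size and alignment of a machine pointer, (8, 8) on these 64-bit backends, because the cast just reinterprets bits. The same guard as a standalone sketch, with a hypothetical layout type:

    // Hypothetical layout info; the real check goes through the interner.
    struct LayoutInfo {
        stack_size: u32,
        alignment: u32,
    }

    fn assert_pointer_sized(layout: &LayoutInfo) {
        // A pointer cast only reinterprets bits, so source and target must
        // both be exactly pointer-shaped on a 64-bit target.
        debug_assert_eq!((layout.stack_size, layout.alignment), (8, 8));
    }

    fn main() {
        assert_pointer_sized(&LayoutInfo { stack_size: 8, alignment: 8 });
    }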
LowLevel::PtrWrite => {