bitwise logical operators in the dev backend

Folkert 2022-09-03 15:48:57 +02:00
parent 4508a27fda
commit b7cb7bb786
5 changed files with 228 additions and 10 deletions


@@ -834,6 +834,33 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
    fn ret(buf: &mut Vec<'_, u8>) {
        ret_reg64(buf, AArch64GeneralReg::LR)
    }

    fn and_reg64_reg64_reg64(
        _buf: &mut Vec<'_, u8>,
        _dst: AArch64GeneralReg,
        _src1: AArch64GeneralReg,
        _src2: AArch64GeneralReg,
    ) {
        todo!("bitwise and for AArch64")
    }

    fn or_reg64_reg64_reg64(
        _buf: &mut Vec<'_, u8>,
        _dst: AArch64GeneralReg,
        _src1: AArch64GeneralReg,
        _src2: AArch64GeneralReg,
    ) {
        todo!("bitwise or for AArch64")
    }

    fn xor_reg64_reg64_reg64(
        _buf: &mut Vec<'_, u8>,
        _dst: AArch64GeneralReg,
        _src1: AArch64GeneralReg,
        _src2: AArch64GeneralReg,
    ) {
        todo!("bitwise xor for AArch64")
    }
}

impl AArch64Assembler {}
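The three todo!()s above have a direct single-instruction answer on AArch64: AND, ORR, and EOR (shifted register, 64-bit) share one layout and differ only in the top byte (0x8A, 0xAA, 0xCA). A minimal sketch of the encoding, with plain u8 register numbers and a std Vec<u8> standing in for the crate's AArch64GeneralReg and bumpalo buffer, so nothing here is the crate's actual helper:

// Hedged sketch: AND/ORR/EOR (shifted register), sf=1 (64-bit), shift amount 0.
// base: AND = 0x8A00_0000, ORR = 0xAA00_0000, EOR = 0xCA00_0000.
fn logical_reg64_reg64_reg64(buf: &mut Vec<u8>, base: u32, dst: u8, src1: u8, src2: u8) {
    // Rd in bits 0-4, Rn in bits 5-9, Rm in bits 16-20
    let inst = base | (u32::from(src2) << 16) | (u32::from(src1) << 5) | u32::from(dst);
    // AArch64 instructions are fixed-width 32-bit words, stored little-endian
    buf.extend(inst.to_le_bytes());
}

fn main() {
    let mut buf = Vec::new();
    logical_reg64_reg64_reg64(&mut buf, 0x8A00_0000, 0, 1, 2); // and x0, x1, x2
    assert_eq!(buf, [0x20, 0x00, 0x02, 0x8A]);
}

One helper covers all three ops by swapping the base opcode, which is why the todo!()s should be cheap to fill in.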


@@ -143,6 +143,27 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
        src2: GeneralReg,
    );

    fn and_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: GeneralReg,
        src1: GeneralReg,
        src2: GeneralReg,
    );

    fn or_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: GeneralReg,
        src1: GeneralReg,
        src2: GeneralReg,
    );

    fn xor_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: GeneralReg,
        src1: GeneralReg,
        src2: GeneralReg,
    );

    fn call(buf: &mut Vec<'_, u8>, relocs: &mut Vec<'_, Relocation>, fn_name: String);

    /// Unconditionally jumps by `offset` bytes.
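Read in isolation, the contract of the three new declarations is: write the encoding of `dst := src1 op src2` into `buf`. A self-contained mock makes that concrete; the trait below is a simplified stand-in with plain u8 registers and no RegTrait bounds, not the crate's Assembler:

trait BitwiseAssembler {
    fn and_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8);
    fn or_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8);
    fn xor_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8);
}

/// A test double that records ops as tagged byte quadruples instead of machine code.
struct MockAsm;

impl BitwiseAssembler for MockAsm {
    fn and_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8) {
        buf.extend([b'&', dst, src1, src2]);
    }
    fn or_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8) {
        buf.extend([b'|', dst, src1, src2]);
    }
    fn xor_reg64_reg64_reg64(buf: &mut Vec<u8>, dst: u8, src1: u8, src2: u8) {
        buf.extend([b'^', dst, src1, src2]);
    }
}

fn main() {
    let mut buf = Vec::new();
    MockAsm::xor_reg64_reg64_reg64(&mut buf, 0, 1, 2);
    assert_eq!(buf, [b'^', 0, 1, 2]);
}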
@@ -1600,6 +1621,66 @@ impl<
            offset,
        });
    }

    fn build_int_bitwise_and(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    ) {
        let buf = &mut self.buf;
        match int_width {
            IntWidth::U128 | IntWidth::I128 => todo!(),
            _ => {
                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
                ASM::and_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
            }
        }
    }

    fn build_int_bitwise_or(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    ) {
        let buf = &mut self.buf;
        match int_width {
            IntWidth::U128 | IntWidth::I128 => todo!(),
            _ => {
                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
                ASM::or_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
            }
        }
    }

    fn build_int_bitwise_xor(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    ) {
        let buf = &mut self.buf;
        match int_width {
            IntWidth::U128 | IntWidth::I128 => todo!(),
            _ => {
                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
                ASM::xor_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
            }
        }
    }
}
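Two properties make these builders sound. First, they can ignore `int_width` for every width up to 64 bits: `&`, `|`, and `^` act on each bit independently, so the low N bits of the 64-bit result are exactly the N-bit result no matter what the upper register bits hold. Second, the `U128 | I128` arms left as todo!() could later lower to two independent 64-bit ops on the low and high halves (that decomposition is an assumption about future work, not something this commit states). Both facts are easy to check on the host:

fn main() {
    // Width independence: garbage above bit 7 never leaks into the low byte.
    let (a, b): (u8, u8) = (0b1100, 0b1010);
    let wide_a = (a as u64) | 0xDEAD_BEEF_0000_0000;
    let wide_b = (b as u64) | 0x1234_5678_0000_0000;
    assert_eq!((wide_a & wide_b) as u8, a & b);
    assert_eq!((wide_a | wide_b) as u8, a | b);
    assert_eq!((wide_a ^ wide_b) as u8, a ^ b);

    // 128-bit bitwise ops split into one 64-bit op per half; there are no carries to thread.
    let (x, y): (u128, u128) = (u128::MAX - 7, (0x0F0F_F0F0 << 96) | 0xFFFF);
    let lo = (x as u64) & (y as u64);
    let hi = ((x >> 64) as u64) & ((y >> 64) as u64);
    assert_eq!(((hi as u128) << 64) | (lo as u128), x & y);
}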
/// This impl block is for IR-related instructions that need backend-specific information.


@@ -1408,6 +1408,54 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
    fn set_if_overflow(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
        seto_reg64(buf, dst);
    }

    fn and_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: X86_64GeneralReg,
        src1: X86_64GeneralReg,
        src2: X86_64GeneralReg,
    ) {
        if dst == src1 {
            and_reg64_reg64(buf, dst, src2);
        } else if dst == src2 {
            and_reg64_reg64(buf, dst, src1);
        } else {
            mov_reg64_reg64(buf, dst, src1);
            and_reg64_reg64(buf, dst, src2);
        }
    }

    fn or_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: X86_64GeneralReg,
        src1: X86_64GeneralReg,
        src2: X86_64GeneralReg,
    ) {
        if dst == src1 {
            or_reg64_reg64(buf, dst, src2);
        } else if dst == src2 {
            or_reg64_reg64(buf, dst, src1);
        } else {
            mov_reg64_reg64(buf, dst, src1);
            or_reg64_reg64(buf, dst, src2);
        }
    }

    fn xor_reg64_reg64_reg64(
        buf: &mut Vec<'_, u8>,
        dst: X86_64GeneralReg,
        src1: X86_64GeneralReg,
        src2: X86_64GeneralReg,
    ) {
        if dst == src1 {
            xor_reg64_reg64(buf, dst, src2);
        } else if dst == src2 {
            xor_reg64_reg64(buf, dst, src1);
        } else {
            mov_reg64_reg64(buf, dst, src1);
            xor_reg64_reg64(buf, dst, src2);
        }
    }
}
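x86-64 `and`/`or`/`xor` are two-operand instructions that overwrite their first operand, so the three-operand trait calls need the aliasing analysis above: operate in place when `dst` already holds a source, otherwise `mov` first. The `dst == src2` branch silently swaps the operands, which is only correct because all three ops are commutative; a non-commutative op such as `sub` would need extra care there. The same logic modeled on the host, with an array as the register file:

/// Model of and_reg64_reg64_reg64's lowering: regs[dst] = regs[src1] & regs[src2].
fn and3(regs: &mut [u64; 16], dst: usize, src1: usize, src2: usize) {
    if dst == src1 {
        regs[dst] &= regs[src2]; // and dst, src2
    } else if dst == src2 {
        // `mov dst, src1` would clobber src2 here; swapping is safe since & commutes
        regs[dst] &= regs[src1]; // and dst, src1
    } else {
        regs[dst] = regs[src1]; // mov dst, src1
        regs[dst] &= regs[src2]; // and dst, src2
    }
}

fn main() {
    let mut regs = [0u64; 16];
    regs[1] = 0b1100;
    regs[2] = 0b1010;
    and3(&mut regs, 2, 1, 2); // dst aliases src2
    assert_eq!(regs[2], 0b1000);
}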
impl X86_64Assembler {
@@ -1511,6 +1559,27 @@ fn add_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
    binop_reg64_reg64(0x01, buf, dst, src);
}

/// `AND r64,r/m64` -> Bitwise logical and r/m64 to r64.
#[inline(always)]
fn and_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
    // NOTE: src and dst are flipped by design; opcode 0x23 takes the destination in ModRM.reg
    binop_reg64_reg64(0x23, buf, src, dst);
}

/// `OR r64,r/m64` -> Bitwise logical or r/m64 to r64.
#[inline(always)]
fn or_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
    // NOTE: src and dst are flipped by design; opcode 0x0B takes the destination in ModRM.reg
    binop_reg64_reg64(0x0B, buf, src, dst);
}

/// `XOR r64,r/m64` -> Bitwise logical exclusive or r/m64 to r64.
#[inline(always)]
fn xor_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
    // NOTE: src and dst are flipped by design; opcode 0x33 takes the destination in ModRM.reg
    binop_reg64_reg64(0x33, buf, src, dst);
}

/// `ADDSD xmm1,xmm2/m64` -> Add the low double-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
#[inline(always)]
fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
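About the "flipped by design" note: judging from `add_reg64_reg64`, `binop_reg64_reg64(op, buf, a, b)` puts `a` in ModRM.rm and `b` in ModRM.reg (an inference from this diff, not a documented contract). Opcodes 0x23/0x0B/0x33 are the `r64, r/m64` forms, which read the destination from ModRM.reg, hence passing `(src, dst)`. Hand-assembling `and rax, rcx` in that form:

/// Hedged hand-rolled encoding of `and rax, rcx` via opcode 0x23.
fn encode_and_rax_rcx() -> [u8; 3] {
    let rex_w = 0x48; // REX.W prefix: 64-bit operand size
    let opcode = 0x23; // AND r64, r/m64: destination lives in ModRM.reg
    let modrm = 0b11_000_001; // mod=11 (register direct), reg=rax(0), rm=rcx(1)
    [rex_w, opcode, modrm]
}

fn main() {
    assert_eq!(encode_and_rax_rcx(), [0x48, 0x23, 0xC1]);
}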
@@ -2189,13 +2258,6 @@ fn push_reg64(buf: &mut Vec<'_, u8>, reg: X86_64GeneralReg) {
    }
}

/// `XOR r/m64,r64` -> Xor r64 to r/m64.
#[inline(always)]
#[allow(dead_code)]
fn xor_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
    binop_reg64_reg64(0x31, buf, dst, src);
}

// When writing tests, it is a good idea to test both a numbered and an unnumbered register.
// This is because R8-R15 often have special instruction prefixes.
#[cfg(test)]
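Following that comment, here is the kind of assertion such a test could make: with R8/R9 the same `and` needs REX.R and REX.B, so the prefix changes from 0x48 to 0x4D while ModRM stays 0xC1. This sketch uses plain assert_eq! on hand-computed bytes; the crate's real test harness may well differ:

/// reg/rm are the low 3 bits of each register; ext_* set REX.R / REX.B for R8-R15.
fn expected_and(reg: u8, rm: u8, ext_reg: bool, ext_rm: bool) -> [u8; 3] {
    let rex = 0x48 | (u8::from(ext_reg) << 2) | u8::from(ext_rm);
    [rex, 0x23, 0b1100_0000 | (reg << 3) | rm]
}

#[test]
fn test_and_reg64_reg64_numbered_and_unnumbered() {
    // and rax, rcx -> 48 23 C1
    assert_eq!(expected_and(0, 1, false, false), [0x48, 0x23, 0xC1]);
    // and r8, r9 -> 4D 23 C1 (REX.W|R|B)
    assert_eq!(expected_and(0, 1, true, true), [0x4D, 0x23, 0xC1]);
}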


@@ -499,6 +499,27 @@ trait Backend<'a> {
                );
                self.build_num_sub(sym, &args[0], &args[1], ret_layout)
            }
            LowLevel::NumBitwiseAnd => {
                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
                    self.build_int_bitwise_and(sym, &args[0], &args[1], *int_width)
                } else {
                    internal_error!("bitwise and on a non-integer")
                }
            }
            LowLevel::NumBitwiseOr => {
                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
                    self.build_int_bitwise_or(sym, &args[0], &args[1], *int_width)
                } else {
                    internal_error!("bitwise or on a non-integer")
                }
            }
            LowLevel::NumBitwiseXor => {
                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
                    self.build_int_bitwise_xor(sym, &args[0], &args[1], *int_width)
                } else {
                    internal_error!("bitwise xor on a non-integer")
                }
            }
            LowLevel::Eq => {
                debug_assert_eq!(2, args.len(), "Eq: expected to have exactly two arguments");
                debug_assert_eq!(
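All three new arms share one guard: a bitwise op may only be emitted when `ret_layout` is a builtin integer, whose width is exactly what the builder needs; any other layout is a compiler bug (`internal_error!`), not a user-facing error. The shape of that check, modeled with stand-in enums rather than the crate's Layout/Builtin:

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum IntWidth { U8, U64, U128 }

#[allow(dead_code)]
enum Layout { Int(IntWidth), Float64, Str }

/// Mirrors the `if let Layout::Builtin(Builtin::Int(int_width))` guard above.
fn bitwise_int_width(ret_layout: &Layout) -> IntWidth {
    match ret_layout {
        Layout::Int(w) => *w,
        _ => unreachable!("bitwise op on a non-integer"), // internal_error! in the real code
    }
}

fn main() {
    assert_eq!(bitwise_int_width(&Layout::Int(IntWidth::U64)), IntWidth::U64);
}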
@@ -750,6 +771,33 @@ trait Backend<'a> {
    /// build_num_sub stores the `src1 - src2` difference into dst.
    fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);

    /// build_int_bitwise_and stores `src1 & src2` into dst.
    fn build_int_bitwise_and(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    );

    /// build_int_bitwise_or stores `src1 | src2` into dst.
    fn build_int_bitwise_or(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    );

    /// build_int_bitwise_xor stores `src1 ^ src2` into dst.
    fn build_int_bitwise_xor(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
        int_width: IntWidth,
    );

    /// build_eq stores the result of `src1 == src2` into dst.
    fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>);
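Taken together, the commit threads each bitwise op through three layers: the LowLevel dispatch picks the op and extracts the width, the Backend method maps symbols to registers, and the Assembler method emits bytes. A compressed, self-contained model of that pipeline; every name below is a simplified stand-in for the crate's generic machinery:

use std::collections::HashMap;

type Symbol = &'static str;

/// Assembler layer stand-in: emit one pseudo-instruction.
fn asm_and_reg64(buf: &mut Vec<String>, dst: u8, src1: u8, src2: u8) {
    buf.push(format!("and r{dst}, r{src1}, r{src2}"));
}

/// Backend layer stand-in: resolve symbols to registers, then delegate.
struct MiniBackend {
    buf: Vec<String>,
    regs: HashMap<Symbol, u8>,
    next_reg: u8,
}

impl MiniBackend {
    /// Trivial register "allocator": first come, first served.
    fn reg_for(&mut self, sym: Symbol) -> u8 {
        let next = &mut self.next_reg;
        *self.regs.entry(sym).or_insert_with(|| {
            *next += 1;
            *next - 1
        })
    }

    /// What build_int_bitwise_and boils down to, minus real storage management.
    fn build_int_bitwise_and(&mut self, dst: Symbol, src1: Symbol, src2: Symbol) {
        let (d, s1, s2) = (self.reg_for(dst), self.reg_for(src1), self.reg_for(src2));
        asm_and_reg64(&mut self.buf, d, s1, s2);
    }
}

fn main() {
    let mut backend = MiniBackend { buf: Vec::new(), regs: HashMap::new(), next_reg: 0 };
    // What the LowLevel::NumBitwiseAnd arm ultimately triggers for `a & b`:
    backend.build_int_bitwise_and("result", "a", "b");
    assert_eq!(backend.buf, ["and r0, r1, r2"]);
}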