From b7cb7bb786304e647c3fd75e7b4a963865aa2f8c Mon Sep 17 00:00:00 2001
From: Folkert
Date: Sat, 3 Sep 2022 15:48:57 +0200
Subject: [PATCH] bitwise logical operators in the dev backend

---
 .../compiler/gen_dev/src/generic64/aarch64.rs | 27 +++++++
 crates/compiler/gen_dev/src/generic64/mod.rs  | 81 +++++++++++++++++++
 .../compiler/gen_dev/src/generic64/x86_64.rs  | 76 +++++++++++++++--
 crates/compiler/gen_dev/src/lib.rs            | 48 +++++++++++
 crates/compiler/test_gen/src/gen_num.rs       |  6 +-
 5 files changed, 228 insertions(+), 10 deletions(-)

diff --git a/crates/compiler/gen_dev/src/generic64/aarch64.rs b/crates/compiler/gen_dev/src/generic64/aarch64.rs
index 32309279a4..806b6b765f 100644
--- a/crates/compiler/gen_dev/src/generic64/aarch64.rs
+++ b/crates/compiler/gen_dev/src/generic64/aarch64.rs
@@ -834,6 +834,33 @@ impl Assembler for AArch64Assembler {
     fn ret(buf: &mut Vec<'_, u8>) {
         ret_reg64(buf, AArch64GeneralReg::LR)
     }
+
+    fn and_reg64_reg64_reg64(
+        _buf: &mut Vec<'_, u8>,
+        _dst: AArch64GeneralReg,
+        _src1: AArch64GeneralReg,
+        _src2: AArch64GeneralReg,
+    ) {
+        todo!("bitwise and for AArch64")
+    }
+
+    fn or_reg64_reg64_reg64(
+        _buf: &mut Vec<'_, u8>,
+        _dst: AArch64GeneralReg,
+        _src1: AArch64GeneralReg,
+        _src2: AArch64GeneralReg,
+    ) {
+        todo!("bitwise or for AArch64")
+    }
+
+    fn xor_reg64_reg64_reg64(
+        _buf: &mut Vec<'_, u8>,
+        _dst: AArch64GeneralReg,
+        _src1: AArch64GeneralReg,
+        _src2: AArch64GeneralReg,
+    ) {
+        todo!("bitwise xor for AArch64")
+    }
 }
 
 impl AArch64Assembler {}
diff --git a/crates/compiler/gen_dev/src/generic64/mod.rs b/crates/compiler/gen_dev/src/generic64/mod.rs
index a271db6c53..6c0d63f292 100644
--- a/crates/compiler/gen_dev/src/generic64/mod.rs
+++ b/crates/compiler/gen_dev/src/generic64/mod.rs
@@ -143,6 +143,27 @@ pub trait Assembler: Sized + Copy {
         src2: GeneralReg,
     );
 
+    fn and_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
+
+    fn or_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
+
+    fn xor_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
+
     fn call(buf: &mut Vec<'_, u8>, relocs: &mut Vec<'_, Relocation>, fn_name: String);
 
     /// Jumps by an offset of offset bytes unconditionally.
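Aside (not part of the patch): unlike x86-64, AArch64 has true three-operand register forms of these ops (`AND`/`ORR`/`EOR`, shifted-register encoding with shift amount 0), so filling in the `todo!` stubs above needs none of the destination-aliasing handling the x86-64 implementation below performs. A minimal sketch of the encodings, assuming plain `u8` register numbers and a hypothetical `encode_logical_reg64` helper rather than the repo's `AArch64GeneralReg` type:

```rust
/// Append one 64-bit three-operand logical op (`AND/ORR/EOR Xd, Xn, Xm`,
/// shifted-register form, shift amount 0) as little-endian bytes.
fn encode_logical_reg64(buf: &mut Vec<u8>, base: u32, rd: u8, rn: u8, rm: u8) {
    let inst = base | ((rm as u32) << 16) | ((rn as u32) << 5) | (rd as u32);
    buf.extend_from_slice(&inst.to_le_bytes());
}

fn and_reg64(buf: &mut Vec<u8>, rd: u8, rn: u8, rm: u8) {
    encode_logical_reg64(buf, 0x8A00_0000, rd, rn, rm); // AND Xd, Xn, Xm
}

fn orr_reg64(buf: &mut Vec<u8>, rd: u8, rn: u8, rm: u8) {
    encode_logical_reg64(buf, 0xAA00_0000, rd, rn, rm); // ORR Xd, Xn, Xm
}

fn eor_reg64(buf: &mut Vec<u8>, rd: u8, rn: u8, rm: u8) {
    encode_logical_reg64(buf, 0xCA00_0000, rd, rn, rm); // EOR Xd, Xn, Xm
}

fn main() {
    let mut buf = Vec::new();
    and_reg64(&mut buf, 0, 1, 2); // and x0, x1, x2
    assert_eq!(buf, 0x8A02_0020u32.to_le_bytes());
}
```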
@@ -1600,6 +1621,66 @@ impl<
             offset,
         });
     }
+
+    fn build_int_bitwise_and(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    ) {
+        let buf = &mut self.buf;
+
+        match int_width {
+            IntWidth::U128 | IntWidth::I128 => todo!("bitwise and for 128-bit integers"),
+            _ => {
+                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
+                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
+                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
+                ASM::and_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
+            }
+        }
+    }
+
+    fn build_int_bitwise_or(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    ) {
+        let buf = &mut self.buf;
+
+        match int_width {
+            IntWidth::U128 | IntWidth::I128 => todo!("bitwise or for 128-bit integers"),
+            _ => {
+                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
+                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
+                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
+                ASM::or_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
+            }
+        }
+    }
+
+    fn build_int_bitwise_xor(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    ) {
+        let buf = &mut self.buf;
+
+        match int_width {
+            IntWidth::U128 | IntWidth::I128 => todo!("bitwise xor for 128-bit integers"),
+            _ => {
+                let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
+                let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
+                let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
+                ASM::xor_reg64_reg64_reg64(buf, dst_reg, src1_reg, src2_reg);
+            }
+        }
+    }
 }
 
 /// This impl block is for ir related instructions that need backend specific information.
diff --git a/crates/compiler/gen_dev/src/generic64/x86_64.rs b/crates/compiler/gen_dev/src/generic64/x86_64.rs
index 61d216ded5..cf43f8cf29 100644
--- a/crates/compiler/gen_dev/src/generic64/x86_64.rs
+++ b/crates/compiler/gen_dev/src/generic64/x86_64.rs
@@ -1408,6 +1408,54 @@ impl Assembler for X86_64Assembler {
     fn set_if_overflow(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
         seto_reg64(buf, dst);
     }
+
+    fn and_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: X86_64GeneralReg,
+        src1: X86_64GeneralReg,
+        src2: X86_64GeneralReg,
+    ) {
+        if dst == src1 {
+            and_reg64_reg64(buf, dst, src2);
+        } else if dst == src2 {
+            and_reg64_reg64(buf, dst, src1);
+        } else {
+            mov_reg64_reg64(buf, dst, src1);
+            and_reg64_reg64(buf, dst, src2);
+        }
+    }
+
+    fn or_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: X86_64GeneralReg,
+        src1: X86_64GeneralReg,
+        src2: X86_64GeneralReg,
+    ) {
+        if dst == src1 {
+            or_reg64_reg64(buf, dst, src2);
+        } else if dst == src2 {
+            or_reg64_reg64(buf, dst, src1);
+        } else {
+            mov_reg64_reg64(buf, dst, src1);
+            or_reg64_reg64(buf, dst, src2);
+        }
+    }
+
+    fn xor_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: X86_64GeneralReg,
+        src1: X86_64GeneralReg,
+        src2: X86_64GeneralReg,
+    ) {
+        if dst == src1 {
+            xor_reg64_reg64(buf, dst, src2);
+        } else if dst == src2 {
+            xor_reg64_reg64(buf, dst, src1);
+        } else {
+            mov_reg64_reg64(buf, dst, src1);
+            xor_reg64_reg64(buf, dst, src2);
+        }
+    }
 }
 
 impl X86_64Assembler {
@@ -1511,6 +1559,27 @@ fn add_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64Gene
     binop_reg64_reg64(0x01, buf, dst, src);
 }
 
+/// `AND r64,r/m64` -> Bitwise logical and r/m64 to r64.
+#[inline(always)]
+fn and_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
+    // NOTE: src and dst are flipped by design; the 0x23 form's ModRM.reg operand is the destination
+    binop_reg64_reg64(0x23, buf, src, dst);
+}
+
+/// `OR r64,r/m64` -> Bitwise logical or r/m64 to r64.
+#[inline(always)]
+fn or_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
+    // NOTE: src and dst are flipped by design; the 0x0B form's ModRM.reg operand is the destination
+    binop_reg64_reg64(0x0B, buf, src, dst);
+}
+
+/// `XOR r64,r/m64` -> Bitwise logical exclusive or r/m64 to r64.
+#[inline(always)]
+fn xor_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
+    // NOTE: src and dst are flipped by design; the 0x33 form's ModRM.reg operand is the destination
+    binop_reg64_reg64(0x33, buf, src, dst);
+}
+
 /// `ADDSD xmm1,xmm2/m64` -> Add the low double-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
 #[inline(always)]
 fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
@@ -2189,13 +2258,6 @@ fn push_reg64(buf: &mut Vec<'_, u8>, reg: X86_64GeneralReg) {
     }
 }
 
-/// `XOR r/m64,r64` -> Xor r64 to r/m64.
-#[inline(always)]
-#[allow(dead_code)]
-fn xor_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
-    binop_reg64_reg64(0x31, buf, dst, src);
-}
-
 // When writing tests, it is a good idea to test both a number and unnumbered register.
 // This is because R8-R15 often have special instruction prefixes.
 #[cfg(test)]
diff --git a/crates/compiler/gen_dev/src/lib.rs b/crates/compiler/gen_dev/src/lib.rs
index 69f7caab19..ea36f1f9b3 100644
--- a/crates/compiler/gen_dev/src/lib.rs
+++ b/crates/compiler/gen_dev/src/lib.rs
@@ -499,6 +499,27 @@ trait Backend<'a> {
                 );
                 self.build_num_sub(sym, &args[0], &args[1], ret_layout)
             }
+            LowLevel::NumBitwiseAnd => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
+                    self.build_int_bitwise_and(sym, &args[0], &args[1], *int_width)
+                } else {
+                    internal_error!("bitwise and on a non-integer")
+                }
+            }
+            LowLevel::NumBitwiseOr => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
+                    self.build_int_bitwise_or(sym, &args[0], &args[1], *int_width)
+                } else {
+                    internal_error!("bitwise or on a non-integer")
+                }
+            }
+            LowLevel::NumBitwiseXor => {
+                if let Layout::Builtin(Builtin::Int(int_width)) = ret_layout {
+                    self.build_int_bitwise_xor(sym, &args[0], &args[1], *int_width)
+                } else {
+                    internal_error!("bitwise xor on a non-integer")
+                }
+            }
             LowLevel::Eq => {
                 debug_assert_eq!(2, args.len(), "Eq: expected to have exactly two argument");
                 debug_assert_eq!(
@@ -750,6 +771,33 @@ trait Backend<'a> {
     /// build_num_sub stores the `src1 - src2` difference into dst.
     fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
 
+    /// build_int_bitwise_and stores the `src1 & src2` result into dst.
+    fn build_int_bitwise_and(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    );
+
+    /// build_int_bitwise_or stores the `src1 | src2` result into dst.
+    fn build_int_bitwise_or(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    );
+
+    /// build_int_bitwise_xor stores the `src1 ^ src2` result into dst.
+    fn build_int_bitwise_xor(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+        int_width: IntWidth,
+    );
+
     /// build_eq stores the result of `src1 == src2` into dst.
fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &Layout<'a>); diff --git a/crates/compiler/test_gen/src/gen_num.rs b/crates/compiler/test_gen/src/gen_num.rs index 4e3fd9df00..d6d32c986a 100644 --- a/crates/compiler/test_gen/src/gen_num.rs +++ b/crates/compiler/test_gen/src/gen_num.rs @@ -1299,7 +1299,7 @@ fn tan() { } #[test] -#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))] +#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))] fn bitwise_and() { assert_evals_to!("Num.bitwiseAnd 20 20", 20, i64); assert_evals_to!("Num.bitwiseAnd 25 10", 8, i64); @@ -1307,7 +1307,7 @@ fn bitwise_and() { } #[test] -#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))] +#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))] fn bitwise_xor() { assert_evals_to!("Num.bitwiseXor 20 20", 0, i64); assert_evals_to!("Num.bitwiseXor 15 14", 1, i64); @@ -1316,7 +1316,7 @@ fn bitwise_xor() { } #[test] -#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))] +#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))] fn bitwise_or() { assert_evals_to!("Num.bitwiseOr 1 1", 1, i64); assert_evals_to!("Num.bitwiseOr 1 2", 3, i64);
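Aside (not part of the patch): the `// NOTE: src and dst are flipped by design` comments follow from how `binop_reg64_reg64` builds the ModRM byte: its third argument lands in ModRM.rm and its fourth in ModRM.reg, while the `0x23`/`0x0B`/`0x33` opcode forms treat ModRM.reg as the destination. A standalone sketch of that encoding, assuming plain `u8` register numbers restricted to 0-7 so a fixed `0x48` REX.W prefix suffices (no REX.R/REX.B handling):

```rust
/// Emit REX.W + opcode + ModRM for a register-register ALU op, with the
/// same (rm, reg) argument order as the patch's `binop_reg64_reg64`.
fn binop_reg64_reg64(op: u8, buf: &mut Vec<u8>, rm: u8, reg: u8) {
    assert!(rm < 8 && reg < 8, "REX.R/REX.B handling omitted for brevity");
    buf.push(0x48); // REX.W: 64-bit operand size
    buf.push(op);
    buf.push(0xC0 | (reg << 3) | rm); // ModRM: mod = 0b11, register direct
}

fn main() {
    let (dst, src) = (0u8, 1u8); // rax, rcx
    let mut buf = Vec::new();
    // Opcode 0x23 is `AND r64, r/m64`: ModRM.reg is the destination, so the
    // caller passes (src, dst) -- exactly the "flipped" call in the patch.
    binop_reg64_reg64(0x23, &mut buf, src, dst);
    assert_eq!(buf, [0x48, 0x23, 0xC1]); // and rax, rcx
}
```

This also accounts for the opcode change in `xor_reg64_reg64`: the deleted helper used `0x31` (`XOR r/m64,r64`, ModRM.reg as source) with `(dst, src)`, while the new one uses `0x33` (`XOR r64,r/m64`) with the arguments swapped; both encodings compute the same `dst ^= src`.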