bitshifts for the dev backend

This commit is contained in:
Folkert 2022-09-04 01:30:17 +02:00
parent 081c61ead6
commit 843f5b15e5
No known key found for this signature in database
GPG key ID: 1F17F6FFD112B97C
5 changed files with 389 additions and 9 deletions

View file

@@ -899,6 +899,45 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
) {
todo!("bitwise xor for AArch64")
}
/// Logical shift left (`dst = src1 << src2`): not yet implemented for AArch64.
/// The x86-64 backend has a working implementation of the same trait method.
fn shl_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
// Panics at code-gen time until an AArch64 shift encoding is emitted here.
todo!("shl for AArch64")
}
/// Logical (zero-fill) shift right (`dst = src1 >> src2`): not yet implemented for AArch64.
fn shr_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
// Panics at code-gen time until an AArch64 shift encoding is emitted here.
todo!("shr for AArch64")
}
/// Arithmetic (sign-extending) shift right (`dst = src1 >> src2`): not yet
/// implemented for AArch64.
fn sar_reg64_reg64_reg64<'a, 'r, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, 'r, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
// Panics at code-gen time until an AArch64 shift encoding is emitted here.
todo!("sar for AArch64")
}
}
/// Inherent-method block for the AArch64 assembler; intentionally empty for now.
impl AArch64Assembler {}

View file

@@ -170,6 +170,36 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src2: GeneralReg,
);
/// Emits `dst = src1 << src2` (logical shift left) for 64-bit general registers.
/// `storage_manager` is passed so an implementation can evict any fixed register
/// its shift instruction requires (the x86-64 impl frees RCX for the CL count).
fn shl_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
/// Emits `dst = src1 >> src2` as a logical (zero-fill) shift right.
fn shr_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
/// Emits `dst = src1 >> src2` as an arithmetic (sign-extending) shift right.
fn sar_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn call(buf: &mut Vec<'_, u8>, relocs: &mut Vec<'_, Relocation>, fn_name: String);
/// Jumps by an offset of offset bytes unconditionally.
@@ -2042,6 +2072,123 @@ impl<
}
}
}
/// Stores the result of `Num.shiftLeftBy src1 src2` into `dst`.
///
/// Loads both operands into general registers, claims a register for the
/// result, and emits the target's 64-bit shift-left sequence.
fn build_int_shift_left(
    &mut self,
    dst: &Symbol,
    src1: &Symbol,
    src2: &Symbol,
    int_width: IntWidth,
) {
    // 128-bit shifts need a multi-register strategy that is not implemented yet.
    if matches!(int_width, IntWidth::U128 | IntWidth::I128) {
        todo!()
    }

    let buf = &mut self.buf;
    let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
    let value_reg = self.storage_manager.load_to_general_reg(buf, src1);
    let amount_reg = self.storage_manager.load_to_general_reg(buf, src2);
    ASM::shl_reg64_reg64_reg64(buf, &mut self.storage_manager, dst_reg, value_reg, amount_reg);
}
/// Stores the result of `Num.shiftRightBy src1 src2` (arithmetic, sign-extending)
/// into `dst`.
///
/// All shifts are performed on full 64-bit registers, so for narrower widths the
/// value is first shifted left so its sign bit lands in bit 63, arithmetically
/// shifted right, then logically shifted back down.
fn build_int_shift_right(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
int_width: IntWidth,
) {
let buf = &mut self.buf;
match int_width {
// 128-bit shifts are not implemented yet.
IntWidth::U128 | IntWidth::I128 => todo!(),
_ => {
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
let src2_reg = self.storage_manager.load_to_general_reg(buf, src2);
// to get sign extension "for free", we move our bits to the left
// e.g. for an i8, stack_size() == 1, so this is 64 - 8 = 56 bits.
let shift_left_amount = 64 - (int_width.stack_size() as i64 * 8);
if shift_left_amount > 0 {
self.storage_manager.with_tmp_general_reg(
buf,
|storage_manager, buf, tmp_reg| {
ASM::mov_reg64_imm64(buf, tmp_reg, shift_left_amount);
// NOTE(review): this shifts src1's register IN PLACE, mutating the
// cached value of the `src1` symbol — confirm the storage manager
// treats that register as clobbered afterwards.
ASM::shl_reg64_reg64_reg64(
buf,
storage_manager,
src1_reg,
src1_reg,
tmp_reg,
);
},
)
}
// The arithmetic shift itself: sign bit (now at bit 63) is replicated.
ASM::sar_reg64_reg64_reg64(
buf,
&mut self.storage_manager,
dst_reg,
src1_reg,
src2_reg,
);
if shift_left_amount > 0 {
// shift back if needed
self.storage_manager.with_tmp_general_reg(
&mut self.buf,
|storage_manager, buf, tmp_reg| {
ASM::mov_reg64_imm64(buf, tmp_reg, shift_left_amount);
ASM::shr_reg64_reg64_reg64(
buf,
storage_manager,
dst_reg,
dst_reg,
tmp_reg,
);
},
)
}
}
}
}
/// Stores the result of `Num.shiftRightZfBy src1 src2` (logical, zero-fill)
/// into `dst`.
///
/// Loads both operands into general registers, claims a result register, and
/// emits the target's 64-bit logical shift-right sequence.
/// NOTE(review): the shift is done at full 64-bit width; this assumes the
/// upper bits of `src1`'s register are zero for narrower widths — confirm
/// how values are extended on load.
fn build_int_shift_right_zero_fill(
    &mut self,
    dst: &Symbol,
    src1: &Symbol,
    src2: &Symbol,
    int_width: IntWidth,
) {
    // 128-bit shifts need a multi-register strategy that is not implemented yet.
    if matches!(int_width, IntWidth::U128 | IntWidth::I128) {
        todo!()
    }

    let buf = &mut self.buf;
    let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
    let value_reg = self.storage_manager.load_to_general_reg(buf, src1);
    let amount_reg = self.storage_manager.load_to_general_reg(buf, src2);
    ASM::shr_reg64_reg64_reg64(buf, &mut self.storage_manager, dst_reg, value_reg, amount_reg);
}
}
/// This impl block is for ir related instructions that need backend specific information.

View file

@@ -1452,6 +1452,82 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
/// Emits `dst = src1 ^ src2` using the two-operand XOR plus a move into `dst`.
fn xor_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: Reg64, src1: Reg64, src2: Reg64) {
binop_move_src_to_dst_reg64(buf, xor_reg64_reg64, dst, src1, src2)
}
/// Emits `dst = src1 << src2` (logical shift left).
///
/// x86-64's variable shift takes its count in CL, so RCX is freed and then
/// clobbered with `src2` before issuing `shl dst, cl`.
fn shl_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
// Evict any live value from RCX: the shift count must end up in CL.
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RCX));
mov_reg64_reg64(buf, dst, src1);
// NOTE(review): if `dst` were RCX this mov would clobber the result with the
// count — confirm the storage manager never hands out RCX as `dst` here.
mov_reg64_reg64(buf, X86_64GeneralReg::RCX, src2);
shl_reg64_reg64(buf, dst)
}
/// Emits `dst = src1 >> src2` (logical, zero-fill shift right).
///
/// Like `shl_reg64_reg64_reg64`, frees RCX first because the count must be in CL.
fn shr_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
// Evict any live value from RCX: the shift count must end up in CL.
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RCX));
mov_reg64_reg64(buf, dst, src1);
// NOTE(review): same RCX-aliasing caveat as the shl variant above.
mov_reg64_reg64(buf, X86_64GeneralReg::RCX, src2);
shr_reg64_reg64(buf, dst)
}
/// Emits `dst = src1 >> src2` (arithmetic, sign-extending shift right) via the
/// shared shift helper.
fn sar_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
// NOTE(review): unlike the shl/shr variants above, this path never calls
// `ensure_reg_free` for RCX before the helper clobbers it with the count —
// looks like a bug; confirm against `shift_reg64_reg64_reg64`.
shift_reg64_reg64_reg64(buf, storage_manager, sar_reg64_reg64, dst, src1, src2)
}
}
fn shift_reg64_reg64_reg64<'a, 'r, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, 'r, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
shift_function: fn(buf: &mut Vec<'_, u8>, X86_64GeneralReg),
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
macro_rules! helper {
($buf:expr, $dst:expr, $src1:expr, $src2:expr) => {{
mov_reg64_reg64($buf, $dst, $src1);
mov_reg64_reg64($buf, X86_64GeneralReg::RCX, $src2);
shift_function($buf, $dst)
}};
}
helper!(buf, dst, src1, src2)
}
impl X86_64Assembler {
@@ -1576,6 +1652,36 @@ fn xor_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64Gene
binop_reg64_reg64(0x33, buf, src, dst);
}
/// `SHL r/m64, CL` -> Logical shift r/m64 left by the count in CL.
/// (The previous comment was copy-pasted from XOR and was wrong.)
#[inline(always)]
fn shl_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
let rex = add_rm_extension(dst, REX_W);
// NOTE(review): ModRM.reg holds the /4 opcode extension here, not a register,
// so deriving REX.R from `dst` looks redundant — confirm intent.
let rex = add_reg_extension(dst, rex);
let dst_mod = dst as u8 % 8;
// 0xD3 /4: shift-by-CL group, extension 4 = SHL; 0xC0 selects register-direct.
buf.extend(&[rex, 0xD3, 0xC0 | (4 << 3) | dst_mod]);
}
/// `SHR r/m64, CL` -> Logical (zero-fill) shift r/m64 right by the count in CL.
/// (The previous comment was copy-pasted from XOR and was wrong.)
#[inline(always)]
fn shr_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
let rex = add_rm_extension(dst, REX_W);
// NOTE(review): REX.R derived from `dst` is redundant for /digit opcodes — confirm.
let rex = add_reg_extension(dst, rex);
let dst_mod = dst as u8 % 8;
// 0xD3 /5: shift-by-CL group, extension 5 = SHR.
buf.extend(&[rex, 0xD3, 0xC0 | (5 << 3) | dst_mod]);
}
/// `SAR r/m64, CL` -> Arithmetic (sign-extending) shift r/m64 right by the count in CL.
/// (The previous comment was copy-pasted from XOR and was wrong.)
#[inline(always)]
fn sar_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
let rex = add_rm_extension(dst, REX_W);
// NOTE(review): REX.R derived from `dst` is redundant for /digit opcodes — confirm.
let rex = add_reg_extension(dst, rex);
let dst_mod = dst as u8 % 8;
// 0xD3 /7: shift-by-CL group, extension 7 = SAR.
buf.extend(&[rex, 0xD3, 0xC0 | (7 << 3) | dst_mod]);
}
/// `ADDSD xmm1,xmm2/m64` -> Add the low double-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
#[inline(always)]
fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
@@ -2447,6 +2553,33 @@ mod tests {
);
}
#[test]
fn test_shl_reg64_reg64() {
// Checks that the emitted bytes disassemble to `shl <reg>, cl` for every
// register in ALL_GENERAL_REGS.
disassembler_test!(
shl_reg64_reg64,
|reg| format!("shl {reg}, cl"),
ALL_GENERAL_REGS
);
}
#[test]
fn test_shr_reg64_reg64() {
// Checks that the emitted bytes disassemble to `shr <reg>, cl` for every
// register in ALL_GENERAL_REGS.
disassembler_test!(
shr_reg64_reg64,
|reg| format!("shr {reg}, cl"),
ALL_GENERAL_REGS
);
}
#[test]
fn test_sar_reg64_reg64() {
// Checks that the emitted bytes disassemble to `sar <reg>, cl` for every
// register in ALL_GENERAL_REGS.
disassembler_test!(
sar_reg64_reg64,
|reg| format!("sar {reg}, cl"),
ALL_GENERAL_REGS
);
}
#[test]
fn test_cmovl_reg64_reg64() {
disassembler_test!(

View file

@@ -553,6 +553,27 @@ trait Backend<'a> {
internal_error!("bitwise xor on a non-integer")
}
}
LowLevel::NumShiftLeftBy => {
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_shift_left(sym, &args[0], &args[1], int_width)
} else {
internal_error!("shift left on a non-integer")
}
}
LowLevel::NumShiftRightBy => {
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_shift_right(sym, &args[0], &args[1], int_width)
} else {
internal_error!("shift right on a non-integer")
}
}
LowLevel::NumShiftRightZfBy => {
if let Layout::Builtin(Builtin::Int(int_width)) = self.interner().get(*ret_layout) {
self.build_int_shift_right_zero_fill(sym, &args[0], &args[1], int_width)
} else {
internal_error!("shift right zero-fill on a non-integer")
}
}
LowLevel::Eq => {
debug_assert_eq!(2, args.len(), "Eq: expected to have exactly two argument");
debug_assert_eq!(
@@ -888,6 +909,33 @@ trait Backend<'a> {
int_width: IntWidth,
);
/// Stores the result of `Num.shiftLeftBy src1 src2` into `dst`.
/// `int_width` is the integer width of the value being shifted.
fn build_int_shift_left(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
int_width: IntWidth,
);
/// Stores the result of `Num.shiftRightBy src1 src2` (arithmetic shift) into `dst`.
/// `int_width` is the integer width of the value being shifted.
fn build_int_shift_right(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
int_width: IntWidth,
);
/// Stores the result of `Num.shiftRightZfBy src1 src2` (logical, zero-fill shift)
/// into `dst`. `int_width` is the integer width of the value being shifted.
fn build_int_shift_right_zero_fill(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
int_width: IntWidth,
);
/// build_eq stores the result of `src1 == src2` into dst.
fn build_eq(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, arg_layout: &InLayout<'a>);

View file

@@ -2089,7 +2089,7 @@ fn float_mul_checked() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn shift_left_by() {
assert_evals_to!("Num.shiftLeftBy 0b0000_0001 0", 0b0000_0001, i64);
assert_evals_to!("Num.shiftLeftBy 0b0000_0001 1", 0b0000_0010, i64);
@@ -2098,18 +2098,12 @@ fn shift_left_by() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn shift_right_by() {
// Sign Extended Right Shift
let is_llvm_release_mode = cfg!(feature = "gen-llvm") && !cfg!(debug_assertions);
// FIXME (Brian) Something funny happening with 8-bit binary literals in tests
assert_evals_to!(
"Num.shiftRightBy (Num.toI8 0b1100_0000u8) 2",
0b1111_0000u8 as i8,
i8
);
assert_evals_to!("Num.shiftRightBy 0b0100_0000i8 2", 0b0001_0000i8, i8);
assert_evals_to!("Num.shiftRightBy 0b1110_0000u8 1", 0b1111_0000u8, u8);
assert_evals_to!("Num.shiftRightBy 0b1100_0000u8 2", 0b1111_0000u8, u8);
@@ -2144,9 +2138,28 @@ fn shift_right_by() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn shift_right_zf_by() {
// Logical Right Shift
// Zero-fill semantics: vacated high bits become 0 regardless of the sign bit.
assert_evals_to!("Num.shiftRightZfBy 0b1100_0000u8 2", 0b0011_0000u8, u8);
assert_evals_to!("Num.shiftRightZfBy 0b0000_0010u8 1", 0b0000_0001u8, u8);
assert_evals_to!("Num.shiftRightZfBy 0b0000_1100u8 2", 0b0000_0011u8, u8);
// Shifting by more than the bit width (12 > 8) yields 0.
assert_evals_to!("Num.shiftRightZfBy 0b1000_0000u8 12", 0b0000_0000u8, u8);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn shift_right_cast_i8() {
// FIXME (Brian) Something funny happening with 8-bit binary literals in tests
// arithmetic
assert_evals_to!(
"Num.shiftRightBy (Num.toI8 0b1100_0000u8) 2",
0b1111_0000u8 as i8,
i8
);
// logical
assert_evals_to!(
"Num.shiftRightZfBy (Num.toI8 0b1100_0000u8) 2",
0b0011_0000i8,