Merge pull request #7235 from shua/devneg

impl Num.neg for Dec,F32,F64 in repl
Brendan Hansknecht 2024-11-26 13:29:19 -08:00 committed by GitHub
commit e40b3cd6a9
7 changed files with 162 additions and 3 deletions

@@ -1846,6 +1846,26 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
        neg_reg64_reg64(buf, dst, src);
    }

    #[inline(always)]
    fn neg_freg64_freg64(
        buf: &mut Vec<'_, u8>,
        _relocs: &mut Vec<'_, Relocation>,
        dst: AArch64FloatReg,
        src: AArch64FloatReg,
    ) {
        fneg_freg_freg(buf, FloatWidth::F64, dst, src);
    }

    #[inline(always)]
    fn neg_freg32_freg32(
        buf: &mut Vec<'_, u8>,
        _relocs: &mut Vec<'_, Relocation>,
        dst: AArch64FloatReg,
        src: AArch64FloatReg,
    ) {
        fneg_freg_freg(buf, FloatWidth::F32, dst, src);
    }

    #[inline(always)]
    fn sub_reg64_reg64_imm32(
        buf: &mut Vec<'_, u8>,
@@ -3953,6 +3973,24 @@ fn fsub_freg_freg_freg(
    buf.extend(inst.bytes());
}

/// `FNEG Sd/Dd, Sn/Dn`
#[inline(always)]
fn fneg_freg_freg(
    buf: &mut Vec<'_, u8>,
    ftype: FloatWidth,
    dst: AArch64FloatReg,
    src: AArch64FloatReg,
) {
    let inst =
        FloatingPointDataProcessingOneSource::new(FloatingPointDataProcessingOneSourceParams {
            ptype: ftype,
            opcode: 0b00010,
            rn: src,
            rd: dst,
        });
    buf.extend(inst.bytes());
}
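
The helper drives FNEG through the crate's FloatingPointDataProcessingOneSource encoder. For orientation only, here is a minimal standalone sketch of the same scalar FNEG word, assuming the standard A64 "Floating-point data-processing (1 source)" layout (ftype at bits 23-22, opcode 0b000010 at bits 20-15); fneg_scalar is a hypothetical name, not part of this PR:

// Hypothetical helper: packs a scalar FNEG instruction word under the
// layout assumption described above.
fn fneg_scalar(is_f64: bool, rd: u8, rn: u8) -> u32 {
    let ftype: u32 = if is_f64 { 0b01 } else { 0b00 }; // 00 = Sn/Sd, 01 = Dn/Dd
    let opcode: u32 = 0b00010; // FNEG, same opcode value as passed above
    (0b11110 << 24)            // fixed bits of the scalar one-source group
        | (ftype << 22)
        | (1 << 21)
        | (opcode << 15)
        | (0b10000 << 10)
        | ((rn as u32 & 0x1F) << 5)
        | (rd as u32 & 0x1F)
}

fn main() {
    // Under that assumption, `FNEG D0, D1` encodes as 0x1E614020.
    assert_eq!(fneg_scalar(true, 0, 1), 0x1E61_4020);
}
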
/// `FCMP Sn/Dn, Sm/Dm` -> Compare Sn/Dn and Sm/Dm, setting condition flags.
#[inline(always)]
fn fcmp_freg_freg(

@@ -557,6 +557,18 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
    fn sqrt_freg32_freg32(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
    fn neg_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
    fn neg_freg64_freg64(
        buf: &mut Vec<'_, u8>,
        relocs: &mut Vec<'_, Relocation>,
        dst: FloatReg,
        src: FloatReg,
    );
    fn neg_freg32_freg32(
        buf: &mut Vec<'_, u8>,
        relocs: &mut Vec<'_, Relocation>,
        dst: FloatReg,
        src: FloatReg,
    );
    fn mul_freg32_freg32_freg32(
        buf: &mut Vec<'_, u8>,
        dst: FloatReg,
@@ -1791,7 +1803,24 @@ impl<
                let src_reg = self.storage_manager.load_to_general_reg(&mut self.buf, src);
                ASM::neg_reg64_reg64(&mut self.buf, dst_reg, src_reg);
            }
            x => todo!("NumNeg: layout, {:?}", x),
            LayoutRepr::F32 => {
                let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
                let src_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src);
                ASM::neg_freg32_freg32(&mut self.buf, &mut self.relocs, dst_reg, src_reg);
            }
            LayoutRepr::F64 => {
                let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
                let src_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src);
                ASM::neg_freg64_freg64(&mut self.buf, &mut self.relocs, dst_reg, src_reg);
            }
            LayoutRepr::DEC => self.build_fn_call(
                dst,
                bitcode::DEC_NEGATE.to_string(),
                &[*src],
                &[Layout::DEC],
                &Layout::DEC,
            ),
            other => internal_error!("unreachable: NumNeg for layout, {:?}", other),
        }
    }
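
The integer and float arms negate in registers, while the DEC arm defers to the builtin named by bitcode::DEC_NEGATE via a normal function call. As I understand it, Dec is a fixed-point decimal stored in a signed 128-bit integer scaled by 10^18, so negating it is just integer negation of the underlying value. A rough standalone illustration of that semantics (dec_from_int and dec_negate are hypothetical names, not the builtin's API):

// Illustration only; the real routine lives in Roc's builtin bitcode.
// Assumption: a Dec is an i128 holding (value * 10^18).
const DEC_ONE: i128 = 1_000_000_000_000_000_000; // 10^18

fn dec_from_int(n: i128) -> i128 {
    n * DEC_ONE
}

fn dec_negate(dec: i128) -> i128 {
    // Two's-complement negation of the underlying integer; wrapping so the
    // single unrepresentable case (i128::MIN) does not panic in debug builds.
    dec.wrapping_neg()
}

fn main() {
    assert_eq!(dec_negate(dec_from_int(3)), dec_from_int(-3));
}
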

@@ -2602,6 +2602,28 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
        neg_reg64(buf, dst);
    }

    #[inline(always)]
    fn neg_freg64_freg64(
        buf: &mut Vec<'_, u8>,
        relocs: &mut Vec<'_, Relocation>,
        dst: X86_64FloatReg,
        src: X86_64FloatReg,
    ) {
        Self::mov_freg64_imm64(buf, relocs, dst, f64::from_bits(0x8000_0000_0000_0000));
        xorpd_freg64_freg64(buf, dst, src);
    }

    #[inline(always)]
    fn neg_freg32_freg32(
        buf: &mut Vec<'_, u8>,
        relocs: &mut Vec<'_, Relocation>,
        dst: X86_64FloatReg,
        src: X86_64FloatReg,
    ) {
        Self::mov_freg32_imm32(buf, relocs, dst, f32::from_bits(0x8000_0000));
        xorps_freg32_freg32(buf, dst, src);
    }
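
Both implementations use the same trick: load a constant whose only set bit is the IEEE-754 sign bit into dst, then XOR it against src, which flips the sign of any float (zeros and NaNs included) without a subtraction. A small sketch of the bit manipulation, separate from the assembler itself:

// Sketch of the sign-bit flip the two methods above emit in machine code.
fn neg_f64_via_xor(x: f64) -> f64 {
    f64::from_bits(x.to_bits() ^ 0x8000_0000_0000_0000)
}

fn neg_f32_via_xor(x: f32) -> f32 {
    f32::from_bits(x.to_bits() ^ 0x8000_0000)
}

fn main() {
    assert_eq!(neg_f64_via_xor(1.5), -1.5);
    assert_eq!(neg_f32_via_xor(-2.25), 2.25);
    // Unlike `0.0 - x`, flipping the sign bit maps +0.0 to -0.0 as well.
    assert!(neg_f64_via_xor(0.0).is_sign_negative());
}
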
    #[inline(always)]
    fn sub_reg64_reg64_imm32(
        buf: &mut Vec<'_, u8>,
@@ -3352,6 +3374,49 @@ fn sqrtss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
    }
}

/// `XORPD xmm1, xmm2/m128` -> Bitwise exclusive-OR of xmm2/m128 and xmm1.
#[inline(always)]
fn xorpd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
    let dst_high = dst as u8 > 7;
    let dst_mod = dst as u8 % 8;
    let src_high = src as u8 > 7;
    let src_mod = src as u8 % 8;
    if dst_high || src_high {
        buf.extend([
            0x66,
            0x40 | ((dst_high as u8) << 2) | (src_high as u8),
            0x0F,
            0x57,
            0xC0 | (dst_mod << 3) | src_mod,
        ])
    } else {
        buf.extend([0x66, 0x0F, 0x57, 0xC0 | (dst_mod << 3) | src_mod]);
    }
}

/// `XORPS xmm1, xmm2/m128` -> Bitwise exclusive-OR of xmm2/m128 and xmm1.
#[inline(always)]
fn xorps_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
    let dst_high = dst as u8 > 7;
    let dst_mod = dst as u8 % 8;
    let src_high = src as u8 > 7;
    let src_mod = src as u8 % 8;
    if dst_high || src_high {
        buf.extend([
            0x40 | ((dst_high as u8) << 2) | (src_high as u8),
            0x0F,
            0x57,
            0xC0 | (dst_mod << 3) | src_mod,
        ]);
    } else {
        buf.extend([0x0F, 0x57, 0xC0 | (dst_mod << 3) | src_mod]);
    }
}
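
As a check on the byte layout: XORPD is encoded as 66 0F 57 /r and XORPS as 0F 57 /r; when either operand is XMM8-XMM15 a REX prefix is emitted, with REX.R (bit 2) extending the destination in ModRM.reg and REX.B (bit 0) extending the source in ModRM.rm. A standalone worked example using raw register numbers (assuming XMM registers are numbered 0-15), mirroring xorpd_freg64_freg64 above:

// Mirrors the XORPD encoder above, but on plain u8 register numbers so it
// runs without the backend's register types.
fn xorpd_bytes(dst: u8, src: u8) -> Vec<u8> {
    let (dst_high, dst_mod) = (dst > 7, dst % 8);
    let (src_high, src_mod) = (src > 7, src % 8);
    let modrm = 0xC0 | (dst_mod << 3) | src_mod;
    if dst_high || src_high {
        // 0x66 mandatory prefix, then REX.R for dst and/or REX.B for src.
        vec![
            0x66,
            0x40 | ((dst_high as u8) << 2) | (src_high as u8),
            0x0F,
            0x57,
            modrm,
        ]
    } else {
        vec![0x66, 0x0F, 0x57, modrm]
    }
}

fn main() {
    // XORPD XMM1, XMM9 -> 66 41 0F 57 C9 under the numbering assumption.
    assert_eq!(xorpd_bytes(1, 9), vec![0x66, 0x41, 0x0F, 0x57, 0xC9]);
}
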
/// `TEST r/m64,r64` -> AND r64 with r/m64; set SF, ZF, PF according to result.
#[allow(dead_code)]
#[inline(always)]