Merge pull request #5348 from basile-henry/basile/num-is-nan

Implement builtins for Num.isNan, Num.isInfinite, and Num.isFinite
Richard Feldman 2023-05-05 06:15:18 -04:00 committed by GitHub
commit df0ab01128
80 changed files with 697 additions and 258 deletions


@@ -750,6 +750,16 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: AArch64FloatReg, src: AArch64FloatReg) {
fmov_freg_freg(buf, FloatWidth::F64, dst, src);
}
#[inline(always)]
fn mov_reg32_freg32(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _src: AArch64FloatReg) {
unimplemented!();
}
#[inline(always)]
fn mov_reg64_freg64(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _src: AArch64FloatReg) {
unimplemented!();
}
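// Note: mov_reg32_freg32 and mov_reg64_freg64 are left unimplemented for AArch64 in
// this commit, so the generic build_num_is_infinite/build_num_is_finite lowering below,
// which calls them, will hit these unimplemented!() panics on the AArch64 dev backend.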
#[inline(always)]
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
@@ -1086,6 +1096,17 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
cset_reg64_cond(buf, dst, cond);
}
#[inline(always)]
fn is_nan_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src: AArch64FloatReg,
width: FloatWidth,
) {
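// Comparing a float register with itself is unordered only when it holds NaN; in that
// case fcmp clears the Z flag, so the NE condition below materializes 1 exactly for NaN.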
fcmp_freg_freg(buf, width, src, src);
cset_reg64_cond(buf, dst, ConditionCode::NE);
}
#[inline(always)]
fn to_float_freg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64FloatReg, src: AArch64GeneralReg) {
scvtf_freg_reg64(buf, FloatWidth::F64, dst, src);


@@ -269,6 +269,9 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GeneralReg, imm: i64);
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
fn mov_reg32_freg32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: FloatReg);
fn mov_reg64_freg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: FloatReg);
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
@@ -535,6 +538,8 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
operation: CompareOperation,
);
fn is_nan_freg_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: FloatReg, width: FloatWidth);
fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
fn to_float_freg64_reg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: GeneralReg);
@@ -1585,6 +1590,89 @@ impl<
}
}
fn build_num_is_nan(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>) {
let float_width = match *arg_layout {
Layout::F32 => FloatWidth::F32,
Layout::F64 => FloatWidth::F64,
_ => unreachable!(),
};
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src);
ASM::is_nan_freg_reg64(&mut self.buf, dst_reg, src_reg, float_width);
}
fn build_num_is_infinite(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>) {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src);
self.storage_manager.with_tmp_general_reg(
&mut self.buf,
|_storage_manager, buf, mask_reg| {
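// IEEE 754: a value is +/- infinity exactly when its exponent bits are all ones and its
// mantissa bits are all zero. Clear the sign bit, then compare what remains against the
// exponent-only pattern; equality means the value is one of the two infinities.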
match *arg_layout {
Layout::F32 => {
ASM::mov_reg64_imm64(buf, mask_reg, 0x7fff_ffff);
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg); // zero out dst reg
ASM::mov_reg32_freg32(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::mov_reg64_imm64(buf, mask_reg, 0x7f80_0000);
ASM::eq_reg_reg_reg(buf, RegisterWidth::W32, dst_reg, dst_reg, mask_reg);
}
Layout::F64 => {
ASM::mov_reg64_imm64(buf, mask_reg, 0x7fff_ffff_ffff_ffff);
ASM::mov_reg64_freg64(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::mov_reg64_imm64(buf, mask_reg, 0x7ff0_0000_0000_0000);
ASM::eq_reg_reg_reg(buf, RegisterWidth::W64, dst_reg, dst_reg, mask_reg);
}
_ => unreachable!(),
}
},
);
}
fn build_num_is_finite(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>) {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src);
self.storage_manager.with_tmp_general_reg(
&mut self.buf,
|_storage_manager, buf, mask_reg| {
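// IEEE 754: a value is finite exactly when its exponent bits are not all ones (an
// all-ones exponent encodes infinity or NaN), so mask out everything but the exponent
// and require that it differs from the all-ones pattern.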
match *arg_layout {
Layout::F32 => {
ASM::mov_reg64_imm64(buf, mask_reg, 0x7f80_0000);
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg); // zero out dst reg
ASM::mov_reg32_freg32(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::neq_reg64_reg64_reg64(
buf,
RegisterWidth::W32,
dst_reg,
dst_reg,
mask_reg,
);
}
Layout::F64 => {
ASM::mov_reg64_imm64(buf, mask_reg, 0x7ff0_0000_0000_0000);
ASM::mov_reg64_freg64(buf, dst_reg, src_reg);
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, mask_reg);
ASM::neq_reg64_reg64_reg64(
buf,
RegisterWidth::W64,
dst_reg,
dst_reg,
mask_reg,
);
}
_ => unreachable!(),
}
},
);
}
fn build_num_lt(
&mut self,
dst: &Symbol,

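For reference, the masks used in build_num_is_infinite and build_num_is_finite follow directly from the IEEE 754 layout. The sketch below (not part of this diff; the helper names are illustrative only) expresses the same bit-level checks in plain Rust:

fn is_inf_f32(x: f32) -> bool {
    // Clear the sign bit; +/-infinity is an all-ones exponent with a zero mantissa.
    (x.to_bits() & 0x7fff_ffff) == 0x7f80_0000
}

fn is_inf_f64(x: f64) -> bool {
    (x.to_bits() & 0x7fff_ffff_ffff_ffff) == 0x7ff0_0000_0000_0000
}

fn is_finite_f32(x: f32) -> bool {
    // Finite means the exponent is not all ones (all ones encodes infinity or NaN).
    (x.to_bits() & 0x7f80_0000) != 0x7f80_0000
}

fn is_finite_f64(x: f64) -> bool {
    (x.to_bits() & 0x7ff0_0000_0000_0000) != 0x7ff0_0000_0000_0000
}

fn main() {
    // The checks agree with the standard library predicates.
    assert!(is_inf_f32(f32::INFINITY) && is_inf_f32(f32::NEG_INFINITY));
    assert!(!is_inf_f64(f64::NAN) && !is_finite_f64(f64::NAN));
    assert!(is_finite_f32(1.5) && !is_finite_f32(f32::INFINITY));
    assert!(is_finite_f64(-0.0) && is_inf_f64(1.0 / 0.0));
}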

@@ -1483,6 +1483,16 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
movsd_freg64_freg64(buf, dst, src);
}
#[inline(always)]
fn mov_reg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64FloatReg) {
movd_reg32_freg32(buf, dst, src);
}
#[inline(always)]
fn mov_reg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64FloatReg) {
movq_reg64_freg64(buf, dst, src);
}
#[inline(always)]
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
@@ -1795,6 +1805,21 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
};
}
#[inline(always)]
fn is_nan_freg_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src: X86_64FloatReg,
width: FloatWidth,
) {
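// A self-comparison is ordered for every value except NaN; an unordered compare sets
// the parity flag, which setp below turns into a 0/1 value in dst.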
match width {
FloatWidth::F32 => cmp_freg32_freg32(buf, src, src),
FloatWidth::F64 => cmp_freg64_freg64(buf, src, src),
}
setp_reg64(buf, dst)
}
#[inline(always)]
fn to_float_freg32_reg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64GeneralReg) {
cvtsi2ss_freg64_reg64(buf, dst, src);
@@ -2901,6 +2926,33 @@ fn movzx_reg64_base16_offset32(
movzx_reg64_base_offset32(buf, dst, base, offset, 0xB7)
}
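/// `MOVD r32, xmm` -> Move the low 32 bits of an XMM register to a 32-bit general-purpose register.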
#[inline(always)]
fn movd_reg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
let rex = add_rm_extension(dst, REX);
let rex = add_reg_extension(src, rex);
buf.extend([0x66, rex, 0x0F, 0x7E, 0xC0 | (src_mod << 3) | (dst_mod)])
} else {
buf.extend([0x66, 0x0F, 0x7E, 0xC0 | (src_mod << 3) | (dst_mod)])
}
}
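/// `MOVQ r64, xmm` -> Move the low 64 bits of an XMM register to a 64-bit general-purpose register.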
#[inline(always)]
fn movq_reg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64FloatReg) {
let dst_mod = dst as u8 % 8;
let src_mod = src as u8 % 8;
let rex = add_rm_extension(dst, REX_W);
let rex = add_reg_extension(src, rex);
buf.extend([0x66, rex, 0x0F, 0x7E, 0xC0 | (src_mod << 3) | (dst_mod)]);
}
/// `MOVSD xmm1,xmm2` -> Move scalar double-precision floating-point value from xmm2 to xmm1 register.
/// This will not generate anything if dst and src are the same.
#[inline(always)]
@@ -3191,12 +3243,18 @@ fn setge_reg64(buf: &mut Vec<'_, u8>, reg: X86_64GeneralReg) {
set_reg64_help(0x9d, buf, reg);
}
/// `SETO r/m64` -> Set byte if overflow flag is set.
#[inline(always)]
fn seto_reg64(buf: &mut Vec<'_, u8>, reg: X86_64GeneralReg) {
set_reg64_help(0x90, buf, reg);
}
/// `SETP r/m64` -> Set byte if parity (PF=1).
#[inline(always)]
fn setp_reg64(buf: &mut Vec<'_, u8>, reg: X86_64GeneralReg) {
set_reg64_help(0x9A, buf, reg);
}
/// `RET` -> Near return to calling procedure.
#[inline(always)]
fn ret(buf: &mut Vec<'_, u8>) {
@@ -3905,6 +3963,26 @@ mod tests {
);
}
#[test]
fn test_movd_reg32_freg32() {
disassembler_test!(
movd_reg32_freg32,
|dst: X86_64GeneralReg, src| format!("movd {}, {}", dst.low_32bits_string(), src),
ALL_GENERAL_REGS,
ALL_FLOAT_REGS
);
}
#[test]
fn test_movq_reg64_freg64() {
disassembler_test!(
movq_reg64_freg64,
|dst, src| format!("movq {}, {}", dst, src),
ALL_GENERAL_REGS,
ALL_FLOAT_REGS
);
}
#[test]
fn test_movsd_freg64_freg64() {
disassembler_test!(


@@ -830,6 +830,48 @@ trait Backend<'a> {
);
self.build_num_to_frac(sym, &args[0], &arg_layouts[0], ret_layout)
}
LowLevel::NumIsNan => {
debug_assert_eq!(
1,
args.len(),
"NumIsNan: expected to have exactly one argument"
);
debug_assert_eq!(
Layout::BOOL,
*ret_layout,
"NumIsNan: expected to have return layout of type Bool"
);
self.build_num_is_nan(sym, &args[0], &arg_layouts[0])
}
LowLevel::NumIsInfinite => {
debug_assert_eq!(
1,
args.len(),
"NumIsInfinite: expected to have exactly one argument"
);
debug_assert_eq!(
Layout::BOOL,
*ret_layout,
"NumIsInfinite: expected to have return layout of type Bool"
);
self.build_num_is_infinite(sym, &args[0], &arg_layouts[0])
}
LowLevel::NumIsFinite => {
debug_assert_eq!(
1,
args.len(),
"NumIsFinite: expected to have exactly one argument"
);
debug_assert_eq!(
Layout::BOOL,
*ret_layout,
"NumIsFinite: expected to have return layout of type Bool"
);
self.build_num_is_finite(sym, &args[0], &arg_layouts[0])
}
LowLevel::NumLte => {
debug_assert_eq!(
2,
@@ -1477,6 +1519,15 @@ trait Backend<'a> {
ret_layout: &InLayout<'a>,
);
/// build_num_is_nan checks whether a Frac is NaN.
fn build_num_is_nan(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>);
/// build_num_is_infinite checks whether a Frac is infinite.
fn build_num_is_infinite(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>);
/// build_num_is_finite checks whether a Frac is finite.
fn build_num_is_finite(&mut self, dst: &Symbol, src: &Symbol, arg_layout: &InLayout<'a>);
/// build_num_lte stores the result of `src1 <= src2` into dst.
fn build_num_lte(
&mut self,