Merge pull request #3870 from roc-lang/dev-backend-div

add int/float division to the dev backend
Folkert de Vries 2022-08-25 09:03:19 +02:00 committed by GitHub
commit 51d5aa789a
10 changed files with 384 additions and 40 deletions


@ -55,9 +55,6 @@ macro_rules! map_symbol_to_lowlevel_and_arity {
Symbol::NUM_TO_F32_CHECKED => Some(to_num_checked(Symbol::NUM_TO_F32_CHECKED, var_store, LowLevel::NumToFloatChecked)),
Symbol::NUM_TO_F64_CHECKED => Some(to_num_checked(Symbol::NUM_TO_F64_CHECKED, var_store, LowLevel::NumToFloatChecked)),
Symbol::NUM_DIV_FRAC => Some(lowlevel_2(Symbol::NUM_DIV_FRAC, LowLevel::NumDivUnchecked, var_store)),
Symbol::NUM_DIV_TRUNC => Some(lowlevel_2(Symbol::NUM_DIV_TRUNC, LowLevel::NumDivUnchecked, var_store)),
_ => None,
}
}
@ -81,7 +78,6 @@ macro_rules! map_symbol_to_lowlevel_and_arity {
LowLevel::NumToFloatCast => unreachable!(),
LowLevel::NumToIntChecked => unreachable!(),
LowLevel::NumToFloatChecked => unreachable!(),
LowLevel::NumDivUnchecked => unreachable!(),
// these are used internally and not tied to a symbol
LowLevel::Hash => unimplemented!(),
@ -162,6 +158,8 @@ map_symbol_to_lowlevel_and_arity! {
NumLt; NUM_LT; 2,
NumLte; NUM_LTE; 2,
NumCompare; NUM_COMPARE; 2,
NumDivFrac; NUM_DIV_FRAC; 2,
NumDivTruncUnchecked; NUM_DIV_TRUNC; 2,
NumDivCeilUnchecked; NUM_DIV_CEIL; 2,
NumRemUnchecked; NUM_REM; 2,
NumIsMultipleOf; NUM_IS_MULTIPLE_OF; 2,
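A rough standalone illustration (with hypothetical stand-in types, not the real macro expansion) of what the two new table rows encode: each row ties a builtin symbol to its own low-level op plus an argument count, so fractional `/` and truncating integer `//` no longer share a single NumDivUnchecked op.

#[derive(Debug, PartialEq)]
enum ToyLowLevel {
    NumDivFrac,           // fractional division, e.g. `48 / 2`
    NumDivTruncUnchecked, // truncating integer division, e.g. `1000 // 10`
}

// Hypothetical lookup mirroring the `lowlevel; SYMBOL; arity` rows above.
fn toy_lowlevel_and_arity(symbol: &str) -> Option<(ToyLowLevel, usize)> {
    match symbol {
        "NUM_DIV_FRAC" => Some((ToyLowLevel::NumDivFrac, 2)),
        "NUM_DIV_TRUNC" => Some((ToyLowLevel::NumDivTruncUnchecked, 2)),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        toy_lowlevel_and_arity("NUM_DIV_TRUNC"),
        Some((ToyLowLevel::NumDivTruncUnchecked, 2))
    );
}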


@ -456,6 +456,32 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("register unsigned multiplication for AArch64");
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
todo!("register signed division for AArch64");
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
todo!("register unsigned division for AArch64");
}
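// A rough standalone sketch (not part of this diff) of the A64 encodings these
// todos would eventually need, assuming the standard SDIV/UDIV forms: base
// opcodes 0x9AC00C00 (sdiv) and 0x9AC00800 (udiv), with Rm at bit 16, Rn at
// bit 5, Rd at bit 0, emitted as little-endian 32-bit words.
fn sketch_sdiv64(buf: &mut Vec<u8>, rd: u32, rn: u32, rm: u32) {
    buf.extend((0x9AC0_0C00u32 | (rm << 16) | (rn << 5) | rd).to_le_bytes());
}
fn sketch_udiv64(buf: &mut Vec<u8>, rd: u32, rn: u32, rm: u32) {
    buf.extend((0x9AC0_0800u32 | (rm << 16) | (rn << 5) | rd).to_le_bytes());
}
// sketch_sdiv64(&mut buf, 0, 1, 2) should yield bytes 20 0C C2 9A (`sdiv x0, x1, x2`);
// sketch_udiv64(&mut buf, 0, 1, 2) should yield 20 08 C2 9A (`udiv x0, x1, x2`).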
#[inline(always)]
fn mul_freg32_freg32_freg32(
_buf: &mut Vec<'_, u8>,
@ -475,6 +501,25 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("multiplication for floats for AArch64");
}
#[inline(always)]
fn div_freg32_freg32_freg32(
_buf: &mut Vec<'_, u8>,
_dst: AArch64FloatReg,
_src1: AArch64FloatReg,
_src2: AArch64FloatReg,
) {
todo!("division for floats for AArch64");
}
#[inline(always)]
fn div_freg64_freg64_freg64(
_buf: &mut Vec<'_, u8>,
_dst: AArch64FloatReg,
_src1: AArch64FloatReg,
_src2: AArch64FloatReg,
) {
todo!("division for floats for AArch64");
}
#[inline(always)]
fn jmp_imm32(_buf: &mut Vec<'_, u8>, _offset: i32) -> usize {
todo!("jump instructions for AArch64");


@ -222,6 +222,18 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src1: FloatReg,
src2: FloatReg,
);
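/// dst = src1 / src2 for 32-bit floats (scalar single-precision divide).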
fn div_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
src1: FloatReg,
src2: FloatReg,
);
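/// dst = src1 / src2 for 64-bit floats (scalar double-precision divide).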
fn div_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
src1: FloatReg,
src2: FloatReg,
);
fn imul_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
@ -238,6 +250,25 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
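/// dst = src1 / src2 (signed integer divide). The storage manager is threaded
/// through so a concrete backend can free any fixed registers the divide needs
/// (e.g. RAX/RDX on x86-64).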
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
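/// dst = src1 / src2 (unsigned integer divide); takes the storage manager for
/// the same reason as idiv_reg64_reg64_reg64 above.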
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@ -808,6 +839,62 @@ impl<
}
}
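/// build_num_div stores `src1 / src2` into dst: signed and unsigned integer
/// widths go through ASM::idiv/ASM::udiv, and F32/F64 through the scalar float
/// divides. On x86-64 an I64 division lowers to roughly
/// `mov rax, src1; cqo; idiv src2; mov dst, rax`.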
fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>) {
match layout {
Layout::Builtin(Builtin::Int(
IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8,
)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::idiv_reg64_reg64_reg64(
&mut self.buf,
&mut self.storage_manager,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Int(
IntWidth::U64 | IntWidth::U32 | IntWidth::U16 | IntWidth::U8,
)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::udiv_reg64_reg64_reg64(
&mut self.buf,
&mut self.storage_manager,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::div_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
Layout::Builtin(Builtin::Float(FloatWidth::F32)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::div_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
x => todo!("NumDiv: layout, {:?}", x),
}
}
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>) {
match layout {
Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {


@ -1061,6 +1061,78 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
}
fn div_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: X86_64FloatReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
) {
if dst == src1 {
divss_freg32_freg32(buf, dst, src2);
} else if dst == src2 {
divss_freg32_freg32(buf, dst, src1);
} else {
movsd_freg64_freg64(buf, dst, src1);
divss_freg32_freg32(buf, dst, src2);
}
}
fn div_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: X86_64FloatReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
) {
if dst == src1 {
divsd_freg64_freg64(buf, dst, src2);
} else if dst == src2 {
divsd_freg64_freg64(buf, dst, src1);
} else {
movsd_freg64_freg64(buf, dst, src1);
divsd_freg64_freg64(buf, dst, src2);
}
}
fn idiv_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RAX));
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RDX));
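// RAX holds the dividend and receives the quotient; RDX is clobbered by
// CQO/IDIV, so both must be free before emitting the divide.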
mov_reg64_reg64(buf, X86_64GeneralReg::RAX, src1);
idiv_reg64_reg64(buf, src2);
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RAX);
}
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RAX));
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RDX));
mov_reg64_reg64(buf, X86_64GeneralReg::RAX, src1);
udiv_reg64_reg64(buf, src2);
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RAX);
}
#[inline(always)]
fn jmp_imm32(buf: &mut Vec<'_, u8>, offset: i32) -> usize {
jmp_imm32(buf, offset);
@ -1481,6 +1553,46 @@ fn mulsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64Fl
}
}
/// `DIVSS xmm1,xmm2/m32` -> Divide the low single-precision floating-point value in xmm1 by the low single-precision floating-point value in xmm2/m32 and store the result in xmm1.
#[inline(always)]
fn divss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend(&[
0xF3,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x5E,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend(&[0xF3, 0x0F, 0x5E, 0xC0 | (dst_mod << 3) | (src_mod)])
}
}
/// `DIVSD xmm1,xmm2/m64` -> Divide the low double-precision floating-point value in xmm1 by the low double-precision floating-point value in xmm2/m64 and store the result in xmm1.
#[inline(always)]
fn divsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend(&[
0xF2,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x5E,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend(&[0xF2, 0x0F, 0x5E, 0xC0 | (dst_mod << 3) | (src_mod)])
}
}
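// A minimal standalone sketch (not part of this diff) of the same encoding
// scheme: mandatory prefix, optional REX, 0F 5E opcode, ModRM with mode 0b11,
// using raw XMM indices (xmm0 = 0 ... xmm15 = 15).
fn sketch_divsd(buf: &mut Vec<u8>, dst: u8, src: u8) {
    buf.push(0xF2); // F3 here instead would select the single-precision DIVSS form
    if dst > 7 || src > 7 {
        // REX.R extends ModRM.reg (dst); REX.B extends ModRM.rm (src)
        buf.push(0x40 | (((dst > 7) as u8) << 2) | ((src > 7) as u8));
    }
    buf.extend([0x0F, 0x5E, 0xC0 | ((dst % 8) << 3) | (src % 8)]);
}
// sketch_divsd(&mut buf, 0, 1) yields F2 0F 5E C1 (`divsd xmm0, xmm1`);
// sketch_divsd(&mut buf, 8, 1) yields F2 44 0F 5E C1 (`divsd xmm8, xmm1`).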
/// `MULSS xmm1,xmm2/m32` -> Multiply the low single-precision floating-point value in xmm1 by the low single-precision floating-point value in xmm2/m32 and store the result in xmm1.
#[inline(always)]
fn mulss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
@ -1583,6 +1695,47 @@ fn mul_reg64_reg64(buf: &mut Vec<'_, u8>, src: X86_64GeneralReg) {
buf.extend(&[rex, 0xF7, 0b1110_0000 | (src as u8 % 8)]);
}
/// `IDIV r/m64` -> Signed divide RDX:RAX by r/m64, with result stored in RAX ← Quotient, RDX ← Remainder.
#[inline(always)]
fn idiv_reg64_reg64(buf: &mut Vec<'_, u8>, src: X86_64GeneralReg) {
let mut rex = REX_W;
rex = add_reg_extension(src, rex);
if src.value() > 7 {
rex |= REX_PREFIX_B;
}
// The CQO instruction can be used to produce a double quadword dividend
// from a quadword before a quadword division.
//
// The CQO instruction (available in 64-bit mode only) copies the sign (bit 63)
// of the value in the RAX register into every bit position in the RDX register
buf.extend(&[0x48, 0x99]);
buf.extend(&[rex, 0xF7, 0b1111_1000 | (src as u8 % 8)]);
}
/// `DIV r/m64` -> Unsigned divide RDX:RAX by r/m64, with result stored in RAX ← Quotient, RDX ← Remainder.
#[inline(always)]
fn udiv_reg64_reg64(buf: &mut Vec<'_, u8>, src: X86_64GeneralReg) {
let mut rex = REX_W;
rex = add_reg_extension(src, rex);
if src.value() > 7 {
rex |= REX_PREFIX_B;
}
// As in the signed path, emit CQO first to form the RDX:RAX dividend:
// CQO (convert quadword to octoword, available in 64-bit mode only) copies
// the sign (bit 63) of RAX into every bit position of RDX.
buf.extend(&[0x48, 0x99]);
buf.extend(&[rex, 0xF7, 0b1111_0000 | (src as u8 % 8)]);
}
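// A rough standalone check (not part of this diff) of the byte patterns above,
// assuming raw register numbers (rax = 0, rcx = 1, ..., r15 = 15): CQO is
// 48 99; IDIV r/m64 is REX.W + F7 /7 and DIV r/m64 is REX.W + F7 /6.
fn sketch_cqo_div64(buf: &mut Vec<u8>, src: u8, signed: bool) {
    let rex = if src > 7 { 0x49 } else { 0x48 }; // REX.W, plus REX.B for r8..r15
    let ext = if signed { 0xF8 } else { 0xF0 }; // ModRM /7 (idiv) vs /6 (div)
    buf.extend([0x48, 0x99]); // cqo
    buf.extend([rex, 0xF7, ext | (src % 8)]);
}
// sketch_cqo_div64(&mut buf, 1, true) yields 48 99 48 F7 F9 (`cqo; idiv rcx`);
// sketch_cqo_div64(&mut buf, 8, false) yields 48 99 49 F7 F0 (`cqo; div r8`).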
/// Jump near, relative, RIP = RIP + 32-bit displacement sign extended to 64-bits.
#[inline(always)]
fn jmp_imm32(buf: &mut Vec<'_, u8>, imm: i32) {
@ -2233,6 +2386,44 @@ mod tests {
);
}
#[test]
fn test_idiv_reg64_reg64() {
disassembler_test!(
idiv_reg64_reg64,
|reg| format!("cqo\nidiv {}", reg),
ALL_GENERAL_REGS
);
}
#[test]
fn test_div_reg64_reg64() {
disassembler_test!(
udiv_reg64_reg64,
|reg| format!("cqo\ndiv {}", reg),
ALL_GENERAL_REGS
);
}
#[test]
fn test_divsd_freg64_freg64() {
disassembler_test!(
divsd_freg64_freg64,
|reg1, reg2| format!("divsd {}, {}", reg1, reg2),
ALL_FLOAT_REGS,
ALL_FLOAT_REGS
);
}
#[test]
fn test_divss_freg32_freg32() {
disassembler_test!(
divss_freg32_freg32,
|reg1, reg2| format!("divss {}, {}", reg1, reg2),
ALL_FLOAT_REGS,
ALL_FLOAT_REGS
);
}
#[test]
fn test_jmp_imm32() {
const INST_SIZE: i32 = 5;


@ -442,6 +442,22 @@ trait Backend<'a> {
);
self.build_num_mul(sym, &args[0], &args[1], ret_layout)
}
LowLevel::NumDivTruncUnchecked | LowLevel::NumDivFrac => {
debug_assert_eq!(
2,
args.len(),
"NumDiv: expected to have exactly two argument"
);
debug_assert_eq!(
arg_layouts[0], arg_layouts[1],
"NumDiv: expected all arguments of to have the same layout"
);
debug_assert_eq!(
arg_layouts[0], *ret_layout,
"NumDiv: expected to have the same argument and return layout"
);
self.build_num_div(sym, &args[0], &args[1], ret_layout)
}
LowLevel::NumNeg => {
debug_assert_eq!(
1,
@ -702,6 +718,9 @@ trait Backend<'a> {
/// build_num_mul stores `src1 * src2` into dst.
fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
/// build_num_div stores `src1 / src2` into dst.
fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &Layout<'a>);
/// build_num_neg stores the negated value of src into dst.
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &Layout<'a>);


@ -6266,9 +6266,9 @@ fn run_low_level<'a, 'ctx, 'env>(
}
NumAdd | NumSub | NumMul | NumLt | NumLte | NumGt | NumGte | NumRemUnchecked
| NumIsMultipleOf | NumAddWrap | NumAddChecked | NumAddSaturated | NumDivUnchecked
| NumDivCeilUnchecked | NumPow | NumPowInt | NumSubWrap | NumSubChecked
| NumSubSaturated | NumMulWrap | NumMulSaturated | NumMulChecked => {
| NumIsMultipleOf | NumAddWrap | NumAddChecked | NumAddSaturated | NumDivFrac
| NumDivTruncUnchecked | NumDivCeilUnchecked | NumPow | NumPowInt | NumSubWrap
| NumSubChecked | NumSubSaturated | NumMulWrap | NumMulSaturated | NumMulChecked => {
debug_assert_eq!(args.len(), 2);
let (lhs_arg, lhs_layout) = load_symbol_and_layout(scope, &args[0]);
@ -7072,7 +7072,7 @@ fn build_int_binop<'a, 'ctx, 'env>(
&[lhs.into(), rhs.into()],
&bitcode::NUM_POW_INT[int_width],
),
NumDivUnchecked => {
NumDivTruncUnchecked => {
if int_width.is_signed() {
bd.build_int_signed_div(lhs, rhs, "div_int").into()
} else {
@ -7253,7 +7253,7 @@ fn build_float_binop<'a, 'ctx, 'env>(
NumGte => bd.build_float_compare(OGE, lhs, rhs, "float_gte").into(),
NumLt => bd.build_float_compare(OLT, lhs, rhs, "float_lt").into(),
NumLte => bd.build_float_compare(OLE, lhs, rhs, "float_lte").into(),
NumDivUnchecked => bd.build_float_div(lhs, rhs, "div_float").into(),
NumDivFrac => bd.build_float_div(lhs, rhs, "div_float").into(),
NumPow => env.call_intrinsic(&LLVM_POW[float_width], &[lhs.into(), rhs.into()]),
_ => {
unreachable!("Unrecognized int binary operation: {:?}", op);
@ -7370,7 +7370,7 @@ fn build_dec_binop<'a, 'ctx, 'env>(
rhs,
"decimal multiplication overflowed",
),
NumDivUnchecked => dec_binop_with_unchecked(env, bitcode::DEC_DIV, lhs, rhs),
NumDivFrac => dec_binop_with_unchecked(env, bitcode::DEC_DIV, lhs, rhs),
_ => {
unreachable!("Unrecognized int binary operation: {:?}", op);
}


@ -1191,7 +1191,16 @@ impl<'a> LowLevelCall<'a> {
x => todo!("{:?} for {:?}", self.lowlevel, x),
}
}
NumDivUnchecked => {
NumDivFrac => {
self.load_args(backend);
match CodeGenNumType::for_symbol(backend, self.arguments[0]) {
F32 => backend.code_builder.f32_div(),
F64 => backend.code_builder.f64_div(),
Decimal => self.load_args_and_call_zig(backend, bitcode::DEC_DIV),
x => todo!("{:?} for {:?}", self.lowlevel, x),
}
}
NumDivTruncUnchecked => {
self.load_args(backend);
let is_signed = symbol_is_signed_int(backend, self.arguments[0]);
match CodeGenNumType::for_symbol(backend, self.arguments[0]) {
@ -1209,9 +1218,6 @@ impl<'a> LowLevelCall<'a> {
backend.code_builder.i64_div_u()
}
}
F32 => backend.code_builder.f32_div(),
F64 => backend.code_builder.f64_div(),
Decimal => self.load_args_and_call_zig(backend, bitcode::DEC_DIV),
x => todo!("{:?} for {:?}", self.lowlevel, x),
}
}


@ -65,7 +65,8 @@ pub enum LowLevel {
NumLt,
NumLte,
NumCompare,
NumDivUnchecked,
NumDivFrac,
NumDivTruncUnchecked,
NumDivCeilUnchecked,
NumRemUnchecked,
NumIsMultipleOf,
@ -204,7 +205,6 @@ macro_rules! map_symbol_to_lowlevel {
LowLevel::NumToFloatCast => unreachable!(),
LowLevel::NumToIntChecked => unreachable!(),
LowLevel::NumToFloatChecked => unreachable!(),
LowLevel::NumDivUnchecked => unreachable!(),
// these are used internally and not tied to a symbol
LowLevel::Hash => unimplemented!(),
@ -279,7 +279,9 @@ map_symbol_to_lowlevel! {
NumLt <= NUM_LT,
NumLte <= NUM_LTE,
NumCompare <= NUM_COMPARE,
NumDivFrac <= NUM_DIV_FRAC,
NumDivCeilUnchecked <= NUM_DIV_CEIL,
NumDivTruncUnchecked <= NUM_DIV_TRUNC,
NumRemUnchecked <= NUM_REM,
NumIsMultipleOf <= NUM_IS_MULTIPLE_OF,
NumAbs <= NUM_ABS,


@ -913,10 +913,10 @@ pub fn lowlevel_borrow_signature(arena: &Bump, op: LowLevel) -> &[bool] {
And | Or | NumAdd | NumAddWrap | NumAddChecked | NumAddSaturated | NumSub | NumSubWrap
| NumSubChecked | NumSubSaturated | NumMul | NumMulWrap | NumMulSaturated
| NumMulChecked | NumGt | NumGte | NumLt | NumLte | NumCompare | NumDivUnchecked
| NumDivCeilUnchecked | NumRemUnchecked | NumIsMultipleOf | NumPow | NumPowInt
| NumBitwiseAnd | NumBitwiseXor | NumBitwiseOr | NumShiftLeftBy | NumShiftRightBy
| NumShiftRightZfBy => arena.alloc_slice_copy(&[irrelevant, irrelevant]),
| NumMulChecked | NumGt | NumGte | NumLt | NumLte | NumCompare | NumDivFrac
| NumDivTruncUnchecked | NumDivCeilUnchecked | NumRemUnchecked | NumIsMultipleOf
| NumPow | NumPowInt | NumBitwiseAnd | NumBitwiseXor | NumBitwiseOr | NumShiftLeftBy
| NumShiftRightBy | NumShiftRightZfBy => arena.alloc_slice_copy(&[irrelevant, irrelevant]),
NumToStr | NumAbs | NumNeg | NumSin | NumCos | NumSqrtUnchecked | NumLogUnchecked
| NumRound | NumCeiling | NumFloor | NumToFrac | Not | NumIsFinite | NumAtan | NumAcos


@ -735,17 +735,15 @@ fn gen_wrap_add_nums() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_div_f64() {
assert_evals_to!(
indoc!(
r#"
48 / 2
"#
),
24.0,
f64
);
assert_evals_to!("48 / 2", 24.0, f64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_div_f32() {
assert_evals_to!("48f32 / 2", 24.0, f32);
}
#[test]
@ -1146,17 +1144,15 @@ fn gen_mul_f32() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_div_i64() {
assert_evals_to!(
indoc!(
r#"
1000 // 10
"#
),
100,
i64
);
assert_evals_to!("1000i64 // 10", 100, i64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_div_u64() {
assert_evals_to!("1000u64 // 10", 100, u64);
}
#[test]