Merge branch 'main' into rust-1-72-upgrade

Brendan Hansknecht 2024-01-17 14:36:16 -08:00 committed by GitHub
commit 260b94b36d
101 changed files with 1254 additions and 805 deletions


@@ -1269,6 +1269,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
) {
add_reg64_reg64_reg64(buf, dst, src1, src2);
}
#[inline(always)]
fn add_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
@@ -1288,6 +1289,25 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
fadd_freg_freg_freg(buf, FloatWidth::F64, dst, src1, src2);
}
#[inline(always)]
fn sub_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: AArch64FloatReg,
src1: AArch64FloatReg,
src2: AArch64FloatReg,
) {
fsub_freg_freg_freg(buf, FloatWidth::F32, dst, src1, src2);
}
#[inline(always)]
fn sub_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: AArch64FloatReg,
src1: AArch64FloatReg,
src2: AArch64FloatReg,
) {
fsub_freg_freg_freg(buf, FloatWidth::F64, dst, src1, src2);
}
#[inline(always)]
fn call(buf: &mut Vec<'_, u8>, relocs: &mut Vec<'_, Relocation>, fn_name: String) {
let inst = 0b1001_0100_0000_0000_0000_0000_0000_0000u32;
@@ -3897,6 +3917,27 @@ fn fadd_freg_freg_freg(
buf.extend(inst.bytes());
}
/// `FSUB Sd/Dd, Sn/Dn, Sm/Dm` -> Subtract Sm/Dm from Sn/Dn and place the result in Sd/Dd.
#[inline(always)]
fn fsub_freg_freg_freg(
buf: &mut Vec<'_, u8>,
ftype: FloatWidth,
dst: AArch64FloatReg,
src1: AArch64FloatReg,
src2: AArch64FloatReg,
) {
let inst =
FloatingPointDataProcessingTwoSource::new(FloatingPointDataProcessingTwoSourceParams {
opcode: 0b0011,
ptype: ftype,
rd: dst,
rn: src1,
rm: src2,
});
buf.extend(inst.bytes());
}
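
As a spot check, the new encoder can be compared against the ARM ARM encoding for scalar FSUB. A minimal sketch, not part of this diff; the bump allocator and the V-register variant names are assumptions about the surrounding crate:

    // FSUB D0, D1, D2 should encode as 0x1E623820
    // (ptype=01 for F64, opcode=0b0011, Rm=2, Rn=1, Rd=0), stored little-endian.
    let arena = bumpalo::Bump::new();
    let mut buf = bumpalo::collections::Vec::new_in(&arena);
    fsub_freg_freg_freg(
        &mut buf,
        FloatWidth::F64,
        AArch64FloatReg::V0, // Dd
        AArch64FloatReg::V1, // Dn
        AArch64FloatReg::V2, // Dm
    );
    assert_eq!(buf.as_slice(), &[0x20, 0x38, 0x62, 0x1E][..]);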
/// `FCMP Sn/Dn, Sm/Dm` -> Compare Sn/Dn and Sm/Dm, setting condition flags.
#[inline(always)]
fn fcmp_freg_freg(


@@ -166,6 +166,13 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
);
fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn add_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn add_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
@@ -178,12 +185,6 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src1: FloatReg,
src2: FloatReg,
);
fn add_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn and_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@@ -629,6 +630,19 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn sub_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
src1: FloatReg,
src2: FloatReg,
);
fn sub_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
src1: FloatReg,
src2: FloatReg,
);
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@@ -1308,16 +1322,14 @@ impl<
fn build_num_add(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) {
match self.layout_interner.get_repr(*layout) {
LayoutRepr::Builtin(Builtin::Int(quadword_and_smaller!())) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::add_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Int(int_width)) => self.build_fn_call(
dst,
bitcode::NUM_ADD_OR_PANIC_INT[int_width].to_string(),
&[*src1, *src2],
&[*layout, *layout],
layout,
),
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
@@ -1330,16 +1342,60 @@ impl<
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::add_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Decimal) => {
self.build_fn_call(
dst,
bitcode::DEC_ADD_OR_PANIC.to_string(),
&[*src1, *src2],
&[Layout::DEC, Layout::DEC],
&Layout::DEC,
);
LayoutRepr::DEC => self.build_fn_call(
dst,
bitcode::DEC_ADD_OR_PANIC.to_string(),
&[*src1, *src2],
&[Layout::DEC, Layout::DEC],
&Layout::DEC,
),
other => unreachable!("NumAdd for layout {other:?}"),
}
}
fn build_num_add_wrap(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
layout: &InLayout<'a>,
) {
match self.layout_interner.get_repr(*layout) {
LayoutRepr::Builtin(Builtin::Int(quadword_and_smaller!())) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::add_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
x => todo!("NumAdd: layout, {:?}", x),
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::add_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F32)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::add_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::DEC => self.build_fn_call(
dst,
bitcode::DEC_ADD_SATURATED.to_string(),
&[*src1, *src2],
&[Layout::DEC, Layout::DEC],
&Layout::DEC,
),
other => unreachable!("NumAddWrap for layout {other:?}"),
}
}
@@ -1424,9 +1480,38 @@ impl<
}
fn build_num_mul(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) {
// for the time being, `num_mul` is implemented as wrapping multiplication. In roc, the normal
// `mul` should panic on overflow, but we just don't do that yet
self.build_num_mul_wrap(dst, src1, src2, layout)
match self.layout_interner.get_repr(*layout) {
LayoutRepr::Builtin(Builtin::Int(int_width)) => self.build_fn_call(
dst,
bitcode::NUM_MUL_OR_PANIC_INT[int_width].to_string(),
&[*src1, *src2],
&[*layout, *layout],
layout,
),
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::mul_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F32)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::mul_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::DEC => self.build_fn_call(
dst,
bitcode::DEC_MUL_OR_PANIC.to_string(),
&[*src1, *src2],
&[Layout::DEC, Layout::DEC],
&Layout::DEC,
),
other => unreachable!("NumMul for layout {other:?}"),
}
}
fn build_num_mul_wrap(
@@ -1609,6 +1694,15 @@ impl<
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::div_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Decimal) => {
self.build_fn_call(
dst,
bitcode::DEC_DIV.to_string(),
&[*src1, *src2],
&[*layout, *layout],
layout,
);
}
x => todo!("NumDiv: layout, {:?}", x),
}
}
@@ -1688,9 +1782,38 @@ impl<
}
fn build_num_sub(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) {
// for the time being, `num_sub` is implemented as wrapping subtraction. In roc, the normal
// `sub` should panic on overflow, but we just don't do that yet
self.build_num_sub_wrap(dst, src1, src2, layout)
match self.layout_interner.get_repr(*layout) {
LayoutRepr::Builtin(Builtin::Int(int_width)) => self.build_fn_call(
dst,
bitcode::NUM_SUB_OR_PANIC_INT[int_width].to_string(),
&[*src1, *src2],
&[*layout, *layout],
layout,
),
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::sub_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::Builtin(Builtin::Float(FloatWidth::F32)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
let src2_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src2);
ASM::sub_freg32_freg32_freg32(&mut self.buf, dst_reg, src1_reg, src2_reg);
}
LayoutRepr::DEC => self.build_fn_call(
dst,
bitcode::DEC_SUB_OR_PANIC.to_string(),
&[*src1, *src2],
&[Layout::DEC, Layout::DEC],
&Layout::DEC,
),
other => unreachable!("NumMul for layout {other:?}"),
}
}
fn build_num_sub_wrap(


@@ -526,12 +526,12 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg, X86_64Assembler> for X86_64Syste
use X86_64GeneralReg::*;
type ASM = X86_64Assembler;
// move the first argument to roc_panic (a *RocStr) into r8
ASM::add_reg64_reg64_imm32(buf, R8, RSP, 8);
// move the first argument to roc_panic (a *const RocStr) into r8
ASM::mov_reg64_reg64(buf, R8, RDI);
// move the crash tag into the second return register. We add 1 to it because the 0 value
// is already used for "no crash occurred"
ASM::add_reg64_reg64_imm32(buf, RDX, RDI, 1);
ASM::add_reg64_reg64_imm32(buf, RDX, RSI, 1);
// the setlongjmp_buffer
ASM::data_pointer(buf, relocs, String::from("setlongjmp_buffer"), RDI);
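
For orientation, a hedged sketch of what this prologue leaves in each register. System V AMD64 passes the first two arguments in RDI and RSI; the roc_panic signature is inferred from the comments above:

    // Hypothetical register state after the instructions above:
    //   R8  = first argument (*const RocStr message), copied from RDI
    //   RDX = crash tag + 1, taken from RSI (0 stays reserved for "no crash occurred")
    //   RDI = about to be overwritten with &setlongjmp_buffer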
@@ -2007,6 +2007,39 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
}
#[inline(always)]
fn sub_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
dst: X86_64FloatReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
) {
        if dst == src1 {
            // dst already holds src1: subtract src2 in place.
            subss_freg32_freg32(buf, dst, src2);
        } else {
            // Subtraction is not commutative, so the operands cannot be
            // swapped when dst aliases src2 (that would compute src2 - src1).
            // The storage manager claims dst before loading the sources, so
            // this case should never occur.
            debug_assert!(dst != src2);
            movss_freg32_freg32(buf, dst, src1);
            subss_freg32_freg32(buf, dst, src2);
        }
}
#[inline(always)]
fn sub_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: X86_64FloatReg,
src1: X86_64FloatReg,
src2: X86_64FloatReg,
) {
        if dst == src1 {
            // dst already holds src1: subtract src2 in place.
            subsd_freg64_freg64(buf, dst, src2);
        } else {
            // SUBSD is not commutative either; see sub_freg32_freg32_freg32 above.
            debug_assert!(dst != src2);
            movsd_freg64_freg64(buf, dst, src1);
            subsd_freg64_freg64(buf, dst, src2);
        }
}
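
A quick sanity check of the common aliasing path. A sketch, not part of this diff; the XMM variant names are assumed from X86_64FloatReg:

    // With dst aliasing src1, only a single SUBSD is emitted, and
    // `subsd xmm0, xmm1` encodes as F2 0F 5C C1.
    let arena = bumpalo::Bump::new();
    let mut buf = bumpalo::collections::Vec::new_in(&arena);
    X86_64Assembler::sub_freg64_freg64_freg64(
        &mut buf,
        X86_64FloatReg::XMM0, // dst
        X86_64FloatReg::XMM0, // src1 (same register as dst)
        X86_64FloatReg::XMM1, // src2
    );
    assert_eq!(buf.as_slice(), &[0xF2, 0x0F, 0x5C, 0xC1][..]);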
#[inline(always)]
fn call(buf: &mut Vec<'_, u8>, relocs: &mut Vec<'_, Relocation>, fn_name: String) {
buf.extend([0xE8, 0x00, 0x00, 0x00, 0x00]);
@@ -3055,124 +3088,78 @@ fn sar_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
buf.extend([rex, 0xD3, 0xC0 | (7 << 3) | dst_mod]);
}
/// `ADDSD xmm1,xmm2/m64` -> Add the low double-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
#[inline(always)]
fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
fn double_binary_operation(
buf: &mut Vec<'_, u8>,
dst: X86_64FloatReg,
src: X86_64FloatReg,
float_width: FloatWidth,
op_code2: u8,
) {
let op_code1 = match float_width {
FloatWidth::F32 => 0xF3,
FloatWidth::F64 => 0xF2,
};
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF2,
op_code1,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x58,
op_code2,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF2, 0x0F, 0x58, 0xC0 | (dst_mod << 3) | (src_mod)])
buf.extend([op_code1, 0x0F, op_code2, 0xC0 | (dst_mod << 3) | (src_mod)])
}
}
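
The shared encoder captures the SSE scalar-arithmetic pattern: the mandatory prefix selects the width (0xF3 = scalar single, 0xF2 = scalar double), the trailing opcode byte selects the operation, and a REX byte is inserted only when either register is XMM8..XMM15. For reference, the combinations routed through it in this file:

    // op_code2   with 0xF3 (f32)   with 0xF2 (f64)
    // 0x58       ADDSS             ADDSD
    // 0x59       MULSS             MULSD
    // 0x5C       SUBSS             SUBSD
    // 0x5E       DIVSS             DIVSD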
/// `ADDSD xmm1,xmm2/m64` -> Add the low double-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
#[inline(always)]
fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
double_binary_operation(buf, dst, src, FloatWidth::F64, 0x58)
}
/// `ADDSS xmm1,xmm2/m32` -> Add the low single-precision floating-point value from xmm2/mem to xmm1 and store the result in xmm1.
#[inline(always)]
fn addss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF3,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x58,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF3, 0x0F, 0x58, 0xC0 | (dst_mod << 3) | (src_mod)])
}
double_binary_operation(buf, dst, src, FloatWidth::F32, 0x58)
}
/// `SUBSD xmm1,xmm2/m64` -> Subtract the low double-precision floating-point value in xmm2/mem from xmm1 and store the result in xmm1.
#[inline(always)]
fn subsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
double_binary_operation(buf, dst, src, FloatWidth::F64, 0x5C)
}
/// `SUBSS xmm1,xmm2/m32` -> Subtract the low single-precision floating-point value in xmm2/mem from xmm1 and store the result in xmm1.
#[inline(always)]
fn subss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
double_binary_operation(buf, dst, src, FloatWidth::F32, 0x5C)
}
/// `MULSD xmm1,xmm2/m64` -> Multiply the low double-precision floating-point value in xmm1 by the value in xmm2/mem and store the result in xmm1.
#[inline(always)]
fn mulsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF2,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x59,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF2, 0x0F, 0x59, 0xC0 | (dst_mod << 3) | (src_mod)])
}
double_binary_operation(buf, dst, src, FloatWidth::F64, 0x59)
}
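/// `MULSS xmm1,xmm2/m32` -> Multiply the low single-precision floating-point value in xmm1 by the value in xmm2/mem and store the result in xmm1.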
#[inline(always)]
fn mulss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
double_binary_operation(buf, dst, src, FloatWidth::F32, 0x59)
}
/// `DIVSS xmm1,xmm2/m32` -> Divide the low single-precision floating-point value in xmm1 by the value in xmm2/mem and store the result in xmm1.
#[inline(always)]
fn divss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF3,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x5E,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF3, 0x0F, 0x5E, 0xC0 | (dst_mod << 3) | (src_mod)])
}
double_binary_operation(buf, dst, src, FloatWidth::F32, 0x5E)
}
/// `DIVSD xmm1,xmm2/m64` -> Divide the low double-precision floating-point value in xmm1 by the value in xmm2/mem and store the result in xmm1.
#[inline(always)]
fn divsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF2,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x5E,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF2, 0x0F, 0x5E, 0xC0 | (dst_mod << 3) | (src_mod)])
}
}
/// `MULSS xmm1,xmm2/m32` -> Multiply the low single-precision floating-point value in xmm1 by the value in xmm2/mem and store the result in xmm1.
#[inline(always)]
fn mulss_freg32_freg32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend([
0xF3,
0x40 | ((dst_high as u8) << 2) | (src_high as u8),
0x0F,
0x59,
0xC0 | (dst_mod << 3) | (src_mod),
])
} else {
buf.extend([0xF3, 0x0F, 0x59, 0xC0 | (dst_mod << 3) | (src_mod)])
}
double_binary_operation(buf, dst, src, FloatWidth::F64, 0x5E)
}
#[inline(always)]