Expand register names for more readability

Brendan Hansknecht 2021-01-21 21:14:21 -08:00
parent 5cabdd83b0
commit 9032c8c43b
5 changed files with 644 additions and 540 deletions


@ -4,7 +4,7 @@ use bumpalo::collections::Vec;
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
pub enum AArch64GPReg {
pub enum AArch64GeneralReg {
X0 = 0,
X1 = 1,
X2 = 2,
@ -39,12 +39,12 @@ pub enum AArch64GPReg {
/// This can mean Zero or Stack Pointer depending on the context.
ZRSP = 31,
}
impl RegTrait for AArch64GPReg {}
impl RegTrait for AArch64GeneralReg {}
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[allow(dead_code)]
pub enum AArch64FPReg {}
impl RegTrait for AArch64FPReg {}
pub enum AArch64FloatReg {}
impl RegTrait for AArch64FloatReg {}
pub struct AArch64Assembler {}
@ -54,90 +54,90 @@ pub struct AArch64Call {}
const STACK_ALIGNMENT: u8 = 16;
impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
const GP_PARAM_REGS: &'static [AArch64GPReg] = &[
AArch64GPReg::X0,
AArch64GPReg::X1,
AArch64GPReg::X2,
AArch64GPReg::X3,
AArch64GPReg::X4,
AArch64GPReg::X5,
AArch64GPReg::X6,
AArch64GPReg::X7,
impl CallConv<AArch64GeneralReg, AArch64FloatReg> for AArch64Call {
const GENERAL_PARAM_REGS: &'static [AArch64GeneralReg] = &[
AArch64GeneralReg::X0,
AArch64GeneralReg::X1,
AArch64GeneralReg::X2,
AArch64GeneralReg::X3,
AArch64GeneralReg::X4,
AArch64GeneralReg::X5,
AArch64GeneralReg::X6,
AArch64GeneralReg::X7,
];
const GP_RETURN_REGS: &'static [AArch64GPReg] = Self::GP_PARAM_REGS;
const GP_DEFAULT_FREE_REGS: &'static [AArch64GPReg] = &[
const GENERAL_RETURN_REGS: &'static [AArch64GeneralReg] = Self::GENERAL_PARAM_REGS;
const GENERAL_DEFAULT_FREE_REGS: &'static [AArch64GeneralReg] = &[
// The regs we want to use first should be at the end of this vec.
// We will use pop to get which reg to use next
// Don't use frame pointer: AArch64GPReg::FP,
// Don't use indirect result location: AArch64GPReg::XR,
// Don't use platform register: AArch64GPReg::PR,
// Don't use link register: AArch64GPReg::LR,
// Don't use zero register/stack pointer: AArch64GPReg::ZRSP,
// Don't use frame pointer: AArch64GeneralReg::FP,
// Don't use indirect result location: AArch64GeneralReg::XR,
// Don't use platform register: AArch64GeneralReg::PR,
// Don't use link register: AArch64GeneralReg::LR,
// Don't use zero register/stack pointer: AArch64GeneralReg::ZRSP,
// Use callee saved regs last.
AArch64GPReg::X19,
AArch64GPReg::X20,
AArch64GPReg::X21,
AArch64GPReg::X22,
AArch64GPReg::X23,
AArch64GPReg::X24,
AArch64GPReg::X25,
AArch64GPReg::X26,
AArch64GPReg::X27,
AArch64GPReg::X28,
AArch64GeneralReg::X19,
AArch64GeneralReg::X20,
AArch64GeneralReg::X21,
AArch64GeneralReg::X22,
AArch64GeneralReg::X23,
AArch64GeneralReg::X24,
AArch64GeneralReg::X25,
AArch64GeneralReg::X26,
AArch64GeneralReg::X27,
AArch64GeneralReg::X28,
// Use caller saved regs first.
AArch64GPReg::X0,
AArch64GPReg::X1,
AArch64GPReg::X2,
AArch64GPReg::X3,
AArch64GPReg::X4,
AArch64GPReg::X5,
AArch64GPReg::X6,
AArch64GPReg::X7,
AArch64GPReg::X9,
AArch64GPReg::X10,
AArch64GPReg::X11,
AArch64GPReg::X12,
AArch64GPReg::X13,
AArch64GPReg::X14,
AArch64GPReg::X15,
AArch64GPReg::IP0,
AArch64GPReg::IP1,
AArch64GeneralReg::X0,
AArch64GeneralReg::X1,
AArch64GeneralReg::X2,
AArch64GeneralReg::X3,
AArch64GeneralReg::X4,
AArch64GeneralReg::X5,
AArch64GeneralReg::X6,
AArch64GeneralReg::X7,
AArch64GeneralReg::X9,
AArch64GeneralReg::X10,
AArch64GeneralReg::X11,
AArch64GeneralReg::X12,
AArch64GeneralReg::X13,
AArch64GeneralReg::X14,
AArch64GeneralReg::X15,
AArch64GeneralReg::IP0,
AArch64GeneralReg::IP1,
];
const FP_PARAM_REGS: &'static [AArch64FPReg] = &[];
const FP_RETURN_REGS: &'static [AArch64FPReg] = Self::FP_PARAM_REGS;
const FP_DEFAULT_FREE_REGS: &'static [AArch64FPReg] = &[];
const FLOAT_PARAM_REGS: &'static [AArch64FloatReg] = &[];
const FLOAT_RETURN_REGS: &'static [AArch64FloatReg] = Self::FLOAT_PARAM_REGS;
const FLOAT_DEFAULT_FREE_REGS: &'static [AArch64FloatReg] = &[];
const SHADOW_SPACE_SIZE: u8 = 0;
#[inline(always)]
fn gp_callee_saved(reg: &AArch64GPReg) -> bool {
fn general_callee_saved(reg: &AArch64GeneralReg) -> bool {
matches!(
reg,
AArch64GPReg::X19
| AArch64GPReg::X20
| AArch64GPReg::X21
| AArch64GPReg::X22
| AArch64GPReg::X23
| AArch64GPReg::X24
| AArch64GPReg::X25
| AArch64GPReg::X26
| AArch64GPReg::X27
| AArch64GPReg::X28
AArch64GeneralReg::X19
| AArch64GeneralReg::X20
| AArch64GeneralReg::X21
| AArch64GeneralReg::X22
| AArch64GeneralReg::X23
| AArch64GeneralReg::X24
| AArch64GeneralReg::X25
| AArch64GeneralReg::X26
| AArch64GeneralReg::X27
| AArch64GeneralReg::X28
)
}
#[inline(always)]
fn fp_callee_saved(_reg: &AArch64FPReg) -> bool {
unimplemented!("AArch64 FPRegs not implemented yet");
fn float_callee_saved(_reg: &AArch64FloatReg) -> bool {
unimplemented!("AArch64 FloatRegs not implemented yet");
}
#[inline(always)]
fn setup_stack(
buf: &mut Vec<'_, u8>,
leaf_function: bool,
saved_regs: &[AArch64GPReg],
saved_regs: &[AArch64GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String> {
// full size is upcast to i64 to make sure we don't overflow here.
@ -161,8 +161,8 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
if aligned_stack_size > 0 {
AArch64Assembler::sub_reg64_reg64_imm32(
buf,
AArch64GPReg::ZRSP,
AArch64GPReg::ZRSP,
AArch64GeneralReg::ZRSP,
AArch64GeneralReg::ZRSP,
aligned_stack_size,
);
@ -170,9 +170,9 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
let mut offset = aligned_stack_size;
if !leaf_function {
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GPReg::LR);
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::LR);
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GPReg::FP);
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::FP);
}
for reg in saved_regs {
offset -= 8;
@ -191,7 +191,7 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
fn cleanup_stack(
buf: &mut Vec<'_, u8>,
leaf_function: bool,
saved_regs: &[AArch64GPReg],
saved_regs: &[AArch64GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String> {
if aligned_stack_size > 0 {
@ -199,9 +199,9 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
let mut offset = aligned_stack_size;
if !leaf_function {
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GPReg::LR, offset);
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::LR, offset);
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GPReg::FP, offset);
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::FP, offset);
}
for reg in saved_regs {
offset -= 8;
@ -209,8 +209,8 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
}
AArch64Assembler::add_reg64_reg64_imm32(
buf,
AArch64GPReg::ZRSP,
AArch64GPReg::ZRSP,
AArch64GeneralReg::ZRSP,
AArch64GeneralReg::ZRSP,
aligned_stack_size,
);
}
@ -218,17 +218,17 @@ impl CallConv<AArch64GPReg, AArch64FPReg> for AArch64Call {
}
}
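
The stack setup above rounds the requested frame up to the 16-byte STACK_ALIGNMENT, doing the sum in i64 so it cannot overflow i32. A rough sketch of that calculation (illustrative only; the exact formula lives in hunks not shown here, and the helper name is made up):

```rust
// Saved registers take 8 bytes each; non-leaf functions also save LR and FP.
// The total is padded up to the 16-byte stack alignment.
fn aligned_frame_size(
    requested_stack_size: i32,
    saved_reg_count: usize,
    leaf_function: bool,
) -> Result<i32, String> {
    const STACK_ALIGNMENT: i64 = 16;
    let saved = 8 * saved_reg_count as i64;
    let link_and_frame = if leaf_function { 0 } else { 16 }; // LR + FP
    // Upcast to i64 so the addition itself cannot overflow.
    let full_size = requested_stack_size as i64 + saved + link_and_frame;
    let aligned = (full_size + STACK_ALIGNMENT - 1) / STACK_ALIGNMENT * STACK_ALIGNMENT;
    i32::try_from(aligned).map_err(|_| "stack size does not fit in i32".to_string())
}
```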
impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
#[inline(always)]
fn abs_reg64_reg64(_buf: &mut Vec<'_, u8>, _dst: AArch64GPReg, _src: AArch64GPReg) {
fn abs_reg64_reg64(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _src: AArch64GeneralReg) {
unimplemented!("abs_reg64_reg64 is not yet implement for AArch64");
}
#[inline(always)]
fn add_reg64_reg64_imm32(
buf: &mut Vec<'_, u8>,
dst: AArch64GPReg,
src: AArch64GPReg,
dst: AArch64GeneralReg,
src: AArch64GeneralReg,
imm32: i32,
) {
if imm32 < 0 {
@ -245,9 +245,9 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
#[inline(always)]
fn add_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GPReg,
src1: AArch64GPReg,
src2: AArch64GPReg,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
add_reg64_reg64_reg64(buf, dst, src1, src2);
}
@ -255,9 +255,9 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
#[inline(always)]
fn add_freg64_freg64_freg64(
_buf: &mut Vec<'_, u8>,
_dst: AArch64FPReg,
_src1: AArch64FPReg,
_src2: AArch64FPReg,
_dst: AArch64FloatReg,
_src1: AArch64FloatReg,
_src2: AArch64FloatReg,
) {
unimplemented!("adding floats not yet implemented for AArch64");
}
@ -266,14 +266,14 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
fn mov_freg64_imm64(
_buf: &mut Vec<'_, u8>,
_relocs: &mut Vec<'_, Relocation>,
_dst: AArch64FPReg,
_dst: AArch64FloatReg,
_imm: f64,
) {
unimplemented!("loading float literal not yet implemented for AArch64");
}
#[inline(always)]
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm: i64) {
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm: i64) {
let mut remaining = imm as u64;
movz_reg64_imm16(buf, dst, remaining as u16, 0);
remaining >>= 16;
@ -291,43 +291,43 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
}
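
mov_reg64_imm64 above assembles a full 64-bit immediate from 16-bit pieces: MOVZ writes the low chunk and zeroes the rest of the register, then MOVK patches in the higher chunks at hw = 1, 2, 3 (16-bit shifts). A minimal sketch of how the chunks fall out (illustrative helper, not taken from this diff; the function in the diff may also skip chunks that are already zero):

```rust
// Split a 64-bit immediate into the four (imm16, hw) pairs MOVZ/MOVK consume.
fn imm64_chunks(imm: i64) -> [(u16, u8); 4] {
    let mut remaining = imm as u64;
    let mut chunks = [(0u16, 0u8); 4];
    for (hw, chunk) in chunks.iter_mut().enumerate() {
        *chunk = (remaining as u16, hw as u8);
        remaining >>= 16;
    }
    chunks
}

#[test]
fn imm64_chunk_order() {
    // The low chunk is emitted first: DEF0, 9ABC, 5678, 1234.
    let chunks = imm64_chunks(0x1234_5678_9ABC_DEF0);
    assert_eq!(chunks, [(0xDEF0, 0), (0x9ABC, 1), (0x5678, 2), (0x1234, 3)]);
}
```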
#[inline(always)]
fn mov_freg64_freg64(_buf: &mut Vec<'_, u8>, _dst: AArch64FPReg, _src: AArch64FPReg) {
fn mov_freg64_freg64(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _src: AArch64FloatReg) {
unimplemented!("moving data between float registers not yet implemented for AArch64");
}
#[inline(always)]
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg) {
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64GeneralReg) {
mov_reg64_reg64(buf, dst, src);
}
#[inline(always)]
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, offset: i32) {
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
if offset < 0 {
unimplemented!("negative stack offsets are not yet implement for AArch64");
} else if offset < (0xFFF << 8) {
debug_assert!(offset % 8 == 0);
ldr_reg64_imm12(buf, dst, AArch64GPReg::ZRSP, (offset as u16) >> 3);
ldr_reg64_imm12(buf, dst, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
} else {
unimplemented!("stack offsets over 32k are not yet implement for AArch64");
}
}
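
The offset checks above reflect the LDR/STR unsigned-immediate form used for 64-bit stack loads and stores: the 12-bit immediate is the byte offset divided by 8, so only non-negative, 8-byte-aligned offsets up to 0xFFF * 8 = 32760 bytes (roughly 32k, as the message says) are representable. A small sketch of that constraint (illustrative helper, not from this commit):

```rust
// Returns the scaled imm12 for an 8-byte stack slot, or None if this
// addressing form cannot encode the offset.
fn scaled_stack_offset(offset: i32) -> Option<u16> {
    if offset >= 0 && offset % 8 == 0 && offset <= 0xFFF * 8 {
        Some((offset / 8) as u16)
    } else {
        None // negative, unaligned, or out of range: needs another addressing form
    }
}
```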
fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: AArch64FPReg, _offset: i32) {
fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _offset: i32) {
unimplemented!("loading floating point reg from stack not yet implemented for AArch64");
}
#[inline(always)]
fn mov_stack32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FPReg) {
fn mov_stack32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FloatReg) {
unimplemented!("saving floating point reg to stack not yet implemented for AArch64");
}
#[inline(always)]
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GPReg) {
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GeneralReg) {
if offset < 0 {
unimplemented!("negative stack offsets are not yet implement for AArch64");
} else if offset < (0xFFF << 8) {
debug_assert!(offset % 8 == 0);
str_reg64_imm12(buf, src, AArch64GPReg::ZRSP, (offset as u16) >> 3);
str_reg64_imm12(buf, src, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
} else {
unimplemented!("stack offsets over 32k are not yet implement for AArch64");
}
@ -336,8 +336,8 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
#[inline(always)]
fn sub_reg64_reg64_imm32(
buf: &mut Vec<'_, u8>,
dst: AArch64GPReg,
src: AArch64GPReg,
dst: AArch64GeneralReg,
src: AArch64GeneralReg,
imm32: i32,
) {
if imm32 < 0 {
@ -356,9 +356,9 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
#[inline(always)]
fn sub_reg64_reg64_reg64(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GPReg,
_src1: AArch64GPReg,
_src2: AArch64GPReg,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) {
unimplemented!("registers subtractions not implemented yet for AArch64");
}
@ -366,15 +366,15 @@ impl Assembler<AArch64GPReg, AArch64FPReg> for AArch64Assembler {
#[inline(always)]
fn eq_reg64_reg64_reg64(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GPReg,
_src1: AArch64GPReg,
_src2: AArch64GPReg,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) {
unimplemented!("registers equality not implemented yet for AArch64");
}
#[inline(always)]
fn ret(buf: &mut Vec<'_, u8>) {
ret_reg64(buf, AArch64GPReg::LR)
ret_reg64(buf, AArch64GeneralReg::LR)
}
}
@ -401,7 +401,7 @@ enum BranchGroup {
opc: u8,
op2: u8,
op3: u8,
reg_n: AArch64GPReg,
reg_n: AArch64GeneralReg,
op4: u8,
},
}
@ -413,19 +413,19 @@ enum DPRegGroup {
subtract: bool,
set_flags: bool,
shift: u8,
reg_m: AArch64GPReg,
reg_m: AArch64GeneralReg,
imm6: u8,
reg_n: AArch64GPReg,
reg_d: AArch64GPReg,
reg_n: AArch64GeneralReg,
reg_d: AArch64GeneralReg,
},
Logical {
sf: bool,
op: DPRegLogicalOp,
shift: u8,
reg_m: AArch64GPReg,
reg_m: AArch64GeneralReg,
imm6: u8,
reg_n: AArch64GPReg,
reg_d: AArch64GPReg,
reg_n: AArch64GeneralReg,
reg_d: AArch64GeneralReg,
},
}
@ -437,15 +437,15 @@ enum DPImmGroup {
set_flags: bool,
shift: bool,
imm12: u16,
reg_n: AArch64GPReg,
reg_d: AArch64GPReg,
reg_n: AArch64GeneralReg,
reg_d: AArch64GeneralReg,
},
MoveWide {
sf: bool,
opc: u8,
hw: u8,
imm16: u16,
reg_d: AArch64GPReg,
reg_d: AArch64GeneralReg,
},
}
@ -456,8 +456,8 @@ enum LdStrGroup {
v: bool,
opc: u8,
imm12: u16,
reg_n: AArch64GPReg,
reg_t: AArch64GPReg,
reg_n: AArch64GeneralReg,
reg_t: AArch64GeneralReg,
},
}
@ -633,7 +633,12 @@ fn build_instruction(inst: AArch64Instruction) -> [u8; 4] {
/// `ADD Xd, Xn, imm12` -> Add Xn and imm12 and place the result into Xd.
#[inline(always)]
fn add_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg, imm12: u16) {
fn add_reg64_reg64_imm12(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src: AArch64GeneralReg,
imm12: u16,
) {
buf.extend(&build_instruction(AArch64Instruction::DPImm(
DPImmGroup::AddSubImm {
sf: true,
@ -651,9 +656,9 @@ fn add_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64G
#[inline(always)]
fn add_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: AArch64GPReg,
src1: AArch64GPReg,
src2: AArch64GPReg,
dst: AArch64GeneralReg,
src1: AArch64GeneralReg,
src2: AArch64GeneralReg,
) {
buf.extend(&build_instruction(AArch64Instruction::DPReg(
DPRegGroup::AddSubShifted {
@ -672,7 +677,12 @@ fn add_reg64_reg64_reg64(
/// `LDR Xt, [Xn, #offset]` -> Load Xn + Offset into Xt. ZRSP is SP.
/// Note: imm12 is the offset divided by 8.
#[inline(always)]
fn ldr_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, base: AArch64GPReg, imm12: u16) {
fn ldr_reg64_imm12(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
base: AArch64GeneralReg,
imm12: u16,
) {
debug_assert!(imm12 <= 0xFFF);
buf.extend(&build_instruction(AArch64Instruction::LdStr(
LdStrGroup::UnsignedImm {
@ -688,7 +698,7 @@ fn ldr_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, base: AArch64GPReg,
/// `MOV Xd, Xm` -> Move Xm to Xd.
#[inline(always)]
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg) {
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64GeneralReg) {
// MOV is equivalent to `ORR Xd, XZR, Xm` in AArch64.
buf.extend(&build_instruction(AArch64Instruction::DPReg(
DPRegGroup::Logical {
@ -697,7 +707,7 @@ fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg)
shift: 0,
reg_m: src,
imm6: 0,
reg_n: AArch64GPReg::ZRSP,
reg_n: AArch64GeneralReg::ZRSP,
reg_d: dst,
},
)));
@ -705,7 +715,7 @@ fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg)
/// `MOVK Xd, imm16` -> Keeps Xd and moves an optionally shifted imm16 to Xd.
#[inline(always)]
fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8) {
fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
debug_assert!(hw <= 0b11);
// MOV is equivalent to `ORR Xd, XZR, Xm` in AArch64.
buf.extend(&build_instruction(AArch64Instruction::DPImm(
@ -721,7 +731,7 @@ fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8
/// `MOVZ Xd, imm16` -> Zeros Xd and moves an optionally shifted imm16 to Xd.
#[inline(always)]
fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8) {
fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
debug_assert!(hw <= 0b11);
// MOV is equivalent to `ORR Xd, XZR, Xm` in AArch64.
buf.extend(&build_instruction(AArch64Instruction::DPImm(
@ -738,7 +748,12 @@ fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8
/// `STR Xt, [Xn, #offset]` -> Store Xt to Xn + Offset. ZRSP is SP.
/// Note: imm12 is the offset divided by 8.
#[inline(always)]
fn str_reg64_imm12(buf: &mut Vec<'_, u8>, src: AArch64GPReg, base: AArch64GPReg, imm12: u16) {
fn str_reg64_imm12(
buf: &mut Vec<'_, u8>,
src: AArch64GeneralReg,
base: AArch64GeneralReg,
imm12: u16,
) {
debug_assert!(imm12 <= 0xFFF);
buf.extend(&build_instruction(AArch64Instruction::LdStr(
LdStrGroup::UnsignedImm {
@ -754,7 +769,12 @@ fn str_reg64_imm12(buf: &mut Vec<'_, u8>, src: AArch64GPReg, base: AArch64GPReg,
/// `SUB Xd, Xn, imm12` -> Subtract Xn and imm12 and place the result into Xd.
#[inline(always)]
fn sub_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg, imm12: u16) {
fn sub_reg64_reg64_imm12(
buf: &mut Vec<'_, u8>,
dst: AArch64GeneralReg,
src: AArch64GeneralReg,
imm12: u16,
) {
buf.extend(&build_instruction(AArch64Instruction::DPImm(
DPImmGroup::AddSubImm {
sf: true,
@ -770,7 +790,7 @@ fn sub_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64G
/// `RET Xn` -> Return to the address stored in Xn.
#[inline(always)]
fn ret_reg64(buf: &mut Vec<'_, u8>, xn: AArch64GPReg) {
fn ret_reg64(buf: &mut Vec<'_, u8>, xn: AArch64GeneralReg) {
buf.extend(&build_instruction(AArch64Instruction::Branch(
BranchGroup::UnconditionBranchReg {
opc: 0b0010,
@ -796,9 +816,9 @@ mod tests {
let mut buf = bumpalo::vec![in &arena];
add_reg64_reg64_reg64(
&mut buf,
AArch64GPReg::X10,
AArch64GPReg::ZRSP,
AArch64GPReg::X21,
AArch64GeneralReg::X10,
AArch64GeneralReg::ZRSP,
AArch64GeneralReg::X21,
);
assert_eq!(&buf, &[0xAA, 0x02, 0x1F, 0x8B]);
}
@ -807,7 +827,12 @@ mod tests {
fn test_add_reg64_reg64_imm12() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
add_reg64_reg64_imm12(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21, 0x123);
add_reg64_reg64_imm12(
&mut buf,
AArch64GeneralReg::X10,
AArch64GeneralReg::X21,
0x123,
);
assert_eq!(&buf, &[0xAA, 0x8E, 0x04, 0x91]);
}
@ -815,7 +840,12 @@ mod tests {
fn test_ldr_reg64_imm12() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
ldr_reg64_imm12(&mut buf, AArch64GPReg::X21, AArch64GPReg::ZRSP, 0x123);
ldr_reg64_imm12(
&mut buf,
AArch64GeneralReg::X21,
AArch64GeneralReg::ZRSP,
0x123,
);
assert_eq!(&buf, &[0xF5, 0x8F, 0x44, 0xF9]);
}
@ -823,7 +853,7 @@ mod tests {
fn test_mov_reg64_reg64() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
mov_reg64_reg64(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21);
mov_reg64_reg64(&mut buf, AArch64GeneralReg::X10, AArch64GeneralReg::X21);
assert_eq!(&buf, &[0xEA, 0x03, 0x15, 0xAA]);
}
@ -831,7 +861,7 @@ mod tests {
fn test_movk_reg64_imm16() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
movk_reg64_imm16(&mut buf, AArch64GPReg::X21, TEST_U16, 3);
movk_reg64_imm16(&mut buf, AArch64GeneralReg::X21, TEST_U16, 3);
assert_eq!(&buf, &[0x95, 0x46, 0xE2, 0xF2]);
}
@ -839,7 +869,7 @@ mod tests {
fn test_movz_reg64_imm16() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
movz_reg64_imm16(&mut buf, AArch64GPReg::X21, TEST_U16, 3);
movz_reg64_imm16(&mut buf, AArch64GeneralReg::X21, TEST_U16, 3);
assert_eq!(&buf, &[0x95, 0x46, 0xE2, 0xD2]);
}
@ -847,7 +877,12 @@ mod tests {
fn test_str_reg64_imm12() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
str_reg64_imm12(&mut buf, AArch64GPReg::X21, AArch64GPReg::ZRSP, 0x123);
str_reg64_imm12(
&mut buf,
AArch64GeneralReg::X21,
AArch64GeneralReg::ZRSP,
0x123,
);
assert_eq!(&buf, &[0xF5, 0x8F, 0x04, 0xF9]);
}
@ -855,7 +890,12 @@ mod tests {
fn test_sub_reg64_reg64_imm12() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
sub_reg64_reg64_imm12(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21, 0x123);
sub_reg64_reg64_imm12(
&mut buf,
AArch64GeneralReg::X10,
AArch64GeneralReg::X21,
0x123,
);
assert_eq!(&buf, &[0xAA, 0x8E, 0x04, 0xD1]);
}
@ -863,7 +903,7 @@ mod tests {
fn test_ret_reg64() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
ret_reg64(&mut buf, AArch64GPReg::LR);
ret_reg64(&mut buf, AArch64GeneralReg::LR);
assert_eq!(&buf, &[0xC0, 0x03, 0x5F, 0xD6]);
}
}
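
As an aside on these test vectors: each expected buffer is simply the 32-bit instruction word in little-endian byte order. A tiny check of that relationship for the ADD test above (illustrative, not part of this commit):

```rust
#[test]
fn test_vectors_are_little_endian_words() {
    // 0x8B1F02AA is the 32-bit instruction word behind the ADD test vector.
    let word: u32 = 0x8B1F02AA;
    assert_eq!(word.to_le_bytes(), [0xAA, 0x02, 0x1F, 0x8B]);
}
```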


@ -9,38 +9,38 @@ use target_lexicon::Triple;
pub mod aarch64;
pub mod x86_64;
pub trait CallConv<GPReg: RegTrait, FPReg: RegTrait> {
const GP_PARAM_REGS: &'static [GPReg];
const GP_RETURN_REGS: &'static [GPReg];
const GP_DEFAULT_FREE_REGS: &'static [GPReg];
pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait> {
const GENERAL_PARAM_REGS: &'static [GeneralReg];
const GENERAL_RETURN_REGS: &'static [GeneralReg];
const GENERAL_DEFAULT_FREE_REGS: &'static [GeneralReg];
const FP_PARAM_REGS: &'static [FPReg];
const FP_RETURN_REGS: &'static [FPReg];
const FP_DEFAULT_FREE_REGS: &'static [FPReg];
const FLOAT_PARAM_REGS: &'static [FloatReg];
const FLOAT_RETURN_REGS: &'static [FloatReg];
const FLOAT_DEFAULT_FREE_REGS: &'static [FloatReg];
const SHADOW_SPACE_SIZE: u8;
fn gp_callee_saved(reg: &GPReg) -> bool;
fn general_callee_saved(reg: &GeneralReg) -> bool;
#[inline(always)]
fn gp_caller_saved(reg: &GPReg) -> bool {
!Self::gp_callee_saved(reg)
fn general_caller_saved(reg: &GeneralReg) -> bool {
!Self::general_callee_saved(reg)
}
fn fp_callee_saved(reg: &FPReg) -> bool;
fn float_callee_saved(reg: &FloatReg) -> bool;
#[inline(always)]
fn fp_caller_saved(reg: &FPReg) -> bool {
!Self::fp_callee_saved(reg)
fn float_caller_saved(reg: &FloatReg) -> bool {
!Self::float_callee_saved(reg)
}
fn setup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
gp_saved_regs: &[GPReg],
general_saved_regs: &[GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String>;
fn cleanup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
gp_saved_regs: &[GPReg],
general_saved_regs: &[GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String>;
}
@ -51,50 +51,70 @@ pub trait CallConv<GPReg: RegTrait, FPReg: RegTrait> {
/// Thus, some backends will need to use multiple instructions to perform a single one of these calls.
/// Generally, I prefer explicit sources, as opposed to dst being one of the sources. Ex: `x = x + y` would be `add x, x, y` instead of `add x, y`.
/// dst should always come before sources.
pub trait Assembler<GPReg: RegTrait, FPReg: RegTrait> {
fn abs_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src: GPReg);
fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, imm32: i32);
fn add_freg64_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FPReg, src1: FPReg, src2: FPReg);
fn add_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
fn abs_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn add_freg64_freg64_freg64(
buf: &mut Vec<'_, u8>,
dst: FloatReg,
src1: FloatReg,
src2: FloatReg,
);
fn add_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn mov_freg64_imm64(
buf: &mut Vec<'_, u8>,
relocs: &mut Vec<'_, Relocation>,
dst: FPReg,
dst: FloatReg,
imm: f64,
);
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GPReg, imm: i64);
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FPReg, src: FPReg);
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src: GPReg);
fn mov_freg64_stack32(buf: &mut Vec<'_, u8>, dst: FPReg, offset: i32);
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GPReg, offset: i32);
fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FPReg);
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GPReg);
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, imm32: i32);
fn sub_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
fn eq_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GeneralReg, imm: i64);
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
fn mov_freg64_stack32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn eq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn ret(buf: &mut Vec<'_, u8>);
}
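
A small usage sketch of the convention spelled out in the trait docs (dst comes first, sources are always explicit), assuming the surrounding module's bumpalo `Vec` import; the function name is hypothetical. A two-operand backend is free to expand the call into `mov dst, src1; add dst, src2` internally:

```rust
// Lowering `x = x + y` still requests `add dst, src1, src2` explicitly.
fn emit_x_plus_y<GeneralReg, FloatReg, A>(buf: &mut Vec<'_, u8>, x: GeneralReg, y: GeneralReg)
where
    GeneralReg: RegTrait,
    FloatReg: RegTrait,
    A: Assembler<GeneralReg, FloatReg>,
{
    // x = x + y; dst first, both sources explicit (RegTrait is Copy).
    A::add_reg64_reg64_reg64(buf, x, x, y);
}
```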
#[derive(Clone, Debug, PartialEq)]
#[allow(dead_code)]
enum SymbolStorage<GPReg: RegTrait, FPReg: RegTrait> {
enum SymbolStorage<GeneralReg: RegTrait, FloatReg: RegTrait> {
// These may need layout, but I am not sure.
// I think whenever a symbol would be used, we specify layout anyways.
GPReg(GPReg),
FPReg(FPReg),
GeneralReg(GeneralReg),
FloatReg(FloatReg),
Stack(i32),
StackAndGPReg(GPReg, i32),
StackAndFPReg(FPReg, i32),
StackAndGeneralReg(GeneralReg, i32),
StackAndFloatReg(FloatReg, i32),
}
pub trait RegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}
pub struct Backend64Bit<
'a,
GPReg: RegTrait,
FPReg: RegTrait,
ASM: Assembler<GPReg, FPReg>,
CC: CallConv<GPReg, FPReg>,
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg>,
> {
phantom_asm: PhantomData<ASM>,
phantom_cc: PhantomData<CC>,
@ -108,34 +128,34 @@ pub struct Backend64Bit<
last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
symbols_map: MutMap<Symbol, SymbolStorage<GPReg, FPReg>>,
symbols_map: MutMap<Symbol, SymbolStorage<GeneralReg, FloatReg>>,
literal_map: MutMap<Symbol, Literal<'a>>,
// This should probably be smarter than a vec.
// There are certain registers we should always use first. With pushing and popping, this could get mixed.
gp_free_regs: Vec<'a, GPReg>,
fp_free_regs: Vec<'a, FPReg>,
general_free_regs: Vec<'a, GeneralReg>,
float_free_regs: Vec<'a, FloatReg>,
// The last major thing we need is a way to decide what reg to free when all of them are full.
// Theoretically we want a basic lru cache for the currently loaded symbols.
// For now just a vec of used registers and the symbols they contain.
gp_used_regs: Vec<'a, (GPReg, Symbol)>,
fp_used_regs: Vec<'a, (FPReg, Symbol)>,
general_used_regs: Vec<'a, (GeneralReg, Symbol)>,
float_used_regs: Vec<'a, (FloatReg, Symbol)>,
// used callee saved regs must be tracked for pushing and popping at the beginning/end of the function.
gp_used_callee_saved_regs: MutSet<GPReg>,
fp_used_callee_saved_regs: MutSet<FPReg>,
general_used_callee_saved_regs: MutSet<GeneralReg>,
float_used_callee_saved_regs: MutSet<FloatReg>,
stack_size: i32,
}
impl<
'a,
GPReg: RegTrait,
FPReg: RegTrait,
ASM: Assembler<GPReg, FPReg>,
CC: CallConv<GPReg, FPReg>,
> Backend<'a> for Backend64Bit<'a, GPReg, FPReg, ASM, CC>
GeneralReg: RegTrait,
FloatReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg>,
> Backend<'a> for Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
{
fn new(env: &'a Env, _target: &Triple) -> Result<Self, String> {
Ok(Backend64Bit {
@ -149,12 +169,12 @@ impl<
free_map: MutMap::default(),
symbols_map: MutMap::default(),
literal_map: MutMap::default(),
gp_free_regs: bumpalo::vec![in env.arena],
gp_used_regs: bumpalo::vec![in env.arena],
gp_used_callee_saved_regs: MutSet::default(),
fp_free_regs: bumpalo::vec![in env.arena],
fp_used_regs: bumpalo::vec![in env.arena],
fp_used_callee_saved_regs: MutSet::default(),
general_free_regs: bumpalo::vec![in env.arena],
general_used_regs: bumpalo::vec![in env.arena],
general_used_callee_saved_regs: MutSet::default(),
float_free_regs: bumpalo::vec![in env.arena],
float_used_regs: bumpalo::vec![in env.arena],
float_used_callee_saved_regs: MutSet::default(),
stack_size: 0,
})
}
@ -170,16 +190,16 @@ impl<
self.free_map.clear();
self.symbols_map.clear();
self.buf.clear();
self.gp_used_callee_saved_regs.clear();
self.gp_free_regs.clear();
self.gp_used_regs.clear();
self.gp_free_regs
.extend_from_slice(CC::GP_DEFAULT_FREE_REGS);
self.fp_used_callee_saved_regs.clear();
self.fp_free_regs.clear();
self.fp_used_regs.clear();
self.fp_free_regs
.extend_from_slice(CC::FP_DEFAULT_FREE_REGS);
self.general_used_callee_saved_regs.clear();
self.general_free_regs.clear();
self.general_used_regs.clear();
self.general_free_regs
.extend_from_slice(CC::GENERAL_DEFAULT_FREE_REGS);
self.float_used_callee_saved_regs.clear();
self.float_free_regs.clear();
self.float_used_regs.clear();
self.float_free_regs
.extend_from_slice(CC::FLOAT_DEFAULT_FREE_REGS);
}
fn set_not_leaf_function(&mut self) {
@ -208,7 +228,7 @@ impl<
// Setup stack.
let mut used_regs = bumpalo::vec![in self.env.arena];
used_regs.extend(&self.gp_used_callee_saved_regs);
used_regs.extend(&self.general_used_callee_saved_regs);
let aligned_stack_size =
CC::setup_stack(&mut out, self.leaf_function, &used_regs, self.stack_size)?;
@ -225,8 +245,8 @@ impl<
}
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_gpreg(dst)?;
let src_reg = self.load_to_gpreg(src)?;
let dst_reg = self.claim_general_reg(dst)?;
let src_reg = self.load_to_general_reg(src)?;
ASM::abs_reg64_reg64(&mut self.buf, dst_reg, src_reg);
Ok(())
}
@ -237,9 +257,9 @@ impl<
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_gpreg(dst)?;
let src1_reg = self.load_to_gpreg(src1)?;
let src2_reg = self.load_to_gpreg(src2)?;
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::add_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
@ -250,9 +270,9 @@ impl<
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_fpreg(dst)?;
let src1_reg = self.load_to_fpreg(src1)?;
let src2_reg = self.load_to_fpreg(src2)?;
let dst_reg = self.claim_float_reg(dst)?;
let src1_reg = self.load_to_float_reg(src1)?;
let src2_reg = self.load_to_float_reg(src2)?;
ASM::add_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
@ -263,17 +283,17 @@ impl<
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_gpreg(dst)?;
let src1_reg = self.load_to_gpreg(src1)?;
let src2_reg = self.load_to_gpreg(src2)?;
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::sub_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
fn build_eq_i64(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_gpreg(dst)?;
let src1_reg = self.load_to_gpreg(src1)?;
let src2_reg = self.load_to_gpreg(src2)?;
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::eq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
@ -281,13 +301,13 @@ impl<
fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String> {
match lit {
Literal::Int(x) => {
let reg = self.claim_gpreg(sym)?;
let reg = self.claim_general_reg(sym)?;
let val = *x;
ASM::mov_reg64_imm64(&mut self.buf, reg, val);
Ok(())
}
Literal::Float(x) => {
let reg = self.claim_fpreg(sym)?;
let reg = self.claim_float_reg(sym)?;
let val = *x;
ASM::mov_freg64_imm64(&mut self.buf, &mut self.relocs, reg, val);
Ok(())
@ -298,11 +318,11 @@ impl<
fn free_symbol(&mut self, sym: &Symbol) {
self.symbols_map.remove(sym);
for i in 0..self.gp_used_regs.len() {
let (reg, saved_sym) = self.gp_used_regs[i];
for i in 0..self.general_used_regs.len() {
let (reg, saved_sym) = self.general_used_regs[i];
if saved_sym == *sym {
self.gp_free_regs.push(reg);
self.gp_used_regs.remove(i);
self.general_free_regs.push(reg);
self.general_used_regs.remove(i);
break;
}
}
@ -311,16 +331,16 @@ impl<
fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.get(sym);
match val {
Some(SymbolStorage::GPReg(reg)) if *reg == CC::GP_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::GPReg(reg)) => {
Some(SymbolStorage::GeneralReg(reg)) if *reg == CC::GENERAL_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::GeneralReg(reg)) => {
// If it fits in a general purpose register, just copy it over.
// Technically this can be optimized to produce shorter instructions if it is less than 64 bits.
ASM::mov_reg64_reg64(&mut self.buf, CC::GP_RETURN_REGS[0], *reg);
ASM::mov_reg64_reg64(&mut self.buf, CC::GENERAL_RETURN_REGS[0], *reg);
Ok(())
}
Some(SymbolStorage::FPReg(reg)) if *reg == CC::FP_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::FPReg(reg)) => {
ASM::mov_freg64_freg64(&mut self.buf, CC::FP_RETURN_REGS[0], *reg);
Some(SymbolStorage::FloatReg(reg)) if *reg == CC::FLOAT_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::FloatReg(reg)) => {
ASM::mov_freg64_freg64(&mut self.buf, CC::FLOAT_RETURN_REGS[0], *reg);
Ok(())
}
Some(x) => Err(format!(
@ -336,74 +356,76 @@ impl<
/// For example, loading a symbol for doing a computation.
impl<
'a,
FPReg: RegTrait,
GPReg: RegTrait,
ASM: Assembler<GPReg, FPReg>,
CC: CallConv<GPReg, FPReg>,
> Backend64Bit<'a, GPReg, FPReg, ASM, CC>
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg>,
> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
{
fn claim_gpreg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
let reg = if !self.gp_free_regs.is_empty() {
let free_reg = self.gp_free_regs.pop().unwrap();
if CC::gp_callee_saved(&free_reg) {
self.gp_used_callee_saved_regs.insert(free_reg);
fn claim_general_reg(&mut self, sym: &Symbol) -> Result<GeneralReg, String> {
let reg = if !self.general_free_regs.is_empty() {
let free_reg = self.general_free_regs.pop().unwrap();
if CC::general_callee_saved(&free_reg) {
self.general_used_callee_saved_regs.insert(free_reg);
}
Ok(free_reg)
} else if !self.gp_used_regs.is_empty() {
let (reg, sym) = self.gp_used_regs.remove(0);
} else if !self.general_used_regs.is_empty() {
let (reg, sym) = self.general_used_regs.remove(0);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err("completely out of general purpose registers".to_string())
}?;
self.gp_used_regs.push((reg, *sym));
self.symbols_map.insert(*sym, SymbolStorage::GPReg(reg));
self.general_used_regs.push((reg, *sym));
self.symbols_map
.insert(*sym, SymbolStorage::GeneralReg(reg));
Ok(reg)
}
fn claim_fpreg(&mut self, sym: &Symbol) -> Result<FPReg, String> {
let reg = if !self.fp_free_regs.is_empty() {
let free_reg = self.fp_free_regs.pop().unwrap();
if CC::fp_callee_saved(&free_reg) {
self.fp_used_callee_saved_regs.insert(free_reg);
fn claim_float_reg(&mut self, sym: &Symbol) -> Result<FloatReg, String> {
let reg = if !self.float_free_regs.is_empty() {
let free_reg = self.float_free_regs.pop().unwrap();
if CC::float_callee_saved(&free_reg) {
self.float_used_callee_saved_regs.insert(free_reg);
}
Ok(free_reg)
} else if !self.fp_used_regs.is_empty() {
let (reg, sym) = self.fp_used_regs.remove(0);
} else if !self.float_used_regs.is_empty() {
let (reg, sym) = self.float_used_regs.remove(0);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err("completely out of floating point registers".to_string())
}?;
self.fp_used_regs.push((reg, *sym));
self.symbols_map.insert(*sym, SymbolStorage::FPReg(reg));
self.float_used_regs.push((reg, *sym));
self.symbols_map.insert(*sym, SymbolStorage::FloatReg(reg));
Ok(reg)
}
fn load_to_gpreg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
fn load_to_general_reg(&mut self, sym: &Symbol) -> Result<GeneralReg, String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GPReg(reg)) => {
self.symbols_map.insert(*sym, SymbolStorage::GPReg(reg));
Ok(reg)
}
Some(SymbolStorage::FPReg(_reg)) => {
Err("Cannot load floating point symbol into GPReg".to_string())
}
Some(SymbolStorage::StackAndGPReg(reg, offset)) => {
Some(SymbolStorage::GeneralReg(reg)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGPReg(reg, offset));
.insert(*sym, SymbolStorage::GeneralReg(reg));
Ok(reg)
}
Some(SymbolStorage::StackAndFPReg(_reg, _offset)) => {
Err("Cannot load floating point symbol into GPReg".to_string())
Some(SymbolStorage::FloatReg(_reg)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::StackAndGeneralReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::StackAndFloatReg(_reg, _offset)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::Stack(offset)) => {
let reg = self.claim_gpreg(sym)?;
let reg = self.claim_general_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGPReg(reg, offset));
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
ASM::mov_reg64_stack32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
@ -411,28 +433,28 @@ impl<
}
}
fn load_to_fpreg(&mut self, sym: &Symbol) -> Result<FPReg, String> {
fn load_to_float_reg(&mut self, sym: &Symbol) -> Result<FloatReg, String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GPReg(_reg)) => {
Err("Cannot load integer point symbol into FPReg".to_string())
Some(SymbolStorage::GeneralReg(_reg)) => {
Err("Cannot load integer point symbol into FloatReg".to_string())
}
Some(SymbolStorage::FPReg(reg)) => {
self.symbols_map.insert(*sym, SymbolStorage::FPReg(reg));
Some(SymbolStorage::FloatReg(reg)) => {
self.symbols_map.insert(*sym, SymbolStorage::FloatReg(reg));
Ok(reg)
}
Some(SymbolStorage::StackAndGPReg(_reg, _offset)) => {
Err("Cannot load integer point symbol into FPReg".to_string())
Some(SymbolStorage::StackAndGeneralReg(_reg, _offset)) => {
Err("Cannot load integer point symbol into FloatReg".to_string())
}
Some(SymbolStorage::StackAndFPReg(reg, offset)) => {
Some(SymbolStorage::StackAndFloatReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFPReg(reg, offset));
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::Stack(offset)) => {
let reg = self.claim_fpreg(sym)?;
let reg = self.claim_float_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFPReg(reg, offset));
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
ASM::mov_freg64_stack32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
@ -443,23 +465,23 @@ impl<
fn free_to_stack(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GPReg(reg)) => {
Some(SymbolStorage::GeneralReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_reg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::FPReg(reg)) => {
Some(SymbolStorage::FloatReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_freg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::StackAndGPReg(_, offset)) => {
Some(SymbolStorage::StackAndGeneralReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::StackAndFPReg(_, offset)) => {
Some(SymbolStorage::StackAndFloatReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}

File diff suppressed because it is too large.


@ -29,8 +29,8 @@ pub fn build_module<'a>(
..
} => {
let backend: Backend64Bit<
x86_64::X86_64GPReg,
x86_64::X86_64FPReg,
x86_64::X86_64GeneralReg,
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
@ -47,8 +47,8 @@ pub fn build_module<'a>(
..
} => {
let backend: Backend64Bit<
aarch64::AArch64GPReg,
aarch64::AArch64FPReg,
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
> = Backend::new(env, target)?;