Always inline assembly functions and calling conventions

Brendan Hansknecht 2020-11-30 23:31:14 -08:00
parent fd77d92851
commit f9d571ebc2
3 changed files with 23 additions and 0 deletions
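
The change itself is mechanical: the dev backend's `CallConv` and `Assembler` trait methods are thin wrappers over raw encoding functions, and this commit marks them `#[inline(always)]` so rustc inlines them at every call site instead of paying a function call per emitted instruction. A minimal sketch of that wrapper pattern (illustrative names and a plain `std` Vec, not the repo's code):

```rust
// Raw encoder: appends the x86-64 `ret` opcode byte to the output buffer.
fn raw_ret(buf: &mut Vec<u8>) {
    buf.push(0xC3);
}

trait Assembler {
    fn ret(buf: &mut Vec<u8>);
}

struct X86_64Demo;

impl Assembler for X86_64Demo {
    // The trait method only forwards to the raw encoder, so the inline
    // hint makes the wrapper disappear entirely at the call site.
    #[inline(always)]
    fn ret(buf: &mut Vec<u8>) {
        raw_ret(buf);
    }
}

fn main() {
    let mut buf = Vec::new();
    X86_64Demo::ret(&mut buf);
    assert_eq!(buf, [0xC3]);
}
```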

@@ -104,6 +104,7 @@ impl CallConv<AArch64GPReg> for AArch64Call {
     const SHADOW_SPACE_SIZE: u8 = 0;
+    #[inline(always)]
     fn callee_saved(reg: &AArch64GPReg) -> bool {
         matches!(
             reg,
@@ -120,6 +121,7 @@ impl CallConv<AArch64GPReg> for AArch64Call {
         )
     }
+    #[inline(always)]
     fn setup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -173,6 +175,8 @@ impl CallConv<AArch64GPReg> for AArch64Call {
             Err("Ran out of stack space".to_string())
         }
     }
+    #[inline(always)]
     fn cleanup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -205,6 +209,7 @@ impl CallConv<AArch64GPReg> for AArch64Call {
 }
 impl Assembler<AArch64GPReg> for AArch64Assembler {
+    #[inline(always)]
     fn add_reg64_reg64_reg64<'a>(
         buf: &mut Vec<'a, u8>,
         dst: AArch64GPReg,
@@ -214,6 +219,7 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
         add_reg64_reg64_reg64(buf, dst, src1, src2);
     }
+    #[inline(always)]
     fn mov_reg64_imm64<'a>(buf: &mut Vec<'a, u8>, dst: AArch64GPReg, imm: i64) {
         let mut remaining = imm as u64;
         movz_reg64_imm16(buf, dst, remaining as u16, 0);
@@ -231,10 +237,12 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
         }
     }
+    #[inline(always)]
     fn mov_reg64_reg64<'a>(buf: &mut Vec<'a, u8>, dst: AArch64GPReg, src: AArch64GPReg) {
         mov_reg64_reg64(buf, dst, src);
     }
+    #[inline(always)]
     fn mov_reg64_stack32<'a>(buf: &mut Vec<'a, u8>, dst: AArch64GPReg, offset: i32) {
         if offset < 0 {
             unimplemented!("negative stack offsets are not yet implement for AArch64");
@@ -246,6 +254,7 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
         }
     }
+    #[inline(always)]
     fn mov_stack32_reg64<'a>(buf: &mut Vec<'a, u8>, offset: i32, src: AArch64GPReg) {
         if offset < 0 {
             unimplemented!("negative stack offsets are not yet implement for AArch64");
@@ -257,10 +266,12 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
         }
     }
+    #[inline(always)]
     fn abs_reg64_reg64<'a>(_buf: &mut Vec<'a, u8>, _dst: AArch64GPReg, _src: AArch64GPReg) {
         unimplemented!("abs_reg64_reg64 is not yet implement for AArch64");
    }
+    #[inline(always)]
     fn ret<'a>(buf: &mut Vec<'a, u8>) {
         ret_reg64(buf, AArch64GPReg::LR)
     }

@@ -17,6 +17,7 @@ pub trait CallConv<GPReg: GPRegTrait> {
     const SHADOW_SPACE_SIZE: u8;
     fn callee_saved(reg: &GPReg) -> bool;
+    #[inline(always)]
     fn caller_saved_regs(reg: &GPReg) -> bool {
         !Self::callee_saved(reg)
     }
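
This is the one place the hint lands on a default method rather than an implementation: `caller_saved_regs` is defined once as the negation of `callee_saved`, and every backend inherits both the body and the inline hint. A minimal sketch with illustrative names:

```rust
trait CallConv {
    fn callee_saved(reg: u8) -> bool;

    // Default method: the inline hint is written once here and applies
    // wherever the method is used through any implementor.
    #[inline(always)]
    fn caller_saved(reg: u8) -> bool {
        !Self::callee_saved(reg)
    }
}

struct Demo;

impl CallConv for Demo {
    #[inline(always)]
    fn callee_saved(reg: u8) -> bool {
        reg >= 12 // pretend registers 12 and up are callee-saved
    }
}

fn main() {
    assert!(Demo::caller_saved(3));
    assert!(!Demo::caller_saved(13));
}
```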

@@ -79,6 +79,7 @@ impl CallConv<X86_64GPReg> for X86_64SystemV {
         )
     }
+    #[inline(always)]
     fn setup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -88,6 +89,7 @@ impl CallConv<X86_64GPReg> for X86_64SystemV {
         x86_64_generic_setup_stack(buf, leaf_function, saved_regs, requested_stack_size)
     }
+    #[inline(always)]
     fn cleanup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -148,6 +150,7 @@ impl CallConv<X86_64GPReg> for X86_64WindowsFastcall {
         )
     }
+    #[inline(always)]
     fn setup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -157,6 +160,7 @@ impl CallConv<X86_64GPReg> for X86_64WindowsFastcall {
         x86_64_generic_setup_stack(buf, leaf_function, saved_regs, requested_stack_size)
     }
+    #[inline(always)]
     fn cleanup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
@@ -231,6 +235,7 @@ fn x86_64_generic_cleanup_stack<'a>(
 impl Assembler<X86_64GPReg> for X86_64Assembler {
     // These functions should map to the raw assembly functions below.
     // In some cases, that means you can just directly call one of the direct assembly functions.
+    #[inline(always)]
     fn add_reg64_reg64_reg64<'a>(
         buf: &mut Vec<'a, u8>,
         dst: X86_64GPReg,
@@ -246,23 +251,29 @@ impl Assembler<X86_64GPReg> for X86_64Assembler {
             add_reg64_reg64(buf, dst, src2);
         }
     }
+    #[inline(always)]
     fn mov_reg64_imm64<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i64) {
         mov_reg64_imm64(buf, dst, imm);
     }
+    #[inline(always)]
     fn mov_reg64_reg64<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, src: X86_64GPReg) {
         mov_reg64_reg64(buf, dst, src);
     }
+    #[inline(always)]
     fn mov_reg64_stack32<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, offset: i32) {
         mov_reg64_stack32(buf, dst, offset);
     }
+    #[inline(always)]
     fn mov_stack32_reg64<'a>(buf: &mut Vec<'a, u8>, offset: i32, src: X86_64GPReg) {
         mov_stack32_reg64(buf, offset, src);
     }
+    #[inline(always)]
     fn abs_reg64_reg64<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, src: X86_64GPReg) {
         mov_reg64_reg64(buf, dst, src);
         neg_reg64(buf, dst);
         cmovl_reg64_reg64(buf, dst, src);
     }
+    #[inline(always)]
     fn ret<'a>(buf: &mut Vec<'a, u8>) {
         ret(buf);
     }
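
The x86-64 `abs_reg64_reg64` above is the only wrapper here that expands to a multi-instruction sequence: `mov` copies the source, `neg` negates it, and `cmovl` takes the original back when the negation came out negative (i.e. the source was positive). A hedged Rust model of that branchless absolute-value idiom, where the `if` stands in for the conditional move:

```rust
fn abs_branchless(src: i64) -> i64 {
    let mut dst = src.wrapping_neg(); // neg dst   (dst = -src)
    if dst < 0 {
        dst = src; // cmovl dst, src (take src back when -src < 0)
    }
    dst
}

fn main() {
    assert_eq!(abs_branchless(-7), 7);
    assert_eq!(abs_branchless(7), 7);
    assert_eq!(abs_branchless(0), 0);
    // Like the hardware sequence, i64::MIN wraps to itself.
    assert_eq!(abs_branchless(i64::MIN), i64::MIN);
}
```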