Add Num.add as inlined function

This commit is contained in:
Brendan Hansknecht 2020-11-21 00:08:12 -08:00
parent 65d6d64102
commit 4b3926be50
5 changed files with 89 additions and 31 deletions

View file

@ -38,7 +38,7 @@ pub fn build_module<'a>(
let mut procs = Vec::with_capacity_in(procedures.len(), env.arena);
for ((sym, layout), proc) in procedures {
// This is temporary until we support passing args to functions.
if sym == symbol::Symbol::NUM_ABS {
if [symbol::Symbol::NUM_ABS, symbol::Symbol::NUM_ADD].contains(&sym) {
continue;
}

View file

@ -137,7 +137,7 @@ where
) -> Result<(), String> {
match expr {
Expr::Literal(lit) => {
self.load_literal(sym, lit, layout);
self.load_literal(sym, lit, layout)?;
Ok(())
}
Expr::FunctionCall {
@ -150,6 +150,10 @@ where
// Instead of calling the function, just inline it.
self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAbs, args), layout)
}
Symbol::NUM_ADD => {
// Instead of calling the function, just inline it.
self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAdd, args), layout)
}
x => Err(format!("the function, {:?}, is not yet implemented", x)),
}
}
@ -170,29 +174,39 @@ where
layout: &Layout<'a>,
) -> Result<(), String> {
match lowlevel {
LowLevel::NumAbs => self.build_num_abs(sym, &args[0], layout),
LowLevel::NumAbs => {
// TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low-level method.
match layout {
Layout::Builtin(Builtin::Int64) => self.build_num_abs_i64(sym, &args[0]),
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
LowLevel::NumAdd => {
// TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low-level method.
match layout {
Layout::Builtin(Builtin::Int64) => {
self.build_num_add_i64(sym, &args[0], &args[1])
}
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
x => Err(format!("low level, {:?}. is not yet implemented", x)),
}
}
/// build_num_abs stores the absolute value of src into dst.
/// build_num_abs dispatches on `layout` and stores the absolute value of src into dst.
fn build_num_abs(
&mut self,
dst: &Symbol,
src: &Symbol,
layout: &Layout<'a>,
) -> Result<(), String> {
// TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low-level method.
match layout {
// Only 64-bit integers are supported so far; any other layout is an error.
Layout::Builtin(Builtin::Int64) => self.build_num_abs_i64(dst, src),
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
/// build_num_abs stores the absolute value of src into dst.
/// build_num_abs_i64 stores the absolute value of src into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;
/// build_num_add_i64 stores the sum of src1 and src2 into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_add_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String>;
/// free_symbols will free all symbols for the given statement.
fn free_symbols(&mut self, stmt: &Stmt<'a>) {
match self.free_map().remove(&(stmt as *const Stmt<'a>)) {

View file

@ -51,6 +51,15 @@ fn add_reg_extension(reg: GPReg, byte: u8) -> u8 {
// Unit tests are added at the bottom of the file to ensure correct asm generation.
// Please keep these in alphanumeric order.
/// `ADD r/m64,r64` -> Add r64 to r/m64.
pub fn add_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg) {
// REX prefix: REX_W selects 64-bit operand size; the extension helpers set
// the bits needed when dst (ModRM.rm) or src (ModRM.reg) is R8-R15.
let rex = add_rm_extension(dst, REX_W);
let rex = add_reg_extension(src, rex);
// Low 3 bits of each register number go into the ModRM byte; src occupies
// the reg field (bits 5-3), dst the rm field (bits 2-0).
let dst_mod = dst as u8 % 8;
let src_mod = (src as u8 % 8) << 3;
// 0xC0 sets ModRM.mod = 0b11 (register-direct); 0x01 is the ADD r/m64,r64 opcode.
buf.extend(&[rex, 0x01, 0xC0 + dst_mod + src_mod]);
}
/// `CMOVL r64,r/m64` -> Move if less (SF ≠ OF).
pub fn cmovl_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg) {
let rex = add_reg_extension(dst, REX_W);
@ -130,6 +139,22 @@ mod tests {
const TEST_I32: i32 = 0x12345678;
const TEST_I64: i64 = 0x12345678_9ABCDEF0;
#[test]
fn test_add_register64bit_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
// Each case pairs a (dst, src) register combination with its expected
// 3-byte encoding (REX prefix, opcode 0x01, ModRM). Both a low register
// (RAX) and an extended one (R15) are used so the REX.B and REX.R
// extension bits are each exercised.
for ((in1, in2), expected) in &[
((GPReg::RAX, GPReg::RAX), [0x48, 0x01, 0xC0]),
((GPReg::RAX, GPReg::R15), [0x4C, 0x01, 0xF8]),
((GPReg::R15, GPReg::RAX), [0x49, 0x01, 0xC7]),
((GPReg::R15, GPReg::R15), [0x4D, 0x01, 0xFF]),
] {
// The assembler appends to the buffer, so clear it between cases.
buf.clear();
add_register64bit_register64bit(&mut buf, *in1, *in2);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_cmovl_register64bit_register64bit() {
let arena = bumpalo::Bump::new();

View file

@ -267,6 +267,25 @@ impl<'a> Backend<'a> for X86_64Backend<'a> {
Ok(())
}
/// build_num_add_i64 stores the sum of src1 and src2 into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_add_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
// Reserve a general-purpose register for the result and record where
// `dst` now lives so later lookups and frees can find it.
let dst_reg = self.claim_gp_reg()?;
self.gp_used_regs.push((dst_reg, *dst));
self.symbols_map.insert(
*dst,
SymbolStorage::GPReg(dst_reg, Layout::Builtin(Builtin::Int64)),
);
// Emit dst = src1, then dst += src2 (the ADD helper takes dst and one src).
let src1_reg = self.load_to_reg(src1)?;
asm::mov_register64bit_register64bit(&mut self.buf, dst_reg, src1_reg);
let src2_reg = self.load_to_reg(src2)?;
asm::add_register64bit_register64bit(&mut self.buf, dst_reg, src2_reg);
// NOTE(review): claim_gp_reg can spill an in-use register to the stack;
// presumably load_to_reg reloads spilled symbols, so src1/src2 being
// evicted by the dst claim would be handled — confirm against load_to_reg.
Ok(())
}
fn finalize(&mut self) -> Result<(&'a [u8], &[Relocation]), String> {
// TODO: handle allocating and cleaning up data on the stack.
let mut out = bumpalo::vec![in self.env.arena];
@ -299,7 +318,7 @@ impl<'a> X86_64Backend<'a> {
Ok(self.gp_free_regs.pop().unwrap())
} else if self.gp_used_regs.len() > 0 {
let (reg, sym) = self.gp_used_regs.remove(0);
self.free_to_stack(&sym);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err(format!("completely out of registers"))