diff --git a/compiler/gen_dev/src/generic64/aarch64.rs b/compiler/gen_dev/src/generic64/aarch64.rs
index 194346358a..bbf5fb60c2 100644
--- a/compiler/gen_dev/src/generic64/aarch64.rs
+++ b/compiler/gen_dev/src/generic64/aarch64.rs
@@ -307,6 +307,16 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
         }
     }
 
+    #[inline(always)]
+    fn sub_reg64_reg64_reg64(
+        _buf: &mut Vec<'_, u8>,
+        _dst: AArch64GPReg,
+        _src1: AArch64GPReg,
+        _src2: AArch64GPReg,
+    ) {
+        unimplemented!("register subtraction not implemented yet for AArch64");
+    }
+
     #[inline(always)]
     fn ret(buf: &mut Vec<'_, u8>) {
         ret_reg64(buf, AArch64GPReg::LR)
diff --git a/compiler/gen_dev/src/generic64/mod.rs b/compiler/gen_dev/src/generic64/mod.rs
index 5166a5aeb6..0432aa8a58 100644
--- a/compiler/gen_dev/src/generic64/mod.rs
+++ b/compiler/gen_dev/src/generic64/mod.rs
@@ -51,6 +51,7 @@ pub trait Assembler<GPReg: GPRegTrait> {
     fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GPReg, offset: i32);
     fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GPReg);
     fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, imm32: i32);
+    fn sub_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
     fn ret(buf: &mut Vec<'_, u8>);
 }
 
@@ -194,6 +195,19 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
         Ok(())
     }
 
+    fn build_num_sub_i64(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+    ) -> Result<(), String> {
+        let dst_reg = self.claim_gp_reg(dst)?;
+        let src1_reg = self.load_to_reg(src1)?;
+        let src2_reg = self.load_to_reg(src2)?;
+        ASM::sub_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
+        Ok(())
+    }
+
     fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String> {
         match lit {
             Literal::Int(x) => {
diff --git a/compiler/gen_dev/src/generic64/x86_64.rs b/compiler/gen_dev/src/generic64/x86_64.rs
index b9471f9780..2f43ac920b 100644
--- a/compiler/gen_dev/src/generic64/x86_64.rs
+++ b/compiler/gen_dev/src/generic64/x86_64.rs
@@ -310,6 +310,22 @@ impl Assembler<X86_64GPReg> for X86_64Assembler {
         }
     }
     #[inline(always)]
+    fn sub_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: X86_64GPReg,
+        src1: X86_64GPReg,
+        src2: X86_64GPReg,
+    ) {
+        if dst == src1 {
+            sub_reg64_reg64(buf, dst, src2);
+        } else {
+            // Subtraction is not commutative, so there is no dst == src2
+            // shortcut like add has; copy src1 into dst, then subtract src2.
+            mov_reg64_reg64(buf, dst, src1);
+            sub_reg64_reg64(buf, dst, src2);
+        }
+    }
+    #[inline(always)]
     fn ret(buf: &mut Vec<'_, u8>) {
         ret(buf);
     }
@@ -379,6 +395,16 @@ fn add_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GPReg, src: X86_64GPReg) {
     buf.extend(&[rex, 0x01, 0xC0 + dst_mod + src_mod]);
 }
 
+/// `SUB r/m64,r64` -> Sub r64 from r/m64.
+#[inline(always)]
+fn sub_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GPReg, src: X86_64GPReg) {
+    let rex = add_rm_extension(dst, REX_W);
+    let rex = add_reg_extension(src, rex);
+    let dst_mod = dst as u8 % 8;
+    let src_mod = (src as u8 % 8) << 3;
+    buf.extend(&[rex, 0x29, 0xC0 + dst_mod + src_mod]);
+}
+
 /// `CMOVL r64,r/m64` -> Move if less (SF≠ OF).
 #[inline(always)]
 fn cmovl_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GPReg, src: X86_64GPReg) {
diff --git a/compiler/gen_dev/src/lib.rs b/compiler/gen_dev/src/lib.rs
index a4df102b59..e3a80567e1 100644
--- a/compiler/gen_dev/src/lib.rs
+++ b/compiler/gen_dev/src/lib.rs
@@ -24,7 +24,7 @@ pub struct Env<'a> {
 }
 
 // INLINED_SYMBOLS is a set of all of the functions we automatically inline if seen.
-const INLINED_SYMBOLS: [Symbol; 2] = [Symbol::NUM_ABS, Symbol::NUM_ADD];
+const INLINED_SYMBOLS: [Symbol; 3] = [Symbol::NUM_ABS, Symbol::NUM_ADD, Symbol::NUM_SUB];
 
 // These relocations likely will need a length.
 // They may even need more definition, but this should be at least good enough for how we will use elf.
@@ -130,6 +130,10 @@ where
                     // Instead of calling the function, just inline it.
                     self.build_run_low_level(sym, &LowLevel::NumAdd, arguments, layout)
                 }
+                Symbol::NUM_SUB => {
+                    // Instead of calling the function, just inline it.
+                    self.build_run_low_level(sym, &LowLevel::NumSub, arguments, layout)
+                }
                 x => Err(format!("the function, {:?}, is not yet implemented", x)),
             }
         }
@@ -172,6 +176,15 @@ where
                    x => Err(format!("layout, {:?}, not implemented yet", x)),
                }
            }
+            LowLevel::NumSub => {
+                // TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low level method.
+                match layout {
+                    Layout::Builtin(Builtin::Int64) => {
+                        self.build_num_sub_i64(sym, &args[0], &args[1])
+                    }
+                    x => Err(format!("layout, {:?}, not implemented yet", x)),
+                }
+            }
             x => Err(format!("low level, {:?}. is not yet implemented", x)),
         }
     }
@@ -180,7 +193,7 @@ where
     /// It only deals with inputs and outputs of i64 type.
     fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;
 
-    /// build_num_add_i64 stores the absolute value of src into dst.
+    /// build_num_add_i64 stores the sum of src1 and src2 into dst.
     /// It only deals with inputs and outputs of i64 type.
     fn build_num_add_i64(
         &mut self,
@@ -189,6 +202,15 @@ where
         src2: &Symbol,
     ) -> Result<(), String>;
 
+    /// build_num_sub_i64 stores the `src1 - src2` difference into dst.
+    /// It only deals with inputs and outputs of i64 type.
+    fn build_num_sub_i64(
+        &mut self,
+        dst: &Symbol,
+        src1: &Symbol,
+        src2: &Symbol,
+    ) -> Result<(), String>;
+
     /// literal_map gets the map from symbol to literal, used for lazy loading and literal folding.
     fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>>;
 
diff --git a/compiler/gen_dev/tests/gen_num.rs b/compiler/gen_dev/tests/gen_num.rs
index 24be18721f..eb23442e8f 100644
--- a/compiler/gen_dev/tests/gen_num.rs
+++ b/compiler/gen_dev/tests/gen_num.rs
@@ -40,6 +40,19 @@ mod gen_num {
         );
     }
 
+    #[test]
+    fn gen_sub_i64() {
+        assert_evals_to!(
+            indoc!(
+                r#"
+                    1 - 2 - 3
+                "#
+            ),
+            -4,
+            i64
+        );
+    }
+
     #[test]
     fn i64_force_stack() {
         // This claims 33 registers. One more than Arm and RISC-V, and many more than x86-64.
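For reference, the bytes emitted by the new `sub_reg64_reg64` helper follow the x86-64 encoding of `SUB r/m64, r64`: a REX.W prefix, opcode 0x29, and a register-direct ModRM byte. The standalone sketch below re-derives that encoding outside the compiler; the local REX_W constant and the numeric register ids (RAX = 0, RCX = 1, RDX = 2, R8 = 8) are assumptions made for illustration and are not taken from `X86_64GPReg`.

// Standalone sketch of the `SUB r/m64, r64` encoding used by `sub_reg64_reg64`:
// REX.W prefix, opcode 0x29, ModRM with mod = 0b11 (register direct).
const REX_W: u8 = 0x48;

fn encode_sub_reg64_reg64(dst: u8, src: u8) -> [u8; 3] {
    // REX.B (bit 0) extends the r/m (dst) field, REX.R (bit 2) extends the reg (src) field.
    let rex = REX_W | (dst >= 8) as u8 | (((src >= 8) as u8) << 2);
    // ModRM: mod = 11, reg = src, r/m = dst.
    let modrm = 0xC0 + ((src % 8) << 3) + (dst % 8);
    [rex, 0x29, modrm]
}

fn main() {
    // `sub rax, rcx` -> 48 29 C8
    assert_eq!(encode_sub_reg64_reg64(0, 1), [0x48, 0x29, 0xC8]);
    // `sub r8, rdx` -> 49 29 D0 (REX.B set because the destination is R8)
    assert_eq!(encode_sub_reg64_reg64(8, 2), [0x49, 0x29, 0xD0]);
}

Assuming `add_rm_extension` and `add_reg_extension` only set the REX.B/REX.R bits for R8..R15, `sub_reg64_reg64(buf, RAX, RCX)` appends the same `48 29 C8` sequence to the buffer, and the `dst == src1` fast path in `sub_reg64_reg64_reg64` therefore costs a single three-byte instruction.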