Mirror of https://github.com/roc-lang/roc.git (synced 2025-10-02 16:21:11 +00:00)

Merge branch 'trunk' into format-final-comments-record-type

Commit a55d755108
11 changed files with 2689 additions and 1 deletions
compiler/gen_dev/Cargo.toml (new file, 44 lines)
@@ -0,0 +1,44 @@
[package]
name = "roc_gen_dev"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
license = "Apache-2.0"

[dependencies]
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_load = { path = "../load" }
roc_module = { path = "../module" }
roc_problem = { path = "../problem" }
roc_types = { path = "../types" }
roc_builtins = { path = "../builtins" }
roc_constrain = { path = "../constrain" }
roc_uniq = { path = "../uniq" }
roc_unify = { path = "../unify" }
roc_solve = { path = "../solve" }
roc_mono = { path = "../mono" }
im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version!
bumpalo = { version = "3.2", features = ["collections"] }
inlinable_string = "0.1"
target-lexicon = "0.10"
libloading = "0.6"
object = { version = "0.22", features = ["write"] }

[dev-dependencies]
roc_can = { path = "../can" }
roc_parse = { path = "../parse" }
roc_reporting = { path = "../reporting" }
roc_build = { path = "../build" }
roc_std = { path = "../../roc_std" }
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
tokio = { version = "0.2", features = ["blocking", "fs", "sync", "rt-threaded"] }
bumpalo = { version = "3.2", features = ["collections"] }
libc = "0.2"
tempfile = "3.1.0"
itertools = "0.9"
compiler/gen_dev/src/generic64/mod.rs (new file, 331 lines)
@@ -0,0 +1,331 @@
use crate::{Backend, Env, Relocation};
use bumpalo::collections::Vec;
use roc_collections::all::{ImSet, MutMap, MutSet};
use roc_module::symbol::Symbol;
use roc_mono::ir::{Literal, Stmt};
use std::marker::PhantomData;
use target_lexicon::Triple;

pub mod x86_64;

pub trait CallConv<GPReg> {
    fn gp_param_regs() -> &'static [GPReg];
    fn gp_return_regs() -> &'static [GPReg];
    fn gp_default_free_regs() -> &'static [GPReg];

    // A linear scan of an array may be faster than a set technically.
    // That being said, fastest would likely be a trait based on calling convention/register.
    fn caller_saved_regs() -> ImSet<GPReg>;
    fn callee_saved_regs() -> ImSet<GPReg>;

    fn stack_pointer() -> GPReg;
    fn frame_pointer() -> GPReg;

    fn shadow_space_size() -> u8;
    // It may be worth ignoring the red zone and keeping things simpler.
    fn red_zone_size() -> u8;
}

pub trait Assembler<GPReg> {
    fn add_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
    fn add_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
    fn cmovl_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
    fn mov_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
    fn mov_register64bit_immediate64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i64);
    fn mov_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
    fn mov_register64bit_stackoffset32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, offset: i32);
    fn mov_stackoffset32bit_register64bit<'a>(buf: &mut Vec<'a, u8>, offset: i32, src: GPReg);
    fn neg_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
    fn ret<'a>(buf: &mut Vec<'a, u8>);
    fn sub_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
    fn pop_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
    fn push_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
}

#[derive(Clone, Debug, PartialEq)]
enum SymbolStorage<GPReg> {
    // These may need layout, but I am not sure.
    // I think whenever a symbol would be used, we specify layout anyways.
    GPRegeg(GPReg),
    Stack(i32),
    StackAndGPRegeg(GPReg, i32),
}

pub trait GPRegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}

pub struct Backend64Bit<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> {
    phantom_asm: PhantomData<ASM>,
    phantom_cc: PhantomData<CC>,
    env: &'a Env<'a>,
    buf: Vec<'a, u8>,

    /// leaf_function is true if the only calls this function makes are tail calls.
    /// If that is the case, we can skip emitting the frame pointer and updating the stack.
    leaf_function: bool,

    last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
    free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
    symbols_map: MutMap<Symbol, SymbolStorage<GPReg>>,
    literal_map: MutMap<Symbol, Literal<'a>>,

    // This should probably be smarter than a vec.
    // There are certain registers we should always use first. With pushing and popping, this could get mixed.
    gp_free_regs: Vec<'a, GPReg>,

    // The last major thing we need is a way to decide what reg to free when all of them are full.
    // Theoretically we want a basic lru cache for the currently loaded symbols.
    // For now just a vec of used registers and the symbols they contain.
    gp_used_regs: Vec<'a, (GPReg, Symbol)>,

    stack_size: i32,

    // Used callee saved regs must be tracked for pushing and popping at the beginning/end of the function.
    used_callee_saved_regs: MutSet<GPReg>,
}

impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<'a>
    for Backend64Bit<'a, GPReg, ASM, CC>
{
    fn new(env: &'a Env, _target: &Triple) -> Result<Self, String> {
        Ok(Backend64Bit {
            phantom_asm: PhantomData,
            phantom_cc: PhantomData,
            env,
            leaf_function: true,
            buf: bumpalo::vec!(in env.arena),
            last_seen_map: MutMap::default(),
            free_map: MutMap::default(),
            symbols_map: MutMap::default(),
            literal_map: MutMap::default(),
            gp_free_regs: bumpalo::vec![in env.arena],
            gp_used_regs: bumpalo::vec![in env.arena],
            stack_size: 0,
            used_callee_saved_regs: MutSet::default(),
        })
    }

    fn env(&self) -> &'a Env<'a> {
        self.env
    }

    fn reset(&mut self) {
        self.stack_size = -(CC::red_zone_size() as i32);
        self.leaf_function = true;
        self.last_seen_map.clear();
        self.free_map.clear();
        self.symbols_map.clear();
        self.buf.clear();
        self.used_callee_saved_regs.clear();
        self.gp_free_regs.clear();
        self.gp_used_regs.clear();
        self.gp_free_regs
            .extend_from_slice(CC::gp_default_free_regs());
    }

    fn set_not_leaf_function(&mut self) {
        self.leaf_function = false;
        // If this is not a leaf function, it can't use the shadow space.
        self.stack_size = CC::shadow_space_size() as i32 - CC::red_zone_size() as i32;
    }

    fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>> {
        &mut self.literal_map
    }

    fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>> {
        &mut self.last_seen_map
    }

    fn set_free_map(&mut self, map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>) {
        self.free_map = map;
    }

    fn free_map(&mut self) -> &mut MutMap<*const Stmt<'a>, Vec<'a, Symbol>> {
        &mut self.free_map
    }

    fn finalize(&mut self) -> Result<(&'a [u8], &[Relocation]), String> {
        let mut out = bumpalo::vec![in self.env.arena];

        if !self.leaf_function {
            // I believe that this will have to move away from push and to mov to be generic across backends.
            ASM::push_register64bit(&mut out, CC::frame_pointer());
            ASM::mov_register64bit_register64bit(
                &mut out,
                CC::frame_pointer(),
                CC::stack_pointer(),
            );
        }
        // Save data in all callee saved regs.
        let mut pop_order = bumpalo::vec![in self.env.arena];
        for reg in &self.used_callee_saved_regs {
            ASM::push_register64bit(&mut out, *reg);
            pop_order.push(*reg);
        }
        if self.stack_size > 0 {
            ASM::sub_register64bit_immediate32bit(&mut out, CC::stack_pointer(), self.stack_size);
        }

        // Add function body.
        out.extend(&self.buf);

        if self.stack_size > 0 {
            ASM::add_register64bit_immediate32bit(&mut out, CC::stack_pointer(), self.stack_size);
        }
        // Restore data in callee saved regs.
        while let Some(reg) = pop_order.pop() {
            ASM::pop_register64bit(&mut out, reg);
        }
        if !self.leaf_function {
            ASM::pop_register64bit(&mut out, CC::frame_pointer());
        }
        ASM::ret(&mut out);

        Ok((out.into_bump_slice(), &[]))
    }

    fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String> {
        let dst_reg = self.claim_gp_reg(dst)?;
        let src_reg = self.load_to_reg(src)?;
        ASM::mov_register64bit_register64bit(&mut self.buf, dst_reg, src_reg);
        ASM::neg_register64bit(&mut self.buf, dst_reg);
        ASM::cmovl_register64bit_register64bit(&mut self.buf, dst_reg, src_reg);
        Ok(())
    }

    fn build_num_add_i64(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
    ) -> Result<(), String> {
        let dst_reg = self.claim_gp_reg(dst)?;
        let src1_reg = self.load_to_reg(src1)?;
        ASM::mov_register64bit_register64bit(&mut self.buf, dst_reg, src1_reg);
        let src2_reg = self.load_to_reg(src2)?;
        ASM::add_register64bit_register64bit(&mut self.buf, dst_reg, src2_reg);
        Ok(())
    }

    fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String> {
        match lit {
            Literal::Int(x) => {
                let reg = self.claim_gp_reg(sym)?;
                let val = *x;
                ASM::mov_register64bit_immediate64bit(&mut self.buf, reg, val);
                Ok(())
            }
            x => Err(format!("loading literal, {:?}, is not yet implemented", x)),
        }
    }

    fn free_symbol(&mut self, sym: &Symbol) {
        self.symbols_map.remove(sym);
        for i in 0..self.gp_used_regs.len() {
            let (reg, saved_sym) = self.gp_used_regs[i];
            if saved_sym == *sym {
                self.gp_free_regs.push(reg);
                self.gp_used_regs.remove(i);
                break;
            }
        }
    }

    fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String> {
        let val = self.symbols_map.get(sym);
        match val {
            Some(SymbolStorage::GPRegeg(reg)) if *reg == CC::gp_return_regs()[0] => Ok(()),
            Some(SymbolStorage::GPRegeg(reg)) => {
                // If it fits in a general purpose register, just copy it over.
                // Technically this can be optimized to produce shorter instructions if less than 64bits.
                ASM::mov_register64bit_register64bit(&mut self.buf, CC::gp_return_regs()[0], *reg);
                Ok(())
            }
            Some(x) => Err(format!(
                "returning symbol storage, {:?}, is not yet implemented",
                x
            )),
            None => Err(format!("Unknown return symbol: {}", sym)),
        }
    }
}

/// This impl block is for IR-related instructions that need backend specific information.
/// For example, loading a symbol for doing a computation.
impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>>
    Backend64Bit<'a, GPReg, ASM, CC>
{
    fn claim_gp_reg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
        let reg = if !self.gp_free_regs.is_empty() {
            let free_reg = self.gp_free_regs.pop().unwrap();
            if CC::callee_saved_regs().contains(&free_reg) {
                self.used_callee_saved_regs.insert(free_reg);
            }
            Ok(free_reg)
        } else if !self.gp_used_regs.is_empty() {
            let (reg, sym) = self.gp_used_regs.remove(0);
            self.free_to_stack(&sym)?;
            Ok(reg)
        } else {
            Err("completely out of registers".to_string())
        }?;

        self.gp_used_regs.push((reg, *sym));
        self.symbols_map.insert(*sym, SymbolStorage::GPRegeg(reg));
        Ok(reg)
    }

    fn load_to_reg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
        let val = self.symbols_map.remove(sym);
        match val {
            Some(SymbolStorage::GPRegeg(reg)) => {
                self.symbols_map.insert(*sym, SymbolStorage::GPRegeg(reg));
                Ok(reg)
            }
            Some(SymbolStorage::StackAndGPRegeg(reg, offset)) => {
                self.symbols_map
                    .insert(*sym, SymbolStorage::StackAndGPRegeg(reg, offset));
                Ok(reg)
            }
            Some(SymbolStorage::Stack(offset)) => {
                let reg = self.claim_gp_reg(sym)?;
                self.symbols_map
                    .insert(*sym, SymbolStorage::StackAndGPRegeg(reg, offset));
                ASM::mov_register64bit_stackoffset32bit(&mut self.buf, reg, offset as i32);
                Ok(reg)
            }
            None => Err(format!("Unknown symbol: {}", sym)),
        }
    }

    fn free_to_stack(&mut self, sym: &Symbol) -> Result<(), String> {
        let val = self.symbols_map.remove(sym);
        match val {
            Some(SymbolStorage::GPRegeg(reg)) => {
                let offset = self.stack_size;
                self.stack_size += 8;
                if let Some(size) = self.stack_size.checked_add(8) {
                    self.stack_size = size;
                } else {
                    return Err(format!(
                        "Ran out of stack space while saving symbol: {}",
                        sym
                    ));
                }
                ASM::mov_stackoffset32bit_register64bit(&mut self.buf, offset as i32, reg);
                self.symbols_map
                    .insert(*sym, SymbolStorage::Stack(offset as i32));
                Ok(())
            }
            Some(SymbolStorage::StackAndGPRegeg(_, offset)) => {
                self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
                Ok(())
            }
            Some(SymbolStorage::Stack(offset)) => {
                self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
                Ok(())
            }
            None => Err(format!("Unknown symbol: {}", sym)),
        }
    }
}
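The abs lowering in build_num_abs_i64 above leans on a mov/neg/cmovl idiom. A minimal standalone sketch (not part of this commit) of the same idiom in plain Rust, to show why the conditional move restores the original value exactly when the source was non-negative:

// Sketch only: mirrors `mov dst, src; neg dst; cmovl dst, src` from build_num_abs_i64.
fn abs_via_neg_cmovl(src: i64) -> i64 {
    let mut dst = src;        // mov dst, src
    dst = dst.wrapping_neg(); // neg dst (the sign flag comes from this result)
    if dst < 0 {              // cmovl dst, src: negation went negative, so src was positive
        dst = src;
    }
    dst
}

fn main() {
    for x in [-7i64, 0, 7] {
        assert_eq!(abs_via_neg_cmovl(x), x.abs());
    }
}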
compiler/gen_dev/src/generic64/x86_64.rs (new file, 582 lines)
@@ -0,0 +1,582 @@
use crate::generic64::{Assembler, CallConv, GPRegTrait};
use bumpalo::collections::Vec;
use roc_collections::all::ImSet;

// Not sure exactly how I want to represent registers.
// If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub enum X86_64GPReg {
    RAX = 0,
    RCX = 1,
    RDX = 2,
    RBX = 3,
    RSP = 4,
    RBP = 5,
    RSI = 6,
    RDI = 7,
    R8 = 8,
    R9 = 9,
    R10 = 10,
    R11 = 11,
    R12 = 12,
    R13 = 13,
    R14 = 14,
    R15 = 15,
}

impl GPRegTrait for X86_64GPReg {}

const REX: u8 = 0x40;
const REX_W: u8 = REX + 0x8;

fn add_rm_extension(reg: X86_64GPReg, byte: u8) -> u8 {
    if reg as u8 > 7 {
        byte + 1
    } else {
        byte
    }
}

fn add_opcode_extension(reg: X86_64GPReg, byte: u8) -> u8 {
    add_rm_extension(reg, byte)
}

fn add_reg_extension(reg: X86_64GPReg, byte: u8) -> u8 {
    if reg as u8 > 7 {
        byte + 4
    } else {
        byte
    }
}

pub struct X86_64Assembler {}
pub struct X86_64WindowsFastcall {}
pub struct X86_64SystemV {}

impl CallConv<X86_64GPReg> for X86_64SystemV {
    fn gp_param_regs() -> &'static [X86_64GPReg] {
        &[
            X86_64GPReg::RDI,
            X86_64GPReg::RSI,
            X86_64GPReg::RDX,
            X86_64GPReg::RCX,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
        ]
    }
    fn gp_return_regs() -> &'static [X86_64GPReg] {
        &[X86_64GPReg::RAX, X86_64GPReg::RDX]
    }
    fn gp_default_free_regs() -> &'static [X86_64GPReg] {
        &[
            // The regs we want to use first should be at the end of this vec.
            // We will use pop to get which reg to use next.
            // Use callee saved regs last.
            X86_64GPReg::RBX,
            // Don't use frame pointer: X86_64GPReg::RBP,
            X86_64GPReg::R12,
            X86_64GPReg::R13,
            X86_64GPReg::R14,
            X86_64GPReg::R15,
            // Use caller saved regs first.
            X86_64GPReg::RAX,
            X86_64GPReg::RCX,
            X86_64GPReg::RDX,
            // Don't use stack pointer: X86_64GPReg::RSP,
            X86_64GPReg::RSI,
            X86_64GPReg::RDI,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
            X86_64GPReg::R10,
            X86_64GPReg::R11,
        ]
    }
    fn caller_saved_regs() -> ImSet<X86_64GPReg> {
        // TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
        ImSet::from(vec![
            X86_64GPReg::RAX,
            X86_64GPReg::RCX,
            X86_64GPReg::RDX,
            X86_64GPReg::RSP,
            X86_64GPReg::RSI,
            X86_64GPReg::RDI,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
            X86_64GPReg::R10,
            X86_64GPReg::R11,
        ])
    }
    fn callee_saved_regs() -> ImSet<X86_64GPReg> {
        // TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
        ImSet::from(vec![
            X86_64GPReg::RBX,
            X86_64GPReg::RBP,
            X86_64GPReg::R12,
            X86_64GPReg::R13,
            X86_64GPReg::R14,
            X86_64GPReg::R15,
        ])
    }
    fn stack_pointer() -> X86_64GPReg {
        X86_64GPReg::RSP
    }
    fn frame_pointer() -> X86_64GPReg {
        X86_64GPReg::RBP
    }
    fn shadow_space_size() -> u8 {
        0
    }
    fn red_zone_size() -> u8 {
        128
    }
}

impl CallConv<X86_64GPReg> for X86_64WindowsFastcall {
    fn gp_param_regs() -> &'static [X86_64GPReg] {
        &[
            X86_64GPReg::RCX,
            X86_64GPReg::RDX,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
        ]
    }
    fn gp_return_regs() -> &'static [X86_64GPReg] {
        &[X86_64GPReg::RAX]
    }
    fn gp_default_free_regs() -> &'static [X86_64GPReg] {
        &[
            // The regs we want to use first should be at the end of this vec.
            // We will use pop to get which reg to use next.
            // Use callee saved regs last.
            X86_64GPReg::RBX,
            // Don't use frame pointer: X86_64GPReg::RBP,
            X86_64GPReg::RSI,
            // Don't use stack pointer: X86_64GPReg::RSP,
            X86_64GPReg::RDI,
            X86_64GPReg::R12,
            X86_64GPReg::R13,
            X86_64GPReg::R14,
            X86_64GPReg::R15,
            // Use caller saved regs first.
            X86_64GPReg::RAX,
            X86_64GPReg::RCX,
            X86_64GPReg::RDX,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
            X86_64GPReg::R10,
            X86_64GPReg::R11,
        ]
    }
    fn caller_saved_regs() -> ImSet<X86_64GPReg> {
        // TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
        ImSet::from(vec![
            X86_64GPReg::RAX,
            X86_64GPReg::RCX,
            X86_64GPReg::RDX,
            X86_64GPReg::R8,
            X86_64GPReg::R9,
            X86_64GPReg::R10,
            X86_64GPReg::R11,
        ])
    }
    fn callee_saved_regs() -> ImSet<X86_64GPReg> {
        // TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
        ImSet::from(vec![
            X86_64GPReg::RBX,
            X86_64GPReg::RBP,
            X86_64GPReg::RSI,
            X86_64GPReg::RSP,
            X86_64GPReg::RDI,
            X86_64GPReg::R12,
            X86_64GPReg::R13,
            X86_64GPReg::R14,
            X86_64GPReg::R15,
        ])
    }
    fn stack_pointer() -> X86_64GPReg {
        X86_64GPReg::RSP
    }
    fn frame_pointer() -> X86_64GPReg {
        X86_64GPReg::RBP
    }
    fn shadow_space_size() -> u8 {
        32
    }
    fn red_zone_size() -> u8 {
        0
    }
}

impl Assembler<X86_64GPReg> for X86_64Assembler {
    // Below here are the functions for all of the assembly instructions.
    // Their names are based on the instruction and operators combined.
    // You should call `buf.reserve()` if you push or extend more than once.
    // Unit tests are added at the bottom of the file to ensure correct asm generation.
    // Please keep these in alphanumeric order.

    /// `ADD r/m64, imm32` -> Add imm32 sign-extended to 64-bits to r/m64.
    fn add_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
        // This can be optimized if the immediate is 1 byte.
        let rex = add_rm_extension(dst, REX_W);
        let dst_mod = dst as u8 % 8;
        buf.reserve(7);
        buf.extend(&[rex, 0x81, 0xC0 + dst_mod]);
        buf.extend(&imm.to_le_bytes());
    }

    /// `ADD r/m64,r64` -> Add r64 to r/m64.
    fn add_register64bit_register64bit<'a>(
        buf: &mut Vec<'a, u8>,
        dst: X86_64GPReg,
        src: X86_64GPReg,
    ) {
        let rex = add_rm_extension(dst, REX_W);
        let rex = add_reg_extension(src, rex);
        let dst_mod = dst as u8 % 8;
        let src_mod = (src as u8 % 8) << 3;
        buf.extend(&[rex, 0x01, 0xC0 + dst_mod + src_mod]);
    }

    /// `CMOVL r64,r/m64` -> Move if less (SF ≠ OF).
    fn cmovl_register64bit_register64bit<'a>(
        buf: &mut Vec<'a, u8>,
        dst: X86_64GPReg,
        src: X86_64GPReg,
    ) {
        let rex = add_reg_extension(dst, REX_W);
        let rex = add_rm_extension(src, rex);
        let dst_mod = (dst as u8 % 8) << 3;
        let src_mod = src as u8 % 8;
        buf.extend(&[rex, 0x0F, 0x4C, 0xC0 + dst_mod + src_mod]);
    }

    /// `MOV r/m64, imm32` -> Move imm32 sign extended to 64-bits to r/m64.
    fn mov_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
        let rex = add_rm_extension(dst, REX_W);
        let dst_mod = dst as u8 % 8;
        buf.reserve(7);
        buf.extend(&[rex, 0xC7, 0xC0 + dst_mod]);
        buf.extend(&imm.to_le_bytes());
    }

    /// `MOV r64, imm64` -> Move imm64 to r64.
    fn mov_register64bit_immediate64bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i64) {
        if imm <= i32::MAX as i64 && imm >= i32::MIN as i64 {
            Self::mov_register64bit_immediate32bit(buf, dst, imm as i32)
        } else {
            let rex = add_opcode_extension(dst, REX_W);
            let dst_mod = dst as u8 % 8;
            buf.reserve(10);
            buf.extend(&[rex, 0xB8 + dst_mod]);
            buf.extend(&imm.to_le_bytes());
        }
    }

    /// `MOV r/m64,r64` -> Move r64 to r/m64.
    fn mov_register64bit_register64bit<'a>(
        buf: &mut Vec<'a, u8>,
        dst: X86_64GPReg,
        src: X86_64GPReg,
    ) {
        let rex = add_rm_extension(dst, REX_W);
        let rex = add_reg_extension(src, rex);
        let dst_mod = dst as u8 % 8;
        let src_mod = (src as u8 % 8) << 3;
        buf.extend(&[rex, 0x89, 0xC0 + dst_mod + src_mod]);
    }

    /// `MOV r64,r/m64` -> Move r/m64 to r64.
    fn mov_register64bit_stackoffset32bit<'a>(
        buf: &mut Vec<'a, u8>,
        dst: X86_64GPReg,
        offset: i32,
    ) {
        // This can be optimized based on how many bytes the offset actually is.
        // This function can probably be made to take any memory offset, I didn't feel like figuring it out rn.
        // Also, this may technically be faster generation since stack operations should be so common.
        let rex = add_reg_extension(dst, REX_W);
        let dst_mod = (dst as u8 % 8) << 3;
        buf.reserve(8);
        buf.extend(&[rex, 0x8B, 0x84 + dst_mod, 0x24]);
        buf.extend(&offset.to_le_bytes());
    }

    /// `MOV r/m64,r64` -> Move r64 to r/m64.
    fn mov_stackoffset32bit_register64bit<'a>(
        buf: &mut Vec<'a, u8>,
        offset: i32,
        src: X86_64GPReg,
    ) {
        // This can be optimized based on how many bytes the offset actually is.
        // This function can probably be made to take any memory offset, I didn't feel like figuring it out rn.
        // Also, this may technically be faster generation since stack operations should be so common.
        let rex = add_reg_extension(src, REX_W);
        let src_mod = (src as u8 % 8) << 3;
        buf.reserve(8);
        buf.extend(&[rex, 0x89, 0x84 + src_mod, 0x24]);
        buf.extend(&offset.to_le_bytes());
    }

    /// `NEG r/m64` -> Two's complement negate r/m64.
    fn neg_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
        let rex = add_rm_extension(reg, REX_W);
        let reg_mod = reg as u8 % 8;
        buf.extend(&[rex, 0xF7, 0xD8 + reg_mod]);
    }

    /// `RET` -> Near return to calling procedure.
    fn ret<'a>(buf: &mut Vec<'a, u8>) {
        buf.push(0xC3);
    }

    /// `SUB r/m64, imm32` -> Subtract imm32 sign-extended to 64-bits from r/m64.
    fn sub_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
        // This can be optimized if the immediate is 1 byte.
        let rex = add_rm_extension(dst, REX_W);
        let dst_mod = dst as u8 % 8;
        buf.reserve(7);
        buf.extend(&[rex, 0x81, 0xE8 + dst_mod]);
        buf.extend(&imm.to_le_bytes());
    }

    /// `POP r64` -> Pop top of stack into r64; increment stack pointer. Cannot encode 32-bit operand size.
    fn pop_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
        let reg_mod = reg as u8 % 8;
        if reg as u8 > 7 {
            let rex = add_opcode_extension(reg, REX);
            buf.extend(&[rex, 0x58 + reg_mod]);
        } else {
            buf.push(0x58 + reg_mod);
        }
    }

    /// `PUSH r64` -> Push r64.
    fn push_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
        let reg_mod = reg as u8 % 8;
        if reg as u8 > 7 {
            let rex = add_opcode_extension(reg, REX);
            buf.extend(&[rex, 0x50 + reg_mod]);
        } else {
            buf.push(0x50 + reg_mod);
        }
    }
}

// When writing tests, it is a good idea to test both a numbered and an unnumbered register.
// This is because R8-R15 often have special instruction prefixes.
#[cfg(test)]
mod tests {
    use super::*;

    const TEST_I32: i32 = 0x12345678;
    const TEST_I64: i64 = 0x12345678_9ABCDEF0;

    #[test]
    fn test_add_register64bit_immediate32bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (dst, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0x81, 0xC0]),
            (X86_64GPReg::R15, [0x49, 0x81, 0xC7]),
        ] {
            buf.clear();
            X86_64Assembler::add_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
            assert_eq!(expected, &buf[..3]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
        }
    }

    #[test]
    fn test_add_register64bit_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for ((dst, src), expected) in &[
            ((X86_64GPReg::RAX, X86_64GPReg::RAX), [0x48, 0x01, 0xC0]),
            ((X86_64GPReg::RAX, X86_64GPReg::R15), [0x4C, 0x01, 0xF8]),
            ((X86_64GPReg::R15, X86_64GPReg::RAX), [0x49, 0x01, 0xC7]),
            ((X86_64GPReg::R15, X86_64GPReg::R15), [0x4D, 0x01, 0xFF]),
        ] {
            buf.clear();
            X86_64Assembler::add_register64bit_register64bit(&mut buf, *dst, *src);
            assert_eq!(expected, &buf[..]);
        }
    }

    #[test]
    fn test_cmovl_register64bit_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for ((dst, src), expected) in &[
            (
                (X86_64GPReg::RAX, X86_64GPReg::RAX),
                [0x48, 0x0F, 0x4C, 0xC0],
            ),
            (
                (X86_64GPReg::RAX, X86_64GPReg::R15),
                [0x49, 0x0F, 0x4C, 0xC7],
            ),
            (
                (X86_64GPReg::R15, X86_64GPReg::RAX),
                [0x4C, 0x0F, 0x4C, 0xF8],
            ),
            (
                (X86_64GPReg::R15, X86_64GPReg::R15),
                [0x4D, 0x0F, 0x4C, 0xFF],
            ),
        ] {
            buf.clear();
            X86_64Assembler::cmovl_register64bit_register64bit(&mut buf, *dst, *src);
            assert_eq!(expected, &buf[..]);
        }
    }

    #[test]
    fn test_mov_register64bit_immediate32bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (dst, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0xC7, 0xC0]),
            (X86_64GPReg::R15, [0x49, 0xC7, 0xC7]),
        ] {
            buf.clear();
            X86_64Assembler::mov_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
            assert_eq!(expected, &buf[..3]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
        }
    }

    #[test]
    fn test_mov_register64bit_immediate64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (dst, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0xB8]),
            (X86_64GPReg::R15, [0x49, 0xBF]),
        ] {
            buf.clear();
            X86_64Assembler::mov_register64bit_immediate64bit(&mut buf, *dst, TEST_I64);
            assert_eq!(expected, &buf[..2]);
            assert_eq!(TEST_I64.to_le_bytes(), &buf[2..]);
        }
        for (dst, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0xC7, 0xC0]),
            (X86_64GPReg::R15, [0x49, 0xC7, 0xC7]),
        ] {
            buf.clear();
            X86_64Assembler::mov_register64bit_immediate64bit(&mut buf, *dst, TEST_I32 as i64);
            assert_eq!(expected, &buf[..3]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
        }
    }

    #[test]
    fn test_mov_register64bit_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for ((dst, src), expected) in &[
            ((X86_64GPReg::RAX, X86_64GPReg::RAX), [0x48, 0x89, 0xC0]),
            ((X86_64GPReg::RAX, X86_64GPReg::R15), [0x4C, 0x89, 0xF8]),
            ((X86_64GPReg::R15, X86_64GPReg::RAX), [0x49, 0x89, 0xC7]),
            ((X86_64GPReg::R15, X86_64GPReg::R15), [0x4D, 0x89, 0xFF]),
        ] {
            buf.clear();
            X86_64Assembler::mov_register64bit_register64bit(&mut buf, *dst, *src);
            assert_eq!(expected, &buf[..]);
        }
    }

    #[test]
    fn test_mov_register64bit_stackoffset32bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for ((dst, offset), expected) in &[
            ((X86_64GPReg::RAX, TEST_I32), [0x48, 0x8B, 0x84, 0x24]),
            ((X86_64GPReg::R15, TEST_I32), [0x4C, 0x8B, 0xBC, 0x24]),
        ] {
            buf.clear();
            X86_64Assembler::mov_register64bit_stackoffset32bit(&mut buf, *dst, *offset);
            assert_eq!(expected, &buf[..4]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[4..]);
        }
    }

    #[test]
    fn test_mov_stackoffset32bit_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for ((offset, src), expected) in &[
            ((TEST_I32, X86_64GPReg::RAX), [0x48, 0x89, 0x84, 0x24]),
            ((TEST_I32, X86_64GPReg::R15), [0x4C, 0x89, 0xBC, 0x24]),
        ] {
            buf.clear();
            X86_64Assembler::mov_stackoffset32bit_register64bit(&mut buf, *offset, *src);
            assert_eq!(expected, &buf[..4]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[4..]);
        }
    }

    #[test]
    fn test_neg_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (reg, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0xF7, 0xD8]),
            (X86_64GPReg::R15, [0x49, 0xF7, 0xDF]),
        ] {
            buf.clear();
            X86_64Assembler::neg_register64bit(&mut buf, *reg);
            assert_eq!(expected, &buf[..]);
        }
    }

    #[test]
    fn test_ret() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        X86_64Assembler::ret(&mut buf);
        assert_eq!(&[0xC3], &buf[..]);
    }

    #[test]
    fn test_sub_register64bit_immediate32bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (dst, expected) in &[
            (X86_64GPReg::RAX, [0x48, 0x81, 0xE8]),
            (X86_64GPReg::R15, [0x49, 0x81, 0xEF]),
        ] {
            buf.clear();
            X86_64Assembler::sub_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
            assert_eq!(expected, &buf[..3]);
            assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
        }
    }

    #[test]
    fn test_pop_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (dst, expected) in &[
            (X86_64GPReg::RAX, vec![0x58]),
            (X86_64GPReg::R15, vec![0x41, 0x5F]),
        ] {
            buf.clear();
            X86_64Assembler::pop_register64bit(&mut buf, *dst);
            assert_eq!(&expected[..], &buf[..]);
        }
    }

    #[test]
    fn test_push_register64bit() {
        let arena = bumpalo::Bump::new();
        let mut buf = bumpalo::vec![in &arena];
        for (src, expected) in &[
            (X86_64GPReg::RAX, vec![0x50]),
            (X86_64GPReg::R15, vec![0x41, 0x57]),
        ] {
            buf.clear();
            X86_64Assembler::push_register64bit(&mut buf, *src);
            assert_eq!(&expected[..], &buf[..]);
        }
    }
}
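As a cross-check of the REX/ModRM arithmetic the assembler above uses, here is a standalone sketch (not part of the commit) that re-derives one row of its own test table: `add r15, rax` must encode as [0x49, 0x01, 0xC7].

fn main() {
    let (dst, src): (u8, u8) = (15, 0); // register ids for R15 (r/m operand) and RAX (reg operand)
    let mut rex: u8 = 0x40 + 0x8; // REX.W: 64-bit operand size
    if dst > 7 {
        rex += 1; // REX.B extends the r/m field (add_rm_extension above)
    }
    if src > 7 {
        rex += 4; // REX.R extends the reg field (add_reg_extension above)
    }
    let modrm = 0xC0 + (dst % 8) + ((src % 8) << 3); // mod = 11: register-direct addressing
    assert_eq!([rex, 0x01, modrm], [0x49, 0x01, 0xC7]);
    println!("add r15, rax -> {:02X?}", [rex, 0x01, modrm]);
}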
compiler/gen_dev/src/lib.rs (new file, 388 lines)
@@ -0,0 +1,388 @@
#![warn(clippy::all, clippy::dbg_macro)]
// I'm skeptical that clippy::large_enum_variant is a good lint to have globally enabled.
//
// It warns about a performance problem where the only quick remediation is
// to allocate more on the heap, which has lots of tradeoffs - including making it
// long-term unclear which allocations *need* to happen for compilation's sake
// (e.g. recursive structures) versus those which were only added to appease clippy.
//
// Effectively optimizing data structure memory layout isn't a quick fix,
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]

use bumpalo::{collections::Vec, Bump};
use roc_collections::all::{MutMap, MutSet};
use roc_module::ident::TagName;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::ir::{CallType, Expr, JoinPointId, Literal, Proc, Stmt};
use roc_mono::layout::{Builtin, Layout};
use target_lexicon::Triple;

mod generic64;
mod object_builder;
pub use object_builder::build_module;
mod run_roc;

pub struct Env<'a> {
    pub arena: &'a Bump,
    pub interns: Interns,
    pub exposed_to_host: MutSet<Symbol>,
    pub lazy_literals: bool,
}

// INLINED_SYMBOLS is a set of all of the functions we automatically inline if seen.
const INLINED_SYMBOLS: [Symbol; 2] = [Symbol::NUM_ABS, Symbol::NUM_ADD];

// These relocations likely will need a length.
// They may even need more definition, but this should be at least good enough for how we will use elf.
#[allow(dead_code)]
enum Relocation<'a> {
    LocalData { offset: u64, data: &'a [u8] },
    LinkedFunction { offset: u64, name: &'a str },
    LinkedData { offset: u64, name: &'a str },
}

trait Backend<'a>
where
    Self: Sized,
{
    /// new creates a new backend that will output to the specific Object.
    fn new(env: &'a Env, target: &Triple) -> Result<Self, String>;

    fn env(&self) -> &'a Env<'a>;

    /// reset resets any registers or other values that may be occupied at the end of a procedure.
    fn reset(&mut self);

    /// finalize does any setup and cleanup that should happen around the procedure.
    /// finalize does setup because things like stack size and jump locations are not known until the function is written.
    /// For example, this can store the frame pointer and set up stack space.
    /// finalize is run at the end of build_proc when all internal code is finalized.
    fn finalize(&mut self) -> Result<(&'a [u8], &[Relocation]), String>;

    /// build_proc creates a procedure and outputs it to the wrapped object writer.
    fn build_proc(&mut self, proc: Proc<'a>) -> Result<(&'a [u8], &[Relocation]), String> {
        self.reset();
        // TODO: let the backend know of all the arguments.
        // let start = std::time::Instant::now();
        self.scan_ast(&proc.body);
        self.create_free_map();
        // let duration = start.elapsed();
        // println!("Time to calculate lifetimes: {:?}", duration);
        // println!("{:?}", self.last_seen_map());
        self.build_stmt(&proc.body)?;
        self.finalize()
    }

    /// build_stmt builds a statement and outputs it at the end of the buffer.
    fn build_stmt(&mut self, stmt: &Stmt<'a>) -> Result<(), String> {
        match stmt {
            Stmt::Let(sym, expr, layout, following) => {
                self.build_expr(sym, expr, layout)?;
                self.free_symbols(stmt);
                self.build_stmt(following)?;
                Ok(())
            }
            Stmt::Ret(sym) => {
                self.load_literal_symbols(&[*sym])?;
                self.return_symbol(sym)?;
                self.free_symbols(stmt);
                Ok(())
            }
            x => Err(format!("the statement, {:?}, is not yet implemented", x)),
        }
    }

    /// build_expr builds the expressions for the specified symbol.
    /// The builder must keep track of the symbol because it may be referred to later.
    fn build_expr(
        &mut self,
        sym: &Symbol,
        expr: &Expr<'a>,
        layout: &Layout<'a>,
    ) -> Result<(), String> {
        match expr {
            Expr::Literal(lit) => {
                if self.env().lazy_literals {
                    self.literal_map().insert(*sym, lit.clone());
                } else {
                    self.load_literal(sym, lit)?;
                }
                Ok(())
            }
            Expr::FunctionCall {
                call_type: CallType::ByName(func_sym),
                args,
                ..
            } => {
                match *func_sym {
                    Symbol::NUM_ABS => {
                        // Instead of calling the function, just inline it.
                        self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAbs, args), layout)
                    }
                    Symbol::NUM_ADD => {
                        // Instead of calling the function, just inline it.
                        self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAdd, args), layout)
                    }
                    x => Err(format!("the function, {:?}, is not yet implemented", x)),
                }
            }
            Expr::RunLowLevel(lowlevel, args) => {
                self.build_run_low_level(sym, lowlevel, args, layout)
            }
            x => Err(format!("the expression, {:?}, is not yet implemented", x)),
        }
    }

    /// build_run_low_level builds the low level operation and outputs to the specified symbol.
    /// The builder must keep track of the symbol because it may be referred to later.
    fn build_run_low_level(
        &mut self,
        sym: &Symbol,
        lowlevel: &LowLevel,
        args: &'a [Symbol],
        layout: &Layout<'a>,
    ) -> Result<(), String> {
        // Now that the arguments are needed, load them if they are literals.
        self.load_literal_symbols(args)?;
        match lowlevel {
            LowLevel::NumAbs => {
                // TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low level method.
                match layout {
                    Layout::Builtin(Builtin::Int64) => self.build_num_abs_i64(sym, &args[0]),
                    x => Err(format!("layout, {:?}, not implemented yet", x)),
                }
            }
            LowLevel::NumAdd => {
                // TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low level method.
                match layout {
                    Layout::Builtin(Builtin::Int64) => {
                        self.build_num_add_i64(sym, &args[0], &args[1])
                    }
                    x => Err(format!("layout, {:?}, not implemented yet", x)),
                }
            }
            x => Err(format!("low level, {:?}, is not yet implemented", x)),
        }
    }

    /// build_num_abs_i64 stores the absolute value of src into dst.
    /// It only deals with inputs and outputs of i64 type.
    fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;

    /// build_num_add_i64 stores the sum of src1 and src2 into dst.
    /// It only deals with inputs and outputs of i64 type.
    fn build_num_add_i64(
        &mut self,
        dst: &Symbol,
        src1: &Symbol,
        src2: &Symbol,
    ) -> Result<(), String>;

    /// literal_map gets the map from symbol to literal, used for lazy loading and literal folding.
    fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>>;

    fn load_literal_symbols(&mut self, syms: &[Symbol]) -> Result<(), String> {
        if self.env().lazy_literals {
            for sym in syms {
                if let Some(lit) = self.literal_map().remove(sym) {
                    self.load_literal(sym, &lit)?;
                }
            }
        }
        Ok(())
    }

    /// load_literal sets a symbol to be equal to a literal.
    fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String>;

    /// return_symbol moves a symbol to the correct return location for the backend.
    fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String>;

    /// free_symbols will free all symbols for the given statement.
    fn free_symbols(&mut self, stmt: &Stmt<'a>) {
        if let Some(syms) = self.free_map().remove(&(stmt as *const Stmt<'a>)) {
            for sym in syms {
                //println!("Freeing symbol: {:?}", sym);
                self.free_symbol(&sym);
            }
        }
    }

    /// free_symbol frees any registers or stack space used to hold a symbol.
    fn free_symbol(&mut self, sym: &Symbol);

    /// set_last_seen sets the statement a symbol was last seen in.
    fn set_last_seen(&mut self, sym: Symbol, stmt: &Stmt<'a>) {
        self.last_seen_map().insert(sym, stmt);
    }

    /// last_seen_map gets the map from symbol to when it is last seen in the function.
    fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>>;

    fn create_free_map(&mut self) {
        let mut free_map = MutMap::default();
        let arena = self.env().arena;
        for (sym, stmt) in self.last_seen_map() {
            let vals = free_map
                .entry(*stmt)
                .or_insert_with(|| bumpalo::vec![in arena]);
            vals.push(*sym);
        }
        self.set_free_map(free_map);
    }

    /// free_map gets the map from statement to the symbols that are free after it runs.
    fn free_map(&mut self) -> &mut MutMap<*const Stmt<'a>, Vec<'a, Symbol>>;

    /// set_free_map sets the free map to the given map.
    fn set_free_map(&mut self, map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>);

    /// set_not_leaf_function lets the backend know that it is not a leaf function.
    fn set_not_leaf_function(&mut self);

    /// scan_ast runs through the ast and fills the last seen map.
    /// It also checks if the function is a leaf function or not.
    /// This must iterate through the ast in the same way that build_stmt does, i.e. then before else.
    fn scan_ast(&mut self, stmt: &Stmt<'a>) {
        match stmt {
            Stmt::Let(sym, expr, _, following) => {
                self.set_last_seen(*sym, stmt);
                match expr {
                    Expr::Literal(_) => {}
                    Expr::FunctionPointer(sym, _) => self.set_last_seen(*sym, stmt),
                    Expr::FunctionCall {
                        call_type, args, ..
                    } => {
                        for sym in *args {
                            self.set_last_seen(*sym, stmt);
                        }
                        match call_type {
                            CallType::ByName(sym) => {
                                // For functions that we won't inline, we should not be a leaf function.
                                if !INLINED_SYMBOLS.contains(sym) {
                                    self.set_not_leaf_function();
                                }
                            }
                            CallType::ByPointer(sym) => {
                                self.set_not_leaf_function();
                                self.set_last_seen(*sym, stmt);
                            }
                        }
                    }
                    Expr::RunLowLevel(_, args) => {
                        for sym in *args {
                            self.set_last_seen(*sym, stmt);
                        }
                    }
                    Expr::ForeignCall { arguments, .. } => {
                        for sym in *arguments {
                            self.set_last_seen(*sym, stmt);
                        }
                        self.set_not_leaf_function();
                    }
                    Expr::Tag { arguments, .. } => {
                        for sym in *arguments {
                            self.set_last_seen(*sym, stmt);
                        }
                    }
                    Expr::Struct(syms) => {
                        for sym in *syms {
                            self.set_last_seen(*sym, stmt);
                        }
                    }
                    Expr::AccessAtIndex { structure, .. } => {
                        self.set_last_seen(*structure, stmt);
                    }
                    Expr::Array { elems, .. } => {
                        for sym in *elems {
                            self.set_last_seen(*sym, stmt);
                        }
                    }
                    Expr::Reuse {
                        symbol,
                        arguments,
                        tag_name,
                        ..
                    } => {
                        self.set_last_seen(*symbol, stmt);
                        match tag_name {
                            TagName::Closure(sym) => {
                                self.set_last_seen(*sym, stmt);
                            }
                            TagName::Private(sym) => {
                                self.set_last_seen(*sym, stmt);
                            }
                            TagName::Global(_) => {}
                        }
                        for sym in *arguments {
                            self.set_last_seen(*sym, stmt);
                        }
                    }
                    Expr::Reset(sym) => {
                        self.set_last_seen(*sym, stmt);
                    }
                    Expr::EmptyArray => {}
                    Expr::RuntimeErrorFunction(_) => {}
                }
                self.scan_ast(following);
            }
            Stmt::Switch {
                cond_symbol,
                branches,
                default_branch,
                ..
            } => {
                self.set_last_seen(*cond_symbol, stmt);
                for (_, branch) in *branches {
                    self.scan_ast(branch);
                }
                self.scan_ast(default_branch);
            }
            Stmt::Cond {
                cond_symbol,
                branching_symbol,
                pass,
                fail,
                ..
            } => {
                self.set_last_seen(*cond_symbol, stmt);
                self.set_last_seen(*branching_symbol, stmt);
                self.scan_ast(pass);
                self.scan_ast(fail);
            }
            Stmt::Ret(sym) => {
                self.set_last_seen(*sym, stmt);
            }
            Stmt::Inc(sym, following) => {
                self.set_last_seen(*sym, stmt);
                self.scan_ast(following);
            }
            Stmt::Dec(sym, following) => {
                self.set_last_seen(*sym, stmt);
                self.scan_ast(following);
            }
            Stmt::Join {
                parameters,
                continuation,
                remainder,
                ..
            } => {
                for param in *parameters {
                    self.set_last_seen(param.symbol, stmt);
                }
                self.scan_ast(continuation);
                self.scan_ast(remainder);
            }
            Stmt::Jump(JoinPointId(sym), symbols) => {
                self.set_last_seen(*sym, stmt);
                for sym in *symbols {
                    self.set_last_seen(*sym, stmt);
                }
            }
            Stmt::RuntimeError(_) => {}
        }
    }
}
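The lifetime bookkeeping above (scan_ast fills last_seen_map, create_free_map inverts it) is a plain map inversion. A toy standalone sketch of that inversion (not from the commit, using std collections instead of MutMap):

use std::collections::HashMap;

fn main() {
    // symbol -> index of the last statement where it is used (what scan_ast records)
    let last_seen: HashMap<&str, usize> = HashMap::from([("a", 0), ("b", 2), ("c", 2)]);

    // statement index -> symbols that can be freed once it has run (what create_free_map builds)
    let mut free_map: HashMap<usize, Vec<&str>> = HashMap::new();
    for (sym, stmt) in &last_seen {
        free_map.entry(*stmt).or_default().push(*sym);
    }

    assert_eq!(free_map[&0], vec!["a"]);
    let mut freed_after_2 = free_map[&2].clone();
    freed_after_2.sort();
    assert_eq!(freed_after_2, vec!["b", "c"]);
}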
compiler/gen_dev/src/object_builder.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
use crate::generic64::{x86_64, Backend64Bit};
use crate::{Backend, Env, Relocation, INLINED_SYMBOLS};
use bumpalo::collections::Vec;
use object::write;
use object::write::{Object, StandardSection, Symbol, SymbolSection};
use object::{
    Architecture, BinaryFormat, Endianness, RelocationEncoding, RelocationKind, SectionKind,
    SymbolFlags, SymbolKind, SymbolScope,
};
use roc_collections::all::MutMap;
use roc_module::symbol;
use roc_mono::ir::Proc;
use roc_mono::layout::Layout;
use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Triple};

const VERSION: &str = env!("CARGO_PKG_VERSION");

/// build_module is the high level builder/delegator.
/// It takes the request to build a module and outputs the object file for the module.
pub fn build_module<'a>(
    env: &'a Env,
    target: &Triple,
    procedures: MutMap<(symbol::Symbol, Layout<'a>), Proc<'a>>,
) -> Result<Object, String> {
    let (mut output, mut backend) = match target {
        Triple {
            architecture: TargetArch::X86_64,
            binary_format: TargetBF::Elf,
            ..
        } => {
            let backend: Backend64Bit<
                x86_64::X86_64GPReg,
                x86_64::X86_64Assembler,
                x86_64::X86_64SystemV,
            > = Backend::new(env, target)?;
            Ok((
                Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little),
                backend,
            ))
        }
        x => Err(format!("the target, {:?}, is not yet implemented", x)),
    }?;
    let text = output.section_id(StandardSection::Text);
    let data_section = output.section_id(StandardSection::Data);
    let comment = output.add_section(vec![], b"comment".to_vec(), SectionKind::OtherString);
    output.append_section_data(
        comment,
        format!("\0roc dev backend version {} \0", VERSION).as_bytes(),
        1,
    );

    // Setup layout_ids for procedure calls.
    let mut layout_ids = roc_mono::layout::LayoutIds::default();
    let mut procs = Vec::with_capacity_in(procedures.len(), env.arena);
    for ((sym, layout), proc) in procedures {
        // This is temporary until we support passing args to functions.
        if INLINED_SYMBOLS.contains(&sym) {
            continue;
        }

        let fn_name = layout_ids
            .get(sym, &layout)
            .to_symbol_string(sym, &env.interns);

        let proc_symbol = Symbol {
            name: fn_name.as_bytes().to_vec(),
            value: 0,
            size: 0,
            kind: SymbolKind::Text,
            // TODO: Depending on whether we are building a static or dynamic lib, this should change.
            // We should use Dynamic -> anyone, Linkage -> static link, Compilation -> this module only.
            scope: if env.exposed_to_host.contains(&sym) {
                SymbolScope::Dynamic
            } else {
                SymbolScope::Linkage
            },
            weak: false,
            section: SymbolSection::Section(text),
            flags: SymbolFlags::None,
        };
        let proc_id = output.add_symbol(proc_symbol);
        procs.push((fn_name, proc_id, proc));
    }

    // Build procedures.
    for (fn_name, proc_id, proc) in procs {
        let mut local_data_index = 0;
        let (proc_data, relocations) = backend.build_proc(proc)?;
        let proc_offset = output.add_symbol_data(proc_id, text, proc_data, 16);
        for reloc in relocations {
            let elfreloc = match reloc {
                Relocation::LocalData { offset, data } => {
                    let data_symbol = write::Symbol {
                        name: format!("{}.data{}", fn_name, local_data_index)
                            .as_bytes()
                            .to_vec(),
                        value: 0,
                        size: 0,
                        kind: SymbolKind::Data,
                        scope: SymbolScope::Compilation,
                        weak: false,
                        section: write::SymbolSection::Section(data_section),
                        flags: SymbolFlags::None,
                    };
                    local_data_index += 1;
                    let data_id = output.add_symbol(data_symbol);
                    output.add_symbol_data(data_id, data_section, data, 4);
                    write::Relocation {
                        offset: offset + proc_offset,
                        size: 32,
                        kind: RelocationKind::Relative,
                        encoding: RelocationEncoding::Generic,
                        symbol: data_id,
                        addend: -4,
                    }
                }
                Relocation::LinkedData { offset, name } => {
                    if let Some(sym_id) = output.symbol_id(name.as_bytes()) {
                        write::Relocation {
                            offset: offset + proc_offset,
                            size: 32,
                            kind: RelocationKind::GotRelative,
                            encoding: RelocationEncoding::Generic,
                            symbol: sym_id,
                            addend: -4,
                        }
                    } else {
                        return Err(format!("failed to find symbol for {:?}", name));
                    }
                }
                Relocation::LinkedFunction { offset, name } => {
                    if let Some(sym_id) = output.symbol_id(name.as_bytes()) {
                        write::Relocation {
                            offset: offset + proc_offset,
                            size: 32,
                            kind: RelocationKind::PltRelative,
                            encoding: RelocationEncoding::Generic,
                            symbol: sym_id,
                            addend: -4,
                        }
                    } else {
                        return Err(format!("failed to find symbol for {:?}", name));
                    }
                }
            };
            output
                .add_relocation(text, elfreloc)
                .map_err(|e| format!("{:?}", e))?;
        }
    }
    Ok(output)
}
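For orientation, here is a minimal standalone sketch (not part of the commit) of the `object` crate calls build_module relies on: create an ELF object, register one text symbol, and attach its bytes. The final `write()` call and the output file name are assumptions made for the sketch; build_module itself just returns the Object.

use object::write::{Object, StandardSection, Symbol, SymbolSection};
use object::{Architecture, BinaryFormat, Endianness, SymbolFlags, SymbolKind, SymbolScope};

fn main() {
    let mut output = Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little);
    let text = output.section_id(StandardSection::Text);
    let proc_id = output.add_symbol(Symbol {
        name: b"example_proc".to_vec(),
        value: 0,
        size: 0,
        kind: SymbolKind::Text,
        scope: SymbolScope::Dynamic, // exposed to the host, as in build_module
        weak: false,
        section: SymbolSection::Section(text),
        flags: SymbolFlags::None,
    });
    // A single `ret` instruction as the body, 16-byte aligned like build_module uses.
    output.add_symbol_data(proc_id, text, &[0xC3], 16);
    let bytes = output.write().expect("failed to serialize the object"); // assumed API
    std::fs::write("example_proc.o", bytes).expect("failed to write example_proc.o");
}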
compiler/gen_dev/src/run_roc.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
#[macro_export]
/// run_jit_function_raw runs an unwrapped jit function.
/// The function could throw an exception and break things, or worse, it could not throw an exception and break things.
/// This function is generally a bad idea with an untrusted backend, but is being used for now for development purposes.
macro_rules! run_jit_function_raw {
    ($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr) => {{
        let v: std::vec::Vec<roc_problem::can::Problem> = std::vec::Vec::new();
        run_jit_function_raw!($lib, $main_fn_name, $ty, $transform, v)
    }};

    ($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr, $errors:expr) => {{
        unsafe {
            let main: libloading::Symbol<unsafe extern "C" fn() -> $ty> = $lib
                .get($main_fn_name.as_bytes())
                .ok()
                .ok_or(format!("Unable to JIT compile `{}`", $main_fn_name))
                .expect("errored");

            let result = main();

            assert_eq!(
                $errors,
                std::vec::Vec::new(),
                "Encountered errors: {:?}",
                $errors
            );

            $transform(result)
        }
    }};
}
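The macro above uses a common two-arm pattern: the short arm forwards to the long arm with an empty error list. A standalone sketch of just that delegation pattern (not from the commit, no libloading involved):

macro_rules! run_with_errors {
    ($f:expr, $transform:expr) => {{
        // Short form: no expected errors, delegate to the long form.
        let no_errors: std::vec::Vec<String> = std::vec::Vec::new();
        run_with_errors!($f, $transform, no_errors)
    }};
    ($f:expr, $transform:expr, $errors:expr) => {{
        assert!($errors.is_empty(), "Encountered errors: {:?}", $errors);
        $transform($f())
    }};
}

fn main() {
    let doubled = run_with_errors!(|| 21i64, |x: i64| x * 2);
    assert_eq!(doubled, 42);
}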
compiler/gen_dev/tests/gen_num.rs (new file, 802 lines; diff truncated below)
@@ -0,0 +1,802 @@
|
|||
#[macro_use]
extern crate pretty_assertions;
#[macro_use]
extern crate indoc;

extern crate bumpalo;
extern crate libc;

#[macro_use]
mod helpers;

#[cfg(all(test, target_os = "linux", target_arch = "x86_64"))]
mod gen_num {
    //use roc_std::RocOrder;

    #[test]
    fn i64_values() {
        assert_evals_to!("0", 0, i64);
        assert_evals_to!("-0", 0, i64);
        assert_evals_to!("-1", -1, i64);
        assert_evals_to!("1", 1, i64);
        assert_evals_to!("9_000_000_000_000", 9_000_000_000_000, i64);
        assert_evals_to!("-9_000_000_000_000", -9_000_000_000_000, i64);
        assert_evals_to!("0b1010", 0b1010, i64);
        assert_evals_to!("0o17", 0o17, i64);
        assert_evals_to!("0x1000_0000_0000_0000", 0x1000_0000_0000_0000, i64);
    }

    #[test]
    fn gen_add_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    1 + 2 + 3
                "#
            ),
            6,
            i64
        );
    }

    #[test]
    fn i64_force_stack() {
        // This claims 33 registers: one more than ARM and RISC-V have, and many more than x86-64.
        assert_evals_to!(
            indoc!(
                r#"
                    a = 0
                    b = 1
                    c = 2
                    d = 3
                    e = 4
                    f = 5
                    g = 6
                    h = 7
                    i = 8
                    j = 9
                    k = 10
                    l = 11
                    m = 12
                    n = 13
                    o = 14
                    p = 15
                    q = 16
                    r = 17
                    s = 18
                    t = 19
                    u = 20
                    v = 21
                    w = 22
                    x = 23
                    y = 24
                    z = 25
                    aa = 26
                    ab = 27
                    ac = 28
                    ad = 29
                    ae = 30
                    af = 31
                    ag = 32

                    # This can't be one line because it causes a stack overflow in the frontend :(
                    tmp = a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q
                    tmp + r + s + t + u + v + w + x + y + z + aa + ab + ac + ad + ae + af + ag
                "#
            ),
            528,
            i64
        );
    }

    #[test]
    fn i64_abs() {
        assert_evals_to!("Num.abs -6", 6, i64);
        assert_evals_to!("Num.abs 7", 7, i64);
        assert_evals_to!("Num.abs 0", 0, i64);
        assert_evals_to!("Num.abs -0", 0, i64);
        assert_evals_to!("Num.abs -1", 1, i64);
        assert_evals_to!("Num.abs 1", 1, i64);
        assert_evals_to!("Num.abs 9_000_000_000_000", 9_000_000_000_000, i64);
        assert_evals_to!("Num.abs -9_000_000_000_000", 9_000_000_000_000, i64);
    }

    /*
    #[test]
    fn f64_sqrt() {
        // FIXME this works with normal types, but fails when checking uniqueness types
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.sqrt 100 is
                        Ok val -> val
                        Err _ -> -1
                "#
            ),
            10.0,
            f64
        );
    }

    #[test]
    fn f64_round_old() {
        assert_evals_to!("Num.round 3.6", 4, i64);
    }

    #[test]
    fn f64_abs() {
        assert_evals_to!("Num.abs -4.7", 4.7, f64);
        assert_evals_to!("Num.abs 5.8", 5.8, f64);
    }

    #[test]
    fn gen_if_fn() {
        assert_evals_to!(
            indoc!(
                r#"
                    limitedNegate = \num ->
                        x =
                            if num == 1 then
                                -1
                            else if num == -1 then
                                1
                            else
                                num
                        x

                    limitedNegate 1
                "#
            ),
            -1,
            i64
        );

        assert_evals_to!(
            indoc!(
                r#"
                    limitedNegate = \num ->
                        if num == 1 then
                            -1
                        else if num == -1 then
                            1
                        else
                            num

                    limitedNegate 1
                "#
            ),
            -1,
            i64
        );
    }

    #[test]
    fn gen_float_eq() {
        assert_evals_to!(
            indoc!(
                r#"
                    1.0 == 1.0
                "#
            ),
            true,
            bool
        );
    }

    #[test]
    fn gen_add_f64() {
        assert_evals_to!(
            indoc!(
                r#"
                    1.1 + 2.4 + 3
                "#
            ),
            6.5,
            f64
        );
    }

    #[test]
    fn gen_wrap_add_nums() {
        assert_evals_to!(
            indoc!(
                r#"
                    add2 = \num1, num2 -> num1 + num2

                    add2 4 5
                "#
            ),
            9,
            i64
        );
    }

    #[test]
    fn gen_div_f64() {
        // FIXME this works with normal types, but fails when checking uniqueness types
        assert_evals_to!(
            indoc!(
                r#"
                    when 48 / 2 is
                        Ok val -> val
                        Err _ -> -1
                "#
            ),
            24.0,
            f64
        );
    }

    #[test]
    fn gen_int_eq() {
        assert_evals_to!(
            indoc!(
                r#"
                    4 == 4
                "#
            ),
            true,
            bool
        );
    }

    #[test]
    fn gen_int_neq() {
        assert_evals_to!(
            indoc!(
                r#"
                    4 != 5
                "#
            ),
            true,
            bool
        );
    }

    #[test]
    fn gen_wrap_int_neq() {
        assert_evals_to!(
            indoc!(
                r#"
                    wrappedNotEq : a, a -> Bool
                    wrappedNotEq = \num1, num2 ->
                        num1 != num2

                    wrappedNotEq 2 3
                "#
            ),
            true,
            bool
        );
    }

    #[test]
    fn gen_sub_f64() {
        assert_evals_to!(
            indoc!(
                r#"
                    1.5 - 2.4 - 3
                "#
            ),
            -3.9,
            f64
        );
    }

    #[test]
    fn gen_sub_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    1 - 2 - 3
                "#
            ),
            -4,
            i64
        );
    }

    #[test]
    fn gen_mul_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    2 * 4 * 6
                "#
            ),
            48,
            i64
        );
    }

    #[test]
    fn gen_div_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    when 1000 // 10 is
                        Ok val -> val
                        Err _ -> -1
                "#
            ),
            100,
            i64
        );
    }

    #[test]
    fn gen_div_by_zero_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    when 1000 // 0 is
                        Err DivByZero -> 99
                        _ -> -24
                "#
            ),
            99,
            i64
        );
    }

    #[test]
    fn gen_rem_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.rem 8 3 is
                        Ok val -> val
                        Err _ -> -1
                "#
            ),
            2,
            i64
        );
    }

    #[test]
    fn gen_rem_div_by_zero_i64() {
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.rem 8 0 is
                        Err DivByZero -> 4
                        Ok _ -> -23
                "#
            ),
            4,
            i64
        );
    }

    #[test]
    fn gen_is_zero_i64() {
        assert_evals_to!("Num.isZero 0", true, bool);
        assert_evals_to!("Num.isZero 1", false, bool);
    }

    #[test]
    fn gen_is_positive_i64() {
        assert_evals_to!("Num.isPositive 0", false, bool);
        assert_evals_to!("Num.isPositive 1", true, bool);
        assert_evals_to!("Num.isPositive -5", false, bool);
    }

    #[test]
    fn gen_is_negative_i64() {
        assert_evals_to!("Num.isNegative 0", false, bool);
        assert_evals_to!("Num.isNegative 3", false, bool);
        assert_evals_to!("Num.isNegative -2", true, bool);
    }

    #[test]
    fn gen_is_positive_f64() {
        assert_evals_to!("Num.isPositive 0.0", false, bool);
        assert_evals_to!("Num.isPositive 4.7", true, bool);
        assert_evals_to!("Num.isPositive -8.5", false, bool);
    }

    #[test]
    fn gen_is_negative_f64() {
        assert_evals_to!("Num.isNegative 0.0", false, bool);
        assert_evals_to!("Num.isNegative 9.9", false, bool);
        assert_evals_to!("Num.isNegative -4.4", true, bool);
    }

    #[test]
    fn gen_is_zero_f64() {
        assert_evals_to!("Num.isZero 0", true, bool);
        assert_evals_to!("Num.isZero 0_0", true, bool);
        assert_evals_to!("Num.isZero 0.0", true, bool);
        assert_evals_to!("Num.isZero 1", false, bool);
    }

    #[test]
    fn gen_is_odd() {
        assert_evals_to!("Num.isOdd 4", false, bool);
        assert_evals_to!("Num.isOdd 5", true, bool);
    }

    #[test]
    fn gen_is_even() {
        assert_evals_to!("Num.isEven 6", true, bool);
        assert_evals_to!("Num.isEven 7", false, bool);
    }

    #[test]
    fn sin() {
        assert_evals_to!("Num.sin 0", 0.0, f64);
        assert_evals_to!("Num.sin 1.41421356237", 0.9877659459922529, f64);
    }

    #[test]
    fn cos() {
        assert_evals_to!("Num.cos 0", 1.0, f64);
        assert_evals_to!("Num.cos 3.14159265359", -1.0, f64);
    }

    #[test]
    fn tan() {
        assert_evals_to!("Num.tan 0", 0.0, f64);
        assert_evals_to!("Num.tan 1", 1.557407724654902, f64);
    }

    #[test]
    fn lt_i64() {
        assert_evals_to!("1 < 2", true, bool);
        assert_evals_to!("1 < 1", false, bool);
        assert_evals_to!("2 < 1", false, bool);
        assert_evals_to!("0 < 0", false, bool);
    }

    #[test]
    fn lte_i64() {
        assert_evals_to!("1 <= 1", true, bool);
        assert_evals_to!("2 <= 1", false, bool);
        assert_evals_to!("1 <= 2", true, bool);
        assert_evals_to!("0 <= 0", true, bool);
    }

    #[test]
    fn gt_i64() {
        assert_evals_to!("2 > 1", true, bool);
        assert_evals_to!("2 > 2", false, bool);
        assert_evals_to!("1 > 1", false, bool);
        assert_evals_to!("0 > 0", false, bool);
    }

    #[test]
    fn gte_i64() {
        assert_evals_to!("1 >= 1", true, bool);
        assert_evals_to!("1 >= 2", false, bool);
        assert_evals_to!("2 >= 1", true, bool);
        assert_evals_to!("0 >= 0", true, bool);
    }

    #[test]
    fn lt_f64() {
        assert_evals_to!("1.1 < 1.2", true, bool);
        assert_evals_to!("1.1 < 1.1", false, bool);
        assert_evals_to!("1.2 < 1.1", false, bool);
        assert_evals_to!("0.0 < 0.0", false, bool);
    }

    #[test]
    fn lte_f64() {
        assert_evals_to!("1.1 <= 1.1", true, bool);
        assert_evals_to!("1.2 <= 1.1", false, bool);
        assert_evals_to!("1.1 <= 1.2", true, bool);
        assert_evals_to!("0.0 <= 0.0", true, bool);
    }

    #[test]
    fn gt_f64() {
        assert_evals_to!("2.2 > 1.1", true, bool);
        assert_evals_to!("2.2 > 2.2", false, bool);
        assert_evals_to!("1.1 > 2.2", false, bool);
        assert_evals_to!("0.0 > 0.0", false, bool);
    }

    #[test]
    fn gte_f64() {
        assert_evals_to!("1.1 >= 1.1", true, bool);
        assert_evals_to!("1.1 >= 1.2", false, bool);
        assert_evals_to!("1.2 >= 1.1", true, bool);
        assert_evals_to!("0.0 >= 0.0", true, bool);
    }

    #[test]
    fn gen_order_of_arithmetic_ops() {
        assert_evals_to!(
            indoc!(
                r#"
                    1 + 3 * 7 - 2
                "#
            ),
            20,
            i64
        );
    }

    #[test]
    fn gen_order_of_arithmetic_ops_complex_float() {
        assert_evals_to!(
            indoc!(
                r#"
                    3 - 48 * 2.0
                "#
            ),
            -93.0,
            f64
        );
    }

    #[test]
    fn if_guard_bind_variable_false() {
        assert_evals_to!(
            indoc!(
                r#"
                    wrapper = \{} ->
                        when 10 is
                            x if x == 5 -> 0
                            _ -> 42

                    wrapper {}
                "#
            ),
            42,
            i64
        );
    }

    #[test]
    fn if_guard_bind_variable_true() {
        assert_evals_to!(
            indoc!(
                r#"
                    wrapper = \{} ->
                        when 10 is
                            x if x == 10 -> 42
                            _ -> 0

                    wrapper {}
                "#
            ),
            42,
            i64
        );
    }

    #[test]
    fn tail_call_elimination() {
        assert_evals_to!(
            indoc!(
                r#"
                    sum = \n, accum ->
                        when n is
                            0 -> accum
                            _ -> sum (n - 1) (n + accum)

                    sum 1_000_000 0
                "#
            ),
            500000500000,
            i64
        );
    }

    #[test]
    fn int_negate() {
        assert_evals_to!("Num.neg 123", -123, i64);
    }

    #[test]
    fn gen_wrap_int_neg() {
        assert_evals_to!(
            indoc!(
                r#"
                    wrappedNeg = \num -> -num

                    wrappedNeg 3
                "#
            ),
            -3,
            i64
        );
    }

    #[test]
    fn gen_basic_fn() {
        assert_evals_to!(
            indoc!(
                r#"
                    always42 : Num.Num Num.Integer -> Num.Num Num.Integer
                    always42 = \_ -> 42

                    always42 5
                "#
            ),
            42,
            i64
        );
    }

    #[test]
    fn int_to_float() {
        assert_evals_to!("Num.toFloat 0x9", 9.0, f64);
    }

    #[test]
    fn num_to_float() {
        assert_evals_to!("Num.toFloat 9", 9.0, f64);
    }

    #[test]
    fn float_to_float() {
        assert_evals_to!("Num.toFloat 0.5", 0.5, f64);
    }

    #[test]
    fn int_compare() {
        assert_evals_to!("Num.compare 0 1", RocOrder::Lt, RocOrder);
        assert_evals_to!("Num.compare 1 1", RocOrder::Eq, RocOrder);
        assert_evals_to!("Num.compare 1 0", RocOrder::Gt, RocOrder);
    }

    #[test]
    fn float_compare() {
        assert_evals_to!("Num.compare 0.01 3.14", RocOrder::Lt, RocOrder);
        assert_evals_to!("Num.compare 3.14 3.14", RocOrder::Eq, RocOrder);
        assert_evals_to!("Num.compare 3.14 0.01", RocOrder::Gt, RocOrder);
    }

    #[test]
    fn pow() {
        assert_evals_to!("Num.pow 2.0 2.0", 4.0, f64);
    }

    #[test]
    fn ceiling() {
        assert_evals_to!("Num.ceiling 1.1", 2, i64);
    }

    #[test]
    fn floor() {
        assert_evals_to!("Num.floor 1.9", 1, i64);
    }

    #[test]
    fn pow_int() {
        assert_evals_to!("Num.powInt 2 3", 8, i64);
    }

    #[test]
    fn atan() {
        assert_evals_to!("Num.atan 10", 1.4711276743037347, f64);
    }

    // #[test]
    // #[should_panic(expected = r#"Roc failed with message: "integer addition overflowed!"#)]
    // fn int_overflow() {
    //     assert_evals_to!(
    //         indoc!(
    //             r#"
    //                 9_223_372_036_854_775_807 + 1
    //             "#
    //         ),
    //         0,
    //         i64
    //     );
    // }

    #[test]
    fn int_add_checked() {
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.addChecked 1 2 is
                        Ok v -> v
                        _ -> -1
                "#
            ),
            3,
            i64
        );

        assert_evals_to!(
            indoc!(
                r#"
                    when Num.addChecked 9_223_372_036_854_775_807 1 is
                        Err Overflow -> -1
                        Ok v -> v
                "#
            ),
            -1,
            i64
        );
    }

    #[test]
    fn int_add_wrap() {
        assert_evals_to!(
            indoc!(
                r#"
                    Num.addWrap 9_223_372_036_854_775_807 1
                "#
            ),
            std::i64::MIN,
            i64
        );
    }

    #[test]
    fn float_add_checked_pass() {
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.addChecked 1.0 0.0 is
                        Ok v -> v
                        Err Overflow -> -1.0
                "#
            ),
            1.0,
            f64
        );
    }

    #[test]
    fn float_add_checked_fail() {
        assert_evals_to!(
            indoc!(
                r#"
                    when Num.addChecked 1.7976931348623157e308 1.7976931348623157e308 is
                        Err Overflow -> -1
                        Ok v -> v
                "#
            ),
            -1.0,
            f64
        );
    }

    // #[test]
    // #[should_panic(expected = r#"Roc failed with message: "float addition overflowed!"#)]
    // fn float_overflow() {
    //     assert_evals_to!(
    //         indoc!(
    //             r#"
    //                 1.7976931348623157e308 + 1.7976931348623157e308
    //             "#
    //         ),
    //         0.0,
    //         f64
    //     );
    // }

    #[test]
    fn num_max_int() {
        assert_evals_to!(
            indoc!(
                r#"
                    Num.maxInt
                "#
            ),
            i64::MAX,
            i64
        );
    }

    #[test]
    fn num_min_int() {
        assert_evals_to!(
            indoc!(
                r#"
                    Num.minInt
                "#
            ),
            i64::MIN,
            i64
        );
    }
    */
}
233
compiler/gen_dev/tests/helpers/eval.rs
Normal file
@ -0,0 +1,233 @@
use libloading::Library;
use roc_build::link::{link, LinkType};
use roc_collections::all::MutMap;
use tempfile::tempdir;

fn promote_expr_to_module(src: &str) -> String {
    let mut buffer = String::from("app \"test\" provides [ main ] to \"./platform\"\n\nmain =\n");

    for line in src.lines() {
        // indent the body!
        buffer.push_str(" ");
        buffer.push_str(line);
        buffer.push('\n');
    }

    buffer
}

pub fn helper<'a>(
    arena: &'a bumpalo::Bump,
    src: &str,
    stdlib: roc_builtins::std::StdLib,
    _leak: bool,
    lazy_literals: bool,
) -> (String, Vec<roc_problem::can::Problem>, Library) {
    use std::path::{Path, PathBuf};

    //let stdlib_mode = stdlib.mode;
    let dir = tempdir().unwrap();
    let filename = PathBuf::from("Test.roc");
    let src_dir = Path::new("fake/test/path");
    let app_o_file = dir.path().join("app.o");

    let module_src;
    let temp;
    if src.starts_with("app") {
        // this is already a module
        module_src = src;
    } else {
        // this is an expression, promote it to a module
        temp = promote_expr_to_module(src);
        module_src = &temp;
    }

    let exposed_types = MutMap::default();
    let loaded = roc_load::file::load_and_monomorphize_from_str(
        arena,
        filename,
        &module_src,
        stdlib,
        src_dir,
        exposed_types,
    );

    let mut loaded = loaded.expect("failed to load module");

    use roc_load::file::MonomorphizedModule;
    let MonomorphizedModule {
        procedures,
        interns,
        exposed_to_host,
        ..
    } = loaded;

    /*
    println!("=========== Procedures ==========");
    println!("{:?}", procedures);
    println!("=================================\n");

    println!("=========== Interns ==========");
    println!("{:?}", interns);
    println!("=================================\n");

    println!("=========== Exposed ==========");
    println!("{:?}", exposed_to_host);
    println!("=================================\n");
    */

    debug_assert_eq!(exposed_to_host.len(), 1);
    let main_fn_symbol = exposed_to_host.keys().copied().nth(0).unwrap();

    let (_, main_fn_layout) = procedures
        .keys()
        .find(|(s, _)| *s == main_fn_symbol)
        .unwrap()
        .clone();
    let mut layout_ids = roc_mono::layout::LayoutIds::default();
    let main_fn_name = layout_ids
        .get(main_fn_symbol, &main_fn_layout)
        .to_symbol_string(main_fn_symbol, &interns);

    let mut lines = Vec::new();
    // errors whose reporting we delay (so we can see that code gen generates runtime errors)
    let mut delayed_errors = Vec::new();

    for (home, (module_path, src)) in loaded.sources {
        use roc_reporting::report::{
            can_problem, mono_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE,
        };

        let can_problems = loaded.can_problems.remove(&home).unwrap_or_default();
        let type_problems = loaded.type_problems.remove(&home).unwrap_or_default();
        let mono_problems = loaded.mono_problems.remove(&home).unwrap_or_default();

        let error_count = can_problems.len() + type_problems.len() + mono_problems.len();

        if error_count == 0 {
            continue;
        }

        let src_lines: Vec<&str> = src.split('\n').collect();
        let palette = DEFAULT_PALETTE;

        // Report parsing and canonicalization problems
        let alloc = RocDocAllocator::new(&src_lines, home, &interns);

        use roc_problem::can::Problem::*;
        for problem in can_problems.into_iter() {
            // Ignore "unused" problems
            match problem {
                UnusedDef(_, _) | UnusedArgument(_, _, _) | UnusedImport(_, _) => {
                    delayed_errors.push(problem);
                    continue;
                }
                _ => {
                    let report = can_problem(&alloc, module_path.clone(), problem);
                    let mut buf = String::new();

                    report.render_color_terminal(&mut buf, &alloc, &palette);

                    lines.push(buf);
                }
            }
        }

        for problem in type_problems {
            let report = type_problem(&alloc, module_path.clone(), problem);
            let mut buf = String::new();

            report.render_color_terminal(&mut buf, &alloc, &palette);

            lines.push(buf);
        }

        for problem in mono_problems {
            let report = mono_problem(&alloc, module_path.clone(), problem);
            let mut buf = String::new();

            report.render_color_terminal(&mut buf, &alloc, &palette);

            lines.push(buf);
        }
    }

    if !lines.is_empty() {
        println!("{}", lines.join("\n"));
        assert_eq!(0, 1, "Mistakes were made");
    }

    let env = roc_gen_dev::Env {
        arena,
        interns,
        exposed_to_host: exposed_to_host.keys().copied().collect(),
        lazy_literals,
    };

    let target = target_lexicon::Triple::host();
    let module_object =
        roc_gen_dev::build_module(&env, &target, procedures).expect("failed to compile module");

    let module_out = module_object
        .write()
        .expect("failed to build output object");
    std::fs::write(&app_o_file, module_out).expect("failed to write object to file");

    let (mut child, dylib_path) = link(
        &target,
        app_o_file.clone(),
        &[app_o_file.to_str().unwrap()],
        LinkType::Dylib,
    )
    .expect("failed to link dynamic library");

    child.wait().unwrap();

    // Load the dylib
    let path = dylib_path.as_path().to_str().unwrap();

    // std::fs::copy(&app_o_file, "/tmp/app.o").unwrap();
    // std::fs::copy(&path, "/tmp/libapp.so").unwrap();

    let lib = Library::new(path).expect("failed to load shared library");

    (main_fn_name, delayed_errors, lib)
}

#[macro_export]
macro_rules! assert_evals_to {
    ($src:expr, $expected:expr, $ty:ty) => {{
        assert_evals_to!($src, $expected, $ty, (|val| val));
    }};
    ($src:expr, $expected:expr, $ty:ty, $transform:expr) => {
        // Same as above, except with an additional transformation argument.
        {
            assert_evals_to!($src, $expected, $ty, $transform, true);
        }
    };
    ($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr) => {
        // Run both with and without lazy literal optimization.
        {
            assert_evals_to!($src, $expected, $ty, $transform, $leak, false);
        }
        {
            assert_evals_to!($src, $expected, $ty, $transform, $leak, true);
        }
    };
    ($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr, $lazy_literals:expr) => {
        use bumpalo::Bump;
        use roc_gen_dev::run_jit_function_raw;
        let stdlib = roc_builtins::std::standard_stdlib();

        let arena = Bump::new();
        let (main_fn_name, errors, lib) =
            $crate::helpers::eval::helper(&arena, $src, stdlib, $leak, $lazy_literals);

        let transform = |success| {
            let expected = $expected;
            let given = $transform(success);
            assert_eq!(&given, &expected);
        };
        run_jit_function_raw!(lib, main_fn_name, $ty, transform, errors)
    };
}
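Taken together, a brief sketch of how these helpers compose (inputs are illustrative, not from this diff): promote_expr_to_module wraps a bare expression in an app module, helper compiles and links it into a shared library, and assert_evals_to! runs the exported main function through run_jit_function_raw! and compares the result.

    // promote_expr_to_module("1 + 2") produces, roughly:
    //
    //   app "test" provides [ main ] to "./platform"
    //
    //   main =
    //    1 + 2
    //
    // which assert_evals_to! then compiles, links, and JITs:
    assert_evals_to!("1 + 2", 3, i64);
    // The optional transform argument post-processes the returned value before comparison:
    assert_evals_to!("Num.neg 7", 7, i64, |val: i64| val.abs());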
44
compiler/gen_dev/tests/helpers/mod.rs
Normal file
@ -0,0 +1,44 @@
extern crate bumpalo;

#[macro_use]
pub mod eval;

/// Used in the with_larger_debug_stack() function, for tests that otherwise
/// run out of stack space in debug builds (but don't in --release builds)
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 8 * 1024 * 1024;

/// Without this, some tests pass in `cargo test --release` but fail without
/// the --release flag because they run out of stack space. This increases
/// stack size for debug builds only, while leaving the stack space at the default
/// amount for release builds.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() -> (),
    F: Send,
    F: 'static,
{
    std::thread::Builder::new()
        .stack_size(EXPANDED_STACK_SIZE)
        .spawn(run_test)
        .expect("Error while spawning expanded dev stack size thread")
        .join()
        .expect("Error while joining expanded dev stack size thread")
}

/// In --release builds, don't increase the stack size. Run the test normally.
/// This way, we find out if any of our tests are blowing the stack even after
/// optimizations in release builds.
#[allow(dead_code)]
#[cfg(not(debug_assertions))]
#[inline(always)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() -> (),
    F: Send,
    F: 'static,
{
    run_test()
}
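A hedged usage sketch (the test name and body below are hypothetical): wrapping a stack-hungry test in with_larger_debug_stack gives it the 8 MiB stack in debug builds while running it unchanged in release builds.

    #[test]
    fn deeply_nested_arithmetic() {
        // Hypothetical test; any closure that is Send + 'static works here.
        helpers::with_larger_debug_stack(|| {
            assert_evals_to!("1 + 2 + 3", 6, i64);
        })
    }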