Merge branch 'trunk' into gen-dev/records-base

Brendan Hansknecht 2021-08-14 21:21:31 -07:00
commit cc1b7df06a
466 changed files with 40237 additions and 19367 deletions


@ -261,6 +261,16 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
unimplemented!("abs_reg64_reg64 is not yet implement for AArch64");
}
#[inline(always)]
fn abs_freg64_freg64(
_buf: &mut Vec<'_, u8>,
_relocs: &mut Vec<'_, Relocation>,
_dst: AArch64FloatReg,
_src: AArch64FloatReg,
) {
unimplemented!("abs_reg64_reg64 is not yet implement for AArch64");
}
#[inline(always)]
fn add_reg64_reg64_imm32(
buf: &mut Vec<'_, u8>,
@ -302,10 +312,27 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
unimplemented!("calling functions literal not yet implemented for AArch64");
}
#[inline(always)]
fn imul_reg64_reg64_reg64(
_buf: &mut Vec<'_, u8>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) {
unimplemented!("register multiplication not implemented yet for AArch64");
}
#[inline(always)]
fn jmp_imm32(_buf: &mut Vec<'_, u8>, _offset: i32) -> usize {
unimplemented!("jump instructions not yet implemented for AArch64");
}
#[inline(always)]
fn tail_call(buf: &mut Vec<'_, u8>) -> u64 {
Self::jmp_imm32(buf, 0);
buf.len() as u64 - 4 // TODO: is 4 the correct offset on ARM?
}
#[inline(always)]
fn jne_reg64_imm64_imm32(
_buf: &mut Vec<'_, u8>,


@ -2,7 +2,7 @@ use crate::{Backend, Env, Relocation};
use bumpalo::collections::Vec;
use roc_collections::all::{MutMap, MutSet};
use roc_module::symbol::Symbol;
use roc_mono::ir::{BranchInfo, Literal, Stmt, Wrapped};
use roc_mono::ir::{BranchInfo, Literal, Stmt};
use roc_mono::layout::{Builtin, Layout};
use std::marker::PhantomData;
use target_lexicon::Triple;
@ -85,6 +85,12 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait> {
/// dst should always come before sources.
pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
fn abs_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
fn abs_freg64_freg64(
buf: &mut Vec<'_, u8>,
relocs: &mut Vec<'_, Relocation>,
dst: FloatReg,
src: FloatReg,
);
fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn add_freg64_freg64_freg64(
@ -107,6 +113,8 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
// It returns the base offset to calculate the jump from (generally the instruction after the jump).
fn jmp_imm32(buf: &mut Vec<'_, u8>, offset: i32) -> usize;
fn tail_call(buf: &mut Vec<'_, u8>) -> u64;
// Jumps by an offset of offset bytes if reg is not equal to imm.
// It should always generate the same number of bytes to enable replacement if offset changes.
// It returns the base offset to calculate the jump from (generally the instruction after the jump).
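// A minimal sketch of the back-patching pattern these two guarantees enable
// (assumes `reg`/`imm` are in scope and that the displacement is the final
// four bytes of the encoding, as in the x86_64 implementation below):
//
//     // Emit the jump with a placeholder offset; `base` is the position the
//     // displacement is measured from (the instruction right after the jump).
//     let base = Self::jne_reg64_imm64_imm32(buf, reg, imm, 0);
//     /* ... emit the code that can be skipped over ... */
//     let target = buf.len();
//     // The encoding size never depends on the offset value, so the
//     // placeholder can be overwritten in place without shifting other code.
//     buf[base - 4..base].copy_from_slice(&((target - base) as i32).to_le_bytes());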
@ -138,6 +146,13 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
fn imul_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
);
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@ -202,7 +217,7 @@ pub struct Backend64Bit<
float_used_callee_saved_regs: MutSet<FloatReg>,
stack_size: u32,
// The ammount of stack space needed to pass args for function calling.
// The amount of stack space needed to pass args for function calling.
fn_call_stack_size: u32,
}
@ -344,6 +359,14 @@ impl<
Ok(())
}
/// Used for generating wrappers for malloc/realloc/free
fn build_wrapped_jmp(&mut self) -> Result<(&'a [u8], u64), String> {
let mut out = bumpalo::vec![in self.env.arena];
let offset = ASM::tail_call(&mut out);
Ok((out.into_bump_slice(), offset))
}
fn build_fn_call(
&mut self,
dst: &Symbol,
@ -402,7 +425,7 @@ impl<
Ok(())
}
x => Err(format!(
"recieving return type, {:?}, is not yet implemented",
"receiving return type, {:?}, is not yet implemented",
x
)),
}
@ -484,6 +507,15 @@ impl<
Ok(())
}
fn build_num_abs_f64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_float_reg(dst)?;
let src_reg = self.load_to_float_reg(src)?;
ASM::abs_freg64_freg64(&mut self.buf, &mut self.relocs, dst_reg, src_reg);
Ok(())
}
fn build_num_add_i64(
&mut self,
dst: &Symbol,
@ -510,6 +542,19 @@ impl<
Ok(())
}
fn build_num_mul_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::imul_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
fn build_num_sub_i64(
&mut self,
dst: &Symbol,
@ -565,13 +610,12 @@ impl<
}
}
fn load_access_at_index(
fn load_struct_at_index(
&mut self,
sym: &Symbol,
structure: &Symbol,
index: u64,
field_layouts: &'a [Layout<'a>],
_wrapped: &Wrapped,
) -> Result<(), String> {
if let Some(SymbolStorage::Base(struct_offset)) = self.symbol_storage_map.get(structure) {
let mut data_offset = *struct_offset;


@ -260,7 +260,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
Layout::Builtin(Builtin::Float64) => {}
x => {
return Err(format!(
"recieving return type, {:?}, is not yet implemented",
"receiving return type, {:?}, is not yet implemented",
x
));
}
@ -574,7 +574,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
Layout::Builtin(Builtin::Float64) => {}
x => {
return Err(format!(
"recieving return type, {:?}, is not yet implemented",
"receiving return type, {:?}, is not yet implemented",
x
));
}
@ -794,6 +794,24 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
cmovl_reg64_reg64(buf, dst, src);
}
#[inline(always)]
fn abs_freg64_freg64(
buf: &mut Vec<'_, u8>,
relocs: &mut Vec<'_, Relocation>,
dst: X86_64FloatReg,
src: X86_64FloatReg,
) {
movsd_freg64_rip_offset32(buf, dst, 0);
// TODO: make sure this constant only loads once instead of every call to abs
relocs.push(Relocation::LocalData {
offset: buf.len() as u64 - 4,
data: 0x7fffffffffffffffu64.to_le_bytes().to_vec(),
});
andpd_freg64_freg64(buf, dst, src);
}
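// For reference, this lowering computes `Num.abs` for floats entirely with
// bit operations: ANDing with 0x7fffffffffffffff clears bit 63, the IEEE-754
// sign bit, and leaves exponent and mantissa untouched. The same trick in
// plain Rust (helper name illustrative):
//
//     fn abs_f64_via_mask(x: f64) -> f64 {
//         // Clearing only the sign bit also maps -0.0 to 0.0 and keeps NaNs NaN.
//         f64::from_bits(x.to_bits() & 0x7fff_ffff_ffff_ffff)
//     }
//     assert_eq!(abs_f64_via_mask(-1.5), 1.5);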
#[inline(always)]
fn add_reg64_reg64_imm32(
buf: &mut Vec<'_, u8>,
@ -801,12 +819,11 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
src1: X86_64GeneralReg,
imm32: i32,
) {
if dst == src1 {
add_reg64_imm32(buf, dst, imm32);
} else {
if dst != src1 {
mov_reg64_reg64(buf, dst, src1);
add_reg64_imm32(buf, dst, imm32);
}
add_reg64_imm32(buf, dst, imm32);
}
#[inline(always)]
fn add_reg64_reg64_reg64(
@ -850,11 +867,32 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
});
}
#[inline(always)]
fn imul_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
if dst != src1 {
mov_reg64_reg64(buf, dst, src1);
}
imul_reg64_reg64(buf, dst, src2);
}
#[inline(always)]
fn jmp_imm32(buf: &mut Vec<'_, u8>, offset: i32) -> usize {
jmp_imm32(buf, offset);
buf.len()
}
#[inline(always)]
fn tail_call(buf: &mut Vec<'_, u8>) -> u64 {
Self::jmp_imm32(buf, 0);
buf.len() as u64 - 4
}
#[inline(always)]
fn jne_reg64_imm64_imm32(
buf: &mut Vec<'_, u8>,
@ -940,12 +978,11 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
src1: X86_64GeneralReg,
imm32: i32,
) {
if dst == src1 {
sub_reg64_imm32(buf, dst, imm32);
} else {
if dst != src1 {
mov_reg64_reg64(buf, dst, src1);
sub_reg64_imm32(buf, dst, imm32);
}
sub_reg64_imm32(buf, dst, imm32);
}
#[inline(always)]
fn sub_reg64_reg64_reg64(
@ -954,12 +991,11 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) {
if dst == src1 {
sub_reg64_reg64(buf, dst, src2);
} else {
if dst != src1 {
mov_reg64_reg64(buf, dst, src1);
sub_reg64_reg64(buf, dst, src2);
}
sub_reg64_reg64(buf, dst, src2);
}
#[inline(always)]
@ -1030,6 +1066,21 @@ fn binop_reg64_reg64(
buf.extend(&[rex, op_code, 0xC0 + dst_mod + src_mod]);
}
#[inline(always)]
fn extended_binop_reg64_reg64(
op_code1: u8,
op_code2: u8,
buf: &mut Vec<'_, u8>,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
let rex = add_rm_extension(dst, REX_W);
let rex = add_reg_extension(src, rex);
let dst_mod = dst as u8 % 8;
let src_mod = (src as u8 % 8) << 3;
buf.extend(&[rex, op_code1, op_code2, 0xC0 + dst_mod + src_mod]);
}
// Below here are the functions for all of the assembly instructions.
// Their names are based on the instruction and operators combined.
// You should call `buf.reserve()` if you push or extend more than once.
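// For instance (byte values arbitrary), one up-front reservation covers both
// writes below, so the buffer grows at most once per instruction:
//
//     buf.reserve(3);
//     buf.push(0x66);
//     buf.extend(&[0x0F, 0x54]);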
@ -1072,6 +1123,26 @@ fn addsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64Fl
}
}
#[inline(always)]
fn andpd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64FloatReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
buf.extend(&[
0x66,
0x40 + ((dst_high as u8) << 2) + (src_high as u8),
0x0F,
0x54,
0xC0 + (dst_mod << 3) + (src_mod),
])
} else {
buf.extend(&[0x66, 0x0F, 0x54, 0xC0 + (dst_mod << 3) + (src_mod)])
}
}
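// A worked instance of the prefixed path above: for dst = XMM0 and
// src = XMM15 only src_high is set, so the prefix byte is
// 0x40 + (0 << 2) + 1 = 0x41 and ModRM is 0xC0 + (0 << 3) + 7 = 0xC7,
// giving 66 41 0F 54 C7, the second case in the test table below.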
/// r/m64 AND imm8 (sign-extended).
#[inline(always)]
fn and_reg64_imm8(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, imm: i8) {
@ -1106,6 +1177,14 @@ fn cmp_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64Gene
binop_reg64_reg64(0x39, buf, dst, src);
}
/// `IMUL r64,r/m64` -> Signed Multiply r/m64 to r64.
#[inline(always)]
fn imul_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
// IMUL is strange, the parameters are reversed from most other binary ops.
// The final encoding is (src, dst) instead of (dst, src).
extended_binop_reg64_reg64(0x0F, 0xAF, buf, src, dst);
}
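// A worked instance of the swap: `imul rax, r15` passes (src, dst), so R15
// lands in the r/m field and add_rm_extension sets REX.B
// (0x48 | 0x01 = 0x49), while ModRM is 0xC0 + 7 + (0 << 3) = 0xC7,
// matching the [0x49, 0x0F, 0xAF, 0xC7] row in the tests below.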
/// Jump near, relative, RIP = RIP + 32-bit displacement sign extended to 64-bits.
#[inline(always)]
fn jmp_imm32(buf: &mut Vec<'_, u8>, imm: i32) {
@ -1443,6 +1522,35 @@ mod tests {
}
}
#[test]
fn test_andpd_freg64_freg64() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, src), expected) in &[
(
(X86_64FloatReg::XMM0, X86_64FloatReg::XMM0),
vec![0x66, 0x0F, 0x54, 0xC0],
),
(
(X86_64FloatReg::XMM0, X86_64FloatReg::XMM15),
vec![0x66, 0x41, 0x0F, 0x54, 0xC7],
),
(
(X86_64FloatReg::XMM15, X86_64FloatReg::XMM0),
vec![0x66, 0x44, 0x0F, 0x54, 0xF8],
),
(
(X86_64FloatReg::XMM15, X86_64FloatReg::XMM15),
vec![0x66, 0x45, 0x0F, 0x54, 0xFF],
),
] {
buf.clear();
andpd_freg64_freg64(&mut buf, *dst, *src);
assert_eq!(&expected[..], &buf[..]);
}
}
#[test]
fn test_xor_reg64_reg64() {
let arena = bumpalo::Bump::new();
@ -1514,6 +1622,34 @@ mod tests {
}
}
#[test]
fn test_imul_reg64_reg64() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, src), expected) in &[
(
(X86_64GeneralReg::RAX, X86_64GeneralReg::RAX),
[0x48, 0x0F, 0xAF, 0xC0],
),
(
(X86_64GeneralReg::RAX, X86_64GeneralReg::R15),
[0x49, 0x0F, 0xAF, 0xC7],
),
(
(X86_64GeneralReg::R15, X86_64GeneralReg::RAX),
[0x4C, 0x0F, 0xAF, 0xF8],
),
(
(X86_64GeneralReg::R15, X86_64GeneralReg::R15),
[0x4D, 0x0F, 0xAF, 0xFF],
),
] {
buf.clear();
imul_reg64_reg64(&mut buf, *dst, *src);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_jmp_imm32() {
let arena = bumpalo::Bump::new();


@ -1,4 +1,4 @@
#![warn(clippy::all, clippy::dbg_macro)]
#![warn(clippy::dbg_macro)]
// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
#![allow(clippy::large_enum_variant, clippy::upper_case_acronyms)]
@ -8,7 +8,7 @@ use roc_collections::all::{MutMap, MutSet};
use roc_module::ident::{ModuleName, TagName};
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::ir::{BranchInfo, CallType, Expr, JoinPointId, Literal, Proc, Stmt, Wrapped};
use roc_mono::ir::{BranchInfo, CallType, Expr, JoinPointId, Literal, Proc, Stmt};
use roc_mono::layout::{Builtin, Layout, LayoutIds};
use target_lexicon::Triple;
@ -22,6 +22,7 @@ pub struct Env<'a> {
pub interns: Interns,
pub exposed_to_host: MutSet<Symbol>,
pub lazy_literals: bool,
pub generate_allocators: bool,
}
// These relocations likely will need a length.
@ -71,6 +72,9 @@ where
ret_layout: &Layout<'a>,
) -> Result<(), String>;
/// Used for generating wrappers for malloc/realloc/free
fn build_wrapped_jmp(&mut self) -> Result<(&'a [u8], u64), String>;
/// build_proc creates a procedure and outputs it to the wrapped object writer.
fn build_proc(&mut self, proc: Proc<'a>) -> Result<(&'a [u8], &[Relocation]), String> {
self.reset();
@ -106,9 +110,9 @@ where
call,
pass,
fail: _,
exception_id: _,
} => {
// for now, treat invoke as a normal call
let stmt = Stmt::Let(*symbol, Expr::Call(call.clone()), *layout, pass);
self.build_stmt(&stmt, ret_layout)
}
@ -144,7 +148,7 @@ where
) -> Result<(), String>;
/// build_expr builds the expressions for the specified symbol.
/// The builder must keep track of the symbol because it may be refered to later.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_expr(
&mut self,
sym: &Symbol,
@ -188,6 +192,9 @@ where
Symbol::NUM_ATAN => {
self.build_run_low_level(sym, &LowLevel::NumAtan, arguments, layout)
}
Symbol::NUM_MUL => {
self.build_run_low_level(sym, &LowLevel::NumMul, arguments, layout)
}
Symbol::NUM_POW_INT => self.build_run_low_level(
sym,
&LowLevel::NumPowInt,
@ -215,7 +222,7 @@ where
}
}
CallType::LowLevel { op: lowlevel } => {
CallType::LowLevel { op: lowlevel, .. } => {
self.build_run_low_level(sym, lowlevel, arguments, layout)
}
x => Err(format!("the call type, {:?}, is not yet implemented", x)),
@ -225,18 +232,17 @@ where
self.load_literal_symbols(fields)?;
self.create_struct(sym, layout, fields)
}
Expr::AccessAtIndex {
Expr::StructAtIndex {
index,
field_layouts,
structure,
wrapped,
} => self.load_access_at_index(sym, structure, *index, field_layouts, wrapped),
} => self.load_struct_at_index(sym, structure, *index, field_layouts),
x => Err(format!("the expression, {:?}, is not yet implemented", x)),
}
}
/// build_run_low_level builds the low level operation and outputs to the specified symbol.
/// The builder must keep track of the symbol because it may be refered to later.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_run_low_level(
&mut self,
sym: &Symbol,
@ -251,6 +257,7 @@ where
// TODO: when this is expanded to floats, deal with typecasting here and then call the correct low level method.
match layout {
Layout::Builtin(Builtin::Int64) => self.build_num_abs_i64(sym, &args[0]),
Layout::Builtin(Builtin::Float64) => self.build_num_abs_f64(sym, &args[0]),
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
@ -275,6 +282,15 @@ where
LowLevel::NumAtan => {
self.build_fn_call(sym, bitcode::NUM_ATAN.to_string(), args, &[*layout], layout)
}
LowLevel::NumMul => {
// TODO: when this is expanded to floats, deal with typecasting here and then call the correct low level method.
match layout {
Layout::Builtin(Builtin::Int64) => {
self.build_num_mul_i64(sym, &args[0], &args[1])
}
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
LowLevel::NumPowInt => self.build_fn_call(
sym,
bitcode::NUM_POW_INT.to_string(),
@ -316,6 +332,10 @@ where
/// It only deals with inputs and outputs of i64 type.
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;
/// build_num_abs_f64 stores the absolute value of src into dst.
/// It only deals with inputs and outputs of f64 type.
fn build_num_abs_f64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;
/// build_num_add_i64 stores the sum of src1 and src2 into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_add_i64(
@ -334,6 +354,15 @@ where
src2: &Symbol,
) -> Result<(), String>;
/// build_num_mul_i64 stores `src1 * src2` into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_mul_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String>;
/// build_num_sub_i64 stores the `src1 - src2` difference into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_sub_i64(
@ -369,14 +398,13 @@ where
fields: &'a [Symbol],
) -> Result<(), String>;
/// load_access_at_index loads into `sym` the value at `index` in `structure`.
fn load_access_at_index(
/// load_struct_at_index loads into `sym` the value at `index` in `structure`.
fn load_struct_at_index(
&mut self,
sym: &Symbol,
structure: &Symbol,
index: u64,
field_layouts: &'a [Layout<'a>],
wrapped: &Wrapped,
) -> Result<(), String>;
/// load_literal sets a symbol to be equal to a literal.
@ -432,7 +460,6 @@ where
self.set_last_seen(*sym, stmt);
match expr {
Expr::Literal(_) => {}
Expr::FunctionPointer(sym, _) => self.set_last_seen(*sym, stmt),
Expr::Call(call) => self.scan_ast_call(call, stmt),
@ -446,7 +473,13 @@ where
self.set_last_seen(*sym, stmt);
}
}
Expr::AccessAtIndex { structure, .. } => {
Expr::StructAtIndex { structure, .. } => {
self.set_last_seen(*structure, stmt);
}
Expr::GetTagId { structure, .. } => {
self.set_last_seen(*structure, stmt);
}
Expr::UnionAtIndex { structure, .. } => {
self.set_last_seen(*structure, stmt);
}
Expr::Array { elems, .. } => {
@ -485,15 +518,16 @@ where
Stmt::Invoke {
symbol,
layout,
layout: _,
call,
pass,
fail: _,
exception_id: _,
} => {
// for now, treat invoke as a normal call
let stmt = Stmt::Let(*symbol, Expr::Call(call.clone()), *layout, pass);
self.scan_ast(&stmt);
self.set_last_seen(*symbol, stmt);
self.scan_ast_call(call, stmt);
self.scan_ast(pass);
}
Stmt::Switch {
@ -511,7 +545,7 @@ where
Stmt::Ret(sym) => {
self.set_last_seen(*sym, stmt);
}
Stmt::Rethrow => {}
Stmt::Resume(_exception_id) => {}
Stmt::Refcounting(modify, following) => {
let sym = modify.get_symbol();
@ -520,7 +554,7 @@ where
}
Stmt::Join {
parameters,
continuation,
body: continuation,
remainder,
..
} => {
@ -552,10 +586,8 @@ where
match call_type {
CallType::ByName { .. } => {}
CallType::ByPointer { name: sym, .. } => {
self.set_last_seen(*sym, stmt);
}
CallType::LowLevel { .. } => {}
CallType::HigherOrderLowLevel { .. } => {}
CallType::Foreign { .. } => {}
}
}


@ -9,18 +9,19 @@ use object::{
};
use roc_collections::all::MutMap;
use roc_module::symbol;
use roc_mono::ir::Proc;
use roc_mono::layout::Layout;
use roc_mono::ir::{Proc, ProcLayout};
use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Triple};
const VERSION: &str = env!("CARGO_PKG_VERSION");
// This is used by some code below which is currently commented out.
// See that code for more details!
// const VERSION: &str = env!("CARGO_PKG_VERSION");
/// build_module is the high level builder/delegator.
/// It takes the request to build a module and outputs the object file for the module.
pub fn build_module<'a>(
env: &'a Env,
target: &Triple,
procedures: MutMap<(symbol::Symbol, Layout<'a>), Proc<'a>>,
procedures: MutMap<(symbol::Symbol, ProcLayout<'a>), Proc<'a>>,
) -> Result<Object, String> {
match target {
Triple {
@ -41,6 +42,28 @@ pub fn build_module<'a>(
Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little),
)
}
Triple {
architecture: TargetArch::X86_64,
binary_format: TargetBF::Macho,
..
} => {
let backend: Backend64Bit<
x86_64::X86_64GeneralReg,
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
build_object(
env,
procedures,
backend,
Object::new(
BinaryFormat::MachO,
Architecture::X86_64,
Endianness::Little,
),
)
}
Triple {
architecture: TargetArch::Aarch64(_),
binary_format: TargetBF::Elf,
@ -65,31 +88,110 @@ pub fn build_module<'a>(
}
}
fn generate_wrapper<'a, B: Backend<'a>>(
backend: &mut B,
output: &mut Object,
wrapper_name: String,
wraps: String,
) -> Result<(), String> {
let text_section = output.section_id(StandardSection::Text);
let proc_symbol = Symbol {
name: wrapper_name.as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Dynamic,
weak: false,
section: SymbolSection::Section(text_section),
flags: SymbolFlags::None,
};
let proc_id = output.add_symbol(proc_symbol);
let (proc_data, offset) = backend.build_wrapped_jmp()?;
let proc_offset = output.add_symbol_data(proc_id, text_section, proc_data, 16);
let name = wraps.as_str().as_bytes();
// If the symbol is an undefined zig builtin, we need to add it here.
let symbol = Symbol {
name: name.to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Dynamic,
weak: true,
section: SymbolSection::Undefined,
flags: SymbolFlags::None,
};
output.add_symbol(symbol);
if let Some(sym_id) = output.symbol_id(name) {
let reloc = write::Relocation {
offset: offset + proc_offset,
size: 32,
kind: RelocationKind::PltRelative,
encoding: RelocationEncoding::X86Branch,
symbol: sym_id,
addend: -4,
};
output
.add_relocation(text_section, reloc)
.map_err(|e| format!("{:?}", e))?;
Ok(())
} else {
Err(format!("failed to find fn symbol for {:?}", wraps))
}
}
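// For reference, the fixup the linker performs for the relocation above:
// with S the address the branch resolves to (the wrapped symbol or its PLT
// stub), P the relocation offset (the start of the zeroed imm32 that
// tail_call returned), and A = -4 the addend, it stores S + A - P.
// A sketch of that arithmetic (variable names illustrative):
//
//     // RIP points just past the 4-byte immediate when the jmp executes,
//     // so the stored displacement must be relative to p + 4:
//     let disp = (s as i64) - 4 - (p as i64); // == s - (p + 4)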
fn build_object<'a, B: Backend<'a>>(
env: &'a Env,
procedures: MutMap<(symbol::Symbol, Layout<'a>), Proc<'a>>,
procedures: MutMap<(symbol::Symbol, ProcLayout<'a>), Proc<'a>>,
mut backend: B,
mut output: Object,
) -> Result<Object, String> {
let data_section = output.section_id(StandardSection::Data);
let comment = output.add_section(vec![], b"comment".to_vec(), SectionKind::OtherString);
/*
// Commented out because we couldn't figure out how to get it to work on mac - see https://github.com/rtfeldman/roc/pull/1323
let comment = output.add_section(vec![], b".comment".to_vec(), SectionKind::OtherString);
output.append_section_data(
comment,
format!("\0roc dev backend version {} \0", VERSION).as_bytes(),
1,
);
*/
if env.generate_allocators {
generate_wrapper(
&mut backend,
&mut output,
"roc_alloc".into(),
"malloc".into(),
)?;
generate_wrapper(
&mut backend,
&mut output,
"roc_realloc".into(),
"realloc".into(),
)?;
generate_wrapper(
&mut backend,
&mut output,
"roc_dealloc".into(),
"free".into(),
)?;
}
// Setup layout_ids for procedure calls.
let mut layout_ids = roc_mono::layout::LayoutIds::default();
let mut procs = Vec::with_capacity_in(procedures.len(), env.arena);
for ((sym, layout), proc) in procedures {
let fn_name = layout_ids
.get(sym, &layout)
.get_toplevel(sym, &layout)
.to_symbol_string(sym, &env.interns);
let section_id = output.add_section(
output.segment_name(StandardSegment::Text).to_vec(),
format!(".text.{}", fn_name).as_bytes().to_vec(),
format!(".text.{:x}", sym.as_u64()).as_bytes().to_vec(),
SectionKind::Text,
);
@ -182,7 +284,7 @@ fn build_object<'a, B: Backend<'a>>(
offset: offset + proc_offset,
size: 32,
kind: RelocationKind::PltRelative,
encoding: RelocationEncoding::Generic,
encoding: RelocationEncoding::X86Branch,
symbol: sym_id,
addend: -4,
}