Use roc_target over target_lexicon

Tailors a target type to our needs instead of reusing target_lexicon's general-purpose one.
Replaces a large number of uses across the entire compiler.
This is a base for later adding new targets, like Thumb.
Brendan Hansknecht 2024-03-21 21:54:58 -07:00
parent 185262510c
commit 6dc5bfb1b7
GPG key ID: 0EA784685083E75B
72 changed files with 1008 additions and 1371 deletions
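
For orientation, a minimal sketch of the roc_target::Target surface this commit leans on, reconstructed from call sites in the diff below (Target::LinuxX64, ptr_width(), max_by_value_size()); the Wasm32 variant and the max_by_value_size body are illustrative assumptions, not the crate's actual definition:

    // Sketch only: the Target shape implied by this commit's call sites.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum PtrWidth {
        Bytes4 = 4,
        Bytes8 = 8,
    }

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum Target {
        LinuxX64,
        Wasm32, // hypothetical second variant, to exercise the 4-byte case
    }

    impl Target {
        pub fn ptr_width(&self) -> PtrWidth {
            match self {
                Target::LinuxX64 => PtrWidth::Bytes8,
                Target::Wasm32 => PtrWidth::Bytes4,
            }
        }

        // Assumed body: the largest struct still passed by value, as used by
        // LayoutRepr::is_passed_by_reference further down.
        pub fn max_by_value_size(&self) -> usize {
            2 * self.ptr_width() as usize
        }
    }

Unlike target_lexicon's fully general triple, such an enum can only name configurations the compiler actually supports, which is what makes adding a target like Thumb a matter of adding a variant.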

View file

@@ -3,7 +3,7 @@ use bumpalo::collections::CollectIn;
use bumpalo::Bump;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{IdentIds, ModuleId, Symbol};
-use roc_target::TargetInfo;
+use roc_target::Target;
use crate::ir::{
BranchInfo, Call, CallSpecId, CallType, Expr, JoinPointId, Literal, ModifyRc, PassedFunction,
@@ -93,20 +93,20 @@ pub struct Context<'a> {
pub struct CodeGenHelp<'a> {
arena: &'a Bump,
home: ModuleId,
-target_info: TargetInfo,
+target: Target,
layout_isize: InLayout<'a>,
specializations: Vec<'a, Specialization<'a>>,
debug_recursion_depth: usize,
}
impl<'a> CodeGenHelp<'a> {
-pub fn new(arena: &'a Bump, target_info: TargetInfo, home: ModuleId) -> Self {
-let layout_isize = Layout::isize(target_info);
+pub fn new(arena: &'a Bump, target: Target, home: ModuleId) -> Self {
+let layout_isize = Layout::isize(target);
CodeGenHelp {
arena,
home,
-target_info,
+target,
layout_isize,
specializations: Vec::with_capacity_in(16, arena),
debug_recursion_depth: 0,

View file

@@ -383,7 +383,7 @@ pub fn refcount_reset_proc_body<'a>(
};
// Constant for unique refcount
-let refcount_1_encoded = match root.target_info.ptr_width() {
+let refcount_1_encoded = match root.target.ptr_width() {
PtrWidth::Bytes4 => i32::MIN as i128,
PtrWidth::Bytes8 => i64::MIN as i128,
}
@@ -407,7 +407,7 @@ pub fn refcount_reset_proc_body<'a>(
);
let mask_lower_bits = match layout_interner.get_repr(layout) {
-LayoutRepr::Union(ul) => ul.stores_tag_id_in_pointer(root.target_info),
+LayoutRepr::Union(ul) => ul.stores_tag_id_in_pointer(root.target),
_ => false,
};
@@ -508,7 +508,7 @@ pub fn refcount_resetref_proc_body<'a>(
};
// Constant for unique refcount
-let refcount_1_encoded = match root.target_info.ptr_width() {
+let refcount_1_encoded = match root.target.ptr_width() {
PtrWidth::Bytes4 => i32::MIN as i128,
PtrWidth::Bytes8 => i64::MIN as i128,
}
@@ -532,7 +532,7 @@ pub fn refcount_resetref_proc_body<'a>(
);
let mask_lower_bits = match layout_interner.get_repr(layout) {
-LayoutRepr::Union(ul) => ul.stores_tag_id_in_pointer(root.target_info),
+LayoutRepr::Union(ul) => ul.stores_tag_id_in_pointer(root.target),
_ => false,
};
@@ -617,7 +617,7 @@ fn rc_ptr_from_data_ptr_help<'a>(
// Pointer size constant
let ptr_size_sym = root.create_symbol(ident_ids, "ptr_size");
let ptr_size_expr = Expr::Literal(Literal::Int(
-(root.target_info.ptr_width() as i128).to_ne_bytes(),
+(root.target.ptr_width() as i128).to_ne_bytes(),
));
let ptr_size_stmt = |next| Stmt::Let(ptr_size_sym, ptr_size_expr, root.layout_isize, next);
@@ -630,7 +630,7 @@ fn rc_ptr_from_data_ptr_help<'a>(
},
arguments: root.arena.alloc([addr_sym, ptr_size_sym]),
});
-let sub_stmt = |next| Stmt::Let(rc_addr_sym, sub_expr, Layout::usize(root.target_info), next);
+let sub_stmt = |next| Stmt::Let(rc_addr_sym, sub_expr, Layout::usize(root.target), next);
// Typecast the refcount address from integer to pointer
let cast_expr = Expr::Call(Call {
@@ -697,7 +697,7 @@ fn modify_refcount<'a>(
}
HelperOp::Dec | HelperOp::DecRef(_) => {
-debug_assert!(alignment >= root.target_info.ptr_width() as u32);
+debug_assert!(alignment >= root.target.ptr_width() as u32);
let (op, ptr) = match ptr {
Pointer::ToData(s) => (LowLevel::RefCountDecDataPtr, s),
@@ -780,7 +780,7 @@ fn refcount_str<'a>(
};
let length_stmt = |next| Stmt::Let(length, length_expr, layout_isize, next);
-let alignment = root.target_info.ptr_width() as u32;
+let alignment = root.target.ptr_width() as u32;
// let is_slice = lowlevel NumLt length zero
let is_slice = root.create_symbol(ident_ids, "is_slice");
@@ -1032,7 +1032,7 @@ fn refcount_list<'a>(
//
let alignment = Ord::max(
-root.target_info.ptr_width() as u32,
+root.target.ptr_width() as u32,
layout_interner.alignment_bytes(elem_layout),
);
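
The i32::MIN / i64::MIN constants above encode a refcount of exactly one: Roc biases refcounts so that the word's minimum signed value means "unique", making the uniqueness test a single comparison. A hedged sketch of that decoding for a 64-bit target (decode_refcount_64 is an illustrative helper, not compiler code):

    // Assumed bias scheme: stored = i64::MIN + (count - 1).
    fn decode_refcount_64(encoded: i64) -> u64 {
        encoded.wrapping_sub(i64::MIN) as u64 + 1
    }

    fn main() {
        assert_eq!(decode_refcount_64(i64::MIN), 1); // freshly allocated: unique
        assert_eq!(decode_refcount_64(i64::MIN + 2), 3); // two extra references
    }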

View file

@@ -1181,7 +1181,7 @@ fn specialize_list<'a, 'i>(
newer_continuation = arena.alloc(Stmt::Let(
index_symbol,
Expr::Literal(Literal::Int(i128::to_ne_bytes(i as i128))),
-Layout::isize(layout_interner.target_info()),
+Layout::isize(layout_interner.target()),
index,
));
}

View file

@@ -31,7 +31,7 @@ use roc_module::symbol::{IdentIds, ModuleId, Symbol};
use roc_problem::can::{RuntimeError, ShadowKind};
use roc_region::all::{Loc, Region};
use roc_std::RocDec;
-use roc_target::TargetInfo;
+use roc_target::Target;
use roc_types::subs::{
instantiate_rigids, storage_copy_var_to, Content, ExhaustiveMark, FlatType, RedundantMark,
StorageSubs, Subs, Variable, VariableSubsSlice,
@@ -1354,7 +1354,7 @@ pub struct Env<'a, 'i> {
pub expectation_subs: Option<&'i mut Subs>,
pub home: ModuleId,
pub ident_ids: &'i mut IdentIds,
-pub target_info: TargetInfo,
+pub target: Target,
pub update_mode_ids: &'i mut UpdateModeIds,
pub call_specialization_counter: u32,
// TODO: WorldAbilities and exposed_by_module share things, think about how to combine them

View file

@@ -1779,7 +1779,7 @@ fn test_to_comparison<'a>(
let real_len = env.unique_symbol();
let test_len = env.unique_symbol();
-let usize_layout = Layout::usize(env.target_info);
+let usize_layout = Layout::usize(env.target);
stores.push((real_len, usize_layout, real_len_expr));
stores.push((test_len, usize_layout, test_len_expr));
@@ -2337,7 +2337,7 @@ fn decide_to_branching<'a>(
let len_symbol = env.unique_symbol();
let switch = Stmt::Switch {
-cond_layout: Layout::usize(env.target_info),
+cond_layout: Layout::usize(env.target),
cond_symbol: len_symbol,
branches: branches.into_bump_slice(),
default_branch: (default_branch_info, env.arena.alloc(default_branch)),
@@ -2355,7 +2355,7 @@ fn decide_to_branching<'a>(
Stmt::Let(
len_symbol,
len_expr,
-Layout::usize(env.target_info),
+Layout::usize(env.target),
env.arena.alloc(switch),
)
} else {

View file

@@ -10,7 +10,7 @@ use roc_error_macros::{internal_error, todo_abilities};
use roc_module::ident::{Lowercase, TagName};
use roc_module::symbol::{Interns, Symbol};
use roc_problem::can::RuntimeError;
-use roc_target::{PtrWidth, TargetInfo};
+use roc_target::{PtrWidth, Target};
use roc_types::num::NumericRange;
use roc_types::subs::{
self, Content, FlatType, GetSubsSlice, OptVariable, RecordFields, Subs, TagExt, TupleElems,
@@ -114,7 +114,7 @@ macro_rules! inc_stat {
/// Layout cache to avoid recomputing [Layout] from a [Variable] multiple times.
#[derive(Debug)]
pub struct LayoutCache<'a> {
-pub target_info: TargetInfo,
+pub target: Target,
cache: std::vec::Vec<CacheLayer<LayoutResult<'a>>>,
raw_function_cache: std::vec::Vec<CacheLayer<RawFunctionLayoutResult<'a>>>,
@@ -128,13 +128,13 @@ pub struct LayoutCache<'a> {
}
impl<'a> LayoutCache<'a> {
-pub fn new(interner: TLLayoutInterner<'a>, target_info: TargetInfo) -> Self {
+pub fn new(interner: TLLayoutInterner<'a>, target: Target) -> Self {
let mut cache = std::vec::Vec::with_capacity(4);
cache.push(Default::default());
let mut raw_cache = std::vec::Vec::with_capacity(4);
raw_cache.push(Default::default());
Self {
-target_info,
+target,
cache,
raw_function_cache: raw_cache,
@@ -964,39 +964,39 @@ impl<'a> UnionLayout<'a> {
self.discriminant().layout()
}
-fn stores_tag_id_in_pointer_bits(tags: &[&[InLayout<'a>]], target_info: TargetInfo) -> bool {
-tags.len() < target_info.ptr_width() as usize
+fn stores_tag_id_in_pointer_bits(tags: &[&[InLayout<'a>]], target: Target) -> bool {
+tags.len() < target.ptr_width() as usize
}
pub const POINTER_MASK_32BIT: usize = 0b0000_0111;
pub const POINTER_MASK_64BIT: usize = 0b0000_0011;
-pub fn tag_id_pointer_bits_and_mask(target_info: TargetInfo) -> (usize, usize) {
-match target_info.ptr_width() {
+pub fn tag_id_pointer_bits_and_mask(target: Target) -> (usize, usize) {
+match target.ptr_width() {
PtrWidth::Bytes8 => (3, Self::POINTER_MASK_64BIT),
PtrWidth::Bytes4 => (2, Self::POINTER_MASK_32BIT),
}
}
// i.e. it is not implicit and not stored in the pointer bits
-pub fn stores_tag_id_as_data(&self, target_info: TargetInfo) -> bool {
+pub fn stores_tag_id_as_data(&self, target: Target) -> bool {
match self {
UnionLayout::NonRecursive(_) => true,
UnionLayout::Recursive(tags)
| UnionLayout::NullableWrapped {
other_tags: tags, ..
-} => !Self::stores_tag_id_in_pointer_bits(tags, target_info),
+} => !Self::stores_tag_id_in_pointer_bits(tags, target),
UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => false,
}
}
-pub fn stores_tag_id_in_pointer(&self, target_info: TargetInfo) -> bool {
+pub fn stores_tag_id_in_pointer(&self, target: Target) -> bool {
match self {
UnionLayout::NonRecursive(_) => false,
UnionLayout::Recursive(tags)
| UnionLayout::NullableWrapped {
other_tags: tags, ..
-} => Self::stores_tag_id_in_pointer_bits(tags, target_info),
+} => Self::stores_tag_id_in_pointer_bits(tags, target),
UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => false,
}
}
UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => false,
}
}
@@ -1049,7 +1049,7 @@ impl<'a> UnionLayout<'a> {
};
// because we store a refcount, the alignment must be at least the size of a pointer
-allocation.max(interner.target_info().ptr_width() as u32)
+allocation.max(interner.target().ptr_width() as u32)
}
/// Size of the data in memory, whether it's stack or heap (for non-null tag ids)
@@ -1059,7 +1059,7 @@ impl<'a> UnionLayout<'a> {
{
let (data_width, data_align) = self.data_size_and_alignment_help_match(interner);
-if self.stores_tag_id_as_data(interner.target_info()) {
+if self.stores_tag_id_as_data(interner.target()) {
use Discriminant::*;
match self.discriminant() {
U0 => (round_up_to_alignment(data_width, data_align), data_align),
@@ -1089,7 +1089,7 @@ impl<'a> UnionLayout<'a> {
where
I: LayoutInterner<'a>,
{
-if !self.stores_tag_id_as_data(interner.target_info()) {
+if !self.stores_tag_id_as_data(interner.target()) {
return None;
};
@@ -1151,7 +1151,7 @@ impl<'a> UnionLayout<'a> {
UnionLayout::Recursive(_)
| UnionLayout::NonNullableUnwrapped(_)
| UnionLayout::NullableWrapped { .. }
-| UnionLayout::NullableUnwrapped { .. } => interner.target_info().ptr_width() as u32,
+| UnionLayout::NullableUnwrapped { .. } => interner.target().ptr_width() as u32,
}
}
@@ -2686,7 +2686,7 @@ impl<'a> LayoutRepr<'a> {
LayoutRepr::Builtin(builtin) => {
use Builtin::*;
-match interner.target_info().ptr_width() {
+match interner.target().ptr_width() {
PtrWidth::Bytes4 => {
// more things fit into a register
false
@@ -2700,7 +2700,7 @@ impl<'a> LayoutRepr<'a> {
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => true,
LayoutRepr::Struct(_) => {
// TODO: write tests for this!
-self.stack_size(interner) as usize > interner.target_info().max_by_value_size()
+self.stack_size(interner) as usize > interner.target().max_by_value_size()
}
LayoutRepr::LambdaSet(lambda_set) => interner
@@ -2739,7 +2739,7 @@ impl<'a> LayoutRepr<'a> {
use LayoutRepr::*;
match self {
-Builtin(builtin) => builtin.stack_size(interner.target_info()),
+Builtin(builtin) => builtin.stack_size(interner.target()),
Struct(field_layouts) => {
let mut sum = 0;
@@ -2754,9 +2754,9 @@ impl<'a> LayoutRepr<'a> {
.get_repr(lambda_set.runtime_representation())
.stack_size_without_alignment(interner),
RecursivePointer(_) | Ptr(_) | FunctionPointer(_) => {
-interner.target_info().ptr_width() as u32
+interner.target().ptr_width() as u32
}
-Erased(e) => e.stack_size_without_alignment(interner.target_info()),
+Erased(e) => e.stack_size_without_alignment(interner.target()),
}
}
@@ -2801,17 +2801,17 @@ impl<'a> LayoutRepr<'a> {
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
-| NonNullableUnwrapped(_) => interner.target_info().ptr_width() as u32,
+| NonNullableUnwrapped(_) => interner.target().ptr_width() as u32,
}
}
LambdaSet(lambda_set) => interner
.get_repr(lambda_set.runtime_representation())
.alignment_bytes(interner),
-Builtin(builtin) => builtin.alignment_bytes(interner.target_info()),
+Builtin(builtin) => builtin.alignment_bytes(interner.target()),
RecursivePointer(_) | Ptr(_) | FunctionPointer(_) => {
-interner.target_info().ptr_width() as u32
+interner.target().ptr_width() as u32
}
-Erased(e) => e.alignment_bytes(interner.target_info()),
+Erased(e) => e.alignment_bytes(interner.target()),
}
}
@@ -2819,7 +2819,7 @@ impl<'a> LayoutRepr<'a> {
where
I: LayoutInterner<'a>,
{
-let ptr_width = interner.target_info().ptr_width() as u32;
+let ptr_width = interner.target().ptr_width() as u32;
use LayoutRepr::*;
match self {
@@ -2834,7 +2834,7 @@ impl<'a> LayoutRepr<'a> {
}
Ptr(inner) => interner.get_repr(*inner).alignment_bytes(interner),
FunctionPointer(_) => ptr_width,
-Erased(e) => e.allocation_alignment_bytes(interner.target_info()),
+Erased(e) => e.allocation_alignment_bytes(interner.target()),
}
}
@@ -2994,15 +2994,15 @@ impl<'a> LayoutRepr<'a> {
pub type SeenRecPtrs<'a> = VecSet<InLayout<'a>>;
impl<'a> Layout<'a> {
-pub fn usize(target_info: TargetInfo) -> InLayout<'a> {
-match target_info.ptr_width() {
+pub fn usize(target: Target) -> InLayout<'a> {
+match target.ptr_width() {
roc_target::PtrWidth::Bytes4 => Layout::U32,
roc_target::PtrWidth::Bytes8 => Layout::U64,
}
}
-pub fn isize(target_info: TargetInfo) -> InLayout<'a> {
-match target_info.ptr_width() {
+pub fn isize(target: Target) -> InLayout<'a> {
+match target.ptr_width() {
roc_target::PtrWidth::Bytes4 => Layout::I32,
roc_target::PtrWidth::Bytes8 => Layout::I64,
}
@@ -3067,10 +3067,10 @@ impl<'a> Builtin<'a> {
pub const WRAPPER_LEN: u32 = 1;
pub const WRAPPER_CAPACITY: u32 = 2;
-pub fn stack_size(&self, target_info: TargetInfo) -> u32 {
+pub fn stack_size(&self, target: Target) -> u32 {
use Builtin::*;
-let ptr_width = target_info.ptr_width() as u32;
+let ptr_width = target.ptr_width() as u32;
match self {
Int(int) => int.stack_size(),
@@ -3082,20 +3082,20 @@ impl<'a> Builtin<'a> {
}
}
-pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
+pub fn alignment_bytes(&self, target: Target) -> u32 {
use std::mem::align_of;
use Builtin::*;
-let ptr_width = target_info.ptr_width() as u32;
+let ptr_width = target.ptr_width() as u32;
// for our data structures, what counts is the alignment of the `( ptr, len )` tuple, and
// since both of those are one pointer size, the alignment of that structure is a pointer
// size
match self {
-Int(int_width) => int_width.alignment_bytes(target_info),
-Float(float_width) => float_width.alignment_bytes(target_info),
+Int(int_width) => int_width.alignment_bytes(target),
+Float(float_width) => float_width.alignment_bytes(target),
Bool => align_of::<bool>() as u32,
-Decimal => IntWidth::I128.alignment_bytes(target_info),
+Decimal => IntWidth::I128.alignment_bytes(target),
// we often treat these as i128 (64-bit systems)
// or i64 (32-bit systems).
//
@@ -3186,8 +3186,8 @@ impl<'a> Builtin<'a> {
where
I: LayoutInterner<'a>,
{
-let target_info = interner.target_info();
-let ptr_width = target_info.ptr_width() as u32;
+let target = interner.target();
+let ptr_width = target.ptr_width() as u32;
let allocation = match self {
Builtin::Str => ptr_width,
@@ -3196,10 +3196,10 @@ impl<'a> Builtin<'a> {
e.alignment_bytes(interner).max(ptr_width)
}
// The following are usually not heap-allocated, but they might be when inside a Box.
-Builtin::Int(int_width) => int_width.alignment_bytes(target_info).max(ptr_width),
-Builtin::Float(float_width) => float_width.alignment_bytes(target_info).max(ptr_width),
+Builtin::Int(int_width) => int_width.alignment_bytes(target).max(ptr_width),
+Builtin::Float(float_width) => float_width.alignment_bytes(target).max(ptr_width),
Builtin::Bool => (core::mem::align_of::<bool>() as u32).max(ptr_width),
-Builtin::Decimal => IntWidth::I128.alignment_bytes(target_info).max(ptr_width),
+Builtin::Decimal => IntWidth::I128.alignment_bytes(target).max(ptr_width),
};
allocation.max(ptr_width)
@@ -4788,7 +4788,7 @@ mod test {
#[test]
fn width_and_alignment_union_empty_struct() {
-let mut interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
+let mut interner = STLayoutInterner::with_capacity(4, Target::LinuxX64);
let lambda_set = LambdaSet {
args: &(&[] as &[InLayout]),
@@ -4813,7 +4813,7 @@
#[test]
fn memcpy_size_result_u32_unit() {
-let mut interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
+let mut interner = STLayoutInterner::with_capacity(4, Target::LinuxX64);
let ok_tag = &[interner.insert(Layout {
repr: LayoutRepr::Builtin(Builtin::Int(IntWidth::U32)).direct(),
@@ -4829,13 +4829,13 @@
#[test]
fn void_stack_size() {
-let interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
+let interner = STLayoutInterner::with_capacity(4, Target::LinuxX64);
assert_eq!(Layout::VOID_NAKED.repr(&interner).stack_size(&interner), 0);
}
#[test]
fn align_u128_in_tag_union() {
-let interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
+let interner = STLayoutInterner::with_capacity(4, Target::LinuxX64);
assert_eq!(interner.alignment_bytes(Layout::U128), 16);
}
}
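
The tag-id-in-pointer machinery above works because heap allocations are at least pointer-aligned, so a recursive tag union's pointer has low bits that are always zero; when the union has fewer tags than the pointer width (stores_tag_id_in_pointer_bits), the tag id rides in those bits using the shift and mask from tag_id_pointer_bits_and_mask. A standalone illustration for a 64-bit target (pack/unpack are hypothetical helpers, not compiler code):

    // 8-byte-aligned pointers leave 3 low bits free for a tag id < 8.
    fn pack(ptr: usize, tag_id: usize) -> usize {
        debug_assert_eq!(ptr & 0b111, 0); // alignment guarantees zero low bits
        debug_assert!(tag_id < 8); // requires tags.len() < ptr width in bytes
        ptr | tag_id
    }

    fn unpack(tagged: usize) -> (usize, usize) {
        (tagged & !0b111, tagged & 0b111)
    }

    fn main() {
        assert_eq!(unpack(pack(0x1000, 5)), (0x1000, 5));
    }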

View file

@@ -1,4 +1,4 @@
-use roc_target::TargetInfo;
+use roc_target::Target;
use super::{InLayout, LayoutRepr, UnionLayout};
@@ -27,16 +27,16 @@ impl Erased {
false
}
-pub fn stack_size_without_alignment(&self, target_info: TargetInfo) -> u32 {
-(target_info.ptr_width() as u32) * 3
+pub fn stack_size_without_alignment(&self, target: Target) -> u32 {
+(target.ptr_width() as u32) * 3
}
-pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
-target_info.ptr_width() as u32
+pub fn alignment_bytes(&self, target: Target) -> u32 {
+target.ptr_width() as u32
}
-pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
-target_info.ptr_width() as u32
+pub fn allocation_alignment_bytes(&self, target: Target) -> u32 {
+target.ptr_width() as u32
}
pub fn is_refcounted(&self) -> bool {

View file

@@ -10,7 +10,7 @@ use parking_lot::{Mutex, RwLock};
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_collections::{default_hasher, BumpMap};
use roc_module::symbol::Symbol;
-use roc_target::TargetInfo;
+use roc_target::Target;
use crate::layout::LayoutRepr;
@@ -208,7 +208,7 @@ pub trait LayoutInterner<'a>: Sized {
self.get_repr(a) == self.get_repr(b)
}
-fn target_info(&self) -> TargetInfo;
+fn target(&self) -> Target;
fn alignment_bytes(&self, layout: InLayout<'a>) -> u32 {
self.get_repr(layout).alignment_bytes(self)
@@ -545,7 +545,7 @@ struct GlobalLayoutInternerInner<'a> {
map: Mutex<BumpMap<Layout<'a>, InLayout<'a>>>,
normalized_lambda_set_map: Mutex<BumpMap<LambdaSet<'a>, LambdaSet<'a>>>,
vec: RwLock<Vec<Layout<'a>>>,
-target_info: TargetInfo,
+target: Target,
}
/// A derivative of a [GlobalLayoutInterner] interner that provides caching desirable for
@@ -564,7 +564,7 @@ pub struct TLLayoutInterner<'a> {
normalized_lambda_set_map: BumpMap<LambdaSet<'a>, LambdaSet<'a>>,
/// Cache of interned values from the parent for local access.
vec: RefCell<Vec<Option<Layout<'a>>>>,
-target_info: TargetInfo,
+target: Target,
}
/// A single-threaded interner, with no concurrency properties.
@@ -576,7 +576,7 @@ pub struct STLayoutInterner<'a> {
map: BumpMap<Layout<'a>, InLayout<'a>>,
normalized_lambda_set_map: BumpMap<LambdaSet<'a>, LambdaSet<'a>>,
vec: Vec<Layout<'a>>,
-target_info: TargetInfo,
+target: Target,
}
/// Interner constructed with an exclusive lock over [GlobalLayoutInterner]
@@ -584,7 +584,7 @@ struct LockedGlobalInterner<'a, 'r> {
map: &'r mut BumpMap<Layout<'a>, InLayout<'a>>,
normalized_lambda_set_map: &'r mut BumpMap<LambdaSet<'a>, LambdaSet<'a>>,
vec: &'r mut Vec<Layout<'a>>,
-target_info: TargetInfo,
+target: Target,
}
/// Generic hasher for a value, to be used by all interners.
@@ -614,8 +614,8 @@ fn make_normalized_lamdba_set<'a>(
impl<'a> GlobalLayoutInterner<'a> {
/// Creates a new global interner with the given capacity.
-pub fn with_capacity(cap: usize, target_info: TargetInfo) -> Self {
-STLayoutInterner::with_capacity(cap, target_info).into_global()
+pub fn with_capacity(cap: usize, target: Target) -> Self {
+STLayoutInterner::with_capacity(cap, target).into_global()
}
/// Creates a derivative [TLLayoutInterner] pointing back to this global interner.
@@ -625,7 +625,7 @@ impl<'a> GlobalLayoutInterner<'a> {
map: Default::default(),
normalized_lambda_set_map: Default::default(),
vec: Default::default(),
-target_info: self.0.target_info,
+target: self.0.target,
}
}
@@ -637,7 +637,7 @@ impl<'a> GlobalLayoutInterner<'a> {
map,
normalized_lambda_set_map,
vec,
-target_info,
+target,
} = match Arc::try_unwrap(self.0) {
Ok(inner) => inner,
Err(li) => return Err(Self(li)),
@@ -649,7 +649,7 @@
map,
normalized_lambda_set_map,
vec,
-target_info,
+target,
})
}
@@ -703,7 +703,7 @@ impl<'a> GlobalLayoutInterner<'a> {
map: &mut map,
normalized_lambda_set_map: &mut normalized_lambda_set_map,
vec: &mut vec,
-target_info: self.0.target_info,
+target: self.0.target,
};
reify::reify_lambda_set_captures(arena, &mut interner, slot, normalized.set)
} else {
@@ -765,7 +765,7 @@ impl<'a> GlobalLayoutInterner<'a> {
map: &mut map,
normalized_lambda_set_map: &mut normalized_lambda_set_map,
vec: &mut vec,
-target_info: self.0.target_info,
+target: self.0.target,
};
let full_layout = reify::reify_recursive_layout(arena, &mut interner, slot, normalized);
@@ -927,19 +927,19 @@ impl<'a> LayoutInterner<'a> for TLLayoutInterner<'a> {
value
}
-fn target_info(&self) -> TargetInfo {
-self.target_info
+fn target(&self) -> Target {
+self.target
}
}
impl<'a> STLayoutInterner<'a> {
/// Creates a new single threaded interner with the given capacity.
-pub fn with_capacity(cap: usize, target_info: TargetInfo) -> Self {
+pub fn with_capacity(cap: usize, target: Target) -> Self {
let mut interner = Self {
map: BumpMap::with_capacity_and_hasher(cap, default_hasher()),
normalized_lambda_set_map: BumpMap::with_capacity_and_hasher(cap, default_hasher()),
vec: Vec::with_capacity(cap),
-target_info,
+target,
};
fill_reserved_layouts(&mut interner);
interner
@@ -954,13 +954,13 @@ impl<'a> STLayoutInterner<'a> {
map,
normalized_lambda_set_map,
vec,
-target_info,
+target,
} = self;
GlobalLayoutInterner(Arc::new(GlobalLayoutInternerInner {
map: Mutex::new(map),
normalized_lambda_set_map: Mutex::new(normalized_lambda_set_map),
vec: RwLock::new(vec),
-target_info,
+target,
}))
}
@@ -1072,8 +1072,8 @@ macro_rules! st_impl {
self.vec[index]
}
-fn target_info(&self) -> TargetInfo {
-self.target_info
+fn target(&self) -> Target {
+self.target
}
}
};
@@ -1758,13 +1758,13 @@ pub mod dbg_stable {
mod insert_lambda_set {
use bumpalo::Bump;
use roc_module::symbol::Symbol;
-use roc_target::TargetInfo;
+use roc_target::Target;
use crate::layout::{LambdaSet, Layout, LayoutRepr, SemanticRepr};
use super::{GlobalLayoutInterner, InLayout, LayoutInterner, NeedsRecursionPointerFixup};
-const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
+const TARGET: Target = Target::LinuxX64;
const TEST_SET: &&[(Symbol, &[InLayout])] =
&(&[(Symbol::ATTR_ATTR, &[Layout::UNIT] as &[_])] as &[_]);
const TEST_ARGS: &&[InLayout] = &(&[Layout::UNIT] as &[_]);
@@ -1776,7 +1776,7 @@ mod insert_lambda_set {
fn two_threads_write() {
for _ in 0..100 {
let mut arenas: Vec<_> = std::iter::repeat_with(Bump::new).take(10).collect();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let set = TEST_SET;
let repr = Layout::UNIT;
std::thread::scope(|s| {
@@ -1797,7 +1797,7 @@
#[test]
fn insert_then_reintern() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut interner = global.fork();
let lambda_set =
@@ -1812,7 +1812,7 @@
#[test]
fn write_global_then_single_threaded() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let set = TEST_SET;
let repr = Layout::UNIT;
@@ -1832,7 +1832,7 @@
#[test]
fn write_single_threaded_then_global() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut st_interner = global.unwrap().unwrap();
let set = TEST_SET;
@@ -1852,13 +1852,13 @@
#[cfg(test)]
mod insert_recursive_layout {
use bumpalo::Bump;
-use roc_target::TargetInfo;
+use roc_target::Target;
use crate::layout::{Builtin, InLayout, Layout, LayoutRepr, SemanticRepr, UnionLayout};
use super::{GlobalLayoutInterner, LayoutInterner};
-const TARGET_INFO: TargetInfo = TargetInfo::default_x86_64();
+const TARGET: Target = Target::LinuxX64;
fn make_layout<'a>(arena: &'a Bump, interner: &mut impl LayoutInterner<'a>) -> Layout<'a> {
let list_rec = Layout {
@@ -1905,7 +1905,7 @@ mod insert_recursive_layout {
#[test]
fn write_two_threads() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let layout = {
let mut interner = global.fork();
make_layout(arena, &mut interner)
@@ -1927,7 +1927,7 @@
#[test]
fn write_twice_thread_local_single_thread() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut interner = global.fork();
let layout = make_layout(arena, &mut interner);
@@ -1943,7 +1943,7 @@
#[test]
fn write_twice_single_thread() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut interner = GlobalLayoutInterner::unwrap(global).unwrap();
let layout = make_layout(arena, &mut interner);
@@ -1960,7 +1960,7 @@
fn many_threads_read_write() {
for _ in 0..100 {
let mut arenas: Vec<_> = std::iter::repeat_with(Bump::new).take(10).collect();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
std::thread::scope(|s| {
let mut handles = Vec::with_capacity(10);
for arena in arenas.iter_mut() {
@@ -1983,7 +1983,7 @@
#[test]
fn insert_then_reintern() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut interner = global.fork();
let layout = make_layout(arena, &mut interner);
@@ -1996,7 +1996,7 @@
#[test]
fn write_global_then_single_threaded() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let layout = {
let mut interner = global.fork();
make_layout(arena, &mut interner)
@@ -2018,7 +2018,7 @@
#[test]
fn write_single_threaded_then_global() {
let arena = &Bump::new();
-let global = GlobalLayoutInterner::with_capacity(2, TARGET_INFO);
+let global = GlobalLayoutInterner::with_capacity(2, TARGET);
let mut st_interner = global.unwrap().unwrap();
let layout = make_layout(arena, &mut st_interner);
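
Throughout this file the trait accessor is renamed from target_info() to target(), so code generic over any LayoutInterner now recovers pointer-width facts from the full target. A minimal caller-side sketch (ptr_bytes is a hypothetical helper, not part of the diff):

    // Works with TLLayoutInterner, STLayoutInterner, or the locked global one.
    fn ptr_bytes<'a, I: LayoutInterner<'a>>(interner: &I) -> u32 {
        interner.target().ptr_width() as u32
    }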

View file

@@ -20,7 +20,7 @@ use bumpalo::collections::CollectIn;
use roc_collections::{MutMap, MutSet};
use roc_module::low_level::LowLevel;
use roc_module::symbol::{IdentIds, ModuleId, Symbol};
-use roc_target::TargetInfo;
+use roc_target::Target;
/**
Insert reset and reuse operations into the IR.
@@ -30,7 +30,7 @@ pub fn insert_reset_reuse_operations<'a, 'i>(
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
home: ModuleId,
-target_info: TargetInfo,
+target: Target,
ident_ids: &'i mut IdentIds,
update_mode_ids: &'i mut UpdateModeIds,
procs: &mut MutMap<(Symbol, ProcLayout<'a>), Proc<'a>>,
@@ -44,7 +44,7 @@ pub fn insert_reset_reuse_operations<'a, 'i>(
let new_proc = insert_reset_reuse_operations_proc(
arena,
layout_interner,
-target_info,
+target,
home,
ident_ids,
update_mode_ids,
@@ -58,7 +58,7 @@ pub fn insert_reset_reuse_operations<'a, 'i>(
fn insert_reset_reuse_operations_proc<'a, 'i>(
arena: &'a Bump,
layout_interner: &'i STLayoutInterner<'a>,
-target_info: TargetInfo,
+target: Target,
home: ModuleId,
ident_ids: &'i mut IdentIds,
update_mode_ids: &'i mut UpdateModeIds,
@@ -70,7 +70,7 @@ fn insert_reset_reuse_operations_proc<'a, 'i>(
}
let mut env = ReuseEnvironment {
-target_info,
+target,
symbol_tags: MutMap::default(),
non_unique_symbols: MutSet::default(),
reuse_tokens: MutMap::default(),
@@ -464,7 +464,7 @@ fn insert_reset_reuse_operations_stmt<'a, 'i>(
),
ModifyRc::Free(_) => {
if union_layout
-.stores_tag_id_in_pointer(environment.target_info)
+.stores_tag_id_in_pointer(environment.target)
{
(
Symbol::new(home, ident_ids.gen_unique()),
@@ -761,7 +761,7 @@ fn insert_reset_reuse_operations_stmt<'a, 'i>(
// Create a new environment for the body. With everything but the jump reuse tokens. As those should be given by the jump.
let mut first_pass_body_environment = ReuseEnvironment {
-target_info: environment.target_info,
+target: environment.target,
symbol_tags: environment.symbol_tags.clone(),
non_unique_symbols: environment.non_unique_symbols.clone(),
reuse_tokens: max_reuse_token_symbols.clone(),
@@ -924,7 +924,7 @@ fn insert_reset_reuse_operations_stmt<'a, 'i>(
let (second_pass_body_environment, second_pass_body) = {
// Create a new environment for the body. With everything but the jump reuse tokens. As those should be given by the jump.
let mut body_environment = ReuseEnvironment {
-target_info: environment.target_info,
+target: environment.target,
symbol_tags: environment.symbol_tags.clone(),
non_unique_symbols: environment.non_unique_symbols.clone(),
reuse_tokens: used_reuse_tokens.clone(),
@@ -1182,7 +1182,7 @@ enum JoinPointReuseTokens<'a> {
#[derive(Clone)]
struct ReuseEnvironment<'a> {
-target_info: TargetInfo,
+target: Target,
symbol_tags: MutMap<Symbol, Tag>,
non_unique_symbols: MutSet<Symbol>,
reuse_tokens: ReuseTokens<'a>,