Get rid of passing `target_info` around in so many places

This commit is contained in:
Ayaz Hafiz 2023-06-16 21:38:30 -05:00
parent 8495f3b085
commit bc41db2a34
No known key found for this signature in database
GPG key ID: 0E2A37416A25EF58
18 changed files with 154 additions and 287 deletions

View file

@ -3065,9 +3065,7 @@ impl<
let target_info = self.storage_manager.target_info;
if union_layout.stores_tag_id_as_data(target_info) {
let offset = union_layout
.tag_id_offset(self.interner(), target_info)
.unwrap() as i32;
let offset = union_layout.tag_id_offset(self.interner()).unwrap() as i32;
let ptr_reg = self
.storage_manager
@ -3111,13 +3109,10 @@ impl<
tag_id: TagIdIntType,
reuse: Option<Symbol>,
) {
let target_info = self.storage_manager.target_info;
let layout_interner: &mut STLayoutInterner<'a> = self.layout_interner;
let buf: &mut Vec<'a, u8> = &mut self.buf;
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, target_info);
let (data_size, data_alignment) = union_layout.data_size_and_alignment(layout_interner);
match union_layout {
UnionLayout::NonRecursive(field_layouts) => {

View file

@ -633,8 +633,7 @@ impl<
let (union_offset, _) = self.stack_offset_and_size(structure);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(layout_interner, self.target_info);
let (data_size, data_alignment) = union_layout.data_size_and_alignment(layout_interner);
let id_offset = data_size - data_alignment;
let discriminant = union_layout.discriminant();

View file

@ -1656,8 +1656,7 @@ fn build_tag<'a, 'ctx>(
let data_layout_repr = LayoutRepr::Struct(tags[tag_id as usize]);
let data = RocStruct::build(env, layout_interner, data_layout_repr, scope, arguments);
let roc_union =
RocUnion::tagged_from_slices(layout_interner, env.context, tags, env.target_info);
let roc_union = RocUnion::tagged_from_slices(layout_interner, env.context, tags);
let tag_alloca = env
.builder
@ -1765,12 +1764,8 @@ fn build_tag<'a, 'ctx>(
nullable_id,
other_fields,
} => {
let roc_union = RocUnion::untagged_from_slices(
layout_interner,
env.context,
&[other_fields],
env.target_info,
);
let roc_union =
RocUnion::untagged_from_slices(layout_interner, env.context, &[other_fields]);
if tag_id == *nullable_id as _ {
let output_type = roc_union.struct_type().ptr_type(AddressSpace::default());
@ -2127,9 +2122,9 @@ fn reserve_with_refcount_union_as_block_of_memory<'a, 'ctx>(
let ptr_bytes = env.target_info;
let roc_union = if union_layout.stores_tag_id_as_data(ptr_bytes) {
RocUnion::tagged_from_slices(layout_interner, env.context, fields, env.target_info)
RocUnion::tagged_from_slices(layout_interner, env.context, fields)
} else {
RocUnion::untagged_from_slices(layout_interner, env.context, fields, env.target_info)
RocUnion::untagged_from_slices(layout_interner, env.context, fields)
};
reserve_with_refcount_help(
@ -2709,8 +2704,7 @@ pub(crate) fn build_exp_stmt<'a, 'ctx>(
LayoutRepr::Builtin(Builtin::List(element_layout)) => {
debug_assert!(value.is_struct_value());
let element_layout = layout_interner.get_repr(element_layout);
let alignment =
element_layout.alignment_bytes(layout_interner, env.target_info);
let alignment = element_layout.alignment_bytes(layout_interner);
build_list::decref(env, value.into_struct_value(), alignment);
}

View file

@ -16,7 +16,7 @@ use super::struct_::RocStruct;
pub fn basic_type_from_layout<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout_interner: &'env STLayoutInterner<'a>,
layout_interner: &STLayoutInterner<'a>,
layout: LayoutRepr<'_>,
) -> BasicTypeEnum<'ctx> {
use LayoutRepr::*;
@ -81,32 +81,25 @@ pub fn struct_type_from_union_layout<'a, 'ctx>(
match union_layout {
NonRecursive([]) => env.context.struct_type(&[], false),
NonRecursive(tags) => {
RocUnion::tagged_from_slices(layout_interner, env.context, tags, env.target_info)
.struct_type()
RocUnion::tagged_from_slices(layout_interner, env.context, tags).struct_type()
}
Recursive(tags)
| NullableWrapped {
other_tags: tags, ..
} => {
if union_layout.stores_tag_id_as_data(env.target_info) {
RocUnion::tagged_from_slices(layout_interner, env.context, tags, env.target_info)
.struct_type()
RocUnion::tagged_from_slices(layout_interner, env.context, tags).struct_type()
} else {
RocUnion::untagged_from_slices(layout_interner, env.context, tags, env.target_info)
.struct_type()
RocUnion::untagged_from_slices(layout_interner, env.context, tags).struct_type()
}
}
NullableUnwrapped { other_fields, .. } => RocUnion::untagged_from_slices(
layout_interner,
env.context,
&[other_fields],
env.target_info,
)
.struct_type(),
NonNullableUnwrapped(fields) => {
RocUnion::untagged_from_slices(layout_interner, env.context, &[fields], env.target_info)
NullableUnwrapped { other_fields, .. } => {
RocUnion::untagged_from_slices(layout_interner, env.context, &[other_fields])
.struct_type()
}
NonNullableUnwrapped(fields) => {
RocUnion::untagged_from_slices(layout_interner, env.context, &[fields]).struct_type()
}
}
}
@ -289,7 +282,6 @@ impl<'ctx> RocUnion<'ctx> {
fn new(
context: &'ctx Context,
_target_info: TargetInfo,
data_align: u32,
data_width: u32,
tag_type: Option<TagType>,
@ -353,7 +345,6 @@ impl<'ctx> RocUnion<'ctx> {
interner: &STLayoutInterner,
context: &'ctx Context,
layouts: &[&[InLayout<'_>]],
target_info: TargetInfo,
) -> Self {
let tag_type = match layouts.len() {
0 => unreachable!("zero-element tag union is not represented as a RocUnion"),
@ -361,22 +352,19 @@ impl<'ctx> RocUnion<'ctx> {
_ => TagType::I16,
};
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
let (data_width, data_align) = Layout::stack_size_and_alignment_slices(interner, layouts);
Self::new(context, target_info, data_align, data_width, Some(tag_type))
Self::new(context, data_align, data_width, Some(tag_type))
}
pub fn untagged_from_slices(
interner: &STLayoutInterner,
context: &'ctx Context,
layouts: &[&[InLayout<'_>]],
target_info: TargetInfo,
) -> Self {
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
let (data_width, data_align) = Layout::stack_size_and_alignment_slices(interner, layouts);
Self::new(context, target_info, data_align, data_width, None)
Self::new(context, data_align, data_width, None)
}
pub fn data_width(&self) -> u32 {

View file

@ -679,12 +679,7 @@ fn build_clone_tag_help<'a, 'ctx>(
let block = env.context.append_basic_block(parent, "tag_id_modify");
env.builder.position_at_end(block);
let roc_union = RocUnion::tagged_from_slices(
layout_interner,
env.context,
tags,
env.target_info,
);
let roc_union = RocUnion::tagged_from_slices(layout_interner, env.context, tags);
// load the tag payload (if any)
let payload_layout = LayoutRepr::struct_(field_layouts);
@ -760,8 +755,7 @@ fn build_clone_tag_help<'a, 'ctx>(
basic_type_from_layout(env, layout_interner, layout_interner.get_repr(layout));
let data = load_tag_data(env, layout_interner, union_layout, tag_value, basic_type);
let (width, _) =
union_layout.data_size_and_alignment(layout_interner, env.target_info);
let (width, _) = union_layout.data_size_and_alignment(layout_interner);
let cursors = Cursors {
offset: extra_offset,
@ -801,7 +795,7 @@ fn build_clone_tag_help<'a, 'ctx>(
let basic_type =
basic_type_from_layout(env, layout_interner, layout_interner.get_repr(layout));
let (width, _) = union_layout.data_size_and_alignment(layout_interner, env.target_info);
let (width, _) = union_layout.data_size_and_alignment(layout_interner);
let cursors = Cursors {
offset: extra_offset,
@ -862,8 +856,7 @@ fn build_clone_tag_help<'a, 'ctx>(
layout_interner.get_repr(layout),
);
let (width, _) =
union_layout.data_size_and_alignment(layout_interner, env.target_info);
let (width, _) = union_layout.data_size_and_alignment(layout_interner);
let cursors = Cursors {
offset: extra_offset,

View file

@ -805,9 +805,7 @@ fn modify_refcount_str_help<'a, 'ctx>(
let parent = fn_val;
let str_type = zig_str_type(env);
let str_wrapper = if LayoutRepr::Builtin(Builtin::Str)
.is_passed_by_reference(layout_interner, env.target_info)
{
let str_wrapper = if LayoutRepr::Builtin(Builtin::Str).is_passed_by_reference(layout_interner) {
env.builder
.new_build_load(str_type, arg_val.into_pointer_value(), "load_str_to_stack")
} else {

View file

@ -55,7 +55,7 @@ impl<'ctx> RocStruct<'ctx> {
struct_val,
} = build_struct_helper(env, layout_interner, scope, sorted_fields);
let passed_by_ref = layout_repr.is_passed_by_reference(layout_interner, env.target_info);
let passed_by_ref = layout_repr.is_passed_by_reference(layout_interner);
if passed_by_ref {
let alloca = env.builder.build_alloca(struct_type, "struct_alloca");

View file

@ -1636,7 +1636,7 @@ impl<'a, 'r> WasmBackend<'a, 'r> {
let stores_tag_id_as_data = union_layout.stores_tag_id_as_data(TARGET_INFO);
let stores_tag_id_in_pointer = union_layout.stores_tag_id_in_pointer(TARGET_INFO);
let (data_size, data_alignment) =
union_layout.data_size_and_alignment(self.layout_interner, TARGET_INFO);
union_layout.data_size_and_alignment(self.layout_interner);
// We're going to use the pointer many times, so put it in a local variable
let stored_with_local =
@ -1688,10 +1688,7 @@ impl<'a, 'r> WasmBackend<'a, 'r> {
// Store the tag ID (if any)
if stores_tag_id_as_data {
let id_offset = data_offset
+ union_layout
.tag_id_offset(self.layout_interner, TARGET_INFO)
.unwrap();
let id_offset = data_offset + union_layout.tag_id_offset(self.layout_interner).unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
let id_align = Align::from(id_align);
@ -1774,9 +1771,7 @@ impl<'a, 'r> WasmBackend<'a, 'r> {
};
if union_layout.stores_tag_id_as_data(TARGET_INFO) {
let id_offset = union_layout
.tag_id_offset(self.layout_interner, TARGET_INFO)
.unwrap();
let id_offset = union_layout.tag_id_offset(self.layout_interner).unwrap();
let id_align = union_layout.discriminant().alignment_bytes();
let id_align = Align::from(id_align);

View file

@ -12,7 +12,7 @@ use roc_mono::low_level::HigherOrder;
use crate::backend::{ProcLookupData, ProcSource, WasmBackend};
use crate::layout::{CallConv, StackMemoryFormat, WasmLayout};
use crate::storage::{AddressValue, StackMemoryLocation, StoredValue};
use crate::{PTR_TYPE, TARGET_INFO};
use crate::PTR_TYPE;
use roc_wasm_module::{Align, LocalId, ValueType};
/// Number types used for Wasm code gen
@ -398,14 +398,13 @@ impl<'a> LowLevelCall<'a> {
{
let list_offset = 0;
let elem_offset = LayoutRepr::Builtin(Builtin::List(list_elem))
.stack_size(backend.layout_interner, TARGET_INFO);
.stack_size(backend.layout_interner);
(list_offset, elem_offset, f2)
}
(_, LayoutRepr::Builtin(Builtin::List(list_elem)))
if l1 == backend.layout_interner.get_repr(list_elem) =>
{
let list_offset =
l1.stack_size(backend.layout_interner, TARGET_INFO);
let list_offset = l1.stack_size(backend.layout_interner);
let elem_offset = 0;
(list_offset, elem_offset, f1)
}
@ -468,7 +467,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout_raw);
let elem_layout = backend.layout_interner.get_repr(elem_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner);
// Zig arguments Wasm types
// (return pointer) i32
@ -507,7 +506,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout_raw);
let elem_layout = backend.layout_interner.get_repr(elem_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner);
backend.code_builder.i32_const(elem_align as i32);
backend.code_builder.i32_const(elem_width as i32);
@ -523,7 +522,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout_raw);
let elem_layout = backend.layout_interner.get_repr(elem_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner);
// Zig arguments Wasm types
// (return pointer) i32
@ -564,7 +563,7 @@ impl<'a> LowLevelCall<'a> {
let elem_layout = unwrap_list_elem_layout(self.ret_layout_raw);
let elem_layout = backend.layout_interner.get_repr(elem_layout);
let (elem_width, elem_align) =
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner);
// Zig arguments Wasm types
// (return pointer) i32
@ -1692,10 +1691,8 @@ impl<'a> LowLevelCall<'a> {
// In most languages this operation is for signed numbers, but Roc defines it on all integers.
// So the argument is implicitly converted to signed before the shift operator.
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout_raw
.stack_size(backend.layout_interner, TARGET_INFO)
as i32;
let bit_width =
8 * self.ret_layout_raw.stack_size(backend.layout_interner) as i32;
if bit_width < 32 && !symbol_is_signed_int(backend, num) {
// Sign-extend the number by shifting left and right again
backend
@ -1741,9 +1738,7 @@ impl<'a> LowLevelCall<'a> {
// In most languages this operation is for unsigned numbers, but Roc defines it on all integers.
// So the argument is implicitly converted to unsigned before the shift operator.
// We need to make that conversion explicit for i8 and i16, which use Wasm's i32 type.
let bit_width = 8 * self
.ret_layout_raw
.stack_size(backend.layout_interner, TARGET_INFO);
let bit_width = 8 * self.ret_layout_raw.stack_size(backend.layout_interner);
if bit_width < 32 && symbol_is_signed_int(backend, num) {
let mask = (1 << bit_width) - 1;
@ -2561,7 +2556,7 @@ pub fn call_higher_order_lowlevel<'a>(
);
let elem_layout = backend.layout_interner.get_repr(elem_layout);
let (element_width, alignment) =
elem_layout.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_layout.stack_size_and_alignment(backend.layout_interner);
let cb = &mut backend.code_builder;
@ -2630,7 +2625,7 @@ fn list_map_n<'a>(
let elem_ret = unwrap_list_elem_layout(backend.layout_interner.get_repr(return_layout));
let elem_ret = backend.layout_interner.get_repr(elem_ret);
let (elem_ret_size, elem_ret_align) =
elem_ret.stack_size_and_alignment(backend.layout_interner, TARGET_INFO);
elem_ret.stack_size_and_alignment(backend.layout_interner);
let cb = &mut backend.code_builder;

View file

@ -710,7 +710,7 @@ fn eq_list<'a>(
let size_expr = Expr::Literal(Literal::Int(
(layout_interner
.get_repr(elem_layout)
.stack_size(layout_interner, root.target_info) as i128)
.stack_size(layout_interner) as i128)
.to_ne_bytes(),
));
let size_stmt = |next| Stmt::Let(size, size_expr, layout_isize, next);

View file

@ -1687,8 +1687,7 @@ fn refcount_union_rec<'a>(
};
let rc_structure_stmt = {
let alignment = LayoutRepr::Union(union_layout)
.allocation_alignment_bytes(layout_interner, root.target_info);
let alignment = LayoutRepr::Union(union_layout).allocation_alignment_bytes(layout_interner);
let ret_stmt = rc_return_stmt(root, ident_ids, ctx);
modify_refcount(

View file

@ -3505,15 +3505,13 @@ fn specialize_proc_help<'a>(
env.arena,
);
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout_cache
.get_repr(**layout1)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
let size2 = layout_cache
.get_repr(**layout2)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
size2.cmp(&size1)
});
@ -3553,15 +3551,13 @@ fn specialize_proc_help<'a>(
env.arena,
);
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout_cache
.get_repr(**layout1)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
let size2 = layout_cache
.get_repr(**layout2)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
size2.cmp(&size1)
});
@ -5986,15 +5982,13 @@ where
combined.push((*symbol, layout))
}
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout_cache
.get_repr(**layout1)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
let size2 = layout_cache
.get_repr(**layout2)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
size2.cmp(&size1)
});
@ -6020,15 +6014,13 @@ where
combined.push((*symbol, layout))
}
let ptr_bytes = env.target_info;
combined.sort_by(|(_, layout1), (_, layout2)| {
let size1 = layout_cache
.get_repr(**layout1)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
let size2 = layout_cache
.get_repr(**layout2)
.alignment_bytes(&layout_cache.interner, ptr_bytes);
.alignment_bytes(&layout_cache.interner);
size2.cmp(&size1)
});
@ -6513,7 +6505,7 @@ fn sorted_field_symbols<'a>(
let alignment = layout_cache
.get_repr(layout)
.alignment_bytes(&layout_cache.interner, env.target_info);
.alignment_bytes(&layout_cache.interner);
let symbol = possible_reuse_symbol_or_specialize(env, procs, layout_cache, &arg.value, var);
field_symbols_temp.push((alignment, symbol, ((var, arg), &*env.arena.alloc(symbol))));

View file

@ -972,55 +972,46 @@ impl<'a> UnionLayout<'a> {
}
}
fn tags_alignment_bytes<I>(
interner: &I,
tags: &[&'a [InLayout<'a>]],
target_info: TargetInfo,
) -> u32
fn tags_alignment_bytes<I>(interner: &I, tags: &[&'a [InLayout<'a>]]) -> u32
where
I: LayoutInterner<'a>,
{
tags.iter()
.map(|field_layouts| {
LayoutRepr::struct_(field_layouts).alignment_bytes(interner, target_info)
})
.map(|field_layouts| LayoutRepr::struct_(field_layouts).alignment_bytes(interner))
.max()
.unwrap_or(0)
}
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
let allocation = match self {
UnionLayout::NonRecursive(tags) => {
Self::tags_alignment_bytes(interner, tags, target_info)
}
UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags, target_info),
UnionLayout::NonRecursive(tags) => Self::tags_alignment_bytes(interner, tags),
UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags),
UnionLayout::NonNullableUnwrapped(field_layouts) => {
LayoutRepr::struct_(field_layouts).alignment_bytes(interner, target_info)
LayoutRepr::struct_(field_layouts).alignment_bytes(interner)
}
UnionLayout::NullableWrapped { other_tags, .. } => {
Self::tags_alignment_bytes(interner, other_tags, target_info)
Self::tags_alignment_bytes(interner, other_tags)
}
UnionLayout::NullableUnwrapped { other_fields, .. } => {
LayoutRepr::struct_(other_fields).alignment_bytes(interner, target_info)
LayoutRepr::struct_(other_fields).alignment_bytes(interner)
}
};
// because we store a refcount, the alignment must be at least the size of a pointer
allocation.max(target_info.ptr_width() as u32)
allocation.max(interner.target_info().ptr_width() as u32)
}
/// Size of the data in memory, whether it's stack or heap (for non-null tag ids)
pub fn data_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
pub fn data_size_and_alignment<I>(&self, interner: &I) -> (u32, u32)
where
I: LayoutInterner<'a>,
{
let (data_width, data_align) =
self.data_size_and_alignment_help_match(interner, target_info);
let (data_width, data_align) = self.data_size_and_alignment_help_match(interner);
if self.stores_tag_id_as_data(target_info) {
if self.stores_tag_id_as_data(interner.target_info()) {
use Discriminant::*;
match self.discriminant() {
U0 => (round_up_to_alignment(data_width, data_align), data_align),
@ -1046,48 +1037,37 @@ impl<'a> UnionLayout<'a> {
/// Size of the data before the tag_id, if it exists.
/// Returns None if the tag_id is not stored as data in the layout.
pub fn data_size_without_tag_id<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
pub fn data_size_without_tag_id<I>(&self, interner: &I) -> Option<u32>
where
I: LayoutInterner<'a>,
{
if !self.stores_tag_id_as_data(target_info) {
if !self.stores_tag_id_as_data(interner.target_info()) {
return None;
};
Some(
self.data_size_and_alignment_help_match(interner, target_info)
.0,
)
Some(self.data_size_and_alignment_help_match(interner).0)
}
fn data_size_and_alignment_help_match<I>(
&self,
interner: &I,
target_info: TargetInfo,
) -> (u32, u32)
fn data_size_and_alignment_help_match<I>(&self, interner: &I) -> (u32, u32)
where
I: LayoutInterner<'a>,
{
match self {
Self::NonRecursive(tags) => {
Layout::stack_size_and_alignment_slices(interner, tags, target_info)
}
Self::Recursive(tags) => {
Layout::stack_size_and_alignment_slices(interner, tags, target_info)
}
Self::NonRecursive(tags) => Layout::stack_size_and_alignment_slices(interner, tags),
Self::Recursive(tags) => Layout::stack_size_and_alignment_slices(interner, tags),
Self::NonNullableUnwrapped(fields) => {
Layout::stack_size_and_alignment_slices(interner, &[fields], target_info)
Layout::stack_size_and_alignment_slices(interner, &[fields])
}
Self::NullableWrapped { other_tags, .. } => {
Layout::stack_size_and_alignment_slices(interner, other_tags, target_info)
Layout::stack_size_and_alignment_slices(interner, other_tags)
}
Self::NullableUnwrapped { other_fields, .. } => {
Layout::stack_size_and_alignment_slices(interner, &[other_fields], target_info)
Layout::stack_size_and_alignment_slices(interner, &[other_fields])
}
}
}
pub fn tag_id_offset<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
pub fn tag_id_offset<I>(&self, interner: &I) -> Option<u32>
where
I: LayoutInterner<'a>,
{
@ -1096,39 +1076,34 @@ impl<'a> UnionLayout<'a> {
| UnionLayout::Recursive(tags)
| UnionLayout::NullableWrapped {
other_tags: tags, ..
} => Some(Self::tag_id_offset_help(interner, tags, target_info)),
} => Some(Self::tag_id_offset_help(interner, tags)),
UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => None,
}
}
fn tag_id_offset_help<I>(
interner: &I,
layouts: &[&[InLayout<'a>]],
target_info: TargetInfo,
) -> u32
fn tag_id_offset_help<I>(interner: &I, layouts: &[&[InLayout<'a>]]) -> u32
where
I: LayoutInterner<'a>,
{
let (data_width, data_align) =
Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
let (data_width, data_align) = Layout::stack_size_and_alignment_slices(interner, layouts);
round_up_to_alignment(data_width, data_align)
}
/// Very important to use this when doing a memcpy!
fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
fn stack_size_without_alignment<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
match self {
UnionLayout::NonRecursive(_) => {
let (width, align) = self.data_size_and_alignment(interner, target_info);
let (width, align) = self.data_size_and_alignment(interner);
round_up_to_alignment(width, align)
}
UnionLayout::Recursive(_)
| UnionLayout::NonNullableUnwrapped(_)
| UnionLayout::NullableWrapped { .. }
| UnionLayout::NullableUnwrapped { .. } => target_info.ptr_width() as u32,
| UnionLayout::NullableUnwrapped { .. } => interner.target_info().ptr_width() as u32,
}
}
}
@ -1935,13 +1910,11 @@ impl<'a> LambdaSet<'a> {
}
}
pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn stack_size<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
interner
.get_repr(self.representation)
.stack_size(interner, target_info)
interner.get_repr(self.representation).stack_size(interner)
}
pub fn contains_refcounted<I>(&self, interner: &I) -> bool
where
@ -1960,13 +1933,13 @@ impl<'a> LambdaSet<'a> {
.safe_to_memcpy(interner)
}
pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn alignment_bytes<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
interner
.get_repr(self.representation)
.alignment_bytes(interner, target_info)
.alignment_bytes(interner)
}
}
@ -2475,7 +2448,6 @@ impl<'a> Layout<'a> {
pub fn stack_size_and_alignment_slices<I>(
interner: &I,
slices: &[&[InLayout<'a>]],
target_info: TargetInfo,
) -> (u32, u32)
where
I: LayoutInterner<'a>,
@ -2488,7 +2460,7 @@ impl<'a> Layout<'a> {
for layout in tag.iter() {
let (stack_size, alignment) = interner
.get_repr(*layout)
.stack_size_and_alignment(interner, target_info);
.stack_size_and_alignment(interner);
total += stack_size;
data_align = data_align.max(alignment);
}
@ -2597,7 +2569,7 @@ impl<'a> LayoutRepr<'a> {
false // TODO this should use is_zero_sized once doing so doesn't break things!
}
pub fn is_passed_by_reference<I>(&self, interner: &I, target_info: TargetInfo) -> bool
pub fn is_passed_by_reference<I>(&self, interner: &I) -> bool
where
I: LayoutInterner<'a>,
{
@ -2605,7 +2577,7 @@ impl<'a> LayoutRepr<'a> {
LayoutRepr::Builtin(builtin) => {
use Builtin::*;
match target_info.ptr_width() {
match interner.target_info().ptr_width() {
PtrWidth::Bytes4 => {
// more things fit into a register
false
@ -2619,67 +2591,65 @@ impl<'a> LayoutRepr<'a> {
LayoutRepr::Union(UnionLayout::NonRecursive(_)) => true,
LayoutRepr::Struct(_) => {
// TODO: write tests for this!
self.stack_size(interner, target_info) as usize > target_info.max_by_value_size()
self.stack_size(interner) as usize > interner.target_info().max_by_value_size()
}
LayoutRepr::LambdaSet(lambda_set) => interner
.get_repr(lambda_set.runtime_representation())
.is_passed_by_reference(interner, target_info),
.is_passed_by_reference(interner),
_ => false,
}
}
pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn stack_size<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
let width = self.stack_size_without_alignment(interner, target_info);
let alignment = self.alignment_bytes(interner, target_info);
let width = self.stack_size_without_alignment(interner);
let alignment = self.alignment_bytes(interner);
round_up_to_alignment(width, alignment)
}
pub fn stack_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
pub fn stack_size_and_alignment<I>(&self, interner: &I) -> (u32, u32)
where
I: LayoutInterner<'a>,
{
let width = self.stack_size_without_alignment(interner, target_info);
let alignment = self.alignment_bytes(interner, target_info);
let width = self.stack_size_without_alignment(interner);
let alignment = self.alignment_bytes(interner);
let size = round_up_to_alignment(width, alignment);
(size, alignment)
}
/// Very important to use this when doing a memcpy!
pub fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn stack_size_without_alignment<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
use LayoutRepr::*;
match self {
Builtin(builtin) => builtin.stack_size(target_info),
Builtin(builtin) => builtin.stack_size(interner.target_info()),
Struct(field_layouts) => {
let mut sum = 0;
for field_layout in *field_layouts {
sum += interner
.get_repr(*field_layout)
.stack_size(interner, target_info);
sum += interner.get_repr(*field_layout).stack_size(interner);
}
sum
}
Union(variant) => variant.stack_size_without_alignment(interner, target_info),
Union(variant) => variant.stack_size_without_alignment(interner),
LambdaSet(lambda_set) => interner
.get_repr(lambda_set.runtime_representation())
.stack_size_without_alignment(interner, target_info),
RecursivePointer(_) => target_info.ptr_width() as u32,
Boxed(_) => target_info.ptr_width() as u32,
.stack_size_without_alignment(interner),
RecursivePointer(_) => interner.target_info().ptr_width() as u32,
Boxed(_) => interner.target_info().ptr_width() as u32,
}
}
pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn alignment_bytes<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
@ -2687,7 +2657,7 @@ impl<'a> LayoutRepr<'a> {
match self {
Struct(field_layouts) => field_layouts
.iter()
.map(|x| interner.get_repr(*x).alignment_bytes(interner, target_info))
.map(|x| interner.get_repr(*x).alignment_bytes(interner))
.max()
.unwrap_or(0),
@ -2700,9 +2670,7 @@ impl<'a> LayoutRepr<'a> {
.iter()
.flat_map(|layouts| {
layouts.iter().map(|layout| {
interner
.get_repr(*layout)
.alignment_bytes(interner, target_info)
interner.get_repr(*layout).alignment_bytes(interner)
})
})
.max();
@ -2722,40 +2690,38 @@ impl<'a> LayoutRepr<'a> {
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => target_info.ptr_width() as u32,
| NonNullableUnwrapped(_) => interner.target_info().ptr_width() as u32,
}
}
LambdaSet(lambda_set) => interner
.get_repr(lambda_set.runtime_representation())
.alignment_bytes(interner, target_info),
Builtin(builtin) => builtin.alignment_bytes(target_info),
RecursivePointer(_) => target_info.ptr_width() as u32,
Boxed(_) => target_info.ptr_width() as u32,
.alignment_bytes(interner),
Builtin(builtin) => builtin.alignment_bytes(interner.target_info()),
RecursivePointer(_) => interner.target_info().ptr_width() as u32,
Boxed(_) => interner.target_info().ptr_width() as u32,
}
}
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
let ptr_width = target_info.ptr_width() as u32;
let ptr_width = interner.target_info().ptr_width() as u32;
use LayoutRepr::*;
match self {
Builtin(builtin) => builtin.allocation_alignment_bytes(interner, target_info),
Struct { .. } => self.alignment_bytes(interner, target_info).max(ptr_width),
Union(union_layout) => union_layout.allocation_alignment_bytes(interner, target_info),
Builtin(builtin) => builtin.allocation_alignment_bytes(interner),
Struct { .. } => self.alignment_bytes(interner).max(ptr_width),
Union(union_layout) => union_layout.allocation_alignment_bytes(interner),
LambdaSet(lambda_set) => interner
.get_repr(lambda_set.runtime_representation())
.allocation_alignment_bytes(interner, target_info),
.allocation_alignment_bytes(interner),
RecursivePointer(_) => {
unreachable!("should be looked up to get an actual layout")
}
Boxed(inner) => Ord::max(
ptr_width,
interner
.get_repr(*inner)
.alignment_bytes(interner, target_info),
interner.get_repr(*inner).alignment_bytes(interner),
),
}
}
@ -3073,17 +3039,18 @@ impl<'a> Builtin<'a> {
}
}
pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
where
I: LayoutInterner<'a>,
{
let target_info = interner.target_info();
let ptr_width = target_info.ptr_width() as u32;
let allocation = match self {
Builtin::Str => ptr_width,
Builtin::List(e) => {
let e = interner.get_repr(*e);
e.alignment_bytes(interner, target_info).max(ptr_width)
e.alignment_bytes(interner).max(ptr_width)
}
// The following are usually not heap-allocated, but they might be when inside a Box.
Builtin::Int(int_width) => int_width.alignment_bytes(target_info).max(ptr_width),
@ -3256,14 +3223,7 @@ fn layout_from_flat_type<'a>(
}
sortables.sort_by(|(label1, layout1), (label2, layout2)| {
cmp_fields(
&env.cache.interner,
label1,
*layout1,
label2,
*layout2,
target_info,
)
cmp_fields(&env.cache.interner, label1, *layout1, label2, *layout2)
});
let ordered_field_names = Vec::from_iter_in(
@ -3305,14 +3265,7 @@ fn layout_from_flat_type<'a>(
}
sortables.sort_by(|(index1, layout1), (index2, layout2)| {
cmp_fields(
&env.cache.interner,
index1,
*layout1,
index2,
*layout2,
target_info,
)
cmp_fields(&env.cache.interner, index1, *layout1, index2, *layout2)
});
let result = if sortables.len() == 1 {
@ -3383,8 +3336,6 @@ fn sort_tuple_elems_help<'a>(
env: &mut Env<'a, '_>,
elems_map: impl Iterator<Item = (usize, Variable)>,
) -> Result<Vec<'a, SortedTupleElem<'a>>, LayoutProblem> {
let target_info = env.target_info;
let mut sorted_elems = Vec::with_capacity_in(elems_map.size_hint().0, env.arena);
for (index, elem) in elems_map {
@ -3400,7 +3351,6 @@ fn sort_tuple_elems_help<'a>(
*res_layout1,
index2,
*res_layout2,
target_info,
)
});
@ -3429,8 +3379,6 @@ fn sort_record_fields_help<'a>(
env: &mut Env<'a, '_>,
fields_map: impl Iterator<Item = (Lowercase, RecordField<Variable>)>,
) -> Result<Vec<'a, SortedField<'a>>, LayoutProblem> {
let target_info = env.target_info;
// Sort the fields by label
let mut sorted_fields = Vec::with_capacity_in(fields_map.size_hint().0, env.arena);
@ -3452,14 +3400,9 @@ fn sort_record_fields_help<'a>(
sorted_fields.sort_by(
|(label1, _, res_layout1), (label2, _, res_layout2)| match res_layout1 {
Ok(layout1) | Err(layout1) => match res_layout2 {
Ok(layout2) | Err(layout2) => cmp_fields(
&env.cache.interner,
label1,
*layout1,
label2,
*layout2,
target_info,
),
Ok(layout2) | Err(layout2) => {
cmp_fields(&env.cache.interner, label1, *layout1, label2, *layout2)
}
},
},
);
@ -3759,11 +3702,11 @@ where
let size1 = env
.cache
.get_repr(*layout1)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
let size2 = env
.cache
.get_repr(*layout2)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
size2.cmp(&size1)
});
@ -3821,11 +3764,11 @@ where
let size1 = env
.cache
.get_repr(*layout1)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
let size2 = env
.cache
.get_repr(*layout2)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
size2.cmp(&size1)
});
@ -4033,11 +3976,11 @@ where
let size1 = env
.cache
.get_repr(*layout1)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
let size2 = env
.cache
.get_repr(*layout2)
.alignment_bytes(&env.cache.interner, env.target_info);
.alignment_bytes(&env.cache.interner);
size2.cmp(&size1)
});
@ -4624,17 +4567,12 @@ pub fn cmp_fields<'a, L: Ord, I>(
layout1: InLayout<'a>,
label2: &L,
layout2: InLayout<'a>,
target_info: TargetInfo,
) -> Ordering
where
I: LayoutInterner<'a>,
{
let size1 = interner
.get_repr(layout1)
.alignment_bytes(interner, target_info);
let size2 = interner
.get_repr(layout2)
.alignment_bytes(interner, target_info);
let size1 = interner.get_repr(layout1).alignment_bytes(interner);
let size2 = interner.get_repr(layout2).alignment_bytes(interner);
size2.cmp(&size1).then(label1.cmp(label2))
}
@ -4664,9 +4602,8 @@ mod test {
let repr = LayoutRepr::Union(UnionLayout::NonRecursive(&tt));
let target_info = TargetInfo::default_x86_64();
assert_eq!(repr.stack_size(&interner, target_info), 1);
assert_eq!(repr.alignment_bytes(&interner, target_info), 1);
assert_eq!(repr.stack_size(&interner), 1);
assert_eq!(repr.alignment_bytes(&interner), 1);
}
#[test]
@ -4682,20 +4619,13 @@ mod test {
let union_layout = UnionLayout::NonRecursive(&tags as &[_]);
let repr = LayoutRepr::Union(union_layout);
let target_info = TargetInfo::default_x86_64();
assert_eq!(repr.stack_size_without_alignment(&interner, target_info), 8);
assert_eq!(repr.stack_size_without_alignment(&interner), 8);
}
#[test]
fn void_stack_size() {
let interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
let target_info = TargetInfo::default_x86_64();
assert_eq!(
Layout::VOID_NAKED
.repr(&interner)
.stack_size(&interner, target_info),
0
);
assert_eq!(Layout::VOID_NAKED.repr(&interner).stack_size(&interner), 0);
}
#[test]

View file

@ -203,27 +203,23 @@ pub trait LayoutInterner<'a>: Sized {
fn target_info(&self) -> TargetInfo;
fn alignment_bytes(&self, layout: InLayout<'a>) -> u32 {
self.get_repr(layout)
.alignment_bytes(self, self.target_info())
self.get_repr(layout).alignment_bytes(self)
}
fn allocation_alignment_bytes(&self, layout: InLayout<'a>) -> u32 {
self.get_repr(layout)
.allocation_alignment_bytes(self, self.target_info())
self.get_repr(layout).allocation_alignment_bytes(self)
}
fn stack_size(&self, layout: InLayout<'a>) -> u32 {
self.get_repr(layout).stack_size(self, self.target_info())
self.get_repr(layout).stack_size(self)
}
fn stack_size_and_alignment(&self, layout: InLayout<'a>) -> (u32, u32) {
self.get_repr(layout)
.stack_size_and_alignment(self, self.target_info())
self.get_repr(layout).stack_size_and_alignment(self)
}
fn stack_size_without_alignment(&self, layout: InLayout<'a>) -> u32 {
self.get_repr(layout)
.stack_size_without_alignment(self, self.target_info())
self.get_repr(layout).stack_size_without_alignment(self)
}
fn contains_refcounted(&self, layout: InLayout<'a>) -> bool {
@ -235,8 +231,7 @@ pub trait LayoutInterner<'a>: Sized {
}
fn is_passed_by_reference(&self, layout: InLayout<'a>) -> bool {
self.get_repr(layout)
.is_passed_by_reference(self, self.target_info())
self.get_repr(layout).is_passed_by_reference(self)
}
fn runtime_representation(&self, layout: InLayout<'a>) -> LayoutRepr<'a> {

View file

@ -1314,8 +1314,7 @@ fn get_reuse_layout_info<'a, 'i>(
layout_interner: &'i STLayoutInterner<'a>,
union_layout: UnionLayout<'a>,
) -> TokenLayout {
let (size, alignment) =
union_layout.data_size_and_alignment(layout_interner, layout_interner.target_info());
let (size, alignment) = union_layout.data_size_and_alignment(layout_interner);
let has_tag = match union_layout {
UnionLayout::NonRecursive(_) => unreachable!("Non recursive unions should not be reused."),
// The memory for union layouts that has a tag_id can be reused for new allocations with tag_id.

View file

@ -29,8 +29,8 @@ fn width_and_alignment_u8_u8() {
let layout = LayoutRepr::Union(UnionLayout::NonRecursive(&tt));
assert_eq!(layout.alignment_bytes(&interner, target_info), 1);
assert_eq!(layout.stack_size(&interner, target_info), 2);
assert_eq!(layout.alignment_bytes(&interner), 1);
assert_eq!(layout.stack_size(&interner), 2);
}
#[test]