Get rid of passing target_info a bunch

Ayaz Hafiz 2023-06-16 21:38:30 -05:00
parent 8495f3b085
commit bc41db2a34
18 changed files with 154 additions and 287 deletions
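
The change applies one mechanical pattern throughout: instead of threading `TargetInfo` through every layout query as an extra argument, each function reads it from the interner, which is constructed for exactly one target and therefore already knows it. Below is a minimal sketch of that pattern using simplified stand-in types, not the actual compiler definitions; the only thing assumed from the real code is that the `LayoutInterner` trait exposes a `target_info()` accessor, which is what the new call sites in the diff rely on.

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct TargetInfo {
        ptr_width_bytes: u32,
    }

    impl TargetInfo {
        fn ptr_width(&self) -> u32 {
            self.ptr_width_bytes
        }
    }

    // Stand-in for the real trait; the diff assumes it exposes the target.
    trait LayoutInterner {
        fn target_info(&self) -> TargetInfo;
    }

    // Stand-in for an interner that is built for a single target.
    struct Interner {
        target_info: TargetInfo,
    }

    impl LayoutInterner for Interner {
        fn target_info(&self) -> TargetInfo {
            self.target_info
        }
    }

    // Before: the target is threaded through as a separate parameter.
    fn alignment_before<I: LayoutInterner>(_interner: &I, target_info: TargetInfo) -> u32 {
        target_info.ptr_width()
    }

    // After: the interner already carries the target, so the parameter goes away.
    fn alignment_after<I: LayoutInterner>(interner: &I) -> u32 {
        interner.target_info().ptr_width()
    }

    fn main() {
        let interner = Interner {
            target_info: TargetInfo { ptr_width_bytes: 8 },
        };
        // The two are equivalent precisely because an interner is created for
        // one target, so the dropped parameter could never disagree with it.
        assert_eq!(
            alignment_before(&interner, interner.target_info()),
            alignment_after(&interner)
        );
    }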

@@ -972,55 +972,46 @@ impl<'a> UnionLayout<'a> {
         }
     }
 
-    fn tags_alignment_bytes<I>(
-        interner: &I,
-        tags: &[&'a [InLayout<'a>]],
-        target_info: TargetInfo,
-    ) -> u32
+    fn tags_alignment_bytes<I>(interner: &I, tags: &[&'a [InLayout<'a>]]) -> u32
     where
         I: LayoutInterner<'a>,
     {
         tags.iter()
-            .map(|field_layouts| {
-                LayoutRepr::struct_(field_layouts).alignment_bytes(interner, target_info)
-            })
+            .map(|field_layouts| LayoutRepr::struct_(field_layouts).alignment_bytes(interner))
             .max()
             .unwrap_or(0)
     }
 
-    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
         let allocation = match self {
-            UnionLayout::NonRecursive(tags) => {
-                Self::tags_alignment_bytes(interner, tags, target_info)
-            }
-            UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags, target_info),
+            UnionLayout::NonRecursive(tags) => Self::tags_alignment_bytes(interner, tags),
+            UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags),
             UnionLayout::NonNullableUnwrapped(field_layouts) => {
-                LayoutRepr::struct_(field_layouts).alignment_bytes(interner, target_info)
+                LayoutRepr::struct_(field_layouts).alignment_bytes(interner)
             }
             UnionLayout::NullableWrapped { other_tags, .. } => {
-                Self::tags_alignment_bytes(interner, other_tags, target_info)
+                Self::tags_alignment_bytes(interner, other_tags)
             }
             UnionLayout::NullableUnwrapped { other_fields, .. } => {
-                LayoutRepr::struct_(other_fields).alignment_bytes(interner, target_info)
+                LayoutRepr::struct_(other_fields).alignment_bytes(interner)
            }
         };
 
         // because we store a refcount, the alignment must be at least the size of a pointer
-        allocation.max(target_info.ptr_width() as u32)
+        allocation.max(interner.target_info().ptr_width() as u32)
     }
 
     /// Size of the data in memory, whether it's stack or heap (for non-null tag ids)
-    pub fn data_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
+    pub fn data_size_and_alignment<I>(&self, interner: &I) -> (u32, u32)
     where
         I: LayoutInterner<'a>,
     {
-        let (data_width, data_align) =
-            self.data_size_and_alignment_help_match(interner, target_info);
+        let (data_width, data_align) = self.data_size_and_alignment_help_match(interner);
 
-        if self.stores_tag_id_as_data(target_info) {
+        if self.stores_tag_id_as_data(interner.target_info()) {
             use Discriminant::*;
             match self.discriminant() {
                 U0 => (round_up_to_alignment(data_width, data_align), data_align),
@@ -1046,48 +1037,37 @@ impl<'a> UnionLayout<'a> {
     /// Size of the data before the tag_id, if it exists.
     /// Returns None if the tag_id is not stored as data in the layout.
-    pub fn data_size_without_tag_id<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
+    pub fn data_size_without_tag_id<I>(&self, interner: &I) -> Option<u32>
     where
         I: LayoutInterner<'a>,
     {
-        if !self.stores_tag_id_as_data(target_info) {
+        if !self.stores_tag_id_as_data(interner.target_info()) {
             return None;
         };
 
-        Some(
-            self.data_size_and_alignment_help_match(interner, target_info)
-                .0,
-        )
+        Some(self.data_size_and_alignment_help_match(interner).0)
     }
 
-    fn data_size_and_alignment_help_match<I>(
-        &self,
-        interner: &I,
-        target_info: TargetInfo,
-    ) -> (u32, u32)
+    fn data_size_and_alignment_help_match<I>(&self, interner: &I) -> (u32, u32)
     where
         I: LayoutInterner<'a>,
     {
         match self {
-            Self::NonRecursive(tags) => {
-                Layout::stack_size_and_alignment_slices(interner, tags, target_info)
-            }
-            Self::Recursive(tags) => {
-                Layout::stack_size_and_alignment_slices(interner, tags, target_info)
-            }
+            Self::NonRecursive(tags) => Layout::stack_size_and_alignment_slices(interner, tags),
+            Self::Recursive(tags) => Layout::stack_size_and_alignment_slices(interner, tags),
             Self::NonNullableUnwrapped(fields) => {
-                Layout::stack_size_and_alignment_slices(interner, &[fields], target_info)
+                Layout::stack_size_and_alignment_slices(interner, &[fields])
             }
             Self::NullableWrapped { other_tags, .. } => {
-                Layout::stack_size_and_alignment_slices(interner, other_tags, target_info)
+                Layout::stack_size_and_alignment_slices(interner, other_tags)
             }
             Self::NullableUnwrapped { other_fields, .. } => {
-                Layout::stack_size_and_alignment_slices(interner, &[other_fields], target_info)
+                Layout::stack_size_and_alignment_slices(interner, &[other_fields])
             }
         }
     }
 
-    pub fn tag_id_offset<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
+    pub fn tag_id_offset<I>(&self, interner: &I) -> Option<u32>
     where
         I: LayoutInterner<'a>,
     {
@@ -1096,39 +1076,34 @@ impl<'a> UnionLayout<'a> {
             | UnionLayout::Recursive(tags)
             | UnionLayout::NullableWrapped {
                 other_tags: tags, ..
-            } => Some(Self::tag_id_offset_help(interner, tags, target_info)),
+            } => Some(Self::tag_id_offset_help(interner, tags)),
             UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => None,
         }
     }
 
-    fn tag_id_offset_help<I>(
-        interner: &I,
-        layouts: &[&[InLayout<'a>]],
-        target_info: TargetInfo,
-    ) -> u32
+    fn tag_id_offset_help<I>(interner: &I, layouts: &[&[InLayout<'a>]]) -> u32
     where
         I: LayoutInterner<'a>,
     {
-        let (data_width, data_align) =
-            Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
+        let (data_width, data_align) = Layout::stack_size_and_alignment_slices(interner, layouts);
 
         round_up_to_alignment(data_width, data_align)
     }
 
     /// Very important to use this when doing a memcpy!
-    fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    fn stack_size_without_alignment<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
         match self {
             UnionLayout::NonRecursive(_) => {
-                let (width, align) = self.data_size_and_alignment(interner, target_info);
+                let (width, align) = self.data_size_and_alignment(interner);
                 round_up_to_alignment(width, align)
             }
             UnionLayout::Recursive(_)
             | UnionLayout::NonNullableUnwrapped(_)
             | UnionLayout::NullableWrapped { .. }
-            | UnionLayout::NullableUnwrapped { .. } => target_info.ptr_width() as u32,
+            | UnionLayout::NullableUnwrapped { .. } => interner.target_info().ptr_width() as u32,
         }
     }
 }
@@ -1935,13 +1910,11 @@ impl<'a> LambdaSet<'a> {
         }
     }
 
-    pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn stack_size<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
-        interner
-            .get_repr(self.representation)
-            .stack_size(interner, target_info)
+        interner.get_repr(self.representation).stack_size(interner)
     }
 
     pub fn contains_refcounted<I>(&self, interner: &I) -> bool
     where
@@ -1960,13 +1933,13 @@ impl<'a> LambdaSet<'a> {
             .safe_to_memcpy(interner)
     }
 
-    pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn alignment_bytes<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
         interner
             .get_repr(self.representation)
-            .alignment_bytes(interner, target_info)
+            .alignment_bytes(interner)
     }
 }
@@ -2475,7 +2448,6 @@ impl<'a> Layout<'a> {
     pub fn stack_size_and_alignment_slices<I>(
         interner: &I,
         slices: &[&[InLayout<'a>]],
-        target_info: TargetInfo,
     ) -> (u32, u32)
     where
         I: LayoutInterner<'a>,
@@ -2488,7 +2460,7 @@ impl<'a> Layout<'a> {
             for layout in tag.iter() {
                 let (stack_size, alignment) = interner
                     .get_repr(*layout)
-                    .stack_size_and_alignment(interner, target_info);
+                    .stack_size_and_alignment(interner);
                 total += stack_size;
                 data_align = data_align.max(alignment);
             }
@@ -2597,7 +2569,7 @@ impl<'a> LayoutRepr<'a> {
         false // TODO this should use is_zero_sized once doing so doesn't break things!
     }
 
-    pub fn is_passed_by_reference<I>(&self, interner: &I, target_info: TargetInfo) -> bool
+    pub fn is_passed_by_reference<I>(&self, interner: &I) -> bool
     where
         I: LayoutInterner<'a>,
     {
@@ -2605,7 +2577,7 @@ impl<'a> LayoutRepr<'a> {
             LayoutRepr::Builtin(builtin) => {
                 use Builtin::*;
 
-                match target_info.ptr_width() {
+                match interner.target_info().ptr_width() {
                     PtrWidth::Bytes4 => {
                         // more things fit into a register
                         false
@@ -2619,67 +2591,65 @@ impl<'a> LayoutRepr<'a> {
             LayoutRepr::Union(UnionLayout::NonRecursive(_)) => true,
             LayoutRepr::Struct(_) => {
                 // TODO: write tests for this!
-                self.stack_size(interner, target_info) as usize > target_info.max_by_value_size()
+                self.stack_size(interner) as usize > interner.target_info().max_by_value_size()
             }
             LayoutRepr::LambdaSet(lambda_set) => interner
                 .get_repr(lambda_set.runtime_representation())
-                .is_passed_by_reference(interner, target_info),
+                .is_passed_by_reference(interner),
             _ => false,
         }
     }
 
-    pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn stack_size<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
-        let width = self.stack_size_without_alignment(interner, target_info);
-        let alignment = self.alignment_bytes(interner, target_info);
+        let width = self.stack_size_without_alignment(interner);
+        let alignment = self.alignment_bytes(interner);
 
         round_up_to_alignment(width, alignment)
     }
 
-    pub fn stack_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
+    pub fn stack_size_and_alignment<I>(&self, interner: &I) -> (u32, u32)
     where
         I: LayoutInterner<'a>,
     {
-        let width = self.stack_size_without_alignment(interner, target_info);
-        let alignment = self.alignment_bytes(interner, target_info);
+        let width = self.stack_size_without_alignment(interner);
+        let alignment = self.alignment_bytes(interner);
 
         let size = round_up_to_alignment(width, alignment);
 
         (size, alignment)
     }
 
     /// Very important to use this when doing a memcpy!
-    pub fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn stack_size_without_alignment<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
         use LayoutRepr::*;
 
         match self {
-            Builtin(builtin) => builtin.stack_size(target_info),
+            Builtin(builtin) => builtin.stack_size(interner.target_info()),
             Struct(field_layouts) => {
                 let mut sum = 0;
 
                 for field_layout in *field_layouts {
-                    sum += interner
-                        .get_repr(*field_layout)
-                        .stack_size(interner, target_info);
+                    sum += interner.get_repr(*field_layout).stack_size(interner);
                 }
 
                 sum
             }
-            Union(variant) => variant.stack_size_without_alignment(interner, target_info),
+            Union(variant) => variant.stack_size_without_alignment(interner),
             LambdaSet(lambda_set) => interner
                 .get_repr(lambda_set.runtime_representation())
-                .stack_size_without_alignment(interner, target_info),
-            RecursivePointer(_) => target_info.ptr_width() as u32,
-            Boxed(_) => target_info.ptr_width() as u32,
+                .stack_size_without_alignment(interner),
+            RecursivePointer(_) => interner.target_info().ptr_width() as u32,
+            Boxed(_) => interner.target_info().ptr_width() as u32,
         }
     }
 
-    pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn alignment_bytes<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
@@ -2687,7 +2657,7 @@ impl<'a> LayoutRepr<'a> {
         match self {
             Struct(field_layouts) => field_layouts
                 .iter()
-                .map(|x| interner.get_repr(*x).alignment_bytes(interner, target_info))
+                .map(|x| interner.get_repr(*x).alignment_bytes(interner))
                 .max()
                 .unwrap_or(0),
@@ -2700,9 +2670,7 @@ impl<'a> LayoutRepr<'a> {
                     .iter()
                     .flat_map(|layouts| {
                         layouts.iter().map(|layout| {
-                            interner
-                                .get_repr(*layout)
-                                .alignment_bytes(interner, target_info)
+                            interner.get_repr(*layout).alignment_bytes(interner)
                         })
                     })
                     .max();
@@ -2722,40 +2690,38 @@ impl<'a> LayoutRepr<'a> {
                     Recursive(_)
                     | NullableWrapped { .. }
                     | NullableUnwrapped { .. }
-                    | NonNullableUnwrapped(_) => target_info.ptr_width() as u32,
+                    | NonNullableUnwrapped(_) => interner.target_info().ptr_width() as u32,
                 }
             }
             LambdaSet(lambda_set) => interner
                 .get_repr(lambda_set.runtime_representation())
-                .alignment_bytes(interner, target_info),
-            Builtin(builtin) => builtin.alignment_bytes(target_info),
-            RecursivePointer(_) => target_info.ptr_width() as u32,
-            Boxed(_) => target_info.ptr_width() as u32,
+                .alignment_bytes(interner),
+            Builtin(builtin) => builtin.alignment_bytes(interner.target_info()),
+            RecursivePointer(_) => interner.target_info().ptr_width() as u32,
+            Boxed(_) => interner.target_info().ptr_width() as u32,
         }
     }
 
-    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
-        let ptr_width = target_info.ptr_width() as u32;
+        let ptr_width = interner.target_info().ptr_width() as u32;
 
         use LayoutRepr::*;
         match self {
-            Builtin(builtin) => builtin.allocation_alignment_bytes(interner, target_info),
-            Struct { .. } => self.alignment_bytes(interner, target_info).max(ptr_width),
-            Union(union_layout) => union_layout.allocation_alignment_bytes(interner, target_info),
+            Builtin(builtin) => builtin.allocation_alignment_bytes(interner),
+            Struct { .. } => self.alignment_bytes(interner).max(ptr_width),
+            Union(union_layout) => union_layout.allocation_alignment_bytes(interner),
             LambdaSet(lambda_set) => interner
                 .get_repr(lambda_set.runtime_representation())
-                .allocation_alignment_bytes(interner, target_info),
+                .allocation_alignment_bytes(interner),
             RecursivePointer(_) => {
                 unreachable!("should be looked up to get an actual layout")
             }
             Boxed(inner) => Ord::max(
                 ptr_width,
-                interner
-                    .get_repr(*inner)
-                    .alignment_bytes(interner, target_info),
+                interner.get_repr(*inner).alignment_bytes(interner),
             ),
         }
     }
@@ -3073,17 +3039,18 @@ impl<'a> Builtin<'a> {
         }
     }
 
-    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I) -> u32
     where
         I: LayoutInterner<'a>,
     {
+        let target_info = interner.target_info();
         let ptr_width = target_info.ptr_width() as u32;
 
         let allocation = match self {
             Builtin::Str => ptr_width,
             Builtin::List(e) => {
                 let e = interner.get_repr(*e);
-                e.alignment_bytes(interner, target_info).max(ptr_width)
+                e.alignment_bytes(interner).max(ptr_width)
             }
             // The following are usually not heap-allocated, but they might be when inside a Box.
             Builtin::Int(int_width) => int_width.alignment_bytes(target_info).max(ptr_width),
@@ -3256,14 +3223,7 @@ fn layout_from_flat_type<'a>(
             }
 
             sortables.sort_by(|(label1, layout1), (label2, layout2)| {
-                cmp_fields(
-                    &env.cache.interner,
-                    label1,
-                    *layout1,
-                    label2,
-                    *layout2,
-                    target_info,
-                )
+                cmp_fields(&env.cache.interner, label1, *layout1, label2, *layout2)
             });
 
             let ordered_field_names = Vec::from_iter_in(
@@ -3305,14 +3265,7 @@ fn layout_from_flat_type<'a>(
             }
 
             sortables.sort_by(|(index1, layout1), (index2, layout2)| {
-                cmp_fields(
-                    &env.cache.interner,
-                    index1,
-                    *layout1,
-                    index2,
-                    *layout2,
-                    target_info,
-                )
+                cmp_fields(&env.cache.interner, index1, *layout1, index2, *layout2)
             });
 
             let result = if sortables.len() == 1 {
@@ -3383,8 +3336,6 @@ fn sort_tuple_elems_help<'a>(
     env: &mut Env<'a, '_>,
     elems_map: impl Iterator<Item = (usize, Variable)>,
 ) -> Result<Vec<'a, SortedTupleElem<'a>>, LayoutProblem> {
-    let target_info = env.target_info;
-
     let mut sorted_elems = Vec::with_capacity_in(elems_map.size_hint().0, env.arena);
 
     for (index, elem) in elems_map {
@@ -3400,7 +3351,6 @@ fn sort_tuple_elems_help<'a>(
             *res_layout1,
             index2,
             *res_layout2,
-            target_info,
         )
     });
 
@@ -3429,8 +3379,6 @@ fn sort_record_fields_help<'a>(
     env: &mut Env<'a, '_>,
     fields_map: impl Iterator<Item = (Lowercase, RecordField<Variable>)>,
 ) -> Result<Vec<'a, SortedField<'a>>, LayoutProblem> {
-    let target_info = env.target_info;
-
     // Sort the fields by label
     let mut sorted_fields = Vec::with_capacity_in(fields_map.size_hint().0, env.arena);
@@ -3452,14 +3400,9 @@ fn sort_record_fields_help<'a>(
     sorted_fields.sort_by(
         |(label1, _, res_layout1), (label2, _, res_layout2)| match res_layout1 {
             Ok(layout1) | Err(layout1) => match res_layout2 {
-                Ok(layout2) | Err(layout2) => cmp_fields(
-                    &env.cache.interner,
-                    label1,
-                    *layout1,
-                    label2,
-                    *layout2,
-                    target_info,
-                ),
+                Ok(layout2) | Err(layout2) => {
+                    cmp_fields(&env.cache.interner, label1, *layout1, label2, *layout2)
+                }
             },
         },
     );
@@ -3759,11 +3702,11 @@ where
         let size1 = env
             .cache
             .get_repr(*layout1)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
         let size2 = env
             .cache
             .get_repr(*layout2)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
 
         size2.cmp(&size1)
     });
@@ -3821,11 +3764,11 @@ where
        let size1 = env
             .cache
             .get_repr(*layout1)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
         let size2 = env
             .cache
             .get_repr(*layout2)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
 
         size2.cmp(&size1)
     });
@@ -4033,11 +3976,11 @@ where
         let size1 = env
             .cache
             .get_repr(*layout1)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
         let size2 = env
             .cache
             .get_repr(*layout2)
-            .alignment_bytes(&env.cache.interner, env.target_info);
+            .alignment_bytes(&env.cache.interner);
 
         size2.cmp(&size1)
     });
@@ -4624,17 +4567,12 @@ pub fn cmp_fields<'a, L: Ord, I>(
     layout1: InLayout<'a>,
     label2: &L,
     layout2: InLayout<'a>,
-    target_info: TargetInfo,
 ) -> Ordering
 where
     I: LayoutInterner<'a>,
 {
-    let size1 = interner
-        .get_repr(layout1)
-        .alignment_bytes(interner, target_info);
-    let size2 = interner
-        .get_repr(layout2)
-        .alignment_bytes(interner, target_info);
+    let size1 = interner.get_repr(layout1).alignment_bytes(interner);
+    let size2 = interner.get_repr(layout2).alignment_bytes(interner);
 
     size2.cmp(&size1).then(label1.cmp(label2))
 }
@@ -4664,9 +4602,8 @@ mod test {
         let repr = LayoutRepr::Union(UnionLayout::NonRecursive(&tt));
 
-        let target_info = TargetInfo::default_x86_64();
-        assert_eq!(repr.stack_size(&interner, target_info), 1);
-        assert_eq!(repr.alignment_bytes(&interner, target_info), 1);
+        assert_eq!(repr.stack_size(&interner), 1);
+        assert_eq!(repr.alignment_bytes(&interner), 1);
     }
 
     #[test]
@@ -4682,20 +4619,13 @@ mod test {
         let union_layout = UnionLayout::NonRecursive(&tags as &[_]);
         let repr = LayoutRepr::Union(union_layout);
 
-        let target_info = TargetInfo::default_x86_64();
-        assert_eq!(repr.stack_size_without_alignment(&interner, target_info), 8);
+        assert_eq!(repr.stack_size_without_alignment(&interner), 8);
     }
 
     #[test]
     fn void_stack_size() {
         let interner = STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64());
-        let target_info = TargetInfo::default_x86_64();
-        assert_eq!(
-            Layout::VOID_NAKED
-                .repr(&interner)
-                .stack_size(&interner, target_info),
-            0
-        );
+        assert_eq!(Layout::VOID_NAKED.repr(&interner).stack_size(&interner), 0);
     }
 
     #[test]
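
A detail worth calling out from the first hunk: `allocation_alignment_bytes` clamps the result to at least the pointer width, and the in-code comment explains why, since a refcount is stored with every heap allocation. Below is a toy sketch of that clamp as a hypothetical standalone function, not the compiler's actual code; it only assumes, per the comment, that the refcount is pointer-sized.

    // Hypothetical standalone version of the clamp in
    // `allocation_alignment_bytes`: a heap allocation must be aligned for the
    // pointer-sized refcount stored with it, even if the payload needs less.
    fn allocation_alignment(data_alignment: u32, ptr_width: u32) -> u32 {
        data_alignment.max(ptr_width)
    }

    fn main() {
        // a payload of u8s on a 64-bit target has data alignment 1, but the
        // allocation still needs 8-byte alignment for the refcount
        assert_eq!(allocation_alignment(1, 8), 8);
        // a 16-byte-aligned payload keeps its larger alignment
        assert_eq!(allocation_alignment(16, 8), 16);
    }

Note also how the test hunks at the end justify the whole refactor: the interner is built as `STLayoutInterner::with_capacity(4, TargetInfo::default_x86_64())`, so the target is fixed at construction and the separate `target_info` bindings the tests used to pass could never disagree with it.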