Push layout interner further through Layout

Ayaz Hafiz 2022-08-31 14:14:34 -05:00
parent ed04c2040a
commit 3b4b1838b8
34 changed files with 1279 additions and 634 deletions
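
The pattern this diff applies throughout the layout code: size, alignment, and pretty-printing queries gain an explicit `interner: &I` parameter bounded by `Interner<'a, Layout<'a>>`, so interned layout keys can be resolved wherever a sub-layout must be inspected. Below is a minimal, self-contained sketch of the idea; the `insert`/`get` method names and the toy `Layout` are illustrative assumptions, not Roc's actual `Interner` API.

```rust
use std::collections::HashMap;
use std::hash::Hash;

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct Interned(u32);

// Hypothetical trait surface; the real trait in the Roc compiler may differ.
trait Interner<T> {
    fn insert(&mut self, value: T) -> Interned;
    fn get(&self, key: Interned) -> &T;
}

struct SingleThreadedInterner<T> {
    map: HashMap<T, Interned>,
    vec: Vec<T>,
}

impl<T> SingleThreadedInterner<T> {
    fn with_capacity(cap: usize) -> Self {
        Self {
            map: HashMap::with_capacity(cap),
            vec: Vec::with_capacity(cap),
        }
    }
}

impl<T: Clone + Eq + Hash> Interner<T> for SingleThreadedInterner<T> {
    fn insert(&mut self, value: T) -> Interned {
        if let Some(&key) = self.map.get(&value) {
            return key; // structurally equal values share one key
        }
        let key = Interned(self.vec.len() as u32);
        self.vec.push(value.clone());
        self.map.insert(value, key);
        key
    }

    fn get(&self, key: Interned) -> &T {
        &self.vec[key.0 as usize]
    }
}

#[derive(Clone, PartialEq, Eq, Hash)]
enum Layout {
    U8,
    U32,
    Pair(Interned, Interned), // children stored as interner keys
}

// Like the methods in this diff, the query takes the interner as an explicit
// parameter so it can resolve interned sub-layouts on the way down.
fn stack_size<I: Interner<Layout>>(interner: &I, layout: &Layout) -> u32 {
    match layout {
        Layout::U8 => 1,
        Layout::U32 => 4,
        Layout::Pair(a, b) => {
            // no alignment padding in this toy; the real code rounds up
            stack_size(interner, interner.get(*a)) + stack_size(interner, interner.get(*b))
        }
    }
}

fn main() {
    let mut interner = SingleThreadedInterner::with_capacity(4);
    let a = interner.insert(Layout::U8);
    let b = interner.insert(Layout::U32);
    let pair = Layout::Pair(a, b);
    assert_eq!(stack_size(&interner, &pair), 5);
}
```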


@@ -106,7 +106,7 @@ pub struct LayoutCache<'a> {
     cache: std::vec::Vec<CacheLayer<LayoutResult<'a>>>,
     raw_function_cache: std::vec::Vec<CacheLayer<RawFunctionLayoutResult<'a>>>,
-    pub(crate) interner: LayoutInterner<'a>,
+    pub interner: LayoutInterner<'a>,
     /// Statistics on the usage of the layout cache.
     #[cfg(debug_assertions)]
@@ -720,21 +720,31 @@ pub enum UnionLayout<'a> {
 }
 impl<'a> UnionLayout<'a> {
-    pub fn to_doc<D, A>(self, alloc: &'a D, _parens: Parens) -> DocBuilder<'a, D, A>
+    pub fn to_doc<'b, D, A, I>(
+        self,
+        alloc: &'b D,
+        interner: &I,
+        _parens: Parens,
+    ) -> DocBuilder<'b, D, A>
     where
-        D: DocAllocator<'a, A>,
+        D: DocAllocator<'b, A>,
         D::Doc: Clone,
         A: Clone,
+        I: Interner<'a, Layout<'a>>,
     {
         use UnionLayout::*;
         match self {
             NonRecursive(tags) => {
                 let tags_doc = tags.iter().map(|fields| {
-                    alloc.text("C ").append(alloc.intersperse(
-                        fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
-                        " ",
-                    ))
+                    alloc.text("C ").append(
+                        alloc.intersperse(
+                            fields
+                                .iter()
+                                .map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
+                            " ",
+                        ),
+                    )
                 });
                 alloc
@@ -744,10 +754,14 @@ impl<'a> UnionLayout<'a> {
             }
             Recursive(tags) => {
                 let tags_doc = tags.iter().map(|fields| {
-                    alloc.text("C ").append(alloc.intersperse(
-                        fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
-                        " ",
-                    ))
+                    alloc.text("C ").append(
+                        alloc.intersperse(
+                            fields
+                                .iter()
+                                .map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
+                            " ",
+                        ),
+                    )
                 });
                 alloc
                     .text("[<r>")
@@ -755,10 +769,14 @@ impl<'a> UnionLayout<'a> {
                     .append(alloc.text("]"))
             }
             NonNullableUnwrapped(fields) => {
-                let fields_doc = alloc.text("C ").append(alloc.intersperse(
-                    fields.iter().map(|x| x.to_doc(alloc, Parens::InTypeParam)),
-                    " ",
-                ));
+                let fields_doc = alloc.text("C ").append(
+                    alloc.intersperse(
+                        fields
+                            .iter()
+                            .map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
+                        " ",
+                    ),
+                );
                 alloc
                     .text("[<rnnu>")
                     .append(fields_doc)
@@ -772,7 +790,7 @@ impl<'a> UnionLayout<'a> {
                 alloc.intersperse(
                     other_fields
                         .iter()
-                        .map(|x| x.to_doc(alloc, Parens::InTypeParam)),
+                        .map(|x| x.to_doc(alloc, interner, Parens::InTypeParam)),
                     " ",
                 ),
             );
@@ -927,27 +945,39 @@ impl<'a> UnionLayout<'a> {
         }
     }
-    fn tags_alignment_bytes(tags: &[&[Layout]], target_info: TargetInfo) -> u32 {
+    fn tags_alignment_bytes<I>(
+        interner: &I,
+        tags: &[&'a [Layout<'a>]],
+        target_info: TargetInfo,
+    ) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         tags.iter()
             .map(|field_layouts| {
-                Layout::struct_no_name_order(field_layouts).alignment_bytes(target_info)
+                Layout::struct_no_name_order(field_layouts).alignment_bytes(interner, target_info)
             })
             .max()
             .unwrap_or(0)
     }
-    pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         let allocation = match self {
-            UnionLayout::NonRecursive(tags) => Self::tags_alignment_bytes(tags, target_info),
-            UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(tags, target_info),
+            UnionLayout::NonRecursive(tags) => {
+                Self::tags_alignment_bytes(interner, tags, target_info)
+            }
+            UnionLayout::Recursive(tags) => Self::tags_alignment_bytes(interner, tags, target_info),
             UnionLayout::NonNullableUnwrapped(field_layouts) => {
-                Layout::struct_no_name_order(field_layouts).alignment_bytes(target_info)
+                Layout::struct_no_name_order(field_layouts).alignment_bytes(interner, target_info)
             }
             UnionLayout::NullableWrapped { other_tags, .. } => {
-                Self::tags_alignment_bytes(other_tags, target_info)
+                Self::tags_alignment_bytes(interner, other_tags, target_info)
             }
             UnionLayout::NullableUnwrapped { other_fields, .. } => {
-                Layout::struct_no_name_order(other_fields).alignment_bytes(target_info)
+                Layout::struct_no_name_order(other_fields).alignment_bytes(interner, target_info)
             }
         };
@@ -956,8 +986,12 @@ impl<'a> UnionLayout<'a> {
     }
     /// Size of the data in memory, whether it's stack or heap (for non-null tag ids)
-    pub fn data_size_and_alignment(&self, target_info: TargetInfo) -> (u32, u32) {
-        let (data_width, data_align) = self.data_size_and_alignment_help_match(target_info);
+    pub fn data_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        let (data_width, data_align) =
+            self.data_size_and_alignment_help_match(interner, target_info);
         if self.stores_tag_id_as_data(target_info) {
             use Discriminant::*;
@@ -985,53 +1019,83 @@ impl<'a> UnionLayout<'a> {
     /// Size of the data before the tag_id, if it exists.
     /// Returns None if the tag_id is not stored as data in the layout.
-    pub fn data_size_without_tag_id(&self, target_info: TargetInfo) -> Option<u32> {
+    pub fn data_size_without_tag_id<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         if !self.stores_tag_id_as_data(target_info) {
             return None;
         };
-        Some(self.data_size_and_alignment_help_match(target_info).0)
+        Some(
+            self.data_size_and_alignment_help_match(interner, target_info)
+                .0,
+        )
     }
-    fn data_size_and_alignment_help_match(&self, target_info: TargetInfo) -> (u32, u32) {
+    fn data_size_and_alignment_help_match<I>(
+        &self,
+        interner: &I,
+        target_info: TargetInfo,
+    ) -> (u32, u32)
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
-            Self::NonRecursive(tags) => Layout::stack_size_and_alignment_slices(tags, target_info),
-            Self::Recursive(tags) => Layout::stack_size_and_alignment_slices(tags, target_info),
+            Self::NonRecursive(tags) => {
+                Layout::stack_size_and_alignment_slices(interner, tags, target_info)
+            }
+            Self::Recursive(tags) => {
+                Layout::stack_size_and_alignment_slices(interner, tags, target_info)
+            }
             Self::NonNullableUnwrapped(fields) => {
-                Layout::stack_size_and_alignment_slices(&[fields], target_info)
+                Layout::stack_size_and_alignment_slices(interner, &[fields], target_info)
             }
             Self::NullableWrapped { other_tags, .. } => {
-                Layout::stack_size_and_alignment_slices(other_tags, target_info)
+                Layout::stack_size_and_alignment_slices(interner, other_tags, target_info)
             }
             Self::NullableUnwrapped { other_fields, .. } => {
-                Layout::stack_size_and_alignment_slices(&[other_fields], target_info)
+                Layout::stack_size_and_alignment_slices(interner, &[other_fields], target_info)
             }
         }
     }
-    pub fn tag_id_offset(&self, target_info: TargetInfo) -> Option<u32> {
+    pub fn tag_id_offset<I>(&self, interner: &I, target_info: TargetInfo) -> Option<u32>
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
             UnionLayout::NonRecursive(tags)
             | UnionLayout::Recursive(tags)
             | UnionLayout::NullableWrapped {
                 other_tags: tags, ..
-            } => Some(Self::tag_id_offset_help(tags, target_info)),
+            } => Some(Self::tag_id_offset_help(interner, tags, target_info)),
             UnionLayout::NonNullableUnwrapped(_) | UnionLayout::NullableUnwrapped { .. } => None,
         }
     }
-    fn tag_id_offset_help(layouts: &[&[Layout]], target_info: TargetInfo) -> u32 {
+    fn tag_id_offset_help<I>(
+        interner: &I,
+        layouts: &[&[Layout<'a>]],
+        target_info: TargetInfo,
+    ) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         let (data_width, data_align) =
-            Layout::stack_size_and_alignment_slices(layouts, target_info);
+            Layout::stack_size_and_alignment_slices(interner, layouts, target_info);
         round_up_to_alignment(data_width, data_align)
     }
     /// Very important to use this when doing a memcpy!
-    fn stack_size_without_alignment(&self, target_info: TargetInfo) -> u32 {
+    fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
             UnionLayout::NonRecursive(_) => {
-                let (width, align) = self.data_size_and_alignment(target_info);
+                let (width, align) = self.data_size_and_alignment(interner, target_info);
                 round_up_to_alignment(width, align)
             }
             UnionLayout::Recursive(_)
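
For unions that store their tag id inline, `tag_id_offset_help` above places the tag id immediately after the data, rounded up to the data's alignment. Here is a plausible implementation of the `round_up_to_alignment` helper it calls (the real helper lives elsewhere in this file and may differ), with a worked example:

```rust
// Plausible rounding helper, matching the standard "pad to next multiple"
// formula; an assumption, not necessarily Roc's exact implementation.
fn round_up_to_alignment(width: u32, alignment: u32) -> u32 {
    if alignment == 0 {
        return width;
    }
    match width % alignment {
        0 => width,
        rem => width + alignment - rem, // pad up to the next multiple
    }
}

fn main() {
    // Data occupies bytes 0..5; with 4-byte alignment the tag id lands at
    // offset 8, which is what tag_id_offset_help computes.
    assert_eq!(round_up_to_alignment(5, 4), 8);
    // Already aligned: no padding needed.
    assert_eq!(round_up_to_alignment(8, 4), 8);
}
```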
@@ -1238,7 +1302,10 @@ pub enum ClosureCallOptions<'a> {
 }
 impl<'a> LambdaSet<'a> {
-    pub fn runtime_representation(&self) -> Layout<'a> {
+    pub fn runtime_representation<I>(&self, _interner: &I) -> Layout<'a>
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         *self.representation
     }
@@ -1300,19 +1367,24 @@ impl<'a> LambdaSet<'a> {
     }
     /// Finds an alias name for a possible-multimorphic lambda variant in the lambda set.
-    pub fn find_lambda_name(
+    pub fn find_lambda_name<I>(
         &self,
+        interner: &I,
         function_symbol: Symbol,
         captures_layouts: &[Layout],
-    ) -> LambdaName<'a> {
+    ) -> LambdaName<'a>
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         debug_assert!(self.contains(function_symbol), "function symbol not in set");
         let comparator = |other_name: Symbol, other_captures_layouts: &[Layout]| {
             other_name == function_symbol
-                && other_captures_layouts
-                    .iter()
-                    .zip(captures_layouts)
-                    .all(|(other_layout, layout)| self.capture_layouts_eq(other_layout, layout))
+                && other_captures_layouts.iter().zip(captures_layouts).all(
+                    |(other_layout, layout)| {
+                        self.capture_layouts_eq(interner, other_layout, layout)
+                    },
+                )
         };
         let (name, layouts) = self
@@ -1336,13 +1408,16 @@ impl<'a> LambdaSet<'a> {
     /// Checks if two captured layouts are equivalent under the current lambda set.
     /// Resolves recursive pointers to the layout of the lambda set.
-    fn capture_layouts_eq(&self, left: &Layout, right: &Layout) -> bool {
+    fn capture_layouts_eq<I>(&self, interner: &I, left: &Layout, right: &Layout) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         if left == right {
             return true;
         }
         let left = if left == &Layout::RecursivePointer {
-            let runtime_repr = self.runtime_representation();
+            let runtime_repr = self.runtime_representation(interner);
             debug_assert!(matches!(
                 runtime_repr,
                 Layout::Union(UnionLayout::Recursive(_) | UnionLayout::NullableUnwrapped { .. })
@@ -1353,7 +1428,7 @@ impl<'a> LambdaSet<'a> {
         };
         let right = if right == &Layout::RecursivePointer {
-            let runtime_repr = self.runtime_representation();
+            let runtime_repr = self.runtime_representation(interner);
             debug_assert!(matches!(
                 runtime_repr,
                 Layout::Union(UnionLayout::Recursive(_) | UnionLayout::NullableUnwrapped { .. })
@@ -1706,18 +1781,30 @@ impl<'a> LambdaSet<'a> {
         }
     }
-    pub fn stack_size(&self, target_info: TargetInfo) -> u32 {
-        self.representation.stack_size(target_info)
+    pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        self.representation.stack_size(interner, target_info)
     }
-    pub fn contains_refcounted(&self) -> bool {
-        self.representation.contains_refcounted()
+    pub fn contains_refcounted<I>(&self, interner: &I) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        self.representation.contains_refcounted(interner)
     }
-    pub fn safe_to_memcpy(&self) -> bool {
-        self.representation.safe_to_memcpy()
+    pub fn safe_to_memcpy<I>(&self, interner: &I) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        self.representation.safe_to_memcpy(interner)
    }
-    pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
-        self.representation.alignment_bytes(target_info)
+    pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        self.representation.alignment_bytes(interner, target_info)
     }
 }
@@ -2199,21 +2286,26 @@ impl<'a> Layout<'a> {
         })
     }
-    pub fn safe_to_memcpy(&self) -> bool {
+    pub fn safe_to_memcpy<I>(&self, interner: &I) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         use Layout::*;
         match self {
             Builtin(builtin) => builtin.safe_to_memcpy(),
             Struct { field_layouts, .. } => field_layouts
                 .iter()
-                .all(|field_layout| field_layout.safe_to_memcpy()),
+                .all(|field_layout| field_layout.safe_to_memcpy(interner)),
             Union(variant) => {
                 use UnionLayout::*;
                 match variant {
-                    NonRecursive(tags) => tags
-                        .iter()
-                        .all(|tag_layout| tag_layout.iter().all(|field| field.safe_to_memcpy())),
+                    NonRecursive(tags) => tags.iter().all(|tag_layout| {
+                        tag_layout
+                            .iter()
+                            .all(|field| field.safe_to_memcpy(interner))
+                    }),
                     Recursive(_)
                     | NullableWrapped { .. }
                     | NullableUnwrapped { .. }
@@ -2223,7 +2315,9 @@ impl<'a> Layout<'a> {
                     }
                 }
             }
-            LambdaSet(lambda_set) => lambda_set.runtime_representation().safe_to_memcpy(),
+            LambdaSet(lambda_set) => lambda_set
+                .runtime_representation(interner)
+                .safe_to_memcpy(interner),
             Boxed(_) | RecursivePointer => {
                 // We cannot memcpy pointers, because then we would have the same pointer in multiple places!
                 false
@@ -2273,7 +2367,10 @@ impl<'a> Layout<'a> {
         }
     }
-    pub fn is_passed_by_reference(&self, target_info: TargetInfo) -> bool {
+    pub fn is_passed_by_reference<I>(&self, interner: &I, target_info: TargetInfo) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
             Layout::Builtin(builtin) => {
                 use Builtin::*;
@@ -2291,29 +2388,38 @@ impl<'a> Layout<'a> {
             }
             Layout::Union(UnionLayout::NonRecursive(_)) => true,
             Layout::LambdaSet(lambda_set) => lambda_set
-                .runtime_representation()
-                .is_passed_by_reference(target_info),
+                .runtime_representation(interner)
+                .is_passed_by_reference(interner, target_info),
             _ => false,
         }
     }
-    pub fn stack_size(&self, target_info: TargetInfo) -> u32 {
-        let width = self.stack_size_without_alignment(target_info);
-        let alignment = self.alignment_bytes(target_info);
+    pub fn stack_size<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        let width = self.stack_size_without_alignment(interner, target_info);
+        let alignment = self.alignment_bytes(interner, target_info);
         round_up_to_alignment(width, alignment)
     }
-    pub fn stack_size_and_alignment(&self, target_info: TargetInfo) -> (u32, u32) {
-        let width = self.stack_size_without_alignment(target_info);
-        let alignment = self.alignment_bytes(target_info);
+    pub fn stack_size_and_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> (u32, u32)
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
+        let width = self.stack_size_without_alignment(interner, target_info);
+        let alignment = self.alignment_bytes(interner, target_info);
         let size = round_up_to_alignment(width, alignment);
         (size, alignment)
     }
     /// Very important to use this when doing a memcpy!
-    pub fn stack_size_without_alignment(&self, target_info: TargetInfo) -> u32 {
+    pub fn stack_size_without_alignment<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         use Layout::*;
         match self {
@@ -2322,25 +2428,28 @@ impl<'a> Layout<'a> {
                 let mut sum = 0;
                 for field_layout in *field_layouts {
-                    sum += field_layout.stack_size(target_info);
+                    sum += field_layout.stack_size(interner, target_info);
                 }
                 sum
             }
-            Union(variant) => variant.stack_size_without_alignment(target_info),
+            Union(variant) => variant.stack_size_without_alignment(interner, target_info),
             LambdaSet(lambda_set) => lambda_set
-                .runtime_representation()
-                .stack_size_without_alignment(target_info),
+                .runtime_representation(interner)
+                .stack_size_without_alignment(interner, target_info),
             RecursivePointer => target_info.ptr_width() as u32,
             Boxed(_) => target_info.ptr_width() as u32,
         }
     }
-    pub fn alignment_bytes(&self, target_info: TargetInfo) -> u32 {
+    pub fn alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
             Layout::Struct { field_layouts, .. } => field_layouts
                 .iter()
-                .map(|x| x.alignment_bytes(target_info))
+                .map(|x| x.alignment_bytes(interner, target_info))
                 .max()
                 .unwrap_or(0),
@@ -2354,7 +2463,7 @@ impl<'a> Layout<'a> {
                 .flat_map(|layouts| {
                     layouts
                         .iter()
-                        .map(|layout| layout.alignment_bytes(target_info))
+                        .map(|layout| layout.alignment_bytes(interner, target_info))
                 })
                 .max();
@@ -2377,40 +2486,50 @@ impl<'a> Layout<'a> {
                 }
             }
             Layout::LambdaSet(lambda_set) => lambda_set
-                .runtime_representation()
-                .alignment_bytes(target_info),
+                .runtime_representation(interner)
+                .alignment_bytes(interner, target_info),
             Layout::Builtin(builtin) => builtin.alignment_bytes(target_info),
             Layout::RecursivePointer => target_info.ptr_width() as u32,
             Layout::Boxed(_) => target_info.ptr_width() as u32,
         }
     }
-    pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         let ptr_width = target_info.ptr_width() as u32;
         match self {
-            Layout::Builtin(builtin) => builtin.allocation_alignment_bytes(target_info),
-            Layout::Struct { .. } => self.alignment_bytes(target_info).max(ptr_width),
-            Layout::Union(union_layout) => union_layout.allocation_alignment_bytes(target_info),
+            Layout::Builtin(builtin) => builtin.allocation_alignment_bytes(interner, target_info),
+            Layout::Struct { .. } => self.alignment_bytes(interner, target_info).max(ptr_width),
+            Layout::Union(union_layout) => {
+                union_layout.allocation_alignment_bytes(interner, target_info)
+            }
             Layout::LambdaSet(lambda_set) => lambda_set
-                .runtime_representation()
-                .allocation_alignment_bytes(target_info),
+                .runtime_representation(interner)
+                .allocation_alignment_bytes(interner, target_info),
             Layout::RecursivePointer => unreachable!("should be looked up to get an actual layout"),
-            Layout::Boxed(inner) => inner.allocation_alignment_bytes(target_info),
+            Layout::Boxed(inner) => inner.allocation_alignment_bytes(interner, target_info),
         }
     }
-    pub fn stack_size_and_alignment_slices(
+    pub fn stack_size_and_alignment_slices<I>(
+        interner: &I,
         slices: &[&[Self]],
         target_info: TargetInfo,
-    ) -> (u32, u32) {
+    ) -> (u32, u32)
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         let mut data_align = 1;
         let mut data_width = 0;
         for tag in slices {
             let mut total = 0;
             for layout in tag.iter() {
-                let (stack_size, alignment) = layout.stack_size_and_alignment(target_info);
+                let (stack_size, alignment) =
+                    layout.stack_size_and_alignment(interner, target_info);
                 total += stack_size;
                 data_align = data_align.max(alignment);
             }
@@ -2443,12 +2562,17 @@ impl<'a> Layout<'a> {
     /// Even if a value (say, a record) is not itself reference counted,
     /// it may contain values/fields that are. Therefore when this record
     /// goes out of scope, the refcount on those values/fields must be decremented.
-    pub fn contains_refcounted(&self) -> bool {
+    pub fn contains_refcounted<I>(&self, interner: &I) -> bool
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         use Layout::*;
         match self {
             Builtin(builtin) => builtin.is_refcounted(),
-            Struct { field_layouts, .. } => field_layouts.iter().any(|f| f.contains_refcounted()),
+            Struct { field_layouts, .. } => field_layouts
+                .iter()
+                .any(|f| f.contains_refcounted(interner)),
             Union(variant) => {
                 use UnionLayout::*;
@@ -2456,43 +2580,55 @@ impl<'a> Layout<'a> {
                     NonRecursive(fields) => fields
                         .iter()
                         .flat_map(|ls| ls.iter())
-                        .any(|f| f.contains_refcounted()),
+                        .any(|f| f.contains_refcounted(interner)),
                     Recursive(_)
                     | NullableWrapped { .. }
                     | NullableUnwrapped { .. }
                     | NonNullableUnwrapped(_) => true,
                 }
             }
-            LambdaSet(lambda_set) => lambda_set.runtime_representation().contains_refcounted(),
+            LambdaSet(lambda_set) => lambda_set
+                .runtime_representation(interner)
+                .contains_refcounted(interner),
             RecursivePointer => true,
             Boxed(_) => true,
         }
     }
-    pub fn to_doc<D, A>(self, alloc: &'a D, parens: Parens) -> DocBuilder<'a, D, A>
+    pub fn to_doc<'b, D, A, I>(
+        self,
+        alloc: &'b D,
+        interner: &I,
+        parens: Parens,
+    ) -> DocBuilder<'b, D, A>
     where
-        D: DocAllocator<'a, A>,
+        D: DocAllocator<'b, A>,
         D::Doc: Clone,
         A: Clone,
+        I: Interner<'a, Layout<'a>>,
     {
         use Layout::*;
         match self {
-            Builtin(builtin) => builtin.to_doc(alloc, parens),
+            Builtin(builtin) => builtin.to_doc(alloc, interner, parens),
             Struct { field_layouts, .. } => {
-                let fields_doc = field_layouts.iter().map(|x| x.to_doc(alloc, parens));
+                let fields_doc = field_layouts
+                    .iter()
+                    .map(|x| x.to_doc(alloc, interner, parens));
                 alloc
                     .text("{")
                     .append(alloc.intersperse(fields_doc, ", "))
                     .append(alloc.text("}"))
             }
-            Union(union_layout) => union_layout.to_doc(alloc, parens),
-            LambdaSet(lambda_set) => lambda_set.runtime_representation().to_doc(alloc, parens),
+            Union(union_layout) => union_layout.to_doc(alloc, interner, parens),
+            LambdaSet(lambda_set) => lambda_set
+                .runtime_representation(interner)
+                .to_doc(alloc, interner, parens),
             RecursivePointer => alloc.text("*self"),
             Boxed(inner) => alloc
                 .text("Boxed(")
-                .append(inner.to_doc(alloc, parens))
+                .append(inner.to_doc(alloc, interner, parens))
                 .append(")"),
         }
     }
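
The `contains_refcounted` logic above answers whether dropping a value requires decrementing refcounts inside it, even when the value itself is not refcounted. A toy restatement over a pared-down layout type (illustrative only; `List` stands in for Roc's refcounted builtins, and the interner parameter is omitted since this toy has no interned children):

```rust
#[derive(Clone)]
enum ToyLayout {
    Int,
    List(Box<ToyLayout>),
    Struct(Vec<ToyLayout>),
    Boxed(Box<ToyLayout>),
}

fn contains_refcounted(layout: &ToyLayout) -> bool {
    match layout {
        ToyLayout::Int => false,
        // lists are refcounted themselves
        ToyLayout::List(_) => true,
        // a struct is not refcounted, but may contain fields that are
        ToyLayout::Struct(fields) => fields.iter().any(contains_refcounted),
        // boxes always hold a heap allocation
        ToyLayout::Boxed(_) => true,
    }
}

fn main() {
    let record = ToyLayout::Struct(vec![
        ToyLayout::Int,
        ToyLayout::List(Box::new(ToyLayout::Int)),
    ]);
    // The record itself is not refcounted, but dropping it must decrement
    // the refcount of its list field.
    assert!(contains_refcounted(&record));
}
```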
@@ -2509,9 +2645,12 @@ impl<'a> Layout<'a> {
         }
     }
-    pub fn runtime_representation(&self) -> Self {
+    pub fn runtime_representation<I>(&self, interner: &I) -> Self
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         match self {
-            Layout::LambdaSet(lambda_set) => lambda_set.runtime_representation(),
+            Layout::LambdaSet(lambda_set) => lambda_set.runtime_representation(interner),
             other => *other,
         }
     }
@@ -2700,11 +2839,17 @@ impl<'a> Builtin<'a> {
         }
     }
-    pub fn to_doc<D, A>(self, alloc: &'a D, _parens: Parens) -> DocBuilder<'a, D, A>
+    pub fn to_doc<'b, D, A, I>(
+        self,
+        alloc: &'b D,
+        interner: &I,
+        _parens: Parens,
+    ) -> DocBuilder<'b, D, A>
     where
-        D: DocAllocator<'a, A>,
+        D: DocAllocator<'b, A>,
         D::Doc: Clone,
         A: Clone,
+        I: Interner<'a, Layout<'a>>,
     {
         use Builtin::*;
@@ -2740,18 +2885,23 @@ impl<'a> Builtin<'a> {
             Decimal => alloc.text("Decimal"),
             Str => alloc.text("Str"),
-            List(layout) => alloc
-                .text("List ")
-                .append(layout.to_doc(alloc, Parens::InTypeParam)),
+            List(layout) => {
+                alloc
+                    .text("List ")
+                    .append(layout.to_doc(alloc, interner, Parens::InTypeParam))
+            }
         }
     }
-    pub fn allocation_alignment_bytes(&self, target_info: TargetInfo) -> u32 {
+    pub fn allocation_alignment_bytes<I>(&self, interner: &I, target_info: TargetInfo) -> u32
+    where
+        I: Interner<'a, Layout<'a>>,
+    {
         let ptr_width = target_info.ptr_width() as u32;
         let allocation = match self {
             Builtin::Str => ptr_width,
-            Builtin::List(e) => e.alignment_bytes(target_info).max(ptr_width),
+            Builtin::List(e) => e.alignment_bytes(interner, target_info).max(ptr_width),
             // The following are usually not heap-allocated, but they might be when inside a Box.
             Builtin::Int(int_width) => int_width.alignment_bytes(target_info).max(ptr_width),
             Builtin::Float(float_width) => float_width.alignment_bytes(target_info).max(ptr_width),
@@ -2947,7 +3097,14 @@ fn layout_from_flat_type<'a>(
             }
             sortables.sort_by(|(label1, layout1), (label2, layout2)| {
-                cmp_fields(label1, layout1, label2, layout2, target_info)
+                cmp_fields(
+                    &env.cache.interner,
+                    label1,
+                    layout1,
+                    label2,
+                    layout2,
+                    target_info,
+                )
             });
             let ordered_field_names =
@@ -3044,9 +3201,14 @@ fn sort_record_fields_help<'a>(
     sorted_fields.sort_by(
         |(label1, _, res_layout1), (label2, _, res_layout2)| match res_layout1 {
             Ok(layout1) | Err(layout1) => match res_layout2 {
-                Ok(layout2) | Err(layout2) => {
-                    cmp_fields(label1, layout1, label2, layout2, target_info)
-                }
+                Ok(layout2) | Err(layout2) => cmp_fields(
+                    &env.cache.interner,
+                    label1,
+                    layout1,
+                    label2,
+                    layout2,
+                    target_info,
+                ),
             },
         },
     );
@@ -3323,8 +3485,8 @@ where
     }
     layouts.sort_by(|layout1, layout2| {
-        let size1 = layout1.alignment_bytes(env.target_info);
-        let size2 = layout2.alignment_bytes(env.target_info);
+        let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
+        let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
         size2.cmp(&size1)
     });
@@ -3373,8 +3535,8 @@ where
     }
     arg_layouts.sort_by(|layout1, layout2| {
-        let size1 = layout1.alignment_bytes(env.target_info);
-        let size2 = layout2.alignment_bytes(env.target_info);
+        let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
+        let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
         size2.cmp(&size1)
     });
@@ -3477,8 +3639,8 @@ where
     }
     layouts.sort_by(|layout1, layout2| {
-        let size1 = layout1.alignment_bytes(env.target_info);
-        let size2 = layout2.alignment_bytes(env.target_info);
+        let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
+        let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
         size2.cmp(&size1)
     });
@@ -3569,8 +3731,8 @@ where
     }
     arg_layouts.sort_by(|layout1, layout2| {
-        let size1 = layout1.alignment_bytes(env.target_info);
-        let size2 = layout2.alignment_bytes(env.target_info);
+        let size1 = layout1.alignment_bytes(&env.cache.interner, env.target_info);
+        let size2 = layout2.alignment_bytes(&env.cache.interner, env.target_info);
         size2.cmp(&size1)
     });
@@ -3786,8 +3948,8 @@ where
     }
     tag_layout.sort_by(|layout1, layout2| {
-        let size1 = layout1.alignment_bytes(target_info);
-        let size2 = layout2.alignment_bytes(target_info);
+        let size1 = layout1.alignment_bytes(&env.cache.interner, target_info);
+        let size2 = layout2.alignment_bytes(&env.cache.interner, target_info);
         size2.cmp(&size1)
    });
@@ -4086,15 +4248,19 @@ impl<'a> LayoutIds<'a> {
 /// This is called by both code gen and glue, so that
 /// their field orderings agree.
 #[inline(always)]
-pub fn cmp_fields<L: Ord>(
+pub fn cmp_fields<'a, L: Ord, I>(
+    interner: &I,
     label1: &L,
-    layout1: &Layout<'_>,
+    layout1: &Layout<'a>,
     label2: &L,
-    layout2: &Layout<'_>,
+    layout2: &Layout<'a>,
     target_info: TargetInfo,
-) -> Ordering {
-    let size1 = layout1.alignment_bytes(target_info);
-    let size2 = layout2.alignment_bytes(target_info);
+) -> Ordering
+where
+    I: Interner<'a, Layout<'a>>,
+{
+    let size1 = layout1.alignment_bytes(interner, target_info);
+    let size2 = layout2.alignment_bytes(interner, target_info);
     size2.cmp(&size1).then(label1.cmp(label2))
 }
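
`cmp_fields` orders fields by decreasing alignment, breaking ties by label, and both code gen and glue rely on this shared rule to agree on field order. A standalone illustration of that ordering (the field names and alignment values here are hypothetical):

```rust
fn main() {
    let mut fields = vec![
        // (label, alignment_bytes)
        ("flag", 1u32),
        ("count", 8u32),
        ("id", 4u32),
        ("bit", 1u32),
    ];
    // Same comparator shape as cmp_fields: descending alignment, then label.
    fields.sort_by(|(label1, align1), (label2, align2)| {
        align2.cmp(align1).then(label1.cmp(label2))
    });
    assert_eq!(
        fields.iter().map(|(l, _)| *l).collect::<Vec<_>>(),
        ["count", "id", "bit", "flag"]
    );
}
```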
@@ -4105,6 +4271,8 @@ mod test {
     #[test]
     fn width_and_alignment_union_empty_struct() {
+        let interner = SingleThreadedInterner::with_capacity(4);
         let lambda_set = LambdaSet {
             set: &[(Symbol::LIST_MAP, &[])],
             representation: &Layout::UNIT,
@@ -4117,12 +4285,14 @@ mod test {
         let layout = Layout::Union(UnionLayout::NonRecursive(&tt));
         let target_info = TargetInfo::default_x86_64();
-        assert_eq!(layout.stack_size(target_info), 1);
-        assert_eq!(layout.alignment_bytes(target_info), 1);
+        assert_eq!(layout.stack_size(&interner, target_info), 1);
+        assert_eq!(layout.alignment_bytes(&interner, target_info), 1);
     }
     #[test]
     fn memcpy_size_result_u32_unit() {
+        let interner = SingleThreadedInterner::with_capacity(4);
         let ok_tag = &[Layout::Builtin(Builtin::Int(IntWidth::U32))];
         let err_tag = &[Layout::UNIT];
         let tags = [ok_tag as &[_], err_tag as &[_]];
@@ -4130,12 +4300,16 @@ mod test {
         let layout = Layout::Union(union_layout);
         let target_info = TargetInfo::default_x86_64();
-        assert_eq!(layout.stack_size_without_alignment(target_info), 8);
+        assert_eq!(
+            layout.stack_size_without_alignment(&interner, target_info),
+            8
+        );
     }
     #[test]
     fn void_stack_size() {
+        let interner = SingleThreadedInterner::with_capacity(4);
         let target_info = TargetInfo::default_x86_64();
-        assert_eq!(Layout::VOID.stack_size(target_info), 0);
+        assert_eq!(Layout::VOID.stack_size(&interner, target_info), 0);
     }
 }