Mirror of https://github.com/roc-lang/roc.git, synced 2025-11-25 21:37:48 +00:00

Merge branch 'main' into list-walk-with-index-until

Commit 89febf52bc: 24 changed files with 1105 additions and 1029 deletions
@@ -21,16 +21,18 @@ const SEAMLESS_SLICE_BIT: usize =
 pub const RocList = extern struct {
     bytes: ?[*]u8,
     length: usize,
-    // This technically points to directly after the refcount.
-    // This is an optimization that enables use one code path for regular lists and slices for geting the refcount ptr.
-    capacity_or_ref_ptr: usize,
+    // For normal lists, contains the capacity.
+    // For seamless slices contains the pointer to the original allocation.
+    // This pointer is to the first element of the original list.
+    // Note we storing an allocation pointer, the pointer must be right shifted by one.
+    capacity_or_alloc_ptr: usize,

     pub inline fn len(self: RocList) usize {
         return self.length;
     }

     pub fn getCapacity(self: RocList) usize {
-        const list_capacity = self.capacity_or_ref_ptr;
+        const list_capacity = self.capacity_or_alloc_ptr;
         const slice_capacity = self.length;
         const slice_mask = self.seamlessSliceMask();
         const capacity = (list_capacity & ~slice_mask) | (slice_capacity & slice_mask);
@@ -38,14 +40,14 @@ pub const RocList = extern struct {
     }

     pub fn isSeamlessSlice(self: RocList) bool {
-        return @as(isize, @bitCast(self.capacity_or_ref_ptr)) < 0;
+        return @as(isize, @bitCast(self.capacity_or_alloc_ptr)) < 0;
     }

     // This returns all ones if the list is a seamless slice.
     // Otherwise, it returns all zeros.
     // This is done without branching for optimization purposes.
     pub fn seamlessSliceMask(self: RocList) usize {
-        return @as(usize, @bitCast(@as(isize, @bitCast(self.capacity_or_ref_ptr)) >> (@bitSizeOf(isize) - 1)));
+        return @as(usize, @bitCast(@as(isize, @bitCast(self.capacity_or_alloc_ptr)) >> (@bitSizeOf(isize) - 1)));
     }

     pub fn isEmpty(self: RocList) bool {
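Aside: the seamlessSliceMask functions above rely on a branchless sign-extension trick. Arithmetic-right-shifting the top (seamless-slice) bit across the whole word yields all ones for a slice and all zeros for a regular list, so getCapacity can choose between the capacity word and the length with two masks instead of a branch. A minimal standalone Zig sketch of the idea; the names here are illustrative and not part of the commit:

    const std = @import("std");

    // Arithmetic right shift smears the top (seamless-slice) bit across the
    // whole word: all ones when the flag is set, all zeros otherwise.
    fn sliceMask(word: usize) usize {
        return @as(usize, @bitCast(@as(isize, @bitCast(word)) >> (@bitSizeOf(isize) - 1)));
    }

    // Branchless two-way choice, as in getCapacity above.
    fn select(mask: usize, if_zero: usize, if_ones: usize) usize {
        return (if_zero & ~mask) | (if_ones & mask);
    }

    test "mask is all ones exactly when the top bit is set" {
        const slice_bit: usize = @as(usize, 1) << (@bitSizeOf(usize) - 1);
        try std.testing.expectEqual(@as(usize, 0), sliceMask(123));
        try std.testing.expectEqual(~@as(usize, 0), sliceMask(slice_bit | 123));
        // A regular list reports its capacity word; a slice reports its length.
        try std.testing.expectEqual(@as(usize, 7), select(sliceMask(7), 7, 99));
        try std.testing.expectEqual(@as(usize, 99), select(sliceMask(slice_bit), 7, 99));
    }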
@@ -53,7 +55,7 @@ pub const RocList = extern struct {
     }

     pub fn empty() RocList {
-        return RocList{ .bytes = null, .length = 0, .capacity_or_ref_ptr = 0 };
+        return RocList{ .bytes = null, .length = 0, .capacity_or_alloc_ptr = 0 };
     }

     pub fn eql(self: RocList, other: RocList) bool {
@@ -99,21 +101,22 @@ pub const RocList = extern struct {
         return list;
     }

-    // returns a pointer to just after the refcount.
-    // It is just after the refcount as an optimization for other shared code paths.
-    // For regular list, it just returns their bytes pointer.
-    // For seamless slices, it returns the pointer stored in capacity_or_ref_ptr.
-    pub fn getRefcountPtr(self: RocList) ?[*]u8 {
-        const list_ref_ptr = @intFromPtr(self.bytes);
-        const slice_ref_ptr = self.capacity_or_ref_ptr << 1;
+    // returns a pointer to the original allocation.
+    // This pointer points to the first element of the allocation.
+    // The pointer is to just after the refcount.
+    // For big lists, it just returns their bytes pointer.
+    // For seamless slices, it returns the pointer stored in capacity_or_alloc_ptr.
+    pub fn getAllocationPtr(self: RocList) ?[*]u8 {
+        const list_alloc_ptr = @intFromPtr(self.bytes);
+        const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
         const slice_mask = self.seamlessSliceMask();
-        const ref_ptr = (list_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
-        return @as(?[*]u8, @ptrFromInt(ref_ptr));
+        const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
+        return @as(?[*]u8, @ptrFromInt(alloc_ptr));
     }

     pub fn decref(self: RocList, alignment: u32) void {
         // We use the raw capacity to ensure we always decrement the refcount of seamless slices.
-        utils.decref(self.getRefcountPtr(), self.capacity_or_ref_ptr, alignment);
+        utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, alignment);
     }

     pub fn elements(self: RocList, comptime T: type) ?[*]T {
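Aside: getAllocationPtr works because a seamless slice stores its allocation pointer right-shifted by one, keeping the top bit free for the slice flag. The shift is lossless since the allocation is at least 2-byte aligned (the low bit is always zero), and shifting the stored word left by one restores the address while discarding the flag. A small Zig sketch of the round trip; illustrative, not part of the commit:

    const std = @import("std");

    const SEAMLESS_SLICE_BIT: usize = @as(usize, 1) << (@bitSizeOf(usize) - 1);

    fn storeAllocPtr(addr: usize) usize {
        std.debug.assert(addr % 2 == 0); // alignment guarantees a zero low bit
        return (addr >> 1) | SEAMLESS_SLICE_BIT;
    }

    fn loadAllocPtr(stored: usize) usize {
        // Shifting left by one restores the address; the flag in the top bit
        // is shifted out, matching `capacity_or_alloc_ptr << 1` above.
        return stored << 1;
    }

    test "allocation pointer round-trips through the tagged word" {
        const addr: usize = 0x10000040;
        try std.testing.expectEqual(addr, loadAllocPtr(storeAllocPtr(addr)));
    }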
@@ -187,7 +190,7 @@ pub const RocList = extern struct {
         return RocList{
             .bytes = utils.allocateWithRefcount(data_bytes, alignment),
             .length = length,
-            .capacity_or_ref_ptr = capacity,
+            .capacity_or_alloc_ptr = capacity,
         };
     }

@@ -204,7 +207,7 @@ pub const RocList = extern struct {
         return RocList{
             .bytes = utils.allocateWithRefcount(data_bytes, alignment),
             .length = length,
-            .capacity_or_ref_ptr = length,
+            .capacity_or_alloc_ptr = length,
         };
     }

@@ -216,13 +219,13 @@ pub const RocList = extern struct {
     ) RocList {
         if (self.bytes) |source_ptr| {
             if (self.isUnique() and !self.isSeamlessSlice()) {
-                const capacity = self.capacity_or_ref_ptr;
+                const capacity = self.capacity_or_alloc_ptr;
                 if (capacity >= new_length) {
-                    return RocList{ .bytes = self.bytes, .length = new_length, .capacity_or_ref_ptr = capacity };
+                    return RocList{ .bytes = self.bytes, .length = new_length, .capacity_or_alloc_ptr = capacity };
                 } else {
                     const new_capacity = utils.calculateCapacity(capacity, new_length, element_width);
                     const new_source = utils.unsafeReallocate(source_ptr, alignment, capacity, new_capacity, element_width);
-                    return RocList{ .bytes = new_source, .length = new_length, .capacity_or_ref_ptr = new_capacity };
+                    return RocList{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity };
                 }
             }
             return self.reallocateFresh(alignment, new_length, element_width);
@@ -500,8 +503,8 @@ pub fn listReleaseExcessCapacity(
     update_mode: UpdateMode,
 ) callconv(.C) RocList {
     const old_length = list.len();
-    // We use the direct list.capacity_or_ref_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
-    if ((update_mode == .InPlace or list.isUnique()) and list.capacity_or_ref_ptr == old_length) {
+    // We use the direct list.capacity_or_alloc_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
+    if ((update_mode == .InPlace or list.isUnique()) and list.capacity_or_alloc_ptr == old_length) {
         return list;
     } else if (old_length == 0) {
         list.decref(alignment);
@@ -649,14 +652,14 @@ pub fn listSublist(
         output.length = keep_len;
         return output;
     } else {
-        const list_ref_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT;
-        const slice_ref_ptr = list.capacity_or_ref_ptr;
+        const list_alloc_ptr = (@intFromPtr(source_ptr) >> 1) | SEAMLESS_SLICE_BIT;
+        const slice_alloc_ptr = list.capacity_or_alloc_ptr;
         const slice_mask = list.seamlessSliceMask();
-        const ref_ptr = (list_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
+        const alloc_ptr = (list_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
         return RocList{
             .bytes = source_ptr + start * element_width,
             .length = keep_len,
-            .capacity_or_ref_ptr = ref_ptr,
+            .capacity_or_alloc_ptr = alloc_ptr,
         };
     }
 }
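Aside: the mask selection in listSublist also makes slice-of-slice work. When the input is already a seamless slice, the stored allocation word is reused unchanged, so nested slices keep referencing the single original allocation. A Zig sketch of just that selection, with illustrative names:

    const std = @import("std");

    const SEAMLESS_SLICE_BIT: usize = @as(usize, 1) << (@bitSizeOf(usize) - 1);

    fn sliceMask(word: usize) usize {
        return @as(usize, @bitCast(@as(isize, @bitCast(word)) >> (@bitSizeOf(isize) - 1)));
    }

    // Mirrors listSublist: for a regular list, tag its bytes pointer as the
    // new allocation word; for a slice, reuse its stored word, so a slice of
    // a slice still points at the original allocation.
    fn sublistAllocWord(bytes_addr: usize, capacity_or_alloc_ptr: usize) usize {
        const from_list = (bytes_addr >> 1) | SEAMLESS_SLICE_BIT;
        const mask = sliceMask(capacity_or_alloc_ptr);
        return (from_list & ~mask) | (capacity_or_alloc_ptr & mask);
    }

    test "slicing a slice preserves the original allocation word" {
        const addr: usize = 0x20000;
        const first = sublistAllocWord(addr, 16); // from a regular list, capacity 16
        const second = sublistAllocWord(addr + 8, first); // slice of that slice
        try std.testing.expectEqual(first, second);
    }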
@@ -965,10 +968,10 @@ pub fn listCapacity(
     return list.getCapacity();
 }

-pub fn listRefcountPtr(
+pub fn listAllocationPtr(
     list: RocList,
 ) callconv(.C) ?[*]u8 {
-    return list.getRefcountPtr();
+    return list.getAllocationPtr();
 }

 test "listConcat: non-unique with unique overlapping" {

@@ -76,7 +76,7 @@ comptime {
     exportListFn(list.listSwap, "swap");
     exportListFn(list.listIsUnique, "is_unique");
     exportListFn(list.listCapacity, "capacity");
-    exportListFn(list.listRefcountPtr, "refcount_ptr");
+    exportListFn(list.listAllocationPtr, "allocation_ptr");
     exportListFn(list.listReleaseExcessCapacity, "release_excess_capacity");
 }

@@ -218,7 +218,7 @@ comptime {
     exportStrFn(str.strCloneTo, "clone_to");
     exportStrFn(str.withCapacity, "with_capacity");
     exportStrFn(str.strGraphemes, "graphemes");
-    exportStrFn(str.strRefcountPtr, "refcount_ptr");
+    exportStrFn(str.strAllocationPtr, "allocation_ptr");
     exportStrFn(str.strReleaseExcessCapacity, "release_excess_capacity");

     inline for (INTEGERS) |T| {

@@ -34,17 +34,21 @@ fn init_blank_small_string(comptime n: usize) [n]u8 {
 }

 pub const RocStr = extern struct {
-    str_bytes: ?[*]u8,
-    str_len: usize,
-    str_capacity: usize,
+    bytes: ?[*]u8,
+    length: usize,
+    // For big strs, contains the capacity.
+    // For seamless slices contains the pointer to the original allocation.
+    // This pointer is to the first character of the original string.
+    // Note we storing an allocation pointer, the pointer must be right shifted by one.
+    capacity_or_alloc_ptr: usize,

     pub const alignment = @alignOf(usize);

     pub inline fn empty() RocStr {
         return RocStr{
-            .str_len = 0,
-            .str_bytes = null,
-            .str_capacity = MASK,
+            .length = 0,
+            .bytes = null,
+            .capacity_or_alloc_ptr = MASK,
         };
     }

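Aside: after this rename, RocStr has the same three-word shape as RocList above, which is what lets strings and lists share the seamless-slice machinery. The remaining difference is where the flag lives: RocList keeps the slice bit in the top bit of capacity_or_alloc_ptr, while RocStr keeps it in the top bit of length, because the top bit of its capacity word already marks small strings. Sketched in Zig (illustrative, not part of the commit):

    // The shared layout of RocList and RocStr after this change.
    const Header = extern struct {
        bytes: ?[*]u8,
        length: usize, // RocStr: top bit doubles as the seamless-slice flag
        capacity_or_alloc_ptr: usize, // RocList: top bit = slice flag; RocStr: top bit = small-string flag
    };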
@@ -63,29 +67,29 @@ pub const RocStr = extern struct {
         const start_byte = @as([*]u8, @ptrCast(list.bytes)) + start;
         if (list.isSeamlessSlice()) {
             return RocStr{
-                .str_bytes = start_byte,
-                .str_len = count | SEAMLESS_SLICE_BIT,
-                .str_capacity = list.capacity_or_ref_ptr & (~SEAMLESS_SLICE_BIT),
+                .bytes = start_byte,
+                .length = count | SEAMLESS_SLICE_BIT,
+                .capacity_or_alloc_ptr = list.capacity_or_alloc_ptr & (~SEAMLESS_SLICE_BIT),
             };
         } else if (start == 0 and (update_mode == .InPlace or list.isUnique())) {
             // Rare case, we can take over the original list.
             return RocStr{
-                .str_bytes = start_byte,
-                .str_len = count,
-                .str_capacity = list.capacity_or_ref_ptr, // This is guaranteed to be a proper capacity.
+                .bytes = start_byte,
+                .length = count,
+                .capacity_or_alloc_ptr = list.capacity_or_alloc_ptr, // This is guaranteed to be a proper capacity.
             };
         } else {
             // Create seamless slice pointing to the list.
             return RocStr{
-                .str_bytes = start_byte,
-                .str_len = count | SEAMLESS_SLICE_BIT,
-                .str_capacity = @intFromPtr(list.bytes) >> 1,
+                .bytes = start_byte,
+                .length = count | SEAMLESS_SLICE_BIT,
+                .capacity_or_alloc_ptr = @intFromPtr(list.bytes) >> 1,
             };
         }
     }

     pub fn isSeamlessSlice(self: RocStr) bool {
-        return !self.isSmallStr() and @as(isize, @bitCast(self.str_len)) < 0;
+        return !self.isSmallStr() and @as(isize, @bitCast(self.length)) < 0;
     }

     pub fn fromSlice(slice: []const u8) RocStr {
@@ -96,9 +100,9 @@ pub const RocStr = extern struct {
         const first_element = utils.allocateWithRefcount(capacity, @sizeOf(usize));

         return RocStr{
-            .str_bytes = first_element,
-            .str_len = length,
-            .str_capacity = capacity,
+            .bytes = first_element,
+            .length = length,
+            .capacity_or_alloc_ptr = capacity,
         };
     }

@@ -140,27 +144,28 @@ pub const RocStr = extern struct {
     // Otherwise, it returns all zeros.
     // This is done without branching for optimization purposes.
     pub fn seamlessSliceMask(self: RocStr) usize {
-        return @as(usize, @bitCast(@as(isize, @bitCast(self.str_len)) >> (@bitSizeOf(isize) - 1)));
+        return @as(usize, @bitCast(@as(isize, @bitCast(self.length)) >> (@bitSizeOf(isize) - 1)));
     }

-    // returns a pointer to just after the refcount.
-    // It is just after the refcount as an optimization for other shared code paths.
-    // For regular list, it just returns their bytes pointer.
-    // For seamless slices, it returns the pointer stored in capacity_or_ref_ptr.
-    pub fn getRefcountPtr(self: RocStr) ?[*]u8 {
-        const str_ref_ptr = @intFromPtr(self.str_bytes);
-        const slice_ref_ptr = self.str_capacity << 1;
+    // returns a pointer to the original allocation.
+    // This pointer points to the first element of the allocation.
+    // The pointer is to just after the refcount.
+    // For big strings, it just returns their bytes pointer.
+    // For seamless slices, it returns the pointer stored in capacity_or_alloc_ptr.
+    // This does not return a valid value if the input is a small string.
+    pub fn getAllocationPtr(self: RocStr) ?[*]u8 {
+        const str_alloc_ptr = @intFromPtr(self.bytes);
+        const slice_alloc_ptr = self.capacity_or_alloc_ptr << 1;
         const slice_mask = self.seamlessSliceMask();
-        const ref_ptr = (str_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
-        return @as(?[*]u8, @ptrFromInt(ref_ptr));
+        const alloc_ptr = (str_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
+        return @as(?[*]u8, @ptrFromInt(alloc_ptr));
     }

     pub fn incref(self: RocStr, n: usize) void {
         if (!self.isSmallStr()) {
-            const ref_ptr = self.getRefcountPtr();
-            if (ref_ptr != null) {
-                const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(ref_ptr)));
+            const alloc_ptr = self.getAllocationPtr();
+            if (alloc_ptr != null) {
+                const isizes: [*]isize = @as([*]isize, @ptrCast(@alignCast(alloc_ptr)));
                 utils.increfRcPtrC(@as(*isize, @ptrCast(isizes - 1)), @as(isize, @intCast(n)));
             }
         }
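Aside: incref above reaches the refcount by stepping one word back from the allocation pointer (isizes - 1); allocateWithRefcount places the refcount immediately before the first element, so the allocation pointer handed around in this file points just past it. A standalone Zig sketch of that layout, illustrative only:

    const std = @import("std");

    test "refcount sits one word before the data pointer" {
        var buffer: [4]isize = .{ 1, 0, 0, 0 }; // buffer[0] plays the refcount
        const data: [*]isize = @as([*]isize, @ptrCast(&buffer)) + 1;
        const refcount_ptr: *isize = @ptrCast(data - 1);
        refcount_ptr.* += 1; // a minimal "incref"
        try std.testing.expectEqual(@as(isize, 2), buffer[0]);
    }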
@@ -168,13 +173,13 @@ pub const RocStr = extern struct {

     pub fn decref(self: RocStr) void {
         if (!self.isSmallStr()) {
-            utils.decref(self.getRefcountPtr(), self.str_capacity, RocStr.alignment);
+            utils.decref(self.getAllocationPtr(), self.capacity_or_alloc_ptr, RocStr.alignment);
         }
     }

     pub fn eq(self: RocStr, other: RocStr) bool {
         // If they are byte-for-byte equal, they're definitely equal!
-        if (self.str_bytes == other.str_bytes and self.str_len == other.str_len and self.str_capacity == other.str_capacity) {
+        if (self.bytes == other.bytes and self.length == other.length and self.capacity_or_alloc_ptr == other.capacity_or_alloc_ptr) {
             return true;
         }

@@ -208,12 +213,12 @@ pub const RocStr = extern struct {
             // just return the bytes
             return str;
         } else {
-            var new_str = RocStr.allocateBig(str.str_len, str.str_len);
+            var new_str = RocStr.allocateBig(str.length, str.length);

-            var old_bytes: [*]u8 = @as([*]u8, @ptrCast(str.str_bytes));
-            var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_str.str_bytes));
+            var old_bytes: [*]u8 = @as([*]u8, @ptrCast(str.bytes));
+            var new_bytes: [*]u8 = @as([*]u8, @ptrCast(new_str.bytes));

-            @memcpy(new_bytes[0..str.str_len], old_bytes[0..str.str_len]);
+            @memcpy(new_bytes[0..str.length], old_bytes[0..str.length]);

             return new_str;
         }
@@ -230,7 +235,7 @@ pub const RocStr = extern struct {
         return self.reallocateFresh(new_length);
     }

-    if (self.str_bytes) |source_ptr| {
+    if (self.bytes) |source_ptr| {
         if (old_capacity > new_length) {
             var output = self;
             output.setLen(new_length);
@@ -245,7 +250,7 @@ pub const RocStr = extern struct {
             element_width,
         );

-        return RocStr{ .str_bytes = new_source, .str_len = new_length, .str_capacity = new_capacity };
+        return RocStr{ .bytes = new_source, .length = new_length, .capacity_or_alloc_ptr = new_capacity };
     }
     return self.reallocateFresh(new_length);
 }
@@ -295,7 +300,7 @@ pub const RocStr = extern struct {
     }

     pub fn isSmallStr(self: RocStr) bool {
-        return @as(isize, @bitCast(self.str_capacity)) < 0;
+        return @as(isize, @bitCast(self.capacity_or_alloc_ptr)) < 0;
     }

     test "isSmallStr: returns true for empty string" {
@@ -313,7 +318,7 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             return self.asArray()[@sizeOf(RocStr) - 1] ^ 0b1000_0000;
         } else {
-            return self.str_len & (~SEAMLESS_SLICE_BIT);
+            return self.length & (~SEAMLESS_SLICE_BIT);
         }
     }

@@ -321,7 +326,7 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             self.asU8ptrMut()[@sizeOf(RocStr) - 1] = @as(u8, @intCast(length)) | 0b1000_0000;
         } else {
-            self.str_len = length | (SEAMLESS_SLICE_BIT & self.str_len);
+            self.length = length | (SEAMLESS_SLICE_BIT & self.length);
         }
     }

@@ -329,9 +334,9 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             return SMALL_STR_MAX_LENGTH;
         } else if (self.isSeamlessSlice()) {
-            return self.str_len & (~SEAMLESS_SLICE_BIT);
+            return self.length & (~SEAMLESS_SLICE_BIT);
         } else {
-            return self.str_capacity;
+            return self.capacity_or_alloc_ptr;
         }
     }

@@ -340,7 +345,7 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             return self.asArray()[index];
         } else {
-            const bytes = self.str_bytes orelse unreachable;
+            const bytes = self.bytes orelse unreachable;

             return bytes[index];
         }
@@ -369,7 +374,7 @@ pub const RocStr = extern struct {
             return utils.REFCOUNT_ONE;
         }

-        const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(self.str_bytes)));
+        const ptr: [*]usize = @as([*]usize, @ptrCast(@alignCast(self.bytes)));
         return (ptr - 1)[0];
     }

@@ -393,7 +398,7 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             return @as([*]const u8, @ptrCast(self));
         } else {
-            return @as([*]const u8, @ptrCast(self.str_bytes));
+            return @as([*]const u8, @ptrCast(self.bytes));
         }
     }

@@ -401,7 +406,7 @@ pub const RocStr = extern struct {
         if (self.isSmallStr()) {
             return @as([*]u8, @ptrCast(self));
         } else {
-            return @as([*]u8, @ptrCast(self.str_bytes));
+            return @as([*]u8, @ptrCast(self.bytes));
         }
     }

@@ -516,13 +521,13 @@ pub const RocStr = extern struct {
         const content = "012345678901234567890123456789";
         const roc_str1 = RocStr.init(content, content.len);
         const roc_str2 = RocStr.init(content, content.len);
-        try expect(roc_str1.str_bytes != roc_str2.str_bytes);
+        try expect(roc_str1.bytes != roc_str2.bytes);

         // Insert garbage after the end of each string
-        roc_str1.str_bytes.?[30] = '!';
-        roc_str1.str_bytes.?[31] = '!';
-        roc_str2.str_bytes.?[30] = '-';
-        roc_str2.str_bytes.?[31] = '-';
+        roc_str1.bytes.?[30] = '!';
+        roc_str1.bytes.?[31] = '!';
+        roc_str2.bytes.?[30] = '-';
+        roc_str2.bytes.?[31] = '-';

         defer {
             roc_str1.decref();
@@ -553,13 +558,13 @@ pub fn strToScalarsC(str: RocStr) callconv(.C) RocList {
 }

 fn strToScalars(string: RocStr) callconv(.C) RocList {
-    const str_len = string.len();
+    const len = string.len();

-    if (str_len == 0) {
+    if (len == 0) {
         return RocList.empty();
     }

-    var capacity = str_len;
+    var capacity = len;

     if (!string.isSmallStr()) {
         capacity = string.getCapacity();
@@ -576,7 +581,7 @@ fn strToScalars(string: RocStr) callconv(.C) RocList {
     var src_index: usize = 0;
     var answer_index: usize = 0;

-    while (src_index < str_len) {
+    while (src_index < len) {
         src_index += writeNextScalar(string, src_index, answer_elems, answer_index);
         answer_index += 1;
     }
@@ -846,13 +851,13 @@ fn initFromSmallStr(slice_bytes: [*]u8, len: usize, _: usize) RocStr {
     return RocStr.init(slice_bytes, len);
 }

-// The ref_ptr must already be shifted to be ready for storing in a seamless slice.
-fn initFromBigStr(slice_bytes: [*]u8, len: usize, ref_ptr: usize) RocStr {
+// The alloc_ptr must already be shifted to be ready for storing in a seamless slice.
+fn initFromBigStr(slice_bytes: [*]u8, len: usize, alloc_ptr: usize) RocStr {
     // Here we can make seamless slices instead of copying to a new small str.
     return RocStr{
-        .str_bytes = slice_bytes,
-        .str_len = len | SEAMLESS_SLICE_BIT,
-        .str_capacity = ref_ptr,
+        .bytes = slice_bytes,
+        .length = len | SEAMLESS_SLICE_BIT,
+        .capacity_or_alloc_ptr = alloc_ptr,
     };
 }

@@ -861,9 +866,9 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
     var slice_start_index: usize = 0;
     var str_index: usize = 0;

-    const str_bytes = string.asU8ptr();
-    const str_len = string.len();
-    const ref_ptr = @intFromPtr(string.getRefcountPtr()) >> 1;
+    const bytes = string.asU8ptr();
+    const len = string.len();
+    const alloc_ptr = @intFromPtr(string.getAllocationPtr()) >> 1;
     const init_fn = if (string.isSmallStr())
         &initFromSmallStr
     else
@@ -872,8 +877,8 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
     const delimiter_bytes_ptrs = delimiter.asU8ptr();
     const delimiter_len = delimiter.len();

-    if (str_len >= delimiter_len and delimiter_len > 0) {
-        const end_index: usize = str_len - delimiter_len + 1;
+    if (len >= delimiter_len and delimiter_len > 0) {
+        const end_index: usize = len - delimiter_len + 1;
         while (str_index <= end_index) {
             var delimiter_index: usize = 0;
             var matches_delimiter = true;
@@ -881,12 +886,12 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
             while (delimiter_index < delimiter_len) {
                 var delimiterChar = delimiter_bytes_ptrs[delimiter_index];

-                if (str_index + delimiter_index >= str_len) {
+                if (str_index + delimiter_index >= len) {
                     matches_delimiter = false;
                     break;
                 }

-                var strChar = str_bytes[str_index + delimiter_index];
+                var strChar = bytes[str_index + delimiter_index];

                 if (delimiterChar != strChar) {
                     matches_delimiter = false;
@@ -899,7 +904,7 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
             if (matches_delimiter) {
                 const segment_len: usize = str_index - slice_start_index;

-                array[ret_array_index] = init_fn(@constCast(str_bytes) + slice_start_index, segment_len, ref_ptr);
+                array[ret_array_index] = init_fn(@constCast(bytes) + slice_start_index, segment_len, alloc_ptr);
                 slice_start_index = str_index + delimiter_len;
                 ret_array_index += 1;
                 str_index += delimiter_len;
@@ -909,7 +914,7 @@ fn strSplitHelp(array: [*]RocStr, string: RocStr, delimiter: RocStr) void {
         }
     }

-    array[ret_array_index] = init_fn(@constCast(str_bytes) + slice_start_index, str_len - slice_start_index, ref_ptr);
+    array[ret_array_index] = init_fn(@constCast(bytes) + slice_start_index, len - slice_start_index, alloc_ptr);

     if (!string.isSmallStr()) {
         // Correct refcount for all of the splits made.
@@ -1240,17 +1245,17 @@ test "strSplitHelp: overlapping delimiter 2" {
 // needs to be broken into, so that we can allocate a array
 // of that size. It always returns at least 1.
 pub fn countSegments(string: RocStr, delimiter: RocStr) callconv(.C) usize {
-    const str_bytes = string.asU8ptr();
-    const str_len = string.len();
+    const bytes = string.asU8ptr();
+    const len = string.len();

     const delimiter_bytes_ptrs = delimiter.asU8ptr();
     const delimiter_len = delimiter.len();

     var count: usize = 1;

-    if (str_len >= delimiter_len and delimiter_len > 0) {
+    if (len >= delimiter_len and delimiter_len > 0) {
         var str_index: usize = 0;
-        const end_cond: usize = str_len - delimiter_len + 1;
+        const end_cond: usize = len - delimiter_len + 1;

         while (str_index < end_cond) {
             var delimiter_index: usize = 0;
@@ -1259,7 +1264,7 @@ pub fn countSegments(string: RocStr, delimiter: RocStr) callconv(.C) usize {

             while (delimiter_index < delimiter_len) {
                 const delimiterChar = delimiter_bytes_ptrs[delimiter_index];
-                const strChar = str_bytes[str_index + delimiter_index];
+                const strChar = bytes[str_index + delimiter_index];

                 if (delimiterChar != strChar) {
                     matches_delimiter = false;
@@ -1409,7 +1414,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
     var index: usize = 0;
     var last_codepoint_len: u8 = 0;

-    const ref_ptr = @intFromPtr(roc_str.getRefcountPtr()) >> 1;
+    const alloc_ptr = @intFromPtr(roc_str.getAllocationPtr()) >> 1;
     const init_fn = if (roc_str.isSmallStr())
         &initFromSmallStr
     else
@@ -1425,7 +1430,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
         if (opt_last_codepoint) |last_codepoint| {
             var did_break = grapheme.isGraphemeBreak(last_codepoint, cur_codepoint, &break_state);
             if (did_break) {
-                graphemes[index] = init_fn(@constCast(slice.ptr), last_codepoint_len, ref_ptr);
+                graphemes[index] = init_fn(@constCast(slice.ptr), last_codepoint_len, alloc_ptr);
                 slice = slice[last_codepoint_len..];
                 index += 1;
                 break_state = null;
@@ -1436,7 +1441,7 @@ pub fn strGraphemes(roc_str: RocStr) callconv(.C) RocList {
         opt_last_codepoint = cur_codepoint;
     }
     // Append last grapheme
-    graphemes[index] = init_fn(@constCast(slice.ptr), slice.len, ref_ptr);
+    graphemes[index] = init_fn(@constCast(slice.ptr), slice.len, alloc_ptr);

     if (!roc_str.isSmallStr()) {
         // Correct refcount for all of the splits made.
@@ -1507,7 +1512,7 @@ pub fn substringUnsafe(string: RocStr, start: usize, length: usize) callconv(.C)
         const slice = string.asSlice()[start .. start + length];
         return RocStr.fromSlice(slice);
     }
-    if (string.str_bytes) |source_ptr| {
+    if (string.bytes) |source_ptr| {
         if (start == 0 and string.isUnique()) {
             var output = string;
             output.setLen(length);
@@ -1515,14 +1520,14 @@ pub fn substringUnsafe(string: RocStr, start: usize, length: usize) callconv(.C)
     } else {
         // Shifting right by 1 is required to avoid the highest bit of capacity being set.
         // If it was set, the slice would get interpreted as a small string.
-        const str_ref_ptr = (@intFromPtr(source_ptr) >> 1);
-        const slice_ref_ptr = string.str_capacity;
+        const str_alloc_ptr = (@intFromPtr(source_ptr) >> 1);
+        const slice_alloc_ptr = string.capacity_or_alloc_ptr;
         const slice_mask = string.seamlessSliceMask();
-        const ref_ptr = (str_ref_ptr & ~slice_mask) | (slice_ref_ptr & slice_mask);
+        const alloc_ptr = (str_alloc_ptr & ~slice_mask) | (slice_alloc_ptr & slice_mask);
         return RocStr{
-            .str_bytes = source_ptr + start,
-            .str_len = length | SEAMLESS_SLICE_BIT,
-            .str_capacity = ref_ptr,
+            .bytes = source_ptr + start,
+            .length = length | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = alloc_ptr,
         };
     }
 }
@@ -1611,9 +1616,9 @@ pub fn repeat(string: RocStr, count: usize) callconv(.C) RocStr {

 // Str.startsWithScalar
 pub fn startsWithScalar(string: RocStr, prefix: u32) callconv(.C) bool {
-    const str_len = string.len();
+    const len = string.len();

-    if (str_len == 0) {
+    if (len == 0) {
         return false;
     }

@@ -1777,7 +1782,7 @@ test "RocStr.concat: small concat small" {
 pub const RocListStr = extern struct {
     list_elements: ?[*]RocStr,
     list_length: usize,
-    list_capacity_or_ref_ptr: usize,
+    list_capacity_or_alloc_ptr: usize,
 };

 // Str.joinWith
@@ -1785,7 +1790,7 @@ pub fn strJoinWithC(list: RocList, separator: RocStr) callconv(.C) RocStr {
     const roc_list_str = RocListStr{
         .list_elements = @as(?[*]RocStr, @ptrCast(@alignCast(list.bytes))),
         .list_length = list.length,
-        .list_capacity_or_ref_ptr = list.capacity_or_ref_ptr,
+        .list_capacity_or_alloc_ptr = list.capacity_or_alloc_ptr,
     };

     return @call(.always_inline, strJoinWith, .{ roc_list_str, separator });
@@ -1847,7 +1852,7 @@ test "RocStr.joinWith: result is big" {
     var elements: [3]RocStr = .{ roc_elem, roc_elem, roc_elem };
     const list = RocListStr{
         .list_length = 3,
-        .list_capacity_or_ref_ptr = 3,
+        .list_capacity_or_alloc_ptr = 3,
         .list_elements = @as([*]RocStr, @ptrCast(&elements)),
     };

@@ -1878,10 +1883,10 @@ inline fn strToBytes(arg: RocStr) RocList {

         @memcpy(ptr[0..length], arg.asU8ptr()[0..length]);

-        return RocList{ .length = length, .bytes = ptr, .capacity_or_ref_ptr = length };
+        return RocList{ .length = length, .bytes = ptr, .capacity_or_alloc_ptr = length };
     } else {
-        const is_seamless_slice = arg.str_len & SEAMLESS_SLICE_BIT;
-        return RocList{ .length = length, .bytes = arg.str_bytes, .capacity_or_ref_ptr = arg.str_capacity | is_seamless_slice };
+        const is_seamless_slice = arg.length & SEAMLESS_SLICE_BIT;
+        return RocList{ .length = length, .bytes = arg.bytes, .capacity_or_alloc_ptr = arg.capacity_or_alloc_ptr | is_seamless_slice };
     }
 }

@@ -2042,7 +2047,7 @@ pub const Utf8ByteProblem = enum(u8) {
 };

 fn validateUtf8Bytes(bytes: [*]u8, length: usize) FromUtf8Result {
-    return fromUtf8Range(RocList{ .bytes = bytes, .length = length, .capacity_or_ref_ptr = length }, 0, length, .Immutable);
+    return fromUtf8Range(RocList{ .bytes = bytes, .length = length, .capacity_or_alloc_ptr = length }, 0, length, .Immutable);
 }

 fn validateUtf8BytesX(str: RocList) FromUtf8Result {
@@ -2123,10 +2128,10 @@ test "validateUtf8Bytes: unicode ∆ in middle of array" {

 fn expectErr(list: RocList, index: usize, err: Utf8DecodeError, problem: Utf8ByteProblem) !void {
     const str_ptr = @as([*]u8, @ptrCast(list.bytes));
-    const str_len = list.length;
+    const len = list.length;

-    try expectError(err, numberOfNextCodepointBytes(str_ptr, str_len, index));
-    try expectEqual(toErrUtf8ByteResponse(index, problem), validateUtf8Bytes(str_ptr, str_len));
+    try expectError(err, numberOfNextCodepointBytes(str_ptr, len, index));
+    try expectEqual(toErrUtf8ByteResponse(index, problem), validateUtf8Bytes(str_ptr, len));
 }

 test "validateUtf8Bytes: invalid start byte" {
@@ -2274,22 +2279,22 @@ pub fn strTrim(input_string: RocStr) callconv(.C) RocStr {
         // Big and unique with no leading bytes to remove.
         // Just take ownership and shrink the length.
         var new_string = string;
-        new_string.str_len = new_len;
+        new_string.length = new_len;

         return new_string;
     } else if (string.isSeamlessSlice()) {
         // Already a seamless slice, just update the range.
         return RocStr{
-            .str_bytes = bytes_ptr + leading_bytes,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = string.str_capacity,
+            .bytes = bytes_ptr + leading_bytes,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
         };
     } else {
         // Not unique or removing leading bytes, just make a slice.
         return RocStr{
-            .str_bytes = bytes_ptr + leading_bytes,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = @intFromPtr(bytes_ptr) >> 1,
+            .bytes = bytes_ptr + leading_bytes,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
         };
     }
 }
@@ -2322,22 +2327,22 @@ pub fn strTrimStart(input_string: RocStr) callconv(.C) RocStr {
         // Big and unique with no leading bytes to remove.
         // Just take ownership and shrink the length.
         var new_string = string;
-        new_string.str_len = new_len;
+        new_string.length = new_len;

         return new_string;
     } else if (string.isSeamlessSlice()) {
         // Already a seamless slice, just update the range.
         return RocStr{
-            .str_bytes = bytes_ptr + leading_bytes,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = string.str_capacity,
+            .bytes = bytes_ptr + leading_bytes,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
         };
     } else {
         // Not unique or removing leading bytes, just make a slice.
         return RocStr{
-            .str_bytes = bytes_ptr + leading_bytes,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = @intFromPtr(bytes_ptr) >> 1,
+            .bytes = bytes_ptr + leading_bytes,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
         };
     }
 }
@@ -2370,22 +2375,22 @@ pub fn strTrimEnd(input_string: RocStr) callconv(.C) RocStr {
         // Big and unique with no leading bytes to remove.
         // Just take ownership and shrink the length.
         var new_string = string;
-        new_string.str_len = new_len;
+        new_string.length = new_len;

         return new_string;
     } else if (string.isSeamlessSlice()) {
         // Already a seamless slice, just update the range.
         return RocStr{
-            .str_bytes = bytes_ptr,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = string.str_capacity,
+            .bytes = bytes_ptr,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = string.capacity_or_alloc_ptr,
         };
     } else {
         // Not unique, just make a slice.
         return RocStr{
-            .str_bytes = bytes_ptr,
-            .str_len = new_len | SEAMLESS_SLICE_BIT,
-            .str_capacity = @intFromPtr(bytes_ptr) >> 1,
+            .bytes = bytes_ptr,
+            .length = new_len | SEAMLESS_SLICE_BIT,
+            .capacity_or_alloc_ptr = @intFromPtr(bytes_ptr) >> 1,
         };
     }
 }
@@ -2885,7 +2890,7 @@ pub fn strCloneTo(
     const slice = string.asSlice();

     var relative = string;
-    relative.str_bytes = @as(?[*]u8, @ptrFromInt(extra_offset)); // i.e. just after the string struct
+    relative.bytes = @as(?[*]u8, @ptrFromInt(extra_offset)); // i.e. just after the string struct

     // write the string struct
     const array = relative.asArray();
@@ -2898,17 +2903,17 @@ pub fn strCloneTo(
     }
 }

-pub fn strRefcountPtr(
+pub fn strAllocationPtr(
     string: RocStr,
 ) callconv(.C) ?[*]u8 {
-    return string.getRefcountPtr();
+    return string.getAllocationPtr();
 }

 pub fn strReleaseExcessCapacity(
     string: RocStr,
 ) callconv(.C) RocStr {
     const old_length = string.len();
-    // We use the direct list.capacity_or_ref_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
+    // We use the direct list.capacity_or_alloc_ptr to make sure both that there is no extra capacity and that it isn't a seamless slice.
     if (string.isSmallStr()) {
         // SmallStr has no excess capacity.
         return string;

@@ -7,6 +7,7 @@ interface Dict
         clear,
         capacity,
         reserve,
+        releaseExcessCapacity,
         len,
         isEmpty,
         get,
@@ -155,7 +156,7 @@ withCapacity = \requested ->
     empty {}
     |> reserve requested

-# Enlarge the dictionary for at least capacity additional elements
+## Enlarge the dictionary for at least capacity additional elements
 reserve : Dict k v, Nat -> Dict k v
 reserve = \@Dict { buckets, data, maxBucketCapacity: originalMaxBucketCapacity, maxLoadFactor, shifts }, requested ->
     currentSize = List.len data
@@ -177,6 +178,28 @@
     else
         @Dict { buckets, data, maxBucketCapacity: originalMaxBucketCapacity, maxLoadFactor, shifts }

+## Shrink the memory footprint of a dictionary such that capacity is as small as possible.
+## This function will require regenerating the metadata if the size changes.
+## There will still be some overhead due to dictionary metadata always being a power of 2.
+releaseExcessCapacity : Dict k v -> Dict k v
+releaseExcessCapacity = \@Dict { buckets, data, maxBucketCapacity: originalMaxBucketCapacity, maxLoadFactor, shifts } ->
+    size = List.len data
+
+    # NOTE: If we want, we technically could increase the load factor here to potentially minimize size more.
+    minShifts = calcShiftsForSize (Num.toU64 size) maxLoadFactor
+    if minShifts < shifts then
+        (buckets0, maxBucketCapacity) = allocBucketsFromShift minShifts maxLoadFactor
+        buckets1 = fillBucketsFromData buckets0 data minShifts
+        @Dict {
+            buckets: buckets1,
+            data: List.releaseExcessCapacity data,
+            maxBucketCapacity,
+            maxLoadFactor,
+            shifts: minShifts,
+        }
+    else
+        @Dict { buckets, data, maxBucketCapacity: originalMaxBucketCapacity, maxLoadFactor, shifts }
+
 ## Returns the max number of elements the dictionary can hold before requiring a rehash.
 ## ```
 ## foodDict =

@@ -4,6 +4,7 @@ interface Set
         empty,
         withCapacity,
         reserve,
+        releaseExcessCapacity,
         single,
         walk,
         walkUntil,
@@ -83,11 +84,18 @@ withCapacity : Nat -> Set *
 withCapacity = \cap ->
     @Set (Dict.withCapacity cap)

-# Enlarge the set for at least capacity additional elements
+## Enlarge the set for at least capacity additional elements
 reserve : Set k, Nat -> Set k
 reserve = \@Set dict, requested ->
     @Set (Dict.reserve dict requested)

+## Shrink the memory footprint of a set such that capacity is as small as possible.
+## This function will require regenerating the metadata if the size changes.
+## There will still be some overhead due to dictionary metadata always being a power of 2.
+releaseExcessCapacity : Set k -> Set k
+releaseExcessCapacity = \@Set dict ->
+    @Set (Dict.releaseExcessCapacity dict)
+
 ## Creates a new `Set` with a single value.
 ## ```
 ## singleItemSet = Set.single "Apple"

@@ -370,7 +370,7 @@ pub const STR_GET_SCALAR_UNSAFE: &str = "roc_builtins.str.get_scalar_unsafe";
 pub const STR_CLONE_TO: &str = "roc_builtins.str.clone_to";
 pub const STR_WITH_CAPACITY: &str = "roc_builtins.str.with_capacity";
 pub const STR_GRAPHEMES: &str = "roc_builtins.str.graphemes";
-pub const STR_REFCOUNT_PTR: &str = "roc_builtins.str.refcount_ptr";
+pub const STR_ALLOCATION_PTR: &str = "roc_builtins.str.allocation_ptr";
 pub const STR_RELEASE_EXCESS_CAPACITY: &str = "roc_builtins.str.release_excess_capacity";

 pub const LIST_MAP: &str = "roc_builtins.list.map";
@@ -390,7 +390,7 @@ pub const LIST_PREPEND: &str = "roc_builtins.list.prepend";
 pub const LIST_APPEND_UNSAFE: &str = "roc_builtins.list.append_unsafe";
 pub const LIST_RESERVE: &str = "roc_builtins.list.reserve";
 pub const LIST_CAPACITY: &str = "roc_builtins.list.capacity";
-pub const LIST_REFCOUNT_PTR: &str = "roc_builtins.list.refcount_ptr";
+pub const LIST_ALLOCATION_PTR: &str = "roc_builtins.list.allocation_ptr";
 pub const LIST_RELEASE_EXCESS_CAPACITY: &str = "roc_builtins.list.release_excess_capacity";

 pub const DEC_ABS: &str = "roc_builtins.dec.abs";

@@ -302,25 +302,14 @@ fn should_outdent(mut rhs: &TypeAnnotation) -> bool {
     }
 }

-fn fmt_dbg_in_def<'a>(
-    buf: &mut Buf,
-    condition: &'a Loc<Expr<'a>>,
-    is_multiline: bool,
-    indent: u16,
-) {
+fn fmt_dbg_in_def<'a>(buf: &mut Buf, condition: &'a Loc<Expr<'a>>, _: bool, indent: u16) {
     buf.ensure_ends_with_newline();
     buf.indent(indent);
     buf.push_str("dbg");

-    let return_indent = if is_multiline {
-        buf.newline();
-        indent + INDENT
-    } else {
-        buf.spaces(1);
-        indent
-    };
+    buf.spaces(1);

-    condition.format(buf, return_indent);
+    condition.format(buf, indent);
 }

 fn fmt_expect<'a>(buf: &mut Buf, condition: &'a Loc<Expr<'a>>, is_multiline: bool, indent: u16) {

@@ -61,7 +61,7 @@ impl<'a> Formattable for Expr<'a> {
             Expect(condition, continuation) => {
                 condition.is_multiline() || continuation.is_multiline()
             }
-            Dbg(condition, continuation) => condition.is_multiline() || continuation.is_multiline(),
+            Dbg(condition, _) => condition.is_multiline(),
             LowLevelDbg(_, _, _) => unreachable!(
                 "LowLevelDbg should only exist after desugaring, not during formatting"
             ),
@@ -956,22 +956,16 @@ fn fmt_dbg<'a>(
     buf: &mut Buf,
     condition: &'a Loc<Expr<'a>>,
     continuation: &'a Loc<Expr<'a>>,
-    is_multiline: bool,
+    _: bool,
     indent: u16,
 ) {
     buf.ensure_ends_with_newline();
     buf.indent(indent);
     buf.push_str("dbg");

-    let return_indent = if is_multiline {
-        buf.newline();
-        indent + INDENT
-    } else {
-        buf.spaces(1);
-        indent
-    };
+    buf.spaces(1);

-    condition.format(buf, return_indent);
+    condition.format(buf, indent);

     // Always put a blank line after the `dbg` line(s)
     buf.ensure_ends_with_blank_line();

@@ -710,7 +710,7 @@ impl LlvmBackendMode {
         match self {
             LlvmBackendMode::Binary => false,
             LlvmBackendMode::BinaryDev => false,
-            LlvmBackendMode::BinaryGlue => false,
+            LlvmBackendMode::BinaryGlue => true,
             LlvmBackendMode::GenTest => true,
             LlvmBackendMode::WasmGenTest => true,
             LlvmBackendMode::CliTest => true,
@@ -1058,46 +1058,20 @@ pub fn module_from_builtins<'ctx>(
     let module = Module::parse_bitcode_from_buffer(&memory_buffer, ctx)
         .unwrap_or_else(|err| panic!("Unable to import builtins bitcode. LLVM error: {err:?}"));

-    // In my testing, this adds about 20ms extra to compilation.
+    // In testing, this adds about 20ms extra to compilation.
     // Long term it would be best if we could do this on the zig side.
-    // This change enables us to dce all the parts of compiler-rt we don't use.
-    // That said, it would be better to dce them before roc app compiltation time.
-    // Anything not depended on by a `roc_builtin.` function could alread by DCE'd theoretically.
-    // The core issue is that we have to properly labael certain functions as private and DCE them.
-    // Otherwise, now that zig bundles all of compiler-rt, we would optimize and compile the entire library.
+    // Anything not depended on by a `roc_builtin.` function could already by DCE'd theoretically.
+    // That said, this workaround is good enough and fixes compilations times.

-    // Also, must_keep is the functions we depend on that would normally be provide by libc.
-    // They are magically linked to by llvm builtins, so we must specify that they can't be DCE'd.
-    let must_keep = [
-        "_fltused",
-        "floorf",
-        "memcpy",
-        "memset",
-        // Roc special functions
-        "__roc_force_longjmp",
-        "__roc_force_setjmp",
-        "set_shared_buffer",
-    ];
+    let must_keep = ["_fltused", "floorf", "memcpy", "memset"];
     for func in module.get_functions() {
         let has_definition = func.count_basic_blocks() > 0;
         let name = func.get_name().to_string_lossy();
         if has_definition
             && !name.starts_with("roc_builtins.")
             && !must_keep.contains(&name.as_ref())
         {
             func.set_linkage(Linkage::Private);
         }
     }

-    // Note, running DCE here is faster then waiting until full app DCE.
-    let mpm = PassManager::create(());
-    mpm.add_global_dce_pass();
-    mpm.run_on(&module);
-
-    // Now that the unused compiler-rt functions have been removed,
-    // mark that the builtin functions are allowed to be DCE'd if they aren't used.
-    for func in module.get_functions() {
-        let name = func.get_name().to_string_lossy();
-        if name.starts_with("roc_builtins.") {
-            if has_definition && !must_keep.contains(&name.as_ref()) {
-                func.set_linkage(Linkage::Private);
-            }
-        }
@@ -4463,31 +4437,68 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
         }
     }

-    let arguments_for_call = &arguments_for_call.into_bump_slice();
-
     let call_result = if env.mode.returns_roc_result() {
-        debug_assert_eq!(args.len(), roc_function.get_params().len());
+        if args.len() == roc_function.get_params().len() {
+            let arguments_for_call = &arguments_for_call.into_bump_slice();

-        let dbg_loc = builder.get_current_debug_location().unwrap();
-        let roc_wrapper_function =
-            make_exception_catcher(env, layout_interner, roc_function, return_layout);
-        debug_assert_eq!(
-            arguments_for_call.len(),
-            roc_wrapper_function.get_params().len()
-        );
+            let dbg_loc = builder.get_current_debug_location().unwrap();
+            let roc_wrapper_function =
+                make_exception_catcher(env, layout_interner, roc_function, return_layout);
+            debug_assert_eq!(
+                arguments_for_call.len(),
+                roc_wrapper_function.get_params().len()
+            );

-        builder.position_at_end(entry);
-        builder.set_current_debug_location(dbg_loc);
+            builder.position_at_end(entry);
+            builder.set_current_debug_location(dbg_loc);

-        let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
-        call_direct_roc_function(
-            env,
-            layout_interner,
-            roc_function,
-            wrapped_layout,
-            arguments_for_call,
-        )
+            let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
+            call_direct_roc_function(
+                env,
+                layout_interner,
+                roc_function,
+                wrapped_layout,
+                arguments_for_call,
+            )
+        } else {
+            debug_assert_eq!(args.len() + 1, roc_function.get_params().len());
+
+            arguments_for_call.push(args[0]);
+
+            let arguments_for_call = &arguments_for_call.into_bump_slice();
+
+            let dbg_loc = builder.get_current_debug_location().unwrap();
+            let roc_wrapper_function =
+                make_exception_catcher(env, layout_interner, roc_function, return_layout);
+
+            builder.position_at_end(entry);
+            builder.set_current_debug_location(dbg_loc);
+
+            let wrapped_layout = roc_call_result_layout(env.arena, return_layout);
+            let call_result = call_direct_roc_function(
+                env,
+                layout_interner,
+                roc_wrapper_function,
+                wrapped_layout,
+                arguments_for_call,
+            );
+
+            let output_arg_index = 0;
+
+            let output_arg = c_function
+                .get_nth_param(output_arg_index as u32)
+                .unwrap()
+                .into_pointer_value();
+
+            env.builder.new_build_store(output_arg, call_result);
+
+            builder.new_build_return(None);
+
+            return c_function;
+        }
     } else {
+        let arguments_for_call = &arguments_for_call.into_bump_slice();
+
         call_direct_roc_function(
             env,
             layout_interner,
@@ -4511,6 +4522,7 @@ fn expose_function_to_host_help_c_abi_generic<'a, 'ctx>(
         output_arg,
         call_result,
     );
+
     builder.new_build_return(None);

     c_function

@@ -450,7 +450,7 @@ pub(crate) fn list_capacity_or_ref_ptr<'ctx>(

 // Gets a pointer to just after the refcount for a list or seamless slice.
 // The value is just after the refcount so that normal lists and seamless slices can share code paths easily.
-pub(crate) fn list_refcount_ptr<'ctx>(
+pub(crate) fn list_allocation_ptr<'ctx>(
     env: &Env<'_, 'ctx, '_>,
     wrapper_struct: StructValue<'ctx>,
 ) -> PointerValue<'ctx> {
@@ -459,7 +459,7 @@ pub(crate) fn list_refcount_ptr<'ctx>(
         &[wrapper_struct],
         &[],
         BitcodeReturns::Basic,
-        bitcode::LIST_REFCOUNT_PTR,
+        bitcode::LIST_ALLOCATION_PTR,
     )
     .into_pointer_value()
 }
@@ -864,7 +864,7 @@ pub(crate) fn decref<'ctx>(
     wrapper_struct: StructValue<'ctx>,
     alignment: u32,
 ) {
-    let refcount_ptr = list_refcount_ptr(env, wrapper_struct);
+    let refcount_ptr = list_allocation_ptr(env, wrapper_struct);

     crate::llvm::refcounting::decref_pointer_check_null(env, refcount_ptr, alignment);
 }

@@ -48,7 +48,7 @@ pub(crate) fn str_equal<'ctx>(

 // Gets a pointer to just after the refcount for a list or seamless slice.
 // The value is just after the refcount so that normal lists and seamless slices can share code paths easily.
-pub(crate) fn str_refcount_ptr<'ctx>(
+pub(crate) fn str_allocation_ptr<'ctx>(
     env: &Env<'_, 'ctx, '_>,
     value: BasicValueEnum<'ctx>,
 ) -> PointerValue<'ctx> {
@@ -57,7 +57,7 @@ pub(crate) fn str_refcount_ptr<'ctx>(
         &[value],
         &[],
         BitcodeReturns::Basic,
-        bitcode::STR_REFCOUNT_PTR,
+        bitcode::STR_ALLOCATION_PTR,
     )
     .into_pointer_value()
 }

@@ -80,10 +80,6 @@ pub(crate) fn add_intrinsics<'ctx>(ctx: &'ctx Context, module: &Module<'ctx>) {
     let i32_type = ctx.i32_type();
     let void_type = ctx.void_type();

-    if let Some(func) = module.get_function("__muloti4") {
-        func.set_linkage(Linkage::WeakAny);
-    }
-
     add_intrinsic(
         ctx,
         module,

@@ -5,9 +5,9 @@ use crate::llvm::build::{
     add_func, cast_basic_basic, get_tag_id, tag_pointer_clear_tag_id, Env, FAST_CALL_CONV,
 };
 use crate::llvm::build_list::{
-    incrementing_elem_loop, list_capacity_or_ref_ptr, list_refcount_ptr, load_list,
+    incrementing_elem_loop, list_allocation_ptr, list_capacity_or_ref_ptr, load_list,
 };
-use crate::llvm::build_str::str_refcount_ptr;
+use crate::llvm::build_str::str_allocation_ptr;
 use crate::llvm::convert::{basic_type_from_layout, zig_str_type, RocUnion};
 use crate::llvm::struct_::RocStruct;
 use bumpalo::collections::Vec;
@@ -866,7 +866,7 @@ fn modify_refcount_list_help<'a, 'ctx>(
     }

     let refcount_ptr =
-        PointerToRefcount::from_ptr_to_data(env, list_refcount_ptr(env, original_wrapper));
+        PointerToRefcount::from_ptr_to_data(env, list_allocation_ptr(env, original_wrapper));
     let call_mode = mode_to_call_mode(fn_val, mode);
     refcount_ptr.modify(call_mode, layout, env, layout_interner);

@@ -973,7 +973,7 @@ fn modify_refcount_str_help<'a, 'ctx>(
     builder.new_build_conditional_branch(is_big_and_non_empty, modification_block, cont_block);
     builder.position_at_end(modification_block);

-    let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, str_refcount_ptr(env, arg_val));
+    let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, str_allocation_ptr(env, arg_val));
     let call_mode = mode_to_call_mode(fn_val, mode);
     refcount_ptr.modify(
         call_mode,

@@ -1488,6 +1488,7 @@ define_builtins! {
         27 DICT_KEEP_IF: "keepIf"
         28 DICT_DROP_IF: "dropIf"
         29 DICT_RESERVE: "reserve"
+        30 DICT_RELEASE_EXCESS_CAPACITY: "releaseExcessCapacity"
     }
     9 SET: "Set" => {
         0 SET_SET: "Set" exposed_type=true // the Set.Set type alias
@@ -1514,6 +1515,7 @@ define_builtins! {
         21 SET_DROP_IF: "dropIf"
         22 SET_WITH_CAPACITY: "withCapacity"
         23 SET_RESERVE: "reserve"
+        24 SET_RELEASE_EXCESS_CAPACITY: "releaseExcessCapacity"
     }
     10 BOX: "Box" => {
         0 BOX_BOX_TYPE: "Box" exposed_apply_type=true // the Box.Box opaque type

@@ -1,29 +1,29 @@
-procedure Dict.1 (Dict.679):
-    let Dict.688 : List {U32, U32} = Array [];
-    let Dict.689 : List {[], []} = Array [];
-    let Dict.690 : U64 = 0i64;
-    let Dict.41 : Float32 = CallByName Dict.41;
-    let Dict.42 : U8 = CallByName Dict.42;
-    let Dict.687 : {List {U32, U32}, List {[], []}, U64, Float32, U8} = Struct {Dict.688, Dict.689, Dict.690, Dict.41, Dict.42};
-    ret Dict.687;
+procedure Dict.1 (Dict.692):
+    let Dict.701 : List {U32, U32} = Array [];
+    let Dict.702 : List {[], []} = Array [];
+    let Dict.703 : U64 = 0i64;
+    let Dict.42 : Float32 = CallByName Dict.42;
+    let Dict.43 : U8 = CallByName Dict.43;
+    let Dict.700 : {List {U32, U32}, List {[], []}, U64, Float32, U8} = Struct {Dict.701, Dict.702, Dict.703, Dict.42, Dict.43};
+    ret Dict.700;

-procedure Dict.4 (Dict.685):
-    let Dict.138 : List {[], []} = StructAtIndex 1 Dict.685;
-    let #Derived_gen.0 : List {U32, U32} = StructAtIndex 0 Dict.685;
+procedure Dict.4 (Dict.698):
+    let Dict.150 : List {[], []} = StructAtIndex 1 Dict.698;
+    let #Derived_gen.0 : List {U32, U32} = StructAtIndex 0 Dict.698;
     dec #Derived_gen.0;
-    let Dict.686 : U64 = CallByName List.6 Dict.138;
-    dec Dict.138;
-    ret Dict.686;
-
-procedure Dict.41 ():
-    let Dict.694 : Float32 = 0.8f64;
-    ret Dict.694;
+    let Dict.699 : U64 = CallByName List.6 Dict.150;
+    dec Dict.150;
+    ret Dict.699;

 procedure Dict.42 ():
-    let Dict.692 : U8 = 64i64;
-    let Dict.693 : U8 = 3i64;
-    let Dict.691 : U8 = CallByName Num.20 Dict.692 Dict.693;
-    ret Dict.691;
+    let Dict.707 : Float32 = 0.8f64;
+    ret Dict.707;
+
+procedure Dict.43 ():
+    let Dict.705 : U8 = 64i64;
+    let Dict.706 : U8 = 3i64;
+    let Dict.704 : U8 = CallByName Num.20 Dict.705 Dict.706;
+    ret Dict.704;

 procedure List.6 (#Attr.2):
     let List.553 : U64 = lowlevel ListLen #Attr.2;

(File diff suppressed because it is too large.)

@@ -1,4 +0,0 @@
-dbg
-1 == 1
-
-4
@@ -0,0 +1,6 @@
+dbg (
+    5,
+    666,
+)
+
+4
@@ -0,0 +1,26 @@
+Dbg(
+    @4-16 Tuple(
+        [
+            @5-6 Num(
+                "5",
+            ),
+            @12-15 SpaceBefore(
+                Num(
+                    "666",
+                ),
+                [
+                    Newline,
+                ],
+            ),
+        ],
+    ),
+    @18-19 SpaceBefore(
+        Num(
+            "4",
+        ),
+        [
+            Newline,
+            Newline,
+        ],
+    ),
+)
@@ -0,0 +1,4 @@
+dbg (5,
+    666)
+
+4

@@ -246,8 +246,8 @@ mod test_snapshots {
     fail/when_outdented_branch.expr,
     fail/when_over_indented_int.expr,
     fail/when_over_indented_underscore.expr,
-    fail/wild_case_arrow.expr,
+    fail/where_type_variable.expr,
+    fail/wild_case_arrow.expr,
     malformed/bad_opaque_ref.expr,
     malformed/malformed_ident_due_to_underscore.expr,
     malformed/malformed_pattern_field_access.expr, // See https://github.com/roc-lang/roc/issues/399
@@ -296,6 +296,7 @@ mod test_snapshots {
     pass/control_characters_in_scalar.expr,
     pass/crash.expr,
     pass/dbg.expr,
+    pass/dbg_multiline.expr,
     pass/def_without_newline.expr,
     pass/destructure_tag_assignment.expr,
     pass/empty_app_header.header,
@@ -489,9 +490,9 @@ mod test_snapshots {
     pass/where_clause_multiple_has_across_newlines.expr,
     pass/where_clause_non_function.expr,
     pass/where_clause_on_newline.expr,
+    pass/where_ident.expr,
     pass/zero_float.expr,
     pass/zero_int.expr,
-    pass/where_ident.expr,
     // END SNAPSHOTS (for automatic test detection via `env ROC_SNAPSHOT_TEST_OVERWRITE=1 cargo test`)
 }

@@ -8,5 +8,5 @@ main =
     s2 = Set.empty {}

     Bool.isEq s1 s1 && Bool.isEq s2 s2
-# ^^^^^^^^^ Set#Bool.isEq(24): Set Str, Set Str -[[Set.isEq(24)]]-> Bool
-# ^^^^^^^^^ Set#Bool.isEq(24): Set U8, Set U8 -[[Set.isEq(24)]]-> Bool
+# ^^^^^^^^^ Set#Bool.isEq(25): Set Str, Set Str -[[Set.isEq(25)]]-> Bool
+# ^^^^^^^^^ Set#Bool.isEq(25): Set U8, Set U8 -[[Set.isEq(25)]]-> Bool