Merge branch 'trunk' of github.com:rtfeldman/roc into pure-roc-list-walk

Brian Carroll 2022-07-02 18:08:43 +01:00
commit 219e6d11cf
No known key found for this signature in database
GPG key ID: 9CF4E3BF9C4722C7
1067 changed files with 105 additions and 104 deletions

File diff suppressed because it is too large.

dict.zig
@@ -0,0 +1,815 @@
const std = @import("std");
const testing = std.testing;
const expectEqual = testing.expectEqual;
const mem = std.mem;
const assert = std.debug.assert;
const utils = @import("utils.zig");
const RocList = @import("list.zig").RocList;
const INITIAL_SEED = 0xc70f6907;
const InPlace = enum(u8) {
InPlace,
Clone,
};
const Slot = enum(u8) {
Empty,
Filled,
PreviouslyFilled,
};
const MaybeIndexTag = enum { index, not_found };
const MaybeIndex = union(MaybeIndexTag) { index: usize, not_found: void };
fn nextSeed(seed: u64) u64 {
// TODO is this a valid way to get a new seed? are there better ways?
return seed + 1;
}
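// Total number of slots in the dict's allocation at `input` levels.
// Starting from 8 slots, the total triples per level: 8, 24, 72, ...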
fn totalCapacityAtLevel(input: usize) usize {
if (input == 0) {
return 0;
}
var n = input;
var slots: usize = 8;
while (n > 1) : (n -= 1) {
slots = slots * 2 + slots;
}
return slots;
}
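// Number of slots in level `input` alone; this doubles per level: 8, 16, 32, ...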
fn capacityOfLevel(input: usize) usize {
if (input == 0) {
return 0;
}
var n = input;
var slots: usize = 8;
while (n > 1) : (n -= 1) {
slots = slots * 2;
}
return slots;
}
// alignment of elements. The number (16 or 8) indicates the maximum
// alignment of the key and value. The tag furthermore indicates
// which has the bigger alignment. If both are the same, we put
// the key first
const Alignment = extern struct {
bits: u8,
const VALUE_BEFORE_KEY_FLAG: u8 = 0b1000_0000;
fn toU32(self: Alignment) u32 {
if (self.bits >= VALUE_BEFORE_KEY_FLAG) {
return self.bits ^ Alignment.VALUE_BEFORE_KEY_FLAG;
} else {
return self.bits;
}
}
fn keyFirst(self: Alignment) bool {
if (self.bits & Alignment.VALUE_BEFORE_KEY_FLAG > 0) {
return false;
} else {
return true;
}
}
};
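// Illustrative check of how the packed alignment byte decodes: the low bits
// carry the maximum alignment, and the high bit flags that values precede
// keys in memory.
test "Alignment flag decoding (illustrative)" {
    const key_first = Alignment{ .bits = 8 };
    try expectEqual(key_first.toU32(), 8);
    try expectEqual(key_first.keyFirst(), true);
    const value_first = Alignment{ .bits = 8 | Alignment.VALUE_BEFORE_KEY_FLAG };
    try expectEqual(value_first.toU32(), 8);
    try expectEqual(value_first.keyFirst(), false);
}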
pub fn decref(
bytes_or_null: ?[*]u8,
data_bytes: usize,
alignment: Alignment,
) void {
return utils.decref(bytes_or_null, data_bytes, alignment.toU32());
}
pub fn allocateWithRefcount(
data_bytes: usize,
alignment: Alignment,
) [*]u8 {
return utils.allocateWithRefcount(data_bytes, alignment.toU32());
}
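// Memory layout of the dict's single allocation, matching the copy logic
// in `reallocate` below:
//   keyFirst:  | keys... | values... | slots... |
//   otherwise: | values... | keys... | slots... |
// where each slot is one byte tagging Empty/Filled/PreviouslyFilled.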
pub const RocDict = extern struct {
dict_bytes: ?[*]u8,
dict_entries_len: usize,
number_of_levels: usize,
pub fn empty() RocDict {
return RocDict{
.dict_entries_len = 0,
.number_of_levels = 0,
.dict_bytes = null,
};
}
pub fn allocate(
number_of_levels: usize,
number_of_entries: usize,
alignment: Alignment,
key_size: usize,
value_size: usize,
) RocDict {
const number_of_slots = totalCapacityAtLevel(number_of_levels);
const slot_size = slotSize(key_size, value_size);
const data_bytes = number_of_slots * slot_size;
return RocDict{
.dict_bytes = allocateWithRefcount(data_bytes, alignment),
.number_of_levels = number_of_levels,
.dict_entries_len = number_of_entries,
};
}
pub fn reallocate(
self: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
) RocDict {
const new_level = self.number_of_levels + 1;
const slot_size = slotSize(key_width, value_width);
const old_capacity = self.capacity();
const new_capacity = totalCapacityAtLevel(new_level);
const delta_capacity = new_capacity - old_capacity;
const data_bytes = new_capacity * slot_size;
const first_slot = allocateWithRefcount(data_bytes, alignment);
// transfer the memory
if (self.dict_bytes) |source_ptr| {
const dest_ptr = first_slot;
var source_offset: usize = 0;
var dest_offset: usize = 0;
if (alignment.keyFirst()) {
// copy keys
@memcpy(dest_ptr + dest_offset, source_ptr + source_offset, old_capacity * key_width);
// copy values
source_offset = old_capacity * key_width;
dest_offset = new_capacity * key_width;
@memcpy(dest_ptr + dest_offset, source_ptr + source_offset, old_capacity * value_width);
} else {
// copy values
@memcpy(dest_ptr + dest_offset, source_ptr + source_offset, old_capacity * value_width);
// copy keys
source_offset = old_capacity * value_width;
dest_offset = new_capacity * value_width;
@memcpy(dest_ptr + dest_offset, source_ptr + source_offset, old_capacity * key_width);
}
// copy slots
source_offset = old_capacity * (key_width + value_width);
dest_offset = new_capacity * (key_width + value_width);
@memcpy(dest_ptr + dest_offset, source_ptr + source_offset, old_capacity * @sizeOf(Slot));
}
var i: usize = 0;
const first_new_slot_value = first_slot + old_capacity * slot_size + delta_capacity * (key_width + value_width);
while (i < (new_capacity - old_capacity)) : (i += 1) {
(first_new_slot_value)[i] = @enumToInt(Slot.Empty);
}
const result = RocDict{
.dict_bytes = first_slot,
.number_of_levels = self.number_of_levels + 1,
.dict_entries_len = self.dict_entries_len,
};
// NOTE we fuse an increment of all keys/values with a decrement of the input dict
decref(self.dict_bytes, self.capacity() * slotSize(key_width, value_width), alignment);
return result;
}
pub fn asU8ptr(self: RocDict) [*]u8 {
return @ptrCast([*]u8, self.dict_bytes);
}
pub fn len(self: RocDict) usize {
return self.dict_entries_len;
}
pub fn isEmpty(self: RocDict) bool {
return self.len() == 0;
}
pub fn isUnique(self: RocDict) bool {
// the empty dict is unique (in the sense that copying it will not leak memory)
if (self.isEmpty()) {
return true;
}
// otherwise, check if the refcount is one
const ptr: [*]usize = @ptrCast([*]usize, @alignCast(@alignOf(usize), self.dict_bytes));
return (ptr - 1)[0] == utils.REFCOUNT_ONE;
}
pub fn capacity(self: RocDict) usize {
return totalCapacityAtLevel(self.number_of_levels);
}
pub fn makeUnique(self: RocDict, alignment: Alignment, key_width: usize, value_width: usize) RocDict {
if (self.isEmpty()) {
return self;
}
if (self.isUnique()) {
return self;
}
// unfortunately, we have to clone
var new_dict = RocDict.allocate(self.number_of_levels, self.dict_entries_len, alignment, key_width, value_width);
var old_bytes: [*]u8 = @ptrCast([*]u8, self.dict_bytes);
var new_bytes: [*]u8 = @ptrCast([*]u8, new_dict.dict_bytes);
const number_of_bytes = self.capacity() * (@sizeOf(Slot) + key_width + value_width);
@memcpy(new_bytes, old_bytes, number_of_bytes);
// NOTE we fuse an increment of all keys/values with a decrement of the input dict
const data_bytes = self.capacity() * slotSize(key_width, value_width);
decref(self.dict_bytes, data_bytes, alignment);
return new_dict;
}
fn getSlot(self: *const RocDict, index: usize, key_width: usize, value_width: usize) Slot {
const offset = self.capacity() * (key_width + value_width) + index * @sizeOf(Slot);
const ptr = self.dict_bytes orelse unreachable;
return @intToEnum(Slot, ptr[offset]);
}
fn setSlot(self: *RocDict, index: usize, key_width: usize, value_width: usize, slot: Slot) void {
const offset = self.capacity() * (key_width + value_width) + index * @sizeOf(Slot);
const ptr = self.dict_bytes orelse unreachable;
ptr[offset] = @enumToInt(slot);
}
fn setKey(self: *RocDict, index: usize, alignment: Alignment, key_width: usize, value_width: usize, data: Opaque) void {
if (key_width == 0) {
return;
}
const offset = blk: {
if (alignment.keyFirst()) {
break :blk (index * key_width);
} else {
break :blk (self.capacity() * value_width) + (index * key_width);
}
};
const ptr = self.dict_bytes orelse unreachable;
const source = data orelse unreachable;
const dest = ptr + offset;
@memcpy(dest, source, key_width);
}
fn getKey(self: *const RocDict, index: usize, alignment: Alignment, key_width: usize, value_width: usize) Opaque {
if (key_width == 0) {
return null;
}
const offset = blk: {
if (alignment.keyFirst()) {
break :blk (index * key_width);
} else {
break :blk (self.capacity() * value_width) + (index * key_width);
}
};
const ptr = self.dict_bytes orelse unreachable;
return ptr + offset;
}
fn setValue(self: *RocDict, index: usize, alignment: Alignment, key_width: usize, value_width: usize, data: Opaque) void {
if (value_width == 0) {
return;
}
const offset = blk: {
if (alignment.keyFirst()) {
break :blk (self.capacity() * key_width) + (index * value_width);
} else {
break :blk (index * value_width);
}
};
const ptr = self.dict_bytes orelse unreachable;
const source = data orelse unreachable;
const dest = ptr + offset;
@memcpy(dest, source, value_width);
}
fn getValue(self: *const RocDict, index: usize, alignment: Alignment, key_width: usize, value_width: usize) Opaque {
if (value_width == 0) {
return null;
}
const offset = blk: {
if (alignment.keyFirst()) {
break :blk (self.capacity() * key_width) + (index * value_width);
} else {
break :blk (index * value_width);
}
};
const ptr = self.dict_bytes orelse unreachable;
return ptr + offset;
}
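// Probing strategy: each level is its own region of slots, and the key is
// rehashed with a fresh seed per level. Probing stops at the first Empty or
// PreviouslyFilled slot (not found), or at a Filled slot holding an equal key.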
fn findIndex(self: *const RocDict, alignment: Alignment, key: Opaque, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn) MaybeIndex {
if (self.isEmpty()) {
return MaybeIndex.not_found;
}
var seed: u64 = INITIAL_SEED;
var current_level: usize = 1;
var current_level_size: usize = 8;
var next_level_size: usize = 2 * current_level_size;
while (true) {
if (current_level > self.number_of_levels) {
return MaybeIndex.not_found;
}
// hash the key, and take it modulo the current level's size
// (so we get an in-bounds index into that level)
const hash = hash_fn(seed, key);
const index = capacityOfLevel(current_level - 1) + @intCast(usize, (hash % current_level_size));
switch (self.getSlot(index, key_width, value_width)) {
Slot.Empty, Slot.PreviouslyFilled => {
return MaybeIndex.not_found;
},
Slot.Filled => {
// is this the same key, or a new key?
const current_key = self.getKey(index, alignment, key_width, value_width);
if (is_eq(key, current_key)) {
return MaybeIndex{ .index = index };
} else {
current_level += 1;
current_level_size *= 2;
next_level_size *= 2;
seed = nextSeed(seed);
continue;
}
},
}
}
}
};
// Dict.empty
pub fn dictEmpty(dict: *RocDict) callconv(.C) void {
dict.* = RocDict.empty();
}
pub fn slotSize(key_size: usize, value_size: usize) usize {
return @sizeOf(Slot) + key_size + value_size;
}
// Dict.len
pub fn dictLen(dict: RocDict) callconv(.C) usize {
return dict.dict_entries_len;
}
// commonly used type aliases
const Opaque = ?[*]u8;
const HashFn = fn (u64, ?[*]u8) callconv(.C) u64;
const EqFn = fn (?[*]u8, ?[*]u8) callconv(.C) bool;
const Inc = fn (?[*]u8) callconv(.C) void;
const IncN = fn (?[*]u8, usize) callconv(.C) void;
const Dec = fn (?[*]u8) callconv(.C) void;
const Caller3 = fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
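// A `Caller3` wraps a Roc closure of three arguments:
// (closure_data, arg1, arg2, arg3, output) -- see its use in `dictWalk` below.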
// Dict.insert : Dict k v, k, v -> Dict k v
pub fn dictInsert(
input: RocDict,
alignment: Alignment,
key: Opaque,
key_width: usize,
value: Opaque,
value_width: usize,
hash_fn: HashFn,
is_eq: EqFn,
dec_key: Dec,
dec_value: Dec,
output: *RocDict,
) callconv(.C) void {
var seed: u64 = INITIAL_SEED;
var result = input.makeUnique(alignment, key_width, value_width);
var current_level: usize = 1;
var current_level_size: usize = 8;
var next_level_size: usize = 2 * current_level_size;
while (true) {
if (current_level > result.number_of_levels) {
result = result.reallocate(alignment, key_width, value_width);
}
const hash = hash_fn(seed, key);
const index = capacityOfLevel(current_level - 1) + @intCast(usize, (hash % current_level_size));
assert(index < result.capacity());
switch (result.getSlot(index, key_width, value_width)) {
Slot.Empty, Slot.PreviouslyFilled => {
result.setSlot(index, key_width, value_width, Slot.Filled);
result.setKey(index, alignment, key_width, value_width, key);
result.setValue(index, alignment, key_width, value_width, value);
result.dict_entries_len += 1;
break;
},
Slot.Filled => {
// is this the same key, or a new key?
const current_key = result.getKey(index, alignment, key_width, value_width);
if (is_eq(key, current_key)) {
// we will override the old value, but first have to decrement its refcount
const current_value = result.getValue(index, alignment, key_width, value_width);
dec_value(current_value);
// we must consume the key argument!
dec_key(key);
result.setValue(index, alignment, key_width, value_width, value);
break;
} else {
seed = nextSeed(seed);
current_level += 1;
current_level_size *= 2;
next_level_size *= 2;
continue;
}
},
}
}
// write result into pointer
output.* = result;
}
// Dict.remove : Dict k v, k -> Dict k v
pub fn dictRemove(input: RocDict, alignment: Alignment, key: Opaque, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, dec_key: Dec, dec_value: Dec, output: *RocDict) callconv(.C) void {
switch (input.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
// the key was not found; we're done
output.* = input;
return;
},
MaybeIndex.index => |index| {
var dict = input.makeUnique(alignment, key_width, value_width);
assert(index < dict.capacity());
dict.setSlot(index, key_width, value_width, Slot.PreviouslyFilled);
const old_key = dict.getKey(index, alignment, key_width, value_width);
const old_value = dict.getValue(index, alignment, key_width, value_width);
dec_key(old_key);
dec_value(old_value);
dict.dict_entries_len -= 1;
// if the dict is now completely empty, free its allocation
if (dict.dict_entries_len == 0) {
const data_bytes = dict.capacity() * slotSize(key_width, value_width);
decref(dict.dict_bytes, data_bytes, alignment);
output.* = RocDict.empty();
return;
}
output.* = dict;
},
}
}
// Dict.contains : Dict k v, k -> Bool
pub fn dictContains(dict: RocDict, alignment: Alignment, key: Opaque, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn) callconv(.C) bool {
switch (dict.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
return false;
},
MaybeIndex.index => |_| {
return true;
},
}
}
// Dict.get : Dict k v, k -> { flag: bool, value: Opaque }
pub fn dictGet(dict: RocDict, alignment: Alignment, key: Opaque, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, inc_value: Inc) callconv(.C) extern struct { value: Opaque, flag: bool } {
switch (dict.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
return .{ .flag = false, .value = null };
},
MaybeIndex.index => |index| {
var value = dict.getValue(index, alignment, key_width, value_width);
inc_value(value);
return .{ .flag = true, .value = value };
},
}
}
// Dict.elementsRc
// increment or decrement all dict elements (but not the dict's allocation itself)
pub fn elementsRc(dict: RocDict, alignment: Alignment, key_width: usize, value_width: usize, modify_key: Inc, modify_value: Inc) callconv(.C) void {
const size = dict.capacity();
var i: usize = 0;
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
modify_key(dict.getKey(i, alignment, key_width, value_width));
modify_value(dict.getValue(i, alignment, key_width, value_width));
},
else => {},
}
}
}
pub fn dictKeys(
dict: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
inc_key: Inc,
) callconv(.C) RocList {
const size = dict.capacity();
var length: usize = 0;
var i: usize = 0;
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
length += 1;
},
else => {},
}
}
if (length == 0) {
return RocList.empty();
}
const data_bytes = length * key_width;
var ptr = allocateWithRefcount(data_bytes, alignment);
i = 0;
var copied: usize = 0;
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const key = dict.getKey(i, alignment, key_width, value_width);
inc_key(key);
const key_cast = @ptrCast([*]const u8, key);
@memcpy(ptr + (copied * key_width), key_cast, key_width);
copied += 1;
},
else => {},
}
}
return RocList{ .bytes = ptr, .length = length, .capacity = length };
}
pub fn dictValues(
dict: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
inc_value: Inc,
) callconv(.C) RocList {
const size = dict.capacity();
var length: usize = 0;
var i: usize = 0;
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
length += 1;
},
else => {},
}
}
if (length == 0) {
return RocList.empty();
}
const data_bytes = length * value_width;
var ptr = allocateWithRefcount(data_bytes, alignment);
i = 0;
var copied: usize = 0;
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const value = dict.getValue(i, alignment, key_width, value_width);
inc_value(value);
const value_cast = @ptrCast([*]const u8, value);
@memcpy(ptr + (copied * value_width), value_cast, value_width);
copied += 1;
},
else => {},
}
}
return RocList{ .bytes = ptr, .length = length, .capacity = length };
}
fn doNothing(_: Opaque) callconv(.C) void {
return;
}
pub fn dictUnion(
dict1: RocDict,
dict2: RocDict,
alignment: Alignment,
key_width: usize,
value_width: usize,
hash_fn: HashFn,
is_eq: EqFn,
inc_key: Inc,
inc_value: Inc,
output: *RocDict,
) callconv(.C) void {
output.* = dict1.makeUnique(alignment, key_width, value_width);
var i: usize = 0;
while (i < dict2.capacity()) : (i += 1) {
switch (dict2.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const key = dict2.getKey(i, alignment, key_width, value_width);
switch (output.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
const value = dict2.getValue(i, alignment, key_width, value_width);
inc_value(value);
// we need an extra RC token for the key
inc_key(key);
inc_value(value);
// we know the newly added key is not a duplicate, so the `dec`s are unreachable
const dec_key = doNothing;
const dec_value = doNothing;
dictInsert(output.*, alignment, key, key_width, value, value_width, hash_fn, is_eq, dec_key, dec_value, output);
},
MaybeIndex.index => |_| {
// the key is already in the output dict
continue;
},
}
},
else => {},
}
}
}
pub fn dictIntersection(dict1: RocDict, dict2: RocDict, alignment: Alignment, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, dec_key: Dec, dec_value: Dec, output: *RocDict) callconv(.C) void {
output.* = dict1.makeUnique(alignment, key_width, value_width);
var i: usize = 0;
const size = dict1.capacity();
while (i < size) : (i += 1) {
switch (output.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const key = dict1.getKey(i, alignment, key_width, value_width);
switch (dict2.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
dictRemove(output.*, alignment, key, key_width, value_width, hash_fn, is_eq, dec_key, dec_value, output);
},
MaybeIndex.index => |_| {
// keep this key/value
continue;
},
}
},
else => {},
}
}
}
pub fn dictDifference(dict1: RocDict, dict2: RocDict, alignment: Alignment, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, dec_key: Dec, dec_value: Dec, output: *RocDict) callconv(.C) void {
output.* = dict1.makeUnique(alignment, key_width, value_width);
var i: usize = 0;
const size = dict1.capacity();
while (i < size) : (i += 1) {
switch (output.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const key = dict1.getKey(i, alignment, key_width, value_width);
switch (dict2.findIndex(alignment, key, key_width, value_width, hash_fn, is_eq)) {
MaybeIndex.not_found => {
// keep this key/value
continue;
},
MaybeIndex.index => |_| {
dictRemove(output.*, alignment, key, key_width, value_width, hash_fn, is_eq, dec_key, dec_value, output);
},
}
},
else => {},
}
}
}
pub fn setFromList(list: RocList, alignment: Alignment, key_width: usize, value_width: usize, hash_fn: HashFn, is_eq: EqFn, dec_key: Dec, output: *RocDict) callconv(.C) void {
output.* = RocDict.empty();
var ptr = @ptrCast([*]u8, list.bytes);
const dec_value = doNothing;
const value = null;
const size = list.length;
var i: usize = 0;
while (i < size) : (i += 1) {
const key = ptr + i * key_width;
dictInsert(output.*, alignment, key, key_width, value, value_width, hash_fn, is_eq, dec_key, dec_value, output);
}
// NOTE: decref checks for the empty case
const data_bytes = size * key_width;
decref(list.bytes, data_bytes, alignment);
}
pub fn dictWalk(
dict: RocDict,
caller: Caller3,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
accum: Opaque,
alignment: Alignment,
key_width: usize,
value_width: usize,
accum_width: usize,
output: Opaque,
) callconv(.C) void {
const alignment_u32 = alignment.toU32();
// allocate space to write the result of the stepper into
// experimentally, aliasing the accum and output pointers is not a good idea
// TODO handle alloc failing!
const bytes_ptr: [*]u8 = utils.alloc(accum_width, alignment_u32) orelse unreachable;
var b1 = output orelse unreachable;
var b2 = bytes_ptr;
if (data_is_owned) {
inc_n_data(data, dict.len());
}
@memcpy(b2, accum orelse unreachable, accum_width);
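// Ping-pong between the two buffers: each step reads the accumulator from
// b2 and writes the new accumulator into b1, then the pointers swap, so b2
// always holds the latest accumulator (and is copied to `output` at the end).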
var i: usize = 0;
const size = dict.capacity();
while (i < size) : (i += 1) {
switch (dict.getSlot(i, key_width, value_width)) {
Slot.Filled => {
const key = dict.getKey(i, alignment, key_width, value_width);
const value = dict.getValue(i, alignment, key_width, value_width);
caller(data, b2, key, value, b1);
std.mem.swap([*]u8, &b1, &b2);
},
else => {},
}
}
@memcpy(output orelse unreachable, b2, accum_width);
utils.dealloc(bytes_ptr, alignment_u32);
}

expect.zig
@@ -0,0 +1,134 @@
const std = @import("std");
const utils = @import("utils.zig");
const CSlice = utils.CSlice;
const always_inline = std.builtin.CallOptions.Modifier.always_inline;
const Failure = struct {
start_line: u32,
end_line: u32,
start_col: u16,
end_col: u16,
};
// BEGIN FAILURES GLOBALS ///////////////////
var failures_mutex = std.Thread.Mutex{};
var failures: [*]Failure = undefined;
var failure_length: usize = 0;
var failure_capacity: usize = 0;
// END FAILURES GLOBALS /////////////////////
pub fn expectFailed(
start_line: u32,
end_line: u32,
start_col: u16,
end_col: u16,
) void {
const new_failure = Failure{ .start_line = start_line, .end_line = end_line, .start_col = start_col, .end_col = end_col };
// Lock the failures mutex before reading from any of the failures globals,
// and then release the lock once we're done modifying things.
failures_mutex.lock();
defer failures_mutex.unlock();
// If we don't have enough capacity to add a failure, allocate a new failures pointer.
if (failure_length >= failure_capacity) {
if (failure_capacity > 0) {
// We already had previous failures allocated, so try to realloc in order
// to grow the size in-place without having to memcpy bytes over.
const old_pointer = failures;
const old_bytes = failure_capacity * @sizeOf(Failure);
failure_capacity *= 2;
const new_bytes = failure_capacity * @sizeOf(Failure);
const failures_u8 = @ptrCast([*]u8, @alignCast(@alignOf(Failure), failures));
const raw_pointer = utils.realloc(failures_u8, new_bytes, old_bytes, @alignOf(Failure));
failures = @ptrCast([*]Failure, @alignCast(@alignOf(Failure), raw_pointer));
// If realloc wasn't able to expand in-place (that is, it returned a different pointer),
// then copy the data into the new pointer and dealloc the old one.
if (failures != old_pointer) {
const old_pointer_u8 = @ptrCast([*]u8, old_pointer);
utils.memcpy(@ptrCast([*]u8, failures), old_pointer_u8, old_bytes);
utils.dealloc(old_pointer_u8, @alignOf(Failure));
}
} else {
// We've never had any failures before, so allocate the failures for the first time.
failure_capacity = 10;
const raw_pointer = utils.alloc(failure_capacity * @sizeOf(Failure), @alignOf(Failure));
failures = @ptrCast([*]Failure, @alignCast(@alignOf(Failure), raw_pointer));
}
}
failures[failure_length] = new_failure;
failure_length += 1;
}
pub fn expectFailedC(
start_line: u32,
end_line: u32,
start_col: u16,
end_col: u16,
) callconv(.C) void {
return @call(.{ .modifier = always_inline }, expectFailed, .{ start_line, end_line, start_col, end_col });
}
pub fn getExpectFailures() []Failure {
failures_mutex.lock();
defer failures_mutex.unlock();
if (failure_length > 0) {
// defensively clone failures, in case someone modifies the originals after the mutex has been released.
const num_bytes = failure_length * @sizeOf(Failure);
// TODO handle the possibility of alloc failing
const raw_clones = utils.alloc(num_bytes, @alignOf(Failure)) orelse unreachable;
utils.memcpy(raw_clones, @ptrCast([*]u8, failures), num_bytes);
const clones = @ptrCast([*]Failure, @alignCast(@alignOf(Failure), raw_clones));
return clones[0..failure_length];
} else {
return failures[0..0];
}
}
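// NOTE: unlike `getExpectFailures` above, this C entrypoint returns the live
// buffer directly, without cloning and without taking the mutex.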
pub fn getExpectFailuresC() callconv(.C) CSlice {
var bytes = @ptrCast(*anyopaque, failures);
return .{ .pointer = bytes, .len = failure_length };
}
pub fn deinitFailures() void {
failures_mutex.lock();
defer failures_mutex.unlock();
utils.dealloc(@ptrCast([*]u8, failures), @alignOf(Failure));
failure_length = 0;
}
pub fn deinitFailuresC() callconv(.C) void {
return @call(.{ .modifier = always_inline }, deinitFailures, .{});
}
test "expectFailure does something" {
defer deinitFailures();
var fails = getExpectFailures();
try std.testing.expectEqual(fails.len, 0);
expectFailed(1, 2, 3, 4);
fails = getExpectFailures();
try std.testing.expectEqual(fails.len, 1);
utils.dealloc(@ptrCast([*]u8, fails.ptr), @alignOf(Failure));
const what_it_should_look_like = Failure{ .start_line = 1, .end_line = 2, .start_col = 3, .end_col = 4 };
fails = getExpectFailures();
try std.testing.expectEqual(fails[0], what_it_should_look_like);
utils.dealloc(@ptrCast([*]u8, fails.ptr), @alignOf(Failure));
}

hash.zig
@@ -0,0 +1,254 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const str = @import("str.zig");
const mem = std.mem;
pub fn wyhash(seed: u64, bytes: ?[*]const u8, length: usize) callconv(.C) u64 {
if (bytes) |nonnull| {
const slice = nonnull[0..length];
return wyhash_hash(seed, slice);
} else {
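// a null pointer hashes to an arbitrary fixed value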
return 42;
}
}
pub fn wyhash_rocstr(seed: u64, input: str.RocStr) callconv(.C) u64 {
return wyhash_hash(seed, input.asSlice());
}
const primes = [_]u64{
0xa0761d6478bd642f,
0xe7037ed1a0b428db,
0x8ebc6af09c88c6e3,
0x589965cc75374cc3,
0x1d8e4e27c47d124f,
};
fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
const T = std.meta.Int(.unsigned, 8 * bytes);
return mem.readIntLittle(T, data[0..bytes]);
}
fn read_8bytes_swapped(data: []const u8) u64 {
return (read_bytes(4, data) << 32 | read_bytes(4, data[4..]));
}
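// Core wyhash mixing step: widen-multiply two 64-bit values into 128 bits,
// XOR the high half into the low half, and truncate back to 64 bits.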
fn mum(a: u64, b: u64) u64 {
var r = std.math.mulWide(u64, a, b);
r = (r >> 64) ^ r;
return @truncate(u64, r);
}
fn mix0(a: u64, b: u64, seed: u64) u64 {
return mum(a ^ seed ^ primes[0], b ^ seed ^ primes[1]);
}
fn mix1(a: u64, b: u64, seed: u64) u64 {
return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]);
}
// Wyhash version which does not store internal state for handling partial buffers.
// This is needed so that we can maximize speed for the short-key case, which
// uses the non-iterative API that the public Wyhash exposes.
const WyhashStateless = struct {
seed: u64,
msg_len: usize,
pub fn init(seed: u64) WyhashStateless {
return WyhashStateless{
.seed = seed,
.msg_len = 0,
};
}
fn round(self: *WyhashStateless, b: []const u8) void {
std.debug.assert(b.len == 32);
self.seed = mix0(
read_bytes(8, b[0..]),
read_bytes(8, b[8..]),
self.seed,
) ^ mix1(
read_bytes(8, b[16..]),
read_bytes(8, b[24..]),
self.seed,
);
}
pub fn update(self: *WyhashStateless, b: []const u8) void {
std.debug.assert(b.len % 32 == 0);
var off: usize = 0;
while (off < b.len) : (off += 32) {
@call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
}
self.msg_len += b.len;
}
pub fn final(self: *WyhashStateless, b: []const u8) u64 {
std.debug.assert(b.len < 32);
const seed = self.seed;
const rem_len = @intCast(u5, b.len);
const rem_key = b[0..rem_len];
self.seed = switch (rem_len) {
0 => seed,
1 => mix0(read_bytes(1, rem_key), primes[4], seed),
2 => mix0(read_bytes(2, rem_key), primes[4], seed),
3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed),
4 => mix0(read_bytes(4, rem_key), primes[4], seed),
5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed),
6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed),
7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed),
8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed),
9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed),
10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed),
11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed),
12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed),
13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed),
14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed),
15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed),
16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed),
17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed),
18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed),
19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed),
20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed),
21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed),
22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed),
23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed),
24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed),
25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed),
26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed),
27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed),
28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed),
29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed),
30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed),
31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed),
};
self.msg_len += b.len;
return mum(self.seed ^ self.msg_len, primes[4]);
}
pub fn hash(seed: u64, input: []const u8) u64 {
const aligned_len = input.len - (input.len % 32);
var c = WyhashStateless.init(seed);
@call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
/// Fast non-cryptographic 64-bit hash function.
/// See https://github.com/wangyi-fudan/wyhash
pub const Wyhash = struct {
state: WyhashStateless,
buf: [32]u8,
buf_len: usize,
pub fn init(seed: u64) Wyhash {
return Wyhash{
.state = WyhashStateless.init(seed),
.buf = undefined,
.buf_len = 0,
};
}
pub fn update(self: *Wyhash, b: []const u8) void {
var off: usize = 0;
if (self.buf_len != 0 and self.buf_len + b.len >= 32) {
off += 32 - self.buf_len;
mem.copy(u8, self.buf[self.buf_len..], b[0..off]);
self.state.update(self.buf[0..]);
self.buf_len = 0;
}
const remain_len = b.len - off;
const aligned_len = remain_len - (remain_len % 32);
self.state.update(b[off .. off + aligned_len]);
mem.copy(u8, self.buf[self.buf_len..], b[off + aligned_len ..]);
self.buf_len += @intCast(u8, b[off + aligned_len ..].len);
}
pub fn final(self: *Wyhash) u64 {
// const seed = self.state.seed;
// const rem_len = @intCast(u5, self.buf_len);
const rem_key = self.buf[0..self.buf_len];
return self.state.final(rem_key);
}
pub fn hash(seed: u64, input: []const u8) u64 {
return WyhashStateless.hash(seed, input);
}
};
fn wyhash_hash(seed: u64, input: []const u8) u64 {
return Wyhash.hash(seed, input);
}
const expectEqual = std.testing.expectEqual;
test "test vectors" {
const hash = Wyhash.hash;
try expectEqual(hash(0, ""), 0x0);
try expectEqual(hash(1, "a"), 0xbed235177f41d328);
try expectEqual(hash(2, "abc"), 0xbe348debe59b27c3);
try expectEqual(hash(3, "message digest"), 0x37320f657213a290);
try expectEqual(hash(4, "abcdefghijklmnopqrstuvwxyz"), 0xd0b270e1d8a7019c);
try expectEqual(hash(5, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x602a1894d3bbfe7f);
try expectEqual(hash(6, "12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x829e9c148b75970e);
}
test "test vectors streaming" {
var wh = Wyhash.init(5);
for ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") |e| {
wh.update(mem.asBytes(&e));
}
try expectEqual(wh.final(), 0x602a1894d3bbfe7f);
const pattern = "1234567890";
const count = 8;
const result = 0x829e9c148b75970e;
try expectEqual(Wyhash.hash(6, pattern ** 8), result);
wh = Wyhash.init(6);
var i: u32 = 0;
while (i < count) : (i += 1) {
wh.update(pattern);
}
try expectEqual(wh.final(), result);
}
test "iterative non-divisible update" {
var buf: [8192]u8 = undefined;
for (buf) |*e, i| {
e.* = @truncate(u8, i);
}
const seed = 0x128dad08f;
var end: usize = 32;
while (end < buf.len) : (end += 32) {
const non_iterative_hash = Wyhash.hash(seed, buf[0..end]);
var wy = Wyhash.init(seed);
var i: usize = 0;
while (i < end) : (i += 33) {
wy.update(buf[i..std.math.min(i + 33, end)]);
}
const iterative_hash = wy.final();
try std.testing.expectEqual(iterative_hash, non_iterative_hash);
}
}

File diff suppressed because it is too large.

list.zig
@@ -0,0 +1,942 @@
const std = @import("std");
const utils = @import("utils.zig");
const RocResult = utils.RocResult;
const UpdateMode = utils.UpdateMode;
const mem = std.mem;
const EqFn = fn (?[*]u8, ?[*]u8) callconv(.C) bool;
const CompareFn = fn (?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) u8;
const Opaque = ?[*]u8;
const Inc = fn (?[*]u8) callconv(.C) void;
const IncN = fn (?[*]u8, usize) callconv(.C) void;
const Dec = fn (?[*]u8) callconv(.C) void;
const HasTagId = fn (u16, ?[*]u8) callconv(.C) extern struct { matched: bool, data: ?[*]u8 };
pub const RocList = extern struct {
bytes: ?[*]u8,
length: usize,
capacity: usize,
pub fn len(self: RocList) usize {
return self.length;
}
pub fn isEmpty(self: RocList) bool {
return self.len() == 0;
}
pub fn empty() RocList {
return RocList{ .bytes = null, .length = 0, .capacity = 0 };
}
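// The refcount lives in the usize immediately before `bytes`; a value of
// utils.REFCOUNT_ONE means this list holds the only reference.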
pub fn isUnique(self: RocList) bool {
// the empty list is unique (in the sense that copying it will not leak memory)
if (self.isEmpty()) {
return true;
}
// otherwise, check if the refcount is one
const ptr: [*]usize = @ptrCast([*]usize, @alignCast(@alignOf(usize), self.bytes));
return (ptr - 1)[0] == utils.REFCOUNT_ONE;
}
pub fn allocate(
alignment: u32,
length: usize,
element_size: usize,
) RocList {
const data_bytes = length * element_size;
return RocList{
.bytes = utils.allocateWithRefcount(data_bytes, alignment),
.length = length,
.capacity = length,
};
}
pub fn makeUniqueExtra(self: RocList, alignment: u32, element_width: usize, update_mode: UpdateMode) RocList {
if (update_mode == .InPlace) {
return self;
} else {
return self.makeUnique(alignment, element_width);
}
}
pub fn makeUnique(self: RocList, alignment: u32, element_width: usize) RocList {
if (self.isEmpty()) {
return self;
}
if (self.isUnique()) {
return self;
}
// unfortunately, we have to clone
var new_list = RocList.allocate(alignment, self.length, element_width);
var old_bytes: [*]u8 = @ptrCast([*]u8, self.bytes);
var new_bytes: [*]u8 = @ptrCast([*]u8, new_list.bytes);
const number_of_bytes = self.len() * element_width;
@memcpy(new_bytes, old_bytes, number_of_bytes);
// NOTE we fuse an increment of all keys/values with a decrement of the input dict
const data_bytes = self.len() * element_width;
utils.decref(self.bytes, data_bytes, alignment);
return new_list;
}
pub fn reallocate(
self: RocList,
alignment: u32,
new_length: usize,
element_width: usize,
) RocList {
if (self.bytes) |source_ptr| {
if (self.isUnique()) {
const new_source = utils.unsafeReallocate(source_ptr, alignment, self.len(), new_length, element_width);
return RocList{ .bytes = new_source, .length = new_length, .capacity = new_length };
}
}
return self.reallocateFresh(alignment, new_length, element_width);
}
/// reallocate by explicitly making a new allocation and copying elements over
fn reallocateFresh(
self: RocList,
alignment: u32,
new_length: usize,
element_width: usize,
) RocList {
const old_length = self.length;
const delta_length = new_length - old_length;
const data_bytes = new_length * element_width;
const first_slot = utils.allocateWithRefcount(data_bytes, alignment);
// transfer the memory
if (self.bytes) |source_ptr| {
const dest_ptr = first_slot;
@memcpy(dest_ptr, source_ptr, old_length * element_width);
@memset(dest_ptr + old_length * element_width, 0, delta_length * element_width);
}
const result = RocList{
.bytes = first_slot,
.length = new_length,
.capacity = new_length,
};
utils.decref(self.bytes, old_length * element_width, alignment);
return result;
}
};
const Caller0 = fn (?[*]u8, ?[*]u8) callconv(.C) void;
const Caller1 = fn (?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
const Caller2 = fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
const Caller3 = fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
const Caller4 = fn (?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8, ?[*]u8) callconv(.C) void;
pub fn listMap(
list: RocList,
caller: Caller1,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
old_element_width: usize,
new_element_width: usize,
) callconv(.C) RocList {
if (list.bytes) |source_ptr| {
const size = list.len();
var i: usize = 0;
const output = RocList.allocate(alignment, size, new_element_width);
const target_ptr = output.bytes orelse unreachable;
if (data_is_owned) {
inc_n_data(data, size);
}
while (i < size) : (i += 1) {
caller(data, source_ptr + (i * old_element_width), target_ptr + (i * new_element_width));
}
return output;
} else {
return RocList.empty();
}
}
// List.mapWithIndex : List before, (before, Nat -> after) -> List after
pub fn listMapWithIndex(
list: RocList,
caller: Caller2,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
old_element_width: usize,
new_element_width: usize,
) callconv(.C) RocList {
if (list.bytes) |source_ptr| {
const size = list.len();
var i: usize = 0;
const output = RocList.allocate(alignment, size, new_element_width);
const target_ptr = output.bytes orelse unreachable;
if (data_is_owned) {
inc_n_data(data, size);
}
while (i < size) : (i += 1) {
// before, Nat -> after
caller(data, source_ptr + (i * old_element_width), @ptrCast(?[*]u8, &i), target_ptr + (i * new_element_width));
}
return output;
} else {
return RocList.empty();
}
}
fn decrementTail(list: RocList, start_index: usize, element_width: usize, dec: Dec) void {
if (list.bytes) |source| {
var i = start_index;
while (i < list.len()) : (i += 1) {
const element = source + i * element_width;
dec(element);
}
}
}
pub fn listMap2(
list1: RocList,
list2: RocList,
caller: Caller2,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
a_width: usize,
b_width: usize,
c_width: usize,
dec_a: Dec,
dec_b: Dec,
) callconv(.C) RocList {
const output_length = std.math.min(list1.len(), list2.len());
// if the lists don't have equal length, we must consume the remaining elements
// In this case we consume by (recursively) decrementing the elements
decrementTail(list1, output_length, a_width, dec_a);
decrementTail(list2, output_length, b_width, dec_b);
if (data_is_owned) {
inc_n_data(data, output_length);
}
if (list1.bytes) |source_a| {
if (list2.bytes) |source_b| {
const output = RocList.allocate(alignment, output_length, c_width);
const target_ptr = output.bytes orelse unreachable;
var i: usize = 0;
while (i < output_length) : (i += 1) {
const element_a = source_a + i * a_width;
const element_b = source_b + i * b_width;
const target = target_ptr + i * c_width;
caller(data, element_a, element_b, target);
}
return output;
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
}
pub fn listMap3(
list1: RocList,
list2: RocList,
list3: RocList,
caller: Caller3,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
a_width: usize,
b_width: usize,
c_width: usize,
d_width: usize,
dec_a: Dec,
dec_b: Dec,
dec_c: Dec,
) callconv(.C) RocList {
const smaller_length = std.math.min(list1.len(), list2.len());
const output_length = std.math.min(smaller_length, list3.len());
decrementTail(list1, output_length, a_width, dec_a);
decrementTail(list2, output_length, b_width, dec_b);
decrementTail(list3, output_length, c_width, dec_c);
if (data_is_owned) {
inc_n_data(data, output_length);
}
if (list1.bytes) |source_a| {
if (list2.bytes) |source_b| {
if (list3.bytes) |source_c| {
const output = RocList.allocate(alignment, output_length, d_width);
const target_ptr = output.bytes orelse unreachable;
var i: usize = 0;
while (i < output_length) : (i += 1) {
const element_a = source_a + i * a_width;
const element_b = source_b + i * b_width;
const element_c = source_c + i * c_width;
const target = target_ptr + i * d_width;
caller(data, element_a, element_b, element_c, target);
}
return output;
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
}
pub fn listMap4(
list1: RocList,
list2: RocList,
list3: RocList,
list4: RocList,
caller: Caller4,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
a_width: usize,
b_width: usize,
c_width: usize,
d_width: usize,
e_width: usize,
dec_a: Dec,
dec_b: Dec,
dec_c: Dec,
dec_d: Dec,
) callconv(.C) RocList {
const output_length = std.math.min(std.math.min(list1.len(), list2.len()), std.math.min(list3.len(), list4.len()));
decrementTail(list1, output_length, a_width, dec_a);
decrementTail(list2, output_length, b_width, dec_b);
decrementTail(list3, output_length, c_width, dec_c);
decrementTail(list4, output_length, d_width, dec_d);
if (data_is_owned) {
inc_n_data(data, output_length);
}
if (list1.bytes) |source_a| {
if (list2.bytes) |source_b| {
if (list3.bytes) |source_c| {
if (list4.bytes) |source_d| {
const output = RocList.allocate(alignment, output_length, e_width);
const target_ptr = output.bytes orelse unreachable;
var i: usize = 0;
while (i < output_length) : (i += 1) {
const element_a = source_a + i * a_width;
const element_b = source_b + i * b_width;
const element_c = source_c + i * c_width;
const element_d = source_d + i * d_width;
const target = target_ptr + i * e_width;
caller(data, element_a, element_b, element_c, element_d, target);
}
return output;
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
} else {
return RocList.empty();
}
}
pub fn listKeepIf(
list: RocList,
caller: Caller1,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
element_width: usize,
inc: Inc,
dec: Dec,
) callconv(.C) RocList {
if (list.bytes) |source_ptr| {
const size = list.len();
var i: usize = 0;
var output = RocList.allocate(alignment, list.len(), element_width);
const target_ptr = output.bytes orelse unreachable;
if (data_is_owned) {
inc_n_data(data, size);
}
var kept: usize = 0;
while (i < size) : (i += 1) {
var keep = false;
const element = source_ptr + (i * element_width);
inc(element);
caller(data, element, @ptrCast(?[*]u8, &keep));
if (keep) {
@memcpy(target_ptr + (kept * element_width), element, element_width);
kept += 1;
} else {
dec(element);
}
}
if (kept == 0) {
// if the output is empty, deallocate the space we made for the result
utils.decref(output.bytes, size * element_width, alignment);
return RocList.empty();
} else {
output.length = kept;
return output;
}
} else {
return RocList.empty();
}
}
pub fn listKeepOks(
list: RocList,
caller: Caller1,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
before_width: usize,
result_width: usize,
after_width: usize,
has_tag_id: HasTagId,
dec_result: Dec,
) callconv(.C) RocList {
const good_constructor: u16 = 1;
return listKeepResult(
list,
good_constructor,
caller,
data,
inc_n_data,
data_is_owned,
alignment,
before_width,
result_width,
after_width,
has_tag_id,
dec_result,
);
}
pub fn listKeepErrs(
list: RocList,
caller: Caller1,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
before_width: usize,
result_width: usize,
after_width: usize,
has_tag_id: HasTagId,
dec_result: Dec,
) callconv(.C) RocList {
const good_constructor: u16 = 0;
return listKeepResult(
list,
good_constructor,
caller,
data,
inc_n_data,
data_is_owned,
alignment,
before_width,
result_width,
after_width,
has_tag_id,
dec_result,
);
}
pub fn listKeepResult(
list: RocList,
good_constructor: u16,
caller: Caller1,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
before_width: usize,
result_width: usize,
after_width: usize,
has_tag_id: HasTagId,
dec_result: Dec,
) RocList {
if (list.bytes) |source_ptr| {
const size = list.len();
var i: usize = 0;
var output = RocList.allocate(alignment, list.len(), after_width);
const target_ptr = output.bytes orelse unreachable;
// TODO handle alloc failing!
var temporary = utils.alloc(result_width, alignment) orelse unreachable;
if (data_is_owned) {
inc_n_data(data, size);
}
var kept: usize = 0;
while (i < size) : (i += 1) {
const before_element = source_ptr + (i * before_width);
caller(data, before_element, temporary);
// a record { matched: bool, data: ?[*]u8 }
// for now, that data pointer is just the input `temporary` pointer
// this will change in the future to only return a pointer to the
// payload of the tag
const answer = has_tag_id(good_constructor, temporary);
if (answer.matched) {
const contents = (answer.data orelse unreachable);
@memcpy(target_ptr + (kept * after_width), contents, after_width);
kept += 1;
} else {
dec_result(temporary);
}
}
utils.dealloc(temporary, alignment);
if (kept == 0) {
utils.decref(output.bytes, size * after_width, alignment);
return RocList.empty();
} else {
output.length = kept;
return output;
}
} else {
return RocList.empty();
}
}
pub fn listWithCapacity(capacity: usize, alignment: u32, element_width: usize) callconv(.C) RocList {
var output = RocList.allocate(alignment, capacity, element_width);
output.length = 0;
return output;
}
pub fn listAppend(list: RocList, alignment: u32, element: Opaque, element_width: usize, update_mode: UpdateMode) callconv(.C) RocList {
const old_length = list.len();
var output: RocList = undefined;
if (update_mode == .InPlace and list.capacity >= old_length + 1) {
output = list;
output.length += 1;
} else {
output = list.reallocate(alignment, old_length + 1, element_width);
}
if (output.bytes) |target| {
if (element) |source| {
@memcpy(target + old_length * element_width, source, element_width);
}
}
return output;
}
pub fn listPrepend(list: RocList, alignment: u32, element: Opaque, element_width: usize) callconv(.C) RocList {
const old_length = list.len();
var output = list.reallocate(alignment, old_length + 1, element_width);
// can't use one memcpy here because source and target overlap
if (output.bytes) |target| {
var i: usize = old_length;
while (i > 0) {
i -= 1;
// move the ith element to the (i + 1)th position
@memcpy(target + (i + 1) * element_width, target + i * element_width, element_width);
}
// finally copy in the new first element
if (element) |source| {
@memcpy(target, source, element_width);
}
}
return output;
}
pub fn listSwap(
list: RocList,
alignment: u32,
element_width: usize,
index_1: usize,
index_2: usize,
update_mode: UpdateMode,
) callconv(.C) RocList {
const size = list.len();
if (index_1 == index_2 or index_1 >= size or index_2 >= size) {
// Either the indices are equal or at least one is out of bounds; either way, the list is unchanged
return list;
}
const newList = blk: {
if (update_mode == .InPlace) {
break :blk list;
} else {
break :blk list.makeUnique(alignment, element_width);
}
};
const source_ptr = @ptrCast([*]u8, newList.bytes);
swapElements(source_ptr, element_width, index_1, index_2);
return newList;
}
pub fn listSublist(
list: RocList,
alignment: u32,
element_width: usize,
start: usize,
len: usize,
dec: Dec,
) callconv(.C) RocList {
if (len == 0) {
return RocList.empty();
}
if (list.bytes) |source_ptr| {
const size = list.len();
if (start >= size) {
return RocList.empty();
}
const keep_len = std.math.min(len, size - start);
const drop_start_len = start;
const drop_end_len = size - (start + keep_len);
// Decrement the reference counts of elements before `start`.
var i: usize = 0;
while (i < drop_start_len) : (i += 1) {
const element = source_ptr + i * element_width;
dec(element);
}
// Decrement the reference counts of elements after `start + keep_len`.
i = 0;
while (i < drop_end_len) : (i += 1) {
const element = source_ptr + (start + keep_len + i) * element_width;
dec(element);
}
const output = RocList.allocate(alignment, keep_len, element_width);
const target_ptr = output.bytes orelse unreachable;
@memcpy(target_ptr, source_ptr + start * element_width, keep_len * element_width);
utils.decref(list.bytes, size * element_width, alignment);
return output;
}
return RocList.empty();
}
pub fn listDropAt(
list: RocList,
alignment: u32,
element_width: usize,
drop_index: usize,
dec: Dec,
) callconv(.C) RocList {
if (list.bytes) |source_ptr| {
const size = list.len();
if (drop_index >= size) {
return list;
}
const element = source_ptr + drop_index * element_width;
dec(element);
// NOTE
// we need to return an empty list explicitly, because we rely on the
// pointer field being null when a list is empty. That also requires
// duplicating the utils.decref call here to spend the RC token.
if (size < 2) {
utils.decref(list.bytes, size * element_width, alignment);
return RocList.empty();
}
if (list.isUnique()) {
var i = drop_index;
while (i < size) : (i += 1) {
const copy_target = source_ptr + i * element_width;
const copy_source = copy_target + element_width;
@memcpy(copy_target, copy_source, element_width);
}
var new_list = list;
new_list.length -= 1;
return new_list;
}
const output = RocList.allocate(alignment, size - 1, element_width);
const target_ptr = output.bytes orelse unreachable;
const head_size = drop_index * element_width;
@memcpy(target_ptr, source_ptr, head_size);
const tail_target = target_ptr + drop_index * element_width;
const tail_source = source_ptr + (drop_index + 1) * element_width;
const tail_size = (size - drop_index - 1) * element_width;
@memcpy(tail_target, tail_source, tail_size);
utils.decref(list.bytes, size * element_width, alignment);
return output;
} else {
return RocList.empty();
}
}
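// Lomuto partition: the last element is the pivot. Elements that compare
// less than the pivot are swapped down to the front; the pivot is then
// placed just after them and its final index is returned.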
fn partition(source_ptr: [*]u8, transform: Opaque, wrapper: CompareFn, element_width: usize, low: isize, high: isize) isize {
const pivot = source_ptr + (@intCast(usize, high) * element_width);
var i = (low - 1); // Index of smaller element and indicates the right position of pivot found so far
var j = low;
while (j <= high - 1) : (j += 1) {
const current_elem = source_ptr + (@intCast(usize, j) * element_width);
const ordering = wrapper(transform, current_elem, pivot);
const order = @intToEnum(utils.Ordering, ordering);
switch (order) {
utils.Ordering.LT => {
// the current element is smaller than the pivot; swap it
i += 1;
swapElements(source_ptr, element_width, @intCast(usize, i), @intCast(usize, j));
},
utils.Ordering.EQ, utils.Ordering.GT => {},
}
}
swapElements(source_ptr, element_width, @intCast(usize, i + 1), @intCast(usize, high));
return (i + 1);
}
fn quicksort(source_ptr: [*]u8, transform: Opaque, wrapper: CompareFn, element_width: usize, low: isize, high: isize) void {
if (low < high) {
// partition index
const pi = partition(source_ptr, transform, wrapper, element_width, low, high);
_ = quicksort(source_ptr, transform, wrapper, element_width, low, pi - 1); // before pi
_ = quicksort(source_ptr, transform, wrapper, element_width, pi + 1, high); // after pi
}
}
pub fn listSortWith(
input: RocList,
caller: CompareFn,
data: Opaque,
inc_n_data: IncN,
data_is_owned: bool,
alignment: u32,
element_width: usize,
) callconv(.C) RocList {
var list = input.makeUnique(alignment, element_width);
if (data_is_owned) {
inc_n_data(data, list.len());
}
if (list.bytes) |source_ptr| {
const low = 0;
const high: isize = @intCast(isize, list.len()) - 1;
quicksort(source_ptr, data, caller, element_width, low, high);
}
return list;
}
// SWAP ELEMENTS
inline fn swapHelp(width: usize, temporary: [*]u8, ptr1: [*]u8, ptr2: [*]u8) void {
@memcpy(temporary, ptr1, width);
@memcpy(ptr1, ptr2, width);
@memcpy(ptr2, temporary, width);
}
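// Swap two elements of arbitrary width through a fixed 64-byte stack
// buffer, chunking the copy when the width exceeds the buffer.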
fn swap(width_initial: usize, p1: [*]u8, p2: [*]u8) void {
const threshold: usize = 64;
var width = width_initial;
var ptr1 = p1;
var ptr2 = p2;
var buffer_actual: [threshold]u8 = undefined;
var buffer: [*]u8 = buffer_actual[0..];
while (true) {
if (width < threshold) {
swapHelp(width, buffer, ptr1, ptr2);
return;
} else {
swapHelp(threshold, buffer, ptr1, ptr2);
ptr1 += threshold;
ptr2 += threshold;
width -= threshold;
}
}
}
fn swapElements(source_ptr: [*]u8, element_width: usize, index_1: usize, index_2: usize) void {
var element_at_i = source_ptr + (index_1 * element_width);
var element_at_j = source_ptr + (index_2 * element_width);
return swap(element_width, element_at_i, element_at_j);
}
pub fn listConcat(list_a: RocList, list_b: RocList, alignment: u32, element_width: usize) callconv(.C) RocList {
if (list_a.isEmpty()) {
return list_b;
} else if (list_b.isEmpty()) {
return list_a;
} else if (list_a.isUnique()) {
const total_length: usize = list_a.len() + list_b.len();
if (list_a.bytes) |source| {
const new_source = utils.unsafeReallocate(
source,
alignment,
list_a.len(),
total_length,
element_width,
);
if (list_b.bytes) |source_b| {
@memcpy(new_source + list_a.len() * element_width, source_b, list_b.len() * element_width);
}
return RocList{ .bytes = new_source, .length = total_length, .capacity = total_length };
}
}
const total_length: usize = list_a.len() + list_b.len();
const output = RocList.allocate(alignment, total_length, element_width);
if (output.bytes) |target| {
if (list_a.bytes) |source| {
@memcpy(target, source, list_a.len() * element_width);
}
if (list_b.bytes) |source| {
@memcpy(target + list_a.len() * element_width, source, list_b.len() * element_width);
}
}
return output;
}
pub fn listReplaceInPlace(
list: RocList,
index: usize,
element: Opaque,
element_width: usize,
out_element: ?[*]u8,
) callconv(.C) RocList {
// INVARIANT: bounds checking happens on the roc side
//
// at the time of writing, the function is implemented roughly as
// `if inBounds then LowLevelListReplace input index item else input`
// so we don't do a bounds check here. Hence, the list is also non-empty,
// because inserting into an empty list is always out of bounds
return listReplaceInPlaceHelp(list, index, element, element_width, out_element);
}
pub fn listReplace(
list: RocList,
alignment: u32,
index: usize,
element: Opaque,
element_width: usize,
out_element: ?[*]u8,
) callconv(.C) RocList {
// INVARIANT: bounds checking happens on the roc side
//
// at the time of writing, the function is implemented roughly as
// `if inBounds then LowLevelListReplace input index item else input`
// so we don't do a bounds check here. Hence, the list is also non-empty,
// because inserting into an empty list is always out of bounds
return listReplaceInPlaceHelp(list.makeUnique(alignment, element_width), index, element, element_width, out_element);
}
inline fn listReplaceInPlaceHelp(
list: RocList,
index: usize,
element: Opaque,
element_width: usize,
out_element: ?[*]u8,
) RocList {
// the element we will replace
var element_at_index = (list.bytes orelse undefined) + (index * element_width);
// copy out the old element
@memcpy(out_element orelse undefined, element_at_index, element_width);
// copy in the new element
@memcpy(element_at_index, element orelse undefined, element_width);
return list;
}
pub fn listIsUnique(
list: RocList,
) callconv(.C) bool {
return list.isEmpty() or list.isUnique();
}
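// A minimal sketch (hypothetical test, assuming `RocList.empty()` exists as
// elsewhere in this file): an empty list has no shared storage, so it is
// trivially unique.
test "listIsUnique: empty list is unique" {
try std.testing.expect(listIsUnique(RocList.empty()));
}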


@ -0,0 +1,317 @@
const std = @import("std");
const builtin = @import("builtin");
const math = std.math;
const utils = @import("utils.zig");
const expect = @import("expect.zig");
const ROC_BUILTINS = "roc_builtins";
const NUM = "num";
const STR = "str";
// Dec Module
const dec = @import("dec.zig");
comptime {
exportDecFn(dec.fromStr, "from_str");
exportDecFn(dec.fromF64C, "from_f64");
exportDecFn(dec.eqC, "eq");
exportDecFn(dec.neqC, "neq");
exportDecFn(dec.negateC, "negate");
exportDecFn(dec.divC, "div");
exportDecFn(dec.addC, "add_with_overflow");
exportDecFn(dec.addOrPanicC, "add_or_panic");
exportDecFn(dec.addSaturatedC, "add_saturated");
exportDecFn(dec.subC, "sub_with_overflow");
exportDecFn(dec.subOrPanicC, "sub_or_panic");
exportDecFn(dec.subSaturatedC, "sub_saturated");
exportDecFn(dec.mulC, "mul_with_overflow");
exportDecFn(dec.mulOrPanicC, "mul_or_panic");
exportDecFn(dec.mulSaturatedC, "mul_saturated");
}
// List Module
const list = @import("list.zig");
comptime {
exportListFn(list.listMap, "map");
exportListFn(list.listMap2, "map2");
exportListFn(list.listMap3, "map3");
exportListFn(list.listMap4, "map4");
exportListFn(list.listMapWithIndex, "map_with_index");
exportListFn(list.listKeepIf, "keep_if");
exportListFn(list.listKeepOks, "keep_oks");
exportListFn(list.listKeepErrs, "keep_errs");
exportListFn(list.listAppend, "append");
exportListFn(list.listPrepend, "prepend");
exportListFn(list.listWithCapacity, "with_capacity");
exportListFn(list.listSortWith, "sort_with");
exportListFn(list.listConcat, "concat");
exportListFn(list.listSublist, "sublist");
exportListFn(list.listDropAt, "drop_at");
exportListFn(list.listReplace, "replace");
exportListFn(list.listReplaceInPlace, "replace_in_place");
exportListFn(list.listSwap, "swap");
exportListFn(list.listIsUnique, "is_unique");
}
// Dict Module
const dict = @import("dict.zig");
const hash = @import("hash.zig");
comptime {
exportDictFn(dict.dictLen, "len");
exportDictFn(dict.dictEmpty, "empty");
exportDictFn(dict.dictInsert, "insert");
exportDictFn(dict.dictRemove, "remove");
exportDictFn(dict.dictContains, "contains");
exportDictFn(dict.dictGet, "get");
exportDictFn(dict.elementsRc, "elementsRc");
exportDictFn(dict.dictKeys, "keys");
exportDictFn(dict.dictValues, "values");
exportDictFn(dict.dictUnion, "union");
exportDictFn(dict.dictIntersection, "intersection");
exportDictFn(dict.dictDifference, "difference");
exportDictFn(dict.dictWalk, "walk");
exportDictFn(dict.setFromList, "set_from_list");
exportDictFn(hash.wyhash, "hash");
exportDictFn(hash.wyhash_rocstr, "hash_str");
}
// Num Module
const num = @import("num.zig");
const INTEGERS = [_]type{ i8, i16, i32, i64, i128, u8, u16, u32, u64, u128 };
const WIDEINTS = [_]type{ i16, i32, i64, i128, i256, u16, u32, u64, u128, u256 };
const FLOATS = [_]type{ f32, f64 };
const NUMBERS = INTEGERS ++ FLOATS;
comptime {
exportNumFn(num.bytesToU16C, "bytes_to_u16");
exportNumFn(num.bytesToU32C, "bytes_to_u32");
inline for (INTEGERS) |T, i| {
num.exportPow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".pow_int.");
num.exportDivCeil(T, ROC_BUILTINS ++ "." ++ NUM ++ ".div_ceil.");
num.exportRoundF32(T, ROC_BUILTINS ++ "." ++ NUM ++ ".round_f32.");
num.exportRoundF64(T, ROC_BUILTINS ++ "." ++ NUM ++ ".round_f64.");
num.exportAddWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_with_overflow.");
num.exportAddOrPanic(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_or_panic.");
num.exportAddSaturatedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_saturated.");
num.exportSubWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_with_overflow.");
num.exportSubOrPanic(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_or_panic.");
num.exportSubSaturatedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_saturated.");
num.exportMulWithOverflow(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_with_overflow.");
num.exportMulOrPanic(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_or_panic.");
num.exportMulSaturatedInt(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_saturated.");
}
inline for (INTEGERS) |FROM| {
inline for (INTEGERS) |TO| {
// We're exporting more than we need here, but that's okay.
num.exportToIntCheckingMax(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max.");
num.exportToIntCheckingMaxAndMin(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max_and_min.");
}
}
inline for (FLOATS) |T| {
num.exportAsin(T, ROC_BUILTINS ++ "." ++ NUM ++ ".asin.");
num.exportAcos(T, ROC_BUILTINS ++ "." ++ NUM ++ ".acos.");
num.exportAtan(T, ROC_BUILTINS ++ "." ++ NUM ++ ".atan.");
num.exportSin(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sin.");
num.exportCos(T, ROC_BUILTINS ++ "." ++ NUM ++ ".cos.");
num.exportPow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".pow.");
num.exportLog(T, ROC_BUILTINS ++ "." ++ NUM ++ ".log.");
num.exportAddWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_with_overflow.");
num.exportSubWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_with_overflow.");
num.exportMulWithOverflow(T, T, ROC_BUILTINS ++ "." ++ NUM ++ ".mul_with_overflow.");
num.exportIsFinite(T, ROC_BUILTINS ++ "." ++ NUM ++ ".is_finite.");
}
}
// Str Module
const str = @import("str.zig");
comptime {
exportStrFn(str.init, "init");
exportStrFn(str.strSplitInPlaceC, "str_split_in_place");
exportStrFn(str.countSegments, "count_segments");
exportStrFn(str.countGraphemeClusters, "count_grapheme_clusters");
exportStrFn(str.startsWith, "starts_with");
exportStrFn(str.startsWithCodePt, "starts_with_code_point");
exportStrFn(str.endsWith, "ends_with");
exportStrFn(str.strConcatC, "concat");
exportStrFn(str.strJoinWithC, "joinWith");
exportStrFn(str.strNumberOfBytes, "number_of_bytes");
exportStrFn(str.strFromFloatC, "from_float");
exportStrFn(str.strEqual, "equal");
exportStrFn(str.strToUtf8C, "to_utf8");
exportStrFn(str.fromUtf8C, "from_utf8");
exportStrFn(str.fromUtf8RangeC, "from_utf8_range");
exportStrFn(str.repeat, "repeat");
exportStrFn(str.strTrim, "trim");
exportStrFn(str.strTrimLeft, "trim_left");
exportStrFn(str.strTrimRight, "trim_right");
inline for (INTEGERS) |T| {
str.exportFromInt(T, ROC_BUILTINS ++ "." ++ STR ++ ".from_int.");
num.exportParseInt(T, ROC_BUILTINS ++ "." ++ STR ++ ".to_int.");
}
inline for (FLOATS) |T| {
num.exportParseFloat(T, ROC_BUILTINS ++ "." ++ STR ++ ".to_float.");
}
}
// Utils
comptime {
exportUtilsFn(utils.test_panic, "test_panic");
exportUtilsFn(utils.increfC, "incref");
exportUtilsFn(utils.decrefC, "decref");
exportUtilsFn(utils.decrefCheckNullC, "decref_check_null");
exportUtilsFn(utils.allocateWithRefcountC, "allocate_with_refcount");
exportExpectFn(expect.expectFailedC, "expect_failed");
exportExpectFn(expect.getExpectFailuresC, "get_expect_failures");
exportExpectFn(expect.deinitFailuresC, "deinit_failures");
@export(utils.panic, .{ .name = "roc_builtins.utils." ++ "panic", .linkage = .Weak });
if (builtin.target.cpu.arch == .aarch64) {
@export(__roc_force_setjmp, .{ .name = "__roc_force_setjmp", .linkage = .Weak });
@export(__roc_force_longjmp, .{ .name = "__roc_force_longjmp", .linkage = .Weak });
}
}
// Utils continued - SJLJ
// For tests (in particular test_gen), roc_panic is implemented in terms of
// setjmp/longjmp. LLVM is unable to generate code for longjmp on AArch64 (https://github.com/rtfeldman/roc/issues/2965),
// so instead we ask Zig to please provide implementations for us, which it does
// (seemingly via musl).
pub extern fn setjmp([*c]c_int) c_int;
pub extern fn longjmp([*c]c_int, c_int) noreturn;
pub extern fn _setjmp([*c]c_int) c_int;
pub extern fn _longjmp([*c]c_int, c_int) noreturn;
pub extern fn sigsetjmp([*c]c_int, c_int) c_int;
pub extern fn siglongjmp([*c]c_int, c_int) noreturn;
pub extern fn longjmperror() void;
// Zig won't expose the externs (and hence link correctly) unless we force them to be used.
fn __roc_force_setjmp(it: [*c]c_int) callconv(.C) c_int {
return setjmp(it);
}
fn __roc_force_longjmp(a0: [*c]c_int, a1: c_int) callconv(.C) noreturn {
longjmp(a0, a1);
}
// Export helpers - Must be run inside a comptime
fn exportBuiltinFn(comptime func: anytype, comptime func_name: []const u8) void {
@export(func, .{ .name = "roc_builtins." ++ func_name, .linkage = .Strong });
}
fn exportNumFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "num." ++ func_name);
}
fn exportStrFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "str." ++ func_name);
}
fn exportDictFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "dict." ++ func_name);
}
fn exportListFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "list." ++ func_name);
}
fn exportDecFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "dec." ++ func_name);
}
fn exportUtilsFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "utils." ++ func_name);
}
fn exportExpectFn(comptime func: anytype, comptime func_name: []const u8) void {
exportBuiltinFn(func, "expect." ++ func_name);
}
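// For example, `exportListFn(list.listConcat, "concat")` above resolves to an
// exported symbol named `roc_builtins.list.concat`.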
// Custom panic function, as the builtin Zig version errors during LLVM verification
pub fn panic(message: []const u8, stacktrace: ?*std.builtin.StackTrace) noreturn {
if (builtin.is_test) {
std.debug.print("{s}: {?}", .{ message, stacktrace });
} else {
_ = message;
_ = stacktrace;
}
unreachable;
}
// Run all tests in imported modules
// https://github.com/ziglang/zig/blob/master/lib/std/std.zig#L94
test "" {
const testing = std.testing;
testing.refAllDecls(@This());
}
// For unclear reasons, sometimes this function is not linked in on some machines.
// Therefore we provide it as LLVM bitcode and mark it as externally linked during our LLVM codegen
//
// Taken from
// https://github.com/ziglang/zig/blob/85755c51d529e7d9b406c6bdf69ce0a0f33f3353/lib/std/special/compiler_rt/muloti4.zig
//
// Thank you Zig Contributors!
// Export it as weak in case it is already linked in by something else.
comptime {
@export(__muloti4, .{ .name = "__muloti4", .linkage = .Weak });
}
fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
// @setRuntimeSafety(std.builtin.is_test);
const min = @bitCast(i128, @as(u128, 1 << (128 - 1)));
const max = ~min;
overflow.* = 0;
const r = a *% b;
if (a == min) {
if (b != 0 and b != 1) {
overflow.* = 1;
}
return r;
}
if (b == min) {
if (a != 0 and a != 1) {
overflow.* = 1;
}
return r;
}
const sa = a >> (128 - 1);
const abs_a = (a ^ sa) -% sa;
const sb = b >> (128 - 1);
const abs_b = (b ^ sb) -% sb;
if (abs_a < 2 or abs_b < 2) {
return r;
}
if (sa == sb) {
if (abs_a > @divTrunc(max, abs_b)) {
overflow.* = 1;
}
} else {
if (abs_a > @divTrunc(min, -abs_b)) {
overflow.* = 1;
}
}
return r;
}
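// A minimal sketch exercising `__muloti4` directly (a hypothetical test, not
// from the original file): 2^100 * 2^100 cannot fit in an i128, so the
// overflow flag must be set.
test "__muloti4 flags i128 overflow" {
var overflow: c_int = 0;
_ = __muloti4(1 << 100, 1 << 100, &overflow);
try std.testing.expectEqual(@as(c_int, 1), overflow);
}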


@ -0,0 +1,462 @@
const std = @import("std");
const always_inline = std.builtin.CallOptions.Modifier.always_inline;
const math = std.math;
const RocList = @import("list.zig").RocList;
const RocStr = @import("str.zig").RocStr;
const WithOverflow = @import("utils.zig").WithOverflow;
const roc_panic = @import("utils.zig").panic;
pub fn NumParseResult(comptime T: type) type {
// On the Roc side we sort by alignment; putting the errorcode last
// always works out (no number has a smaller alignment than 1).
return extern struct {
value: T,
errorcode: u8, // 0 indicates success
};
}
pub const U256 = struct {
hi: u128,
lo: u128,
};
pub fn mul_u128(a: u128, b: u128) U256 {
var hi: u128 = undefined;
var lo: u128 = undefined;
const bits_in_dword_2: u32 = 64;
const lower_mask: u128 = math.maxInt(u128) >> bits_in_dword_2;
lo = (a & lower_mask) * (b & lower_mask);
var t = lo >> bits_in_dword_2;
lo &= lower_mask;
t += (a >> bits_in_dword_2) * (b & lower_mask);
lo += (t & lower_mask) << bits_in_dword_2;
hi = t >> bits_in_dword_2;
t = lo >> bits_in_dword_2;
lo &= lower_mask;
t += (b >> bits_in_dword_2) * (a & lower_mask);
lo += (t & lower_mask) << bits_in_dword_2;
hi += t >> bits_in_dword_2;
hi += (a >> bits_in_dword_2) * (b >> bits_in_dword_2);
return .{ .hi = hi, .lo = lo };
}
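// A small sanity sketch (hypothetical test): 2^64 * 2^64 = 2^128, which lands
// entirely in the high word of the 256-bit result.
test "mul_u128 carries into the high word" {
const product = mul_u128(1 << 64, 1 << 64);
try std.testing.expectEqual(@as(u128, 1), product.hi);
try std.testing.expectEqual(@as(u128, 0), product.lo);
}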
pub fn exportParseInt(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(buf: RocStr) callconv(.C) NumParseResult(T) {
// a radix of 0 will make Zig determine the radix from the prefix:
// * A prefix of "0b" implies radix=2,
// * A prefix of "0o" implies radix=8,
// * A prefix of "0x" implies radix=16,
// * Otherwise radix=10 is assumed.
const radix = 0;
if (std.fmt.parseInt(T, buf.asSlice(), radix)) |success| {
return .{ .errorcode = 0, .value = success };
} else |_| {
return .{ .errorcode = 1, .value = 0 };
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportParseFloat(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(buf: RocStr) callconv(.C) NumParseResult(T) {
if (std.fmt.parseFloat(T, buf.asSlice())) |success| {
return .{ .errorcode = 0, .value = success };
} else |_| {
return .{ .errorcode = 1, .value = 0 };
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportPow(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(base: T, exp: T) callconv(.C) T {
return std.math.pow(T, base, exp);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportIsFinite(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) bool {
return std.math.isFinite(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportAsin(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return std.math.asin(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportAcos(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return std.math.acos(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportAtan(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return std.math.atan(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportSin(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return @sin(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportCos(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return @cos(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportLog(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) T {
return @log(input);
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportRoundF32(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: f32) callconv(.C) T {
return @floatToInt(T, (@round(input)));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportRoundF64(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: f64) callconv(.C) T {
return @floatToInt(T, (@round(input)));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportDivCeil(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(a: T, b: T) callconv(.C) T {
return math.divCeil(T, a, b) catch @panic("TODO runtime exception for dividing by 0!");
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn ToIntCheckedResult(comptime T: type) type {
// On the Roc side we sort by alignment; putting the errorcode last
// always works out (no number with smaller alignment than 1).
return extern struct {
value: T,
out_of_bounds: bool,
};
}
pub fn exportToIntCheckingMax(comptime From: type, comptime To: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: From) callconv(.C) ToIntCheckedResult(To) {
if (input > std.math.maxInt(To)) {
return .{ .out_of_bounds = true, .value = 0 };
}
return .{ .out_of_bounds = false, .value = @intCast(To, input) };
}
}.func;
@export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong });
}
pub fn exportToIntCheckingMaxAndMin(comptime From: type, comptime To: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: From) callconv(.C) ToIntCheckedResult(To) {
if (input > std.math.maxInt(To) or input < std.math.minInt(To)) {
return .{ .out_of_bounds = true, .value = 0 };
}
return .{ .out_of_bounds = false, .value = @intCast(To, input) };
}
}.func;
@export(f, .{ .name = name ++ @typeName(From), .linkage = .Strong });
}
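// Reinterpret consecutive bytes of a list as an integer in native byte order,
// via @bitCast of a fixed-size byte array.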
pub fn bytesToU16C(arg: RocList, position: usize) callconv(.C) u16 {
return @call(.{ .modifier = always_inline }, bytesToU16, .{ arg, position });
}
fn bytesToU16(arg: RocList, position: usize) u16 {
const bytes = @ptrCast([*]const u8, arg.bytes);
return @bitCast(u16, [_]u8{ bytes[position], bytes[position + 1] });
}
pub fn bytesToU32C(arg: RocList, position: usize) callconv(.C) u32 {
return @call(.{ .modifier = always_inline }, bytesToU32, .{ arg, position });
}
fn bytesToU32(arg: RocList, position: usize) u32 {
const bytes = @ptrCast([*]const u8, arg.bytes);
return @bitCast(u32, [_]u8{ bytes[position], bytes[position + 1], bytes[position + 2], bytes[position + 3] });
}
fn addWithOverflow(comptime T: type, self: T, other: T) WithOverflow(T) {
switch (@typeInfo(T)) {
.Int => {
var answer: T = undefined;
const overflowed = @addWithOverflow(T, self, other, &answer);
return .{ .value = answer, .has_overflowed = overflowed };
},
else => {
const answer = self + other;
const overflowed = !std.math.isFinite(answer);
return .{ .value = answer, .has_overflowed = overflowed };
},
}
}
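// A minimal sketch (hypothetical test): the integer branch wraps the value
// and reports the wrap through `has_overflowed`.
test "addWithOverflow reports integer wraparound" {
const wrapped = addWithOverflow(u8, 250, 10);
try std.testing.expect(wrapped.has_overflowed);
const ok = addWithOverflow(u8, 1, 2);
try std.testing.expect(!ok.has_overflowed);
try std.testing.expectEqual(@as(u8, 3), ok.value);
}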
pub fn exportAddWithOverflow(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) WithOverflow(T) {
return @call(.{ .modifier = always_inline }, addWithOverflow, .{ T, self, other });
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportAddSaturatedInt(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = addWithOverflow(T, self, other);
if (result.has_overflowed) {
// We can unambiguously tell which way it wrapped, because we have N+1 bits including the overflow bit
if (result.value >= 0 and @typeInfo(T).Int.signedness == .signed) {
return std.math.minInt(T);
} else {
return std.math.maxInt(T);
}
} else {
return result.value;
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportAddOrPanic(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = addWithOverflow(T, self, other);
if (result.has_overflowed) {
roc_panic("integer addition overflowed!", 1);
unreachable;
} else {
return result.value;
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
fn subWithOverflow(comptime T: type, self: T, other: T) WithOverflow(T) {
switch (@typeInfo(T)) {
.Int => {
var answer: T = undefined;
const overflowed = @subWithOverflow(T, self, other, &answer);
return .{ .value = answer, .has_overflowed = overflowed };
},
else => {
const answer = self - other;
const overflowed = !std.math.isFinite(answer);
return .{ .value = answer, .has_overflowed = overflowed };
},
}
}
pub fn exportSubWithOverflow(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) WithOverflow(T) {
return @call(.{ .modifier = always_inline }, subWithOverflow, .{ T, self, other });
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportSubSaturatedInt(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = subWithOverflow(T, self, other);
if (result.has_overflowed) {
if (@typeInfo(T).Int.signedness == .unsigned) {
return 0;
} else if (self < 0) {
return std.math.minInt(T);
} else {
return std.math.maxInt(T);
}
} else {
return result.value;
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportSubOrPanic(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = subWithOverflow(T, self, other);
if (result.has_overflowed) {
roc_panic("integer subtraction overflowed!", 1);
unreachable;
} else {
return result.value;
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
fn mulWithOverflow(comptime T: type, comptime W: type, self: T, other: T) WithOverflow(T) {
switch (@typeInfo(T)) {
.Int => {
if (T == i128) {
const is_answer_negative = (self < 0) != (other < 0);
const max = std.math.maxInt(i128);
const min = std.math.minInt(i128);
const self_u128 = @intCast(u128, math.absInt(self) catch {
if (other == 0) {
return .{ .value = 0, .has_overflowed = false };
} else if (other == 1) {
return .{ .value = self, .has_overflowed = false };
} else if (is_answer_negative) {
return .{ .value = min, .has_overflowed = true };
} else {
return .{ .value = max, .has_overflowed = true };
}
});
const other_u128 = @intCast(u128, math.absInt(other) catch {
if (self == 0) {
return .{ .value = 0, .has_overflowed = false };
} else if (self == 1) {
return .{ .value = other, .has_overflowed = false };
} else if (is_answer_negative) {
return .{ .value = min, .has_overflowed = true };
} else {
return .{ .value = max, .has_overflowed = true };
}
});
const answer256: U256 = mul_u128(self_u128, other_u128);
if (is_answer_negative) {
if (answer256.hi != 0 or answer256.lo > (1 << 127)) {
return .{ .value = min, .has_overflowed = true };
} else if (answer256.lo == (1 << 127)) {
return .{ .value = min, .has_overflowed = false };
} else {
return .{ .value = -@intCast(i128, answer256.lo), .has_overflowed = false };
}
} else {
if (answer256.hi != 0 or answer256.lo > @intCast(u128, max)) {
return .{ .value = max, .has_overflowed = true };
} else {
return .{ .value = @intCast(i128, answer256.lo), .has_overflowed = false };
}
}
} else {
const self_wide: W = self;
const other_wide: W = other;
const answer: W = self_wide * other_wide;
const max: W = std.math.maxInt(T);
const min: W = std.math.minInt(T);
if (answer > max) {
return .{ .value = max, .has_overflowed = true };
} else if (answer < min) {
return .{ .value = min, .has_overflowed = true };
} else {
return .{ .value = @intCast(T, answer), .has_overflowed = false };
}
}
},
else => {
const answer = self * other;
const overflowed = !std.math.isFinite(answer);
return .{ .value = answer, .has_overflowed = overflowed };
},
}
}
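// A minimal sketch (hypothetical test): overflowing i128 multiplication is
// clamped to maxInt and flagged, mirroring the saturating export below.
test "mulWithOverflow clamps overflowing i128 at maxInt" {
const max: i128 = std.math.maxInt(i128);
const result = mulWithOverflow(i128, i256, max, 2);
try std.testing.expect(result.has_overflowed);
try std.testing.expectEqual(max, result.value);
}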
pub fn exportMulWithOverflow(comptime T: type, comptime W: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) WithOverflow(T) {
return @call(.{ .modifier = always_inline }, mulWithOverflow, .{ T, W, self, other });
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportMulSaturatedInt(comptime T: type, comptime W: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = @call(.{ .modifier = always_inline }, mulWithOverflow, .{ T, W, self, other });
return result.value;
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportMulOrPanic(comptime T: type, comptime W: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
const result = @call(.{ .modifier = always_inline }, mulWithOverflow, .{ T, W, self, other });
if (result.has_overflowed) {
roc_panic("integer multiplication overflowed!", 1);
unreachable;
} else {
return result.value;
}
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}

File diff suppressed because it is too large


@ -0,0 +1,310 @@
const std = @import("std");
const always_inline = std.builtin.CallOptions.Modifier.always_inline;
const Monotonic = std.builtin.AtomicOrder.Monotonic;
pub fn WithOverflow(comptime T: type) type {
return extern struct { value: T, has_overflowed: bool };
}
// If allocation fails, this must cxa_throw - it must not return a null pointer!
extern fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*anyopaque;
// This should never be passed a null pointer.
// If allocation fails, this must cxa_throw - it must not return a null pointer!
extern fn roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, alignment: u32) callconv(.C) ?*anyopaque;
// This should never be passed a null pointer.
extern fn roc_dealloc(c_ptr: *anyopaque, alignment: u32) callconv(.C) void;
// Signals to the host that the program has panicked
extern fn roc_panic(c_ptr: *const anyopaque, tag_id: u32) callconv(.C) void;
// should work just like libc memcpy (we can't assume libc is present)
extern fn roc_memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void;
comptime {
const builtin = @import("builtin");
// During tests, use the testing allocators to satisfy these functions.
if (builtin.is_test) {
@export(testing_roc_alloc, .{ .name = "roc_alloc", .linkage = .Strong });
@export(testing_roc_realloc, .{ .name = "roc_realloc", .linkage = .Strong });
@export(testing_roc_dealloc, .{ .name = "roc_dealloc", .linkage = .Strong });
@export(testing_roc_panic, .{ .name = "roc_panic", .linkage = .Strong });
@export(testing_roc_memcpy, .{ .name = "roc_memcpy", .linkage = .Strong });
}
}
fn testing_roc_alloc(size: usize, _: u32) callconv(.C) ?*anyopaque {
return @ptrCast(?*anyopaque, std.testing.allocator.alloc(u8, size) catch unreachable);
}
fn testing_roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*anyopaque {
const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
const slice = ptr[0..old_size];
return @ptrCast(?*anyopaque, std.testing.allocator.realloc(slice, new_size) catch unreachable);
}
fn testing_roc_dealloc(c_ptr: *anyopaque, _: u32) callconv(.C) void {
const ptr = @ptrCast([*]u8, @alignCast(2 * @alignOf(usize), c_ptr));
std.testing.allocator.destroy(ptr);
}
fn testing_roc_panic(c_ptr: *anyopaque, tag_id: u32) callconv(.C) void {
_ = c_ptr;
_ = tag_id;
@panic("Roc panicked");
}
fn testing_roc_memcpy(dest: *anyopaque, src: *anyopaque, bytes: usize) callconv(.C) ?*anyopaque {
const zig_dest = @ptrCast([*]u8, dest);
const zig_src = @ptrCast([*]u8, src);
@memcpy(zig_dest, zig_src, bytes);
return dest;
}
pub fn alloc(size: usize, alignment: u32) ?[*]u8 {
return @ptrCast(?[*]u8, @call(.{ .modifier = always_inline }, roc_alloc, .{ size, alignment }));
}
pub fn realloc(c_ptr: [*]u8, new_size: usize, old_size: usize, alignment: u32) [*]u8 {
return @ptrCast([*]u8, @call(.{ .modifier = always_inline }, roc_realloc, .{ c_ptr, new_size, old_size, alignment }));
}
pub fn dealloc(c_ptr: [*]u8, alignment: u32) void {
return @call(.{ .modifier = always_inline }, roc_dealloc, .{ c_ptr, alignment });
}
// We must export this explicitly because right now it is not used from Zig code
pub fn panic(c_ptr: *const anyopaque, alignment: u32) callconv(.C) void {
return @call(.{ .modifier = always_inline }, roc_panic, .{ c_ptr, alignment });
}
pub fn memcpy(dst: [*]u8, src: [*]u8, size: usize) void {
@call(.{ .modifier = always_inline }, roc_memcpy, .{ dst, src, size });
}
// Indirection because otherwise Zig creates an alias to the panic function, which our
// LLVM code does not know how to deal with
pub fn test_panic(c_ptr: *anyopaque, alignment: u32) callconv(.C) void {
_ = c_ptr;
_ = alignment;
// const cstr = @ptrCast([*:0]u8, c_ptr);
// const stderr = std.io.getStdErr().writer();
// stderr.print("Roc panicked: {s}!\n", .{cstr}) catch unreachable;
// std.c.exit(1);
}
pub const Inc = fn (?[*]u8) callconv(.C) void;
pub const IncN = fn (?[*]u8, u64) callconv(.C) void;
pub const Dec = fn (?[*]u8) callconv(.C) void;
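// Refcounts count *up* from minInt(isize) toward 0; a stored value of 0
// (REFCOUNT_MAX_ISIZE) marks constant data that is never modified or freed,
// as exercised by the "increfC, static data" test below.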
const REFCOUNT_MAX_ISIZE: isize = 0;
pub const REFCOUNT_ONE_ISIZE: isize = std.math.minInt(isize);
pub const REFCOUNT_ONE: usize = @bitCast(usize, REFCOUNT_ONE_ISIZE);
pub const IntWidth = enum(u8) {
U8 = 0,
U16 = 1,
U32 = 2,
U64 = 3,
U128 = 4,
I8 = 5,
I16 = 6,
I32 = 7,
I64 = 8,
I128 = 9,
};
const Refcount = enum {
none,
normal,
atomic,
};
const RC_TYPE = Refcount.normal;
pub fn increfC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void {
if (RC_TYPE == Refcount.none) return;
var refcount = ptr_to_refcount.*;
if (refcount < REFCOUNT_MAX_ISIZE) {
switch (RC_TYPE) {
Refcount.normal => {
ptr_to_refcount.* = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
},
Refcount.atomic => {
var next = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
while (@cmpxchgWeak(isize, ptr_to_refcount, refcount, next, Monotonic, Monotonic)) |found| {
refcount = found;
next = std.math.min(refcount + amount, REFCOUNT_MAX_ISIZE);
}
},
Refcount.none => unreachable,
}
}
}
pub fn decrefC(
bytes_or_null: ?[*]isize,
alignment: u32,
) callconv(.C) void {
// IMPORTANT: bytes_or_null in this case is expected to be a pointer to the refcount
// (NOT the start of the data, or the start of the allocation)
// this is of course unsafe, but we trust what we get from the llvm side
var bytes = @ptrCast([*]isize, bytes_or_null);
return @call(.{ .modifier = always_inline }, decref_ptr_to_refcount, .{ bytes, alignment });
}
pub fn decrefCheckNullC(
bytes_or_null: ?[*]u8,
alignment: u32,
) callconv(.C) void {
if (bytes_or_null) |bytes| {
const isizes: [*]isize = @ptrCast([*]isize, @alignCast(@sizeOf(isize), bytes));
return @call(.{ .modifier = always_inline }, decref_ptr_to_refcount, .{ isizes - 1, alignment });
}
}
pub fn decref(
bytes_or_null: ?[*]u8,
data_bytes: usize,
alignment: u32,
) void {
if (data_bytes == 0) {
return;
}
var bytes = bytes_or_null orelse return;
const isizes: [*]isize = @ptrCast([*]isize, @alignCast(@sizeOf(isize), bytes));
decref_ptr_to_refcount(isizes - 1, alignment);
}
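// `refcount_ptr` points at the refcount word itself. When the element
// alignment exceeds the pointer width, the allocation starts
// (extra_bytes - @sizeOf(usize)) bytes earlier, which the dealloc calls
// below compensate for.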
inline fn decref_ptr_to_refcount(
refcount_ptr: [*]isize,
alignment: u32,
) void {
if (RC_TYPE == Refcount.none) return;
const extra_bytes = std.math.max(alignment, @sizeOf(usize));
switch (RC_TYPE) {
Refcount.normal => {
const refcount: isize = refcount_ptr[0];
if (refcount == REFCOUNT_ONE_ISIZE) {
dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
} else if (refcount < REFCOUNT_MAX_ISIZE) {
refcount_ptr[0] = refcount - 1;
}
},
Refcount.atomic => {
if (refcount_ptr[0] < REFCOUNT_MAX_ISIZE) {
var last = @atomicRmw(isize, &refcount_ptr[0], std.builtin.AtomicRmwOp.Sub, 1, Monotonic);
if (last == REFCOUNT_ONE_ISIZE) {
dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
}
}
},
Refcount.none => unreachable,
}
}
pub fn allocateWithRefcountC(
data_bytes: usize,
element_alignment: u32,
) callconv(.C) [*]u8 {
return allocateWithRefcount(data_bytes, element_alignment);
}
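// Allocation layout: [padding][refcount: usize][data...]. The returned
// pointer is to the data, with the refcount in the word immediately before
// it.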
pub fn allocateWithRefcount(
data_bytes: usize,
element_alignment: u32,
) [*]u8 {
const ptr_width = @sizeOf(usize);
const alignment = std.math.max(ptr_width, element_alignment);
const length = alignment + data_bytes;
var new_bytes: [*]u8 = alloc(length, alignment) orelse unreachable;
const data_ptr = new_bytes + alignment;
const refcount_ptr = @ptrCast([*]usize, @alignCast(ptr_width, data_ptr) - ptr_width);
refcount_ptr[0] = if (RC_TYPE == Refcount.none) REFCOUNT_MAX_ISIZE else REFCOUNT_ONE;
return data_ptr;
}
pub const CSlice = extern struct {
pointer: *anyopaque,
len: usize,
};
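// Grow an existing list allocation. `source_ptr` points at the data, so the
// `align_width` bytes of refcount header in front of it are included in the
// realloc and preserved.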
pub fn unsafeReallocate(
source_ptr: [*]u8,
alignment: u32,
old_length: usize,
new_length: usize,
element_width: usize,
) [*]u8 {
const align_width: usize = std.math.max(alignment, @sizeOf(usize));
const old_width = align_width + old_length * element_width;
const new_width = align_width + new_length * element_width;
// TODO handle out of memory
// NOTE realloc will dealloc the original allocation
const old_allocation = source_ptr - align_width;
const new_allocation = realloc(old_allocation, new_width, old_width, alignment);
const new_source = @ptrCast([*]u8, new_allocation) + align_width;
return new_source;
}
pub const RocResult = extern struct {
bytes: ?[*]u8,
pub fn isOk(self: RocResult) bool {
// assumptions
//
// - the tag is the first field
// - the tag is usize bytes wide
// - Ok has tag_id 1, because Err < Ok
const usizes: [*]usize = @ptrCast([*]usize, @alignCast(@alignOf(usize), self.bytes));
return usizes[0] == 1;
}
pub fn isErr(self: RocResult) bool {
return !self.isOk();
}
};
pub const Ordering = enum(u8) {
EQ = 0,
GT = 1,
LT = 2,
};
pub const UpdateMode = enum(u8) {
Immutable = 0,
InPlace = 1,
};
test "increfC, refcounted data" {
var mock_rc: isize = REFCOUNT_ONE_ISIZE + 17;
var ptr_to_refcount: *isize = &mock_rc;
increfC(ptr_to_refcount, 2);
try std.testing.expectEqual(mock_rc, REFCOUNT_ONE_ISIZE + 19);
}
test "increfC, static data" {
var mock_rc: isize = REFCOUNT_MAX_ISIZE;
var ptr_to_refcount: *isize = &mock_rc;
increfC(ptr_to_refcount, 2);
try std.testing.expectEqual(mock_rc, REFCOUNT_MAX_ISIZE);
}