mirror of
https://github.com/roc-lang/roc.git
synced 2025-08-02 19:32:17 +00:00

+ An error message that says what the failures were
+ Not panicking (so these are effectively error productions)
410 lines
13 KiB
Zig
const std = @import("std");

const always_inline = std.builtin.CallOptions.Modifier.always_inline;

pub fn WithOverflow(comptime T: type) type {
    return extern struct { value: T, has_overflowed: bool };
}
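
// A hedged usage sketch of WithOverflow: builtins that can overflow return
// the value together with a flag instead of panicking.
test "WithOverflow carries a value and an overflow flag" {
    const result = WithOverflow(u8){ .value = 255, .has_overflowed = false };
    try std.testing.expectEqual(result.value, 255);
    try std.testing.expect(!result.has_overflowed);
}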

// If allocation fails, this must cxa_throw - it must not return a null pointer!
extern fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*c_void;

// This should never be passed a null pointer.
// If allocation fails, this must cxa_throw - it must not return a null pointer!
extern fn roc_realloc(c_ptr: *c_void, new_size: usize, old_size: usize, alignment: u32) callconv(.C) ?*c_void;

// This should never be passed a null pointer.
extern fn roc_dealloc(c_ptr: *c_void, alignment: u32) callconv(.C) void;

// Signals to the host that the program has panicked
extern fn roc_panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void;

// should work just like libc memcpy (we can't assume libc is present)
extern fn roc_memcpy(dst: [*]u8, src: [*]u8, size: usize) callconv(.C) void;

comptime {
    const builtin = @import("builtin");

    // During tests, use the testing allocators to satisfy these functions.
    if (builtin.is_test) {
        @export(testing_roc_alloc, .{ .name = "roc_alloc", .linkage = .Strong });
        @export(testing_roc_realloc, .{ .name = "roc_realloc", .linkage = .Strong });
        @export(testing_roc_dealloc, .{ .name = "roc_dealloc", .linkage = .Strong });
        @export(testing_roc_panic, .{ .name = "roc_panic", .linkage = .Strong });
        @export(testing_roc_memcpy, .{ .name = "roc_memcpy", .linkage = .Strong });
    }
}
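
// Outside of tests, the host is expected to provide these symbols. A hedged
// sketch of what a host might export (assuming the host can use libc, which
// this file itself cannot):
//
//     export fn roc_alloc(size: usize, alignment: u32) callconv(.C) ?*c_void {
//         _ = alignment;
//         return std.c.malloc(size);
//     }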

fn testing_roc_alloc(size: usize, _: u32) callconv(.C) ?*c_void {
    return @ptrCast(?*c_void, std.testing.allocator.alloc(u8, size) catch unreachable);
}

fn testing_roc_realloc(c_ptr: *c_void, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*c_void {
    const ptr = @ptrCast([*]u8, @alignCast(16, c_ptr));
    const slice = ptr[0..old_size];

    return @ptrCast(?*c_void, std.testing.allocator.realloc(slice, new_size) catch unreachable);
}

fn testing_roc_dealloc(c_ptr: *c_void, _: u32) callconv(.C) void {
    const ptr = @ptrCast([*]u8, @alignCast(16, c_ptr));

    std.testing.allocator.destroy(ptr);
}

fn testing_roc_panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void {
    _ = c_ptr;
    _ = tag_id;

    @panic("Roc panicked");
}

// Returns void to match the extern declaration of roc_memcpy above,
// which this function is exported as during tests.
fn testing_roc_memcpy(dest: *c_void, src: *c_void, bytes: usize) callconv(.C) void {
    const zig_dest = @ptrCast([*]u8, dest);
    const zig_src = @ptrCast([*]u8, src);

    @memcpy(zig_dest, zig_src, bytes);
}

pub fn alloc(size: usize, alignment: u32) [*]u8 {
    return @ptrCast([*]u8, @call(.{ .modifier = always_inline }, roc_alloc, .{ size, alignment }));
}

pub fn realloc(c_ptr: [*]u8, new_size: usize, old_size: usize, alignment: u32) [*]u8 {
    return @ptrCast([*]u8, @call(.{ .modifier = always_inline }, roc_realloc, .{ c_ptr, new_size, old_size, alignment }));
}

pub fn dealloc(c_ptr: [*]u8, alignment: u32) void {
    return @call(.{ .modifier = always_inline }, roc_dealloc, .{ c_ptr, alignment });
}

// Must be exported explicitly because it is not currently called from Zig code.
// The second argument is forwarded to roc_panic's tag_id, so it is named accordingly.
pub fn panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void {
    return @call(.{ .modifier = always_inline }, roc_panic, .{ c_ptr, tag_id });
}

// indirection because otherwise zig creates an alias to the panic function which our LLVM code
// does not know how to deal with
pub fn test_panic(c_ptr: *c_void, tag_id: u32) callconv(.C) void {
    _ = c_ptr;
    _ = tag_id;
    // const cstr = @ptrCast([*:0]u8, c_ptr);

    // const stderr = std.io.getStdErr().writer();
    // stderr.print("Roc panicked: {s}!\n", .{cstr}) catch unreachable;

    // std.c.exit(1);
}

pub const Inc = fn (?[*]u8) callconv(.C) void;
pub const IncN = fn (?[*]u8, u64) callconv(.C) void;
pub const Dec = fn (?[*]u8) callconv(.C) void;

// Refcounts are stored as isize: one live reference is minInt(isize), and the
// count increases toward 0; 0 itself marks permanently-live (static) data.
const REFCOUNT_MAX_ISIZE: isize = 0;
pub const REFCOUNT_ONE_ISIZE: isize = std.math.minInt(isize);
pub const REFCOUNT_ONE: usize = @bitCast(usize, REFCOUNT_ONE_ISIZE);
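
// A hedged sketch of the encoding above, exercised directly.
test "refcount encoding sketch" {
    // One reference is the most negative isize...
    try std.testing.expectEqual(REFCOUNT_ONE_ISIZE, std.math.minInt(isize));
    // ...and each additional reference moves the count one step toward 0.
    const two_references: isize = REFCOUNT_ONE_ISIZE + 1;
    try std.testing.expect(two_references > REFCOUNT_ONE_ISIZE);
}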

pub const IntWidth = enum(u8) {
    U8 = 0,
    U16 = 1,
    U32 = 2,
    U64 = 3,
    U128 = 4,
    I8 = 5,
    I16 = 6,
    I32 = 7,
    I64 = 8,
    I128 = 9,
};

pub fn increfC(ptr_to_refcount: *isize, amount: isize) callconv(.C) void {
    var refcount = ptr_to_refcount.*;
    // Static data is pinned at REFCOUNT_MAX_ISIZE and must never be changed.
    var masked_amount = if (refcount == REFCOUNT_MAX_ISIZE) 0 else amount;
    ptr_to_refcount.* = refcount + masked_amount;
}

pub fn decrefC(
    bytes_or_null: ?[*]isize,
    alignment: u32,
) callconv(.C) void {
    // IMPORTANT: bytes_or_null in this case is expected to be a pointer to the refcount
    // (NOT the start of the data, or the start of the allocation)

    // this is of course unsafe, but we trust what we get from the llvm side
    var bytes = @ptrCast([*]isize, bytes_or_null);

    return @call(.{ .modifier = always_inline }, decref_ptr_to_refcount, .{ bytes, alignment });
}

pub fn decrefCheckNullC(
    bytes_or_null: ?[*]u8,
    alignment: u32,
) callconv(.C) void {
    if (bytes_or_null) |bytes| {
        const isizes: [*]isize = @ptrCast([*]isize, @alignCast(@sizeOf(isize), bytes));
        return @call(.{ .modifier = always_inline }, decref_ptr_to_refcount, .{ isizes - 1, alignment });
    }
}

pub fn decref(
    bytes_or_null: ?[*]u8,
    data_bytes: usize,
    alignment: u32,
) void {
    if (data_bytes == 0) {
        return;
    }

    var bytes = bytes_or_null orelse return;

    const isizes: [*]isize = @ptrCast([*]isize, @alignCast(@sizeOf(isize), bytes));

    decref_ptr_to_refcount(isizes - 1, alignment);
}

inline fn decref_ptr_to_refcount(
    refcount_ptr: [*]isize,
    alignment: u32,
) void {
    const refcount: isize = refcount_ptr[0];
    const extra_bytes = std.math.max(alignment, @sizeOf(usize));

    if (refcount == REFCOUNT_ONE_ISIZE) {
        // This was the last reference; free the allocation, which begins
        // (extra_bytes - @sizeOf(usize)) bytes before the refcount itself.
        dealloc(@ptrCast([*]u8, refcount_ptr) - (extra_bytes - @sizeOf(usize)), alignment);
    } else if (refcount < 0) {
        refcount_ptr[0] = refcount - 1;
    }
}
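
// A hedged sketch of the shared-reference path: decref on data with more than
// one reference should only decrement the count, not free anything.
test "decref decrements a shared refcount without freeing" {
    // Two-word mock: [refcount][data], standing in for a real allocation.
    var buffer = [2]isize{ REFCOUNT_ONE_ISIZE + 1, 0 };
    const data_ptr = @ptrCast([*]u8, &buffer[1]);
    decref(data_ptr, @sizeOf(isize), @alignOf(isize));
    try std.testing.expectEqual(buffer[0], REFCOUNT_ONE_ISIZE);
}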

pub fn allocateWithRefcount(
    data_bytes: usize,
    element_alignment: u32,
) [*]u8 {
    // Allocate length = alignment + data_bytes, write the initial refcount
    // just before the data, and return a pointer to the first data byte.
    const alignment = std.math.max(@sizeOf(usize), element_alignment);
    const first_slot_offset = std.math.max(@sizeOf(usize), element_alignment);
    const length = alignment + data_bytes;

    switch (alignment) {
        16 => {
            var new_bytes: [*]align(16) u8 = @alignCast(16, alloc(length, alignment));

            var as_usize_array = @ptrCast([*]usize, new_bytes);
            as_usize_array[0] = 0;
            as_usize_array[1] = REFCOUNT_ONE;

            var as_u8_array = @ptrCast([*]u8, new_bytes);
            const first_slot = as_u8_array + first_slot_offset;

            return first_slot;
        },
        8 => {
            var raw = alloc(length, alignment);
            var new_bytes: [*]align(8) u8 = @alignCast(8, raw);

            var as_isize_array = @ptrCast([*]isize, new_bytes);
            as_isize_array[0] = REFCOUNT_ONE_ISIZE;

            var as_u8_array = @ptrCast([*]u8, new_bytes);
            const first_slot = as_u8_array + first_slot_offset;

            return first_slot;
        },
        4 => {
            var raw = alloc(length, alignment);
            var new_bytes: [*]align(@alignOf(isize)) u8 = @alignCast(@alignOf(isize), raw);

            var as_isize_array = @ptrCast([*]isize, new_bytes);
            as_isize_array[0] = REFCOUNT_ONE_ISIZE;

            var as_u8_array = @ptrCast([*]u8, new_bytes);
            const first_slot = as_u8_array + first_slot_offset;

            return first_slot;
        },
        else => {
            // const stdout = std.io.getStdOut().writer();
            // stdout.print("alignment: {d}", .{alignment}) catch unreachable;
            // @panic("allocateWithRefcount with invalid alignment");
            unreachable;
        },
    }
}
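
// A hedged round-trip sketch. In this test build roc_alloc is backed by
// std.testing.allocator, so the backing memory can be reclaimed directly;
// a real host would go through decref instead.
test "allocateWithRefcount layout sketch" {
    const data_bytes: usize = 16;
    const first_slot = allocateWithRefcount(data_bytes, @alignOf(usize));

    // The refcount sits in the word immediately before the returned pointer.
    const refcount_ptr = @ptrCast([*]isize, @alignCast(@alignOf(isize), first_slot)) - 1;
    try std.testing.expectEqual(refcount_ptr[0], REFCOUNT_ONE_ISIZE);

    // Reclaim the whole allocation: refcount word plus the data bytes.
    const raw = @ptrCast([*]u8, refcount_ptr);
    std.testing.allocator.free(raw[0 .. data_bytes + @sizeOf(usize)]);
}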

const Failure = struct {
    start_line: u32,
    end_line: u32,
    start_col: u16,
    end_col: u16,
};

// BEGIN FAILURES GLOBALS ///////////////////
var failures_mutex = std.Thread.Mutex{};
var failures: [*]Failure = undefined;
var failure_length: usize = 0;
var failure_capacity: usize = 0;
// END FAILURES GLOBALS /////////////////////

pub fn expectFailed(
    start_line: u32,
    end_line: u32,
    start_col: u16,
    end_col: u16,
) void {
    const new_failure = Failure{ .start_line = start_line, .end_line = end_line, .start_col = start_col, .end_col = end_col };

    // Lock the failures mutex before reading from any of the failures globals,
    // and then release the lock once we're done modifying things.

    // TODO FOR ZIG 0.9: this API changed in https://github.com/ziglang/zig/commit/008b0ec5e58fc7e31f3b989868a7d1ea4df3f41d
    // to this: https://github.com/ziglang/zig/blob/c710d5eefe3f83226f1651947239730e77af43cb/lib/std/Thread/Mutex.zig
    //
    // ...so just use these two lines of code instead of the non-commented-out ones to make this work in Zig 0.9:
    //
    // failures_mutex.lock();
    // defer failures_mutex.unlock();
    //
    // 👆 👆 👆 IF UPGRADING TO ZIG 0.9, LOOK HERE! 👆 👆 👆
    const held = failures_mutex.acquire();
    defer held.release();

    // If we don't have enough capacity to add a failure, allocate a new failures pointer.
    if (failure_length >= failure_capacity) {
        if (failure_capacity > 0) {
            // We already had previous failures allocated, so realloc to grow them.
            const old_bytes = failure_capacity * @sizeOf(Failure);

            failure_capacity *= 2;

            const new_bytes = failure_capacity * @sizeOf(Failure);
            // NOTE: roc_realloc deallocates the original allocation itself
            // (see unsafeReallocate below), moving the data if it cannot grow
            // in place - so the old pointer must not be copied from or freed here.
            const raw_pointer = roc_realloc(failures, new_bytes, old_bytes, @alignOf(Failure));

            failures = @ptrCast([*]Failure, @alignCast(@alignOf(Failure), raw_pointer));
        } else {
            // We've never had any failures before, so allocate the failures for the first time.
            failure_capacity = 10;

            const raw_pointer = roc_alloc(failure_capacity * @sizeOf(Failure), @alignOf(Failure));

            failures = @ptrCast([*]Failure, @alignCast(@alignOf(Failure), raw_pointer));
        }
    }

    failures[failure_length] = new_failure;
    failure_length += 1;
}

pub fn expectFailedC(
    start_line: u32,
    end_line: u32,
    start_col: u16,
    end_col: u16,
) callconv(.C) void {
    return @call(.{ .modifier = always_inline }, expectFailed, .{ start_line, end_line, start_col, end_col });
}

pub fn getExpectFailures() []Failure {
    return failures[0..failure_length];
}
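
// A hedged sketch of the growth path above: the first allocation holds 10
// failures, so recording an 11th should take the realloc branch.
test "expectFailed grows the failures buffer past its initial capacity" {
    defer deinitFailures();

    var i: u32 = 0;
    while (i < 11) : (i += 1) {
        expectFailed(i, i + 1, 3, 4);
    }

    try std.testing.expectEqual(getExpectFailures().len, 11);
    try std.testing.expectEqual(getExpectFailures()[10].start_line, 10);
}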

const CSlice = extern struct {
    pointer: *c_void,
    len: usize,
};

pub fn getExpectFailuresC() callconv(.C) CSlice {
    var bytes = @ptrCast(*c_void, failures);

    return .{ .pointer = bytes, .len = failure_length };
}

pub fn deinitFailures() void {
    roc_dealloc(failures, @alignOf(Failure));
    failure_length = 0;
    // Also reset the capacity, so the next expectFailed call reallocates
    // instead of writing through the now-dangling pointer.
    failure_capacity = 0;
}

pub fn deinitFailuresC() callconv(.C) void {
    return @call(.{ .modifier = always_inline }, deinitFailures, .{});
}

pub fn unsafeReallocate(
    source_ptr: [*]u8,
    alignment: u32,
    old_length: usize,
    new_length: usize,
    element_width: usize,
) [*]u8 {
    const align_width: usize = std.math.max(alignment, @sizeOf(usize));

    const old_width = align_width + old_length * element_width;
    const new_width = align_width + new_length * element_width;

    // TODO handle out of memory
    // NOTE realloc will dealloc the original allocation
    const old_allocation = source_ptr - align_width;
    const new_allocation = realloc(old_allocation, new_width, old_width, alignment);

    const new_source = @ptrCast([*]u8, new_allocation) + align_width;
    return new_source;
}

pub const RocResult = extern struct {
    bytes: ?[*]u8,

    pub fn isOk(self: RocResult) bool {
        // assumptions
        //
        // - the tag is the first field
        // - the tag is usize bytes wide
        // - Ok has tag_id 1, because Err < Ok
        const usizes: [*]usize = @ptrCast([*]usize, @alignCast(8, self.bytes));

        return usizes[0] == 1;
    }

    pub fn isErr(self: RocResult) bool {
        return !self.isOk();
    }
};
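
// A hedged sketch of the layout isOk assumes: the tag occupies the first
// usize of the payload, with Ok == 1 and Err == 0.
test "RocResult tag sketch" {
    var payload: [2]usize align(8) = [2]usize{ 1, 0 };
    const result = RocResult{ .bytes = @ptrCast(?[*]u8, &payload) };
    try std.testing.expect(result.isOk());

    payload[0] = 0;
    try std.testing.expect(result.isErr());
}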

pub const Ordering = enum(u8) {
    EQ = 0,
    GT = 1,
    LT = 2,
};

pub const UpdateMode = enum(u8) {
    Immutable = 0,
    InPlace = 1,
};

test "increfC, refcounted data" {
    var mock_rc: isize = REFCOUNT_ONE_ISIZE + 17;
    var ptr_to_refcount: *isize = &mock_rc;
    increfC(ptr_to_refcount, 2);
    try std.testing.expectEqual(mock_rc, REFCOUNT_ONE_ISIZE + 19);
}

test "increfC, static data" {
    var mock_rc: isize = REFCOUNT_MAX_ISIZE;
    var ptr_to_refcount: *isize = &mock_rc;
    increfC(ptr_to_refcount, 2);
    try std.testing.expectEqual(mock_rc, REFCOUNT_MAX_ISIZE);
}

test "expectFailure does something" {
    defer deinitFailures();

    try std.testing.expectEqual(getExpectFailures().len, 0);
    expectFailed(1, 2, 3, 4);
    try std.testing.expectEqual(getExpectFailures().len, 1);
    const what_it_should_look_like = Failure{ .start_line = 1, .end_line = 2, .start_col = 3, .end_col = 4 };
    try std.testing.expectEqual(getExpectFailures()[0], what_it_should_look_like);
}