Merge remote-tracking branch 'origin/main' into upgrade-llvm-zig

commit 0a573ca557
Author: Brendan Hansknecht
Date:   2024-12-11 16:38:34 -08:00
818 changed files with 15185 additions and 4951 deletions

==== changed file (path not shown) ====

@@ -343,12 +343,11 @@ pub const RocDec = extern struct {
             }
         }

-        const unsigned_answer: i128 = mul_and_decimalize(self_u128, other_u128);
+        const unsigned_answer = mul_and_decimalize(self_u128, other_u128);

         if (is_answer_negative) {
-            return .{ .value = RocDec{ .num = -unsigned_answer }, .has_overflowed = false };
+            return .{ .value = RocDec{ .num = -unsigned_answer.value }, .has_overflowed = unsigned_answer.has_overflowed };
         } else {
-            return .{ .value = RocDec{ .num = unsigned_answer }, .has_overflowed = false };
+            return .{ .value = RocDec{ .num = unsigned_answer.value }, .has_overflowed = unsigned_answer.has_overflowed };
         }
     }
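
For reference, the WithOverflow(T) wrapper these arms now build and unwrap is defined elsewhere in the builtins and is not part of this diff; judging from the .value and .has_overflowed fields used above, it is presumably a small generic pair along these lines (an assumption, shown only for context):

pub fn WithOverflow(comptime T: type) type {
    return extern struct { value: T, has_overflowed: bool };
}
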
@@ -435,7 +434,15 @@ pub const RocDec = extern struct {
     pub fn mulSaturated(self: RocDec, other: RocDec) RocDec {
         const answer = RocDec.mulWithOverflow(self, other);
-        return answer.value;
+        if (answer.has_overflowed) {
+            if (answer.value.num < 0) {
+                return RocDec.max;
+            } else {
+                return RocDec.min;
+            }
+        } else {
+            return answer.value;
+        }
     }

     pub fn div(self: RocDec, other: RocDec) RocDec {
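
The saturation logic above uses the sign of the wrapped product to decide which bound was crossed. A minimal standalone sketch of the same pattern on a plain i128 (illustrative only; mulSaturatedI128 is a made-up name, not Roc code):

const std = @import("std");

fn mulSaturatedI128(a: i128, b: i128) i128 {
    const result = @mulWithOverflow(a, b);
    if (result[1] == 1) {
        // The product wrapped; a negative wrapped value means the true result was
        // above the maximum, and vice versa.
        return if (result[0] < 0) std.math.maxInt(i128) else std.math.minInt(i128);
    }
    return result[0];
}

test "saturates instead of wrapping" {
    try std.testing.expect(mulSaturatedI128(std.math.maxInt(i128), 2) == std.math.maxInt(i128));
}
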
@@ -597,7 +604,7 @@ inline fn count_trailing_zeros_base10(input: i128) u6 {
     return count;
 }

-fn mul_and_decimalize(a: u128, b: u128) i128 {
+fn mul_and_decimalize(a: u128, b: u128) WithOverflow(i128) {
     const answer_u256 = mul_u128(a, b);

     var lhs_hi = answer_u256.hi;
@@ -715,14 +722,15 @@ fn mul_and_decimalize(a: u128, b: u128) i128 {
     overflowed = overflowed | answer[1];
     const d = answer[0];

-    if (overflowed == 1) {
-        roc_panic("Decimal multiplication overflow!", 0);
-    }
-
     // Final 512bit value is d, c, b, a
-    // need to left shift 321 times
-    // 315 - 256 is 59. So left shift d, c 59 times.
-    return @as(i128, @intCast(c >> 59 | (d << (128 - 59))));
+    // need to right shift 315 times
+    // 315 - 256 is 59. So shift c right 59 times.
+    // d takes up the higher space above c, so shift it left by (128 - 59 = 69).
+    // Since d is being shifted left 69 times, all of those 69 bits (+1 for the sign bit)
+    // must be zero. Otherwise, we have an overflow.
+    const d_high_bits = d >> 58;
+    return .{ .value = @as(i128, @intCast(c >> 59 | (d << (128 - 59)))), .has_overflowed = overflowed | d_high_bits != 0 };
 }

 // Multiply two 128-bit ints and divide the result by 10^DECIMAL_PLACES
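
To spell out the shift arithmetic in the new return path: per the comments above, the full 512-bit value has 128-bit limbs d, c, b, a (most significant first) and must be shifted right by 315 bits in total.

    drop b and a entirely          -> 256 bits
    c >> 59                        ->  59 bits   (256 + 59 = 315)
    d << (128 - 59), i.e. d << 69  -> re-attaches d's low 59 bits above c's surviving bits

Shifting d left by 69 discards its upper 69 bits, and d's bit 58 lands in the i128 sign bit, so any set bit of d at position 58 or above means the result cannot fit; that is exactly the d >> 58 check that now feeds has_overflowed instead of calling roc_panic.
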

==== changed file (path not shown) ====

@@ -132,10 +132,12 @@ comptime {
         num.exportAddWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_with_overflow.");
         num.exportAddOrPanic(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_or_panic.");
         num.exportAddSaturatedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_saturated.");
+        num.exportAddWrappedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".add_wrapped.");

         num.exportSubWithOverflow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_with_overflow.");
         num.exportSubOrPanic(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_or_panic.");
         num.exportSubSaturatedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_saturated.");
+        num.exportSubWrappedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".sub_wrapped.");

         num.exportMulWithOverflow(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_with_overflow.");
         num.exportMulOrPanic(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_or_panic.");

==== changed file (path not shown) ====

@@ -110,7 +110,21 @@ pub fn exportNumToFloatCast(comptime T: type, comptime F: type, comptime name: []const u8) void {
 pub fn exportPow(comptime T: type, comptime name: []const u8) void {
     const f = struct {
         fn func(base: T, exp: T) callconv(.C) T {
-            return std.math.pow(T, base, exp);
+            switch (@typeInfo(T)) {
+                // std.math.pow can handle ints via powi, but it turns any errors into unreachable.
+                // We want to catch overflow and report a proper error to the user.
+                .Int => {
+                    if (std.math.powi(T, base, exp)) |value| {
+                        return value;
+                    } else |err| switch (err) {
+                        error.Overflow => roc_panic("Integer raised to power overflowed!", 0),
+                        error.Underflow => return 0,
+                    }
+                },
+                else => {
+                    return std.math.pow(T, base, exp);
+                },
+            }
         }
     }.func;
     @export(f, .{ .name = name ++ @typeName(T), .linkage = .strong });
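
A small self-contained check of the two error paths the new .Int branch maps (assuming std.math.powi's documented error set of Overflow and Underflow; the test itself is illustrative and not part of this commit):

const std = @import("std");

test "powi error paths handled by exportPow" {
    // 2^200 does not fit in an i32, so powi reports Overflow (turned into roc_panic above).
    try std.testing.expectError(error.Overflow, std.math.powi(i32, 2, 200));
    // A negative exponent on an integer base of magnitude > 1 yields a value below 1,
    // which powi reports as Underflow (turned into a result of 0 above).
    try std.testing.expectError(error.Underflow, std.math.powi(i32, 2, -1));
}
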
@@ -361,6 +375,15 @@ pub fn exportAddSaturatedInt(comptime T: type, comptime name: []const u8) void {
     @export(f, .{ .name = name ++ @typeName(T), .linkage = .strong });
 }

+pub fn exportAddWrappedInt(comptime T: type, comptime name: []const u8) void {
+    const f = struct {
+        fn func(self: T, other: T) callconv(.C) T {
+            return self +% other;
+        }
+    }.func;
+    @export(f, .{ .name = name ++ @typeName(T), .linkage = .strong });
+}
+
 pub fn exportAddOrPanic(comptime T: type, comptime name: []const u8) void {
     const f = struct {
         fn func(self: T, other: T) callconv(.C) T {
@@ -418,6 +441,15 @@ pub fn exportSubSaturatedInt(comptime T: type, comptime name: []const u8) void {
     @export(f, .{ .name = name ++ @typeName(T), .linkage = .strong });
 }

+pub fn exportSubWrappedInt(comptime T: type, comptime name: []const u8) void {
+    const f = struct {
+        fn func(self: T, other: T) callconv(.C) T {
+            return self -% other;
+        }
+    }.func;
+    @export(f, .{ .name = name ++ @typeName(T), .linkage = .strong });
+}
+
 pub fn exportSubOrPanic(comptime T: type, comptime name: []const u8) void {
     const f = struct {
         fn func(self: T, other: T) callconv(.C) T {
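
The two new export helpers are thin wrappers over Zig's wrapping operators; with the registrations added in the comptime block above, each instantiation is exported under a name of the form ROC_BUILTINS ++ "." ++ NUM ++ ".add_wrapped." ++ @typeName(T) (and likewise for sub_wrapped). A tiny illustrative test of the +% and -% semantics they expose (not part of the commit):

const std = @import("std");

test "wrapping add and sub" {
    const max: i8 = std.math.maxInt(i8); // 127
    const min: i8 = std.math.minInt(i8); // -128
    // +% and -% wrap around on overflow instead of tripping Zig's overflow safety checks.
    try std.testing.expect(max +% 1 == min);
    try std.testing.expect(min -% 1 == max);
}
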

==== changed file (path not shown) ====

@@ -73,19 +73,29 @@ comptime {
     }
 }

-fn testing_roc_alloc(size: usize, _: u32) callconv(.C) ?*anyopaque {
-    // We store an extra usize which is the size of the full allocation.
-    const full_size = size + @sizeOf(usize);
-    var raw_ptr = (std.testing.allocator.alloc(u8, full_size) catch unreachable).ptr;
-    @as([*]usize, @alignCast(@ptrCast(raw_ptr)))[0] = full_size;
-    raw_ptr += @sizeOf(usize);
-    const ptr = @as(?*anyopaque, @ptrCast(raw_ptr));
+fn testing_roc_alloc(size: usize, nominal_alignment: u32) callconv(.C) ?*anyopaque {
+    const real_alignment = 16;
+    if (nominal_alignment > real_alignment) {
+        @panic("alignments larger than that of usize are not currently supported");
+    }
+    // We store an extra usize which is the size of the data plus the size of the size, directly before the data.
+    // We need enough multiples of the alignment size to fit this (usually this will be one).
+    const size_of_size = @sizeOf(usize);
+    const alignments_needed = size_of_size / real_alignment + comptime if (size_of_size % real_alignment == 0) 0 else 1;
+    const extra_bytes = alignments_needed * size_of_size;
+    const full_size = size + extra_bytes;
+    const whole_ptr = (std.testing.allocator.alignedAlloc(u8, real_alignment, full_size) catch unreachable).ptr;
+    const written_to_size = size + size_of_size;
+    @as([*]align(real_alignment) usize, @ptrCast(whole_ptr))[extra_bytes - size_of_size] = written_to_size;
+    const data_ptr = @as(?*anyopaque, @ptrCast(whole_ptr + extra_bytes));

     if (DEBUG_TESTING_ALLOC and builtin.target.cpu.arch != .wasm32) {
-        std.debug.print("+ alloc {*}: {} bytes\n", .{ ptr, size });
+        std.debug.print("+ alloc {*}: {} bytes\n", .{ data_ptr, size });
     }

-    return ptr;
+    return data_ptr;
 }

 fn testing_roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*anyopaque {
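
Walking the new bookkeeping through on a typical 64-bit target, where @sizeOf(usize) == 8 and real_alignment == 16: alignments_needed = 8 / 16 + 1 = 1, so extra_bytes = 1 * 8 = 8 and each test allocation is laid out as

    [ 8-byte size field | user data ... ]
      ^ whole_ptr         ^ data_ptr = whole_ptr + 8

with size + 8 stored in the size field. testing_roc_dealloc below recomputes the same extra_bytes, steps the user pointer back by it, reads the stored size, and rebuilds the full slice covering the original allocation.
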
@@ -106,9 +116,16 @@ fn testing_roc_realloc(c_ptr: *anyopaque, new_size: usize, old_size: usize, _: u32) callconv(.C) ?*anyopaque {
 }

 fn testing_roc_dealloc(c_ptr: *anyopaque, _: u32) callconv(.C) void {
-    const raw_ptr = @as([*]u8, @ptrCast(c_ptr)) - @sizeOf(usize);
-    const full_size = @as([*]usize, @alignCast(@ptrCast(raw_ptr)))[0];
-    const slice = raw_ptr[0..full_size];
+    const alignment = 16;
+    const size_of_size = @sizeOf(usize);
+    const alignments_needed = size_of_size / alignment + comptime if (size_of_size % alignment == 0) 0 else 1;
+    const extra_bytes = alignments_needed * size_of_size;
+    const byte_array = @as([*]u8, @ptrCast(c_ptr)) - extra_bytes;
+    const allocation_ptr = @as([*]align(alignment) u8, @alignCast(byte_array));
+    const offset_from_allocation_to_size = extra_bytes - size_of_size;
+    const size_of_data_and_size = @as([*]usize, @alignCast(@ptrCast(allocation_ptr)))[offset_from_allocation_to_size];
+    const full_size = size_of_data_and_size + offset_from_allocation_to_size;
+    const slice = allocation_ptr[0..full_size];

     if (DEBUG_TESTING_ALLOC and builtin.target.cpu.arch != .wasm32) {
         std.debug.print("💀 dealloc {*}\n", .{slice.ptr});