mirror of
https://github.com/roc-lang/roc.git
synced 2025-10-03 00:24:34 +00:00
Get lots of Num lowlevel ops working
This commit is contained in:
parent
6421ff07a5
commit
7c95189e4a
7 changed files with 532 additions and 61 deletions
|
@ -4,10 +4,13 @@ use roc_module::symbol::Symbol;
|
|||
|
||||
use crate::layout::WasmLayout;
|
||||
use crate::storage::Storage;
|
||||
use crate::wasm_module::{CodeBuilder, ValueType::*};
|
||||
use crate::wasm_module::{
|
||||
CodeBuilder,
|
||||
ValueType::{self, *},
|
||||
};
|
||||
|
||||
/// Outcome of attempting to generate Wasm code for a low-level (builtin) operation.
///
/// NOTE(review): reconstructed from a corrupted diff scrape. Both
/// `WasmInstructions` and `Done` appear in the scraped text — one of them is
/// likely the pre-diff name of the other. Confirm the current variant set
/// against the repository before relying on exhaustive matches.
pub enum LowlevelBuildResult {
    /// The op was lowered directly to Wasm instructions (legacy name — see note above).
    WasmInstructions,
    /// The op was lowered directly; no further action needed by the caller.
    Done,
    /// The op must be implemented by calling the named Zig builtin function.
    BuiltinCall(&'static str),
    /// No Wasm lowering exists yet for this op.
    NotImplemented,
}
|
||||
|
@ -24,63 +27,330 @@ pub fn build_call_low_level<'a>(
|
|||
let panic_ret_type = || panic!("Invalid return layout for {:?}: {:?}", lowlevel, ret_layout);
|
||||
|
||||
match lowlevel {
|
||||
StrConcat | StrJoinWith | StrIsEmpty | StrStartsWith | StrStartsWithCodePt
|
||||
| StrEndsWith | StrSplit | StrCountGraphemes | StrFromInt | StrFromUtf8
|
||||
| StrFromUtf8Range | StrToUtf8 | StrRepeat | StrFromFloat | StrTrim | ListLen
|
||||
| ListGetUnsafe | ListSet | ListSingle | ListRepeat | ListReverse | ListConcat
|
||||
| ListContains | ListAppend | ListPrepend | ListJoin | ListRange | ListMap | ListMap2
|
||||
| ListMap3 | ListMap4 | ListMapWithIndex | ListKeepIf | ListWalk | ListWalkUntil
|
||||
| ListWalkBackwards | ListKeepOks | ListKeepErrs | ListSortWith | ListTakeFirst
|
||||
| ListTakeLast | ListDrop | ListDropAt | ListSwap | ListAny | ListFindUnsafe | DictSize
|
||||
| DictEmpty | DictInsert | DictRemove | DictContains | DictGetUnsafe | DictKeys
|
||||
| DictValues | DictUnion | DictIntersection | DictDifference | DictWalk | SetFromList => {
|
||||
return NotImplemented;
|
||||
}
|
||||
|
||||
NumAdd => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_add(),
|
||||
I64 => code_builder.i64_add(),
|
||||
F32 => code_builder.f32_add(),
|
||||
F64 => code_builder.f64_add(),
|
||||
},
|
||||
NumAddWrap => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.i32_add();
|
||||
wrap_i32(code_builder, ret_layout.size());
|
||||
}
|
||||
I64 => code_builder.i64_add(),
|
||||
F32 => code_builder.f32_add(),
|
||||
F64 => code_builder.f64_add(),
|
||||
},
|
||||
NumAddChecked => return NotImplemented,
|
||||
NumSub => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_sub(),
|
||||
I64 => code_builder.i64_sub(),
|
||||
F32 => code_builder.f32_sub(),
|
||||
F64 => code_builder.f64_sub(),
|
||||
},
|
||||
NumSubWrap => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.i32_sub();
|
||||
wrap_i32(code_builder, ret_layout.size());
|
||||
}
|
||||
I64 => code_builder.i64_sub(),
|
||||
F32 => code_builder.f32_sub(),
|
||||
F64 => code_builder.f64_sub(),
|
||||
},
|
||||
NumSubChecked => return NotImplemented,
|
||||
NumMul => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_mul(),
|
||||
I64 => code_builder.i64_mul(),
|
||||
F32 => code_builder.f32_mul(),
|
||||
F64 => code_builder.f64_mul(),
|
||||
},
|
||||
NumMulWrap => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.i32_mul();
|
||||
wrap_i32(code_builder, ret_layout.size());
|
||||
}
|
||||
I64 => code_builder.i64_mul(),
|
||||
F32 => code_builder.f32_mul(),
|
||||
F64 => code_builder.f64_mul(),
|
||||
},
|
||||
NumMulChecked => return NotImplemented,
|
||||
NumGt => match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_gt_s(),
|
||||
I64 => code_builder.i64_gt_s(),
|
||||
F32 => code_builder.f32_gt(),
|
||||
F64 => code_builder.f64_gt(),
|
||||
},
|
||||
Eq => match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_eq(),
|
||||
I64 => code_builder.i64_eq(),
|
||||
F32 => code_builder.f32_eq(),
|
||||
F64 => code_builder.f64_eq(),
|
||||
NumGte => match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_ge_s(),
|
||||
I64 => code_builder.i64_ge_s(),
|
||||
F32 => code_builder.f32_ge(),
|
||||
F64 => code_builder.f64_ge(),
|
||||
},
|
||||
NumNeg => match ret_layout.value_type() {
|
||||
// TODO: it would be better to subtract the arg from zero.
|
||||
// But we'd need to insert the zero constant *before* the argument
|
||||
// in the VM stack, and we don't have a good way to do that yet!
|
||||
// Before solving this one case, let's see what other issues we run into.
|
||||
NumLt => match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_lt_s(),
|
||||
I64 => code_builder.i64_lt_s(),
|
||||
F32 => code_builder.f32_lt(),
|
||||
F64 => code_builder.f64_lt(),
|
||||
},
|
||||
NumLte => match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_le_s(),
|
||||
I64 => code_builder.i64_le_s(),
|
||||
F32 => code_builder.f32_le(),
|
||||
F64 => code_builder.f64_le(),
|
||||
},
|
||||
NumCompare => return NotImplemented,
|
||||
NumDivUnchecked => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_div_s(),
|
||||
I64 => code_builder.i64_div_s(),
|
||||
F32 => code_builder.f32_div(),
|
||||
F64 => code_builder.f64_div(),
|
||||
},
|
||||
NumDivCeilUnchecked => return NotImplemented,
|
||||
NumRemUnchecked => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_rem_s(),
|
||||
I64 => code_builder.i64_rem_s(),
|
||||
F32 => return NotImplemented,
|
||||
F64 => return NotImplemented,
|
||||
},
|
||||
NumIsMultipleOf => return NotImplemented,
|
||||
NumAbs => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.i32_const(-1);
|
||||
code_builder.i32_mul();
|
||||
code_builder.i32_const(0);
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i32_sub();
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i32_const(0);
|
||||
code_builder.i32_ge_s();
|
||||
code_builder.select();
|
||||
}
|
||||
I64 => {
|
||||
code_builder.i64_const(-1);
|
||||
code_builder.i64_mul();
|
||||
code_builder.i64_const(0);
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i64_sub();
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i64_const(0);
|
||||
code_builder.i64_ge_s();
|
||||
code_builder.select();
|
||||
}
|
||||
F32 => code_builder.f32_abs(),
|
||||
F64 => code_builder.f64_abs(),
|
||||
},
|
||||
NumNeg => {
|
||||
match ret_layout.value_type() {
|
||||
I32 => {
|
||||
// Unfortunate local.set/local.get
|
||||
code_builder.i32_const(0);
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i32_sub();
|
||||
}
|
||||
I64 => {
|
||||
// Unfortunate local.set/local.get
|
||||
code_builder.i64_const(0);
|
||||
storage.load_symbols(code_builder, args);
|
||||
code_builder.i64_sub();
|
||||
}
|
||||
F32 => code_builder.f32_neg(),
|
||||
F64 => code_builder.f64_neg(),
|
||||
}
|
||||
}
|
||||
NumSin => return NotImplemented,
|
||||
NumCos => return NotImplemented,
|
||||
NumSqrtUnchecked => return NotImplemented,
|
||||
NumLogUnchecked => return NotImplemented,
|
||||
NumRound => match ret_layout.value_type() {
|
||||
I32 => code_builder.f32_nearest(),
|
||||
I64 => code_builder.f64_nearest(),
|
||||
F32 => {}
|
||||
F64 => {}
|
||||
},
|
||||
NumToFloat => match (ret_layout.value_type(), storage.get(&args[0]).value_type()) {
|
||||
(F32, I32) => code_builder.f32_convert_s_i32(),
|
||||
(F32, I64) => code_builder.f32_convert_s_i64(),
|
||||
(F32, F32) => {}
|
||||
(F32, F64) => code_builder.f32_demote_f64(),
|
||||
(F64, I32) => code_builder.f64_convert_s_i32(),
|
||||
(F64, I64) => code_builder.f64_convert_s_i64(),
|
||||
(F64, F32) => code_builder.f64_promote_f32(),
|
||||
(F64, F64) => {}
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumPow => return NotImplemented,
|
||||
NumCeiling => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.f32_ceil();
|
||||
code_builder.i32_trunc_s_f32()
|
||||
}
|
||||
I64 => {
|
||||
code_builder.f64_ceil();
|
||||
code_builder.i64_trunc_s_f64()
|
||||
}
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumPowInt => return NotImplemented,
|
||||
NumFloor => match ret_layout.value_type() {
|
||||
I32 => {
|
||||
code_builder.f32_floor();
|
||||
code_builder.i32_trunc_s_f32()
|
||||
}
|
||||
I64 => {
|
||||
code_builder.f64_floor();
|
||||
code_builder.i64_trunc_s_f64()
|
||||
}
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumIsFinite => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_const(1),
|
||||
I64 => code_builder.i32_const(1),
|
||||
F32 => {
|
||||
code_builder.i32_reinterpret_f32();
|
||||
code_builder.i32_const(0x7f800000);
|
||||
code_builder.i32_and();
|
||||
code_builder.i32_const(0x7f800000);
|
||||
code_builder.i32_ne();
|
||||
}
|
||||
F64 => {
|
||||
code_builder.i64_reinterpret_f64();
|
||||
code_builder.i64_const(0x7ff0000000000000);
|
||||
code_builder.i64_and();
|
||||
code_builder.i64_const(0x7ff0000000000000);
|
||||
code_builder.i64_ne();
|
||||
}
|
||||
F32 => code_builder.f32_neg(),
|
||||
F64 => code_builder.f64_neg(),
|
||||
},
|
||||
NumAtan => {
|
||||
let name: &'static str = match ret_layout.value_type() {
|
||||
F32 => &bitcode::NUM_ATAN[FloatWidth::F32],
|
||||
F64 => &bitcode::NUM_ATAN[FloatWidth::F64],
|
||||
let width = float_width_from_layout(ret_layout);
|
||||
return BuiltinCall(&bitcode::NUM_ATAN[width]);
|
||||
}
|
||||
NumAcos => {
|
||||
let width = float_width_from_layout(ret_layout);
|
||||
return BuiltinCall(&bitcode::NUM_ACOS[width]);
|
||||
}
|
||||
NumAsin => {
|
||||
let width = float_width_from_layout(ret_layout);
|
||||
return BuiltinCall(&bitcode::NUM_ASIN[width]);
|
||||
}
|
||||
NumBytesToU16 => return NotImplemented,
|
||||
NumBytesToU32 => return NotImplemented,
|
||||
NumBitwiseAnd => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_and(),
|
||||
I64 => code_builder.i64_and(),
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumBitwiseXor => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_xor(),
|
||||
I64 => code_builder.i64_xor(),
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumBitwiseOr => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_or(),
|
||||
I64 => code_builder.i64_or(),
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumShiftLeftBy => {
|
||||
// Unfortunate local.set/local.get
|
||||
storage.load_symbols(code_builder, &[args[1], args[0]]);
|
||||
match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_shl(),
|
||||
I64 => code_builder.i64_shl(),
|
||||
_ => panic_ret_type(),
|
||||
};
|
||||
return BuiltinCall(name);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return NotImplemented;
|
||||
NumShiftRightBy => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_shr_s(),
|
||||
I64 => code_builder.i64_shr_s(),
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumShiftRightZfBy => match ret_layout.value_type() {
|
||||
I32 => code_builder.i32_shr_u(),
|
||||
I64 => code_builder.i64_shr_u(),
|
||||
_ => panic_ret_type(),
|
||||
},
|
||||
NumIntCast => match (ret_layout.value_type(), storage.get(&args[0]).value_type()) {
|
||||
(I32, I32) => {}
|
||||
(I32, I64) => code_builder.i32_wrap_i64(),
|
||||
(I32, F32) => code_builder.i32_trunc_s_f32(),
|
||||
(I32, F64) => code_builder.i32_trunc_s_f64(),
|
||||
|
||||
(I64, I32) => code_builder.i64_extend_s_i32(),
|
||||
(I64, I64) => {}
|
||||
(I64, F32) => code_builder.i64_trunc_s_f32(),
|
||||
(I64, F64) => code_builder.i64_trunc_s_f64(),
|
||||
|
||||
(F32, I32) => code_builder.f32_convert_s_i32(),
|
||||
(F32, I64) => code_builder.f32_convert_s_i64(),
|
||||
(F32, F32) => {}
|
||||
(F32, F64) => code_builder.f32_demote_f64(),
|
||||
|
||||
(F64, I32) => code_builder.f64_convert_s_i32(),
|
||||
(F64, I64) => code_builder.f64_convert_s_i64(),
|
||||
(F64, F32) => code_builder.f64_promote_f32(),
|
||||
(F64, F64) => {}
|
||||
},
|
||||
Eq => {
|
||||
// TODO: For non-number types, this will implement pointer equality, which is wrong
|
||||
match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_eq(),
|
||||
I64 => code_builder.i64_eq(),
|
||||
F32 => code_builder.f32_eq(),
|
||||
F64 => code_builder.f64_eq(),
|
||||
}
|
||||
}
|
||||
};
|
||||
WasmInstructions
|
||||
NotEq => {
|
||||
// TODO: For non-number types, this will implement pointer inequality, which is wrong
|
||||
match storage.get(&args[0]).value_type() {
|
||||
I32 => code_builder.i32_ne(),
|
||||
I64 => code_builder.i64_ne(),
|
||||
F32 => code_builder.f32_ne(),
|
||||
F64 => code_builder.f64_ne(),
|
||||
}
|
||||
}
|
||||
And => code_builder.i32_and(),
|
||||
Or => code_builder.i32_or(),
|
||||
Not => code_builder.i32_eqz(),
|
||||
Hash => return NotImplemented,
|
||||
ExpectTrue => return NotImplemented,
|
||||
}
|
||||
Done
|
||||
}
|
||||
|
||||
/// Wrap an integer whose Wasm representation is i32
|
||||
fn wrap_i32(code_builder: &mut CodeBuilder, size: u32) {
|
||||
match size {
|
||||
1 => {
|
||||
// Underlying Roc value is i8
|
||||
code_builder.i32_const(24);
|
||||
code_builder.i32_shl();
|
||||
code_builder.i32_const(24);
|
||||
code_builder.i32_shr_s();
|
||||
}
|
||||
2 => {
|
||||
// Underlying Roc value is i16
|
||||
code_builder.i32_const(16);
|
||||
code_builder.i32_shl();
|
||||
code_builder.i32_const(16);
|
||||
code_builder.i32_shr_s();
|
||||
}
|
||||
_ => {} // the only other possible value is 4, and i32 wraps natively
|
||||
}
|
||||
}
|
||||
|
||||
fn float_width_from_layout(wasm_layout: &WasmLayout) -> FloatWidth {
|
||||
if wasm_layout.value_type() == ValueType::F32 {
|
||||
FloatWidth::F32
|
||||
} else {
|
||||
FloatWidth::F64
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue