Merge branch 'trunk' into dict

commit d787d3ef7b
Chadtech, 2021-01-25 20:25:37 -05:00, committed by GitHub
27 changed files with 1795 additions and 545 deletions

Cargo.lock (generated)

@@ -1082,7 +1082,18 @@ checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
 dependencies = [
  "cfg-if 0.1.10",
  "libc",
- "wasi",
+ "wasi 0.9.0+wasi-snapshot-preview1",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "wasi 0.10.1+wasi-snapshot-preview1",
 ]
 
 [[package]]
@@ -2268,13 +2279,25 @@ version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 dependencies = [
- "getrandom",
+ "getrandom 0.1.15",
  "libc",
  "rand_chacha 0.2.2",
  "rand_core 0.5.1",
  "rand_hc 0.2.0",
 ]
 
+[[package]]
+name = "rand"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e"
+dependencies = [
+ "libc",
+ "rand_chacha 0.3.0",
+ "rand_core 0.6.1",
+ "rand_hc 0.3.0",
+]
+
 [[package]]
 name = "rand_chacha"
 version = "0.1.1"
@@ -2295,6 +2318,16 @@ dependencies = [
  "rand_core 0.5.1",
 ]
 
+[[package]]
+name = "rand_chacha"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.1",
+]
+
 [[package]]
 name = "rand_core"
 version = "0.3.1"
@@ -2316,7 +2349,16 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 dependencies = [
- "getrandom",
+ "getrandom 0.1.15",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5"
+dependencies = [
+ "getrandom 0.2.1",
 ]
 
 [[package]]
@@ -2337,6 +2379,15 @@ dependencies = [
  "rand_core 0.5.1",
 ]
 
+[[package]]
+name = "rand_hc"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
+dependencies = [
+ "rand_core 0.6.1",
+]
+
 [[package]]
 name = "rand_isaac"
 version = "0.1.1"
@@ -2460,7 +2511,7 @@ version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d"
 dependencies = [
- "getrandom",
+ "getrandom 0.1.15",
  "redox_syscall",
  "rust-argon2",
 ]
@@ -2679,6 +2730,7 @@ dependencies = [
  "pretty_assertions",
  "quickcheck",
  "quickcheck_macros",
+ "rand 0.8.2",
  "roc_builtins",
  "roc_can",
  "roc_collections",
@@ -3728,6 +3780,12 @@ version = "0.9.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 
+[[package]]
+name = "wasi"
+version = "0.10.1+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9"
+
 [[package]]
 name = "wasm-bindgen"
 version = "0.2.69"

Cargo.toml

@@ -34,6 +34,6 @@ members = [
 
 [profile.release]
 lto = "fat"
 codegen-units = 1
-debug = true # enable when profiling
+# debug = true # enable when profiling


@@ -224,6 +224,7 @@ fn jit_to_ast_help<'a>(
         Layout::Union(UnionLayout::Recursive(_))
         | Layout::Union(UnionLayout::NullableWrapped { .. })
         | Layout::Union(UnionLayout::NullableUnwrapped { .. })
+        | Layout::Union(UnionLayout::NonNullableUnwrapped(_))
         | Layout::RecursivePointer => {
             todo!("add support for rendering recursive tag unions in the REPL")
         }
@@ -305,6 +306,9 @@ fn ptr_to_ast<'a>(
             let (tag_name, payload_vars) = tags.iter().next().unwrap();
             single_tag_union_to_ast(env, ptr, field_layouts, tag_name.clone(), payload_vars)
         }
+        Content::Structure(FlatType::EmptyRecord) => {
+            struct_to_ast(env, ptr, &[], &MutMap::default())
+        }
         other => {
             unreachable!(
                 "Something had a Struct layout, but instead of a Record type, it had: {:?}",


@@ -199,6 +199,11 @@ mod repl_eval {
         expect_success("[]", "[] : List *");
     }
 
+    #[test]
+    fn literal_empty_list_empty_record() {
+        expect_success("[ {} ]", "[ {} ] : List {}");
+    }
+
     #[test]
     fn literal_num_list() {
         expect_success("[ 1, 2, 3 ]", "[ 1, 2, 3 ] : List (Num *)");


@@ -821,8 +821,11 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
         Tag {
             union_size,
             arguments,
+            tag_layout,
             ..
-        } if *union_size == 1 => {
+        } if *union_size == 1
+            && matches!(tag_layout, Layout::Union(UnionLayout::NonRecursive(_))) =>
+        {
             let it = arguments.iter();
 
             let ctx = env.context;
@@ -1053,6 +1056,83 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
             data_ptr.into()
         }
+        Tag {
+            arguments,
+            tag_layout: Layout::Union(UnionLayout::NonNullableUnwrapped(fields)),
+            union_size,
+            tag_id,
+            ..
+        } => {
+            debug_assert_eq!(*union_size, 1);
+            debug_assert_eq!(*tag_id, 0);
+            debug_assert_eq!(arguments.len(), fields.len());
+
+            let struct_layout =
+                Layout::Union(UnionLayout::NonRecursive(env.arena.alloc([*fields])));
+
+            let ptr_size = env.ptr_bytes;
+
+            let ctx = env.context;
+            let builder = env.builder;
+
+            // Determine types
+            let num_fields = arguments.len() + 1;
+            let mut field_types = Vec::with_capacity_in(num_fields, env.arena);
+            let mut field_vals = Vec::with_capacity_in(num_fields, env.arena);
+
+            for (field_symbol, tag_field_layout) in arguments.iter().zip(fields.iter()) {
+                let val = load_symbol(env, scope, field_symbol);
+
+                // Zero-sized fields have no runtime representation.
+                // The layout of the struct expects them to be dropped!
+                if !tag_field_layout.is_dropped_because_empty() {
+                    let field_type =
+                        basic_type_from_layout(env.arena, env.context, tag_field_layout, ptr_size);
+                    field_types.push(field_type);
+
+                    if let Layout::RecursivePointer = tag_field_layout {
+                        debug_assert!(val.is_pointer_value());
+
+                        // we store recursive pointers as `i64*`
+                        let ptr = env.builder.build_bitcast(
+                            val,
+                            ctx.i64_type().ptr_type(AddressSpace::Generic),
+                            "cast_recursive_pointer",
+                        );
+
+                        field_vals.push(ptr);
+                    } else {
+                        // this check fails for recursive tag unions, but can be helpful while debugging
+                        field_vals.push(val);
+                    }
+                }
+            }
+
+            // Create the struct_type
+            let data_ptr = reserve_with_refcount(env, &struct_layout);
+            let struct_type = ctx.struct_type(field_types.into_bump_slice(), false);
+            let struct_ptr = env
+                .builder
+                .build_bitcast(
+                    data_ptr,
+                    struct_type.ptr_type(AddressSpace::Generic),
+                    "block_of_memory_to_tag",
+                )
+                .into_pointer_value();
+
+            // Insert field exprs into struct_val
+            for (index, field_val) in field_vals.into_iter().enumerate() {
+                let field_ptr = builder
+                    .build_struct_gep(struct_ptr, index as u32, "struct_gep")
+                    .unwrap();
+
+                builder.build_store(field_ptr, field_val);
+            }
+
+            data_ptr.into()
+        }
 
         Tag {
             arguments,
             tag_layout:
@@ -1282,15 +1362,17 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
         } => {
             // extract field from a record
             match load_symbol_and_layout(env, scope, structure) {
-                (StructValue(argument), Layout::Struct(fields)) if fields.len() > 1 => env
-                    .builder
+                (StructValue(argument), Layout::Struct(fields)) => {
+                    debug_assert!(fields.len() > 1);
+
+                    env.builder
                         .build_extract_value(
                             argument,
                             *index as u32,
                             env.arena
                                 .alloc(format!("struct_field_access_record_{}", index)),
                         )
-                    .unwrap(),
+                        .unwrap()
+                }
                 (StructValue(argument), Layout::Closure(_, _, _)) => env
                     .builder
                     .build_extract_value(
@@ -1299,6 +1381,38 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
                         env.arena.alloc(format!("closure_field_access_{}_", index)),
                     )
                     .unwrap(),
+                (
+                    PointerValue(argument),
+                    Layout::Union(UnionLayout::NonNullableUnwrapped(fields)),
+                ) => {
+                    let struct_layout = Layout::Struct(fields);
+
+                    let struct_type = basic_type_from_layout(
+                        env.arena,
+                        env.context,
+                        &struct_layout,
+                        env.ptr_bytes,
+                    );
+
+                    let cast_argument = env
+                        .builder
+                        .build_bitcast(
+                            argument,
+                            struct_type.ptr_type(AddressSpace::Generic),
+                            "cast_rosetree_like",
+                        )
+                        .into_pointer_value();
+
+                    let ptr = env
+                        .builder
+                        .build_struct_gep(
+                            cast_argument,
+                            *index as u32,
+                            env.arena.alloc(format!("non_nullable_unwrapped_{}", index)),
+                        )
+                        .unwrap();
+
+                    env.builder.build_load(ptr, "load_rosetree_like")
+                }
                 (other, layout) => unreachable!(
                     "can only index into struct layout\nValue: {:?}\nLayout: {:?}\nIndex: {:?}",
                     other, layout, index
@@ -1830,6 +1944,7 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
             } else {
                 basic_type_from_layout(env.arena, context, &layout, env.ptr_bytes)
             };
+
             let alloca = create_entry_block_alloca(
                 env,
                 parent,
@@ -2333,6 +2448,7 @@ fn build_switch_ir<'a, 'ctx, 'env>(
                 debug_assert!(cond_value.is_pointer_value());
                 extract_tag_discriminant_ptr(env, cond_value.into_pointer_value())
             }
+            NonNullableUnwrapped(_) => unreachable!("there is no tag to switch on"),
             NullableWrapped { nullable_id, .. } => {
                 // we match on the discriminant, not the whole Tag
                 cond_layout = Layout::Builtin(Builtin::Int64);
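
The NonNullableUnwrapped cases added above are easiest to picture outside of LLVM. A minimal sketch, assuming hypothetical Rust types rather than the compiler's own representation: a recursive tag union with exactly one tag has nothing to discriminate, so no tag id is stored and the whole value lowers to a bare pointer to its payload struct.

// Hypothetical illustration (not the compiler's actual types) of the
// NonNullableUnwrapped layout for the rose tree exercised by the tests below:
//
//     Tree a : [ Tree a (List (Tree a)) ]
//
struct TreePayload {
    value: i64,                   // the `a` payload (assume I64 here)
    children: *const TreePayload, // stand-in for the `List (Tree a)` field
}
// With a single tag there is no tag id anywhere; a Tree is just this pointer,
// which is why the switch above has "no tag to switch on".
type Tree = *const TreePayload;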


@@ -146,6 +146,10 @@ pub fn basic_type_from_layout<'ctx>(
                 let block = block_of_memory_slices(context, &[&other_fields[1..]], ptr_bytes);
                 block.ptr_type(AddressSpace::Generic).into()
             }
+            NonNullableUnwrapped(fields) => {
+                let block = block_of_memory_slices(context, &[fields], ptr_bytes);
+                block.ptr_type(AddressSpace::Generic).into()
+            }
             NonRecursive(_) => block_of_memory(context, layout, ptr_bytes),
         }
     }


@@ -1,6 +1,6 @@
 use crate::llvm::build::{
-    cast_basic_basic, cast_block_of_memory_to_tag, create_entry_block_alloca, set_name, Env, Scope,
-    FAST_CALL_CONV, LLVM_SADD_WITH_OVERFLOW_I64,
+    cast_basic_basic, cast_block_of_memory_to_tag, set_name, Env, FAST_CALL_CONV,
+    LLVM_SADD_WITH_OVERFLOW_I64,
 };
 use crate::llvm::build_list::{incrementing_elem_loop, list_len, load_list};
 use crate::llvm::convert::{
@@ -455,6 +455,19 @@ fn modify_refcount_layout<'a, 'ctx, 'env>(
                 );
             }
 
+            NonNullableUnwrapped(fields) => {
+                debug_assert!(value.is_pointer_value());
+
+                build_rec_union(
+                    env,
+                    layout_ids,
+                    mode,
+                    &*env.arena.alloc([*fields]),
+                    value.into_pointer_value(),
+                    true,
+                );
+            }
+
             Recursive(tags) => {
                 debug_assert!(value.is_pointer_value());
                 build_rec_union(
@@ -578,25 +591,12 @@ fn modify_refcount_list_help<'a, 'ctx, 'env>(
     );
 
     builder.set_current_debug_location(&ctx, loc);
 
-    let mut scope = Scope::default();
-
     // Add args to scope
     let arg_symbol = Symbol::ARG_1;
     let arg_val = fn_val.get_param_iter().next().unwrap();
 
     set_name(arg_val, arg_symbol.ident_string(&env.interns));
 
-    let alloca = create_entry_block_alloca(
-        env,
-        fn_val,
-        arg_val.get_type(),
-        arg_symbol.ident_string(&env.interns),
-    );
-
-    builder.build_store(alloca, arg_val);
-
-    scope.insert(arg_symbol, (layout.clone(), alloca));
-
     let parent = fn_val;
     let original_wrapper = arg_val.into_struct_value();
@@ -698,25 +698,12 @@ fn modify_refcount_str_help<'a, 'ctx, 'env>(
     );
 
     builder.set_current_debug_location(&ctx, loc);
 
-    let mut scope = Scope::default();
-
     // Add args to scope
     let arg_symbol = Symbol::ARG_1;
     let arg_val = fn_val.get_param_iter().next().unwrap();
 
     set_name(arg_val, arg_symbol.ident_string(&env.interns));
 
-    let alloca = create_entry_block_alloca(
-        env,
-        fn_val,
-        arg_val.get_type(),
-        arg_symbol.ident_string(&env.interns),
-    );
-
-    builder.build_store(alloca, arg_val);
-
-    scope.insert(arg_symbol, (layout.clone(), alloca));
-
     let parent = fn_val;
     let str_wrapper = arg_val.into_struct_value();
@@ -731,7 +718,7 @@ fn modify_refcount_str_help<'a, 'ctx, 'env>(
         IntPredicate::SGT,
         len,
         ptr_int(ctx, env.ptr_bytes).const_zero(),
-        "len > 0",
+        "is_big_str",
     );
 
     // the block we'll always jump to when we're done
@@ -926,6 +913,10 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
         false
     })();
 
+    // to increment/decrement the cons-cell itself
+    let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, value_ptr);
+    let call_mode = mode_to_call_mode(fn_val, mode);
+
     let ctx = env.context;
     let cont_block = ctx.append_basic_block(parent, "cont");
 
     if is_nullable {
@@ -950,10 +941,6 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
     // next, make a jump table for all possible values of the tag_id
     let mut cases = Vec::with_capacity_in(tags.len(), env.arena);
 
-    let merge_block = env
-        .context
-        .append_basic_block(parent, pick("increment_merge", "decrement_merge"));
-
     builder.set_current_debug_location(&context, loc);
 
     for (tag_id, field_layouts) in tags.iter().enumerate() {
@@ -988,6 +975,11 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
             )
             .into_pointer_value();
 
+        // defer actually performing the refcount modifications until after the current cell has
+        // been decremented, see below
+        let mut deferred_rec = Vec::new_in(env.arena);
+        let mut deferred_nonrec = Vec::new_in(env.arena);
+
         for (i, field_layout) in field_layouts.iter().enumerate() {
             if let Layout::RecursivePointer = field_layout {
                 // this field has type `*i64`, but is really a pointer to the data we want
@@ -1010,13 +1002,8 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
                     union_type.ptr_type(AddressSpace::Generic).into(),
                 );
 
-                // recursively decrement the field
-                let call_name = pick("recursive_tag_increment", "recursive_tag_decrement");
-                call_help(env, fn_val, mode, recursive_field_ptr, call_name);
+                deferred_rec.push(recursive_field_ptr);
             } else if field_layout.contains_refcounted() {
-                // TODO this loads the whole field onto the stack;
-                // that's wasteful if e.g. the field is a big record, where only
-                // some fields are actually refcounted.
                 let elem_pointer = env
                     .builder
                     .build_struct_gep(struct_ptr, i as u32, "gep_recursive_pointer")
@@ -1027,11 +1014,31 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
                     pick("increment_struct_field", "decrement_struct_field"),
                 );
 
-                modify_refcount_layout(env, parent, layout_ids, mode, field, field_layout);
+                deferred_nonrec.push((field, field_layout));
             }
         }
 
-        env.builder.build_unconditional_branch(merge_block);
+        // OPTIMIZATION
+        //
+        // We really would like `inc/dec` to be tail-recursive; it gives roughly a 2X speedup on linked
+        // lists. To achieve it, we must first load all fields that we want to inc/dec (done above)
+        // and store them on the stack, then modify (and potentially free) the current cell, then
+        // actually inc/dec the fields.
+
+        refcount_ptr.modify(call_mode, &layout, env);
+
+        for (field, field_layout) in deferred_nonrec {
+            modify_refcount_layout(env, parent, layout_ids, mode, field, field_layout);
+        }
+
+        let call_name = pick("recursive_tag_increment", "recursive_tag_decrement");
+        for ptr in deferred_rec {
+            // recursively decrement the field
+            let call = call_help(env, fn_val, mode, ptr, call_name);
+            call.set_tail_call(true);
+        }
+
+        // this function returns void
+        builder.build_return(None);
 
         cases.push((
             env.context.i64_type().const_int(tag_id as u64, false),
@@ -1054,21 +1061,23 @@ fn build_rec_union_help<'a, 'ctx, 'env>(
         // read the tag_id
         let current_tag_id = rec_union_read_tag(env, value_ptr);
 
+        let merge_block = env
+            .context
+            .append_basic_block(parent, pick("increment_merge", "decrement_merge"));
+
         // switch on it
         env.builder
             .build_switch(current_tag_id, merge_block, &cases);
-    }
 
-    env.builder.position_at_end(merge_block);
+        env.builder.position_at_end(merge_block);
 
-    // increment/decrement the cons-cell itself
-    let refcount_ptr = PointerToRefcount::from_ptr_to_data(env, value_ptr);
-    let call_mode = mode_to_call_mode(fn_val, mode);
-    refcount_ptr.modify(call_mode, &layout, env);
+        // increment/decrement the cons-cell itself
+        refcount_ptr.modify(call_mode, &layout, env);
 
-    // this function returns void
-    builder.build_return(None);
+        // this function returns void
+        builder.build_return(None);
+    }
 }
 
 fn rec_union_read_tag<'a, 'ctx, 'env>(
     env: &Env<'a, 'ctx, 'env>,
@@ -1093,7 +1102,7 @@ fn call_help<'a, 'ctx, 'env>(
     mode: Mode,
     value: BasicValueEnum<'ctx>,
     call_name: &str,
-) {
+) -> inkwell::values::CallSiteValue<'ctx> {
     let call = match mode {
         Mode::Inc(inc_amount) => {
             let rc_increment = ptr_int(env.context, env.ptr_bytes).const_int(inc_amount, false);
@@ -1105,6 +1114,8 @@ fn call_help<'a, 'ctx, 'env>(
     };
 
     call.set_call_convention(FAST_CALL_CONV);
+
+    call
 }
 
 fn modify_refcount_union<'a, 'ctx, 'env>(
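
The OPTIMIZATION comment above is the heart of this file's change. A minimal sketch in plain Rust, with hypothetical types rather than the compiler's API, of why the fields are loaded onto the stack before the current cell is freed: it moves the recursive inc/dec into tail position, where it can become a jump instead of a new stack frame.

struct Cell {
    child: Option<Box<Cell>>,
}

// Before: recurse first, free afterwards. The recursive call is not in tail
// position, so a long chain keeps every frame (and every cell) live.
fn dec_naive(mut cell: Box<Cell>) {
    if let Some(child) = cell.child.take() {
        dec_naive(child);
    }
    drop(cell); // freed only after the recursion returns
}

// After: load the field, free the cell, then continue as the final action.
// Written here as the loop a tail call compiles down to; stack usage is constant.
fn dec_deferred(mut cell: Box<Cell>) {
    loop {
        let child = cell.child.take(); // 1. load the field onto the stack
        drop(cell);                    // 2. modify/free the current cell
        match child {
            Some(next) => cell = next, // 3. inc/dec the field last (tail position)
            None => return,
        }
    }
}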


@@ -27,6 +27,11 @@ mod gen_list {
         assert_evals_to!("[]", RocList::from_slice(&[]), RocList<i64>);
     }
 
+    #[test]
+    fn list_literal_empty_record() {
+        assert_evals_to!("[{}]", RocList::from_slice(&[()]), RocList<()>);
+    }
+
     #[test]
     fn int_singleton_list_literal() {
         assert_evals_to!("[1, 2]", RocList::from_slice(&[1, 2]), RocList<i64>);


@@ -1936,29 +1936,23 @@ mod gen_primitives {
     }
 
     #[test]
-    #[ignore]
     fn rosetree_basic() {
         assert_non_opt_evals_to!(
             indoc!(
                 r#"
                 app "test" provides [ main ] to "./platform"
 
-                # RoseTree
                 Tree a : [ Tree a (List (Tree a)) ]
 
-                tree : a, List (Tree a) -> Tree a
-                tree = \a, t -> Tree a t
-
                 singleton : a -> Tree a
                 singleton = \x -> Tree x []
 
                 main : Bool
                 main =
-                    x : I64
-                    x = 1
+                    x : Tree F64
+                    x = singleton 3
 
-                    when tree x [ singleton 5, singleton 3 ] is
-                        Tree 0x1 _ -> True
+                    when x is
+                        Tree 3.0 _ -> True
                         _ -> False
                 "#
             ),


@@ -1,9 +1,10 @@
-use crate::generic64::{Assembler, CallConv, GPRegTrait};
+use crate::generic64::{Assembler, CallConv, RegTrait};
+use crate::Relocation;
 use bumpalo::collections::Vec;
 
 #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
 #[allow(dead_code)]
-pub enum AArch64GPReg {
+pub enum AArch64GeneralReg {
     X0 = 0,
     X1 = 1,
     X2 = 2,
@@ -38,8 +39,12 @@ pub enum AArch64GPReg {
     /// This can mean Zero or Stack Pointer depending on the context.
     ZRSP = 31,
 }
+impl RegTrait for AArch64GeneralReg {}
 
-impl GPRegTrait for AArch64GPReg {}
+#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
+#[allow(dead_code)]
+pub enum AArch64FloatReg {}
+impl RegTrait for AArch64FloatReg {}
 
 pub struct AArch64Assembler {}
@@ -49,83 +54,90 @@ pub struct AArch64Call {}
 
 const STACK_ALIGNMENT: u8 = 16;
 
-impl CallConv<AArch64GPReg> for AArch64Call {
-    const GP_PARAM_REGS: &'static [AArch64GPReg] = &[
-        AArch64GPReg::X0,
-        AArch64GPReg::X1,
-        AArch64GPReg::X2,
-        AArch64GPReg::X3,
-        AArch64GPReg::X4,
-        AArch64GPReg::X5,
-        AArch64GPReg::X6,
-        AArch64GPReg::X7,
+impl CallConv<AArch64GeneralReg, AArch64FloatReg> for AArch64Call {
+    const GENERAL_PARAM_REGS: &'static [AArch64GeneralReg] = &[
+        AArch64GeneralReg::X0,
+        AArch64GeneralReg::X1,
+        AArch64GeneralReg::X2,
+        AArch64GeneralReg::X3,
+        AArch64GeneralReg::X4,
+        AArch64GeneralReg::X5,
+        AArch64GeneralReg::X6,
+        AArch64GeneralReg::X7,
     ];
-    const GP_RETURN_REGS: &'static [AArch64GPReg] = Self::GP_PARAM_REGS;
-    const GP_DEFAULT_FREE_REGS: &'static [AArch64GPReg] = &[
+    const GENERAL_RETURN_REGS: &'static [AArch64GeneralReg] = Self::GENERAL_PARAM_REGS;
+    const GENERAL_DEFAULT_FREE_REGS: &'static [AArch64GeneralReg] = &[
         // The regs we want to use first should be at the end of this vec.
         // We will use pop to get which reg to use next
-        // Don't use frame pointer: AArch64GPReg::FP,
-        // Don't user indirect result location: AArch64GPReg::XR,
-        // Don't use platform register: AArch64GPReg::PR,
-        // Don't use link register: AArch64GPReg::LR,
-        // Don't use zero register/stack pointer: AArch64GPReg::ZRSP,
+        // Don't use frame pointer: AArch64GeneralReg::FP,
+        // Don't use indirect result location: AArch64GeneralReg::XR,
+        // Don't use platform register: AArch64GeneralReg::PR,
+        // Don't use link register: AArch64GeneralReg::LR,
+        // Don't use zero register/stack pointer: AArch64GeneralReg::ZRSP,
 
         // Use callee saved regs last.
-        AArch64GPReg::X19,
-        AArch64GPReg::X20,
-        AArch64GPReg::X21,
-        AArch64GPReg::X22,
-        AArch64GPReg::X23,
-        AArch64GPReg::X24,
-        AArch64GPReg::X25,
-        AArch64GPReg::X26,
-        AArch64GPReg::X27,
-        AArch64GPReg::X28,
+        AArch64GeneralReg::X19,
+        AArch64GeneralReg::X20,
+        AArch64GeneralReg::X21,
+        AArch64GeneralReg::X22,
+        AArch64GeneralReg::X23,
+        AArch64GeneralReg::X24,
+        AArch64GeneralReg::X25,
+        AArch64GeneralReg::X26,
+        AArch64GeneralReg::X27,
+        AArch64GeneralReg::X28,
         // Use caller saved regs first.
-        AArch64GPReg::X0,
-        AArch64GPReg::X1,
-        AArch64GPReg::X2,
-        AArch64GPReg::X3,
-        AArch64GPReg::X4,
-        AArch64GPReg::X5,
-        AArch64GPReg::X6,
-        AArch64GPReg::X7,
-        AArch64GPReg::X9,
-        AArch64GPReg::X10,
-        AArch64GPReg::X11,
-        AArch64GPReg::X12,
-        AArch64GPReg::X13,
-        AArch64GPReg::X14,
-        AArch64GPReg::X15,
-        AArch64GPReg::IP0,
-        AArch64GPReg::IP1,
+        AArch64GeneralReg::X0,
+        AArch64GeneralReg::X1,
+        AArch64GeneralReg::X2,
+        AArch64GeneralReg::X3,
+        AArch64GeneralReg::X4,
+        AArch64GeneralReg::X5,
+        AArch64GeneralReg::X6,
+        AArch64GeneralReg::X7,
+        AArch64GeneralReg::X9,
+        AArch64GeneralReg::X10,
+        AArch64GeneralReg::X11,
+        AArch64GeneralReg::X12,
+        AArch64GeneralReg::X13,
+        AArch64GeneralReg::X14,
+        AArch64GeneralReg::X15,
+        AArch64GeneralReg::IP0,
+        AArch64GeneralReg::IP1,
     ];
+
+    const FLOAT_PARAM_REGS: &'static [AArch64FloatReg] = &[];
+    const FLOAT_RETURN_REGS: &'static [AArch64FloatReg] = Self::FLOAT_PARAM_REGS;
+    const FLOAT_DEFAULT_FREE_REGS: &'static [AArch64FloatReg] = &[];
+
     const SHADOW_SPACE_SIZE: u8 = 0;
 
     #[inline(always)]
-    fn callee_saved(reg: &AArch64GPReg) -> bool {
+    fn general_callee_saved(reg: &AArch64GeneralReg) -> bool {
         matches!(
             reg,
-            AArch64GPReg::X19
-                | AArch64GPReg::X20
-                | AArch64GPReg::X21
-                | AArch64GPReg::X22
-                | AArch64GPReg::X23
-                | AArch64GPReg::X24
-                | AArch64GPReg::X25
-                | AArch64GPReg::X26
-                | AArch64GPReg::X27
-                | AArch64GPReg::X28
+            AArch64GeneralReg::X19
+                | AArch64GeneralReg::X20
+                | AArch64GeneralReg::X21
+                | AArch64GeneralReg::X22
+                | AArch64GeneralReg::X23
+                | AArch64GeneralReg::X24
+                | AArch64GeneralReg::X25
+                | AArch64GeneralReg::X26
+                | AArch64GeneralReg::X27
+                | AArch64GeneralReg::X28
         )
     }
+    #[inline(always)]
+    fn float_callee_saved(_reg: &AArch64FloatReg) -> bool {
+        unimplemented!("AArch64 FloatRegs not implemented yet");
+    }
 
     #[inline(always)]
     fn setup_stack(
         buf: &mut Vec<'_, u8>,
         leaf_function: bool,
-        saved_regs: &[AArch64GPReg],
+        saved_regs: &[AArch64GeneralReg],
         requested_stack_size: i32,
     ) -> Result<i32, String> {
         // full size is upcast to i64 to make sure we don't overflow here.
@@ -149,8 +161,8 @@ impl CallConv<AArch64GPReg> for AArch64Call {
         if aligned_stack_size > 0 {
             AArch64Assembler::sub_reg64_reg64_imm32(
                 buf,
-                AArch64GPReg::ZRSP,
-                AArch64GPReg::ZRSP,
+                AArch64GeneralReg::ZRSP,
+                AArch64GeneralReg::ZRSP,
                 aligned_stack_size,
             );
@@ -158,9 +170,9 @@ impl CallConv<AArch64GPReg> for AArch64Call {
             let mut offset = aligned_stack_size;
             if !leaf_function {
                 offset -= 8;
-                AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GPReg::LR);
+                AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::LR);
                 offset -= 8;
-                AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GPReg::FP);
+                AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::FP);
             }
             for reg in saved_regs {
                 offset -= 8;
@@ -179,7 +191,7 @@ impl CallConv<AArch64GPReg> for AArch64Call {
     fn cleanup_stack(
         buf: &mut Vec<'_, u8>,
         leaf_function: bool,
-        saved_regs: &[AArch64GPReg],
+        saved_regs: &[AArch64GeneralReg],
         aligned_stack_size: i32,
     ) -> Result<(), String> {
         if aligned_stack_size > 0 {
@@ -187,9 +199,9 @@ impl CallConv<AArch64GPReg> for AArch64Call {
             let mut offset = aligned_stack_size;
             if !leaf_function {
                 offset -= 8;
-                AArch64Assembler::mov_reg64_stack32(buf, AArch64GPReg::LR, offset);
+                AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::LR, offset);
                 offset -= 8;
-                AArch64Assembler::mov_reg64_stack32(buf, AArch64GPReg::FP, offset);
+                AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::FP, offset);
             }
             for reg in saved_regs {
                 offset -= 8;
@@ -197,8 +209,8 @@ impl CallConv<AArch64GPReg> for AArch64Call {
             }
             AArch64Assembler::add_reg64_reg64_imm32(
                 buf,
-                AArch64GPReg::ZRSP,
-                AArch64GPReg::ZRSP,
+                AArch64GeneralReg::ZRSP,
+                AArch64GeneralReg::ZRSP,
                 aligned_stack_size,
             );
         }
@@ -206,17 +218,17 @@ impl CallConv<AArch64GPReg> for AArch64Call {
     }
 }
 
-impl Assembler<AArch64GPReg> for AArch64Assembler {
+impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
     #[inline(always)]
-    fn abs_reg64_reg64(_buf: &mut Vec<'_, u8>, _dst: AArch64GPReg, _src: AArch64GPReg) {
+    fn abs_reg64_reg64(_buf: &mut Vec<'_, u8>, _dst: AArch64GeneralReg, _src: AArch64GeneralReg) {
         unimplemented!("abs_reg64_reg64 is not yet implemented for AArch64");
     }
 
     #[inline(always)]
     fn add_reg64_reg64_imm32(
         buf: &mut Vec<'_, u8>,
-        dst: AArch64GPReg,
-        src: AArch64GPReg,
+        dst: AArch64GeneralReg,
+        src: AArch64GeneralReg,
         imm32: i32,
     ) {
         if imm32 < 0 {
@@ -233,15 +245,35 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
     #[inline(always)]
     fn add_reg64_reg64_reg64(
         buf: &mut Vec<'_, u8>,
-        dst: AArch64GPReg,
-        src1: AArch64GPReg,
-        src2: AArch64GPReg,
+        dst: AArch64GeneralReg,
+        src1: AArch64GeneralReg,
+        src2: AArch64GeneralReg,
     ) {
         add_reg64_reg64_reg64(buf, dst, src1, src2);
     }
 
     #[inline(always)]
-    fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm: i64) {
+    fn add_freg64_freg64_freg64(
+        _buf: &mut Vec<'_, u8>,
+        _dst: AArch64FloatReg,
+        _src1: AArch64FloatReg,
+        _src2: AArch64FloatReg,
+    ) {
+        unimplemented!("adding floats not yet implemented for AArch64");
+    }
+
+    #[inline(always)]
+    fn mov_freg64_imm64(
+        _buf: &mut Vec<'_, u8>,
+        _relocs: &mut Vec<'_, Relocation>,
+        _dst: AArch64FloatReg,
+        _imm: f64,
+    ) {
+        unimplemented!("loading float literal not yet implemented for AArch64");
+    }
+
+    #[inline(always)]
+    fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm: i64) {
         let mut remaining = imm as u64;
         movz_reg64_imm16(buf, dst, remaining as u16, 0);
         remaining >>= 16;
@@ -259,29 +291,43 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
     }
 
     #[inline(always)]
-    fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg) {
+    fn mov_freg64_freg64(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _src: AArch64FloatReg) {
+        unimplemented!("moving data between float registers not yet implemented for AArch64");
+    }
+
+    #[inline(always)]
+    fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64GeneralReg) {
         mov_reg64_reg64(buf, dst, src);
     }
 
     #[inline(always)]
-    fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, offset: i32) {
+    fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
         if offset < 0 {
             unimplemented!("negative stack offsets are not yet implemented for AArch64");
         } else if offset < (0xFFF << 8) {
             debug_assert!(offset % 8 == 0);
-            ldr_reg64_imm12(buf, dst, AArch64GPReg::ZRSP, (offset as u16) >> 3);
+            ldr_reg64_imm12(buf, dst, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
         } else {
             unimplemented!("stack offsets over 32k are not yet implemented for AArch64");
         }
     }
+
+    fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _offset: i32) {
+        unimplemented!("loading floating point reg from stack not yet implemented for AArch64");
+    }
+
     #[inline(always)]
-    fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GPReg) {
+    fn mov_stack32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FloatReg) {
+        unimplemented!("saving floating point reg to stack not yet implemented for AArch64");
+    }
+
+    #[inline(always)]
+    fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GeneralReg) {
         if offset < 0 {
             unimplemented!("negative stack offsets are not yet implemented for AArch64");
         } else if offset < (0xFFF << 8) {
             debug_assert!(offset % 8 == 0);
-            str_reg64_imm12(buf, src, AArch64GPReg::ZRSP, (offset as u16) >> 3);
+            str_reg64_imm12(buf, src, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
         } else {
             unimplemented!("stack offsets over 32k are not yet implemented for AArch64");
         }
@@ -290,8 +336,8 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
     #[inline(always)]
     fn sub_reg64_reg64_imm32(
         buf: &mut Vec<'_, u8>,
-        dst: AArch64GPReg,
-        src: AArch64GPReg,
+        dst: AArch64GeneralReg,
+        src: AArch64GeneralReg,
         imm32: i32,
     ) {
         if imm32 < 0 {
@@ -310,9 +356,9 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
     #[inline(always)]
     fn sub_reg64_reg64_reg64(
         _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GPReg,
-        _src1: AArch64GPReg,
-        _src2: AArch64GPReg,
+        _dst: AArch64GeneralReg,
+        _src1: AArch64GeneralReg,
+        _src2: AArch64GeneralReg,
     ) {
         unimplemented!("registers subtractions not implemented yet for AArch64");
     }
@@ -320,15 +366,15 @@ impl Assembler<AArch64GPReg> for AArch64Assembler {
     #[inline(always)]
     fn eq_reg64_reg64_reg64(
         _buf: &mut Vec<'_, u8>,
-        _dst: AArch64GPReg,
-        _src1: AArch64GPReg,
-        _src2: AArch64GPReg,
+        _dst: AArch64GeneralReg,
+        _src1: AArch64GeneralReg,
+        _src2: AArch64GeneralReg,
     ) {
         unimplemented!("registers equality not implemented yet for AArch64");
     }
 
     #[inline(always)]
     fn ret(buf: &mut Vec<'_, u8>) {
-        ret_reg64(buf, AArch64GPReg::LR)
+        ret_reg64(buf, AArch64GeneralReg::LR)
     }
 }
@@ -355,7 +401,7 @@ enum BranchGroup {
         opc: u8,
         op2: u8,
         op3: u8,
-        reg_n: AArch64GPReg,
+        reg_n: AArch64GeneralReg,
         op4: u8,
     },
 }
@@ -367,19 +413,19 @@ enum DPRegGroup {
         subtract: bool,
         set_flags: bool,
         shift: u8,
-        reg_m: AArch64GPReg,
+        reg_m: AArch64GeneralReg,
         imm6: u8,
-        reg_n: AArch64GPReg,
-        reg_d: AArch64GPReg,
+        reg_n: AArch64GeneralReg,
+        reg_d: AArch64GeneralReg,
     },
     Logical {
         sf: bool,
         op: DPRegLogicalOp,
         shift: u8,
-        reg_m: AArch64GPReg,
+        reg_m: AArch64GeneralReg,
         imm6: u8,
-        reg_n: AArch64GPReg,
-        reg_d: AArch64GPReg,
+        reg_n: AArch64GeneralReg,
+        reg_d: AArch64GeneralReg,
     },
 }
@@ -391,15 +437,15 @@ enum DPImmGroup {
         set_flags: bool,
         shift: bool,
         imm12: u16,
-        reg_n: AArch64GPReg,
-        reg_d: AArch64GPReg,
+        reg_n: AArch64GeneralReg,
+        reg_d: AArch64GeneralReg,
     },
     MoveWide {
         sf: bool,
         opc: u8,
         hw: u8,
         imm16: u16,
-        reg_d: AArch64GPReg,
+        reg_d: AArch64GeneralReg,
     },
 }
@@ -410,8 +456,8 @@ enum LdStrGroup {
         v: bool,
         opc: u8,
         imm12: u16,
-        reg_n: AArch64GPReg,
-        reg_t: AArch64GPReg,
+        reg_n: AArch64GeneralReg,
+        reg_t: AArch64GeneralReg,
     },
 }
@@ -587,7 +633,12 @@ fn build_instruction(inst: AArch64Instruction) -> [u8; 4] {
 /// `ADD Xd, Xn, imm12` -> Add Xn and imm12 and place the result into Xd.
 #[inline(always)]
-fn add_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg, imm12: u16) {
+fn add_reg64_reg64_imm12(
+    buf: &mut Vec<'_, u8>,
+    dst: AArch64GeneralReg,
+    src: AArch64GeneralReg,
+    imm12: u16,
+) {
     buf.extend(&build_instruction(AArch64Instruction::DPImm(
         DPImmGroup::AddSubImm {
             sf: true,
@@ -605,9 +656,9 @@ fn add_reg64_reg64_imm12(
 #[inline(always)]
 fn add_reg64_reg64_reg64(
     buf: &mut Vec<'_, u8>,
-    dst: AArch64GPReg,
-    src1: AArch64GPReg,
-    src2: AArch64GPReg,
+    dst: AArch64GeneralReg,
+    src1: AArch64GeneralReg,
+    src2: AArch64GeneralReg,
 ) {
     buf.extend(&build_instruction(AArch64Instruction::DPReg(
         DPRegGroup::AddSubShifted {
@@ -626,7 +677,12 @@ fn add_reg64_reg64_reg64(
 /// `LDR Xt, [Xn, #offset]` -> Load Xn + Offset into Xt. ZRSP is SP.
 /// Note: imm12 is the offset divided by 8.
 #[inline(always)]
-fn ldr_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, base: AArch64GPReg, imm12: u16) {
+fn ldr_reg64_imm12(
+    buf: &mut Vec<'_, u8>,
+    dst: AArch64GeneralReg,
+    base: AArch64GeneralReg,
+    imm12: u16,
+) {
     debug_assert!(imm12 <= 0xFFF);
     buf.extend(&build_instruction(AArch64Instruction::LdStr(
         LdStrGroup::UnsignedImm {
@@ -642,7 +698,7 @@ fn ldr_reg64_imm12(
 /// `MOV Xd, Xm` -> Move Xm to Xd.
 #[inline(always)]
-fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg) {
+fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, src: AArch64GeneralReg) {
     // MOV is equivalent to `ORR Xd, XZR, XM` in AARCH64.
     buf.extend(&build_instruction(AArch64Instruction::DPReg(
         DPRegGroup::Logical {
@@ -651,7 +707,7 @@ fn mov_reg64_reg64(
             shift: 0,
             reg_m: src,
             imm6: 0,
-            reg_n: AArch64GPReg::ZRSP,
+            reg_n: AArch64GeneralReg::ZRSP,
             reg_d: dst,
         },
     )));
@@ -659,7 +715,7 @@ fn mov_reg64_reg64(
 /// `MOVK Xd, imm16` -> Keeps Xd and moves an optionally shifted imm16 to Xd.
 #[inline(always)]
-fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8) {
+fn movk_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
     debug_assert!(hw <= 0b11);
     // MOV is equivalent to `ORR Xd, XZR, XM` in AARCH64.
     buf.extend(&build_instruction(AArch64Instruction::DPImm(
@@ -675,7 +731,7 @@ fn movk_reg64_imm16(
 /// `MOVZ Xd, imm16` -> Zeros Xd and moves an optionally shifted imm16 to Xd.
 #[inline(always)]
-fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, imm16: u16, hw: u8) {
+fn movz_reg64_imm16(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm16: u16, hw: u8) {
     debug_assert!(hw <= 0b11);
     // MOV is equivalent to `ORR Xd, XZR, XM` in AARCH64.
     buf.extend(&build_instruction(AArch64Instruction::DPImm(
@@ -692,7 +748,12 @@ fn movz_reg64_imm16(
 /// `STR Xt, [Xn, #offset]` -> Store Xt to Xn + Offset. ZRSP is SP.
 /// Note: imm12 is the offset divided by 8.
 #[inline(always)]
-fn str_reg64_imm12(buf: &mut Vec<'_, u8>, src: AArch64GPReg, base: AArch64GPReg, imm12: u16) {
+fn str_reg64_imm12(
+    buf: &mut Vec<'_, u8>,
+    src: AArch64GeneralReg,
+    base: AArch64GeneralReg,
+    imm12: u16,
+) {
     debug_assert!(imm12 <= 0xFFF);
     buf.extend(&build_instruction(AArch64Instruction::LdStr(
         LdStrGroup::UnsignedImm {
@@ -708,7 +769,12 @@ fn str_reg64_imm12(
 /// `SUB Xd, Xn, imm12` -> Subtract imm12 from Xn and place the result into Xd.
 #[inline(always)]
-fn sub_reg64_reg64_imm12(buf: &mut Vec<'_, u8>, dst: AArch64GPReg, src: AArch64GPReg, imm12: u16) {
+fn sub_reg64_reg64_imm12(
+    buf: &mut Vec<'_, u8>,
+    dst: AArch64GeneralReg,
+    src: AArch64GeneralReg,
+    imm12: u16,
+) {
     buf.extend(&build_instruction(AArch64Instruction::DPImm(
         DPImmGroup::AddSubImm {
             sf: true,
@@ -724,7 +790,7 @@ fn sub_reg64_reg64_imm12(
 /// `RET Xn` -> Return to the address stored in Xn.
 #[inline(always)]
-fn ret_reg64(buf: &mut Vec<'_, u8>, xn: AArch64GPReg) {
+fn ret_reg64(buf: &mut Vec<'_, u8>, xn: AArch64GeneralReg) {
     buf.extend(&build_instruction(AArch64Instruction::Branch(
         BranchGroup::UnconditionBranchReg {
             opc: 0b0010,
@@ -750,9 +816,9 @@ mod tests {
         let mut buf = bumpalo::vec![in &arena];
         add_reg64_reg64_reg64(
             &mut buf,
-            AArch64GPReg::X10,
-            AArch64GPReg::ZRSP,
-            AArch64GPReg::X21,
+            AArch64GeneralReg::X10,
+            AArch64GeneralReg::ZRSP,
+            AArch64GeneralReg::X21,
         );
         assert_eq!(&buf, &[0xAA, 0x02, 0x1F, 0x8B]);
     }
@@ -761,7 +827,12 @@ mod tests {
     fn test_add_reg64_reg64_imm12() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        add_reg64_reg64_imm12(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21, 0x123);
+        add_reg64_reg64_imm12(
+            &mut buf,
+            AArch64GeneralReg::X10,
+            AArch64GeneralReg::X21,
+            0x123,
+        );
         assert_eq!(&buf, &[0xAA, 0x8E, 0x04, 0x91]);
     }
@@ -769,7 +840,12 @@ mod tests {
     fn test_ldr_reg64_imm12() {
        let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        ldr_reg64_imm12(&mut buf, AArch64GPReg::X21, AArch64GPReg::ZRSP, 0x123);
+        ldr_reg64_imm12(
+            &mut buf,
+            AArch64GeneralReg::X21,
+            AArch64GeneralReg::ZRSP,
+            0x123,
+        );
         assert_eq!(&buf, &[0xF5, 0x8F, 0x44, 0xF9]);
     }
@@ -777,7 +853,7 @@ mod tests {
     fn test_mov_reg64_reg64() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        mov_reg64_reg64(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21);
+        mov_reg64_reg64(&mut buf, AArch64GeneralReg::X10, AArch64GeneralReg::X21);
         assert_eq!(&buf, &[0xEA, 0x03, 0x15, 0xAA]);
     }
@@ -785,7 +861,7 @@ mod tests {
     fn test_movk_reg64_imm16() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        movk_reg64_imm16(&mut buf, AArch64GPReg::X21, TEST_U16, 3);
+        movk_reg64_imm16(&mut buf, AArch64GeneralReg::X21, TEST_U16, 3);
         assert_eq!(&buf, &[0x95, 0x46, 0xE2, 0xF2]);
     }
@@ -793,7 +869,7 @@ mod tests {
     fn test_movz_reg64_imm16() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        movz_reg64_imm16(&mut buf, AArch64GPReg::X21, TEST_U16, 3);
+        movz_reg64_imm16(&mut buf, AArch64GeneralReg::X21, TEST_U16, 3);
         assert_eq!(&buf, &[0x95, 0x46, 0xE2, 0xD2]);
     }
@@ -801,7 +877,12 @@ mod tests {
     fn test_str_reg64_imm12() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        str_reg64_imm12(&mut buf, AArch64GPReg::X21, AArch64GPReg::ZRSP, 0x123);
+        str_reg64_imm12(
+            &mut buf,
+            AArch64GeneralReg::X21,
+            AArch64GeneralReg::ZRSP,
+            0x123,
+        );
         assert_eq!(&buf, &[0xF5, 0x8F, 0x04, 0xF9]);
     }
@@ -809,7 +890,12 @@ mod tests {
     fn test_sub_reg64_reg64_imm12() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        sub_reg64_reg64_imm12(&mut buf, AArch64GPReg::X10, AArch64GPReg::X21, 0x123);
+        sub_reg64_reg64_imm12(
+            &mut buf,
+            AArch64GeneralReg::X10,
+            AArch64GeneralReg::X21,
+            0x123,
+        );
         assert_eq!(&buf, &[0xAA, 0x8E, 0x04, 0xD1]);
     }
@@ -817,7 +903,7 @@ mod tests {
     fn test_ret_reg64() {
         let arena = bumpalo::Bump::new();
         let mut buf = bumpalo::vec![in &arena];
-        ret_reg64(&mut buf, AArch64GPReg::LR);
+        ret_reg64(&mut buf, AArch64GeneralReg::LR);
         assert_eq!(&buf, &[0xC0, 0x03, 0x5F, 0xD6]);
     }
 }
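
mov_reg64_imm64 above emits a MOVZ for the low 16 bits and then, assuming the truncated remainder of its body continues the loop with the movk_reg64_imm16 helper, a MOVK per higher 16-bit chunk. A worked example of that split (hypothetical helper, not part of the backend):

// Splits an immediate the way mov_reg64_imm64 walks it: low chunk first.
fn chunks16(imm: i64) -> [u16; 4] {
    let mut remaining = imm as u64;
    let mut out = [0u16; 4];
    for chunk in out.iter_mut() {
        *chunk = remaining as u16; // low 16 bits of what's left
        remaining >>= 16;
    }
    out
}

// chunks16(0x1234_5678_9ABC_DEF0) == [0xDEF0, 0x9ABC, 0x5678, 0x1234], i.e.:
//   MOVZ Xd, #0xDEF0            ; writes the low chunk, zeroes the rest
//   MOVK Xd, #0x9ABC, LSL #16   ; keeps the bits already written
//   MOVK Xd, #0x5678, LSL #32
//   MOVK Xd, #0x1234, LSL #48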


@@ -9,29 +9,38 @@ use target_lexicon::Triple;
 pub mod aarch64;
 pub mod x86_64;
 
-pub trait CallConv<GPReg: GPRegTrait> {
-    const GP_PARAM_REGS: &'static [GPReg];
-    const GP_RETURN_REGS: &'static [GPReg];
-    const GP_DEFAULT_FREE_REGS: &'static [GPReg];
+pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait> {
+    const GENERAL_PARAM_REGS: &'static [GeneralReg];
+    const GENERAL_RETURN_REGS: &'static [GeneralReg];
+    const GENERAL_DEFAULT_FREE_REGS: &'static [GeneralReg];
+
+    const FLOAT_PARAM_REGS: &'static [FloatReg];
+    const FLOAT_RETURN_REGS: &'static [FloatReg];
+    const FLOAT_DEFAULT_FREE_REGS: &'static [FloatReg];
+
     const SHADOW_SPACE_SIZE: u8;
 
-    fn callee_saved(reg: &GPReg) -> bool;
+    fn general_callee_saved(reg: &GeneralReg) -> bool;
     #[inline(always)]
-    fn caller_saved_regs(reg: &GPReg) -> bool {
-        !Self::callee_saved(reg)
+    fn general_caller_saved(reg: &GeneralReg) -> bool {
+        !Self::general_callee_saved(reg)
+    }
+
+    fn float_callee_saved(reg: &FloatReg) -> bool;
+    #[inline(always)]
+    fn float_caller_saved(reg: &FloatReg) -> bool {
+        !Self::float_callee_saved(reg)
     }
 
     fn setup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
-        saved_regs: &[GPReg],
+        general_saved_regs: &[GeneralReg],
         requested_stack_size: i32,
     ) -> Result<i32, String>;
     fn cleanup_stack<'a>(
         buf: &mut Vec<'a, u8>,
         leaf_function: bool,
-        saved_regs: &[GPReg],
+        general_saved_regs: &[GeneralReg],
         aligned_stack_size: i32,
     ) -> Result<(), String>;
 }
@@ -42,36 +51,76 @@ pub trait CallConv<GPReg: GPRegTrait> {
 /// Thus, some backends will need to use multiple instructions to perform a single one of these calls.
 /// Generally, I prefer explicit sources, as opposed to dst being one of the sources. Ex: `x = x + y` would be `add x, x, y` instead of `add x, y`.
 /// dst should always come before sources.
-pub trait Assembler<GPReg: GPRegTrait> {
-    fn abs_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src: GPReg);
-    fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, imm32: i32);
-    fn add_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
-    fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GPReg, imm: i64);
-    fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src: GPReg);
-    fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GPReg, offset: i32);
-    fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GPReg);
-    fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, imm32: i32);
-    fn sub_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
-    fn eq_reg64_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GPReg, src1: GPReg, src2: GPReg);
+pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
+    fn abs_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
+    fn add_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
+    fn add_freg64_freg64_freg64(
+        buf: &mut Vec<'_, u8>,
+        dst: FloatReg,
+        src1: FloatReg,
+        src2: FloatReg,
+    );
+    fn add_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
+    fn mov_freg64_imm64(
+        buf: &mut Vec<'_, u8>,
+        relocs: &mut Vec<'_, Relocation>,
+        dst: FloatReg,
+        imm: f64,
+    );
+    fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GeneralReg, imm: i64);
+    fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
+    fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
+    fn mov_freg64_stack32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
+    fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
+    fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
+    fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
+    fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
+    fn sub_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
+    fn eq_reg64_reg64_reg64(
+        buf: &mut Vec<'_, u8>,
+        dst: GeneralReg,
+        src1: GeneralReg,
+        src2: GeneralReg,
+    );
     fn ret(buf: &mut Vec<'_, u8>);
 }
 
 #[derive(Clone, Debug, PartialEq)]
-enum SymbolStorage<GPReg: GPRegTrait> {
+#[allow(dead_code)]
+enum SymbolStorage<GeneralReg: RegTrait, FloatReg: RegTrait> {
     // These may need layout, but I am not sure.
     // I think whenever a symbol would be used, we specify layout anyways.
-    GPReg(GPReg),
+    GeneralReg(GeneralReg),
+    FloatReg(FloatReg),
     Stack(i32),
-    StackAndGPReg(GPReg, i32),
+    StackAndGeneralReg(GeneralReg, i32),
+    StackAndFloatReg(FloatReg, i32),
 }
 
-pub trait GPRegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}
+pub trait RegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}
 
-pub struct Backend64Bit<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> {
+pub struct Backend64Bit<
+    'a,
+    GeneralReg: RegTrait,
+    FloatReg: RegTrait,
+    ASM: Assembler<GeneralReg, FloatReg>,
+    CC: CallConv<GeneralReg, FloatReg>,
+> {
     phantom_asm: PhantomData<ASM>,
     phantom_cc: PhantomData<CC>,
     env: &'a Env<'a>,
     buf: Vec<'a, u8>,
+    relocs: Vec<'a, Relocation<'a>>,
 
     /// leaf_function is true if the only calls this function makes are tail calls.
     /// If that is the case, we can skip emitting the frame pointer and updating the stack.
@@ -79,26 +128,34 @@ pub struct Backend64Bit<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallCo
     last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
     free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
-    symbols_map: MutMap<Symbol, SymbolStorage<GPReg>>,
+    symbols_map: MutMap<Symbol, SymbolStorage<GeneralReg, FloatReg>>,
     literal_map: MutMap<Symbol, Literal<'a>>,
 
     // This should probably be smarter than a vec.
     // There are certain registers we should always use first. With pushing and popping, this could get mixed.
-    gp_free_regs: Vec<'a, GPReg>,
+    general_free_regs: Vec<'a, GeneralReg>,
+    float_free_regs: Vec<'a, FloatReg>,
 
     // The last major thing we need is a way to decide what reg to free when all of them are full.
     // Theoretically we want a basic lru cache for the currently loaded symbols.
     // For now just a vec of used registers and the symbols they contain.
-    gp_used_regs: Vec<'a, (GPReg, Symbol)>,
+    general_used_regs: Vec<'a, (GeneralReg, Symbol)>,
+    float_used_regs: Vec<'a, (FloatReg, Symbol)>,
 
-    stack_size: i32,
     // used callee saved regs must be tracked for pushing and popping at the beginning/end of the function.
-    used_callee_saved_regs: MutSet<GPReg>,
+    general_used_callee_saved_regs: MutSet<GeneralReg>,
+    float_used_callee_saved_regs: MutSet<FloatReg>,
+
+    stack_size: i32,
 }
 
-impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<'a>
-    for Backend64Bit<'a, GPReg, ASM, CC>
+impl<
+    'a,
+    GeneralReg: RegTrait,
+    FloatReg: RegTrait,
+    ASM: Assembler<GeneralReg, FloatReg>,
+    CC: CallConv<GeneralReg, FloatReg>,
+> Backend<'a> for Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
 {
     fn new(env: &'a Env, _target: &Triple) -> Result<Self, String> {
         Ok(Backend64Bit {
@ -107,14 +164,18 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
env,
leaf_function: true,
buf: bumpalo::vec!(in env.arena),
relocs: bumpalo::vec!(in env.arena),
last_seen_map: MutMap::default(),
free_map: MutMap::default(),
symbols_map: MutMap::default(),
literal_map: MutMap::default(),
general_free_regs: bumpalo::vec![in env.arena],
general_used_regs: bumpalo::vec![in env.arena],
general_used_callee_saved_regs: MutSet::default(),
float_free_regs: bumpalo::vec![in env.arena],
float_used_regs: bumpalo::vec![in env.arena],
float_used_callee_saved_regs: MutSet::default(),
stack_size: 0,
})
}
@@ -129,11 +190,16 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
self.free_map.clear();
self.symbols_map.clear();
self.buf.clear();
self.general_used_callee_saved_regs.clear();
self.general_free_regs.clear();
self.general_used_regs.clear();
self.general_free_regs
.extend_from_slice(CC::GENERAL_DEFAULT_FREE_REGS);
self.float_used_callee_saved_regs.clear();
self.float_free_regs.clear();
self.float_used_regs.clear();
self.float_free_regs
.extend_from_slice(CC::FLOAT_DEFAULT_FREE_REGS);
}
fn set_not_leaf_function(&mut self) {
@@ -157,12 +223,12 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
&mut self.free_map
}
fn finalize(&mut self) -> Result<(&'a [u8], &[&Relocation]), String> {
let mut out = bumpalo::vec![in self.env.arena];
// Setup stack.
let mut used_regs = bumpalo::vec![in self.env.arena];
used_regs.extend(&self.general_used_callee_saved_regs);
let aligned_stack_size =
CC::setup_stack(&mut out, self.leaf_function, &used_regs, self.stack_size)?;
@@ -173,12 +239,14 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
CC::cleanup_stack(&mut out, self.leaf_function, &used_regs, aligned_stack_size)?;
ASM::ret(&mut out);
let mut out_relocs = bumpalo::vec![in self.env.arena];
out_relocs.extend(&self.relocs);
Ok((out.into_bump_slice(), out_relocs.into_bump_slice()))
}
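Note the ordering finalize relies on: the function body already sits in self.buf, so stack_size and the callee-saved set are final before the prologue is written, and the collected relocations are handed back for the object writer to resolve. A condensed sketch of that two-phase assembly (hypothetical helper, simplified relocations):

```rust
/// Hedged sketch of finalize's two-phase layout: the prologue depends on
/// values (stack size, callee-saved regs) only known after the body exists,
/// so it is emitted last but placed first in the output buffer.
fn finalize_sketch(body: &[u8], relocs: Vec<u64>) -> (Vec<u8>, Vec<u64>) {
    let mut out = Vec::new();
    out.extend_from_slice(&[0x55, 0x48, 0x89, 0xe5]); // push rbp; mov rbp, rsp
    out.extend_from_slice(body); // the already-emitted instructions
    out.push(0xc3); // ret (a real epilogue restores saved regs first)
    (out, relocs) // relocations travel alongside the machine code
}
```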
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_general_reg(dst)?;
let src_reg = self.load_to_general_reg(src)?;
ASM::abs_reg64_reg64(&mut self.buf, dst_reg, src_reg);
Ok(())
}
@@ -189,30 +257,43 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::add_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
fn build_num_add_f64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_float_reg(dst)?;
let src1_reg = self.load_to_float_reg(src1)?;
let src2_reg = self.load_to_float_reg(src2)?;
ASM::add_freg64_freg64_freg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
fn build_num_sub_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::sub_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
fn build_eq_i64(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_general_reg(dst)?;
let src1_reg = self.load_to_general_reg(src1)?;
let src2_reg = self.load_to_general_reg(src2)?;
ASM::eq_reg64_reg64_reg64(&mut self.buf, dst_reg, src1_reg, src2_reg);
Ok(())
}
@@ -220,22 +301,28 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String> {
match lit {
Literal::Int(x) => {
let reg = self.claim_general_reg(sym)?;
let val = *x;
ASM::mov_reg64_imm64(&mut self.buf, reg, val);
Ok(())
}
Literal::Float(x) => {
let reg = self.claim_float_reg(sym)?;
let val = *x;
ASM::mov_freg64_imm64(&mut self.buf, &mut self.relocs, reg, val);
Ok(())
}
x => Err(format!("loading literal, {:?}, is not yet implemented", x)), x => Err(format!("loading literal, {:?}, is not yet implemented", x)),
} }
} }
fn free_symbol(&mut self, sym: &Symbol) { fn free_symbol(&mut self, sym: &Symbol) {
self.symbols_map.remove(sym); self.symbols_map.remove(sym);
for i in 0..self.gp_used_regs.len() { for i in 0..self.general_used_regs.len() {
let (reg, saved_sym) = self.gp_used_regs[i]; let (reg, saved_sym) = self.general_used_regs[i];
if saved_sym == *sym { if saved_sym == *sym {
self.gp_free_regs.push(reg); self.general_free_regs.push(reg);
self.gp_used_regs.remove(i); self.general_used_regs.remove(i);
break; break;
} }
} }
@@ -244,11 +331,16 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<
fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.get(sym);
match val {
Some(SymbolStorage::GeneralReg(reg)) if *reg == CC::GENERAL_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::GeneralReg(reg)) => {
// If it fits in a general purpose register, just copy it over.
// Technically this can be optimized to produce shorter instructions if less than 64 bits.
ASM::mov_reg64_reg64(&mut self.buf, CC::GENERAL_RETURN_REGS[0], *reg);
Ok(())
}
Some(SymbolStorage::FloatReg(reg)) if *reg == CC::FLOAT_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::FloatReg(reg)) => {
ASM::mov_freg64_freg64(&mut self.buf, CC::FLOAT_RETURN_REGS[0], *reg);
Ok(())
}
Some(x) => Err(format!(
@@ -262,46 +354,108 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>>
/// This impl block is for ir related instructions that need backend specific information.
/// For example, loading a symbol for doing a computation.
impl<
'a,
FloatReg: RegTrait,
GeneralReg: RegTrait,
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg>,
> Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
{
fn claim_general_reg(&mut self, sym: &Symbol) -> Result<GeneralReg, String> {
let reg = if !self.general_free_regs.is_empty() {
let free_reg = self.general_free_regs.pop().unwrap();
if CC::general_callee_saved(&free_reg) {
self.general_used_callee_saved_regs.insert(free_reg);
}
Ok(free_reg)
} else if !self.general_used_regs.is_empty() {
let (reg, sym) = self.general_used_regs.remove(0);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err("completely out of general purpose registers".to_string())
}?;
self.general_used_regs.push((reg, *sym));
self.symbols_map
.insert(*sym, SymbolStorage::GeneralReg(reg));
Ok(reg)
}
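claim_general_reg encodes the whole allocation policy in a few lines: take a free register if one exists (tracking callee-saved ones for the prologue), otherwise spill the oldest occupant, FIFO-style, via free_to_stack. The same policy in isolation, with stand-in types for the register and Symbol (a sketch, not the generic implementation):

```rust
/// Hedged sketch of the claim-or-spill policy with plain types:
/// registers are u8 stand-ins, symbols are u32 stand-ins.
fn claim_sketch(free: &mut Vec<u8>, used: &mut Vec<(u8, u32)>, sym: u32) -> Result<u8, String> {
    let reg = if let Some(r) = free.pop() {
        r // cheapest case: an unused register
    } else if !used.is_empty() {
        let (r, _victim) = used.remove(0); // evict the oldest claim (FIFO, not LRU yet)
        // the real code spills `_victim` to the stack via free_to_stack here
        r
    } else {
        return Err("completely out of registers".to_string());
    };
    used.push((reg, sym)); // newest claim goes to the back of the queue
    Ok(reg)
}
```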
fn claim_float_reg(&mut self, sym: &Symbol) -> Result<FloatReg, String> {
let reg = if !self.float_free_regs.is_empty() {
let free_reg = self.float_free_regs.pop().unwrap();
if CC::float_callee_saved(&free_reg) {
self.float_used_callee_saved_regs.insert(free_reg);
}
Ok(free_reg)
} else if !self.float_used_regs.is_empty() {
let (reg, sym) = self.float_used_regs.remove(0);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err("completely out of floating point registers".to_string())
}?;
self.float_used_regs.push((reg, *sym));
self.symbols_map.insert(*sym, SymbolStorage::FloatReg(reg));
Ok(reg)
}
fn load_to_general_reg(&mut self, sym: &Symbol) -> Result<GeneralReg, String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GeneralReg(reg)) => {
self.symbols_map
.insert(*sym, SymbolStorage::GeneralReg(reg));
Ok(reg)
}
Some(SymbolStorage::FloatReg(_reg)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::StackAndGeneralReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::StackAndFloatReg(_reg, _offset)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::Stack(offset)) => {
let reg = self.claim_general_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
ASM::mov_reg64_stack32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
None => Err(format!("Unknown symbol: {}", sym)),
}
}
fn load_to_float_reg(&mut self, sym: &Symbol) -> Result<FloatReg, String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GeneralReg(_reg)) => {
Err("Cannot load integer point symbol into FloatReg".to_string())
}
Some(SymbolStorage::FloatReg(reg)) => {
self.symbols_map.insert(*sym, SymbolStorage::FloatReg(reg));
Ok(reg)
}
Some(SymbolStorage::StackAndGeneralReg(_reg, _offset)) => {
Err("Cannot load integer point symbol into FloatReg".to_string())
}
Some(SymbolStorage::StackAndFloatReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::Stack(offset)) => {
let reg = self.claim_float_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
ASM::mov_freg64_stack32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
None => Err(format!("Unknown symbol: {}", sym)),
@@ -311,13 +465,23 @@ impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>>
fn free_to_stack(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GeneralReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_reg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::FloatReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_freg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::StackAndGeneralReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::StackAndFloatReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}

File diff suppressed because it is too large

View file

@@ -34,10 +34,21 @@ const INLINED_SYMBOLS: [Symbol; 4] = [
// These relocations likely will need a length.
// They may even need more definition, but this should be at least good enough for how we will use elf.
#[allow(dead_code)]
pub enum Relocation<'a> {
LocalData {
offset: u64,
// This should probably technically be a bumpalo::Vec.
// The problem is that it currently is built in a place that can't access the arena.
data: std::vec::Vec<u8>,
},
LinkedFunction {
offset: u64,
name: &'a str,
},
LinkedData {
offset: u64,
name: &'a str,
},
}
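Making Relocation public matters because finalize now returns these records to whoever writes the object file: a LocalData entry marks a hole in the emitted code (for example, the f64 literal loaded by mov_freg64_imm64 earlier) plus the bytes it must end up pointing at. A self-contained sketch of the fix-up step, assuming 8-byte little-endian absolute patches (hypothetical layout, not the real object writer):

```rust
/// Hedged sketch: place the blob in a data section, then patch the
/// 8-byte hole at `offset` in the code with the blob's location.
fn apply_local_data(code: &mut [u8], data_section: &mut Vec<u8>, offset: u64, data: &[u8]) {
    let target = data_section.len() as u64; // where the blob will land
    data_section.extend_from_slice(data);
    let hole = offset as usize;
    code[hole..hole + 8].copy_from_slice(&target.to_le_bytes());
}
```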
trait Backend<'a>
@@ -56,10 +67,10 @@ where
/// finalize does setup because things like stack size and jump locations are not known until the function is written.
/// For example, this can store the frame pointer and set up stack space.
/// finalize is run at the end of build_proc when all internal code is finalized.
fn finalize(&mut self) -> Result<(&'a [u8], &[&Relocation]), String>;
/// build_proc creates a procedure and outputs it to the wrapped object writer.
fn build_proc(&mut self, proc: Proc<'a>) -> Result<(&'a [u8], &[&Relocation]), String> {
self.reset();
// TODO: let the backend know of all the arguments.
// let start = std::time::Instant::now();
@@ -182,6 +193,9 @@ where
Layout::Builtin(Builtin::Int64) => {
self.build_num_add_i64(sym, &args[0], &args[1])
}
Layout::Builtin(Builtin::Float64) => {
self.build_num_add_f64(sym, &args[0], &args[1])
}
x => Err(format!("layout, {:?}, not implemented yet", x)), x => Err(format!("layout, {:?}, not implemented yet", x)),
} }
} }
@ -217,6 +231,15 @@ where
src2: &Symbol, src2: &Symbol,
) -> Result<(), String>; ) -> Result<(), String>;
/// build_num_add_f64 stores the sum of src1 and src2 into dst.
/// It only deals with inputs and outputs of f64 type.
fn build_num_add_f64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String>;
/// build_num_sub_i64 stores the `src1 - src2` difference into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_sub_i64(

View file

@@ -29,7 +29,8 @@ pub fn build_module<'a>(
..
} => {
let backend: Backend64Bit<
x86_64::X86_64GeneralReg,
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
@@ -46,7 +47,8 @@ pub fn build_module<'a>(
..
} => {
let backend: Backend64Bit<
aarch64::AArch64GeneralReg,
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
> = Backend::new(env, target)?;

View file

@@ -27,6 +27,17 @@ mod gen_num {
assert_evals_to!("0x1000_0000_0000_0000", 0x1000_0000_0000_0000, i64);
}
#[test]
fn f64_values() {
assert_evals_to!("0.0", 0.0, f64);
assert_evals_to!("-0.0", 0.0, f64);
assert_evals_to!("1.0", 1.0, f64);
assert_evals_to!("-1.0", -1.0, f64);
assert_evals_to!("3.1415926535897932", 3.1415926535897932, f64);
assert_evals_to!(&format!("{:0.1}", f64::MIN), f64::MIN, f64);
assert_evals_to!(&format!("{:0.1}", f64::MAX), f64::MAX, f64);
}
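The `{:0.1}` in the last two asserts is deliberate: Rust's plain Display for whole-valued floats prints no decimal point, so the generated Roc source would parse as an integer literal, while forcing one fractional digit keeps it a float. A quick illustration (plain std formatting, safe to assume):

```rust
// Without a precision the decimal point disappears for whole-valued floats:
assert_eq!(format!("{}", 1e3_f64), "1000");
// Forcing one fractional digit keeps the literal a float:
assert_eq!(format!("{:0.1}", 1e3_f64), "1000.0");
```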
#[test]
fn gen_add_i64() {
assert_evals_to!(
@@ -40,6 +51,19 @@ mod gen_num {
);
}
#[test]
fn gen_add_f64() {
assert_evals_to!(
indoc!(
r#"
1.1 + 2.4 + 3
"#
),
6.5,
f64
);
}
#[test]
fn gen_sub_i64() {
assert_evals_to!(
@@ -197,19 +221,6 @@ mod gen_num {
);
}
#[test]
fn gen_add_f64() {
assert_evals_to!(
indoc!(
r#"
1.1 + 2.4 + 3
"#
),
6.5,
f64
);
}
#[test]
fn gen_wrap_add_nums() {
assert_evals_to!(

View file

@@ -602,7 +602,21 @@ fn to_relevant_branch_help<'a>(
}
}
Wrapped::RecordOrSingleTagUnion => {
let sub_positions = arguments.into_iter().enumerate().map(
|(index, (pattern, _))| {
(
Path::Index {
index: index as u64,
tag_id,
path: Box::new(path.clone()),
},
Guard::NoGuard,
pattern,
)
},
);
start.extend(sub_positions);
start.extend(end);
}
Wrapped::MultiTagUnion => {
let sub_positions = arguments.into_iter().enumerate().map(
@@ -1037,6 +1051,10 @@ fn path_to_expr_help<'a>(
match variant {
NonRecursive(layouts) | Recursive(layouts) => layouts[*tag_id as usize],
NonNullableUnwrapped(fields) => {
debug_assert_eq!(*tag_id, 0);
fields
}
NullableWrapped {
nullable_id,
other_tags: layouts,

View file

@@ -834,6 +834,8 @@ impl Wrapped {
},
_ => Some(Wrapped::MultiTagUnion),
},
NonNullableUnwrapped(_) => Some(Wrapped::RecordOrSingleTagUnion),
NullableWrapped { .. } | NullableUnwrapped { .. } => {
Some(Wrapped::MultiTagUnion)
}
@@ -1127,9 +1129,11 @@ impl<'a> Stmt<'a> {
use Stmt::*;
match self {
Let(symbol, expr, _layout, cont) => alloc
.text("let ")
.append(symbol_to_doc(alloc, *symbol))
//.append(" : ")
//.append(alloc.text(format!("{:?}", layout)))
.append(" = ") .append(" = ")
.append(expr.to_doc(alloc)) .append(expr.to_doc(alloc))
.append(";") .append(";")
@ -2738,6 +2742,7 @@ pub fn with_hole<'a>(
use WrappedVariant::*; use WrappedVariant::*;
let (tag, layout) = match variant { let (tag, layout) = match variant {
Recursive { sorted_tag_layouts } => { Recursive { sorted_tag_layouts } => {
debug_assert!(sorted_tag_layouts.len() > 1);
let tag_id_symbol = env.unique_symbol();
opt_tag_id_symbol = Some(tag_id_symbol);
@@ -2758,6 +2763,7 @@ pub fn with_hole<'a>(
layouts.push(arg_layouts);
}
debug_assert!(layouts.len() > 1);
let layout =
Layout::Union(UnionLayout::Recursive(layouts.into_bump_slice()));
@@ -2771,6 +2777,35 @@ pub fn with_hole<'a>(
(tag, layout)
}
NonNullableUnwrapped {
fields,
tag_name: wrapped_tag_name,
} => {
debug_assert_eq!(tag_name, wrapped_tag_name);
opt_tag_id_symbol = None;
field_symbols = {
let mut temp =
Vec::with_capacity_in(field_symbols_temp.len(), arena);
temp.extend(field_symbols_temp.iter().map(|r| r.1));
temp.into_bump_slice()
};
let layout = Layout::Union(UnionLayout::NonNullableUnwrapped(fields));
let tag = Expr::Tag {
tag_layout: layout.clone(),
tag_name,
tag_id: tag_id as u8,
union_size,
arguments: field_symbols,
};
(tag, layout)
}
NonRecursive { sorted_tag_layouts } => {
let tag_id_symbol = env.unique_symbol();
opt_tag_id_symbol = Some(tag_id_symbol);
@@ -6138,6 +6173,7 @@ fn from_can_pattern_help<'a>(
temp
};
debug_assert!(layouts.len() > 1);
let layout =
Layout::Union(UnionLayout::Recursive(layouts.into_bump_slice()));
@@ -6150,6 +6186,51 @@ fn from_can_pattern_help<'a>(
}
}
NonNullableUnwrapped {
tag_name: w_tag_name,
fields,
} => {
debug_assert_eq!(&w_tag_name, tag_name);
ctors.push(Ctor {
tag_id: TagId(0_u8),
name: tag_name.clone(),
arity: fields.len(),
});
let union = crate::exhaustive::Union {
render_as: RenderAs::Tag,
alternatives: ctors,
};
let mut mono_args = Vec::with_capacity_in(arguments.len(), env.arena);
debug_assert_eq!(arguments.len(), argument_layouts.len());
let it = argument_layouts.iter();
for ((_, loc_pat), layout) in arguments.iter().zip(it) {
mono_args.push((
from_can_pattern_help(
env,
layout_cache,
&loc_pat.value,
assignments,
)?,
layout.clone(),
));
}
let layout = Layout::Union(UnionLayout::NonNullableUnwrapped(fields));
Pattern::AppliedTag {
tag_name: tag_name.clone(),
tag_id: tag_id as u8,
arguments: mono_args,
union,
layout,
}
}
NullableWrapped {
sorted_tag_layouts: ref tags,
nullable_id,

View file

@@ -43,8 +43,11 @@ pub enum UnionLayout<'a> {
/// e.g. `Result a e : [ Ok a, Err e ]`
NonRecursive(&'a [&'a [Layout<'a>]]),
/// A recursive tag union
/// e.g. `Expr : [ Sym Str, Add Expr Expr ]`
Recursive(&'a [&'a [Layout<'a>]]),
/// A recursive tag union with just one constructor
/// e.g. `RoseTree a : [ Tree a (List (RoseTree a)) ]`
NonNullableUnwrapped(&'a [Layout<'a>]),
/// A recursive tag union where the non-nullable variant(s) store the tag id
/// e.g. `FingerTree a : [ Empty, Single a, More (Some a) (FingerTree (Tuple a)) (Some a) ]`
/// see also: https://youtu.be/ip92VMpf_-A?t=164
@@ -484,7 +487,10 @@ impl<'a> Layout<'a> {
NonRecursive(tags) => tags
.iter()
.all(|tag_layout| tag_layout.iter().all(|field| field.safe_to_memcpy())),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => {
// a recursive union will always contain a pointer, and is thus not safe to memcpy
false
}
@@ -548,9 +554,10 @@ impl<'a> Layout<'a> {
.max()
.unwrap_or_default(),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => pointer_size,
}
}
Closure(_, closure_layout, _) => pointer_size + closure_layout.stack_size(pointer_size),
@@ -579,9 +586,10 @@ impl<'a> Layout<'a> {
.map(|x| x.alignment_bytes(pointer_size))
.max()
.unwrap_or(0),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => pointer_size,
}
}
Layout::Builtin(builtin) => builtin.alignment_bytes(pointer_size),
@@ -633,7 +641,10 @@ impl<'a> Layout<'a> {
.map(|ls| ls.iter())
.flatten()
.any(|f| f.contains_refcounted()),
Recursive(_)
| NullableWrapped { .. }
| NullableUnwrapped { .. }
| NonNullableUnwrapped(_) => true,
}
}
Closure(_, closure_layout, _) => closure_layout.contains_refcounted(),
@@ -1116,6 +1127,9 @@ fn layout_from_flat_type<'a>(
other_tags: many,
},
}
} else if tag_layouts.len() == 1 {
// drop the tag id
UnionLayout::NonNullableUnwrapped(&tag_layouts.pop().unwrap()[1..])
} else {
UnionLayout::Recursive(tag_layouts.into_bump_slice())
};
@@ -1220,6 +1234,10 @@ pub enum WrappedVariant<'a> {
nullable_name: TagName,
sorted_tag_layouts: Vec<'a, (TagName, &'a [Layout<'a>])>,
},
NonNullableUnwrapped {
tag_name: TagName,
fields: &'a [Layout<'a>],
},
NullableUnwrapped {
nullable_id: bool,
nullable_name: TagName,
@@ -1281,6 +1299,7 @@ impl<'a> WrappedVariant<'a> {
(!*nullable_id as u8, *other_fields)
}
}
NonNullableUnwrapped { fields, .. } => (0, fields),
}
}
@@ -1299,6 +1318,7 @@ impl<'a> WrappedVariant<'a> {
sorted_tag_layouts.len() + 1
}
NullableUnwrapped { .. } => 2,
NonNullableUnwrapped { .. } => 1,
}
}
}
@@ -1409,6 +1429,11 @@ pub fn union_sorted_tags_help<'a>(
} else {
UnionVariant::Unit
}
} else if opt_rec_var.is_some() {
UnionVariant::Wrapped(WrappedVariant::NonNullableUnwrapped {
tag_name,
fields: layouts.into_bump_slice(),
})
} else {
UnionVariant::Unwrapped(layouts)
}
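This is the interesting decision point for single-constructor unions: a recursive one like `RoseTree` must stay behind a pointer, so it becomes NonNullableUnwrapped, while a non-recursive single tag flattens into an Unwrapped struct with no tag id at all. A condensed sketch of that branch (stand-in condition names; the real code checks opt_rec_var and the sorted field layouts):

```rust
/// Hedged sketch of the one-constructor layout decision.
enum VariantSketch { Unit, NonNullableUnwrapped, Unwrapped }

fn one_tag_layout(has_fields: bool, is_recursive: bool) -> VariantSketch {
    if !has_fields {
        VariantSketch::Unit // e.g. [ Foo ] carries no data at all
    } else if is_recursive {
        VariantSketch::NonNullableUnwrapped // e.g. RoseTree: lives behind a pointer
    } else {
        VariantSketch::Unwrapped // e.g. [ Pair I64 I64 ]: a plain struct, no tag id
    }
}
```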
@@ -1517,6 +1542,7 @@ pub fn union_sorted_tags_help<'a>(
}
}
} else if is_recursive {
debug_assert!(answer.len() > 1);
WrappedVariant::Recursive {
sorted_tag_layouts: answer,
}
@@ -1585,6 +1611,7 @@ pub fn layout_from_tag_union<'a>(
let mut tag_layouts = Vec::with_capacity_in(tags.len(), arena);
tag_layouts.extend(tags.iter().map(|r| r.1));
debug_assert!(tag_layouts.len() > 1);
Layout::Union(UnionLayout::Recursive(tag_layouts.into_bump_slice()))
}
@@ -1603,6 +1630,7 @@ pub fn layout_from_tag_union<'a>(
}
NullableUnwrapped { .. } => todo!(),
NonNullableUnwrapped { .. } => todo!(),
}
}
}
@@ -1819,8 +1847,8 @@ pub fn list_layout_from_elem<'a>(
// If this was still a (List *) then it must have been an empty list
Ok(Layout::Builtin(Builtin::EmptyList))
}
_ => {
let elem_layout = Layout::from_var(env, elem_var)?;
// This is a normal list.
Ok(Layout::Builtin(Builtin::List(

View file

@@ -82,9 +82,14 @@ indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
criterion = "0.3"
rand = "0.8.2"
[[bench]]
name = "file_benchmark"
harness = false
[[bench]]
name = "edit_benchmark"
harness = false
# uncomment everything below if you have made changes to any shaders and

View file

@@ -0,0 +1,199 @@
use bumpalo::Bump;
use criterion::{criterion_group, criterion_main, Criterion};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use roc_editor::mvc::app_model::AppModel;
use roc_editor::mvc::ed_model::{EdModel, Position, RawSelection};
use roc_editor::mvc::update::handle_new_char;
use roc_editor::text_buffer;
use roc_editor::text_buffer::TextBuffer;
use ropey::Rope;
use std::cmp::min;
use std::path::Path;
// duplicate inside mvc::update
fn mock_app_model(
text_buf: TextBuffer,
caret_pos: Position,
selection_opt: Option<RawSelection>,
) -> AppModel {
AppModel {
ed_model_opt: Some(EdModel {
text_buf,
caret_pos,
selection_opt,
glyph_dim_rect_opt: None,
has_focus: true,
}),
}
}
fn text_buffer_from_str(lines_str: &str) -> TextBuffer {
TextBuffer {
text_rope: Rope::from_str(lines_str),
path_str: "".to_owned(),
}
}
pub fn char_insert_bench(c: &mut Criterion) {
let text_buf = text_buffer_from_str("");
let caret_pos = Position { line: 0, column: 0 };
let selection_opt: Option<RawSelection> = None;
let mut app_model = mock_app_model(text_buf, caret_pos, selection_opt);
c.bench_function("single char insert, small buffer", |b| {
b.iter(|| handle_new_char(&mut app_model, &'a'))
});
}
pub fn char_pop_bench(c: &mut Criterion) {
let nr_lines = 50000;
let mut text_buf = buf_from_dummy_file(nr_lines);
let mut rand_gen_pos = StdRng::seed_from_u64(44);
c.bench_function(
&format!("single char pop, {} lines", text_buf.nr_of_lines()),
|b| {
b.iter(|| {
let max_line_nr = text_buf.nr_of_lines();
let rand_line_nr = rand_gen_pos.gen_range(0..max_line_nr);
let max_col = text_buf
.line_len(rand_line_nr)
.expect("Failed to retrieve line length.");
let caret_pos = Position {
line: rand_line_nr,
column: rand_gen_pos.gen_range(0..max_col),
};
text_buf.pop_char(caret_pos);
})
},
);
}
fn get_all_lines_helper(nr_lines: usize, c: &mut Criterion) {
let text_buf = buf_from_dummy_file(nr_lines);
let arena = Bump::new();
c.bench_function(
&format!("get all {:?} lines from textbuffer", nr_lines),
|b| b.iter(|| text_buf.all_lines(&arena)),
);
}
fn get_all_lines_bench(c: &mut Criterion) {
get_all_lines_helper(10000, c)
}
fn get_line_len_helper(nr_lines: usize, c: &mut Criterion) {
let text_buf = buf_from_dummy_file(nr_lines);
let mut rand_gen = StdRng::seed_from_u64(45);
c.bench_function(
&format!("get random line len from {:?}-line textbuffer", nr_lines),
|b| b.iter(|| text_buf.line_len(rand_gen.gen_range(0..nr_lines)).unwrap()),
);
}
fn get_line_len_bench(c: &mut Criterion) {
get_line_len_helper(10000, c)
}
fn get_line_helper(nr_lines: usize, c: &mut Criterion) {
let text_buf = buf_from_dummy_file(nr_lines);
let mut rand_gen = StdRng::seed_from_u64(46);
c.bench_function(
&format!("get random line from {:?}-line textbuffer", nr_lines),
|b| b.iter(|| text_buf.line(rand_gen.gen_range(0..nr_lines)).unwrap()),
);
}
fn get_line_bench(c: &mut Criterion) {
get_line_helper(10000, c)
}
pub fn del_select_bench(c: &mut Criterion) {
let nr_lines = 25000000;
let mut text_buf = buf_from_dummy_file(nr_lines);
let mut rand_gen = StdRng::seed_from_u64(47);
c.bench_function(
&format!(
"delete rand selection, {}-line file",
text_buf.nr_of_lines()
),
|b| {
b.iter(|| {
let rand_sel = gen_rand_selection(&mut rand_gen, &text_buf);
text_buf.del_selection(rand_sel).unwrap();
})
},
);
}
fn gen_rand_selection(rand_gen: &mut StdRng, text_buf: &TextBuffer) -> RawSelection {
let max_line_nr = text_buf.nr_of_lines();
let rand_line_nr_a = rand_gen.gen_range(0..max_line_nr - 3);
let max_col_a = text_buf.line_len(rand_line_nr_a).expect(&format!(
"Failed to retrieve line length. For line {}, with {} lines in buffer",
rand_line_nr_a,
text_buf.nr_of_lines()
));
let rand_col_a = if max_col_a > 0 {
rand_gen.gen_range(0..max_col_a)
} else {
0
};
let max_sel_end = min(rand_line_nr_a + 5, max_line_nr);
let rand_line_nr_b = rand_gen.gen_range((rand_line_nr_a + 1)..max_sel_end);
let max_col_b = text_buf.line_len(rand_line_nr_b).expect(&format!(
"Failed to retrieve line length. For line {}, with {} lines in buffer",
rand_line_nr_b,
text_buf.nr_of_lines()
));
let rand_col_b = if max_col_b > 0 {
rand_gen.gen_range(0..max_col_b)
} else {
0
};
RawSelection {
start_pos: Position {
line: rand_line_nr_a,
column: rand_col_a,
},
end_pos: Position {
line: rand_line_nr_b,
column: rand_col_b,
},
}
}
fn buf_from_dummy_file(nr_lines: usize) -> TextBuffer {
let path_str = format!("benches/resources/{}_lines.roc", nr_lines);
text_buffer::from_path(Path::new(&path_str)).expect("Failed to read file at given path.")
}
//TODO remove all random generation from inside measured execution block
//criterion_group!(benches, del_select_bench);
criterion_group!(
benches,
char_pop_bench,
char_insert_bench,
get_all_lines_bench,
get_line_len_bench,
get_line_bench,
del_select_bench
);
criterion_main!(benches);

View file

@@ -0,0 +1,123 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use rand::distributions::Alphanumeric;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use roc_editor::mvc::app_model::AppModel;
use roc_editor::mvc::ed_model::{EdModel, Position, RawSelection};
use roc_editor::mvc::update::handle_new_char;
use roc_editor::text_buffer;
use roc_editor::text_buffer::TextBuffer;
use ropey::Rope;
use std::fs::File;
use std::io::Write;
use std::path::Path;
// duplicate inside mvc::update
fn mock_app_model(
text_buf: TextBuffer,
caret_pos: Position,
selection_opt: Option<RawSelection>,
) -> AppModel {
AppModel {
ed_model_opt: Some(EdModel {
text_buf,
caret_pos,
selection_opt,
glyph_dim_rect_opt: None,
has_focus: true,
}),
}
}
fn text_buffer_from_str(lines_str: &str) -> TextBuffer {
TextBuffer {
text_rope: Rope::from_str(lines_str),
path_str: "".to_owned(),
}
}
pub fn char_insert_benchmark(c: &mut Criterion) {
let text_buf = text_buffer_from_str("");
let caret_pos = Position { line: 0, column: 0 };
let selection_opt: Option<RawSelection> = None;
let mut app_model = mock_app_model(text_buf, caret_pos, selection_opt);
c.bench_function("single char insert, small buffer", |b| {
b.iter(|| handle_new_char(&mut app_model, &'a'))
});
}
static ROC_SOURCE_START: &str = "interface LongStrProvider
exposes [ longStr ]
imports []
longStr : Str
longStr =
\"\"\"";
static ROC_SOURCE_END: &str = "\"\"\"";
fn line_count(lines: &str) -> usize {
lines.matches("\n").count()
}
pub fn gen_file(nr_lines: usize) {
let nr_of_str_lines = nr_lines - line_count(ROC_SOURCE_START);
let path_str = format!("benches/resources/{:?}_lines.roc", nr_lines);
let path = Path::new(&path_str);
let display = path.display();
// Open a file in write-only mode, returns `io::Result<File>`
let mut file = match File::create(&path) {
Err(why) => panic!("couldn't create {}: {}", display, why),
Ok(file) => file,
};
file.write(ROC_SOURCE_START.as_bytes())
.expect("Failed to write String to file.");
let mut rand_gen_line = StdRng::seed_from_u64(42);
for _ in 0..nr_of_str_lines {
let line_len = rand_gen_line.gen_range(1..90);
let char_seed = rand_gen_line.gen_range(0..1000);
let mut rand_string: String = StdRng::seed_from_u64(char_seed)
.sample_iter(&Alphanumeric)
.take(line_len)
.map(char::from)
.collect();
rand_string.push('\n');
file.write(rand_string.as_bytes())
.expect("Failed to write String to file.");
}
file.write(ROC_SOURCE_END.as_bytes())
.expect("Failed to write String to file.");
}
fn file_read_bench_helper(nr_lines: usize, c: &mut Criterion) {
let path_str = format!("benches/resources/{}_lines.roc", nr_lines);
text_buffer::from_path(Path::new(&path_str)).expect("Failed to read file at given path.");
c.bench_function(
&format!("read {:?} line file into textbuffer", nr_lines),
|b| b.iter(|| text_buffer::from_path(black_box(Path::new(&path_str)))),
);
}
fn file_read_bench(c: &mut Criterion) {
// generate dummy files
/*let lines_vec = vec![100, 500, 1000, 10000, 50000, 100000, 25000000];
for nr_lines in lines_vec.iter(){
gen_file(*nr_lines);
}*/
file_read_bench_helper(10, c)
}
criterion_group!(benches, file_read_bench);
criterion_main!(benches);

View file

@@ -1,50 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use roc_editor::mvc::app_model::AppModel;
use roc_editor::mvc::ed_model::{EdModel, Position, RawSelection};
use roc_editor::mvc::update::handle_new_char;
use roc_editor::text_buffer::TextBuffer;
use ropey::Rope;
// duplicate inside mvc::update
fn mock_app_model(
text_buf: TextBuffer,
caret_pos: Position,
selection_opt: Option<RawSelection>,
) -> AppModel {
AppModel {
ed_model_opt: Some(EdModel {
text_buf,
caret_pos,
selection_opt,
glyph_dim_rect_opt: None,
has_focus: true,
}),
}
}
fn text_buffer_from_str(lines_str: &str) -> TextBuffer {
TextBuffer {
text_rope: Rope::from_str(lines_str),
path_str: "".to_owned(),
}
}
pub fn char_insert_benchmark(c: &mut Criterion) {
let text_buf = text_buffer_from_str("");
let caret_pos = Position { line: 0, column: 0 };
let selection_opt: Option<RawSelection> = None;
let mut app_model = mock_app_model(text_buf, caret_pos, selection_opt);
c.bench_function("single char insert, small buffer", |b| {
b.iter(|| handle_new_char(&mut app_model, &'a'))
});
}
pub fn file_open_benchmark(c: &mut Criterion) {
ed_model::init_model(path)
//TODO continue here
}
criterion_group!(benches, char_insert_benchmark);
criterion_main!(benches);

View file

@@ -0,0 +1,11 @@
interface LongStrProvider
exposes [ longStr ]
imports []
longStr : Str
longStr =
"""7vntt4wlBKiVkNss19DZlOfmSAyIzO5Ph8eckYgnctYDersOFs3AWOPHcONxI58DoTEwGKNLGkhrxwCD
gWxYsX9hlEuQ0vI4twHMqgj8F
Ox4pVYIxku15v1KaWahgjkJ8EBXMWhe5m2519wpEtP
HtaqU0XzVu1ix3jGAZ66UugNKJrVP8RVQm
"""

View file

@@ -0,0 +1,26 @@
System info:
- CPU: Intel i7 4770k
- SSD: Samsung 970 EVO PLUS M.2 1TB
- OS: Ubuntu 20.04
c.bench_function(
"read file into textbuffer",
|b| b.iter(|| text_buffer::from_path(black_box(Path::new(path_str))))
);
10 lines, 285 B time: [3.2343 us]
100 lines, 4.2 KiB time: [6.1810 us]
500 lines, 22.2 KiB time: [15.689 us]
1000 lines, 44.6 KiB time: [29.591 us]
10000 lines, 448 KiB time: [376.22 us]
50000 lines, 2.2 MiB time: [2.0329 ms]
100000 lines, 4.4 MiB time: [4.4221 ms]
25000000 lines, 1.1 GiB time: [1.1333 s]

View file

@@ -46,12 +46,12 @@ pub mod error;
pub mod graphics;
mod keyboard_input;
pub mod lang;
mod mvc;
//pub mod mvc; // for benchmarking
mod resources;
mod selection;
mod text_buffer;
//pub mod text_buffer; // for benchmarking
mod util;
mod vec_result;

View file

@@ -3,6 +3,7 @@ use crate::graphics::primitives::rect::Rect;
use crate::text_buffer;
use crate::text_buffer::TextBuffer;
use std::cmp::Ordering;
use std::fmt;
use std::path::Path;
#[derive(Debug)]
@@ -55,3 +56,13 @@ pub struct RawSelection {
pub start_pos: Position,
pub end_pos: Position,
}
impl std::fmt::Display for RawSelection {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"RawSelection: start_pos: line:{} col:{}, end_pos: line:{} col:{}",
self.start_pos.line, self.start_pos.column, self.end_pos.line, self.end_pos.column
)
}
}
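With Display implemented, a selection can be formatted directly in logs and error messages. A small usage sketch (positions chosen arbitrarily):

```rust
let sel = RawSelection {
    start_pos: Position { line: 0, column: 4 },
    end_pos: Position { line: 2, column: 1 },
};
// Prints: RawSelection: start_pos: line:0 col:4, end_pos: line:2 col:1
println!("{}", sel);
```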

View file

@@ -91,7 +91,7 @@ impl TextBuffer {
}
// expensive function, don't use it if it can be done with a specialized, more efficient function
// TODO use pool allocation here
pub fn all_lines<'a>(&self, arena: &'a Bump) -> BumpString<'a> {
let mut lines = BumpString::with_capacity_in(self.text_rope.len_chars(), arena);