Merge branch 'parse-pkg-import' into task_can

commit 7bbf9812f7 by Richard Feldman, 2020-11-26 21:24:50 -05:00
54 changed files with 4955 additions and 1263 deletions


@ -16,6 +16,8 @@ To run the test suite (via `cargo test`), you additionally need to install:
* [`valgrind`](https://www.valgrind.org/) (needs special treatment to [install on macOS](https://stackoverflow.com/a/61359781))
Alternatively, you can use `cargo test --no-fail-fast` or `cargo test -p specific_tests` to skip over the valgrind failures & tests.
For debugging LLVM IR, we use [DebugIR](https://github.com/vaivaswatha/debugir). This dependency is only required to build with the `--debug` flag, and for normal development you should be fine without it.
### libunwind & libc++-dev
macOS systems should already have `libunwind`, but other systems will need to install it (on Ubuntu, this can be done with `sudo apt-get install libunwind-dev`).

Cargo.lock generated

@ -145,7 +145,7 @@ dependencies = [
"cfg-if 0.1.10",
"libc",
"miniz_oxide",
"object",
"object 0.20.0",
"rustc-demangle",
]
@ -481,6 +481,15 @@ dependencies = [
"objc",
]
[[package]]
name = "crc32fast"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "criterion"
version = "0.3.3"
@ -761,6 +770,18 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
[[package]]
name = "flate2"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
dependencies = [
"cfg-if 1.0.0",
"crc32fast",
"libc",
"miniz_oxide",
]
[[package]]
name = "fnv"
version = "1.0.7"
@ -1721,6 +1742,18 @@ version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
[[package]]
name = "object"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
dependencies = [
"crc32fast",
"flate2",
"indexmap",
"wasmparser",
]
[[package]]
name = "once_cell"
version = "1.4.1"
@ -2577,6 +2610,45 @@ dependencies = [
"tokio",
]
[[package]]
name = "roc_gen_dev"
version = "0.1.0"
dependencies = [
"bumpalo",
"im",
"im-rc",
"indoc",
"inlinable_string",
"itertools",
"libc",
"libloading",
"maplit",
"object 0.22.0",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_build",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_load",
"roc_module",
"roc_mono",
"roc_parse",
"roc_problem",
"roc_region",
"roc_reporting",
"roc_solve",
"roc_std",
"roc_types",
"roc_unify",
"roc_uniq",
"target-lexicon",
"tempfile",
"tokio",
]
[[package]]
name = "roc_load"
version = "0.1.0"
@ -3534,6 +3606,12 @@ version = "0.2.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307"
[[package]]
name = "wasmparser"
version = "0.57.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6"
[[package]]
name = "wayland-client"
version = "0.23.6"


@ -18,6 +18,7 @@ members = [
"compiler/mono",
"compiler/load",
"compiler/gen",
"compiler/gen_dev",
"compiler/build",
"compiler/arena_pool",
"vendor/ena",


@ -24,6 +24,7 @@ pub fn build_file(
src_dir: PathBuf,
roc_file_path: PathBuf,
opt_level: OptLevel,
emit_debug_info: bool,
link_type: LinkType,
) -> Result<PathBuf, LoadingProblem> {
let compilation_start = SystemTime::now();
@ -95,6 +96,7 @@ pub fn build_file(
Triple::host(),
&app_o_file,
opt_level,
emit_debug_info,
);
println!("\nSuccess! 🎉\n\n\t{}\n", app_o_file.display());


@ -14,6 +14,7 @@ use target_lexicon::Triple;
pub mod build;
pub mod repl;
pub static FLAG_DEBUG: &str = "debug";
pub static FLAG_OPTIMIZE: &str = "optimize";
pub static FLAG_ROC_FILE: &str = "ROC_FILE";
pub static DIRECTORY_OR_FILES: &str = "DIRECTORY_OR_FILES";
@ -34,6 +35,12 @@ pub fn build_app<'a>() -> App<'a> {
.help("Optimize the compiled program to run faster. (Optimization takes time to complete.)")
.required(false),
)
.arg(
Arg::with_name(FLAG_DEBUG)
.long(FLAG_DEBUG)
.help("Store LLVM debug information in the generated program")
.required(false),
)
)
.subcommand(App::new("run")
.about("Build and run a program")
@ -48,6 +55,12 @@ pub fn build_app<'a>() -> App<'a> {
.help("Optimize the compiled program to run faster. (Optimization takes time to complete.)")
.required(false),
)
.arg(
Arg::with_name(FLAG_DEBUG)
.long(FLAG_DEBUG)
.help("Store LLVM debug information in the generated program")
.required(false),
)
)
.subcommand(App::new("repl")
.about("Launch the interactive Read Eval Print Loop (REPL)")
@ -70,6 +83,8 @@ pub fn build(target: &Triple, matches: &ArgMatches, run_after_build: bool) -> io
} else {
OptLevel::Normal
};
let emit_debug_info = matches.is_present(FLAG_DEBUG);
let path = Path::new(filename).canonicalize().unwrap();
let src_dir = path.parent().unwrap().canonicalize().unwrap();
@ -92,7 +107,14 @@ pub fn build(target: &Triple, matches: &ArgMatches, run_after_build: bool) -> io
}
});
let binary_path = build::build_file(target, src_dir, path, opt_level, LinkType::Executable)
let binary_path = build::build_file(
target,
src_dir,
path,
opt_level,
emit_debug_info,
LinkType::Executable,
)
.expect("TODO gracefully handle build_file failing");
if run_after_build {

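The `--debug` argument above is declared twice with identical builder calls, once for `build` and once for `run`. A small helper could construct it once; this is a sketch against the clap API already used in this hunk, and the helper name `debug_flag` is hypothetical (not part of the commit):

fn debug_flag<'a>() -> Arg<'a> {
    // Same three builder calls as in the two subcommands above.
    Arg::with_name(FLAG_DEBUG)
        .long(FLAG_DEBUG)
        .help("Store LLVM debug information in the generated program")
        .required(false)
}

Each subcommand would then call `.arg(debug_flag())` instead of repeating the definition.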

@ -4,6 +4,8 @@ use inkwell::module::Module;
use inkwell::targets::{CodeModel, FileType, RelocMode};
use libloading::{Error, Library};
use roc_gen::llvm::build::OptLevel;
use std::collections::HashMap;
use std::env;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Output};
@ -25,7 +27,6 @@ pub fn link(
) -> io::Result<(Child, PathBuf)> {
match target {
Triple {
architecture: Architecture::X86_64,
operating_system: OperatingSystem::Linux,
..
} => link_linux(target, output_path, input_paths, link_type),
@ -46,9 +47,11 @@ pub fn rebuild_host(host_input_path: &Path) {
let cargo_host_src = host_input_path.with_file_name("Cargo.toml");
let host_dest = host_input_path.with_file_name("host.o");
let env_path = env::var("PATH").unwrap_or_else(|_| "".to_string());
// Compile host.c
let output = Command::new("clang")
.env_clear()
.env("PATH", &env_path)
.args(&[
"-c",
c_host_src.to_str().unwrap(),
@ -75,6 +78,7 @@ pub fn rebuild_host(host_input_path: &Path) {
let output = Command::new("ld")
.env_clear()
.env("PATH", &env_path)
.args(&[
"-r",
"-L",
@ -103,6 +107,7 @@ pub fn rebuild_host(host_input_path: &Path) {
let output = Command::new("ld")
.env_clear()
.env("PATH", &env_path)
.args(&[
"-r",
c_host_dest.to_str().unwrap(),
@ -145,18 +150,31 @@ fn link_linux(
input_paths: &[&str],
link_type: LinkType,
) -> io::Result<(Child, PathBuf)> {
let libcrt_path = if Path::new("/usr/lib/x86_64-linux-gnu").exists() {
Path::new("/usr/lib/x86_64-linux-gnu")
let usr_lib_path = Path::new("/usr/lib").to_path_buf();
let usr_lib_gnu_path = usr_lib_path.join(format!("{}-linux-gnu", target.architecture));
let lib_gnu_path = Path::new("/lib/").join(format!("{}-linux-gnu", target.architecture));
let libcrt_path = if usr_lib_gnu_path.exists() {
&usr_lib_gnu_path
} else {
Path::new("/usr/lib")
&usr_lib_path
};
let libgcc_path = if Path::new("/lib/x86_64-linux-gnu/libgcc_s.so.1").exists() {
Path::new("/lib/x86_64-linux-gnu/libgcc_s.so.1")
} else if Path::new("/usr/lib/x86_64-linux-gnu/libgcc_s.so.1").exists() {
Path::new("/usr/lib/x86_64-linux-gnu/libgcc_s.so.1")
let libgcc_name = "libgcc_s.so.1";
let libgcc_path = if lib_gnu_path.join(libgcc_name).exists() {
lib_gnu_path.join(libgcc_name)
} else if usr_lib_gnu_path.join(libgcc_name).exists() {
usr_lib_gnu_path.join(libgcc_name)
} else {
Path::new("/usr/lib/libgcc_s.so.1")
usr_lib_path.join(libgcc_name)
};
let ld_linux = match target.architecture {
Architecture::X86_64 => "/lib64/ld-linux-x86-64.so.2",
Architecture::Aarch64(_) => "/lib/ld-linux-aarch64.so.1",
_ => panic!(
"TODO gracefully handle unsupported linux architecture: {:?}",
target.architecture
),
};
let mut soname;
@ -194,12 +212,20 @@ fn link_linux(
}
};
let env_path = env::var("PATH").unwrap_or_else(|_| "".to_string());
// NOTE: order of arguments to `ld` matters here!
// The `-l` flags should go after the `.o` arguments
Ok((
Command::new("ld")
// Don't allow LD_ env vars to affect this
.env_clear()
.env("PATH", &env_path)
// Keep NIX_ env vars
.envs(
env::vars()
.filter(|&(ref k, _)| k.starts_with("NIX_"))
.collect::<HashMap<String, String>>(),
)
.args(&[
"-arch",
arch_str(target),
@ -207,7 +233,7 @@ fn link_linux(
libcrt_path.join("crtn.o").to_str().unwrap(),
])
.args(&base_args)
.args(&["-dynamic-linker", "/lib64/ld-linux-x86-64.so.2"])
.args(&["-dynamic-linker", ld_linux])
.args(input_paths)
.args(&[
// Libraries - see https://github.com/rtfeldman/roc/pull/554#discussion_r496365925
@ -220,6 +246,7 @@ fn link_linux(
"-lutil",
"-lc_nonshared",
"-lc++",
"-lc++abi",
"-lunwind",
libgcc_path.to_str().unwrap(),
// Output


@ -20,6 +20,7 @@ pub fn gen_from_mono_module(
target: Triple,
app_o_file: &Path,
opt_level: OptLevel,
emit_debug_info: bool,
) {
use roc_reporting::report::{
can_problem, mono_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE,
@ -159,15 +160,78 @@ pub fn gen_from_mono_module(
// Uncomment this to see the module's optimized LLVM instruction output:
// env.module.print_to_stderr();
// annotate the LLVM IR output with debug info
// so errors are reported with the line number of the LLVM source
if emit_debug_info {
module.strip_debug_info();
let mut app_ll_file = std::path::PathBuf::from(app_o_file);
app_ll_file.set_extension("ll");
let mut app_ll_dbg_file = std::path::PathBuf::from(app_o_file);
app_ll_dbg_file.set_extension("dbg.ll");
let mut app_bc_file = std::path::PathBuf::from(app_o_file);
app_bc_file.set_extension("bc");
use std::process::Command;
// write the ll code to a file, so we can modify it
module.print_to_file(&app_ll_file).unwrap();
// run the debugir https://github.com/vaivaswatha/debugir tool
match Command::new("debugir")
.env_clear()
.args(&[app_ll_file.to_str().unwrap()])
.output()
{
Ok(_) => {}
Err(error) => {
use std::io::ErrorKind;
match error.kind() {
ErrorKind::NotFound => panic!(
r"I could not find the `debugir` tool on the PATH, install it from https://github.com/vaivaswatha/debugir"
),
_ => panic!("{:?}", error),
}
}
}
// assemble the .ll into a .bc
let _ = Command::new("llvm-as-10")
.env_clear()
.args(&[
app_ll_dbg_file.to_str().unwrap(),
"-o",
app_bc_file.to_str().unwrap(),
])
.output()
.unwrap();
// write the .o file. Note that this builds the .o for the local machine,
// and ignores the `target_machine` entirely.
let _ = Command::new("llc-10")
.env_clear()
.args(&[
"-filetype=obj",
app_bc_file.to_str().unwrap(),
"-o",
app_o_file.to_str().unwrap(),
])
.output()
.unwrap();
} else {
// Emit the .o file
let reloc = RelocMode::Default;
let model = CodeModel::Default;
let target_machine = target::target_machine(&target, opt_level.into(), reloc, model).unwrap();
let target_machine =
target::target_machine(&target, opt_level.into(), reloc, model).unwrap();
target_machine
.write_to_file(&env.module, FileType::Object, &app_o_file)
.expect("Writing .o file failed");
}
}
pub struct FunctionIterator<'ctx> {

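The `--debug` path above shells out to `debugir`, then to `llvm-as-10` and `llc-10`, with the LLVM tool names hard-coded to the `-10` suffix. A hedged sketch of how the lookup could fall back to the unsuffixed binaries; the helper name `llvm_tool` is hypothetical and not part of this commit:

use std::process::Command;

// Prefer the versioned LLVM tool if it runs, otherwise fall back to the plain name.
fn llvm_tool(versioned: &str, plain: &str) -> Command {
    if Command::new(versioned).arg("--version").output().is_ok() {
        Command::new(versioned)
    } else {
        Command::new(plain)
    }
}

// e.g. llvm_tool("llvm-as-10", "llvm-as") and llvm_tool("llc-10", "llc"), invoked
// with the same arguments as the hard-coded commands above.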

@ -14,6 +14,11 @@ pub fn target_triple_str(target: &Triple) -> &'static str {
operating_system: OperatingSystem::Linux,
..
} => "x86_64-unknown-linux-gnu",
Triple {
architecture: Architecture::Aarch64(_),
operating_system: OperatingSystem::Linux,
..
} => "aarch64-unknown-linux-gnu",
Triple {
architecture: Architecture::X86_64,
operating_system: OperatingSystem::Darwin,
@ -36,6 +41,10 @@ pub fn arch_str(target: &Triple) -> &'static str {
"x86-64"
}
Architecture::Aarch64(_) => {
Target::initialize_aarch64(&InitializationConfig::default());
"aarch64"
}
Architecture::Arm(_) if cfg!(feature = "target-arm") => {
// NOTE: why not enable arm and wasm by default?
//
@ -67,7 +76,7 @@ pub fn target_machine(
Target::from_name(arch).unwrap().create_target_machine(
&TargetTriple::create(target_triple_str(target)),
arch,
"generic",
"", // TODO: this probably should be TargetMachine::get_host_cpu_features() to enable all features.
opt,
reloc,

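The TODO above suggests enabling the host CPU's features instead of passing an empty feature string. A sketch of what that call site could look like, assuming the inkwell `TargetMachine` host queries named in the comment and the `model` parameter from the surrounding function; this is only appropriate when compiling for the host, not when cross-compiling:

use inkwell::targets::TargetMachine;

// Query the host CPU name and feature string, per the TODO above.
let cpu = TargetMachine::get_host_cpu_name();
let features = TargetMachine::get_host_cpu_features();

Target::from_name(arch).unwrap().create_target_machine(
    &TargetTriple::create(target_triple_str(target)),
    cpu.to_str().unwrap_or("generic"),
    features.to_str().unwrap_or(""),
    opt,
    reloc,
    model,
)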

@ -489,9 +489,22 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
),
);
// walkRight : List elem, (elem -> accum -> accum), accum -> accum
// walk : List elem, (elem -> accum -> accum), accum -> accum
add_type(
Symbol::LIST_WALK_RIGHT,
Symbol::LIST_WALK,
top_level_function(
vec![
list_type(flex(TVAR1)),
closure(vec![flex(TVAR1), flex(TVAR2)], TVAR3, Box::new(flex(TVAR2))),
flex(TVAR2),
],
Box::new(flex(TVAR2)),
),
);
// walkBackwards : List elem, (elem -> accum -> accum), accum -> accum
add_type(
Symbol::LIST_WALK_BACKWARDS,
top_level_function(
vec![
list_type(flex(TVAR1)),


@ -777,11 +777,38 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
)
});
// walkRight : Attr (* | u) (List (Attr u a))
// walk : Attr (* | u) (List (Attr u a))
// , Attr Shared (Attr u a -> b -> b)
// , b
// -> b
add_type(Symbol::LIST_WALK_RIGHT, {
add_type(Symbol::LIST_WALK, {
let_tvars! { u, a, b, star1, closure };
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
container(star1, vec![u]),
SolvedType::Apply(Symbol::LIST_LIST, vec![attr_type(u, a)]),
],
),
shared(SolvedType::Func(
vec![attr_type(u, a), flex(b)],
Box::new(flex(closure)),
Box::new(flex(b)),
)),
flex(b),
],
flex(b),
)
});
// walkBackwards : Attr (* | u) (List (Attr u a))
// , Attr Shared (Attr u a -> b -> b)
// , b
// -> b
add_type(Symbol::LIST_WALK_BACKWARDS, {
let_tvars! { u, a, b, star1, closure };
unique_function(


@ -71,7 +71,8 @@ pub fn builtin_defs(var_store: &mut VarStore) -> MutMap<Symbol, Def> {
Symbol::LIST_JOIN => list_join,
Symbol::LIST_MAP => list_map,
Symbol::LIST_KEEP_IF => list_keep_if,
Symbol::LIST_WALK_RIGHT => list_walk_right,
Symbol::LIST_WALK => list_walk,
Symbol::LIST_WALK_BACKWARDS => list_walk_backwards,
Symbol::NUM_ADD => num_add,
Symbol::NUM_ADD_CHECKED => num_add_checked,
Symbol::NUM_ADD_WRAP => num_add_wrap,
@ -1313,14 +1314,43 @@ fn list_join(symbol: Symbol, var_store: &mut VarStore) -> Def {
)
}
/// List.walkRight : List elem, (elem -> accum -> accum), accum -> accum
fn list_walk_right(symbol: Symbol, var_store: &mut VarStore) -> Def {
/// List.walk : List elem, (elem -> accum -> accum), accum -> accum
fn list_walk(symbol: Symbol, var_store: &mut VarStore) -> Def {
let list_var = var_store.fresh();
let func_var = var_store.fresh();
let accum_var = var_store.fresh();
let body = RunLowLevel {
op: LowLevel::ListWalkRight,
op: LowLevel::ListWalk,
args: vec![
(list_var, Var(Symbol::ARG_1)),
(func_var, Var(Symbol::ARG_2)),
(accum_var, Var(Symbol::ARG_3)),
],
ret_var: accum_var,
};
defn(
symbol,
vec![
(list_var, Symbol::ARG_1),
(func_var, Symbol::ARG_2),
(accum_var, Symbol::ARG_3),
],
var_store,
body,
accum_var,
)
}
/// List.walkBackwards : List elem, (elem -> accum -> accum), accum -> accum
fn list_walk_backwards(symbol: Symbol, var_store: &mut VarStore) -> Def {
let list_var = var_store.fresh();
let func_var = var_store.fresh();
let accum_var = var_store.fresh();
let body = RunLowLevel {
op: LowLevel::ListWalkBackwards,
args: vec![
(list_var, Var(Symbol::ARG_1)),
(func_var, Var(Symbol::ARG_2)),


@ -37,10 +37,6 @@ pub enum Newlines {
No,
}
pub fn fmt_annotation<'a>(buf: &mut String<'a>, annotation: &'a TypeAnnotation<'a>, indent: u16) {
annotation.format(buf, indent);
}
pub trait Formattable<'a> {
fn is_multiline(&self) -> bool;
@ -85,13 +81,16 @@ where
}
macro_rules! format_sequence {
($buf: expr, $indent:expr, $start:expr, $end:expr, $items:expr, $t:ident) => {
// is it a multiline type annotation?
if $items.iter().any(|item| item.value.is_multiline()) {
($buf: expr, $indent:expr, $start:expr, $end:expr, $items:expr, $final_comments:expr, $newline:expr, $t:ident) => {
let is_multiline =
$items.iter().any(|item| item.value.is_multiline()) || !$final_comments.is_empty();
if is_multiline {
let braces_indent = $indent + INDENT;
let item_indent = braces_indent + INDENT;
if ($newline == Newlines::Yes) {
newline($buf, braces_indent);
}
$buf.push($start);
for item in $items.iter() {
@ -139,10 +138,12 @@ macro_rules! format_sequence {
}
}
}
fmt_comments_only($buf, $final_comments.iter(), NewlineAt::Top, item_indent);
newline($buf, braces_indent);
$buf.push($end);
} else {
// is_multiline == false
// there is no comment to add
$buf.push($start);
let mut iter = $items.iter().peekable();
while let Some(item) = iter.next() {
@ -285,9 +286,9 @@ impl<'a> Formattable<'a> for TypeAnnotation<'a> {
TagUnion {
tags,
ext,
final_comments: _,
final_comments,
} => {
format_sequence!(buf, indent, '[', ']', tags, Tag);
format_sequence!(buf, indent, '[', ']', tags, final_comments, newlines, Tag);
if let Some(loc_ext_ann) = *ext {
loc_ext_ann.value.format(buf, indent);
@ -297,9 +298,18 @@ impl<'a> Formattable<'a> for TypeAnnotation<'a> {
Record {
fields,
ext,
final_comments: _,
final_comments,
} => {
format_sequence!(buf, indent, '{', '}', fields, AssignedField);
format_sequence!(
buf,
indent,
'{',
'}',
fields,
final_comments,
newlines,
AssignedField
);
if let Some(loc_ext_ann) = *ext {
loc_ext_ann.value.format(buf, indent);
@ -313,8 +323,18 @@ impl<'a> Formattable<'a> for TypeAnnotation<'a> {
rhs.value.format(buf, indent);
}
SpaceBefore(ann, _spaces) | SpaceAfter(ann, _spaces) => {
ann.format_with_options(buf, parens, newlines, indent)
SpaceBefore(ann, spaces) => {
newline(buf, indent + INDENT);
fmt_comments_only(buf, spaces.iter(), NewlineAt::Bottom, indent + INDENT);
ann.format_with_options(buf, parens, Newlines::No, indent)
}
SpaceAfter(ann, spaces) => {
ann.format_with_options(buf, parens, newlines, indent);
fmt_comments_only(buf, spaces.iter(), NewlineAt::Bottom, indent);
// it seems like this SpaceAfter is not constructible,
// so this branch hasn't been tested. Please add a test if
// this branch is actually reached, and remove this debug_assert.
debug_assert!(false);
}
Malformed(raw) => buf.push_str(raw),


@ -36,8 +36,23 @@ impl<'a> Formattable<'a> for Def<'a> {
match self {
Annotation(loc_pattern, loc_annotation) => {
loc_pattern.format(buf, indent);
if loc_annotation.is_multiline() {
buf.push_str(" :");
loc_annotation.format_with_options(
buf,
Parens::NotNeeded,
Newlines::Yes,
indent,
);
} else {
buf.push_str(" : ");
loc_annotation.format(buf, indent);
loc_annotation.format_with_options(
buf,
Parens::NotNeeded,
Newlines::No,
indent,
);
}
}
Alias { name, vars, ann } => {
buf.push_str(name.value);


@ -175,7 +175,7 @@ fn fmt_imports_entry<'a>(buf: &mut String<'a>, entry: &'a ImportsEntry<'a>, inde
}
}
Package(_name, _entries) => {
Package(_pkg, _name, _entries) => {
todo!("TODO Format imported package");
}


@ -781,25 +781,125 @@ mod test_fmt {
);
}
// // TODO This raises a parse error:
// // NotYetImplemented("TODO the : in this declaration seems outdented")
// #[test]
// fn comments_in_record_annotation() {
// expr_formats_to(
// indoc!(
// r#"
// f :
// {}
// f"#
// ),
// indoc!(
// r#"
// f : b {}
// f"#
// ),
// );
// }
#[test]
fn trailing_comma_in_record_annotation_same() {
expr_formats_same(indoc!(
r#"
f :
{
y : Int,
x : Int,
}
f"#
));
}
#[test]
fn multiline_type_definition() {
expr_formats_same(indoc!(
r#"
f :
Int
f"#
));
}
#[test]
fn multiline_empty_record_type_definition() {
expr_formats_same(indoc!(
r#"
f :
{}
f"#
));
}
#[test]
fn type_definition_comment_after_colon() {
expr_formats_to(
indoc!(
r#"
f : # comment
{}
f"#
),
indoc!(
r#"
f :
# comment
{}
f"#
),
);
}
#[test]
fn final_comment_in_empty_record_type_definition() {
expr_formats_to(
indoc!(
r#"
f :
{ # comment
}
f"#
),
indoc!(
r#"
f :
{
# comment
}
f"#
),
);
}
#[test]
fn multiline_inside_empty_record_annotation() {
expr_formats_same(indoc!(
r#"
f :
{
}
f"#
));
}
#[test]
fn final_comment_record_annotation() {
expr_formats_to(
indoc!(
r#"
f :
{
x: Int # comment 1
,
# comment 2
}
f"#
),
indoc!(
r#"
f :
{
x : Int,
# comment 1
# comment 2
}
f"#
),
);
}
#[test]
fn def_closure() {


@ -1,7 +1,7 @@
use crate::llvm::build_list::{
allocate_list, empty_list, empty_polymorphic_list, list_append, list_concat, list_contains,
list_get_unsafe, list_join, list_keep_if, list_len, list_map, list_prepend, list_repeat,
list_reverse, list_set, list_single, list_sum, list_walk_right,
list_reverse, list_set, list_single, list_sum, list_walk, list_walk_backwards,
};
use crate::llvm::build_str::{
str_concat, str_count_graphemes, str_len, str_split, str_starts_with, CHAR_LAYOUT,
@ -735,7 +735,12 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
// Insert field exprs into struct_val
for (index, field_val) in field_vals.into_iter().enumerate() {
struct_val = builder
.build_insert_value(struct_val, field_val, index as u32, "insert_field")
.build_insert_value(
struct_val,
field_val,
index as u32,
"insert_record_field",
)
.unwrap();
}
@ -785,7 +790,12 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
// Insert field exprs into struct_val
for (index, field_val) in field_vals.into_iter().enumerate() {
struct_val = builder
.build_insert_value(struct_val, field_val, index as u32, "insert_field")
.build_insert_value(
struct_val,
field_val,
index as u32,
"insert_single_tag_field",
)
.unwrap();
}
@ -848,7 +858,12 @@ pub fn build_exp_expr<'a, 'ctx, 'env>(
// Insert field exprs into struct_val
for (index, field_val) in field_vals.into_iter().enumerate() {
struct_val = builder
.build_insert_value(struct_val, field_val, index as u32, "insert_field")
.build_insert_value(
struct_val,
field_val,
index as u32,
"insert_multi_tag_field",
)
.unwrap();
}
@ -1710,10 +1725,7 @@ fn expose_function_to_host<'a, 'ctx, 'env>(
let c_function_name: String =
format!("roc_{}_exposed", roc_function.get_name().to_str().unwrap());
let result = expose_function_to_host_help(env, roc_function, &c_function_name);
let subprogram = env.new_subprogram(&c_function_name);
result.set_subprogram(subprogram);
expose_function_to_host_help(env, roc_function, &c_function_name);
}
fn expose_function_to_host_help<'a, 'ctx, 'env>(
@ -1791,7 +1803,8 @@ fn expose_function_to_host_help<'a, 'ctx, 'env>(
// STEP 3: build a {} -> u64 function that gives the size of the return type
let size_function_type = env.context.i64_type().fn_type(&[], false);
let size_function_name: String = format!("{}_size", roc_function.get_name().to_str().unwrap());
let size_function_name: String =
format!("roc_{}_size", roc_function.get_name().to_str().unwrap());
let size_function = env.module.add_function(
size_function_name.as_str(),
@ -1799,10 +1812,30 @@ fn expose_function_to_host_help<'a, 'ctx, 'env>(
Some(Linkage::External),
);
let subprogram = env.new_subprogram(&size_function_name);
size_function.set_subprogram(subprogram);
let entry = context.append_basic_block(size_function, "entry");
builder.position_at_end(entry);
let func_scope = size_function.get_subprogram().unwrap();
let lexical_block = env.dibuilder.create_lexical_block(
/* scope */ func_scope.as_debug_info_scope(),
/* file */ env.compile_unit.get_file(),
/* line_no */ 0,
/* column_no */ 0,
);
let loc = env.dibuilder.create_debug_location(
env.context,
/* line */ 0,
/* column */ 0,
/* current_scope */ lexical_block.as_debug_info_scope(),
/* inlined_at */ None,
);
builder.set_current_debug_location(env.context, loc);
let size: BasicValueEnum = return_type.size_of().unwrap().into();
builder.build_return(Some(&size));
@ -1997,6 +2030,9 @@ fn make_exception_catching_wrapper<'a, 'ctx, 'env>(
env.module
.add_function(&wrapper_function_name, wrapper_function_type, None);
let subprogram = env.new_subprogram(wrapper_function_name);
wrapper_function.set_subprogram(subprogram);
// our exposed main function adheres to the C calling convention
wrapper_function.set_call_conventions(FAST_CALL_CONV);
@ -2006,6 +2042,23 @@ fn make_exception_catching_wrapper<'a, 'ctx, 'env>(
let basic_block = context.append_basic_block(wrapper_function, "entry");
builder.position_at_end(basic_block);
let func_scope = wrapper_function.get_subprogram().unwrap();
let lexical_block = env.dibuilder.create_lexical_block(
/* scope */ func_scope.as_debug_info_scope(),
/* file */ env.compile_unit.get_file(),
/* line_no */ 0,
/* column_no */ 0,
);
let loc = env.dibuilder.create_debug_location(
env.context,
/* line */ 0,
/* column */ 0,
/* current_scope */ lexical_block.as_debug_info_scope(),
/* inlined_at */ None,
);
builder.set_current_debug_location(env.context, loc);
let result = invoke_and_catch(
env,
wrapper_function,
@ -2104,8 +2157,9 @@ pub fn build_closure_caller<'a, 'ctx, 'env>(
// STEP 1: build function header
// e.g. `roc__main_1_Fx_caller`
let function_name = format!(
"{}_{}_caller",
"roc_{}_{}_caller",
def_name,
alias_symbol.ident_string(&env.interns)
);
@ -2185,7 +2239,7 @@ pub fn build_closure_caller<'a, 'ctx, 'env>(
// STEP 3: build a {} -> u64 function that gives the size of the return type
let size_function_type = env.context.i64_type().fn_type(&[], false);
let size_function_name: String = format!(
"{}_{}_size",
"roc_{}_{}_size",
def_name,
alias_symbol.ident_string(&env.interns)
);
@ -2492,8 +2546,7 @@ fn run_low_level<'a, 'ctx, 'env>(
list_contains(env, parent, elem, elem_layout, list, list_layout)
}
ListWalkRight => {
// List.walkRight : List elem, (elem -> accum -> accum), accum -> accum
ListWalk => {
debug_assert_eq!(args.len(), 3);
let (list, list_layout) = load_symbol_and_layout(env, scope, &args[0]);
@ -2502,7 +2555,28 @@ fn run_low_level<'a, 'ctx, 'env>(
let (default, default_layout) = load_symbol_and_layout(env, scope, &args[2]);
list_walk_right(
list_walk(
env,
parent,
list,
list_layout,
func,
func_layout,
default,
default_layout,
)
}
ListWalkBackwards => {
// List.walkBackwards : List elem, (elem -> accum -> accum), accum -> accum
debug_assert_eq!(args.len(), 3);
let (list, list_layout) = load_symbol_and_layout(env, scope, &args[0]);
let (func, func_layout) = load_symbol_and_layout(env, scope, &args[1]);
let (default, default_layout) = load_symbol_and_layout(env, scope, &args[2]);
list_walk_backwards(
env,
parent,
list,

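The same debug-info setup (create a subprogram, a lexical block, and a debug location, then point the builder at it) is repeated for each exposed function in this hunk. A hypothetical helper, not part of the commit, could factor that boilerplate out; it assumes only the `Env` fields and inkwell calls already used above:

// Hypothetical helper: attach a subprogram and a zeroed-out debug location to `function`.
fn attach_debug_info<'ctx>(
    env: &Env<'_, 'ctx, '_>,
    builder: &inkwell::builder::Builder<'ctx>,
    function: inkwell::values::FunctionValue<'ctx>,
    name: &str,
) {
    let subprogram = env.new_subprogram(name);
    function.set_subprogram(subprogram);

    let lexical_block = env.dibuilder.create_lexical_block(
        subprogram.as_debug_info_scope(),
        env.compile_unit.get_file(),
        /* line_no */ 0,
        /* column_no */ 0,
    );
    let loc = env.dibuilder.create_debug_location(
        env.context,
        /* line */ 0,
        /* column */ 0,
        lexical_block.as_debug_info_scope(),
        /* inlined_at */ None,
    );
    builder.set_current_debug_location(env.context, loc);
}

Each of the call sites above that repeats this sequence could then become a single `attach_debug_info` call.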

@ -809,9 +809,9 @@ pub fn list_sum<'a, 'ctx, 'env>(
builder.build_load(accum_alloca, "load_final_acum")
}
/// List.walkRight : List elem, (elem -> accum -> accum), accum -> accum
/// List.walk : List elem, (elem -> accum -> accum), accum -> accum
#[allow(clippy::too_many_arguments)]
pub fn list_walk_right<'a, 'ctx, 'env>(
pub fn list_walk<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
parent: FunctionValue<'ctx>,
list: BasicValueEnum<'ctx>,
@ -901,6 +901,98 @@ pub fn list_walk_right<'a, 'ctx, 'env>(
builder.build_load(accum_alloca, "load_final_acum")
}
/// List.walkBackwards : List elem, (elem -> accum -> accum), accum -> accum
#[allow(clippy::too_many_arguments)]
pub fn list_walk_backwards<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
parent: FunctionValue<'ctx>,
list: BasicValueEnum<'ctx>,
list_layout: &Layout<'a>,
func: BasicValueEnum<'ctx>,
func_layout: &Layout<'a>,
default: BasicValueEnum<'ctx>,
default_layout: &Layout<'a>,
) -> BasicValueEnum<'ctx> {
let ctx = env.context;
let builder = env.builder;
let list_wrapper = list.into_struct_value();
let len = list_len(env.builder, list_wrapper);
let accum_type = basic_type_from_layout(env.arena, ctx, default_layout, env.ptr_bytes);
let accum_alloca = builder.build_alloca(accum_type, "alloca_walk_right_accum");
builder.build_store(accum_alloca, default);
let then_block = ctx.append_basic_block(parent, "then");
let cont_block = ctx.append_basic_block(parent, "branchcont");
let condition = builder.build_int_compare(
IntPredicate::UGT,
len,
ctx.i64_type().const_zero(),
"list_non_empty",
);
builder.build_conditional_branch(condition, then_block, cont_block);
builder.position_at_end(then_block);
match (func, func_layout) {
(BasicValueEnum::PointerValue(func_ptr), Layout::FunctionPointer(_, _)) => {
let elem_layout = match list_layout {
Layout::Builtin(Builtin::List(_, layout)) => layout,
_ => unreachable!("can only fold over a list"),
};
let elem_type = basic_type_from_layout(env.arena, ctx, elem_layout, env.ptr_bytes);
let elem_ptr_type = get_ptr_type(&elem_type, AddressSpace::Generic);
let list_ptr = load_list_ptr(builder, list_wrapper, elem_ptr_type);
let walk_right_loop = |_, elem: BasicValueEnum<'ctx>| {
// load current accumulator
let current = builder.build_load(accum_alloca, "retrieve_accum");
let call_site_value =
builder.build_call(func_ptr, &[elem, current], "#walk_right_func");
// set the calling convention explicitly for this call
call_site_value.set_call_convention(crate::llvm::build::FAST_CALL_CONV);
let new_current = call_site_value
.try_as_basic_value()
.left()
.unwrap_or_else(|| panic!("LLVM error: Invalid call by pointer."));
builder.build_store(accum_alloca, new_current);
};
decrementing_elem_loop(
builder,
ctx,
parent,
list_ptr,
len,
"#index",
walk_right_loop,
);
}
_ => {
unreachable!(
"Invalid function basic value enum or layout for List.keepIf : {:?}",
(func, func_layout)
);
}
}
builder.build_unconditional_branch(cont_block);
builder.position_at_end(cont_block);
builder.build_load(accum_alloca, "load_final_acum")
}
/// List.contains : List elem, elem -> Bool
pub fn list_contains<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
@ -1537,6 +1629,7 @@ where
let current_index = builder
.build_load(index_alloca, index_name)
.into_int_value();
let next_index = builder.build_int_sub(current_index, one, "nextindex");
builder.build_store(index_alloca, next_index);
@ -1546,7 +1639,7 @@ where
// #index >= 0
let condition = builder.build_int_compare(
IntPredicate::UGE,
IntPredicate::SGE,
next_index,
ctx.i64_type().const_zero(),
"bounds_check",

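The loop condition changes from `IntPredicate::UGE` to `IntPredicate::SGE`. The reason: the decrementing loop tests `next_index >= 0`, and with an unsigned compare that test is vacuously true, so the loop would never terminate once the index wraps below zero; a signed compare ends the loop correctly. A plain-Rust analogue of the fixed logic (a sketch, not the LLVM builder code above):

// Walk indices len-1, len-2, ..., 0. The index must be signed for the `>= 0`
// test to ever become false; an unsigned index would wrap and loop forever.
let len: i64 = 3;
let mut index = len - 1;
while index >= 0 {
    // visit the element at `index` here (back-to-front traversal)
    index -= 1;
}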

@ -237,11 +237,11 @@ mod gen_list {
}
#[test]
fn list_walk_right_empty_all_inline() {
fn list_walk_backwards_empty_all_inline() {
assert_evals_to!(
indoc!(
r#"
List.walkRight [0x1] (\a, b -> a + b) 0
List.walkBackwards [0x1] (\a, b -> a + b) 0
"#
),
1,
@ -255,7 +255,7 @@ mod gen_list {
empty =
[]
List.walkRight empty (\a, b -> a + b) 0
List.walkBackwards empty (\a, b -> a + b) 0
"#
),
0,
@ -264,22 +264,22 @@ mod gen_list {
}
#[test]
fn list_walk_right_with_str() {
fn list_walk_backwards_with_str() {
assert_evals_to!(
r#"List.walkRight [ "x", "y", "z" ] Str.concat "<""#,
RocStr::from("zyx<"),
r#"List.walkBackwards [ "x", "y", "z" ] Str.concat "<""#,
RocStr::from("xyz<"),
RocStr
);
assert_evals_to!(
r#"List.walkRight [ "Third", "Second", "First" ] Str.concat "Fourth""#,
RocStr::from("FirstSecondThirdFourth"),
r#"List.walkBackwards [ "Third", "Second", "First" ] Str.concat "Fourth""#,
RocStr::from("ThirdSecondFirstFourth"),
RocStr
);
}
#[test]
fn list_walk_right_with_record() {
fn list_walk_backwards_with_record() {
assert_evals_to!(
indoc!(
r#"
@ -295,7 +295,7 @@ mod gen_list {
Zero -> { r & zeroes: r.zeroes + 1 }
One -> { r & ones: r.ones + 1 }
finalCounts = List.walkRight byte acc initialCounts
finalCounts = List.walkBackwards byte acc initialCounts
finalCounts.ones * 10 + finalCounts.zeroes
"#
@ -305,6 +305,26 @@ mod gen_list {
);
}
#[test]
fn list_walk_with_str() {
assert_evals_to!(
r#"List.walk [ "x", "y", "z" ] Str.concat "<""#,
RocStr::from("zyx<"),
RocStr
);
assert_evals_to!(
r#"List.walk [ "Third", "Second", "First" ] Str.concat "Fourth""#,
RocStr::from("FirstSecondThirdFourth"),
RocStr
);
}
#[test]
fn list_walk_subtraction() {
assert_evals_to!(r#"List.walk [ 1, 2 ] Num.sub 1"#, 2, i64);
}
#[test]
fn list_keep_if_empty_list_of_int() {
assert_evals_to!(

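The changed expectations encode the difference between the renamed `List.walk` (front-to-back, the behavior formerly exposed as `walkRight`) and the new `List.walkBackwards` (back-to-front). A plain-Rust analogue of the two traversal orders, matching the string tests above (a sketch, not the Roc implementation):

let xs = ["x", "y", "z"];

// List.walk folds front-to-back; with Str.concat the element is prepended to the accumulator.
let walk = xs.iter().fold(String::from("<"), |acc, s| format!("{}{}", s, acc));
assert_eq!(walk, "zyx<");

// List.walkBackwards folds back-to-front.
let backwards = xs.iter().rev().fold(String::from("<"), |acc, s| format!("{}{}", s, acc));
assert_eq!(backwards, "xyz<");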

@ -844,4 +844,49 @@ mod gen_records {
(bool, bool)
);
}
#[test]
fn alignment_in_record() {
assert_evals_to!(
indoc!("{ c: 32, b: if True then Red else if True then Green else Blue, a: 1 == 1 }"),
(32i64, true, 2u8),
(i64, bool, u8)
);
}
#[test]
fn blue_and_present() {
assert_evals_to!(
indoc!(
r#"
f = \r ->
when r is
{ x: Blue, y ? 3 } -> y
{ x: Red, y ? 5 } -> y
f { x: Blue, y: 7 }
"#
),
7,
i64
);
}
#[test]
fn blue_and_absent() {
assert_evals_to!(
indoc!(
r#"
f = \r ->
when r is
{ x: Blue, y ? 3 } -> y
{ x: Red, y ? 5 } -> y
f { x: Blue }
"#
),
3,
i64
);
}
}


@ -758,4 +758,114 @@ mod gen_tags {
i64
);
}
#[test]
fn alignment_in_single_tag_construction() {
assert_evals_to!(indoc!("Three (1 == 1) 32"), (32i64, true), (i64, bool));
assert_evals_to!(
indoc!("Three (1 == 1) (if True then Red else if True then Green else Blue) 32"),
(32i64, true, 2u8),
(i64, bool, u8)
);
}
#[test]
fn alignment_in_single_tag_pattern_match() {
assert_evals_to!(
indoc!(
r"#
x = Three (1 == 1) 32
when x is
Three bool int ->
{ bool, int }
#"
),
(32i64, true),
(i64, bool)
);
assert_evals_to!(
indoc!(
r"#
x = Three (1 == 1) (if True then Red else if True then Green else Blue) 32
when x is
Three bool color int ->
{ bool, color, int }
#"
),
(32i64, true, 2u8),
(i64, bool, u8)
);
}
#[test]
fn alignment_in_multi_tag_construction() {
assert_evals_to!(
indoc!(
r"#
x : [ Three Bool Int, Empty ]
x = Three (1 == 1) 32
x
#"
),
(1, 32i64, true),
(i64, i64, bool)
);
assert_evals_to!(
indoc!(
r"#
x : [ Three Bool [ Red, Green, Blue ] Int, Empty ]
x = Three (1 == 1) (if True then Red else if True then Green else Blue) 32
x
#"
),
(1, 32i64, true, 2u8),
(i64, i64, bool, u8)
);
}
#[test]
fn alignment_in_multi_tag_pattern_match() {
assert_evals_to!(
indoc!(
r"#
x : [ Three Bool Int, Empty ]
x = Three (1 == 1) 32
when x is
Three bool int ->
{ bool, int }
Empty ->
{ bool: False, int: 0 }
#"
),
(32i64, true),
(i64, bool)
);
assert_evals_to!(
indoc!(
r"#
x : [ Three Bool [ Red, Green, Blue ] Int, Empty ]
x = Three (1 == 1) (if True then Red else if True then Green else Blue) 32
when x is
Three bool color int ->
{ bool, color, int }
Empty ->
{ bool: False, color: Red, int: 0 }
#"
),
(32i64, true, 2u8),
(i64, bool, u8)
);
}
}


@ -0,0 +1,44 @@
[package]
name = "roc_gen_dev"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
license = "Apache-2.0"
[dependencies]
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_load = { path = "../load" }
roc_module = { path = "../module" }
roc_problem = { path = "../problem" }
roc_types = { path = "../types" }
roc_builtins = { path = "../builtins" }
roc_constrain = { path = "../constrain" }
roc_uniq = { path = "../uniq" }
roc_unify = { path = "../unify" }
roc_solve = { path = "../solve" }
roc_mono = { path = "../mono" }
im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version!
bumpalo = { version = "3.2", features = ["collections"] }
inlinable_string = "0.1"
target-lexicon = "0.10"
libloading = "0.6"
object = { version = "0.22", features = ["write"] }
[dev-dependencies]
roc_can = { path = "../can" }
roc_parse = { path = "../parse" }
roc_reporting = { path = "../reporting" }
roc_build = { path = "../build" }
roc_std = { path = "../../roc_std" }
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
tokio = { version = "0.2", features = ["blocking", "fs", "sync", "rt-threaded"] }
bumpalo = { version = "3.2", features = ["collections"] }
libc = "0.2"
tempfile = "3.1.0"
itertools = "0.9"


@ -0,0 +1,331 @@
use crate::{Backend, Env, Relocation};
use bumpalo::collections::Vec;
use roc_collections::all::{ImSet, MutMap, MutSet};
use roc_module::symbol::Symbol;
use roc_mono::ir::{Literal, Stmt};
use std::marker::PhantomData;
use target_lexicon::Triple;
pub mod x86_64;
pub trait CallConv<GPReg> {
fn gp_param_regs() -> &'static [GPReg];
fn gp_return_regs() -> &'static [GPReg];
fn gp_default_free_regs() -> &'static [GPReg];
// A linear scan of an array may be faster than a set technically.
// That being said, fastest would likely be a trait based on calling convention/register.
fn caller_saved_regs() -> ImSet<GPReg>;
fn callee_saved_regs() -> ImSet<GPReg>;
fn stack_pointer() -> GPReg;
fn frame_pointer() -> GPReg;
fn shadow_space_size() -> u8;
// It may be worth ignoring the red zone and keeping things simpler.
fn red_zone_size() -> u8;
}
pub trait Assembler<GPReg> {
fn add_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
fn add_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
fn cmovl_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
fn mov_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
fn mov_register64bit_immediate64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i64);
fn mov_register64bit_register64bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, src: GPReg);
fn mov_register64bit_stackoffset32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, offset: i32);
fn mov_stackoffset32bit_register64bit<'a>(buf: &mut Vec<'a, u8>, offset: i32, src: GPReg);
fn neg_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
fn ret<'a>(buf: &mut Vec<'a, u8>);
fn sub_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: GPReg, imm: i32);
fn pop_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
fn push_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: GPReg);
}
#[derive(Clone, Debug, PartialEq)]
enum SymbolStorage<GPReg> {
// These may need layout, but I am not sure.
// I think whenever a symbol would be used, we specify layout anyways.
GPRegeg(GPReg),
Stack(i32),
StackAndGPRegeg(GPReg, i32),
}
pub trait GPRegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}
pub struct Backend64Bit<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> {
phantom_asm: PhantomData<ASM>,
phantom_cc: PhantomData<CC>,
env: &'a Env<'a>,
buf: Vec<'a, u8>,
/// leaf_function is true if the only calls this function makes are tail calls.
/// If that is the case, we can skip emitting the frame pointer and updating the stack.
leaf_function: bool,
last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
symbols_map: MutMap<Symbol, SymbolStorage<GPReg>>,
literal_map: MutMap<Symbol, Literal<'a>>,
// This should probably be smarter than a vec.
// There are certain registers we should always use first. With pushing and popping, this could get mixed.
gp_free_regs: Vec<'a, GPReg>,
// The last major thing we need is a way to decide what reg to free when all of them are full.
// Theoretically we want a basic lru cache for the currently loaded symbols.
// For now just a vec of used registers and the symbols they contain.
gp_used_regs: Vec<'a, (GPReg, Symbol)>,
stack_size: i32,
// used callee saved regs must be tracked for pushing and popping at the beginning/end of the function.
used_callee_saved_regs: MutSet<GPReg>,
}
impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>> Backend<'a>
for Backend64Bit<'a, GPReg, ASM, CC>
{
fn new(env: &'a Env, _target: &Triple) -> Result<Self, String> {
Ok(Backend64Bit {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
env,
leaf_function: true,
buf: bumpalo::vec!(in env.arena),
last_seen_map: MutMap::default(),
free_map: MutMap::default(),
symbols_map: MutMap::default(),
literal_map: MutMap::default(),
gp_free_regs: bumpalo::vec![in env.arena],
gp_used_regs: bumpalo::vec![in env.arena],
stack_size: 0,
used_callee_saved_regs: MutSet::default(),
})
}
fn env(&self) -> &'a Env<'a> {
self.env
}
fn reset(&mut self) {
self.stack_size = -(CC::red_zone_size() as i32);
self.leaf_function = true;
self.last_seen_map.clear();
self.free_map.clear();
self.symbols_map.clear();
self.buf.clear();
self.used_callee_saved_regs.clear();
self.gp_free_regs.clear();
self.gp_used_regs.clear();
self.gp_free_regs
.extend_from_slice(CC::gp_default_free_regs());
}
fn set_not_leaf_function(&mut self) {
self.leaf_function = false;
// If this is not a leaf function, it can't use the shadow space.
self.stack_size = CC::shadow_space_size() as i32 - CC::red_zone_size() as i32;
}
fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>> {
&mut self.literal_map
}
fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>> {
&mut self.last_seen_map
}
fn set_free_map(&mut self, map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>) {
self.free_map = map;
}
fn free_map(&mut self) -> &mut MutMap<*const Stmt<'a>, Vec<'a, Symbol>> {
&mut self.free_map
}
fn finalize(&mut self) -> Result<(&'a [u8], &[Relocation]), String> {
let mut out = bumpalo::vec![in self.env.arena];
if !self.leaf_function {
// I believe this will have to move away from push/pop to mov in order to be generic across backends.
ASM::push_register64bit(&mut out, CC::frame_pointer());
ASM::mov_register64bit_register64bit(
&mut out,
CC::frame_pointer(),
CC::stack_pointer(),
);
}
// Save data in all callee saved regs.
let mut pop_order = bumpalo::vec![in self.env.arena];
for reg in &self.used_callee_saved_regs {
ASM::push_register64bit(&mut out, *reg);
pop_order.push(*reg);
}
if self.stack_size > 0 {
ASM::sub_register64bit_immediate32bit(&mut out, CC::stack_pointer(), self.stack_size);
}
// Add function body.
out.extend(&self.buf);
if self.stack_size > 0 {
ASM::add_register64bit_immediate32bit(&mut out, CC::stack_pointer(), self.stack_size);
}
// Restore data in callee saved regs.
while let Some(reg) = pop_order.pop() {
ASM::pop_register64bit(&mut out, reg);
}
if !self.leaf_function {
ASM::pop_register64bit(&mut out, CC::frame_pointer());
}
ASM::ret(&mut out);
Ok((out.into_bump_slice(), &[]))
}
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String> {
let dst_reg = self.claim_gp_reg(dst)?;
let src_reg = self.load_to_reg(src)?;
ASM::mov_register64bit_register64bit(&mut self.buf, dst_reg, src_reg);
ASM::neg_register64bit(&mut self.buf, dst_reg);
ASM::cmovl_register64bit_register64bit(&mut self.buf, dst_reg, src_reg);
Ok(())
}
fn build_num_add_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String> {
let dst_reg = self.claim_gp_reg(dst)?;
let src1_reg = self.load_to_reg(src1)?;
ASM::mov_register64bit_register64bit(&mut self.buf, dst_reg, src1_reg);
let src2_reg = self.load_to_reg(src2)?;
ASM::add_register64bit_register64bit(&mut self.buf, dst_reg, src2_reg);
Ok(())
}
fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String> {
match lit {
Literal::Int(x) => {
let reg = self.claim_gp_reg(sym)?;
let val = *x;
ASM::mov_register64bit_immediate64bit(&mut self.buf, reg, val);
Ok(())
}
x => Err(format!("loading literal, {:?}, is not yet implemented", x)),
}
}
fn free_symbol(&mut self, sym: &Symbol) {
self.symbols_map.remove(sym);
for i in 0..self.gp_used_regs.len() {
let (reg, saved_sym) = self.gp_used_regs[i];
if saved_sym == *sym {
self.gp_free_regs.push(reg);
self.gp_used_regs.remove(i);
break;
}
}
}
fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.get(sym);
match val {
Some(SymbolStorage::GPRegeg(reg)) if *reg == CC::gp_return_regs()[0] => Ok(()),
Some(SymbolStorage::GPRegeg(reg)) => {
// If it fits in a general purpose register, just copy it over to the return register.
// Technically this can be optimized to produce shorter instructions if the value is less than 64 bits.
ASM::mov_register64bit_register64bit(&mut self.buf, CC::gp_return_regs()[0], *reg);
Ok(())
}
Some(x) => Err(format!(
"returning symbol storage, {:?}, is not yet implemented",
x
)),
None => Err(format!("Unknown return symbol: {}", sym)),
}
}
}
/// This impl block is for IR-related instructions that need backend-specific information.
/// For example, loading a symbol for doing a computation.
impl<'a, GPReg: GPRegTrait, ASM: Assembler<GPReg>, CC: CallConv<GPReg>>
Backend64Bit<'a, GPReg, ASM, CC>
{
fn claim_gp_reg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
let reg = if !self.gp_free_regs.is_empty() {
let free_reg = self.gp_free_regs.pop().unwrap();
if CC::callee_saved_regs().contains(&free_reg) {
self.used_callee_saved_regs.insert(free_reg);
}
Ok(free_reg)
} else if !self.gp_used_regs.is_empty() {
let (reg, sym) = self.gp_used_regs.remove(0);
self.free_to_stack(&sym)?;
Ok(reg)
} else {
Err("completely out of registers".to_string())
}?;
self.gp_used_regs.push((reg, *sym));
self.symbols_map.insert(*sym, SymbolStorage::GPRegeg(reg));
Ok(reg)
}
fn load_to_reg(&mut self, sym: &Symbol) -> Result<GPReg, String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GPRegeg(reg)) => {
self.symbols_map.insert(*sym, SymbolStorage::GPRegeg(reg));
Ok(reg)
}
Some(SymbolStorage::StackAndGPRegeg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGPRegeg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::Stack(offset)) => {
let reg = self.claim_gp_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGPRegeg(reg, offset));
ASM::mov_register64bit_stackoffset32bit(&mut self.buf, reg, offset as i32);
Ok(reg)
}
None => Err(format!("Unknown symbol: {}", sym)),
}
}
fn free_to_stack(&mut self, sym: &Symbol) -> Result<(), String> {
let val = self.symbols_map.remove(sym);
match val {
Some(SymbolStorage::GPRegeg(reg)) => {
let offset = self.stack_size;
if let Some(size) = self.stack_size.checked_add(8) {
self.stack_size = size;
} else {
return Err(format!(
"Ran out of stack space while saving symbol: {}",
sym
));
}
ASM::mov_stackoffset32bit_register64bit(&mut self.buf, offset as i32, reg);
self.symbols_map
.insert(*sym, SymbolStorage::Stack(offset as i32));
Ok(())
}
Some(SymbolStorage::StackAndGPRegeg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
Some(SymbolStorage::Stack(offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Ok(())
}
None => Err(format!("Unknown symbol: {}", sym)),
}
}
}

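The new backend is generic over a register set, an assembler, and a calling convention; a concrete backend composes the three type parameters. A sketch of such a composition (the alias name is hypothetical and not part of this commit):

// Hypothetical instantiation of the generic backend for x86-64 System V,
// using the types defined in this file and in generic64/x86_64.rs.
type X86_64SystemVBackend<'a> = Backend64Bit<
    'a,
    x86_64::X86_64GPReg,
    x86_64::X86_64Assembler,
    x86_64::X86_64SystemV,
>;

A target-dispatching constructor could then pick this alias for x86-64 Linux and macOS triples, and an equivalent alias built on `X86_64WindowsFastcall` for Windows.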

@ -0,0 +1,582 @@
use crate::generic64::{Assembler, CallConv, GPRegTrait};
use bumpalo::collections::Vec;
use roc_collections::all::ImSet;
// Not sure exactly how I want to represent registers.
// If we want max speed, we would likely make them structs that impl the same trait to avoid ifs.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub enum X86_64GPReg {
RAX = 0,
RCX = 1,
RDX = 2,
RBX = 3,
RSP = 4,
RBP = 5,
RSI = 6,
RDI = 7,
R8 = 8,
R9 = 9,
R10 = 10,
R11 = 11,
R12 = 12,
R13 = 13,
R14 = 14,
R15 = 15,
}
impl GPRegTrait for X86_64GPReg {}
const REX: u8 = 0x40;
const REX_W: u8 = REX + 0x8;
fn add_rm_extension(reg: X86_64GPReg, byte: u8) -> u8 {
if reg as u8 > 7 {
byte + 1
} else {
byte
}
}
fn add_opcode_extension(reg: X86_64GPReg, byte: u8) -> u8 {
add_rm_extension(reg, byte)
}
fn add_reg_extension(reg: X86_64GPReg, byte: u8) -> u8 {
if reg as u8 > 7 {
byte + 4
} else {
byte
}
}
pub struct X86_64Assembler {}
pub struct X86_64WindowsFastcall {}
pub struct X86_64SystemV {}
impl CallConv<X86_64GPReg> for X86_64SystemV {
fn gp_param_regs() -> &'static [X86_64GPReg] {
&[
X86_64GPReg::RDI,
X86_64GPReg::RSI,
X86_64GPReg::RDX,
X86_64GPReg::RCX,
X86_64GPReg::R8,
X86_64GPReg::R9,
]
}
fn gp_return_regs() -> &'static [X86_64GPReg] {
&[X86_64GPReg::RAX, X86_64GPReg::RDX]
}
fn gp_default_free_regs() -> &'static [X86_64GPReg] {
&[
// The regs we want to use first should be at the end of this vec.
// We will use pop to get which reg to use next
// Use callee saved regs last.
X86_64GPReg::RBX,
// Don't use frame pointer: X86_64GPReg::RBP,
X86_64GPReg::R12,
X86_64GPReg::R13,
X86_64GPReg::R14,
X86_64GPReg::R15,
// Use caller saved regs first.
X86_64GPReg::RAX,
X86_64GPReg::RCX,
X86_64GPReg::RDX,
// Don't use stack pointer: X86_64GPReg::RSP,
X86_64GPReg::RSI,
X86_64GPReg::RDI,
X86_64GPReg::R8,
X86_64GPReg::R9,
X86_64GPReg::R10,
X86_64GPReg::R11,
]
}
fn caller_saved_regs() -> ImSet<X86_64GPReg> {
// TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
ImSet::from(vec![
X86_64GPReg::RAX,
X86_64GPReg::RCX,
X86_64GPReg::RDX,
X86_64GPReg::RSP,
X86_64GPReg::RSI,
X86_64GPReg::RDI,
X86_64GPReg::R8,
X86_64GPReg::R9,
X86_64GPReg::R10,
X86_64GPReg::R11,
])
}
fn callee_saved_regs() -> ImSet<X86_64GPReg> {
// TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
ImSet::from(vec![
X86_64GPReg::RBX,
X86_64GPReg::RBP,
X86_64GPReg::R12,
X86_64GPReg::R13,
X86_64GPReg::R14,
X86_64GPReg::R15,
])
}
fn stack_pointer() -> X86_64GPReg {
X86_64GPReg::RSP
}
fn frame_pointer() -> X86_64GPReg {
X86_64GPReg::RBP
}
fn shadow_space_size() -> u8 {
0
}
fn red_zone_size() -> u8 {
128
}
}
impl CallConv<X86_64GPReg> for X86_64WindowsFastcall {
fn gp_param_regs() -> &'static [X86_64GPReg] {
&[
X86_64GPReg::RCX,
X86_64GPReg::RDX,
X86_64GPReg::R8,
X86_64GPReg::R9,
]
}
fn gp_return_regs() -> &'static [X86_64GPReg] {
&[X86_64GPReg::RAX]
}
fn gp_default_free_regs() -> &'static [X86_64GPReg] {
&[
// The regs we want to use first should be at the end of this vec.
// We will use pop to get which reg to use next
// Use callee saved regs last.
X86_64GPReg::RBX,
// Don't use frame pointer: X86_64GPReg::RBP,
X86_64GPReg::RSI,
// Don't use stack pointer: X86_64GPReg::RSP,
X86_64GPReg::RDI,
X86_64GPReg::R12,
X86_64GPReg::R13,
X86_64GPReg::R14,
X86_64GPReg::R15,
// Use caller saved regs first.
X86_64GPReg::RAX,
X86_64GPReg::RCX,
X86_64GPReg::RDX,
X86_64GPReg::R8,
X86_64GPReg::R9,
X86_64GPReg::R10,
X86_64GPReg::R11,
]
}
fn caller_saved_regs() -> ImSet<X86_64GPReg> {
// TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
ImSet::from(vec![
X86_64GPReg::RAX,
X86_64GPReg::RCX,
X86_64GPReg::RDX,
X86_64GPReg::R8,
X86_64GPReg::R9,
X86_64GPReg::R10,
X86_64GPReg::R11,
])
}
fn callee_saved_regs() -> ImSet<X86_64GPReg> {
// TODO: stop using vec! here. I was just having trouble with some errors, but it shouldn't be needed.
ImSet::from(vec![
X86_64GPReg::RBX,
X86_64GPReg::RBP,
X86_64GPReg::RSI,
X86_64GPReg::RSP,
X86_64GPReg::RDI,
X86_64GPReg::R12,
X86_64GPReg::R13,
X86_64GPReg::R14,
X86_64GPReg::R15,
])
}
fn stack_pointer() -> X86_64GPReg {
X86_64GPReg::RSP
}
fn frame_pointer() -> X86_64GPReg {
X86_64GPReg::RBP
}
fn shadow_space_size() -> u8 {
32
}
fn red_zone_size() -> u8 {
0
}
}
impl Assembler<X86_64GPReg> for X86_64Assembler {
// Below here are the functions for all of the assembly instructions.
// Their names are based on the instruction and operators combined.
// You should call `buf.reserve()` if you push or extend more than once.
// Unit tests are added at the bottom of the file to ensure correct asm generation.
// Please keep these in alphanumeric order.
/// `ADD r/m64, imm32` -> Add imm32 sign-extended to 64-bits to r/m64.
fn add_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
// This can be optimized if the immediate is 1 byte.
let rex = add_rm_extension(dst, REX_W);
let dst_mod = dst as u8 % 8;
buf.reserve(7);
buf.extend(&[rex, 0x81, 0xC0 + dst_mod]);
buf.extend(&imm.to_le_bytes());
}
/// `ADD r/m64,r64` -> Add r64 to r/m64.
fn add_register64bit_register64bit<'a>(
buf: &mut Vec<'a, u8>,
dst: X86_64GPReg,
src: X86_64GPReg,
) {
let rex = add_rm_extension(dst, REX_W);
let rex = add_reg_extension(src, rex);
let dst_mod = dst as u8 % 8;
let src_mod = (src as u8 % 8) << 3;
buf.extend(&[rex, 0x01, 0xC0 + dst_mod + src_mod]);
}
/// `CMOVL r64,r/m64` -> Move if less (SF≠ OF).
fn cmovl_register64bit_register64bit<'a>(
buf: &mut Vec<'a, u8>,
dst: X86_64GPReg,
src: X86_64GPReg,
) {
let rex = add_reg_extension(dst, REX_W);
let rex = add_rm_extension(src, rex);
let dst_mod = (dst as u8 % 8) << 3;
let src_mod = src as u8 % 8;
buf.extend(&[rex, 0x0F, 0x4C, 0xC0 + dst_mod + src_mod]);
}
/// `MOV r/m64, imm32` -> Move imm32 sign extended to 64-bits to r/m64.
fn mov_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
let rex = add_rm_extension(dst, REX_W);
let dst_mod = dst as u8 % 8;
buf.reserve(7);
buf.extend(&[rex, 0xC7, 0xC0 + dst_mod]);
buf.extend(&imm.to_le_bytes());
}
/// `MOV r64, imm64` -> Move imm64 to r64.
fn mov_register64bit_immediate64bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i64) {
if imm <= i32::MAX as i64 && imm >= i32::MIN as i64 {
Self::mov_register64bit_immediate32bit(buf, dst, imm as i32)
} else {
let rex = add_opcode_extension(dst, REX_W);
let dst_mod = dst as u8 % 8;
buf.reserve(10);
buf.extend(&[rex, 0xB8 + dst_mod]);
buf.extend(&imm.to_le_bytes());
}
}
/// `MOV r/m64,r64` -> Move r64 to r/m64.
fn mov_register64bit_register64bit<'a>(
buf: &mut Vec<'a, u8>,
dst: X86_64GPReg,
src: X86_64GPReg,
) {
let rex = add_rm_extension(dst, REX_W);
let rex = add_reg_extension(src, rex);
let dst_mod = dst as u8 % 8;
let src_mod = (src as u8 % 8) << 3;
buf.extend(&[rex, 0x89, 0xC0 + dst_mod + src_mod]);
}
/// `MOV r64,r/m64` -> Move r/m64 to r64.
fn mov_register64bit_stackoffset32bit<'a>(
buf: &mut Vec<'a, u8>,
dst: X86_64GPReg,
offset: i32,
) {
// This can be optimized based on how many bytes the offset actually is.
// This function could probably be made to take any memory offset; I didn't feel like figuring that out right now.
// Also, a dedicated stack-offset form may technically give faster generation, since stack operations are so common.
let rex = add_reg_extension(dst, REX_W);
let dst_mod = (dst as u8 % 8) << 3;
buf.reserve(8);
buf.extend(&[rex, 0x8B, 0x84 + dst_mod, 0x24]);
buf.extend(&offset.to_le_bytes());
}
/// `MOV r/m64,r64` -> Move r64 to r/m64.
fn mov_stackoffset32bit_register64bit<'a>(
buf: &mut Vec<'a, u8>,
offset: i32,
src: X86_64GPReg,
) {
// This can be optimized based on how many bytes the offset actually is.
// This function could probably be made to take any memory offset; I didn't feel like figuring that out right now.
// Also, a dedicated stack-offset form may technically give faster generation, since stack operations are so common.
let rex = add_reg_extension(src, REX_W);
let src_mod = (src as u8 % 8) << 3;
buf.reserve(8);
buf.extend(&[rex, 0x89, 0x84 + src_mod, 0x24]);
buf.extend(&offset.to_le_bytes());
}
/// `NEG r/m64` -> Two's complement negate r/m64.
fn neg_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
let rex = add_rm_extension(reg, REX_W);
let reg_mod = reg as u8 % 8;
buf.extend(&[rex, 0xF7, 0xD8 + reg_mod]);
}
/// `RET` -> Near return to calling procedure.
fn ret<'a>(buf: &mut Vec<'a, u8>) {
buf.push(0xC3);
}
/// `SUB r/m64, imm32` -> Subtract imm32 sign-extended to 64-bits from r/m64.
fn sub_register64bit_immediate32bit<'a>(buf: &mut Vec<'a, u8>, dst: X86_64GPReg, imm: i32) {
// This can be optimized if the immediate is 1 byte.
let rex = add_rm_extension(dst, REX_W);
let dst_mod = dst as u8 % 8;
buf.reserve(7);
buf.extend(&[rex, 0x81, 0xE8 + dst_mod]);
buf.extend(&imm.to_le_bytes());
}
/// `POP r64` -> Pop top of stack into r64; increment stack pointer. Cannot encode 32-bit operand size.
fn pop_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
let reg_mod = reg as u8 % 8;
if reg as u8 > 7 {
let rex = add_opcode_extension(reg, REX);
buf.extend(&[rex, 0x58 + reg_mod]);
} else {
buf.push(0x58 + reg_mod);
}
}
/// `PUSH r64` -> Push r64 onto the stack.
fn push_register64bit<'a>(buf: &mut Vec<'a, u8>, reg: X86_64GPReg) {
let reg_mod = reg as u8 % 8;
if reg as u8 > 7 {
let rex = add_opcode_extension(reg, REX);
buf.extend(&[rex, 0x50 + reg_mod]);
} else {
buf.push(0x50 + reg_mod);
}
}
}
// When writing tests, it is a good idea to test both a numbered and an unnumbered register.
// This is because R8-R15 often have special instruction prefixes.
#[cfg(test)]
mod tests {
use super::*;
const TEST_I32: i32 = 0x12345678;
const TEST_I64: i64 = 0x12345678_9ABCDEF0;
#[test]
fn test_add_register64bit_immediate32bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (dst, expected) in &[
(X86_64GPReg::RAX, [0x48, 0x81, 0xC0]),
(X86_64GPReg::R15, [0x49, 0x81, 0xC7]),
] {
buf.clear();
X86_64Assembler::add_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_add_register64bit_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, src), expected) in &[
((X86_64GPReg::RAX, X86_64GPReg::RAX), [0x48, 0x01, 0xC0]),
((X86_64GPReg::RAX, X86_64GPReg::R15), [0x4C, 0x01, 0xF8]),
((X86_64GPReg::R15, X86_64GPReg::RAX), [0x49, 0x01, 0xC7]),
((X86_64GPReg::R15, X86_64GPReg::R15), [0x4D, 0x01, 0xFF]),
] {
buf.clear();
X86_64Assembler::add_register64bit_register64bit(&mut buf, *dst, *src);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_cmovl_register64bit_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, src), expected) in &[
(
(X86_64GPReg::RAX, X86_64GPReg::RAX),
[0x48, 0x0F, 0x4C, 0xC0],
),
(
(X86_64GPReg::RAX, X86_64GPReg::R15),
[0x49, 0x0F, 0x4C, 0xC7],
),
(
(X86_64GPReg::R15, X86_64GPReg::RAX),
[0x4C, 0x0F, 0x4C, 0xF8],
),
(
(X86_64GPReg::R15, X86_64GPReg::R15),
[0x4D, 0x0F, 0x4C, 0xFF],
),
] {
buf.clear();
X86_64Assembler::cmovl_register64bit_register64bit(&mut buf, *dst, *src);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_mov_register64bit_immediate32bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (dst, expected) in &[
(X86_64GPReg::RAX, [0x48, 0xC7, 0xC0]),
(X86_64GPReg::R15, [0x49, 0xC7, 0xC7]),
] {
buf.clear();
X86_64Assembler::mov_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_mov_register64bit_immediate64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (dst, expected) in &[
(X86_64GPReg::RAX, [0x48, 0xB8]),
(X86_64GPReg::R15, [0x49, 0xBF]),
] {
buf.clear();
X86_64Assembler::mov_register64bit_immediate64bit(&mut buf, *dst, TEST_I64);
assert_eq!(expected, &buf[..2]);
assert_eq!(TEST_I64.to_le_bytes(), &buf[2..]);
}
for (dst, expected) in &[
(X86_64GPReg::RAX, [0x48, 0xC7, 0xC0]),
(X86_64GPReg::R15, [0x49, 0xC7, 0xC7]),
] {
buf.clear();
X86_64Assembler::mov_register64bit_immediate64bit(&mut buf, *dst, TEST_I32 as i64);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_mov_register64bit_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, src), expected) in &[
((X86_64GPReg::RAX, X86_64GPReg::RAX), [0x48, 0x89, 0xC0]),
((X86_64GPReg::RAX, X86_64GPReg::R15), [0x4C, 0x89, 0xF8]),
((X86_64GPReg::R15, X86_64GPReg::RAX), [0x49, 0x89, 0xC7]),
((X86_64GPReg::R15, X86_64GPReg::R15), [0x4D, 0x89, 0xFF]),
] {
buf.clear();
X86_64Assembler::mov_register64bit_register64bit(&mut buf, *dst, *src);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_mov_register64bit_stackoffset32bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, offset), expected) in &[
((X86_64GPReg::RAX, TEST_I32), [0x48, 0x8B, 0x84, 0x24]),
((X86_64GPReg::R15, TEST_I32), [0x4C, 0x8B, 0xBC, 0x24]),
] {
buf.clear();
X86_64Assembler::mov_register64bit_stackoffset32bit(&mut buf, *dst, *offset);
assert_eq!(expected, &buf[..4]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[4..]);
}
}
#[test]
fn test_mov_stackoffset32bit_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((offset, src), expected) in &[
((TEST_I32, X86_64GPReg::RAX), [0x48, 0x89, 0x84, 0x24]),
((TEST_I32, X86_64GPReg::R15), [0x4C, 0x89, 0xBC, 0x24]),
] {
buf.clear();
X86_64Assembler::mov_stackoffset32bit_register64bit(&mut buf, *offset, *src);
assert_eq!(expected, &buf[..4]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[4..]);
}
}
#[test]
fn test_neg_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (reg, expected) in &[
(X86_64GPReg::RAX, [0x48, 0xF7, 0xD8]),
(X86_64GPReg::R15, [0x49, 0xF7, 0xDF]),
] {
buf.clear();
X86_64Assembler::neg_register64bit(&mut buf, *reg);
assert_eq!(expected, &buf[..]);
}
}
#[test]
fn test_ret() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
X86_64Assembler::ret(&mut buf);
assert_eq!(&[0xC3], &buf[..]);
}
#[test]
fn test_sub_register64bit_immediate32bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (dst, expected) in &[
(X86_64GPReg::RAX, [0x48, 0x81, 0xE8]),
(X86_64GPReg::R15, [0x49, 0x81, 0xEF]),
] {
buf.clear();
X86_64Assembler::sub_register64bit_immediate32bit(&mut buf, *dst, TEST_I32);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_pop_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (dst, expected) in &[
(X86_64GPReg::RAX, vec![0x58]),
(X86_64GPReg::R15, vec![0x41, 0x5F]),
] {
buf.clear();
X86_64Assembler::pop_register64bit(&mut buf, *dst);
assert_eq!(&expected[..], &buf[..]);
}
}
#[test]
fn test_push_register64bit() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for (src, expected) in &[
(X86_64GPReg::RAX, vec![0x50]),
(X86_64GPReg::R15, vec![0x41, 0x57]),
] {
buf.clear();
X86_64Assembler::push_register64bit(&mut buf, *src);
assert_eq!(&expected[..], &buf[..]);
}
}
}
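For orientation, the emitters above compose in the usual way; the following is a minimal sketch (not part of this commit, and assuming the associated functions are visible from the call site) of emitting a conventional empty frame into the same bumpalo buffer the tests use:

fn emit_empty_frame<'a>(buf: &mut bumpalo::collections::Vec<'a, u8>) {
    X86_64Assembler::push_register64bit(buf, X86_64GPReg::RBP); // push rbp
    X86_64Assembler::mov_register64bit_register64bit(buf, X86_64GPReg::RBP, X86_64GPReg::RSP); // mov rbp, rsp
    // ...a real procedure would reserve stack space here with sub_register64bit_immediate32bit...
    X86_64Assembler::mov_register64bit_register64bit(buf, X86_64GPReg::RSP, X86_64GPReg::RBP); // mov rsp, rbp
    X86_64Assembler::pop_register64bit(buf, X86_64GPReg::RBP); // pop rbp
    X86_64Assembler::ret(buf); // ret
}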

compiler/gen_dev/src/lib.rs

@ -0,0 +1,388 @@
#![warn(clippy::all, clippy::dbg_macro)]
// I'm skeptical that clippy::large_enum_variant is a good lint to have globally enabled.
//
// It warns about a performance problem where the only quick remediation is
// to allocate more on the heap, which has lots of tradeoffs - including making it
// long-term unclear which allocations *need* to happen for compilation's sake
// (e.g. recursive structures) versus those which were only added to appease clippy.
//
// Effectively optimizing data structure memory layout isn't a quick fix,
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]
use bumpalo::{collections::Vec, Bump};
use roc_collections::all::{MutMap, MutSet};
use roc_module::ident::TagName;
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::ir::{CallType, Expr, JoinPointId, Literal, Proc, Stmt};
use roc_mono::layout::{Builtin, Layout};
use target_lexicon::Triple;
mod generic64;
mod object_builder;
pub use object_builder::build_module;
mod run_roc;
pub struct Env<'a> {
pub arena: &'a Bump,
pub interns: Interns,
pub exposed_to_host: MutSet<Symbol>,
pub lazy_literals: bool,
}
// INLINED_SYMBOLS is a set of all of the functions we automatically inline if seen.
const INLINED_SYMBOLS: [Symbol; 2] = [Symbol::NUM_ABS, Symbol::NUM_ADD];
// These relocations likely will need a length.
// They may even need more definition, but this should be at least good enough for how we will use ELF.
#[allow(dead_code)]
enum Relocation<'a> {
LocalData { offset: u64, data: &'a [u8] },
LinkedFunction { offset: u64, name: &'a str },
LinkedData { offset: u64, name: &'a str },
}
trait Backend<'a>
where
Self: Sized,
{
/// new creates a new backend that will output to the specified Object.
fn new(env: &'a Env, target: &Triple) -> Result<Self, String>;
fn env(&self) -> &'a Env<'a>;
/// reset resets any registers or other values that may be occupied at the end of a procedure.
fn reset(&mut self);
/// finalize does any setup and cleanup that should happen around the procedure.
/// finalize does setup because things like stack size and jump locations are not known until the function is written.
/// For example, this can store the frame pointer and set up stack space.
/// finalize is run at the end of build_proc when all internal code is finalized.
fn finalize(&mut self) -> Result<(&'a [u8], &[Relocation]), String>;
/// build_proc creates a procedure and outputs it to the wrapped object writer.
fn build_proc(&mut self, proc: Proc<'a>) -> Result<(&'a [u8], &[Relocation]), String> {
self.reset();
// TODO: let the backend know of all the arguments.
// let start = std::time::Instant::now();
self.scan_ast(&proc.body);
self.create_free_map();
// let duration = start.elapsed();
// println!("Time to calculate lifetimes: {:?}", duration);
// println!("{:?}", self.last_seen_map());
self.build_stmt(&proc.body)?;
self.finalize()
}
/// build_stmt builds a statement and outputs it at the end of the buffer.
fn build_stmt(&mut self, stmt: &Stmt<'a>) -> Result<(), String> {
match stmt {
Stmt::Let(sym, expr, layout, following) => {
self.build_expr(sym, expr, layout)?;
self.free_symbols(stmt);
self.build_stmt(following)?;
Ok(())
}
Stmt::Ret(sym) => {
self.load_literal_symbols(&[*sym])?;
self.return_symbol(sym)?;
self.free_symbols(stmt);
Ok(())
}
x => Err(format!("the statement, {:?}, is not yet implemented", x)),
}
}
/// build_expr builds the expression for the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_expr(
&mut self,
sym: &Symbol,
expr: &Expr<'a>,
layout: &Layout<'a>,
) -> Result<(), String> {
match expr {
Expr::Literal(lit) => {
if self.env().lazy_literals {
self.literal_map().insert(*sym, lit.clone());
} else {
self.load_literal(sym, lit)?;
}
Ok(())
}
Expr::FunctionCall {
call_type: CallType::ByName(func_sym),
args,
..
} => {
match *func_sym {
Symbol::NUM_ABS => {
// Instead of calling the function, just inline it.
self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAbs, args), layout)
}
Symbol::NUM_ADD => {
// Instead of calling the function, just inline it.
self.build_expr(sym, &Expr::RunLowLevel(LowLevel::NumAdd, args), layout)
}
x => Err(format!("the function, {:?}, is not yet implemented", x)),
}
}
Expr::RunLowLevel(lowlevel, args) => {
self.build_run_low_level(sym, lowlevel, args, layout)
}
x => Err(format!("the expression, {:?}, is not yet implemented", x)),
}
}
/// build_run_low_level builds the low level operation and outputs to the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_run_low_level(
&mut self,
sym: &Symbol,
lowlevel: &LowLevel,
args: &'a [Symbol],
layout: &Layout<'a>,
) -> Result<(), String> {
// Now that the arguments are needed, load them if they are literals.
self.load_literal_symbols(args)?;
match lowlevel {
LowLevel::NumAbs => {
// TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low level method.
match layout {
Layout::Builtin(Builtin::Int64) => self.build_num_abs_i64(sym, &args[0]),
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
LowLevel::NumAdd => {
// TODO: when this is expanded to floats, deal with typecasting here, and then call the correct low level method.
match layout {
Layout::Builtin(Builtin::Int64) => {
self.build_num_add_i64(sym, &args[0], &args[1])
}
x => Err(format!("layout, {:?}, not implemented yet", x)),
}
}
x => Err(format!("low level, {:?}. is not yet implemented", x)),
}
}
/// build_num_abs_i64 stores the absolute value of src into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_abs_i64(&mut self, dst: &Symbol, src: &Symbol) -> Result<(), String>;
/// build_num_add_i64 stores the sum of src1 and src2 into dst.
/// It only deals with inputs and outputs of i64 type.
fn build_num_add_i64(
&mut self,
dst: &Symbol,
src1: &Symbol,
src2: &Symbol,
) -> Result<(), String>;
/// literal_map gets the map from symbol to literal, used for lazy loading and literal folding.
fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>>;
fn load_literal_symbols(&mut self, syms: &[Symbol]) -> Result<(), String> {
if self.env().lazy_literals {
for sym in syms {
if let Some(lit) = self.literal_map().remove(sym) {
self.load_literal(sym, &lit)?;
}
}
}
Ok(())
}
/// load_literal sets a symbol to be equal to a literal.
fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String>;
/// return_symbol moves a symbol to the correct return location for the backend.
fn return_symbol(&mut self, sym: &Symbol) -> Result<(), String>;
/// free_symbols will free all symbols for the given statement.
fn free_symbols(&mut self, stmt: &Stmt<'a>) {
if let Some(syms) = self.free_map().remove(&(stmt as *const Stmt<'a>)) {
for sym in syms {
//println!("Freeing symbol: {:?}", sym);
self.free_symbol(&sym);
}
}
}
/// free_symbol frees any registers or stack space used to hold a symbol.
fn free_symbol(&mut self, sym: &Symbol);
/// set_last_seen sets the statement a symbol was last seen in.
fn set_last_seen(&mut self, sym: Symbol, stmt: &Stmt<'a>) {
self.last_seen_map().insert(sym, stmt);
}
/// last_seen_map gets the map from symbol to when it is last seen in the function.
fn last_seen_map(&mut self) -> &mut MutMap<Symbol, *const Stmt<'a>>;
fn create_free_map(&mut self) {
let mut free_map = MutMap::default();
let arena = self.env().arena;
for (sym, stmt) in self.last_seen_map() {
let vals = free_map
.entry(*stmt)
.or_insert_with(|| bumpalo::vec![in arena]);
vals.push(*sym);
}
self.set_free_map(free_map);
}
/// free_map gets the map from a statement to the symbols that are freed after it runs.
fn free_map(&mut self) -> &mut MutMap<*const Stmt<'a>, Vec<'a, Symbol>>;
/// set_free_map sets the free map to the given map.
fn set_free_map(&mut self, map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>);
/// set_not_leaf_function lets the backend know that it is not a leaf function.
fn set_not_leaf_function(&mut self);
/// scan_ast runs through the ast and fills the last seen map.
/// It also checks whether the function is a leaf function or not.
/// This must iterate through the ast in the same way that build_stmt does, i.e. then before else.
fn scan_ast(&mut self, stmt: &Stmt<'a>) {
match stmt {
Stmt::Let(sym, expr, _, following) => {
self.set_last_seen(*sym, stmt);
match expr {
Expr::Literal(_) => {}
Expr::FunctionPointer(sym, _) => self.set_last_seen(*sym, stmt),
Expr::FunctionCall {
call_type, args, ..
} => {
for sym in *args {
self.set_last_seen(*sym, stmt);
}
match call_type {
CallType::ByName(sym) => {
// For functions that we won't inline, we should not be a leaf function.
if !INLINED_SYMBOLS.contains(sym) {
self.set_not_leaf_function();
}
}
CallType::ByPointer(sym) => {
self.set_not_leaf_function();
self.set_last_seen(*sym, stmt);
}
}
}
Expr::RunLowLevel(_, args) => {
for sym in *args {
self.set_last_seen(*sym, stmt);
}
}
Expr::ForeignCall { arguments, .. } => {
for sym in *arguments {
self.set_last_seen(*sym, stmt);
}
self.set_not_leaf_function();
}
Expr::Tag { arguments, .. } => {
for sym in *arguments {
self.set_last_seen(*sym, stmt);
}
}
Expr::Struct(syms) => {
for sym in *syms {
self.set_last_seen(*sym, stmt);
}
}
Expr::AccessAtIndex { structure, .. } => {
self.set_last_seen(*structure, stmt);
}
Expr::Array { elems, .. } => {
for sym in *elems {
self.set_last_seen(*sym, stmt);
}
}
Expr::Reuse {
symbol,
arguments,
tag_name,
..
} => {
self.set_last_seen(*symbol, stmt);
match tag_name {
TagName::Closure(sym) => {
self.set_last_seen(*sym, stmt);
}
TagName::Private(sym) => {
self.set_last_seen(*sym, stmt);
}
TagName::Global(_) => {}
}
for sym in *arguments {
self.set_last_seen(*sym, stmt);
}
}
Expr::Reset(sym) => {
self.set_last_seen(*sym, stmt);
}
Expr::EmptyArray => {}
Expr::RuntimeErrorFunction(_) => {}
}
self.scan_ast(following);
}
Stmt::Switch {
cond_symbol,
branches,
default_branch,
..
} => {
self.set_last_seen(*cond_symbol, stmt);
for (_, branch) in *branches {
self.scan_ast(branch);
}
self.scan_ast(default_branch);
}
Stmt::Cond {
cond_symbol,
branching_symbol,
pass,
fail,
..
} => {
self.set_last_seen(*cond_symbol, stmt);
self.set_last_seen(*branching_symbol, stmt);
self.scan_ast(pass);
self.scan_ast(fail);
}
Stmt::Ret(sym) => {
self.set_last_seen(*sym, stmt);
}
Stmt::Inc(sym, following) => {
self.set_last_seen(*sym, stmt);
self.scan_ast(following);
}
Stmt::Dec(sym, following) => {
self.set_last_seen(*sym, stmt);
self.scan_ast(following);
}
Stmt::Join {
parameters,
continuation,
remainder,
..
} => {
for param in *parameters {
self.set_last_seen(param.symbol, stmt);
}
self.scan_ast(continuation);
self.scan_ast(remainder);
}
Stmt::Jump(JoinPointId(sym), symbols) => {
self.set_last_seen(*sym, stmt);
for sym in *symbols {
self.set_last_seen(*sym, stmt);
}
}
Stmt::RuntimeError(_) => {}
}
}
}
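As an aside, create_free_map above is just an inversion of the last-seen map. A standalone sketch of the same idea (using std collections and hypothetical string symbols instead of the arena-backed types) looks like this:

use std::collections::HashMap;

// "symbol -> statement where it is last seen" becomes
// "statement -> symbols that can be freed once that statement has run".
fn invert_last_seen(last_seen: &HashMap<String, usize>) -> HashMap<usize, Vec<String>> {
    let mut free_map: HashMap<usize, Vec<String>> = HashMap::new();
    for (sym, stmt) in last_seen {
        free_map.entry(*stmt).or_default().push(sym.clone());
    }
    free_map
}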


@ -0,0 +1,154 @@
use crate::generic64::{x86_64, Backend64Bit};
use crate::{Backend, Env, Relocation, INLINED_SYMBOLS};
use bumpalo::collections::Vec;
use object::write;
use object::write::{Object, StandardSection, Symbol, SymbolSection};
use object::{
Architecture, BinaryFormat, Endianness, RelocationEncoding, RelocationKind, SectionKind,
SymbolFlags, SymbolKind, SymbolScope,
};
use roc_collections::all::MutMap;
use roc_module::symbol;
use roc_mono::ir::Proc;
use roc_mono::layout::Layout;
use target_lexicon::{Architecture as TargetArch, BinaryFormat as TargetBF, Triple};
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// build_module is the high level builder/delegator.
/// It takes the request to build a module and outputs the object file for the module.
pub fn build_module<'a>(
env: &'a Env,
target: &Triple,
procedures: MutMap<(symbol::Symbol, Layout<'a>), Proc<'a>>,
) -> Result<Object, String> {
let (mut output, mut backend) = match target {
Triple {
architecture: TargetArch::X86_64,
binary_format: TargetBF::Elf,
..
} => {
let backend: Backend64Bit<
x86_64::X86_64GPReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
Ok((
Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little),
backend,
))
}
x => Err(format! {
"the target, {:?}, is not yet implemented",
x}),
}?;
let text = output.section_id(StandardSection::Text);
let data_section = output.section_id(StandardSection::Data);
let comment = output.add_section(vec![], b"comment".to_vec(), SectionKind::OtherString);
output.append_section_data(
comment,
format!("\0roc dev backend version {} \0", VERSION).as_bytes(),
1,
);
// Setup layout_ids for procedure calls.
let mut layout_ids = roc_mono::layout::LayoutIds::default();
let mut procs = Vec::with_capacity_in(procedures.len(), env.arena);
for ((sym, layout), proc) in procedures {
// This is temporary until we support passing args to functions.
if INLINED_SYMBOLS.contains(&sym) {
continue;
}
let fn_name = layout_ids
.get(sym, &layout)
.to_symbol_string(sym, &env.interns);
let proc_symbol = Symbol {
name: fn_name.as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
// TODO: Depending on whether we are building a static or dynamic lib, this should change.
// We should use Dynamic -> anyone, Linkage -> static link, Compilation -> this module only.
scope: if env.exposed_to_host.contains(&sym) {
SymbolScope::Dynamic
} else {
SymbolScope::Linkage
},
weak: false,
section: SymbolSection::Section(text),
flags: SymbolFlags::None,
};
let proc_id = output.add_symbol(proc_symbol);
procs.push((fn_name, proc_id, proc));
}
// Build procedures.
for (fn_name, proc_id, proc) in procs {
let mut local_data_index = 0;
let (proc_data, relocations) = backend.build_proc(proc)?;
let proc_offset = output.add_symbol_data(proc_id, text, proc_data, 16);
for reloc in relocations {
let elfreloc = match reloc {
Relocation::LocalData { offset, data } => {
let data_symbol = write::Symbol {
name: format!("{}.data{}", fn_name, local_data_index)
.as_bytes()
.to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Data,
scope: SymbolScope::Compilation,
weak: false,
section: write::SymbolSection::Section(data_section),
flags: SymbolFlags::None,
};
local_data_index += 1;
let data_id = output.add_symbol(data_symbol);
output.add_symbol_data(data_id, data_section, data, 4);
write::Relocation {
offset: offset + proc_offset,
size: 32,
kind: RelocationKind::Relative,
encoding: RelocationEncoding::Generic,
symbol: data_id,
addend: -4,
}
}
Relocation::LinkedData { offset, name } => {
if let Some(sym_id) = output.symbol_id(name.as_bytes()) {
write::Relocation {
offset: offset + proc_offset,
size: 32,
kind: RelocationKind::GotRelative,
encoding: RelocationEncoding::Generic,
symbol: sym_id,
addend: -4,
}
} else {
return Err(format!("failed to find symbol for {:?}", name));
}
}
Relocation::LinkedFunction { offset, name } => {
if let Some(sym_id) = output.symbol_id(name.as_bytes()) {
write::Relocation {
offset: offset + proc_offset,
size: 32,
kind: RelocationKind::PltRelative,
encoding: RelocationEncoding::Generic,
symbol: sym_id,
addend: -4,
}
} else {
return Err(format!("failed to find symbol for {:?}", name));
}
}
};
output
.add_relocation(text, elfreloc)
.map_err(|e| format!("{:?}", e))?;
}
}
Ok(output)
}
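A minimal sketch of driving build_module from a caller (it mirrors what the test helper later in this commit does; the output path and the surrounding Result<(), String> function are assumptions for illustration):

let target = target_lexicon::Triple::host();
let object = build_module(&env, &target, procedures)?;
let bytes = object.write().map_err(|e| format!("{:?}", e))?;
std::fs::write("app.o", bytes).map_err(|e| format!("{:?}", e))?;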


@ -0,0 +1,31 @@
#[macro_export]
/// run_jit_function_raw runs an unwrapped jit function.
/// The function could throw an exception and break things, or worse, it could not throw an exception and break things.
/// This function is generally a bad idea with an untrusted backend, but it is being used for now for development purposes.
macro_rules! run_jit_function_raw {
($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr) => {{
let v: std::vec::Vec<roc_problem::can::Problem> = std::vec::Vec::new();
run_jit_function_raw!($lib, $main_fn_name, $ty, $transform, v)
}};
($lib: expr, $main_fn_name: expr, $ty:ty, $transform:expr, $errors:expr) => {{
unsafe {
let main: libloading::Symbol<unsafe extern "C" fn() -> $ty> = $lib
.get($main_fn_name.as_bytes())
.ok()
.ok_or(format!("Unable to JIT compile `{}`", $main_fn_name))
.expect("errored");
let result = main();
assert_eq!(
$errors,
std::vec::Vec::new(),
"Encountered errors: {:?}",
$errors
);
$transform(result)
}
}};
}
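A hypothetical call site (the library path and exported symbol name below are made up for illustration) would look like:

let lib = libloading::Library::new("/tmp/libapp.so").expect("failed to load shared library");
run_jit_function_raw!(lib, "test_main_1_exposed", i64, |result: i64| {
    assert_eq!(result, 42);
});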


@ -0,0 +1,802 @@
#[macro_use]
extern crate pretty_assertions;
#[macro_use]
extern crate indoc;
extern crate bumpalo;
extern crate libc;
#[macro_use]
mod helpers;
#[cfg(all(test, target_os = "linux", target_arch = "x86_64"))]
mod gen_num {
//use roc_std::RocOrder;
#[test]
fn i64_values() {
assert_evals_to!("0", 0, i64);
assert_evals_to!("-0", 0, i64);
assert_evals_to!("-1", -1, i64);
assert_evals_to!("1", 1, i64);
assert_evals_to!("9_000_000_000_000", 9_000_000_000_000, i64);
assert_evals_to!("-9_000_000_000_000", -9_000_000_000_000, i64);
assert_evals_to!("0b1010", 0b1010, i64);
assert_evals_to!("0o17", 0o17, i64);
assert_evals_to!("0x1000_0000_0000_0000", 0x1000_0000_0000_0000, i64);
}
#[test]
fn gen_add_i64() {
assert_evals_to!(
indoc!(
r#"
1 + 2 + 3
"#
),
6,
i64
);
}
#[test]
fn i64_force_stack() {
// This claims 33 registers. One more than Arm and RISC-V, and many more than x86-64.
assert_evals_to!(
indoc!(
r#"
a = 0
b = 1
c = 2
d = 3
e = 4
f = 5
g = 6
h = 7
i = 8
j = 9
k = 10
l = 11
m = 12
n = 13
o = 14
p = 15
q = 16
r = 17
s = 18
t = 19
u = 20
v = 21
w = 22
x = 23
y = 24
z = 25
aa = 26
ab = 27
ac = 28
ad = 29
ae = 30
af = 31
ag = 32
# This can't be one line because it causes a stack overflow in the frontend :(
tmp = a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q
tmp + r + s + t + u + v + w + x + y + z + aa + ab + ac + ad + ae + af + ag
"#
),
528,
i64
);
}
#[test]
fn i64_abs() {
assert_evals_to!("Num.abs -6", 6, i64);
assert_evals_to!("Num.abs 7", 7, i64);
assert_evals_to!("Num.abs 0", 0, i64);
assert_evals_to!("Num.abs -0", 0, i64);
assert_evals_to!("Num.abs -1", 1, i64);
assert_evals_to!("Num.abs 1", 1, i64);
assert_evals_to!("Num.abs 9_000_000_000_000", 9_000_000_000_000, i64);
assert_evals_to!("Num.abs -9_000_000_000_000", 9_000_000_000_000, i64);
}
/*
#[test]
fn f64_sqrt() {
// FIXME this works with normal types, but fails when checking uniqueness types
assert_evals_to!(
indoc!(
r#"
when Num.sqrt 100 is
Ok val -> val
Err _ -> -1
"#
),
10.0,
f64
);
}
#[test]
fn f64_round_old() {
assert_evals_to!("Num.round 3.6", 4, i64);
}
#[test]
fn f64_abs() {
assert_evals_to!("Num.abs -4.7", 4.7, f64);
assert_evals_to!("Num.abs 5.8", 5.8, f64);
}
#[test]
fn gen_if_fn() {
assert_evals_to!(
indoc!(
r#"
limitedNegate = \num ->
x =
if num == 1 then
-1
else if num == -1 then
1
else
num
x
limitedNegate 1
"#
),
-1,
i64
);
assert_evals_to!(
indoc!(
r#"
limitedNegate = \num ->
if num == 1 then
-1
else if num == -1 then
1
else
num
limitedNegate 1
"#
),
-1,
i64
);
}
#[test]
fn gen_float_eq() {
assert_evals_to!(
indoc!(
r#"
1.0 == 1.0
"#
),
true,
bool
);
}
#[test]
fn gen_add_f64() {
assert_evals_to!(
indoc!(
r#"
1.1 + 2.4 + 3
"#
),
6.5,
f64
);
}
#[test]
fn gen_wrap_add_nums() {
assert_evals_to!(
indoc!(
r#"
add2 = \num1, num2 -> num1 + num2
add2 4 5
"#
),
9,
i64
);
}
#[test]
fn gen_div_f64() {
// FIXME this works with normal types, but fails when checking uniqueness types
assert_evals_to!(
indoc!(
r#"
when 48 / 2 is
Ok val -> val
Err _ -> -1
"#
),
24.0,
f64
);
}
#[test]
fn gen_int_eq() {
assert_evals_to!(
indoc!(
r#"
4 == 4
"#
),
true,
bool
);
}
#[test]
fn gen_int_neq() {
assert_evals_to!(
indoc!(
r#"
4 != 5
"#
),
true,
bool
);
}
#[test]
fn gen_wrap_int_neq() {
assert_evals_to!(
indoc!(
r#"
wrappedNotEq : a, a -> Bool
wrappedNotEq = \num1, num2 ->
num1 != num2
wrappedNotEq 2 3
"#
),
true,
bool
);
}
#[test]
fn gen_sub_f64() {
assert_evals_to!(
indoc!(
r#"
1.5 - 2.4 - 3
"#
),
-3.9,
f64
);
}
#[test]
fn gen_sub_i64() {
assert_evals_to!(
indoc!(
r#"
1 - 2 - 3
"#
),
-4,
i64
);
}
#[test]
fn gen_mul_i64() {
assert_evals_to!(
indoc!(
r#"
2 * 4 * 6
"#
),
48,
i64
);
}
#[test]
fn gen_div_i64() {
assert_evals_to!(
indoc!(
r#"
when 1000 // 10 is
Ok val -> val
Err _ -> -1
"#
),
100,
i64
);
}
#[test]
fn gen_div_by_zero_i64() {
assert_evals_to!(
indoc!(
r#"
when 1000 // 0 is
Err DivByZero -> 99
_ -> -24
"#
),
99,
i64
);
}
#[test]
fn gen_rem_i64() {
assert_evals_to!(
indoc!(
r#"
when Num.rem 8 3 is
Ok val -> val
Err _ -> -1
"#
),
2,
i64
);
}
#[test]
fn gen_rem_div_by_zero_i64() {
assert_evals_to!(
indoc!(
r#"
when Num.rem 8 0 is
Err DivByZero -> 4
Ok _ -> -23
"#
),
4,
i64
);
}
#[test]
fn gen_is_zero_i64() {
assert_evals_to!("Num.isZero 0", true, bool);
assert_evals_to!("Num.isZero 1", false, bool);
}
#[test]
fn gen_is_positive_i64() {
assert_evals_to!("Num.isPositive 0", false, bool);
assert_evals_to!("Num.isPositive 1", true, bool);
assert_evals_to!("Num.isPositive -5", false, bool);
}
#[test]
fn gen_is_negative_i64() {
assert_evals_to!("Num.isNegative 0", false, bool);
assert_evals_to!("Num.isNegative 3", false, bool);
assert_evals_to!("Num.isNegative -2", true, bool);
}
#[test]
fn gen_is_positive_f64() {
assert_evals_to!("Num.isPositive 0.0", false, bool);
assert_evals_to!("Num.isPositive 4.7", true, bool);
assert_evals_to!("Num.isPositive -8.5", false, bool);
}
#[test]
fn gen_is_negative_f64() {
assert_evals_to!("Num.isNegative 0.0", false, bool);
assert_evals_to!("Num.isNegative 9.9", false, bool);
assert_evals_to!("Num.isNegative -4.4", true, bool);
}
#[test]
fn gen_is_zero_f64() {
assert_evals_to!("Num.isZero 0", true, bool);
assert_evals_to!("Num.isZero 0_0", true, bool);
assert_evals_to!("Num.isZero 0.0", true, bool);
assert_evals_to!("Num.isZero 1", false, bool);
}
#[test]
fn gen_is_odd() {
assert_evals_to!("Num.isOdd 4", false, bool);
assert_evals_to!("Num.isOdd 5", true, bool);
}
#[test]
fn gen_is_even() {
assert_evals_to!("Num.isEven 6", true, bool);
assert_evals_to!("Num.isEven 7", false, bool);
}
#[test]
fn sin() {
assert_evals_to!("Num.sin 0", 0.0, f64);
assert_evals_to!("Num.sin 1.41421356237", 0.9877659459922529, f64);
}
#[test]
fn cos() {
assert_evals_to!("Num.cos 0", 1.0, f64);
assert_evals_to!("Num.cos 3.14159265359", -1.0, f64);
}
#[test]
fn tan() {
assert_evals_to!("Num.tan 0", 0.0, f64);
assert_evals_to!("Num.tan 1", 1.557407724654902, f64);
}
#[test]
fn lt_i64() {
assert_evals_to!("1 < 2", true, bool);
assert_evals_to!("1 < 1", false, bool);
assert_evals_to!("2 < 1", false, bool);
assert_evals_to!("0 < 0", false, bool);
}
#[test]
fn lte_i64() {
assert_evals_to!("1 <= 1", true, bool);
assert_evals_to!("2 <= 1", false, bool);
assert_evals_to!("1 <= 2", true, bool);
assert_evals_to!("0 <= 0", true, bool);
}
#[test]
fn gt_i64() {
assert_evals_to!("2 > 1", true, bool);
assert_evals_to!("2 > 2", false, bool);
assert_evals_to!("1 > 1", false, bool);
assert_evals_to!("0 > 0", false, bool);
}
#[test]
fn gte_i64() {
assert_evals_to!("1 >= 1", true, bool);
assert_evals_to!("1 >= 2", false, bool);
assert_evals_to!("2 >= 1", true, bool);
assert_evals_to!("0 >= 0", true, bool);
}
#[test]
fn lt_f64() {
assert_evals_to!("1.1 < 1.2", true, bool);
assert_evals_to!("1.1 < 1.1", false, bool);
assert_evals_to!("1.2 < 1.1", false, bool);
assert_evals_to!("0.0 < 0.0", false, bool);
}
#[test]
fn lte_f64() {
assert_evals_to!("1.1 <= 1.1", true, bool);
assert_evals_to!("1.2 <= 1.1", false, bool);
assert_evals_to!("1.1 <= 1.2", true, bool);
assert_evals_to!("0.0 <= 0.0", true, bool);
}
#[test]
fn gt_f64() {
assert_evals_to!("2.2 > 1.1", true, bool);
assert_evals_to!("2.2 > 2.2", false, bool);
assert_evals_to!("1.1 > 2.2", false, bool);
assert_evals_to!("0.0 > 0.0", false, bool);
}
#[test]
fn gte_f64() {
assert_evals_to!("1.1 >= 1.1", true, bool);
assert_evals_to!("1.1 >= 1.2", false, bool);
assert_evals_to!("1.2 >= 1.1", true, bool);
assert_evals_to!("0.0 >= 0.0", true, bool);
}
#[test]
fn gen_order_of_arithmetic_ops() {
assert_evals_to!(
indoc!(
r#"
1 + 3 * 7 - 2
"#
),
20,
i64
);
}
#[test]
fn gen_order_of_arithmetic_ops_complex_float() {
assert_evals_to!(
indoc!(
r#"
3 - 48 * 2.0
"#
),
-93.0,
f64
);
}
#[test]
fn if_guard_bind_variable_false() {
assert_evals_to!(
indoc!(
r#"
wrapper = \{} ->
when 10 is
x if x == 5 -> 0
_ -> 42
wrapper {}
"#
),
42,
i64
);
}
#[test]
fn if_guard_bind_variable_true() {
assert_evals_to!(
indoc!(
r#"
wrapper = \{} ->
when 10 is
x if x == 10 -> 42
_ -> 0
wrapper {}
"#
),
42,
i64
);
}
#[test]
fn tail_call_elimination() {
assert_evals_to!(
indoc!(
r#"
sum = \n, accum ->
when n is
0 -> accum
_ -> sum (n - 1) (n + accum)
sum 1_000_000 0
"#
),
500000500000,
i64
);
}
#[test]
fn int_negate() {
assert_evals_to!("Num.neg 123", -123, i64);
}
#[test]
fn gen_wrap_int_neg() {
assert_evals_to!(
indoc!(
r#"
wrappedNeg = \num -> -num
wrappedNeg 3
"#
),
-3,
i64
);
}
#[test]
fn gen_basic_fn() {
assert_evals_to!(
indoc!(
r#"
always42 : Num.Num Num.Integer -> Num.Num Num.Integer
always42 = \_ -> 42
always42 5
"#
),
42,
i64
);
}
#[test]
fn int_to_float() {
assert_evals_to!("Num.toFloat 0x9", 9.0, f64);
}
#[test]
fn num_to_float() {
assert_evals_to!("Num.toFloat 9", 9.0, f64);
}
#[test]
fn float_to_float() {
assert_evals_to!("Num.toFloat 0.5", 0.5, f64);
}
#[test]
fn int_compare() {
assert_evals_to!("Num.compare 0 1", RocOrder::Lt, RocOrder);
assert_evals_to!("Num.compare 1 1", RocOrder::Eq, RocOrder);
assert_evals_to!("Num.compare 1 0", RocOrder::Gt, RocOrder);
}
#[test]
fn float_compare() {
assert_evals_to!("Num.compare 0.01 3.14", RocOrder::Lt, RocOrder);
assert_evals_to!("Num.compare 3.14 3.14", RocOrder::Eq, RocOrder);
assert_evals_to!("Num.compare 3.14 0.01", RocOrder::Gt, RocOrder);
}
#[test]
fn pow() {
assert_evals_to!("Num.pow 2.0 2.0", 4.0, f64);
}
#[test]
fn ceiling() {
assert_evals_to!("Num.ceiling 1.1", 2, i64);
}
#[test]
fn floor() {
assert_evals_to!("Num.floor 1.9", 1, i64);
}
#[test]
fn pow_int() {
assert_evals_to!("Num.powInt 2 3", 8, i64);
}
#[test]
fn atan() {
assert_evals_to!("Num.atan 10", 1.4711276743037347, f64);
}
// #[test]
// #[should_panic(expected = r#"Roc failed with message: "integer addition overflowed!"#)]
// fn int_overflow() {
// assert_evals_to!(
// indoc!(
// r#"
// 9_223_372_036_854_775_807 + 1
// "#
// ),
// 0,
// i64
// );
// }
#[test]
fn int_add_checked() {
assert_evals_to!(
indoc!(
r#"
when Num.addChecked 1 2 is
Ok v -> v
_ -> -1
"#
),
3,
i64
);
assert_evals_to!(
indoc!(
r#"
when Num.addChecked 9_223_372_036_854_775_807 1 is
Err Overflow -> -1
Ok v -> v
"#
),
-1,
i64
);
}
#[test]
fn int_add_wrap() {
assert_evals_to!(
indoc!(
r#"
Num.addWrap 9_223_372_036_854_775_807 1
"#
),
std::i64::MIN,
i64
);
}
#[test]
fn float_add_checked_pass() {
assert_evals_to!(
indoc!(
r#"
when Num.addChecked 1.0 0.0 is
Ok v -> v
Err Overflow -> -1.0
"#
),
1.0,
f64
);
}
#[test]
fn float_add_checked_fail() {
assert_evals_to!(
indoc!(
r#"
when Num.addChecked 1.7976931348623157e308 1.7976931348623157e308 is
Err Overflow -> -1
Ok v -> v
"#
),
-1.0,
f64
);
}
// #[test]
// #[should_panic(expected = r#"Roc failed with message: "float addition overflowed!"#)]
// fn float_overflow() {
// assert_evals_to!(
// indoc!(
// r#"
// 1.7976931348623157e308 + 1.7976931348623157e308
// "#
// ),
// 0.0,
// f64
// );
// }
#[test]
fn num_max_int() {
assert_evals_to!(
indoc!(
r#"
Num.maxInt
"#
),
i64::MAX,
i64
);
}
#[test]
fn num_min_int() {
assert_evals_to!(
indoc!(
r#"
Num.minInt
"#
),
i64::MIN,
i64
);
}
*/
}


@ -0,0 +1,233 @@
use libloading::Library;
use roc_build::link::{link, LinkType};
use roc_collections::all::MutMap;
use tempfile::tempdir;
fn promote_expr_to_module(src: &str) -> String {
let mut buffer = String::from("app \"test\" provides [ main ] to \"./platform\"\n\nmain =\n");
for line in src.lines() {
// indent the body!
buffer.push_str(" ");
buffer.push_str(line);
buffer.push('\n');
}
buffer
}
pub fn helper<'a>(
arena: &'a bumpalo::Bump,
src: &str,
stdlib: roc_builtins::std::StdLib,
_leak: bool,
lazy_literals: bool,
) -> (String, Vec<roc_problem::can::Problem>, Library) {
use std::path::{Path, PathBuf};
//let stdlib_mode = stdlib.mode;
let dir = tempdir().unwrap();
let filename = PathBuf::from("Test.roc");
let src_dir = Path::new("fake/test/path");
let app_o_file = dir.path().join("app.o");
let module_src;
let temp;
if src.starts_with("app") {
// this is already a module
module_src = src;
} else {
// this is an expression, promote it to a module
temp = promote_expr_to_module(src);
module_src = &temp;
}
let exposed_types = MutMap::default();
let loaded = roc_load::file::load_and_monomorphize_from_str(
arena,
filename,
&module_src,
stdlib,
src_dir,
exposed_types,
);
let mut loaded = loaded.expect("failed to load module");
use roc_load::file::MonomorphizedModule;
let MonomorphizedModule {
procedures,
interns,
exposed_to_host,
..
} = loaded;
/*
println!("=========== Procedures ==========");
println!("{:?}", procedures);
println!("=================================\n");
println!("=========== Interns ==========");
println!("{:?}", interns);
println!("=================================\n");
println!("=========== Exposed ==========");
println!("{:?}", exposed_to_host);
println!("=================================\n");
*/
debug_assert_eq!(exposed_to_host.len(), 1);
let main_fn_symbol = exposed_to_host.keys().copied().nth(0).unwrap();
let (_, main_fn_layout) = procedures
.keys()
.find(|(s, _)| *s == main_fn_symbol)
.unwrap()
.clone();
let mut layout_ids = roc_mono::layout::LayoutIds::default();
let main_fn_name = layout_ids
.get(main_fn_symbol, &main_fn_layout)
.to_symbol_string(main_fn_symbol, &interns);
let mut lines = Vec::new();
// errors whose reporting we delay (so we can see that code gen generates runtime errors)
let mut delayed_errors = Vec::new();
for (home, (module_path, src)) in loaded.sources {
use roc_reporting::report::{
can_problem, mono_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE,
};
let can_problems = loaded.can_problems.remove(&home).unwrap_or_default();
let type_problems = loaded.type_problems.remove(&home).unwrap_or_default();
let mono_problems = loaded.mono_problems.remove(&home).unwrap_or_default();
let error_count = can_problems.len() + type_problems.len() + mono_problems.len();
if error_count == 0 {
continue;
}
let src_lines: Vec<&str> = src.split('\n').collect();
let palette = DEFAULT_PALETTE;
// Report parsing and canonicalization problems
let alloc = RocDocAllocator::new(&src_lines, home, &interns);
use roc_problem::can::Problem::*;
for problem in can_problems.into_iter() {
// Ignore "unused" problems
match problem {
UnusedDef(_, _) | UnusedArgument(_, _, _) | UnusedImport(_, _) => {
delayed_errors.push(problem);
continue;
}
_ => {
let report = can_problem(&alloc, module_path.clone(), problem);
let mut buf = String::new();
report.render_color_terminal(&mut buf, &alloc, &palette);
lines.push(buf);
}
}
}
for problem in type_problems {
let report = type_problem(&alloc, module_path.clone(), problem);
let mut buf = String::new();
report.render_color_terminal(&mut buf, &alloc, &palette);
lines.push(buf);
}
for problem in mono_problems {
let report = mono_problem(&alloc, module_path.clone(), problem);
let mut buf = String::new();
report.render_color_terminal(&mut buf, &alloc, &palette);
lines.push(buf);
}
}
if !lines.is_empty() {
println!("{}", lines.join("\n"));
assert_eq!(0, 1, "Mistakes were made");
}
let env = roc_gen_dev::Env {
arena,
interns,
exposed_to_host: exposed_to_host.keys().copied().collect(),
lazy_literals,
};
let target = target_lexicon::Triple::host();
let module_object =
roc_gen_dev::build_module(&env, &target, procedures).expect("failed to compile module");
let module_out = module_object
.write()
.expect("failed to build output object");
std::fs::write(&app_o_file, module_out).expect("failed to write object to file");
let (mut child, dylib_path) = link(
&target,
app_o_file.clone(),
&[app_o_file.to_str().unwrap()],
LinkType::Dylib,
)
.expect("failed to link dynamic library");
child.wait().unwrap();
// Load the dylib
let path = dylib_path.as_path().to_str().unwrap();
// std::fs::copy(&app_o_file, "/tmp/app.o").unwrap();
// std::fs::copy(&path, "/tmp/libapp.so").unwrap();
let lib = Library::new(path).expect("failed to load shared library");
(main_fn_name, delayed_errors, lib)
}
#[macro_export]
macro_rules! assert_evals_to {
($src:expr, $expected:expr, $ty:ty) => {{
assert_evals_to!($src, $expected, $ty, (|val| val));
}};
($src:expr, $expected:expr, $ty:ty, $transform:expr) => {
// Same as above, except with an additional transformation argument.
{
assert_evals_to!($src, $expected, $ty, $transform, true);
}
};
($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr) => {
// Run both with and without lazy literal optimization.
{
assert_evals_to!($src, $expected, $ty, $transform, $leak, false);
}
{
assert_evals_to!($src, $expected, $ty, $transform, $leak, true);
}
};
($src:expr, $expected:expr, $ty:ty, $transform:expr, $leak:expr, $lazy_literals:expr) => {
use bumpalo::Bump;
use roc_gen_dev::run_jit_function_raw;
let stdlib = roc_builtins::std::standard_stdlib();
let arena = Bump::new();
let (main_fn_name, errors, lib) =
$crate::helpers::eval::helper(&arena, $src, stdlib, $leak, $lazy_literals);
let transform = |success| {
let expected = $expected;
let given = $transform(success);
assert_eq!(&given, &expected);
};
run_jit_function_raw!(lib, main_fn_name, $ty, transform, errors)
};
}


@ -0,0 +1,44 @@
extern crate bumpalo;
#[macro_use]
pub mod eval;
/// Used in the with_larger_debug_stack() function, for tests that otherwise
/// run out of stack space in debug builds (but don't in --release builds)
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 8 * 1024 * 1024;
/// Without this, some tests pass in `cargo test --release` but fail without
/// the --release flag because they run out of stack space. This increases
/// stack size for debug builds only, while leaving the stack space at the default
/// amount for release builds.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
F: FnOnce() -> (),
F: Send,
F: 'static,
{
std::thread::Builder::new()
.stack_size(EXPANDED_STACK_SIZE)
.spawn(run_test)
.expect("Error while spawning expanded dev stack size thread")
.join()
.expect("Error while joining expanded dev stack size thread")
}
/// In --release builds, don't increase the stack size. Run the test normally.
/// This way, we find out if any of our tests are blowing the stack even after
/// optimizations in release builds.
#[allow(dead_code)]
#[cfg(not(debug_assertions))]
#[inline(always)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
F: FnOnce() -> (),
F: Send,
F: 'static,
{
run_test()
}
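A hypothetical test wrapped in it (the module path and test body are illustrative only) would look like:

#[test]
fn deeply_nested_arithmetic() {
    crate::helpers::with_larger_debug_stack(|| {
        assert_evals_to!("1 + 2 + 3", 6, i64);
    })
}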


@ -2796,7 +2796,7 @@ fn exposed_from_import(entry: &ImportsEntry<'_>) -> (ModuleName, Vec<Ident>) {
(module_name.as_str().into(), exposed)
}
Package(_package_name, _exposes) => {
Package(_package_name, _module_name, _exposes) => {
todo!("TODO support exposing package-qualified module names.");
}


@ -22,7 +22,8 @@ pub enum LowLevel {
ListJoin,
ListMap,
ListKeepIf,
ListWalkRight,
ListWalk,
ListWalkBackwards,
ListSum,
NumAdd,
NumAddWrap,


@ -689,18 +689,18 @@ define_builtins! {
5 LIST_APPEND: "append"
6 LIST_MAP: "map"
7 LIST_LEN: "len"
8 LIST_FOLDL: "foldl"
9 LIST_WALK_RIGHT: "walkRight"
10 LIST_CONCAT: "concat"
11 LIST_FIRST: "first"
12 LIST_SINGLE: "single"
13 LIST_REPEAT: "repeat"
14 LIST_REVERSE: "reverse"
15 LIST_PREPEND: "prepend"
16 LIST_JOIN: "join"
17 LIST_KEEP_IF: "keepIf"
18 LIST_CONTAINS: "contains"
19 LIST_SUM: "sum"
8 LIST_WALK_BACKWARDS: "walkBackwards"
9 LIST_CONCAT: "concat"
10 LIST_FIRST: "first"
11 LIST_SINGLE: "single"
12 LIST_REPEAT: "repeat"
13 LIST_REVERSE: "reverse"
14 LIST_PREPEND: "prepend"
15 LIST_JOIN: "join"
16 LIST_KEEP_IF: "keepIf"
17 LIST_CONTAINS: "contains"
18 LIST_SUM: "sum"
19 LIST_WALK: "walk"
}
5 RESULT: "Result" => {
0 RESULT_RESULT: "Result" imported // the Result.Result type alias


@ -535,7 +535,8 @@ pub fn lowlevel_borrow_signature(arena: &Bump, op: LowLevel) -> &[bool] {
ListMap => arena.alloc_slice_copy(&[owned, irrelevant]),
ListKeepIf => arena.alloc_slice_copy(&[owned, irrelevant]),
ListContains => arena.alloc_slice_copy(&[borrowed, irrelevant]),
ListWalkRight => arena.alloc_slice_copy(&[borrowed, irrelevant, owned]),
ListWalk => arena.alloc_slice_copy(&[borrowed, irrelevant, owned]),
ListWalkBackwards => arena.alloc_slice_copy(&[borrowed, irrelevant, owned]),
ListSum => arena.alloc_slice_copy(&[borrowed]),
Eq | NotEq | And | Or | NumAdd | NumAddWrap | NumAddChecked | NumSub | NumMul | NumGt


@ -412,7 +412,7 @@ fn test_at_path<'a>(selected_path: &Path, branch: &Branch<'a>, all_tests: &mut V
arguments.push((Pattern::Underscore, destruct.layout.clone()));
}
DestructType::Optional(_expr) => {
arguments.push((Pattern::Underscore, destruct.layout.clone()));
// do nothing
}
}
}
@ -540,11 +540,15 @@ fn to_relevant_branch_help<'a>(
..
} => {
debug_assert!(test_name == &TagName::Global(RECORD_TAG_NAME.into()));
let sub_positions = destructs.into_iter().enumerate().map(|(index, destruct)| {
let sub_positions = destructs
.into_iter()
.filter(|destruct| !matches!(destruct.typ, DestructType::Optional(_)))
.enumerate()
.map(|(index, destruct)| {
let pattern = match destruct.typ {
DestructType::Guard(guard) => guard.clone(),
DestructType::Required => Pattern::Underscore,
DestructType::Optional(_expr) => Pattern::Underscore,
DestructType::Optional(_expr) => unreachable!("because of the filter"),
};
(


@ -907,7 +907,13 @@ where
if PRETTY_PRINT_IR_SYMBOLS {
alloc.text(format!("{:?}", symbol))
} else {
alloc.text(format!("{}", symbol))
let text = format!("{}", symbol);
if text.starts_with('.') {
alloc.text("Test").append(text)
} else {
alloc.text(text)
}
}
}
@ -917,7 +923,7 @@ where
D::Doc: Clone,
A: Clone,
{
alloc.text(format!("{}", symbol.0))
symbol_to_doc(alloc, symbol.0)
}
impl<'a> Expr<'a> {
@ -1101,7 +1107,9 @@ impl<'a> Stmt<'a> {
.chain(std::iter::once(default_doc));
//
alloc
.text(format!("switch {}:", cond_symbol))
.text("switch ")
.append(symbol_to_doc(alloc, *cond_symbol))
.append(":")
.append(alloc.hardline())
.append(
alloc.intersperse(branches_docs, alloc.hardline().append(alloc.hardline())),
@ -1115,7 +1123,9 @@ impl<'a> Stmt<'a> {
fail,
..
} => alloc
.text(format!("if {} then", branching_symbol))
.text("if ")
.append(symbol_to_doc(alloc, *branching_symbol))
.append(" then")
.append(alloc.hardline())
.append(pass.to_doc(alloc).indent(4))
.append(alloc.hardline())
@ -2384,7 +2394,7 @@ pub fn with_hole<'a>(
Tag {
variant_var,
name: tag_name,
arguments: args,
arguments: mut args,
..
} => {
use crate::layout::UnionVariant::*;
@ -2421,11 +2431,34 @@ pub fn with_hole<'a>(
}
Unwrapped(field_layouts) => {
let mut field_symbols_temp =
Vec::with_capacity_in(field_layouts.len(), env.arena);
for (var, arg) in args.drain(..) {
// Layout will unpack this unwrapped tag if it only has one (non-zero-sized) field
let layout = layout_cache
.from_var(env.arena, var, env.subs)
.unwrap_or_else(|err| {
panic!("TODO turn fn_var into a RuntimeError {:?}", err)
});
let alignment = layout.alignment_bytes(8);
let symbol = possible_reuse_symbol(env, procs, &arg.value);
field_symbols_temp.push((
alignment,
symbol,
((var, arg), &*env.arena.alloc(symbol)),
));
}
field_symbols_temp.sort_by(|a, b| b.0.cmp(&a.0));
let mut field_symbols = Vec::with_capacity_in(field_layouts.len(), env.arena);
for (_, arg) in args.iter() {
field_symbols.push(possible_reuse_symbol(env, procs, &arg.value));
for (_, symbol, _) in field_symbols_temp.iter() {
field_symbols.push(*symbol);
}
let field_symbols = field_symbols.into_bump_slice();
// Layout will unpack this unwrapped tag if it only has one (non-zero-sized) field
@ -2438,7 +2471,7 @@ pub fn with_hole<'a>(
// even though this was originally a Tag, we treat it as a Struct from now on
let stmt = Stmt::Let(assigned, Expr::Struct(field_symbols), layout, hole);
let iter = args.into_iter().rev().zip(field_symbols.iter().rev());
let iter = field_symbols_temp.into_iter().map(|(_, _, data)| data);
assign_to_symbols(env, procs, layout_cache, iter, stmt)
}
Wrapped(sorted_tag_layouts) => {
@ -2449,12 +2482,33 @@ pub fn with_hole<'a>(
.find(|(_, (key, _))| key == &tag_name)
.expect("tag must be in its own type");
let mut field_symbols_temp = Vec::with_capacity_in(args.len(), env.arena);
for (var, arg) in args.drain(..) {
// Layout will unpack this unwrapped tag if it only has one (non-zero-sized) field
let layout = layout_cache
.from_var(env.arena, var, env.subs)
.unwrap_or_else(|err| {
panic!("TODO turn fn_var into a RuntimeError {:?}", err)
});
let alignment = layout.alignment_bytes(8);
let symbol = possible_reuse_symbol(env, procs, &arg.value);
field_symbols_temp.push((
alignment,
symbol,
((var, arg), &*env.arena.alloc(symbol)),
));
}
field_symbols_temp.sort_by(|a, b| b.0.cmp(&a.0));
let mut field_symbols: Vec<Symbol> = Vec::with_capacity_in(args.len(), arena);
let tag_id_symbol = env.unique_symbol();
field_symbols.push(tag_id_symbol);
for (_, arg) in args.iter() {
field_symbols.push(possible_reuse_symbol(env, procs, &arg.value));
for (_, symbol, _) in field_symbols_temp.iter() {
field_symbols.push(*symbol);
}
let mut layouts: Vec<&'a [Layout<'a>]> =
@ -2475,7 +2529,11 @@ pub fn with_hole<'a>(
};
let mut stmt = Stmt::Let(assigned, tag, layout, hole);
let iter = args.into_iter().rev().zip(field_symbols.iter().rev());
let iter = field_symbols_temp
.drain(..)
.map(|x| x.2 .0)
.rev()
.zip(field_symbols.iter().rev());
stmt = assign_to_symbols(env, procs, layout_cache, iter, stmt);
@ -5290,6 +5348,20 @@ pub fn from_can_pattern<'a>(
}],
};
let mut arguments = arguments.clone();
arguments.sort_by(|arg1, arg2| {
let ptr_bytes = 8;
let layout1 = layout_cache.from_var(env.arena, arg1.0, env.subs).unwrap();
let layout2 = layout_cache.from_var(env.arena, arg2.0, env.subs).unwrap();
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1)
});
let mut mono_args = Vec::with_capacity_in(arguments.len(), env.arena);
for ((_, loc_pat), layout) in arguments.iter().zip(field_layouts.iter()) {
mono_args.push((
@ -5333,6 +5405,20 @@ pub fn from_can_pattern<'a>(
let mut mono_args = Vec::with_capacity_in(arguments.len(), env.arena);
// disregard the tag discriminant layout
let mut arguments = arguments.clone();
arguments.sort_by(|arg1, arg2| {
let ptr_bytes = 8;
let layout1 = layout_cache.from_var(env.arena, arg1.0, env.subs).unwrap();
let layout2 = layout_cache.from_var(env.arena, arg2.0, env.subs).unwrap();
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1)
});
// TODO make this assert pass, it currently does not because
// 0-sized values are dropped out
// debug_assert_eq!(arguments.len(), argument_layouts[1..].len());
@ -5374,8 +5460,8 @@ pub fn from_can_pattern<'a>(
// sorted fields based on the destruct
let mut mono_destructs = Vec::with_capacity_in(destructs.len(), env.arena);
let mut destructs = destructs.clone();
destructs.sort_by(|a, b| a.value.label.cmp(&b.value.label));
let destructs_by_label = env.arena.alloc(MutMap::default());
destructs_by_label.extend(destructs.iter().map(|x| (&x.value.label, x)));
let mut field_layouts = Vec::with_capacity_in(sorted_fields.len(), env.arena);
@ -5387,27 +5473,25 @@ pub fn from_can_pattern<'a>(
// in the source the field is not matched in the source language.
//
// Optional fields somewhat complicate the matter here
let mut it1 = sorted_fields.into_iter();
let mut opt_sorted = it1.next();
let mut it2 = destructs.iter();
let mut opt_destruct = it2.next();
for (label, variable, res_layout) in sorted_fields.into_iter() {
match res_layout {
Ok(field_layout) => {
// the field is non-optional according to the type
loop {
match (opt_sorted, opt_destruct) {
(Some((label, variable, Ok(field_layout))), Some(destruct)) => {
if destruct.value.label == label {
match destructs_by_label.remove(&label) {
Some(destruct) => {
// this field is destructured by the pattern
mono_destructs.push(from_can_record_destruct(
env,
layout_cache,
&destruct.value,
field_layout.clone(),
));
opt_sorted = it1.next();
opt_destruct = it2.next();
} else {
// insert underscore pattern
}
None => {
// this field is not destructured by the pattern
// put in an underscore
mono_destructs.push(RecordDestruct {
label: label.clone(),
symbol: env.unique_symbol(),
@ -5415,15 +5499,17 @@ pub fn from_can_pattern<'a>(
layout: field_layout.clone(),
typ: DestructType::Guard(Pattern::Underscore),
});
opt_sorted = it1.next();
}
}
// the layout of this field is part of the layout of the record
field_layouts.push(field_layout);
}
(Some((label, variable, Err(field_layout))), Some(destruct)) => {
if destruct.value.label == label {
opt_destruct = it2.next();
Err(field_layout) => {
// the field is optional according to the type
match destructs_by_label.remove(&label) {
Some(destruct) => {
// this field is destructured by the pattern
mono_destructs.push(RecordDestruct {
label: destruct.value.label.clone(),
symbol: destruct.value.symbol,
@ -5441,12 +5527,9 @@ pub fn from_can_pattern<'a>(
},
});
}
opt_sorted = it1.next();
}
(Some((label, variable, Err(field_layout))), None) => {
// the remainder of the fields (from the type) is not matched on in
// this pattern; to fill it out, we put underscores
None => {
// this field is not destructured by the pattern
// put in an underscore
mono_destructs.push(RecordDestruct {
label: label.clone(),
symbol: env.unique_symbol(),
@ -5454,26 +5537,14 @@ pub fn from_can_pattern<'a>(
layout: field_layout.clone(),
typ: DestructType::Guard(Pattern::Underscore),
});
opt_sorted = it1.next();
}
}
}
}
}
(Some((label, variable, Ok(field_layout))), None) => {
// the remainder of the fields (from the type) is not matched on in
// this pattern; to fill it out, we put underscores
mono_destructs.push(RecordDestruct {
label: label.clone(),
symbol: env.unique_symbol(),
variable,
layout: field_layout.clone(),
typ: DestructType::Guard(Pattern::Underscore),
});
field_layouts.push(field_layout);
opt_sorted = it1.next();
}
(None, Some(destruct)) => {
// destruct is not in the type, but is in the pattern
for (_, destruct) in destructs_by_label.drain() {
// this destruct is not in the type, but is in the pattern
// it must be an optional field, and we will use the default
match &destruct.value.typ {
roc_can::pattern::DestructType::Optional(field_var, loc_expr) => {
@ -5493,14 +5564,6 @@ pub fn from_can_pattern<'a>(
}
_ => unreachable!("only optional destructs can be optional fields"),
}
opt_sorted = None;
opt_destruct = it2.next();
}
(None, None) => {
break;
}
}
}
Pattern::RecordDestructure(


@ -4,6 +4,7 @@ use roc_collections::all::{default_hasher, MutMap, MutSet};
use roc_module::ident::{Lowercase, TagName};
use roc_module::symbol::{Interns, Symbol};
use roc_types::subs::{Content, FlatType, Subs, Variable};
use roc_types::types::RecordField;
use std::collections::HashMap;
pub const MAX_ENUM_SIZE: usize = (std::mem::size_of::<u8>() * 8) as usize;
@ -789,59 +790,30 @@ fn layout_from_flat_type<'a>(
}
}
Record(fields, ext_var) => {
// Sort the fields by label
let mut sorted_fields = Vec::with_capacity_in(fields.len(), arena);
sorted_fields.extend(fields.into_iter());
// extract any values from the ext_var
let mut fields_map = MutMap::default();
fields_map.extend(fields);
match roc_types::pretty_print::chase_ext_record(subs, ext_var, &mut fields_map) {
Ok(()) | Err((_, Content::FlexVar(_))) => {}
Err(_) => unreachable!("this would have been a type error"),
}
sorted_fields.extend(fields_map.into_iter());
sorted_fields.sort_by(|(label1, _), (label2, _)| label1.cmp(label2));
let sorted_fields = sort_record_fields_help(env, fields_map);
// Determine the layouts of the fields, maintaining sort order
let mut layouts = Vec::with_capacity_in(sorted_fields.len(), arena);
for (label, field) in sorted_fields {
use LayoutProblem::*;
let field_var = {
use roc_types::types::RecordField::*;
match field {
Optional(_) => {
// when an optional field reaches this stage, the field was truly
// optional, and not unified to be demanded or required
// therefore, there is no such field on the record, and we ignore this
// field from now on.
continue;
}
Required(var) => var,
Demanded(var) => var,
}
};
match Layout::from_var(env, field_var) {
for (_, _, res_layout) in sorted_fields {
match res_layout {
Ok(layout) => {
// Drop any zero-sized fields like {}.
if !layout.is_dropped_because_empty() {
layouts.push(layout);
}
}
Err(UnresolvedTypeVar(v)) => {
// Invalid field!
panic!(
r"I hit an unresolved type var {:?} when determining the layout of {:?} of record field: {:?} : {:?}",
field_var, v, label, field
);
}
Err(Erroneous) => {
// Invalid field!
panic!("TODO gracefully handle record with invalid field.var");
Err(_) => {
// optional field, ignore
continue;
}
}
}
@ -894,6 +866,15 @@ fn layout_from_flat_type<'a>(
tag_layout.push(Layout::from_var(env, var)?);
}
tag_layout.sort_by(|layout1, layout2| {
let ptr_bytes = 8;
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1)
});
tag_layouts.push(tag_layout.into_bump_slice());
}
@ -924,24 +905,30 @@ pub fn sort_record_fields<'a>(
};
match roc_types::pretty_print::chase_ext_record(subs, var, &mut fields_map) {
Ok(()) | Err((_, Content::FlexVar(_))) => {
// Sort the fields by label
let mut sorted_fields = Vec::with_capacity_in(fields_map.len(), arena);
Ok(()) | Err((_, Content::FlexVar(_))) => sort_record_fields_help(&mut env, fields_map),
Err(other) => panic!("invalid content in record variable: {:?}", other),
}
}
fn sort_record_fields_help<'a>(
env: &mut Env<'a, '_>,
fields_map: MutMap<Lowercase, RecordField<Variable>>,
) -> Vec<'a, (Lowercase, Variable, Result<Layout<'a>, Layout<'a>>)> {
// Sort the fields by label
let mut sorted_fields = Vec::with_capacity_in(fields_map.len(), env.arena);
use roc_types::types::RecordField;
for (label, field) in fields_map {
let var = match field {
RecordField::Demanded(v) => v,
RecordField::Required(v) => v,
RecordField::Optional(v) => {
let layout =
Layout::from_var(&mut env, v).expect("invalid layout from var");
let layout = Layout::from_var(env, v).expect("invalid layout from var");
sorted_fields.push((label, v, Err(layout)));
continue;
}
};
let layout = Layout::from_var(&mut env, var).expect("invalid layout from var");
let layout = Layout::from_var(env, var).expect("invalid layout from var");
// Drop any zero-sized fields like {}
if !layout.is_dropped_because_empty() {
@ -949,12 +936,22 @@ pub fn sort_record_fields<'a>(
}
}
sorted_fields.sort_by(|(label1, _, _), (label2, _, _)| label1.cmp(label2));
sorted_fields.sort_by(
|(label1, _, res_layout1), (label2, _, res_layout2)| match res_layout1 {
Ok(layout1) | Err(layout1) => match res_layout2 {
Ok(layout2) | Err(layout2) => {
let ptr_bytes = 8;
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1).then(label1.cmp(label2))
}
},
},
);
sorted_fields
}
Err(other) => panic!("invalid content in record variable: {:?}", other),
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
@ -1059,6 +1056,15 @@ pub fn union_sorted_tags_help<'a>(
}
}
layouts.sort_by(|layout1, layout2| {
let ptr_bytes = 8;
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1)
});
if layouts.is_empty() {
if contains_zero_sized {
UnionVariant::UnitWithArguments
@ -1102,6 +1108,15 @@ pub fn union_sorted_tags_help<'a>(
}
}
arg_layouts.sort_by(|layout1, layout2| {
let ptr_bytes = 8;
let size1 = layout1.alignment_bytes(ptr_bytes);
let size2 = layout2.alignment_bytes(ptr_bytes);
size2.cmp(&size1)
});
answer.push((tag_name, arg_layouts.into_bump_slice()));
}
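For intuition about the alignment-descending ordering used here (a worked illustration assuming C-like layout rules, not text from this diff): with 8-byte pointers, a payload written as (I32, I64, I32) would need 4 + 4 (padding) + 8 + 4 = 20 bytes, rounded up to 24, while the sorted order (I64, I32, I32) packs into exactly 16 bytes.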

File diff suppressed because it is too large.


@ -544,7 +544,7 @@ fn annotation<'a>(
ascii_char(b':'),
// Spaces after the ':' (at a normal indentation level) and then the type.
// The type itself must be indented more than the pattern and ':'
space0_before(type_annotation::located(indented_more), indented_more)
space0_before(type_annotation::located(indented_more), min_indent)
)
)
}
@ -835,7 +835,7 @@ fn parse_def_signature<'a>(
// It should be indented more than the original, and it will
// end when outdented again.
and_then_with_indent_level(
type_annotation::located(indented_more),
space0_before(type_annotation::located(indented_more), min_indent),
// The first annotation may be immediately (spaces_then_comment_or_newline())
// followed by a body at the exact same indent_level
// leading to an AnnotatedBody in this case


@ -177,7 +177,11 @@ pub enum ImportsEntry<'a> {
Module(ModuleName<'a>, Vec<'a, Loc<ExposesEntry<'a, &'a str>>>),
/// e.g. `base.Task` or `base.Task.{ after }` or `base.{ Task.{ Task, after } }`
Package(&'a str, Vec<'a, Loc<&'a ImportsEntry<'a>>>),
Package(
&'a str,
ModuleName<'a>,
Vec<'a, Loc<ExposesEntry<'a, &'a str>>>,
),
// Spaces
SpaceBefore(&'a ImportsEntry<'a>, &'a [CommentOrNewline<'a>]),

View file

@ -544,11 +544,16 @@ fn typed_ident<'a>() -> impl Parser<'a, TypedIdent<'a>> {
}
#[inline(always)]
#[allow(clippy::type_complexity)]
fn imports_entry<'a>() -> impl Parser<'a, ImportsEntry<'a>> {
map_with_arena!(
and!(
and!(
// e.g. `base.`
optional(skip_second!(lowercase_ident(), ascii_char(b'.'))),
// e.g. `Task`
module_name(),
module_name()
),
// e.g. `.{ Task, after}`
optional(skip_first!(
ascii_char(b'.'),
@ -562,13 +567,17 @@ fn imports_entry<'a>() -> impl Parser<'a, ImportsEntry<'a>> {
))
),
|arena,
(module_name, opt_values): (
ModuleName<'a>,
((opt_shortname, module_name), opt_values): (
(Option<&'a str>, ModuleName<'a>),
Option<Vec<'a, Located<ExposesEntry<'a, &'a str>>>>
)| {
let exposed_values = opt_values.unwrap_or_else(|| Vec::new_in(arena));
ImportsEntry::Module(module_name, exposed_values)
match opt_shortname {
Some(shortname) => ImportsEntry::Package(shortname, module_name, exposed_values),
None => ImportsEntry::Module(module_name, exposed_values),
}
}
)
}
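With this change the parser accepts an optional lowercase package shorthand before the module name, so `imports [ foo.Bar.Baz ]` produces ImportsEntry::Package("foo", ...) while `imports [ Bar.Baz ]` still produces ImportsEntry::Module. A loose standalone sketch of that classification, away from the parser combinators (the helper and its string-based result are hypothetical, not part of the parser):
// Hypothetical helper: split an import like "foo.Bar.Baz" into an optional
// package shorthand plus the module path, mirroring the parser's two branches.
fn classify_import(text: &str) -> (Option<&str>, &str) {
    match text.find('.') {
        // A leading all-lowercase segment is treated as a package shorthand.
        Some(idx) if text[..idx].chars().all(|c| c.is_ascii_lowercase()) => {
            (Some(&text[..idx]), &text[idx + 1..])
        }
        // Otherwise the whole thing is a plain module name.
        _ => (None, text),
    }
}
fn main() {
    assert_eq!(classify_import("foo.Bar.Baz"), (Some("foo"), "Bar.Baz"));
    assert_eq!(classify_import("Bar.Baz"), (None, "Bar.Baz"));
}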

View file

@ -27,8 +27,8 @@ mod test_parse {
self, Attempting, Def, EscapedChar, Spaceable, TypeAnnotation, WhenBranch,
};
use roc_parse::header::{
AppHeader, Effects, ExposesEntry, InterfaceHeader, ModuleName, PackageEntry, PackageName,
PackageOrPath, PlatformHeader, To,
AppHeader, Effects, ExposesEntry, ImportsEntry, InterfaceHeader, ModuleName, PackageEntry,
PackageName, PackageOrPath, PlatformHeader, To,
};
use roc_parse::module::{app_header, interface_header, module_defs, platform_header};
use roc_parse::parser::{Fail, FailReason, Parser, State};
@ -1775,6 +1775,44 @@ mod test_parse {
);
}
#[test]
fn multiline_type_signature_with_comment() {
assert_parses_to(
"f :# comment\n {}\n\n42",
Defs(
&[&Located::new(
0,
1,
0,
6,
Def::Annotation(
Located::new(0, 0, 0, 1, Pattern::Identifier("f")),
Located::new(
1,
1,
4,
6,
TypeAnnotation::SpaceBefore(
&TypeAnnotation::Record {
fields: &[],
ext: None,
final_comments: &[],
},
&[LineComment(" comment")],
),
),
),
)],
&Located::new(
3,
3,
0,
2,
Expr::SpaceBefore(&Expr::Num("42"), &[Newline, Newline]),
),
),
);
}
// #[test]
// fn type_signature_function_def() {
// use TypeAnnotation;
@ -2320,16 +2358,19 @@ mod test_parse {
use ExposesEntry::Exposed;
use PackageOrPath::Path;
let newlines = &[Newline];
let pkg_entry = PackageEntry::Entry {
shorthand: "base",
spaces_after_shorthand: &[],
package_or_path: Located::new(0, 0, 33, 45, Path(PlainLine("./platform"))),
package_or_path: Located::new(1, 1, 21, 33, Path(PlainLine("./platform"))),
};
let loc_pkg_entry = Located::new(0, 0, 27, 45, pkg_entry);
let loc_pkg_entry = Located::new(1, 1, 15, 33, pkg_entry);
let arena = Bump::new();
let packages = bumpalo::vec![in &arena; loc_pkg_entry];
let imports = Vec::new_in(&arena);
let provide_entry = Located::new(0, 0, 59, 68, Exposed("quicksort"));
let import = ImportsEntry::Package("foo", ModuleName::new("Bar.Baz"), Vec::new_in(&arena));
let loc_import = Located::new(2, 2, 14, 25, import);
let imports = bumpalo::vec![in &arena; loc_import];
let provide_entry = Located::new(3, 3, 15, 24, Exposed("quicksort"));
let provides = bumpalo::vec![in &arena; provide_entry];
let module_name = StrLiteral::PlainLine("quicksort");
let expected = AppHeader {
@ -2337,13 +2378,13 @@ mod test_parse {
packages,
imports,
provides,
to: Located::new(0, 0, 74, 78, To::ExistingPackage("base")),
to: Located::new(3, 3, 30, 34, To::ExistingPackage("base")),
after_app_keyword: &[],
before_packages: &[],
before_packages: newlines,
after_packages: &[],
before_imports: &[],
before_imports: newlines,
after_imports: &[],
before_provides: &[],
before_provides: newlines,
after_provides: &[],
before_to: &[],
after_to: &[],
@ -2351,7 +2392,10 @@ mod test_parse {
let src = indoc!(
r#"
app "quicksort" packages { base: "./platform" } provides [ quicksort ] to base
app "quicksort"
packages { base: "./platform" }
imports [ foo.Bar.Baz ]
provides [ quicksort ] to base
"#
);
let actual = app_header()

View file

@ -2928,11 +2928,11 @@ mod solve_expr {
}
#[test]
fn list_walk_right() {
fn list_walk_backwards() {
infer_eq_without_problem(
indoc!(
r#"
List.walkRight
List.walkBackwards
"#
),
"List a, (a, b -> b), b -> b",
@ -2940,7 +2940,7 @@ mod solve_expr {
}
#[test]
fn list_walk_right_example() {
fn list_walk_backwards_example() {
infer_eq_without_problem(
indoc!(
r#"
@ -2948,7 +2948,7 @@ mod solve_expr {
empty =
[]
List.walkRight empty (\a, b -> a + b) 0
List.walkBackwards empty (\a, b -> a + b) 0
"#
),
"Int",

View file

@ -2236,11 +2236,11 @@ mod solve_uniq_expr {
}
#[test]
fn list_walk_right_sum() {
fn list_walk_backwards_sum() {
infer_eq(
indoc!(
r#"
sum = \list -> List.walkRight list Num.add 0
sum = \list -> List.walkBackwards list Num.add 0
sum
"#
@ -2321,11 +2321,11 @@ mod solve_uniq_expr {
}
#[test]
fn list_walk_right_reverse() {
fn list_walk_backwards_reverse() {
infer_eq(
indoc!(
r#"
reverse = \list -> List.walkRight list (\e, l -> List.append l e) []
reverse = \list -> List.walkBackwards list (\e, l -> List.append l e) []
reverse
"#
@ -3133,11 +3133,11 @@ mod solve_uniq_expr {
}
#[test]
fn list_walk_right() {
fn list_walk_backwards() {
infer_eq(
indoc!(
r#"
List.walkRight
List.walkBackwards
"#
),
"Attr * (Attr (* | b) (List (Attr b a)), Attr Shared (Attr b a, c -> c), c -> c)",
@ -3145,7 +3145,7 @@ mod solve_uniq_expr {
}
#[test]
fn list_walk_right_example() {
fn list_walk_backwards_example() {
infer_eq(
indoc!(
r#"
@ -3153,7 +3153,7 @@ mod solve_uniq_expr {
empty =
[]
List.walkRight empty (\a, b -> a + b) 0
List.walkBackwards empty (\a, b -> a + b) 0
"#
),
"Attr a Int",

View file

@ -70,6 +70,8 @@ Thoughts and ideas possibly taken from above inspirations or separate.
* Makes sense for unit tests, keeps the test close to the source
* Doesn't necessarily make sense for integration or e2e testing
* Maybe easier to manually trigger a test related to exactly what code you're writing
* Ability to generate unit tests for a selected function from the context menu
* A table should appear for quickly entering input and expected output pairs
* "Error mode" where the editor jumps you to the next error
* Similar in theory to diff tools that jump you to the next merge conflict
* dependency recommendation

View file

@ -0,0 +1,7 @@
app Main provides [ rocMain ] imports [ Effect ]
rocMain : Effect.Effect {} as Fx
rocMain =
when List.len (Str.split "hello" "JJJJ there") is
_ -> Effect.putLine "Yay"

23
examples/balance/platform/Cargo.lock generated Normal file
View file

@ -0,0 +1,23 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "host"
version = "0.1.0"
dependencies = [
"roc_std 0.1.0",
]
[[package]]
name = "libc"
version = "0.2.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "roc_std"
version = "0.1.0"
dependencies = [
"libc 0.2.79 (registry+https://github.com/rust-lang/crates.io-index)",
]
[metadata]
"checksum libc 0.2.79 (registry+https://github.com/rust-lang/crates.io-index)" = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743"

View file

@ -0,0 +1,13 @@
[package]
name = "host"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
[lib]
crate-type = ["staticlib"]
[dependencies]
roc_std = { path = "../../../roc_std" }
[workspace]

View file

@ -0,0 +1,13 @@
platform folkertdev/foo
provides [ mainForHost ]
requires { main : Effect {} }
imports []
effects Effect
{
putChar : Int -> Effect {},
putLine : Str -> Effect {},
getLine : Effect Str
}
mainForHost : Effect {} as Fx
mainForHost = main

View file

@ -0,0 +1,7 @@
#include <stdio.h>
extern int rust_main();
int main() {
return rust_main();
}

View file

@ -0,0 +1,118 @@
#![allow(non_snake_case)]
use roc_std::alloca;
use roc_std::RocCallResult;
use roc_std::RocStr;
use std::alloc::Layout;
use std::time::SystemTime;
extern "C" {
#[link_name = "Main_rocMain_1_exposed"]
fn roc_main(output: *mut u8) -> ();
#[link_name = "Main_rocMain_1_size"]
fn roc_main_size() -> i64;
#[link_name = "Main_rocMain_1_Fx_caller"]
fn call_Fx(function_pointer: *const u8, closure_data: *const u8, output: *mut u8) -> ();
#[link_name = "Main_rocMain_1_Fx_size"]
fn size_Fx() -> i64;
}
#[no_mangle]
pub fn roc_fx_putChar(foo: i64) -> () {
let character = foo as u8 as char;
print!("{}", character);
()
}
#[no_mangle]
pub fn roc_fx_putLine(line: RocStr) -> () {
let bytes = line.as_slice();
let string = unsafe { std::str::from_utf8_unchecked(bytes) };
println!("{}", string);
()
}
#[no_mangle]
pub fn roc_fx_getLine() -> RocStr {
use std::io::{self, BufRead};
let stdin = io::stdin();
let line1 = stdin.lock().lines().next().unwrap().unwrap();
RocStr::from_slice_with_capacity(line1.as_bytes(), line1.len())
}
unsafe fn call_the_closure(function_pointer: *const u8, closure_data_ptr: *const u8) -> i64 {
let size = size_Fx() as usize;
alloca::with_stack_bytes(size, |buffer| {
let buffer: *mut std::ffi::c_void = buffer;
let buffer: *mut u8 = buffer as *mut u8;
call_Fx(
function_pointer,
closure_data_ptr as *const u8,
buffer as *mut u8,
);
let output = &*(buffer as *mut RocCallResult<i64>);
// match output.into() {
// Ok(v) => v,
// Err(e) => panic!("failed with {}", e),
// }
32
})
}
#[no_mangle]
pub fn rust_main() -> isize {
println!("Running Roc closure");
let start_time = SystemTime::now();
let size = unsafe { roc_main_size() } as usize;
let layout = Layout::array::<u8>(size).unwrap();
let answer = unsafe {
let buffer = std::alloc::alloc(layout);
roc_main(buffer);
let output = &*(buffer as *mut RocCallResult<()>);
match output.into() {
Ok(()) => {
let function_pointer = {
// this is a pointer to the location where the function pointer is stored
// we pass just the function pointer
let temp = buffer.offset(8) as *const i64;
(*temp) as *const u8
};
let closure_data_ptr = buffer.offset(16);
call_the_closure(function_pointer as *const u8, closure_data_ptr as *const u8)
}
Err(msg) => {
std::alloc::dealloc(buffer, layout);
panic!("Roc failed with message: {}", msg);
}
}
};
let end_time = SystemTime::now();
let duration = end_time.duration_since(start_time).unwrap();
println!(
"Roc execution took {:.4} ms",
duration.as_secs_f64() * 1000.0,
);
// Exit code
0
}

View file

@ -6,4 +6,3 @@ app "effect-example"
main : Task {}
main =
Task.putLine "Hello world"

View file

@ -1,4 +1,4 @@
interface RBTree exposes [ Dict, empty, size, singleton, isEmpty, insert, remove, update, fromList, toList ] imports []
interface RBTree exposes [ Dict, empty, size, singleton, isEmpty, insert, remove, update, fromList, toList, balance ] imports []
# The color of a node. Leaves are considered Black.
NodeColor : [ Red, Black ]

View file

@ -7,16 +7,16 @@ use std::alloc::Layout;
use std::time::SystemTime;
extern "C" {
#[link_name = "Main_main_1_exposed"]
#[link_name = "roc__main_1_exposed"]
fn roc_main(output: *mut u8) -> ();
#[link_name = "Main_main_1_size"]
#[link_name = "roc__main_1_size"]
fn roc_main_size() -> i64;
#[link_name = "Main_main_1_Fx_caller"]
#[link_name = "roc__main_1_Fx_caller"]
fn call_Fx(function_pointer: *const u8, closure_data: *const u8, output: *mut u8) -> ();
#[link_name = "Main_main_1_Fx_size"]
#[link_name = "roc__main_1_Fx_size"]
fn size_Fx() -> i64;
}
@ -60,10 +60,10 @@ unsafe fn call_the_closure(function_pointer: *const u8, closure_data_ptr: *const
buffer as *mut u8,
);
let output = &*(buffer as *mut RocCallResult<i64>);
let output = &*(buffer as *mut RocCallResult<()>);
match output.into() {
Ok(v) => v,
Ok(_) => 0,
Err(e) => panic!("failed with {}", e),
}
})
@ -95,7 +95,12 @@ pub fn rust_main() -> isize {
let closure_data_ptr = buffer.offset(16);
call_the_closure(function_pointer as *const u8, closure_data_ptr as *const u8)
let result =
call_the_closure(function_pointer as *const u8, closure_data_ptr as *const u8);
std::alloc::dealloc(buffer, layout);
result
}
Err(msg) => {
std::alloc::dealloc(buffer, layout);

View file

@ -66,6 +66,9 @@ let
libffi
libxml2
zlib
llvmPkgs.libcxx
llvmPkgs.libcxxabi
libunwind
# faster builds - see https://github.com/rtfeldman/roc/blob/trunk/BUILDING_FROM_SOURCE.md#use-lld-for-the-linker
llvmPkgs.lld
# dev tools
@ -79,7 +82,7 @@ in mkShell {
LLVM_SYS_100_PREFIX = "${llvmPkgs.llvm}";
APPEND_LIBRARY_PATH = stdenv.lib.makeLibraryPath
([ pkgconfig llvmPkgs.libcxx llvmPkgs.libcxxabi libunwind ] ++ linux-only);
([ pkg-config llvmPkgs.libcxx llvmPkgs.libcxxabi libunwind ] ++ linux-only);
# Aliases don't work cross shell, so we do this
shellHook = ''