Merge remote-tracking branch 'origin/trunk' into windows-linking

This commit is contained in:
Folkert de Vries 2022-08-02 14:11:02 +02:00
commit 19159d170a
45 changed files with 1849 additions and 991 deletions

View file

@ -26,6 +26,7 @@ roc_gen_dev = { path = "../gen_dev", default-features = false }
roc_reporting = { path = "../../reporting" }
roc_error_macros = { path = "../../error_macros" }
roc_std = { path = "../../roc_std", default-features = false }
roc_utils = { path = "../../utils" }
bumpalo = { version = "3.8.0", features = ["collections"] }
libloading = "0.7.1"
tempfile = "3.2.0"

View file

@ -3,6 +3,7 @@ use libloading::{Error, Library};
use roc_builtins::bitcode;
use roc_error_macros::internal_error;
use roc_mono::ir::OptLevel;
use roc_utils::get_lib_path;
use std::collections::HashMap;
use std::env;
use std::io;
@ -66,12 +67,13 @@ pub fn link(
fn find_zig_str_path() -> PathBuf {
// First try using the lib path relative to the executable location.
let exe_relative_str_path = std::env::current_exe()
.ok()
.and_then(|path| Some(path.parent()?.join("lib").join("str.zig")));
if let Some(exe_relative_str_path) = exe_relative_str_path {
if std::path::Path::exists(&exe_relative_str_path) {
return exe_relative_str_path;
let lib_path_opt = get_lib_path();
if let Some(lib_path) = lib_path_opt {
let zig_str_path = lib_path.join("str.zig");
if std::path::Path::exists(&zig_str_path) {
return zig_str_path;
}
}
@ -87,7 +89,7 @@ fn find_zig_str_path() -> PathBuf {
return zig_str_path;
}
panic!("cannot find `str.zig`. Launch me from either the root of the roc repo or one level down(roc/examples, roc/cli...)")
panic!("cannot find `str.zig`. Check the source code in find_zig_str_path() to show all the paths I tried.")
}
fn find_wasi_libc_path() -> PathBuf {
@ -124,7 +126,7 @@ pub fn build_zig_host_native(
"build-exe",
"-fPIE",
shared_lib_path.to_str().unwrap(),
bitcode::BUILTINS_HOST_OBJ_PATH,
&bitcode::get_builtins_host_obj_path(),
]);
} else {
command.args(&["build-obj", "-fPIC"]);
@ -231,7 +233,7 @@ pub fn build_zig_host_native(
"build-exe",
"-fPIE",
shared_lib_path.to_str().unwrap(),
bitcode::BUILTINS_HOST_OBJ_PATH,
&bitcode::get_builtins_host_obj_path(),
]);
} else {
command.args(&["build-obj", "-fPIC"]);
@ -343,7 +345,7 @@ pub fn build_c_host_native(
if let Some(shared_lib_path) = shared_lib_path {
command.args(&[
shared_lib_path.to_str().unwrap(),
bitcode::BUILTINS_HOST_OBJ_PATH,
&bitcode::get_builtins_host_obj_path(),
"-fPIE",
"-pie",
"-lm",
@ -1199,7 +1201,7 @@ pub fn preprocess_host_wasm32(host_input_path: &Path, preprocessed_host_path: &P
let mut command = Command::new(&zig_executable());
let args = &[
"wasm-ld",
bitcode::BUILTINS_WASM32_OBJ_PATH,
&bitcode::get_builtins_wasm32_obj_path(),
host_input,
WASI_LIBC_PATH,
WASI_COMPILER_RT_PATH, // builtins need __multi3, __udivti3, __fixdfti

View file

@ -10,6 +10,7 @@ roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_target = { path = "../roc_target" }
roc_utils = { path = "../../utils" }
lazy_static = "1.4.0"
[build-dependencies]

View file

@ -2608,10 +2608,10 @@ test "getScalarUnsafe" {
}
pub fn strCloneTo(
string: RocStr,
ptr: [*]u8,
offset: usize,
extra_offset: usize,
string: RocStr,
) callconv(.C) usize {
const WIDTH: usize = @sizeOf(RocStr);
if (string.isSmallStr()) {

View file

@ -4,6 +4,7 @@ use std::ffi::OsStr;
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::str;
@ -53,19 +54,9 @@ fn main() {
#[cfg(not(windows))]
const BUILTINS_HOST_FILE: &str = "builtins-host.o";
generate_object_file(
&bitcode_path,
"BUILTINS_HOST_O",
"object",
BUILTINS_HOST_FILE,
);
generate_object_file(&bitcode_path, "object", BUILTINS_HOST_FILE);
generate_object_file(
&bitcode_path,
"BUILTINS_WASM32_O",
"wasm32-object",
"builtins-wasm32.o",
);
generate_object_file(&bitcode_path, "wasm32-object", "builtins-wasm32.o");
copy_zig_builtins_to_target_dir(&bitcode_path);
@ -84,21 +75,10 @@ fn main() {
.expect("Failed to delete temp dir zig_cache_dir.");
}
fn generate_object_file(
bitcode_path: &Path,
env_var_name: &str,
zig_object: &str,
object_file_name: &str,
) {
let out_dir = env::var_os("OUT_DIR").unwrap();
let dest_obj_path = Path::new(&out_dir).join(object_file_name);
fn generate_object_file(bitcode_path: &Path, zig_object: &str, object_file_name: &str) {
let dest_obj_path = get_lib_dir().join(object_file_name);
let dest_obj = dest_obj_path.to_str().expect("Invalid dest object path");
// set the variable (e.g. BUILTINS_HOST_O) that is later used in
// `compiler/builtins/src/bitcode.rs` to load the object file
println!("cargo:rustc-env={}={}", env_var_name, dest_obj);
let src_obj_path = bitcode_path.join(object_file_name);
let src_obj = src_obj_path.to_str().expect("Invalid src object path");
@ -146,20 +126,29 @@ fn generate_bc_file(bitcode_path: &Path, zig_object: &str, file_name: &str) {
);
}
fn copy_zig_builtins_to_target_dir(bitcode_path: &Path) {
// To enable roc to find the zig biultins, we want them to be moved to a folder next to the roc executable.
// So if <roc_folder>/roc is the executable. The zig files will be in <roc_folder>/lib/*.zig
pub fn get_lib_dir() -> PathBuf {
// Currently we have the OUT_DIR variable which points to `/target/debug/build/roc_builtins-*/out/`.
// So we just need to shed a 3 of the outer layers to get `/target/debug/` and then add `lib`.
let out_dir = env::var_os("OUT_DIR").unwrap();
let target_profile_dir = Path::new(&out_dir)
let lib_path = Path::new(&out_dir)
.parent()
.and_then(|path| path.parent())
.and_then(|path| path.parent())
.unwrap()
.join("lib");
// create dir of it does not exist
fs::create_dir_all(lib_path.clone()).expect("Failed to make lib dir.");
lib_path
}
fn copy_zig_builtins_to_target_dir(bitcode_path: &Path) {
// To enable roc to find the zig biultins, we want them to be moved to a folder next to the roc executable.
// So if <roc_folder>/roc is the executable. The zig files will be in <roc_folder>/lib/*.zig
let target_profile_dir = get_lib_dir();
let zig_src_dir = bitcode_path.join("src");
cp_unless_zig_cache(&zig_src_dir, &target_profile_dir).unwrap_or_else(|err| {

View file

@ -1,16 +1,29 @@
use roc_module::symbol::Symbol;
use roc_target::TargetInfo;
use roc_utils::get_lib_path;
use std::ops::Index;
pub const BUILTINS_HOST_OBJ_PATH: &str = env!(
"BUILTINS_HOST_O",
"Env var BUILTINS_HOST_O not found. Is there a problem with the build script?"
);
pub fn get_builtins_host_obj_path() -> String {
let builtins_host_path = get_lib_path()
.expect("Failed to find lib dir.")
.join("builtins-host.o");
pub const BUILTINS_WASM32_OBJ_PATH: &str = env!(
"BUILTINS_WASM32_O",
"Env var BUILTINS_WASM32_O not found. Is there a problem with the build script?"
);
builtins_host_path
.into_os_string()
.into_string()
.expect("Failed to convert builtins_host_path to str")
}
pub fn get_builtins_wasm32_obj_path() -> String {
let builtins_wasm32_path = get_lib_path()
.expect("Failed to find lib dir.")
.join("builtins-wasm32.o");
builtins_wasm32_path
.into_os_string()
.into_string()
.expect("Failed to convert builtins_wasm32_path to str")
}
#[derive(Debug, Default, Copy, Clone)]
pub struct IntrinsicName {

View file

@ -566,6 +566,7 @@ fn can_annotation_help(
region,
alias_needs: alias.type_variables.len() as u8,
type_got: args.len() as u8,
alias_kind: alias.kind,
});
return error;
}

View file

@ -713,7 +713,6 @@ pub struct PatternEq(
pub struct OpportunisticResolve {
/// The specialized type of this lookup, to try to resolve.
pub specialization_variable: Variable,
pub specialization_expectation: Index<Expected<Type>>,
/// The ability member to try to resolve.
pub member: Symbol,

View file

@ -369,6 +369,7 @@ fn canonicalize_alias<'a>(
typ: symbol,
variable_region: loc_lowercase.region,
variable_name: loc_lowercase.value.clone(),
alias_kind: AliasKind::Structural,
});
}
AliasKind::Opaque => {
@ -2688,6 +2689,7 @@ fn correct_mutual_recursive_type_alias<'a>(
env,
&mut alias.typ,
alias_name,
alias.kind,
alias.region,
rest,
can_still_report_error,
@ -2870,7 +2872,15 @@ fn make_tag_union_recursive_help<'a, 'b>(
}
_ => {
// take care to report a cyclic alias only once (not once for each alias in the cycle)
mark_cyclic_alias(env, typ, symbol, region, others, *can_report_cyclic_error);
mark_cyclic_alias(
env,
typ,
symbol,
alias_kind,
region,
others,
*can_report_cyclic_error,
);
*can_report_cyclic_error = false;
Cyclic
@ -2882,6 +2892,7 @@ fn mark_cyclic_alias<'a>(
env: &mut Env<'a>,
typ: &mut Type,
symbol: Symbol,
alias_kind: AliasKind,
region: Region,
others: Vec<Symbol>,
report: bool,
@ -2890,7 +2901,7 @@ fn mark_cyclic_alias<'a>(
*typ = Type::Erroneous(problem);
if report {
let problem = Problem::CyclicAlias(symbol, region, others);
let problem = Problem::CyclicAlias(symbol, region, others, alias_kind);
env.problems.push(problem);
}
}

View file

@ -1568,13 +1568,6 @@ fn canonicalize_var_lookup(
output.references.insert_value_lookup(symbol);
if scope.abilities_store.is_ability_member_name(symbol) {
// Is there a shadow implementation with the same name? If so, we might be in
// the def for that shadow. In that case add a value lookup of the shadow impl,
// so that it's marked as possibly-recursive.
if let Some(shadow) = scope.get_member_shadow(symbol) {
output.references.insert_value_lookup(shadow.value);
}
AbilityMember(
symbol,
Some(scope.abilities_store.fresh_specialization_id()),

View file

@ -206,7 +206,6 @@ pub fn canonicalize_def_header_pattern<'a>(
// Likely a specialization of an ability.
Some(ability_member_name) => {
output.references.insert_bound(symbol);
output.references.insert_value_lookup(ability_member_name);
Pattern::AbilityMemberSpecialization {
ident: symbol,
specializes: ability_member_name,

View file

@ -439,9 +439,6 @@ pub fn constrain_expr(
if let Some(specialization_id) = specialization_id {
env.resolutions_to_make.push(OpportunisticResolve {
specialization_variable: specialization_var,
specialization_expectation: constraints.push_expected_type(
Expected::NoExpectation(Type::Variable(specialization_var)),
),
member: symbol,
specialization_id,
});

View file

@ -1,17 +1,21 @@
use crate::llvm::bitcode::call_bitcode_fn;
use crate::llvm::build::{store_roc_value, Env};
use crate::debug_info_init;
use crate::llvm::bitcode::call_str_bitcode_fn;
use crate::llvm::build::{get_tag_id, store_roc_value, Env};
use crate::llvm::build_list::{self, incrementing_elem_loop};
use crate::llvm::convert::basic_type_from_layout;
use crate::llvm::convert::{basic_type_from_layout, RocUnion};
use inkwell::builder::Builder;
use inkwell::types::BasicType;
use inkwell::values::{BasicValueEnum, IntValue, PointerValue};
use inkwell::module::Linkage;
use inkwell::types::{BasicMetadataTypeEnum, BasicType};
use inkwell::values::{BasicValueEnum, FunctionValue, IntValue, PointerValue};
use inkwell::AddressSpace;
use roc_builtins::bitcode;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, Layout, LayoutIds, UnionLayout};
use roc_region::all::Region;
use super::build::{load_symbol_and_layout, Scope};
use super::build::{
add_func, load_roc_value, load_symbol_and_layout, use_roc_value, FunctionSpec, Scope,
};
#[derive(Debug, Clone, Copy)]
struct Cursors<'ctx> {
@ -204,19 +208,19 @@ fn build_clone<'a, 'ctx, 'env>(
when_recursive,
),
Layout::Struct {
field_layouts: _, ..
} => {
if layout.safe_to_memcpy() {
build_copy(env, ptr, cursors.offset, value)
} else {
todo!()
}
}
Layout::Struct { field_layouts, .. } => build_clone_struct(
env,
layout_ids,
ptr,
cursors,
value,
field_layouts,
when_recursive,
),
Layout::LambdaSet(_) => unreachable!("cannot compare closures"),
Layout::Union(_union_layout) => {
Layout::Union(union_layout) => {
if layout.safe_to_memcpy() {
let ptr = unsafe {
env.builder
@ -230,24 +234,50 @@ fn build_clone<'a, 'ctx, 'env>(
store_roc_value(env, layout, ptr, value);
let width = value.get_type().size_of().unwrap();
env.builder
.build_int_add(cursors.offset, width, "new_offset")
cursors.extra_offset
} else {
todo!()
build_clone_tag(
env,
layout_ids,
ptr,
cursors,
value,
union_layout,
WhenRecursive::Loop(union_layout),
)
}
}
/*
Layout::Boxed(inner_layout) => build_box_eq(
env,
layout_ids,
when_recursive,
lhs_layout,
inner_layout,
lhs_val,
rhs_val,
),
Layout::Boxed(inner_layout) => {
// write the offset
build_copy(env, ptr, cursors.offset, cursors.extra_offset.into());
let source = value.into_pointer_value();
let value = load_roc_value(env, *inner_layout, source, "inner");
let inner_width = env
.ptr_int()
.const_int(inner_layout.stack_size(env.target_info) as u64, false);
let new_extra = env
.builder
.build_int_add(cursors.offset, inner_width, "new_extra");
let cursors = Cursors {
offset: cursors.extra_offset,
extra_offset: new_extra,
};
build_clone(
env,
layout_ids,
ptr,
cursors,
value,
*inner_layout,
when_recursive,
)
}
Layout::RecursivePointer => match when_recursive {
WhenRecursive::Unreachable => {
@ -260,27 +290,249 @@ fn build_clone<'a, 'ctx, 'env>(
let bt = basic_type_from_layout(env, &layout);
// cast the i64 pointer to a pointer to block of memory
let field1_cast = env
.builder
.build_bitcast(lhs_val, bt, "i64_to_opaque")
.into_pointer_value();
let field1_cast = env.builder.build_bitcast(value, bt, "i64_to_opaque");
let field2_cast = env
.builder
.build_bitcast(rhs_val, bt, "i64_to_opaque")
.into_pointer_value();
build_tag_eq(
build_clone_tag(
env,
layout_ids,
ptr,
cursors,
field1_cast,
union_layout,
WhenRecursive::Loop(union_layout),
&union_layout,
field1_cast.into(),
field2_cast.into(),
)
}
},
*/
}
}
#[allow(clippy::too_many_arguments)]
fn build_clone_struct<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout_ids: &mut LayoutIds<'a>,
ptr: PointerValue<'ctx>,
cursors: Cursors<'ctx>,
value: BasicValueEnum<'ctx>,
field_layouts: &[Layout<'a>],
when_recursive: WhenRecursive<'a>,
) -> IntValue<'ctx> {
let layout = Layout::struct_no_name_order(field_layouts);
if layout.safe_to_memcpy() {
build_copy(env, ptr, cursors.offset, value)
} else {
let mut cursors = cursors;
let structure = value.into_struct_value();
for (i, field_layout) in field_layouts.iter().enumerate() {
let field = env
.builder
.build_extract_value(structure, i as _, "extract")
.unwrap();
let field = use_roc_value(env, *field_layout, field, "field");
let new_extra = build_clone(
env,
layout_ids,
ptr,
cursors,
field,
*field_layout,
when_recursive,
);
let field_width = env
.ptr_int()
.const_int(field_layout.stack_size(env.target_info) as u64, false);
cursors.extra_offset = new_extra;
cursors.offset = env
.builder
.build_int_add(cursors.offset, field_width, "offset");
}
cursors.extra_offset
}
}
#[allow(clippy::too_many_arguments)]
fn build_clone_tag<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout_ids: &mut LayoutIds<'a>,
ptr: PointerValue<'ctx>,
cursors: Cursors<'ctx>,
value: BasicValueEnum<'ctx>,
union_layout: UnionLayout<'a>,
when_recursive: WhenRecursive<'a>,
) -> IntValue<'ctx> {
let layout = Layout::Union(union_layout);
let layout_id = layout_ids.get(Symbol::CLONE, &layout);
let fn_name = layout_id.to_symbol_string(Symbol::CLONE, &env.interns);
let function = match env.module.get_function(fn_name.as_str()) {
Some(function_value) => function_value,
None => {
let block = env.builder.get_insert_block().expect("to be in a function");
let di_location = env.builder.get_current_debug_location().unwrap();
let function_type = env.ptr_int().fn_type(
&[
env.context.i8_type().ptr_type(AddressSpace::Generic).into(),
env.ptr_int().into(),
env.ptr_int().into(),
BasicMetadataTypeEnum::from(value.get_type()),
],
false,
);
let function_value = add_func(
env.context,
env.module,
&fn_name,
FunctionSpec::known_fastcc(function_type),
Linkage::Private,
);
let subprogram = env.new_subprogram(&fn_name);
function_value.set_subprogram(subprogram);
env.dibuilder.finalize();
build_clone_tag_help(
env,
layout_ids,
union_layout,
when_recursive,
function_value,
);
env.builder.position_at_end(block);
env.builder
.set_current_debug_location(env.context, di_location);
function_value
}
};
let call = env.builder.build_call(
function,
&[
ptr.into(),
cursors.offset.into(),
cursors.extra_offset.into(),
value.into(),
],
"build_clone_tag",
);
call.set_call_convention(function.get_call_conventions());
let result = call.try_as_basic_value().left().unwrap();
result.into_int_value()
}
#[allow(clippy::too_many_arguments)]
fn build_clone_tag_help<'a, 'ctx, 'env>(
env: &Env<'a, 'ctx, 'env>,
layout_ids: &mut LayoutIds<'a>,
union_layout: UnionLayout<'a>,
when_recursive: WhenRecursive<'a>,
fn_val: FunctionValue<'ctx>,
) {
use bumpalo::collections::Vec;
let context = &env.context;
let builder = env.builder;
// Add a basic block for the entry point
let entry = context.append_basic_block(fn_val, "entry");
builder.position_at_end(entry);
debug_info_init!(env, fn_val);
// Add args to scope
// let arg_symbol = Symbol::ARG_1;
// tag_value.set_name(arg_symbol.as_str(&env.interns));
let mut it = fn_val.get_param_iter();
let ptr = it.next().unwrap().into_pointer_value();
let offset = it.next().unwrap().into_int_value();
let extra_offset = it.next().unwrap().into_int_value();
let tag_value = it.next().unwrap();
let cursors = Cursors {
offset,
extra_offset,
};
let parent = fn_val;
debug_assert!(tag_value.is_pointer_value());
use UnionLayout::*;
match union_layout {
NonRecursive(&[]) => {
// we're comparing empty tag unions; this code is effectively unreachable
env.builder.build_unreachable();
}
NonRecursive(tags) => {
let id = get_tag_id(env, parent, &union_layout, tag_value);
let switch_block = env.context.append_basic_block(parent, "switch_block");
env.builder.build_unconditional_branch(switch_block);
let mut cases = Vec::with_capacity_in(tags.len(), env.arena);
for (tag_id, field_layouts) in tags.iter().enumerate() {
let block = env.context.append_basic_block(parent, "tag_id_modify");
env.builder.position_at_end(block);
let raw_data_ptr = env
.builder
.build_struct_gep(
tag_value.into_pointer_value(),
RocUnion::TAG_DATA_INDEX,
"tag_data",
)
.unwrap();
let layout = Layout::struct_no_name_order(field_layouts);
let basic_type = basic_type_from_layout(env, &layout);
let data_ptr = env.builder.build_pointer_cast(
raw_data_ptr,
basic_type.ptr_type(AddressSpace::Generic),
"data_ptr",
);
let data = env.builder.build_load(data_ptr, "load_data");
let answer =
build_clone(env, layout_ids, ptr, cursors, data, layout, when_recursive);
env.builder.build_return(Some(&answer));
cases.push((id.get_type().const_int(tag_id as u64, false), block));
}
env.builder.position_at_end(switch_block);
match cases.pop() {
Some((_, default)) => {
env.builder.build_switch(id, default, &cases);
}
None => {
// we're serializing an empty tag union; this code is effectively unreachable
env.builder.build_unreachable();
}
}
}
_ => todo!(),
}
}
@ -329,14 +581,15 @@ fn build_clone_builtin<'a, 'ctx, 'env>(
Builtin::Str => {
//
call_bitcode_fn(
call_str_bitcode_fn(
env,
&[value],
&[
ptr.into(),
cursors.offset.into(),
cursors.extra_offset.into(),
value,
],
crate::llvm::bitcode::BitcodeReturns::Basic,
bitcode::STR_CLONE_TO,
)
.into_int_value()
@ -380,10 +633,6 @@ fn build_clone_builtin<'a, 'ctx, 'env>(
"elements",
);
// where we write the elements' stack representation
// let element_offset = bd.build_alloca(env.ptr_int(), "element_offset");
// bd.build_store(element_offset, elements_start_offset);
// if the element has any pointers, we clone them to this offset
let rest_offset = bd.build_alloca(env.ptr_int(), "rest_offset");
@ -404,26 +653,24 @@ fn build_clone_builtin<'a, 'ctx, 'env>(
bd.build_int_add(elements_start_offset, current_offset, "current_offset");
let current_extra_offset = bd.build_load(rest_offset, "element_offset");
let offset = current_offset; // env.ptr_int().const_int(60, false);
let extra_offset = current_extra_offset.into_int_value(); // env.ptr_int().const_int(60 + 24, false);
let offset = current_offset;
let extra_offset = current_extra_offset.into_int_value();
let cursors = Cursors {
offset,
extra_offset,
};
let new_offset = build_clone(
env,
layout_ids,
ptr,
Cursors {
// offset: current_offset,
// extra_offset: current_extra_offset.into_int_value(),
offset,
extra_offset,
},
cursors,
element,
*elem,
when_recursive,
);
// let new_offset = env.ptr_int().const_int(60 + 24 + 34, false);
bd.build_store(rest_offset, new_offset);
};

View file

@ -10,7 +10,8 @@ use roc_collections::MutMap;
use roc_derive::SharedDerivedModule;
use roc_error_macros::internal_error;
use roc_module::symbol::ModuleId;
use roc_solve::solve::{compact_lambda_sets_of_vars, Phase, Pools};
use roc_solve::solve::Pools;
use roc_solve::specialize::{compact_lambda_sets_of_vars, DerivedEnv, Phase};
use roc_types::subs::{get_member_lambda_sets_at_region, Content, FlatType, LambdaSet};
use roc_types::subs::{ExposedTypesStorageSubs, Subs, Variable};
use roc_unify::unify::{unify as unify_unify, Env, Mode, Unified};
@ -272,15 +273,18 @@ pub fn unify(
let mut pools = Pools::default();
let late_phase = LatePhase { home, abilities };
let derived_env = DerivedEnv {
derived_module,
exposed_types: exposed_by_module,
};
let must_implement_constraints = compact_lambda_sets_of_vars(
subs,
derived_module,
&derived_env,
arena,
&mut pools,
lambda_sets_to_specialize,
&late_phase,
exposed_by_module,
);
// At this point we can't do anything with must-implement constraints, since we're no
// longer solving. We must assume that they were totally caught during solving.

View file

@ -1005,6 +1005,8 @@ define_builtins! {
30 DEV_TMP5: "#dev_tmp5"
31 ATTR_INVALID: "#attr_invalid"
32 CLONE: "#clone" // internal function that clones a value into a buffer
}
// Fake module for synthesizing and storing derived implementations
1 DERIVED_SYNTH: "#Derived" => {

View file

@ -3999,9 +3999,10 @@ pub fn with_hole<'a>(
}
// creating a record from the var will unpack it if it's just a single field.
let layout = layout_cache
.from_var(env.arena, record_var, env.subs)
.unwrap_or_else(|err| panic!("TODO turn fn_var into a RuntimeError {:?}", err));
let layout = match layout_cache.from_var(env.arena, record_var, env.subs) {
Ok(layout) => layout,
Err(_) => return Stmt::RuntimeError("Can't create record with improper layout"),
};
let field_symbols = field_symbols.into_bump_slice();

View file

@ -45,12 +45,13 @@ pub enum Problem {
shadow: Loc<Ident>,
kind: ShadowKind,
},
CyclicAlias(Symbol, Region, Vec<Symbol>),
CyclicAlias(Symbol, Region, Vec<Symbol>, AliasKind),
BadRecursion(Vec<CycleEntry>),
PhantomTypeArgument {
typ: Symbol,
variable_region: Region,
variable_name: Lowercase,
alias_kind: AliasKind,
},
UnboundTypeVariable {
typ: Symbol,

View file

@ -5,3 +5,4 @@
pub mod ability;
pub mod module;
pub mod solve;
pub mod specialize;

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,782 @@
//! Module [specialize] is resolves specialization lambda sets.
use std::collections::VecDeque;
use bumpalo::Bump;
use roc_can::{
abilities::{AbilitiesStore, ImplKey},
module::ExposedByModule,
};
use roc_collections::{VecMap, VecSet};
use roc_debug_flags::dbg_do;
#[cfg(debug_assertions)]
use roc_debug_flags::ROC_TRACE_COMPACTION;
use roc_derive::SharedDerivedModule;
use roc_derive_key::{DeriveError, DeriveKey};
use roc_error_macros::{internal_error, todo_abilities};
use roc_module::symbol::{ModuleId, Symbol};
use roc_types::{
subs::{
get_member_lambda_sets_at_region, Content, Descriptor, GetSubsSlice, LambdaSet, Mark,
OptVariable, Rank, Subs, SubsSlice, UlsOfVar, Variable,
},
types::{AliasKind, MemberImpl, Uls},
};
use roc_unify::unify::{unify, Env as UEnv, Mode, MustImplementConstraints};
use crate::solve::{deep_copy_var_in, introduce, Pools};
/// What phase in the compiler is reaching out to specialize lambda sets?
/// This is important to distinguish subtle differences in the behavior of the solving algorithm.
//
// TODO the APIs of this trait suck, this needs a nice cleanup.
pub trait Phase {
/// The regular type-solving phase, or during some later phase of compilation.
/// During the solving phase we must anticipate that some information is still unknown and react to
/// that; during late phases, we expect that all information is resolved.
const IS_LATE: bool;
fn with_module_abilities_store<T, F>(&self, module: ModuleId, f: F) -> T
where
F: FnMut(&AbilitiesStore) -> T;
/// Given a known lambda set's ambient function in an external module, copy that ambient
/// function into the given subs.
fn copy_lambda_set_ambient_function_to_home_subs(
&self,
external_lambda_set_var: Variable,
external_module_id: ModuleId,
home_subs: &mut Subs,
) -> Variable;
/// Find the ambient function var at a given region for an ability member definition (not a
/// specialization!), and copy that into the given subs.
fn get_and_copy_ability_member_ambient_function(
&self,
ability_member: Symbol,
region: u8,
home_subs: &mut Subs,
) -> Variable;
}
pub(crate) struct SolvePhase<'a> {
pub abilities_store: &'a AbilitiesStore,
}
impl Phase for SolvePhase<'_> {
const IS_LATE: bool = false;
fn with_module_abilities_store<T, F>(&self, _module: ModuleId, mut f: F) -> T
where
F: FnMut(&AbilitiesStore) -> T,
{
// During solving we're only aware of our module's abilities store.
f(self.abilities_store)
}
fn copy_lambda_set_ambient_function_to_home_subs(
&self,
external_lambda_set_var: Variable,
_external_module_id: ModuleId,
home_subs: &mut Subs,
) -> Variable {
// During solving we're only aware of our module's abilities store, the var must
// be in our module store. Even if the specialization lambda set comes from another
// module, we should have taken care to import it before starting solving in this module.
let LambdaSet {
ambient_function, ..
} = home_subs.get_lambda_set(external_lambda_set_var);
ambient_function
}
fn get_and_copy_ability_member_ambient_function(
&self,
ability_member: Symbol,
region: u8,
home_subs: &mut Subs,
) -> Variable {
// During solving we're only aware of our module's abilities store, the var must
// be in our module store. Even if the specialization lambda set comes from another
// module, we should have taken care to import it before starting solving in this module.
let member_def = self
.abilities_store
.member_def(ability_member)
.unwrap_or_else(|| {
internal_error!(
"{:?} is not resolved, or not an ability member!",
ability_member
)
});
let member_var = member_def.signature_var();
let region_lset = get_member_lambda_sets_at_region(home_subs, member_var, region);
let LambdaSet {
ambient_function, ..
} = home_subs.get_lambda_set(region_lset);
ambient_function
}
}
pub struct DerivedEnv<'a> {
pub derived_module: &'a SharedDerivedModule,
/// Exposed types needed by the derived module.
pub exposed_types: &'a ExposedByModule,
}
#[derive(Default)]
pub struct AwaitingSpecializations {
// What variables' specialized lambda sets in `uls_of_var` will be unlocked for specialization
// when an implementation key's specialization is resolved?
waiting: VecMap<ImplKey, VecSet<Variable>>,
uls_of_var: UlsOfVar,
}
impl AwaitingSpecializations {
pub fn remove_for_specialized(&mut self, subs: &Subs, impl_key: ImplKey) -> UlsOfVar {
let spec_variables = self
.waiting
.remove(&impl_key)
.map(|(_, set)| set)
.unwrap_or_default();
let mut result = UlsOfVar::default();
for var in spec_variables {
let target_lambda_sets = self
.uls_of_var
.remove_dependent_unspecialized_lambda_sets(subs, var);
result.extend(var, target_lambda_sets);
}
result
}
pub fn add(
&mut self,
impl_key: ImplKey,
var: Variable,
lambda_sets: impl IntoIterator<Item = Variable>,
) {
self.uls_of_var.extend(var, lambda_sets);
let waiting = self.waiting.get_or_insert(impl_key, Default::default);
waiting.insert(var);
}
pub fn union(&mut self, other: Self) {
for (impl_key, waiting_vars) in other.waiting {
let waiting = self.waiting.get_or_insert(impl_key, Default::default);
waiting.extend(waiting_vars);
}
self.uls_of_var.union(other.uls_of_var);
}
pub fn waiting_for(&self, impl_key: ImplKey) -> bool {
self.waiting.contains_key(&impl_key)
}
}
pub struct CompactionResult {
pub obligations: MustImplementConstraints,
pub awaiting_specialization: AwaitingSpecializations,
}
#[cfg(debug_assertions)]
fn trace_compaction_step_1(subs: &Subs, c_a: Variable, uls_a: &[Variable]) {
let c_a = roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(c_a), subs);
let uls_a = uls_a
.iter()
.map(|v| {
format!(
"{:?}",
roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(*v), subs)
)
})
.collect::<Vec<_>>()
.join(",");
eprintln!("===lambda set compaction===");
eprintln!(" concrete type: {:?}", c_a);
eprintln!(" step 1:");
eprintln!(" uls_a = {{ {} }}", uls_a);
}
#[cfg(debug_assertions)]
fn trace_compaction_step_2(subs: &Subs, uls_a: &[Variable]) {
let uls_a = uls_a
.iter()
.map(|v| {
format!(
"{:?}",
roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(*v), subs)
)
})
.collect::<Vec<_>>()
.join(",");
eprintln!(" step 2:");
eprintln!(" uls_a' = {{ {} }}", uls_a);
}
#[cfg(debug_assertions)]
fn trace_compaction_step_3start() {
eprintln!(" step 3:");
}
#[cfg(debug_assertions)]
fn trace_compaction_step_3iter_start(
subs: &Subs,
iteration_lambda_set: Variable,
t_f1: Variable,
t_f2: Variable,
) {
let iteration_lambda_set = roc_types::subs::SubsFmtContent(
subs.get_content_without_compacting(iteration_lambda_set),
subs,
);
let t_f1 = roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(t_f1), subs);
let t_f2 = roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(t_f2), subs);
eprintln!(" - iteration: {:?}", iteration_lambda_set);
eprintln!(" {:?}", t_f1);
eprintln!(" ~ {:?}", t_f2);
}
#[cfg(debug_assertions)]
#[rustfmt::skip]
fn trace_compaction_step_3iter_end(subs: &Subs, t_f_result: Variable, skipped: bool) {
let t_f_result =
roc_types::subs::SubsFmtContent(subs.get_content_without_compacting(t_f_result), subs);
if skipped {
eprintln!(" SKIP");
}
eprintln!(" = {:?}\n", t_f_result);
}
macro_rules! trace_compact {
(1. $subs:expr, $c_a:expr, $uls_a:expr) => {{
dbg_do!(ROC_TRACE_COMPACTION, {
trace_compaction_step_1($subs, $c_a, $uls_a)
})
}};
(2. $subs:expr, $uls_a:expr) => {{
dbg_do!(ROC_TRACE_COMPACTION, {
trace_compaction_step_2($subs, $uls_a)
})
}};
(3start.) => {{
dbg_do!(ROC_TRACE_COMPACTION, { trace_compaction_step_3start() })
}};
(3iter_start. $subs:expr, $iteration_lset:expr, $t_f1:expr, $t_f2:expr) => {{
dbg_do!(ROC_TRACE_COMPACTION, {
trace_compaction_step_3iter_start($subs, $iteration_lset, $t_f1, $t_f2)
})
}};
(3iter_end. $subs:expr, $t_f_result:expr) => {{
dbg_do!(ROC_TRACE_COMPACTION, {
trace_compaction_step_3iter_end($subs, $t_f_result, false)
})
}};
(3iter_end_skipped. $subs:expr, $t_f_result:expr) => {{
dbg_do!(ROC_TRACE_COMPACTION, {
trace_compaction_step_3iter_end($subs, $t_f_result, true)
})
}};
}
#[inline(always)]
fn iter_concrete_of_unspecialized<'a>(
subs: &'a Subs,
c_a: Variable,
uls: &'a [Uls],
) -> impl Iterator<Item = &'a Uls> {
uls.iter()
.filter(move |Uls(var, _, _)| subs.equivalent_without_compacting(*var, c_a))
}
/// Gets the unique unspecialized lambda resolving to concrete type `c_a` in a list of
/// unspecialized lambda sets.
#[inline(always)]
fn unique_unspecialized_lambda(subs: &Subs, c_a: Variable, uls: &[Uls]) -> Option<Uls> {
let mut iter_concrete = iter_concrete_of_unspecialized(subs, c_a, uls);
let uls = iter_concrete.next()?;
debug_assert!(iter_concrete.next().is_none(), "multiple concrete");
Some(*uls)
}
/// Compacts ("specializes") every lambda set in `uls_of_var` whose unspecialized lambdas have
/// been resolved to a concrete type, by unifying each lambda set's ambient function with the
/// ambient function of the specialization it resolves to.
///
/// Returns the ability obligations discovered during those unifications, plus the lambda sets
/// whose compaction must wait until a specialization becomes known.
#[must_use]
pub fn compact_lambda_sets_of_vars<P: Phase>(
    subs: &mut Subs,
    derived_env: &DerivedEnv,
    arena: &Bump,
    pools: &mut Pools,
    uls_of_var: UlsOfVar,
    phase: &P,
) -> CompactionResult {
    let mut must_implement = MustImplementConstraints::default();
    let mut awaiting_specialization = AwaitingSpecializations::default();

    // Compacting one lambda set may resolve further lambda sets (see the loop body below), so
    // process the mapping as a worklist.
    let mut uls_of_var_queue = VecDeque::with_capacity(uls_of_var.len());
    uls_of_var_queue.extend(uls_of_var.drain());

    // Suppose a type variable `a` with `uls_of_var` mapping `uls_a = {l1, ... ln}` has been instantiated to a concrete type `C_a`.
    while let Some((c_a, uls_a)) = uls_of_var_queue.pop_front() {
        let c_a = subs.get_root_key_without_compacting(c_a);
        // 1. Let each `l` in `uls_a` be of form `[solved_lambdas + ... + C:f:r + ...]`.
        //    NB: There may be multiple unspecialized lambdas of form `C:f:r, C:f1:r1, ..., C:fn:rn` in `l`.
        //    In this case, let `t1, ... tm` be the other unspecialized lambdas not of form `C:_:_`,
        //    that is, none of which are now specialized to the type `C`. Then, deconstruct
        //    `l` such that `l' = [solved_lambdas + t1 + ... + tm + C:f:r]` and `l1 = [[] + C:f1:r1], ..., ln = [[] + C:fn:rn]`.
        //    Replace `l` with `l', l1, ..., ln` in `uls_a`, flattened.
        //    TODO: the flattening step described above
        let uls_a = {
            let mut uls = uls_a.into_vec();

            // De-duplicate lambdas by root key.
            uls.iter_mut().for_each(|v| *v = subs.get_root_key(*v));
            uls.sort();
            uls.dedup();
            uls
        };

        trace_compact!(1. subs, c_a, &uls_a);

        // The flattening step - remove lambda sets that don't reference the concrete var, and for
        // flatten lambda sets that reference it more than once.
        let mut uls_a: Vec<_> = uls_a
            .into_iter()
            .flat_map(|lambda_set| {
                let LambdaSet {
                    solved,
                    recursion_var,
                    unspecialized,
                    ambient_function,
                } = subs.get_lambda_set(lambda_set);
                let lambda_set_rank = subs.get_rank(lambda_set);
                let unspecialized = subs.get_subs_slice(unspecialized);
                // TODO: is it faster to traverse once, see if we only have one concrete lambda, and
                // bail in that happy-path, rather than always splitting?
                let (concrete, mut not_concrete): (Vec<_>, Vec<_>) = unspecialized
                    .iter()
                    .copied()
                    .partition(|Uls(var, _, _)| subs.equivalent_without_compacting(*var, c_a));
                if concrete.len() == 1 {
                    // No flattening needs to be done, just return the lambda set as-is
                    return vec![lambda_set];
                }
                // Must flatten: one new lambda set per concrete unspecialized lambda.
                concrete
                    .into_iter()
                    .enumerate()
                    .map(|(i, concrete_lambda)| {
                        let (var, unspecialized) = if i == 0 {
                            // The first lambda set contains one concrete lambda, plus all solved
                            // lambdas, plus all other unspecialized lambdas.
                            // l' = [solved_lambdas + t1 + ... + tm + C:f:r]
                            let unspecialized = SubsSlice::extend_new(
                                &mut subs.unspecialized_lambda_sets,
                                not_concrete
                                    .drain(..)
                                    .chain(std::iter::once(concrete_lambda)),
                            );
                            // Reuse the original lambda set variable for the first piece.
                            (lambda_set, unspecialized)
                        } else {
                            // All the other lambda sets consists only of their respective concrete
                            // lambdas.
                            // ln = [[] + C:fn:rn]
                            let unspecialized = SubsSlice::extend_new(
                                &mut subs.unspecialized_lambda_sets,
                                [concrete_lambda],
                            );
                            // Fresh variable at the same rank; its content is overwritten with a
                            // proper `LambdaSet` just below.
                            let var = subs.fresh(Descriptor {
                                content: Content::Error,
                                rank: lambda_set_rank,
                                mark: Mark::NONE,
                                copy: OptVariable::NONE,
                            });
                            (var, unspecialized)
                        };

                        subs.set_content(
                            var,
                            Content::LambdaSet(LambdaSet {
                                solved,
                                recursion_var,
                                unspecialized,
                                ambient_function,
                            }),
                        );
                        var
                    })
                    .collect()
            })
            .collect();

        // 2. Now, each `l` in `uls_a` has a unique unspecialized lambda of form `C:f:r`.
        //    Sort `uls_a` primarily by `f` (arbitrary order), and secondarily by `r` in descending order.
        uls_a.sort_by(|v1, v2| {
            let unspec_1 = subs.get_subs_slice(subs.get_lambda_set(*v1).unspecialized);
            let unspec_2 = subs.get_subs_slice(subs.get_lambda_set(*v2).unspecialized);

            let Uls(_, f1, r1) = unique_unspecialized_lambda(subs, c_a, unspec_1).unwrap();
            let Uls(_, f2, r2) = unique_unspecialized_lambda(subs, c_a, unspec_2).unwrap();

            match f1.cmp(&f2) {
                std::cmp::Ordering::Equal => {
                    // Order by descending order of region.
                    r2.cmp(&r1)
                }
                ord => ord,
            }
        });

        trace_compact!(2. subs, &uls_a);

        // 3. For each `l` in `uls_a` with unique unspecialized lambda `C:f:r`:
        //    1. Let `t_f1` be the directly ambient function of the lambda set containing `C:f:r`. Remove `C:f:r` from `t_f1`'s lambda set.
        //       - For example, `(b' -[[] + Fo:f:2]-> {})` if `C:f:r=Fo:f:2`. Removing `Fo:f:2`, we get `(b' -[[]]-> {})`.
        //    2. Let `t_f2` be the directly ambient function of the specialization lambda set resolved by `C:f:r`.
        //       - For example, `(b -[[] + b:g:1]-> {})` if `C:f:r=Fo:f:2`, running on example from above.
        //    3. Unify `t_f1 ~ t_f2`.
        trace_compact!(3start.);
        for l in uls_a {
            let compaction_result =
                compact_lambda_set(subs, derived_env, arena, pools, c_a, l, phase);

            match compaction_result {
                OneCompactionResult::Compacted {
                    new_obligations,
                    new_lambda_sets_to_specialize,
                } => {
                    must_implement.extend(new_obligations);
                    // Compaction may resolve more lambda sets; feed them back into the worklist.
                    uls_of_var_queue.extend(new_lambda_sets_to_specialize.drain());
                }
                OneCompactionResult::MustWaitForSpecialization(impl_key) => {
                    awaiting_specialization.add(impl_key, c_a, [l])
                }
            }
        }
    }

    CompactionResult {
        obligations: must_implement,
        awaiting_specialization,
    }
}
/// The outcome of attempting to compact a single lambda set.
enum OneCompactionResult {
    /// The lambda set was compacted.
    Compacted {
        // Ability obligations produced by unifying the ambient functions.
        new_obligations: MustImplementConstraints,
        // Lambda sets newly resolved by that unification, still awaiting compaction.
        new_lambda_sets_to_specialize: UlsOfVar,
    },
    /// The needed specialization is not yet known; compaction must be deferred until the
    /// implementation keyed here is specialized.
    MustWaitForSpecialization(ImplKey),
}
/// Compacts a single lambda set `this_lambda_set`, whose unique unspecialized lambda `C:f:r`
/// has resolved to the concrete type `resolved_concrete`, by removing `C:f:r` and unifying the
/// lambda set's ambient function with the ambient function of the resolved specialization.
#[must_use]
#[allow(clippy::too_many_arguments)]
fn compact_lambda_set<P: Phase>(
    subs: &mut Subs,
    derived_env: &DerivedEnv,
    arena: &Bump,
    pools: &mut Pools,
    resolved_concrete: Variable,
    this_lambda_set: Variable,
    phase: &P,
) -> OneCompactionResult {
    // 3. For each `l` in `uls_a` with unique unspecialized lambda `C:f:r`:
    //    1. Let `t_f1` be the directly ambient function of the lambda set containing `C:f:r`. Remove `C:f:r` from `t_f1`'s lambda set.
    //       - For example, `(b' -[[] + Fo:f:2]-> {})` if `C:f:r=Fo:f:2`. Removing `Fo:f:2`, we get `(b' -[[]]-> {})`.
    //    2. Let `t_f2` be the directly ambient function of the specialization lambda set resolved by `C:f:r`.
    //       - For example, `(b -[[] + b:g:1]-> {})` if `C:f:r=Fo:f:2`, from the algorithm's running example.
    //    3. Unify `t_f1 ~ t_f2`.
    let LambdaSet {
        solved,
        recursion_var,
        unspecialized,
        ambient_function: t_f1,
    } = subs.get_lambda_set(this_lambda_set);
    let target_rank = subs.get_rank(this_lambda_set);

    debug_assert!(!unspecialized.is_empty());

    let unspecialized = subs.get_subs_slice(unspecialized);

    // 1. Let `t_f1` be the directly ambient function of the lambda set containing `C:f:r`.
    let Uls(c, f, r) = unique_unspecialized_lambda(subs, resolved_concrete, unspecialized).unwrap();

    debug_assert!(subs.equivalent_without_compacting(c, resolved_concrete));

    // Now decide: do we
    // - proceed with specialization
    // - simply drop the specialization lambda set (due to an error)
    // - or do we need to wait, because we don't know enough information for the specialization yet?
    let specialization_decision = make_specialization_decision(subs, phase, c, f);

    let specialization_key_or_drop = match specialization_decision {
        SpecializeDecision::Specialize(key) => Ok(key),
        SpecializeDecision::Drop => Err(()),
        SpecializeDecision::PendingSpecialization(impl_key) => {
            // Bail, we need to wait for the specialization to be known.
            // NB: returns before `C:f:r` is removed from this lambda set, leaving it intact for
            // a later compaction attempt.
            return OneCompactionResult::MustWaitForSpecialization(impl_key);
        }
    };

    // 1b. Remove `C:f:r` from `t_f1`'s lambda set.
    let new_unspecialized: Vec<_> = unspecialized
        .iter()
        .filter(|Uls(v, _, _)| !subs.equivalent_without_compacting(*v, resolved_concrete))
        .copied()
        .collect();
    debug_assert_eq!(new_unspecialized.len(), unspecialized.len() - 1);
    let t_f1_lambda_set_without_concrete = LambdaSet {
        solved,
        recursion_var,
        unspecialized: SubsSlice::extend_new(
            &mut subs.unspecialized_lambda_sets,
            new_unspecialized,
        ),
        ambient_function: t_f1,
    };
    subs.set_content(
        this_lambda_set,
        Content::LambdaSet(t_f1_lambda_set_without_concrete),
    );

    let specialization_key = match specialization_key_or_drop {
        Ok(specialization_key) => specialization_key,
        Err(()) => {
            // Do nothing other than to remove the concrete lambda to drop from the lambda set,
            // which we already did in 1b above.
            trace_compact!(3iter_end_skipped. subs, t_f1);
            return OneCompactionResult::Compacted {
                new_obligations: Default::default(),
                new_lambda_sets_to_specialize: Default::default(),
            };
        }
    };

    // 2. Let `t_f2` be the directly ambient function of the specialization lambda set resolved
    //    by `C:f:r`.
    let specialization_ambient_function_var = get_specialization_lambda_set_ambient_function(
        subs,
        derived_env,
        phase,
        f,
        r,
        specialization_key,
        target_rank,
    );

    let t_f2 = match specialization_ambient_function_var {
        Ok(lset) => lset,
        Err(()) => {
            // Do nothing other than to remove the concrete lambda to drop from the lambda set,
            // which we already did in 1b above.
            trace_compact!(3iter_end_skipped. subs, t_f1);
            return OneCompactionResult::Compacted {
                new_obligations: Default::default(),
                new_lambda_sets_to_specialize: Default::default(),
            };
        }
    };

    // Ensure the specialized ambient function we'll unify with is not a generalized one, but one
    // at the rank of the lambda set being compacted.
    let t_f2 = deep_copy_var_in(subs, target_rank, pools, t_f2, arena);

    // 3. Unify `t_f1 ~ t_f2`.
    trace_compact!(3iter_start. subs, this_lambda_set, t_f1, t_f2);
    let (vars, new_obligations, new_lambda_sets_to_specialize, _meta) =
        unify(&mut UEnv::new(subs), t_f1, t_f2, Mode::EQ)
            .expect_success("ambient functions don't unify");
    trace_compact!(3iter_end. subs, t_f1);

    introduce(subs, target_rank, pools, &vars);

    OneCompactionResult::Compacted {
        new_obligations,
        new_lambda_sets_to_specialize,
    }
}
/// How the specialization lambda set for a resolved ability member should be looked up.
#[derive(Debug)]
enum SpecializationTypeKey {
    /// The specialization is the opaque type's declared ability implementation.
    Opaque(Symbol),
    /// The specialization is derived for a structural type (keyed via `roc_derive_key`).
    Derived(DeriveKey),
    /// The specialization is an ability-member "immediate", looked up directly in the
    /// ability store.
    Immediate(Symbol),
}
/// The decision of how (or whether) to specialize an unspecialized lambda `C:f:r`.
enum SpecializeDecision {
    /// Proceed; the key says where the specialization lambda set can be found.
    Specialize(SpecializationTypeKey),
    /// Drop the unspecialized lambda; an error for this case is reported elsewhere.
    Drop,
    /// Only relevant during module solving of recursive defs - we don't yet know the
    /// specialization type for a declared ability implementation, so we must hold off on
    /// specialization.
    PendingSpecialization(ImplKey),
}
/// Decides whether the ability member `ability_member`, whose unspecialized lambda has resolved
/// to the type in `var`, should be specialized now, dropped (because of a previously-reported
/// error), or deferred until its specialization becomes known.
fn make_specialization_decision<P: Phase>(
    subs: &Subs,
    phase: &P,
    var: Variable,
    ability_member: Symbol,
) -> SpecializeDecision {
    use Content::*;
    use SpecializationTypeKey::*;
    match subs.get_content_without_compacting(var) {
        // NB: opaques from the `Num` module are excluded here and fall through to the
        // structural/derived arm below.
        Alias(opaque, _, _, AliasKind::Opaque) if opaque.module_id() != ModuleId::NUM => {
            if P::IS_LATE {
                SpecializeDecision::Specialize(Opaque(*opaque))
            } else {
                // Solving within a module.
                phase.with_module_abilities_store(opaque.module_id(), |abilities_store| {
                    let impl_key = ImplKey {
                        opaque: *opaque,
                        ability_member,
                    };
                    match abilities_store.get_implementation(impl_key) {
                        None => {
                            // Doesn't specialize; an error will already be reported for this.
                            SpecializeDecision::Drop
                        }
                        Some(MemberImpl::Error | MemberImpl::Derived) => {
                            // TODO: probably not right, we may want to choose a derive decision!
                            SpecializeDecision::Specialize(Opaque(*opaque))
                        }
                        Some(MemberImpl::Impl(specialization_symbol)) => {
                            match abilities_store.specialization_info(*specialization_symbol) {
                                Some(_) => SpecializeDecision::Specialize(Opaque(*opaque)),

                                // If we expect a specialization impl but don't yet know it, we must hold off
                                // compacting the lambda set until the specialization is well-known.
                                None => SpecializeDecision::PendingSpecialization(impl_key),
                            }
                        }
                    }
                })
            }
        }
        Structure(_) | Alias(_, _, _, _) => {
            // This is a structural type, find the name of the derived ability function it
            // should use.
            // NOTE(review): only `Encoding` derivation is consulted here — presumably other
            // abilities are handled elsewhere or not yet derivable; confirm.
            match roc_derive_key::Derived::encoding(subs, var) {
                Ok(derived) => match derived {
                    roc_derive_key::Derived::Immediate(imm) => {
                        SpecializeDecision::Specialize(Immediate(imm))
                        // todo!("deal with lambda set extraction from immediates")
                    }
                    roc_derive_key::Derived::Key(derive_key) => {
                        SpecializeDecision::Specialize(Derived(derive_key))
                    }
                },
                Err(DeriveError::UnboundVar) => {
                    // not specialized yet, but that also means that it can't possibly be derivable
                    // at this point?
                    // TODO: is this right? Revisit if it causes us problems in the future.
                    SpecializeDecision::Drop
                }
                Err(DeriveError::Underivable) => {
                    // we should have reported an error for this; drop the lambda set.
                    SpecializeDecision::Drop
                }
            }
        }
        Error => SpecializeDecision::Drop,
        FlexAbleVar(_, _)
        | RigidAbleVar(..)
        | FlexVar(..)
        | RigidVar(..)
        | RecursionVar { .. }
        | LambdaSet(..)
        | RangedNumber(..) => {
            // None of these contents is a concrete (or error) type, so the unspecialized
            // lambda should not have been considered resolved.
            internal_error!("unexpected")
        }
    }
}
/// Looks up the ambient function of the specialization lambda set at region `lset_region` for
/// `ability_member`, according to `specialization_key`, and copies it into the home `subs`.
///
/// Returns `Err(())` when no specialization is known for an opaque during module solving (an
/// error will have been reported for that case elsewhere).
#[allow(clippy::too_many_arguments)]
fn get_specialization_lambda_set_ambient_function<P: Phase>(
    subs: &mut Subs,
    derived_env: &DerivedEnv,
    phase: &P,
    ability_member: Symbol,
    lset_region: u8,
    specialization_key: SpecializationTypeKey,
    target_rank: Rank,
) -> Result<Variable, ()> {
    match specialization_key {
        SpecializationTypeKey::Opaque(opaque) => {
            // The specialization lives in the opaque's home module's ability store.
            let opaque_home = opaque.module_id();
            let external_specialized_lset =
                phase.with_module_abilities_store(opaque_home, |abilities_store| {
                    let impl_key = roc_can::abilities::ImplKey {
                        opaque,
                        ability_member,
                    };
                    let opt_specialization =
                        abilities_store.get_implementation(impl_key);
                    match opt_specialization {
                        None => {
                            if P::IS_LATE {
                                // During late solving all specializations must be resolved.
                                internal_error!(
                                    "expected to know a specialization for {:?}#{:?}, but it wasn't found",
                                    opaque,
                                    ability_member
                                );
                            } else {
                                // doesn't specialize, we'll have reported an error for this
                                Err(())
                            }
                        }
                        Some(member_impl) => match member_impl {
                            MemberImpl::Impl(spec_symbol) => {
                                let specialization =
                                    abilities_store.specialization_info(*spec_symbol).expect("expected custom implementations to always have complete specialization info by this point");

                                let specialized_lambda_set = *specialization
                                    .specialization_lambda_sets
                                    .get(&lset_region)
                                    .expect("lambda set region not resolved");
                                Ok(specialized_lambda_set)
                            }
                            MemberImpl::Derived => todo_abilities!(),
                            MemberImpl::Error => todo_abilities!(),
                        },
                    }
                })?;

            // The lambda set is in the opaque's module; copy its ambient function into our subs.
            let specialized_ambient = phase.copy_lambda_set_ambient_function_to_home_subs(
                external_specialized_lset,
                opaque_home,
                subs,
            );

            Ok(specialized_ambient)
        }
        SpecializationTypeKey::Derived(derive_key) => {
            let mut derived_module = derived_env.derived_module.lock().unwrap();

            // Synthesizes the derived implementation if it doesn't exist yet.
            let (_, _, specialization_lambda_sets) =
                derived_module.get_or_insert(derived_env.exposed_types, derive_key);

            let specialized_lambda_set = *specialization_lambda_sets
                .get(&lset_region)
                .expect("lambda set region not resolved");

            let specialized_ambient = derived_module.copy_lambda_set_ambient_function_to_subs(
                specialized_lambda_set,
                subs,
                target_rank,
            );

            Ok(specialized_ambient)
        }
        SpecializationTypeKey::Immediate(imm) => {
            // Immediates are like opaques in that we can simply look up their type definition in
            // the ability store, there is nothing new to synthesize.
            //
            // THEORY: if something can become an immediate, it will always be available in the
            // local ability store, because the transformation is local (?)
            let immediate_lambda_set_at_region =
                phase.get_and_copy_ability_member_ambient_function(imm, lset_region, subs);

            Ok(immediate_lambda_set_at_region)
        }
    }
}

View file

@ -6510,7 +6510,6 @@ mod solve_expr {
}
#[test]
#[ignore = "TODO: fix unification of derived types"]
fn encode_record() {
infer_queries!(
indoc!(
@ -6523,14 +6522,11 @@ mod solve_expr {
# ^^^^^^^^^
"#
),
@r#"
"Encoding#toEncoder(2) : { a : Str } -[[#Derived.toEncoder_{a}(0)]]-> Encoder fmt | fmt has EncoderFormatting",
"#
@"Encoding#toEncoder(2) : { a : Str } -[[#Derived.toEncoder_{a}(0)]]-> Encoder fmt | fmt has EncoderFormatting"
)
}
#[test]
#[ignore = "TODO: fix unification of derived types"]
fn encode_record_with_nested_custom_impl() {
infer_queries!(
indoc!(
@ -6539,16 +6535,14 @@ mod solve_expr {
imports [Encode.{ toEncoder, Encoding, custom }]
provides [main] to "./platform"
A := {}
A := {} has [Encoding {toEncoder}]
toEncoder = \@A _ -> custom \b, _ -> b
main = toEncoder { a: @A {} }
# ^^^^^^^^^
"#
),
@r#"
"Encoding#toEncoder(2) : { a : A } -[[#Derived.toEncoder_{a}(0)]]-> Encoder fmt | fmt has EncoderFormatting",
"#
@"Encoding#toEncoder(2) : { a : A } -[[#Derived.toEncoder_{a}(0)]]-> Encoder fmt | fmt has EncoderFormatting"
)
}
@ -6831,15 +6825,13 @@ mod solve_expr {
ping : a -> a | a has Bounce
pong : a -> a | a has Bounce
A := {} has [Bounce {ping, pong}]
A := {} has [Bounce {ping: pingA, pong: pongA}]
ping : A -> A
ping = \@A {} -> pong (@A {})
#^^^^{-1} ^^^^
pingA = \@A {} -> pong (@A {})
#^^^^^{-1} ^^^^
pong : A -> A
pong = \@A {} -> ping (@A {})
#^^^^{-1} ^^^^
pongA = \@A {} -> ping (@A {})
#^^^^^{-1} ^^^^
main =
a : A
@ -6850,17 +6842,16 @@ mod solve_expr {
"#
),
@r###"
A#ping(5) : A -[[ping(5)]]-> A
A#pong(6) : A -[[pong(6)]]-> A
A#pong(6) : A -[[pong(6)]]-> A
A#ping(5) : A -[[ping(5)]]-> A
A#ping(5) : A -[[ping(5)]]-> A
pingA : A -[[pingA(5)]]-> A
A#pong(6) : A -[[pongA(6)]]-> A
pongA : A -[[pongA(6)]]-> A
A#ping(5) : A -[[pingA(5)]]-> A
A#ping(5) : A -[[pingA(5)]]-> A
"###
)
}
#[test]
#[ignore = "TODO: this currently runs into trouble with ping and pong first being inferred as overly-general before recursive constraining"]
fn resolve_mutually_recursive_ability_lambda_sets_inferred() {
infer_queries!(
indoc!(
@ -6889,7 +6880,7 @@ mod solve_expr {
),
@r###"
A#ping(5) : A -[[ping(5)]]-> A
Bounce#pong(3) : A -[[pong(6)]]-> A
A#pong(6) : A -[[pong(6)]]-> A
A#pong(6) : A -[[pong(6)]]-> A
A#ping(5) : A -[[ping(5)]]-> A
A#ping(5) : A -[[ping(5)]]-> A
@ -7257,24 +7248,11 @@ mod solve_expr {
# ^
"#
),
// TODO SERIOUS: Let generalization is broken here, and this is NOT correct!!
// Two problems:
// - 1. `{}` always has its rank adjusted to the toplevel, which forces the rest
// of the type to the toplevel, but that is NOT correct here!
// - 2. During solving lambda set compaction cannot happen until an entire module
// is solved, which forces resolved-but-not-yet-compacted lambdas in
// unspecialized lambda sets to pull the rank into a lower, non-generalized
// rank. Special-casing for that is a TERRIBLE HACK that interferes very
// poorly with (1)
//
// We are BLOCKED on https://github.com/rtfeldman/roc/issues/3207 to make this work
// correctly!
// See also https://github.com/rtfeldman/roc/pull/3175, a separate, but similar problem.
@r###"
Fo#f(7) : Fo -[[f(7)]]-> (b -[[] + b:g(4):1]-> {}) | b has G
Go#g(8) : Go -[[g(8)]]-> {}
h : Go -[[g(8)]]-> {}
Fo#f(7) : Fo -[[f(7)]]-> (Go -[[g(8)]]-> {})
h : b -[[] + b:g(4):1]-> {} | b has G
Fo#f(7) : Fo -[[f(7)]]-> (b -[[] + b:g(4):1]-> {}) | b has G
h : Go -[[g(8)]]-> {}
"###
);

View file

@ -95,7 +95,7 @@ fn build_wasm_test_host() {
run_zig(&[
"wasm-ld",
bitcode::BUILTINS_WASM32_OBJ_PATH,
&bitcode::get_builtins_wasm32_obj_path(),
platform_path.to_str().unwrap(),
WASI_COMPILER_RT_PATH,
WASI_LIBC_PATH,

View file

@ -187,7 +187,7 @@ pub fn helper(
// With the current method all methods are kept and it adds about 100k to all outputs.
&[
app_o_file.to_str().unwrap(),
bitcode::BUILTINS_HOST_OBJ_PATH,
&bitcode::get_builtins_host_obj_path(),
],
LinkType::Dylib,
)

View file

@ -361,6 +361,21 @@ impl UlsOfVar {
fn rollback_to(&mut self, snapshot: UlsOfVarSnapshot) {
*self = snapshot.0;
}
pub fn remove_dependent_unspecialized_lambda_sets<'a>(
&'a mut self,
subs: &'a Subs,
var: Variable,
) -> impl Iterator<Item = Variable> + 'a {
let utable = &subs.utable;
let root_var = utable.root_key_without_compacting(var);
self.0
.drain_filter(move |cand_var, _| {
utable.root_key_without_compacting(*cand_var) == root_var
})
.flat_map(|(_, lambda_set_vars)| lambda_set_vars.into_iter())
}
}
#[derive(Clone)]

View file

@ -1322,6 +1322,7 @@ impl Type {
region,
type_got: args.len() as u8,
alias_needs: alias.type_variables.len() as u8,
alias_kind: AliasKind::Structural,
});
return;
}
@ -2028,6 +2029,15 @@ pub enum AliasKind {
Opaque,
}
impl AliasKind {
pub fn as_str(&self) -> &'static str {
match self {
AliasKind::Structural => "alias",
AliasKind::Opaque => "opaque",
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct AliasVar {
pub name: Lowercase,
@ -2104,6 +2114,7 @@ pub enum Problem {
region: Region,
type_got: u8,
alias_needs: u8,
alias_kind: AliasKind,
},
InvalidModule,
SolvedTypeError,
@ -2661,6 +2672,9 @@ pub fn gather_fields_unsorted_iter(
// TODO investigate apparently this one pops up in the reporting tests!
RigidVar(_) => break,
// Stop on errors in the record
Error => break,
_ => return Err(RecordFieldsError),
}
}