Merge branch 'trunk' of github.com:rtfeldman/roc into int-abs

Chad Stearns 2020-05-09 18:12:29 -04:00
commit 09ef6b2734
5 changed files with 90 additions and 81 deletions


@@ -408,9 +408,9 @@ fn gen(
Closure(annotation, _, _, loc_args, boxed_body) => {
let (loc_body, ret_var) = *boxed_body;
procs.insert_closure(
procs.insert_named(
&mut mono_env,
Some(symbol),
symbol,
annotation,
loc_args,
loc_body,


@@ -144,6 +144,31 @@ type MsgReceiver = mpsc::Receiver<Msg>;
/// The loaded_modules argument specifies which modules have already been loaded.
/// It typically contains *at least* the standard modules, but is empty when loading
/// the standard modules themselves.
///
/// If we're just type-checking everything (e.g. running `roc check` at the command line),
/// we can stop there. However, if we're generating code, then there are additional steps.
///
/// 10. After reporting the completed type annotation, we have all the information necessary
/// to monomorphize. However, since we want to monomorphize in parallel without
/// duplicating work, we do monomorphization in two steps. First, we go through and
/// determine all the specializations this module *wants*. We compute the hashes
/// and report them to the coordinator thread, along with the mono::expr::Expr values of
/// the current function's body. At this point, we have not yet begun to assemble Procs;
/// all we've done is send a list of requested specializations to the coordinator.
/// 11. The coordinator works through the specialization requests in parallel, adding them
/// to a global map once they're finished. Performing one specialization may result
/// in requests for others; these are added to the queue and worked through as normal.
/// This process continues until *both* all modules have reported that they've finished
/// adding specialization requests to the queue, *and* the queue is empty (including
/// of any requests that were added in the course of completing other requests). Now
/// we have a map of specializations, and everything was assembled in parallel with
/// no unique specialization ever getting assembled twice (meaning no wasted effort).
/// 12. Now that we have our final map of specializations, we can proceed to code gen!
/// As long as the specializations are stored in a per-ModuleId map, we can also
/// parallelize this code gen. (e.g. in dev builds, building separate LLVM modules
/// and then linking them together, and possibly caching them by the hash of their
/// specializations, so if none of their specializations changed, we don't even need
/// to rebuild the module and can link in the cached one directly.)
#[allow(clippy::cognitive_complexity)]
pub async fn load<'a>(
stdlib: &StdLib,
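
Steps 10 and 11 above boil down to a deduplicating work queue: completing one specialization may enqueue more, and the whole process ends only once the queue drains. A minimal single-threaded sketch of that invariant (the names and types here are illustrative stand-ins, not the compiler's real ones):

use std::collections::{HashMap, VecDeque};

// Illustrative stand-ins for the real type-hash and Proc types.
type TypeHash = u64;
type Proc = String;

/// Drain the specialization queue, deduplicating by type hash.
/// Completing one request may enqueue further requests, so we stop
/// only once the queue is empty; no hash is ever specialized twice.
fn drain_specializations(
    mut queue: VecDeque<TypeHash>,
    specialize: impl Fn(TypeHash) -> (Proc, Vec<TypeHash>),
) -> HashMap<TypeHash, Proc> {
    let mut finished: HashMap<TypeHash, Proc> = HashMap::new();

    while let Some(hash) = queue.pop_front() {
        if finished.contains_key(&hash) {
            continue; // already assembled elsewhere: no wasted effort
        }

        let (proc, more_requests) = specialize(hash);
        finished.insert(hash, proc);
        queue.extend(more_requests); // requests added while completing this one
    }

    finished
}

The real pipeline runs this across threads, with the coordinator owning the queue, and must additionally wait until every module has reported that it is done submitting requests.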


@@ -35,53 +35,41 @@ pub struct Procs<'a> {
}
impl<'a> Procs<'a> {
fn insert_user_defined(&mut self, symbol: Symbol, partial_proc: PartialProc<'a>) {
self.user_defined.insert(symbol, partial_proc);
}
fn insert_anonymous(&mut self, symbol: Symbol, proc: Option<Proc<'a>>) {
self.anonymous.insert(symbol, proc);
}
pub fn insert_closure(
pub fn insert_named(
&mut self,
env: &mut Env<'a, '_>,
name: Option<Symbol>,
name: Symbol,
annotation: Variable,
loc_args: std::vec::Vec<(Variable, Located<roc_can::pattern::Pattern>)>,
loc_body: Located<roc_can::expr::Expr>,
ret_var: Variable,
) -> Symbol {
// turn record/tag patterns into a when expression, e.g.
//
// foo = \{ x } -> body
//
// becomes
//
// foo = \r -> when r is { x } -> body
//
// conversion of one-pattern when expressions will do the optimal thing
) {
let (_, arg_symbols, body) = patterns_to_when(env, loc_args, ret_var, loc_body);
let (arg_vars, arg_symbols, body) = patterns_to_when(env, loc_args, ret_var, loc_body);
match name {
Some(symbol) => {
// a named closure
self.insert_user_defined(
symbol,
self.user_defined.insert(
name,
PartialProc {
annotation,
patterns: arg_symbols,
body: body.value,
},
);
symbol
}
None => {
pub fn insert_anonymous(
&mut self,
env: &mut Env<'a, '_>,
symbol: Symbol,
annotation: Variable,
loc_args: std::vec::Vec<(Variable, Located<roc_can::pattern::Pattern>)>,
loc_body: Located<roc_can::expr::Expr>,
ret_var: Variable,
) {
let (arg_vars, arg_symbols, body) = patterns_to_when(env, loc_args, ret_var, loc_body);
// an anonymous closure. These will always be specialized already
// by the surrounding context
let symbol = env.unique_symbol();
let opt_proc = specialize_proc_body(
env,
@@ -96,11 +84,7 @@ impl<'a> Procs<'a> {
)
.ok();
self.insert_anonymous(symbol, opt_proc);
symbol
}
}
self.anonymous.insert(symbol, opt_proc);
}
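
The net effect of this hunk: the old insert_closure, which took name: Option<Symbol> and returned a Symbol, is split into insert_named and insert_anonymous. A self-contained toy of the resulting shape (field and type names are simplified stand-ins for the real PartialProc, Proc, and Symbol):

use std::collections::HashMap;

struct PartialProc { body: String }
struct Proc { code: String }

#[derive(Default)]
struct Procs {
    user_defined: HashMap<String, PartialProc>,
    anonymous: HashMap<String, Option<Proc>>,
}

impl Procs {
    /// Named closures are only *registered* here; their bodies are
    /// specialized later, once call sites request specializations.
    fn insert_named(&mut self, name: String, body: String) {
        self.user_defined.insert(name, PartialProc { body });
    }

    /// Anonymous closures are always specialized already by their
    /// surrounding context, so specialize immediately (None models a
    /// specialization that failed, as with `.ok()` in the real code).
    fn insert_anonymous(&mut self, symbol: String, body: String) {
        self.anonymous.insert(symbol, Some(Proc { code: body }));
    }
}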
fn insert_specialization(
@@ -331,6 +315,15 @@ fn num_to_int_or_float(subs: &Subs, var: Variable) -> IntOrFloat {
}
}
/// turn record/tag patterns into a when expression, e.g.
///
/// foo = \{ x } -> body
///
/// becomes
///
/// foo = \r -> when r is { x } -> body
///
/// conversion of one-pattern when expressions will do the optimal thing
fn patterns_to_when<'a>(
env: &mut Env<'a, '_>,
patterns: std::vec::Vec<(Variable, Located<roc_can::pattern::Pattern>)>,
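
A standalone sketch of what such a rewrite can look like over a toy AST (the real roc_can Pattern and Expr types, and the unique-symbol generation, are richer than this):

// Toy AST; stand-ins for roc_can's real types.
enum Pattern {
    Identifier(String),
    RecordDestructure(Vec<String>),
}

enum Expr {
    Var(String),
    When { cond: Box<Expr>, branches: Vec<(Pattern, Expr)> },
}

/// Keep identifier patterns as-is; for destructuring patterns, invent a
/// fresh argument symbol and wrap the body in a one-branch `when` on it.
fn patterns_to_when(patterns: Vec<Pattern>, mut body: Expr) -> (Vec<String>, Expr) {
    let mut arg_symbols = Vec::with_capacity(patterns.len());

    // Walk in reverse so the first argument's `when` ends up outermost.
    for (index, pattern) in patterns.into_iter().enumerate().rev() {
        match pattern {
            Pattern::Identifier(name) => arg_symbols.push(name),
            destructure => {
                let fresh = format!("#arg{}", index); // stand-in for env.unique_symbol()
                body = Expr::When {
                    cond: Box::new(Expr::Var(fresh.clone())),
                    branches: vec![(destructure, body)],
                };
                arg_symbols.push(fresh);
            }
        }
    }

    arg_symbols.reverse();
    (arg_symbols, body)
}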
@@ -482,9 +475,20 @@ fn from_can<'a>(
LetRec(defs, ret_expr, _, _) => from_can_defs(env, defs, *ret_expr, procs),
LetNonRec(def, ret_expr, _, _) => from_can_defs(env, vec![*def], *ret_expr, procs),
Closure(annotation, _, _, loc_args, boxed_body) => {
Closure(ann, original_name, _, loc_args, boxed_body) => {
let (loc_body, ret_var) = *boxed_body;
let symbol = procs.insert_closure(env, name, annotation, loc_args, loc_body, ret_var);
let symbol = match name {
Some(symbol) => {
procs.insert_named(env, symbol, ann, loc_args, loc_body, ret_var);
symbol
}
None => {
procs.insert_anonymous(env, original_name, ann, loc_args, loc_body, ret_var);
original_name
}
};
Expr::FunctionPointer(symbol)
}
@@ -1350,7 +1354,7 @@ fn call_by_name<'a>(
Some(specialization) => {
opt_specialize_body = None;
// a specialization with this type hash already exists, use its symbol
// a specialization with this type hash already exists, so use its symbol
specialization.0
}
None => {
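
The branch this hunk touches is a memoization keyed by type hash. A sketch with assumed shapes (u64 hashes and u32 symbols as stand-ins); the returned bool plays the role of opt_specialize_body, flagging whether a body still needs to be specialized:

use std::collections::hash_map::Entry;
use std::collections::HashMap;

fn symbol_for_specialization(
    by_type_hash: &mut HashMap<u64, u32>,
    type_hash: u64,
    fresh_symbol: impl FnOnce() -> u32,
) -> (u32, bool) {
    match by_type_hash.entry(type_hash) {
        // a specialization with this type hash already exists, so use its symbol
        Entry::Occupied(existing) => (*existing.get(), false),
        // otherwise mint a fresh symbol, remember it, and request a body
        Entry::Vacant(slot) => (*slot.insert(fresh_symbol()), true),
    }
}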


@@ -51,26 +51,6 @@ pub fn infer_expr(
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 4 * 1024 * 1024;
/// Without this, some tests pass in `cargo test --release` but fail without
/// the --release flag because they run out of stack space. This increases
/// stack size for debug builds only, while leaving the stack space at the default
/// amount for release builds.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
F: FnOnce() -> (),
F: Send,
F: 'static,
{
std::thread::Builder::new()
.stack_size(EXPANDED_STACK_SIZE)
.spawn(run_test)
.expect("Error while spawning expanded dev stack size thread")
.join()
.expect("Error while joining expanded dev stack size thread")
}
/// In --release builds, don't increase the stack size. Run the test normally.
/// This way, we find out if any of our tests are blowing the stack even after
/// optimizations in release builds.
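
The release-build counterpart is cut off here; going by its doc comment, it presumably just runs the closure directly on the current thread, along these lines (a sketch, not the verbatim source):

#[allow(dead_code)]
#[cfg(not(debug_assertions))]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() -> (),
{
    // No expanded stack in release builds: run on the current thread.
    run_test()
}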