Merge remote-tracking branch 'origin/trunk' into use-fewer-variables

This commit is contained in:
Folkert 2020-03-07 00:53:13 +01:00
commit 21bbfd3c94
88 changed files with 2532 additions and 2251 deletions

290
Cargo.lock generated
View file

@ -237,12 +237,6 @@ dependencies = [
"libc",
]
[[package]]
name = "fixedbitset"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33"
[[package]]
name = "fnv"
version = "1.0.6"
@ -255,97 +249,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
[[package]]
name = "futures"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a"
[[package]]
name = "futures-executor"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6"
[[package]]
name = "futures-macro"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7"
dependencies = [
"proc-macro-hack",
"proc-macro2 1.0.9",
"quote 1.0.3",
"syn 1.0.16",
]
[[package]]
name = "futures-sink"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6"
[[package]]
name = "futures-task"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27"
[[package]]
name = "futures-util"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-utils",
"proc-macro-hack",
"proc-macro-nested",
"slab",
]
[[package]]
name = "gcc"
version = "0.3.55"
@ -541,12 +444,6 @@ version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b"
[[package]]
name = "ordermap"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063"
[[package]]
name = "parking_lot"
version = "0.10.0"
@ -571,28 +468,12 @@ dependencies = [
"winapi",
]
[[package]]
name = "petgraph"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f"
dependencies = [
"fixedbitset",
"ordermap",
]
[[package]]
name = "pin-project-lite"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae"
[[package]]
name = "pin-utils"
version = "0.1.0-alpha.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587"
[[package]]
name = "pretty_assertions"
version = "0.5.1"
@ -614,12 +495,6 @@ dependencies = [
"syn 1.0.16",
]
[[package]]
name = "proc-macro-nested"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e"
[[package]]
name = "proc-macro2"
version = "0.4.30"
@ -856,45 +731,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "roc"
version = "0.1.0"
dependencies = [
"bumpalo",
"cranelift",
"cranelift-codegen",
"cranelift-module",
"cranelift-simplejit",
"futures",
"im",
"im-rc",
"indoc",
"inkwell",
"inlinable_string",
"lazy_static",
"log",
"maplit",
"petgraph",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
"roc_unify",
"roc_uniqueness",
"target-lexicon",
"tokio",
"wyhash",
]
[[package]]
name = "roc_builtins"
version = "0.1.0"
@ -959,7 +795,88 @@ dependencies = [
"roc_parse",
"roc_region",
"roc_types",
"roc_uniqueness",
"roc_uniq",
]
[[package]]
name = "roc_fmt"
version = "0.1.0"
dependencies = [
"bumpalo",
"im",
"im-rc",
"indoc",
"inlinable_string",
"maplit",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_collections",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_types",
]
[[package]]
name = "roc_gen"
version = "0.1.0"
dependencies = [
"bumpalo",
"cranelift",
"cranelift-codegen",
"cranelift-module",
"cranelift-simplejit",
"im",
"im-rc",
"indoc",
"inkwell",
"inlinable_string",
"maplit",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_mono",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
"roc_unify",
"roc_uniq",
"target-lexicon",
"tokio",
]
[[package]]
name = "roc_load"
version = "0.1.0"
dependencies = [
"bumpalo",
"indoc",
"inlinable_string",
"maplit",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
"roc_unify",
"tokio",
]
[[package]]
@ -976,6 +893,29 @@ dependencies = [
"roc_region",
]
[[package]]
name = "roc_mono"
version = "0.1.0"
dependencies = [
"bumpalo",
"indoc",
"maplit",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
"roc_unify",
]
[[package]]
name = "roc_parse"
version = "0.1.0"
@ -1031,15 +971,21 @@ dependencies = [
name = "roc_solve"
version = "0.1.0"
dependencies = [
"bumpalo",
"indoc",
"maplit",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
"roc_unify",
]
@ -1077,9 +1023,10 @@ dependencies = [
]
[[package]]
name = "roc_uniqueness"
name = "roc_uniq"
version = "0.1.0"
dependencies = [
"bumpalo",
"im",
"im-rc",
"indoc",
@ -1087,10 +1034,15 @@ dependencies = [
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"roc_builtins",
"roc_can",
"roc_collections",
"roc_constrain",
"roc_module",
"roc_parse",
"roc_problem",
"roc_region",
"roc_solve",
"roc_types",
]
@ -1134,12 +1086,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "slab"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
[[package]]
name = "smallvec"
version = "1.2.0"

View file

@ -1,7 +1,6 @@
[workspace]
members = [
"compiler",
"compiler/region",
"compiler/collections",
"compiler/module",
@ -9,12 +8,16 @@ members = [
"compiler/can",
"compiler/problem",
"compiler/types",
"compiler/uniqueness",
"compiler/uniq",
"compiler/builtins",
"compiler/constrain",
"compiler/unify",
"compiler/solve",
"compiler/reporting",
"compiler/fmt",
"compiler/mono",
"compiler/load",
"compiler/gen",
"vendor/ena",
"vendor/pathfinding"
]

114
Core.md
View file

@ -1,114 +0,0 @@
Uses for unions:
1. Reduce
## reduce : (a, b -> b | Done), b, Array a -> b
Being able to return Done when you're done means you can implement things like
`find` without needing recursion.
Having to return (Continue val) or Done every time would be tedious. This is a
simpler API.
2. Additional guarantees
I can define my reducing function as either of these:
reduceFn : String, Int -> Int
reduceFn : String, Int -> Int | Done
These types unify, meaning I can say something more specific about my
types than I can with something like Maybe. "This reduce call never bails
out early."
Example:
fibonacci : Int -> Int
fibonacci = index ->
if index <= 1 then
index
else
fibonacci (index - 1) + fibonacci (index - 2)
# Array
## empty : Array *
## isEmpty : Array * -> Bool
## length : Array * -> Int
## get : Int, Array elem -> elem | Nil
## put : Int, elem, Array elem -> Array elem
## push : elem, Array elem -> Array elem
## concat : Array elem, Array elem -> Array elem
## find : (elem -> Bool) -> Array elem -> elem | Nil
## map : (a -> b), Array a -> Array b
## indexedMap : (Int, a -> b), Array a -> Array b
## reduce : (a, b -> b | Done), b, Array a -> b
## reduceFromEnd : (a, b -> b | Done), b, Array a -> b
## keepIf : (elem -> Bool), Array elem -> Array elem
## dropIf : (elem -> Bool), Array elem -> Array elem
# String
## isEmpty : String -> Bool
## length : String -> Int
## replace : { every : String, with : String }, String -> String
## concat : String, String -> String
## join : String, Array String -> String
## split : String, String -> Array String
## takeFirst : Int, String -> String
## takeLast : Int, String -> String
## dropFirst : Int, String -> String
## dropLast : Int, String -> String
## startsWith : String, String -> Bool
## endsWith : String, String -> Bool
## toInt : String -> Int | Nil
## toFloat : String -> Float | Nil
## fromInt : Int -> String
## fromFloat : Float -> String
## toUpper : String -> String
## toLower : String -> String
## trim : String -> String
## padStart : Int, String -> String
## padEnd : Int, String -> String

10
NOTICE
View file

@ -1,10 +0,0 @@
Notice of copyrights and license use. Thank you to everyone who has contributed
to this project, either directly or indirectly!
## Licensed from Apache 2.0 projects
Source code from the following projects has been included inline in this project,
licensed under the Apache License, version 2.0. A full copy of the License
can be found here: http://www.apache.org/licenses/LICENSE-2.0
1. The num-integer crate: https://docs.rs/num-integer/0.1.41/src/num_integer/

View file

@ -1,61 +1,57 @@
succeed = \val =>
Success val
succeed = \val -> Success val
fail = \val =>
Failure val
fail = \val -> Failure val
echo = \str =>
Echo fail succeed str
echo = \str -> Echo fail succeed str
readInput =
Read fail succeed
readInput = Read fail succeed
map = \convert task =>
after task \output =>
succeed (convert output)
map = \convert, task ->
after task \output ->
succeed (convert output)
mapErr = \convert task =>
fallback task \err =>
fail (convert err)
mapErr = \convert, task ->
fallback task \err ->
fail (convert err)
after = \task cont =>
case task
when Success val then cont val
when Failure val then Failure val
after = \task, cont ->
when task is
Success val -> cont val
Failure val -> Failure val
when Echo onFailure prevCont str then
Echo onFailure prevCont str ->
Echo
(\ioErr => after (onFailure ioErr) cont)
(\{} => after (prevCont {}) cont)
(\ioErr -> after (onFailure ioErr) cont)
(\{} -> after (prevCont {}) cont)
str
when Read onFailure prevCont then
Read onFailure prevCont ->
Read
(\ioErr => after (onFailure ioErr) cont)
(\str => after (prevCont str) cont)
(\ioErr -> after (onFailure ioErr) cont)
(\str -> after (prevCont str) cont)
fallback = \task onFailure =>
case task
when Success val then Success val
when Failure val then onFailure val
fallback = \task onFailure ->
when task is
Success val -> Success val
Failure val -> onFailure val
when Echo prevOnFailure cont str then
Echo prevOnFailure cont str ->
Echo
(\ioErr => fallback (prevOnFailure ioErr) onFailure)
(\{} => fallback (cont {}) onFailure)
(\ioErr -> fallback (prevOnFailure ioErr) onFailure)
(\{} -> fallback (cont {}) onFailure)
str
when Read prevOnFailure cont then
Read prevOnFailure cont ->
Read
(\ioErr => fallback (prevOnFailure ioErr) onFailure)
(\str => fallback (cont str) onFailure)
(\ioErr -> fallback (prevOnFailure ioErr) onFailure)
(\str -> fallback (cont str) onFailure)
###############################################################
@ -63,10 +59,10 @@ fallback = \task onFailure =>
###############################################################
program =
after (echo "What is your first name?") \{} =>
after readInput \firstName =>
after (echo "Hi \(firstName)! What is your last name?") \{} =>
after readInput \lastName =>
echo "Your full name is: \(firstName) \(lastName)"
after (echo "What is your first name?") \{} ->
after readInput \firstName ->
after (echo "Hi \(firstName)! What is your last name?") \{} ->
after readInput \lastName ->
echo "Your full name is: \(firstName) \(lastName)"
program

View file

@ -1,7 +1,7 @@
fibonacci = \num =>
fibonacci = \num ->
if num < 2 then
num
else
fibonacci (num - 1) + fibonacci (num - 2)
fibonacci 9
fibonacci 9

View file

@ -105,3 +105,40 @@ That concludes our original recursive call to `eval`, after which point we'll be
This will work the same way as `Minus` did, and will reduce down to `Int(6)`.
## Optimization philosophy
Focus on optimizations which are only safe in the absence of side effects, and leave the rest to LLVM.
This focus may lead to some optimizations becoming transitively in scope. For example, some deforestation
examples in the MSR paper benefit from multiple rounds of interleaved deforestation, beta-reduction, and inlining.
To get those benefits, we'd have to do some inlining and beta-reduction that we could otherwise leave to LLVM's
inlining and constant propagation/folding.
Even if we're doing those things, it may still make sense to have LLVM do a pass for them as well, since
early LLVM optimization passes may unlock later opportunities for inlining and constant propagation/folding.
## Inlining
If a function is called exactly once (it's a helper function), presumably we always want to inline those.
If a function is "small enough" it's probably worth inlining too.
## Fusion
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/07/deforestation-short-cut.pdf
Basic approach:
Do list stuff using `build` passing Cons Nil (like a cons list) and then do foldr/build substitution/reduction.
Afterwards, we can do a separate pass to flatten nested Cons structures into properly initialized RRBTs.
This way we get both deforestation and efficient RRBT construction. Should work for the other collection types too.
It looks like we need to do some amount of inlining and beta reductions on the Roc side, rather than
leaving all of those to LLVM.
Advanced approach:
Express operations like map and filter in terms of toStream and fromStream, to unlock more deforestation.
More info on here:
https://wiki.haskell.org/GHC_optimisations#Fusion

View file

@ -6,7 +6,7 @@ use roc_types::solved_types::{BuiltinAlias, SolvedType};
use roc_types::subs::VarId;
use std::collections::HashMap;
#[derive(Clone, Copy)]
#[derive(Clone, Copy, Debug)]
pub enum Mode {
Standard,
Uniqueness,

View file

@ -25,6 +25,22 @@ const UVAR4: VarId = VarId::from_u32(1004);
const UVAR5: VarId = VarId::from_u32(1005);
const UVAR6: VarId = VarId::from_u32(1006);
// Hands out fresh, unique `VarId`s for building solved types by hand.
// Starts at 2000 so the generated ids cannot collide with the UVAR*/TVAR*
// constants declared above (which live in the 1000s range).
pub struct IDStore(u32);
impl IDStore {
// Creates a store whose first fresh id will be 2000.
fn new() -> Self {
IDStore(2000)
}
// Returns the next unused `VarId` and advances the internal counter.
fn fresh(&mut self) -> VarId {
let result = VarId::from_u32(self.0);
self.0 += 1;
result
}
}
/// Builds the uniqueness attribute for shared values: the boolean
/// `Zero` atom with an empty list of remaining atoms.
fn shared() -> SolvedType {
    SolvedType::Boolean(SolvedAtom::Zero, Vec::new())
}
@ -39,7 +55,7 @@ fn disjunction(free: VarId, rest: Vec<VarId>) -> SolvedType {
SolvedType::Boolean(SolvedAtom::Variable(free), solved_rest)
}
pub fn uniqueness_stdlib() -> StdLib {
pub fn uniq_stdlib() -> StdLib {
use crate::std::Mode;
let types = types();
@ -309,6 +325,12 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
),
);
// toFloat : Num a -> Float
add_type(
Symbol::NUM_TO_FLOAT,
unique_function(vec![num_type(UVAR1, TVAR1)], float_type(UVAR2)),
);
// Int module
// highest : Int
@ -363,6 +385,18 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
// Bool module
// isEq or (==) : Attr u1 Bool, Attr u2 Bool -> Attr u3 Bool
add_type(
Symbol::BOOL_EQ,
unique_function(vec![bool_type(UVAR1), bool_type(UVAR2)], bool_type(UVAR3)),
);
// isNeq or (!=) : Attr u1 Bool, Attr u2 Bool -> Attr u3 Bool
add_type(
Symbol::BOOL_NEQ,
unique_function(vec![bool_type(UVAR1), bool_type(UVAR2)], bool_type(UVAR3)),
);
// and or (&&) : Attr u1 Bool, Attr u2 Bool -> Attr u3 Bool
add_type(
Symbol::BOOL_AND,
@ -524,6 +558,267 @@ pub fn types() -> MutMap<Symbol, (SolvedType, Region)> {
),
);
// Map module
// empty : Map k v
add_type(Symbol::MAP_EMPTY, map_type(UVAR1, TVAR1, TVAR2));
// singleton : k, v -> Map k v
add_type(
Symbol::MAP_SINGLETON,
unique_function(
vec![flex(TVAR1), flex(TVAR2)],
map_type(UVAR1, TVAR1, TVAR2),
),
);
// get : Attr (u | v | *) (Map (Attr u key) (Attr v val)), Attr (u | *) key -> Attr * (Result (Attr v val) [ KeyNotFound ]*)
let key_not_found = SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
SolvedType::Wildcard,
SolvedType::TagUnion(
vec![(TagName::Global("KeyNotFound".into()), vec![])],
Box::new(SolvedType::Wildcard),
),
],
);
add_type(Symbol::MAP_GET, {
let mut store = IDStore::new();
let u = store.fresh();
let v = store.fresh();
let key = store.fresh();
let val = store.fresh();
let star1 = store.fresh();
let star2 = store.fresh();
let star3 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u, v]),
SolvedType::Apply(
Symbol::MAP_MAP,
vec![attr_type(u, key), attr_type(v, val)],
),
],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![disjunction(star2, vec![u]), flex(key)],
),
],
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
flex(star3),
SolvedType::Apply(
Symbol::RESULT_RESULT,
vec![attr_type(v, val), key_not_found],
),
],
),
)
});
// insert : Attr (u | v | *) (Map (Attr u key) (Attr v val)), Attr (u | *) key, Attr (v | *) val -> Attr * (Map (Attr u key) (Attr v val))
add_type(Symbol::MAP_INSERT, {
let mut store = IDStore::new();
let u = store.fresh();
let v = store.fresh();
let key = store.fresh();
let val = store.fresh();
let star1 = store.fresh();
let star2 = store.fresh();
let star3 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u, v]),
SolvedType::Apply(
Symbol::MAP_MAP,
vec![attr_type(u, key), attr_type(v, val)],
),
],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![disjunction(star2, vec![u]), flex(key)],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![disjunction(star2, vec![v]), flex(val)],
),
],
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
flex(star3),
SolvedType::Apply(Symbol::MAP_MAP, vec![attr_type(u, key), attr_type(v, val)]),
],
),
)
});
// Set module
// empty : Set a
add_type(Symbol::SET_EMPTY, set_type(UVAR1, TVAR1));
// singleton : a -> Set a
add_type(
Symbol::SET_SINGLETON,
unique_function(vec![flex(TVAR1)], set_type(UVAR1, TVAR1)),
);
// op : Attr (u | *) (Set (Attr u a)), Attr (u | *) (Set (Attr u a)) -> Attr * (Set (Attr u a))
let set_combine = {
let mut store = IDStore::new();
let u = store.fresh();
let a = store.fresh();
let star1 = store.fresh();
let star2 = store.fresh();
let star3 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u]),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star2, vec![u]),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
],
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
flex(star3),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
)
};
// union : Set a, Set a -> Set a
add_type(Symbol::SET_UNION, set_combine.clone());
// diff : Set a, Set a -> Set a
add_type(Symbol::SET_DIFF, set_combine);
// foldl : Attr (u | *) (Set (Attr u a)), Attr Shared (Attr u a -> b -> b), b -> b
add_type(Symbol::SET_FOLDL, {
let mut store = IDStore::new();
let u = store.fresh();
let a = store.fresh();
let b = store.fresh();
let star1 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u]),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
shared(),
SolvedType::Func(vec![attr_type(u, a), flex(b)], Box::new(flex(b))),
],
),
flex(b),
],
flex(b),
)
});
// insert : Attr (u | *) (Set (Attr u a)), Attr (u | *) a -> Attr * (Set (Attr u a))
add_type(Symbol::SET_INSERT, {
let mut store = IDStore::new();
let u = store.fresh();
let a = store.fresh();
let star1 = store.fresh();
let star2 = store.fresh();
let star3 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u]),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![disjunction(star2, vec![u]), flex(a)],
),
],
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
flex(star3),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
)
});
// we can remove a key that is shared from a set of unique keys
// remove : Attr (u | *) (Set (Attr u a)), Attr * a -> Attr * (Set (Attr u a))
add_type(Symbol::SET_REMOVE, {
let mut store = IDStore::new();
let u = store.fresh();
let a = store.fresh();
let star1 = store.fresh();
let star2 = store.fresh();
let star3 = store.fresh();
unique_function(
vec![
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
disjunction(star1, vec![u]),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
SolvedType::Apply(Symbol::ATTR_ATTR, vec![flex(star2), flex(a)]),
],
SolvedType::Apply(
Symbol::ATTR_ATTR,
vec![
flex(star3),
SolvedType::Apply(Symbol::SET_SET, vec![attr_type(u, a)]),
],
),
)
});
// Str module
// isEmpty : Attr u Str -> Attr v Bool
@ -655,3 +950,22 @@ fn list_type(u: VarId, a: VarId) -> SolvedType {
vec![flex(u), SolvedType::Apply(Symbol::LIST_LIST, vec![flex(a)])],
)
}
#[inline(always)]
/// Wraps `Set a` in a uniqueness attribute: `Attr u (Set a)`,
/// with both `u` and `a` left as flex variables.
fn set_type(u: VarId, a: VarId) -> SolvedType {
    let set = SolvedType::Apply(Symbol::SET_SET, vec![flex(a)]);

    SolvedType::Apply(Symbol::ATTR_ATTR, vec![flex(u), set])
}
#[inline(always)]
/// Wraps `Map key value` in a uniqueness attribute:
/// `Attr u (Map key value)`, with `u`, `key` and `value` all flex.
fn map_type(u: VarId, key: VarId, value: VarId) -> SolvedType {
    let map = SolvedType::Apply(Symbol::MAP_MAP, vec![flex(key), flex(value)]);

    SolvedType::Apply(Symbol::ATTR_ATTR, vec![flex(u), map])
}

View file

@ -1,6 +1,6 @@
use crate::env::Env;
use crate::scope::Scope;
use roc_collections::all::{ImMap, MutMap, MutSet, SendMap};
use roc_collections::all::{MutSet, SendMap};
use roc_module::ident::Ident;
use roc_module::ident::{Lowercase, TagName};
use roc_module::symbol::Symbol;
@ -13,19 +13,13 @@ use roc_types::types::{Alias, Problem, Type};
#[derive(Clone, Debug, PartialEq)]
pub struct Annotation {
pub typ: Type,
pub ftv: MutMap<Variable, Lowercase>,
pub rigids: ImMap<Lowercase, Variable>,
pub introduced_variables: IntroducedVariables,
pub references: MutSet<Symbol>,
pub aliases: SendMap<Symbol, Alias>,
}
pub fn canonicalize_annotation(
env: &mut Env,
scope: &mut Scope,
annotation: &roc_parse::ast::TypeAnnotation,
region: Region,
var_store: &VarStore,
) -> Annotation {
#[derive(Clone, Debug, PartialEq, Default)]
pub struct IntroducedVariables {
// NOTE on rigids
//
// Rigids must be unique within a type annotation.
@ -36,7 +30,44 @@ pub fn canonicalize_annotation(
// But then between annotations, the same name can occur multiple times,
// but a variable can only have one name. Therefore
// `ftv : SendMap<Variable, Lowercase>`.
let mut rigids = ImMap::default();
pub wildcards: Vec<Variable>,
pub var_by_name: SendMap<Lowercase, Variable>,
pub name_by_var: SendMap<Variable, Lowercase>,
}
impl IntroducedVariables {
pub fn insert_named(&mut self, name: Lowercase, var: Variable) {
self.var_by_name.insert(name.clone(), var);
self.name_by_var.insert(var, name);
}
pub fn insert_wildcard(&mut self, var: Variable) {
self.wildcards.push(var);
}
pub fn union(&mut self, other: &Self) {
self.wildcards.extend(other.wildcards.iter().cloned());
self.var_by_name.extend(other.var_by_name.clone());
self.name_by_var.extend(other.name_by_var.clone());
}
pub fn var_by_name(&self, name: &Lowercase) -> Option<&Variable> {
self.var_by_name.get(name)
}
pub fn name_by_var(&self, var: Variable) -> Option<&Lowercase> {
self.name_by_var.get(&var)
}
}
pub fn canonicalize_annotation(
env: &mut Env,
scope: &mut Scope,
annotation: &roc_parse::ast::TypeAnnotation,
region: Region,
var_store: &VarStore,
) -> Annotation {
let mut introduced_variables = IntroducedVariables::default();
let mut aliases = SendMap::default();
let mut references = MutSet::default();
let typ = can_annotation_help(
@ -45,22 +76,15 @@ pub fn canonicalize_annotation(
region,
scope,
var_store,
&mut rigids,
&mut introduced_variables,
&mut aliases,
&mut references,
);
let mut ftv = MutMap::default();
for (k, v) in rigids.clone() {
ftv.insert(v, k);
}
Annotation {
typ,
ftv,
introduced_variables,
references,
rigids,
aliases,
}
}
@ -72,7 +96,7 @@ fn can_annotation_help(
region: Region,
scope: &mut Scope,
var_store: &VarStore,
rigids: &mut ImMap<Lowercase, Variable>,
introduced_variables: &mut IntroducedVariables,
local_aliases: &mut SendMap<Symbol, Alias>,
references: &mut MutSet<Symbol>,
) -> Type {
@ -89,7 +113,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -103,7 +127,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -148,7 +172,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -161,12 +185,12 @@ fn can_annotation_help(
BoundVariable(v) => {
let name = Lowercase::from(*v);
match rigids.get(&name) {
match introduced_variables.var_by_name(&name) {
Some(var) => Type::Variable(*var),
None => {
let var = var_store.fresh();
rigids.insert(name, var);
introduced_variables.insert_named(name, var);
Type::Variable(var)
}
@ -200,7 +224,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -214,12 +238,12 @@ fn can_annotation_help(
BoundVariable(ident) => {
let var_name = Lowercase::from(ident);
if let Some(var) = rigids.get(&var_name) {
if let Some(var) = introduced_variables.var_by_name(&var_name) {
vars.push((var_name, Type::Variable(*var)));
} else {
let var = var_store.fresh();
rigids.insert(var_name.clone(), var);
introduced_variables.insert_named(var_name.clone(), var);
vars.push((var_name.clone(), Type::Variable(var)));
lowercase_vars.push(Located::at(loc_var.region, (var_name, var)));
@ -276,7 +300,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
&mut field_types,
references,
@ -290,7 +314,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
),
@ -309,7 +333,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
&mut tag_types,
references,
@ -323,7 +347,7 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
),
@ -338,13 +362,15 @@ fn can_annotation_help(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
),
Wildcard | Malformed(_) => {
let var = var_store.fresh();
introduced_variables.insert_wildcard(var);
Type::Variable(var)
}
}
@ -358,7 +384,7 @@ fn can_assigned_field<'a>(
region: Region,
scope: &mut Scope,
var_store: &VarStore,
rigids: &mut ImMap<Lowercase, Variable>,
introduced_variables: &mut IntroducedVariables,
local_aliases: &mut SendMap<Symbol, Alias>,
field_types: &mut SendMap<Lowercase, Type>,
references: &mut MutSet<Symbol>,
@ -373,7 +399,7 @@ fn can_assigned_field<'a>(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -385,11 +411,11 @@ fn can_assigned_field<'a>(
// Interpret { a, b } as { a : a, b : b }
let field_name = Lowercase::from(loc_field_name.value);
let field_type = {
if let Some(var) = rigids.get(&field_name) {
if let Some(var) = introduced_variables.var_by_name(&field_name) {
Type::Variable(*var)
} else {
let field_var = var_store.fresh();
rigids.insert(field_name.clone(), field_var);
introduced_variables.insert_named(field_name.clone(), field_var);
Type::Variable(field_var)
}
};
@ -402,7 +428,7 @@ fn can_assigned_field<'a>(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
field_types,
references,
@ -419,7 +445,7 @@ fn can_tag<'a>(
region: Region,
scope: &mut Scope,
var_store: &VarStore,
rigids: &mut ImMap<Lowercase, Variable>,
introduced_variables: &mut IntroducedVariables,
local_aliases: &mut SendMap<Symbol, Alias>,
tag_types: &mut Vec<(TagName, Vec<Type>)>,
references: &mut MutSet<Symbol>,
@ -436,7 +462,7 @@ fn can_tag<'a>(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -458,7 +484,7 @@ fn can_tag<'a>(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
references,
);
@ -474,7 +500,7 @@ fn can_tag<'a>(
region,
scope,
var_store,
rigids,
introduced_variables,
local_aliases,
tag_types,
references,

View file

@ -1,4 +1,5 @@
use crate::annotation::canonicalize_annotation;
use crate::annotation::IntroducedVariables;
use crate::env::Env;
use crate::expr::Expr::{self, *};
use crate::expr::{
@ -28,7 +29,7 @@ pub struct Def {
pub loc_expr: Located<Expr>,
pub expr_var: Variable,
pub pattern_vars: SendMap<Symbol, Variable>,
pub annotation: Option<(Type, SendMap<Lowercase, Variable>, SendMap<Symbol, Alias>)>,
pub annotation: Option<(Type, IntroducedVariables, SendMap<Symbol, Alias>)>,
}
#[derive(Debug)]
@ -192,7 +193,10 @@ pub fn canonicalize_defs<'a>(
Vec::with_capacity(vars.len());
for loc_lowercase in vars {
if let Some(var) = can_ann.rigids.get(&loc_lowercase.value) {
if let Some(var) = can_ann
.introduced_variables
.var_by_name(&loc_lowercase.value)
{
// This is a valid lowercase rigid var for the alias.
can_vars.push(Located {
value: (loc_lowercase.value.clone(), *var),
@ -713,14 +717,7 @@ fn canonicalize_pending_def<'a>(
aliases.insert(symbol, alias);
}
// union seen rigids with already found ones
for (k, v) in ann.rigids {
output.rigids.insert(k, v);
}
for (k, v) in ann.ftv {
output.ftv.insert(k, v);
}
output.introduced_variables.union(&ann.introduced_variables);
pattern_to_vars_by_symbol(&mut vars_by_symbol, &loc_can_pattern.value, expr_var);
@ -790,7 +787,11 @@ fn canonicalize_pending_def<'a>(
value: loc_can_expr.value.clone(),
},
pattern_vars: im::HashMap::clone(&vars_by_symbol),
annotation: Some((typ.clone(), output.rigids.clone(), ann.aliases.clone())),
annotation: Some((
typ.clone(),
output.introduced_variables.clone(),
ann.aliases.clone(),
)),
},
);
}
@ -810,7 +811,10 @@ fn canonicalize_pending_def<'a>(
let mut can_vars: Vec<Located<(Lowercase, Variable)>> = Vec::with_capacity(vars.len());
for loc_lowercase in vars {
if let Some(var) = can_ann.rigids.get(&loc_lowercase.value) {
if let Some(var) = can_ann
.introduced_variables
.var_by_name(&loc_lowercase.value)
{
// This is a valid lowercase rigid var for the alias.
can_vars.push(Located {
value: (loc_lowercase.value.clone(), *var),
@ -840,15 +844,9 @@ fn canonicalize_pending_def<'a>(
let alias = scope.lookup_alias(symbol).expect("alias was not added");
aliases.insert(symbol, alias.clone());
// aliases cannot introduce new rigids that are visible in other annotations
// but the rigids can show up in type error messages, so still register them
for (k, v) in can_ann.rigids {
output.rigids.insert(k, v);
}
for (k, v) in can_ann.ftv {
output.ftv.insert(k, v);
}
output
.introduced_variables
.union(&can_ann.introduced_variables);
}
TypedBody(loc_pattern, loc_can_pattern, loc_ann, loc_expr) => {
let ann =
@ -867,14 +865,7 @@ fn canonicalize_pending_def<'a>(
aliases.insert(symbol, alias);
}
// union seen rigids with already found ones
for (k, v) in ann.rigids {
output.rigids.insert(k, v);
}
for (k, v) in ann.ftv {
output.ftv.insert(k, v);
}
output.introduced_variables.union(&ann.introduced_variables);
// bookkeeping for tail-call detection. If we're assigning to an
// identifier (e.g. `f = \x -> ...`), then this symbol can be tail-called.
@ -991,7 +982,11 @@ fn canonicalize_pending_def<'a>(
value: loc_can_expr.value.clone(),
},
pattern_vars: im::HashMap::clone(&vars_by_symbol),
annotation: Some((typ.clone(), output.rigids.clone(), ann.aliases.clone())),
annotation: Some((
typ.clone(),
output.introduced_variables.clone(),
ann.aliases.clone(),
)),
},
);
}
@ -1157,8 +1152,9 @@ pub fn can_defs_with_return<'a>(
let (ret_expr, mut output) =
canonicalize_expr(env, var_store, &mut scope, loc_ret.region, &loc_ret.value);
output.rigids = output.rigids.union(defs_output.rigids);
output.ftv = output.ftv.union(defs_output.ftv);
output
.introduced_variables
.union(&defs_output.introduced_variables);
output.references = output.references.union(defs_output.references);
// Now that we've collected all the references, check to see if any of the new idents

View file

@ -1,3 +1,4 @@
use crate::annotation::IntroducedVariables;
use crate::def::{can_defs_with_return, Def};
use crate::env::Env;
use crate::num::{
@ -25,8 +26,7 @@ use std::ops::Neg;
pub struct Output {
pub references: References,
pub tail_call: Option<Symbol>,
pub rigids: SendMap<Lowercase, Variable>,
pub ftv: SendMap<Variable, Lowercase>,
pub introduced_variables: IntroducedVariables,
pub aliases: SendMap<Symbol, Alias>,
}

View file

@ -121,7 +121,7 @@ pub fn canonicalize_module_defs<'a>(
}
}
for (var, lowercase) in output.ftv.clone() {
for (var, lowercase) in output.introduced_variables.name_by_var.clone() {
rigid_variables.insert(var, lowercase);
}

View file

@ -12,7 +12,7 @@ roc_parse = { path = "../parse" }
roc_types = { path = "../types" }
roc_can = { path = "../can" }
roc_builtins = { path = "../builtins" }
roc_uniqueness = { path = "../uniqueness" }
roc_uniq = { path = "../uniq" }
[dev-dependencies]
pretty_assertions = "0.5.1 "

View file

@ -1,5 +1,6 @@
use crate::builtins::{empty_list_type, float_literal, int_literal, list_type, str_type};
use crate::pattern::{constrain_pattern, PatternState};
use roc_can::annotation::IntroducedVariables;
use roc_can::constraint::Constraint::{self, *};
use roc_can::constraint::LetConstraint;
use roc_can::def::{Declaration, Def};
@ -754,7 +755,7 @@ fn constrain_def(env: &Env, def: &Def, body_con: Constraint) -> Constraint {
let mut new_rigids = Vec::new();
let expr_con = match &def.annotation {
Some((annotation, free_vars, ann_def_aliases)) => {
Some((annotation, introduced_vars, ann_def_aliases)) => {
def_aliases = ann_def_aliases.clone();
let arity = annotation.arity();
@ -763,7 +764,7 @@ fn constrain_def(env: &Env, def: &Def, body_con: Constraint) -> Constraint {
let annotation = instantiate_rigids(
annotation,
&free_vars,
&introduced_vars,
&mut new_rigids,
&mut ftv,
&def.loc_pattern,
@ -821,7 +822,7 @@ fn constrain_def(env: &Env, def: &Def, body_con: Constraint) -> Constraint {
fn instantiate_rigids(
annotation: &Type,
free_vars: &SendMap<Lowercase, Variable>,
introduced_vars: &IntroducedVariables,
new_rigids: &mut Vec<Variable>,
ftv: &mut ImMap<Lowercase, Variable>,
loc_pattern: &Located<Pattern>,
@ -830,8 +831,8 @@ fn instantiate_rigids(
let mut annotation = annotation.clone();
let mut rigid_substitution: ImMap<Variable, Type> = ImMap::default();
for (name, var) in free_vars {
if let Some(existing_rigid) = ftv.get(name) {
for (name, var) in introduced_vars.var_by_name.iter() {
if let Some(existing_rigid) = ftv.get(&name) {
rigid_substitution.insert(*var, Type::Variable(*existing_rigid));
} else {
// It's possible to use this rigid in nested defs
@ -854,6 +855,8 @@ fn instantiate_rigids(
}
}
new_rigids.extend(introduced_vars.wildcards.iter().cloned());
annotation
}
@ -922,7 +925,7 @@ pub fn rec_defs_help(
flex_info.def_types.extend(pattern_state.headers);
}
Some((annotation, free_vars, ann_def_aliases)) => {
Some((annotation, introduced_vars, ann_def_aliases)) => {
for (symbol, alias) in ann_def_aliases.clone() {
def_aliases.insert(symbol, alias);
}
@ -932,7 +935,7 @@ pub fn rec_defs_help(
let annotation = instantiate_rigids(
annotation,
&free_vars,
&introduced_vars,
&mut new_rigids,
&mut ftv,
&def.loc_pattern,

View file

@ -14,4 +14,4 @@ pub mod builtins;
pub mod expr;
pub mod module;
pub mod pattern;
pub mod uniqueness;
pub mod uniq;

View file

@ -29,7 +29,7 @@ pub fn constrain_module(
match mode {
Standard => constrain_decls(home, decls, send_aliases),
Uniqueness => crate::uniqueness::constrain_decls(home, decls, send_aliases, var_store),
Uniqueness => crate::uniq::constrain_decls(home, decls, send_aliases, var_store),
}
}
@ -72,6 +72,10 @@ pub fn constrain_imported_values(
rigid_vars.push(var);
}
for var in free_vars.wildcards {
rigid_vars.push(var);
}
// Variables can lose their name during type inference. But the unnamed
// variables are still part of a signature, and thus must be treated as rigids here!
for (_, var) in free_vars.unnamed_vars {
@ -151,9 +155,10 @@ pub fn load_builtin_aliases(
pub struct FreeVars {
pub named_vars: ImMap<Lowercase, Variable>,
pub unnamed_vars: ImMap<VarId, Variable>,
pub wildcards: Vec<Variable>,
}
pub fn to_type(solved_type: &SolvedType, free_vars: &mut FreeVars, var_store: &VarStore) -> Type {
fn to_type(solved_type: &SolvedType, free_vars: &mut FreeVars, var_store: &VarStore) -> Type {
use roc_types::solved_types::SolvedType::*;
match solved_type {
@ -196,7 +201,11 @@ pub fn to_type(solved_type: &SolvedType, free_vars: &mut FreeVars, var_store: &V
Type::Variable(var)
}
}
Wildcard => Type::Variable(var_store.fresh()),
Wildcard => {
let var = var_store.fresh();
free_vars.wildcards.push(var);
Type::Variable(var)
}
Record { fields, ext } => {
let mut new_fields = SendMap::default();

View file

@ -1,4 +1,5 @@
use crate::expr::{exists, exists_with_aliases, Info};
use roc_can::annotation::IntroducedVariables;
use roc_can::constraint::Constraint::{self, *};
use roc_can::constraint::LetConstraint;
use roc_can::def::{Declaration, Def};
@ -14,8 +15,8 @@ use roc_types::subs::{VarStore, Variable};
use roc_types::types::AnnotationSource::{self, *};
use roc_types::types::Type::{self, *};
use roc_types::types::{Alias, PReason, Reason};
use roc_uniqueness::builtins::{attr_type, empty_list_type, list_type, str_type};
use roc_uniqueness::sharing::{self, Container, FieldAccess, Mark, Usage, VarUsage};
use roc_uniq::builtins::{attr_type, empty_list_type, list_type, str_type};
use roc_uniq::sharing::{self, Container, FieldAccess, Mark, Usage, VarUsage};
pub struct Env {
/// Whenever we encounter a user-defined type variable (a "rigid" var for short),
@ -59,7 +60,7 @@ pub fn constrain_declaration(
pub fn constrain_decls(
home: ModuleId,
decls: &[Declaration],
aliases: SendMap<Symbol, Alias>,
mut aliases: SendMap<Symbol, Alias>,
var_store: &VarStore,
) -> Constraint {
let mut constraint = Constraint::SaveTheEnvironment;
@ -81,6 +82,8 @@ pub fn constrain_decls(
}
}
aliases_to_attr_type(var_store, &mut aliases);
for decl in decls.iter().rev() {
// NOTE: rigids are empty because they are not shared between top-level definitions
match decl {
@ -1635,14 +1638,15 @@ fn constrain_def(
let mut new_rigids = Vec::new();
let expr_con = match &def.annotation {
Some((annotation, free_vars, ann_def_aliases)) => {
Some((annotation, introduced_vars, ann_def_aliases)) => {
def_aliases = ann_def_aliases.clone();
let arity = annotation.arity();
let mut ftv = env.rigids.clone();
let annotation = instantiate_rigids(
var_store,
annotation,
&free_vars,
&introduced_vars,
&mut new_rigids,
&mut ftv,
&def.loc_pattern,
@ -1709,7 +1713,7 @@ fn constrain_def(
fn instantiate_rigids(
var_store: &VarStore,
annotation: &Type,
free_vars: &SendMap<Lowercase, Variable>,
introduced_vars: &IntroducedVariables,
new_rigids: &mut Vec<Variable>,
ftv: &mut ImMap<Lowercase, (Variable, Variable)>,
loc_pattern: &Located<Pattern>,
@ -1720,7 +1724,7 @@ fn instantiate_rigids(
let mut rigid_substitution: ImMap<Variable, Type> = ImMap::default();
for (name, var) in free_vars {
for (name, var) in introduced_vars.var_by_name.iter() {
if let Some((existing_rigid, existing_uvar)) = ftv.get(&name) {
rigid_substitution.insert(
*var,
@ -1764,6 +1768,7 @@ fn instantiate_rigids(
}
new_rigids.extend(uniq_vars);
new_rigids.extend(introduced_vars.wildcards.iter().cloned());
for (_, v) in new_rigid_pairs {
new_rigids.push(v);
@ -1854,7 +1859,7 @@ pub fn rec_defs_help(
flex_info.def_types.extend(pattern_state.headers);
}
Some((annotation, free_vars, ann_def_aliases)) => {
Some((annotation, introduced_vars, ann_def_aliases)) => {
for (symbol, alias) in ann_def_aliases.clone() {
def_aliases.insert(symbol, alias);
}
@ -1863,7 +1868,7 @@ pub fn rec_defs_help(
let annotation = instantiate_rigids(
var_store,
annotation,
&free_vars,
&introduced_vars,
&mut new_rigids,
&mut ftv,
&def.loc_pattern,

View file

@ -1,5 +1,5 @@
[package]
name = "roc_uniqueness"
name = "roc_fmt"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
@ -8,10 +8,13 @@ edition = "2018"
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_parse = { path = "../parse" }
roc_problem = { path = "../problem" }
roc_types = { path = "../types" }
roc_can = { path = "../can" }
im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version!
bumpalo = "2.6"
inlinable_string = "0.1.0"
[dev-dependencies]
pretty_assertions = "0.5.1 "

View file

@ -1,6 +1,6 @@
use crate::fmt::expr::{fmt_expr, is_multiline_expr};
use crate::fmt::pattern::fmt_pattern;
use crate::fmt::spaces::{fmt_spaces, newline, INDENT};
use crate::expr::{fmt_expr, is_multiline_expr};
use crate::pattern::fmt_pattern;
use crate::spaces::{fmt_spaces, newline, INDENT};
use bumpalo::collections::String;
use roc_parse::ast::{Def, Expr};

View file

@ -1,6 +1,6 @@
use crate::fmt::def::fmt_def;
use crate::fmt::pattern::fmt_pattern;
use crate::fmt::spaces::{
use crate::def::fmt_def;
use crate::pattern::fmt_pattern;
use crate::spaces::{
add_spaces, fmt_comments_only, fmt_condition_spaces, fmt_spaces, is_comment, newline, INDENT,
};
use bumpalo::collections::{String, Vec};

17
compiler/fmt/src/lib.rs Normal file
View file

@ -0,0 +1,17 @@
#![warn(clippy::all, clippy::dbg_macro)]
// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
//
// It warns about a performance problem where the only quick remediation is
// to allocate more on the heap, which has lots of tradeoffs - including making it
// long-term unclear which allocations *need* to happen for compilation's sake
// (e.g. recursive structures) versus those which were only added to appease clippy.
//
// Effectively optimizing data structure memory layout isn't a quick fix,
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]
pub mod def;
pub mod expr;
pub mod module;
pub mod pattern;
pub mod spaces;

View file

@ -1,4 +1,4 @@
use crate::fmt::spaces::{fmt_spaces, INDENT};
use crate::spaces::{fmt_spaces, INDENT};
use bumpalo::collections::{String, Vec};
use roc_parse::ast::{AppHeader, ExposesEntry, ImportsEntry, InterfaceHeader, Module};
use roc_region::all::Located;

View file

@ -1,4 +1,4 @@
use crate::fmt::spaces::{fmt_comments_only, fmt_spaces};
use crate::spaces::{fmt_comments_only, fmt_spaces};
use bumpalo::collections::String;
use roc_parse::ast::{Base, Pattern};

View file

@ -3,7 +3,7 @@ extern crate pretty_assertions;
#[macro_use]
extern crate indoc;
extern crate bumpalo;
extern crate roc;
extern crate roc_fmt;
#[macro_use]
extern crate roc_parse;
@ -11,9 +11,9 @@ extern crate roc_parse;
mod test_format {
use bumpalo::collections::String;
use bumpalo::Bump;
use roc::fmt::def::fmt_def;
use roc::fmt::expr::fmt_expr;
use roc::fmt::module::fmt_module;
use roc_fmt::def::fmt_def;
use roc_fmt::expr::fmt_expr;
use roc_fmt::module::fmt_module;
use roc_parse::ast::{Attempting, Expr};
use roc_parse::blankspace::space0_before;
use roc_parse::module::{self, module_defs};

View file

@ -1,28 +1,23 @@
[package]
name = "roc"
name = "roc_gen"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
[dependencies]
roc_collections = { path = "./collections" }
roc_region = { path = "./region" }
roc_module = { path = "./module" }
roc_parse = { path = "./parse" }
roc_problem = { path = "./problem" }
roc_types = { path = "./types" }
roc_can = { path = "./can" }
roc_builtins = { path = "./builtins" }
roc_constrain = { path = "./constrain" }
roc_uniqueness = { path = "./uniqueness" }
roc_unify = { path = "./unify" }
roc_solve = { path = "./solve" }
log = "0.4.8"
petgraph = { version = "0.4.5", optional = true }
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_problem = { path = "../problem" }
roc_types = { path = "../types" }
roc_builtins = { path = "../builtins" }
roc_constrain = { path = "../constrain" }
roc_uniq = { path = "../uniq" }
roc_unify = { path = "../unify" }
roc_solve = { path = "../solve" }
roc_mono = { path = "../mono" }
im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version!
wyhash = "0.3"
tokio = { version = "0.2", features = ["blocking", "fs", "sync", "rt-threaded"] }
bumpalo = "2.6"
inlinable_string = "0.1.0"
# NOTE: Breaking API changes get pushed directly to this Inkwell branch, so be
@ -32,8 +27,6 @@ inlinable_string = "0.1.0"
# `rev` works locally, it causes an error on GitHub Actions. (It's unclear why,
# but after several hours of trying unsuccessfully to fix it, `branch` is it.)
inkwell = { git = "https://github.com/TheDan64/inkwell", branch = "llvm8-0" }
futures = "0.3"
lazy_static = "1.4"
target-lexicon = "0.10" # NOTE: we must use the same version of target-lexicon as cranelift!
cranelift = "0.59" # All cranelift crates should have the same version!
cranelift-simplejit = "0.59" # All cranelift crates should have the same version!
@ -41,8 +34,12 @@ cranelift-module = "0.59" # All cranelift crates should have the same version
cranelift-codegen = "0.59" # All cranelift crates should have the same version!
[dev-dependencies]
roc_can = { path = "../can" }
roc_parse = { path = "../parse" }
pretty_assertions = "0.5.1 "
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
tokio = { version = "0.2", features = ["blocking", "fs", "sync", "rt-threaded"] }
bumpalo = "2.6"

View file

@ -14,10 +14,10 @@ use cranelift_codegen::Context;
use cranelift_module::{Backend, FuncId, Linkage, Module};
use crate::crane::convert::{sig_from_layout, type_from_layout};
use crate::mono::expr::{Expr, Proc, Procs};
use crate::mono::layout::{Builtin, Layout};
use roc_collections::all::ImMap;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::expr::{Expr, Proc, Procs};
use roc_mono::layout::{Builtin, Layout};
use roc_types::subs::{Subs, Variable};
type Scope = ImMap<Symbol, ScopeEntry>;
@ -46,7 +46,7 @@ pub fn build_expr<'a, B: Backend>(
expr: &Expr<'a>,
procs: &Procs<'a>,
) -> Value {
use crate::mono::expr::Expr::*;
use roc_mono::expr::Expr::*;
match expr {
Int(num) => builder.ins().iconst(types::I64, *num),

View file

@ -3,11 +3,11 @@ use cranelift_codegen::ir::{types, Signature, Type};
use cranelift_codegen::isa::TargetFrontendConfig;
use cranelift_module::{Backend, Module};
use crate::mono::layout::Layout;
use roc_mono::layout::Layout;
pub fn type_from_layout(cfg: TargetFrontendConfig, layout: &Layout<'_>) -> Type {
use crate::mono::layout::Builtin::*;
use crate::mono::layout::Layout::*;
use roc_mono::layout::Builtin::*;
use roc_mono::layout::Layout::*;
match layout {
Pointer(_) | FunctionPointer(_, _) => cfg.pointer_type(),

View file

@ -10,11 +10,5 @@
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]
pub mod string;
pub mod crane;
pub mod fmt;
pub mod llvm;
pub mod load;
pub mod mono;

View file

@ -9,10 +9,10 @@ use inkwell::values::{FunctionValue, IntValue, PointerValue};
use inkwell::{FloatPredicate, IntPredicate};
use crate::llvm::convert::{basic_type_from_layout, get_fn_type};
use crate::mono::expr::{Expr, Proc, Procs};
use crate::mono::layout::Layout;
use roc_collections::all::ImMap;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::expr::{Expr, Proc, Procs};
use roc_mono::layout::Layout;
use roc_types::subs::{Subs, Variable};
/// This is for Inkwell's FunctionValue::verify - we want to know the verification
@ -42,7 +42,7 @@ pub fn build_expr<'a, 'ctx, 'env>(
expr: &Expr<'a>,
procs: &Procs<'a>,
) -> BasicValueEnum<'ctx> {
use crate::mono::expr::Expr::*;
use roc_mono::expr::Expr::*;
match expr {
Int(num) => env.context.i64_type().const_int(*num as u64, true).into(),

View file

@ -3,7 +3,7 @@ use inkwell::types::BasicTypeEnum::{self, *};
use inkwell::types::{BasicType, FunctionType};
use inkwell::AddressSpace;
use crate::mono::layout::Layout;
use roc_mono::layout::Layout;
/// TODO could this be added to Inkwell itself as a method on BasicValueEnum?
pub fn get_fn_type<'ctx>(
@ -24,8 +24,8 @@ pub fn basic_type_from_layout<'ctx>(
context: &'ctx Context,
layout: &Layout<'_>,
) -> BasicTypeEnum<'ctx> {
use crate::mono::layout::Builtin::*;
use crate::mono::layout::Layout::*;
use roc_mono::layout::Builtin::*;
use roc_mono::layout::Layout::*;
match layout {
FunctionPointer(args, ret_layout) => {

View file

@ -0,0 +1,460 @@
extern crate bumpalo;
use self::bumpalo::Bump;
use roc_builtins::unique::uniq_stdlib;
use roc_can::constraint::Constraint;
use roc_can::env::Env;
use roc_can::expected::Expected;
use roc_can::expr::{canonicalize_expr, Expr, Output};
use roc_can::operator;
use roc_can::scope::Scope;
use roc_collections::all::{ImMap, ImSet, MutMap, SendMap, SendSet};
use roc_constrain::expr::constrain_expr;
use roc_constrain::module::{constrain_imported_values, load_builtin_aliases, Import};
use roc_module::ident::Ident;
use roc_module::symbol::{IdentIds, Interns, ModuleId, ModuleIds, Symbol};
use roc_parse::ast::{self, Attempting};
use roc_parse::blankspace::space0_before;
use roc_parse::parser::{loc, Fail, Parser, State};
use roc_problem::can::Problem;
use roc_region::all::{Located, Region};
use roc_solve::solve;
use roc_types::subs::{Content, Subs, VarStore, Variable};
use roc_types::types::Type;
use std::hash::Hash;
use std::path::{Path, PathBuf};
/// Returns the `ModuleId` for the module named "Test", used as the home
/// module throughout these test helpers.
pub fn test_home() -> ModuleId {
    let module_ids = ModuleIds::default();

    module_ids.get_or_insert(&"Test".into())
}
#[allow(dead_code)]
/// Solve `constraint` against `subs`, pushing any type problems onto
/// `problems`, and return the solved content of `expr_var` together with
/// the solved substitution.
pub fn infer_expr(
    subs: Subs,
    problems: &mut Vec<roc_types::types::Problem>,
    constraint: &Constraint,
    expr_var: Variable,
) -> (Content, Subs) {
    let solve_env = solve::Env {
        aliases: MutMap::default(),
        vars_by_symbol: SendMap::default(),
    };
    let (solved, _) = solve::run(&solve_env, problems, subs, constraint);

    let inferred = solved.inner().get_without_compacting(expr_var).content;

    (inferred, solved.into_inner())
}
/// Stack size (in bytes) used by `with_larger_debug_stack()` for tests that
/// otherwise blow the default stack in debug builds (but not in --release).
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 4 * 1024 * 1024;

/// Run `run_test` on a thread with an enlarged stack.
///
/// Some tests pass under `cargo test --release` but overflow the default
/// stack without `--release`; this debug-only version spawns the test on a
/// thread with `EXPANDED_STACK_SIZE` bytes of stack and joins it, while the
/// release version (compiled instead of this one) runs the test directly.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() -> (),
    F: Send,
    F: 'static,
{
    let handle = std::thread::Builder::new()
        .stack_size(EXPANDED_STACK_SIZE)
        .spawn(run_test)
        .expect("Error while spawning expanded dev stack size thread");

    handle
        .join()
        .expect("Error while joining expanded dev stack size thread")
}
/// Release-build variant: run the test directly on the current stack.
/// This way we still find out if any test blows the stack even after
/// optimizations in release builds.
#[allow(dead_code)]
#[cfg(not(debug_assertions))]
#[inline(always)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() -> (),
    F: Send,
    F: 'static,
{
    run_test()
}
#[allow(dead_code)]
/// Parse `input` as a single expression, dropping the region and returning
/// just the expression itself.
pub fn parse_with<'a>(arena: &'a Bump, input: &'a str) -> Result<ast::Expr<'a>, Fail> {
    parse_loc_with(arena, input).map(|located| located.value)
}
#[allow(dead_code)]
/// Parse `input` as a single located expression (leading whitespace allowed),
/// returning the parse failure on error.
pub fn parse_loc_with<'a>(arena: &'a Bump, input: &'a str) -> Result<Located<ast::Expr<'a>>, Fail> {
    let state = State::new(&input, Attempting::Module);
    let parser = space0_before(loc(roc_parse::expr::expr(0)), 0);

    parser
        .parse(&arena, state)
        .map(|(loc_expr, _)| loc_expr)
        .map_err(|(fail, _)| fail)
}
#[allow(dead_code)]
/// Canonicalize `expr_str` in a fresh arena under the test home module.
pub fn can_expr(expr_str: &str) -> CanExprOut {
    let arena = Bump::new();

    can_expr_with(&arena, test_home(), expr_str)
}
#[allow(dead_code)]
/// Like `uniq_expr_with`, but with a fresh arena and no pre-declared idents.
pub fn uniq_expr(
    expr_str: &str,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    let no_idents: ImMap<Ident, (Symbol, Region)> = ImMap::default();

    uniq_expr_with(&Bump::new(), expr_str, &no_idents)
}
#[allow(dead_code)]
/// Canonicalize `expr_str` (via `can_expr_with`) and then build the
/// uniqueness-typing constraint for it, loading the unique stdlib's builtin
/// values and aliases into the constraint.
///
/// Returns the canonical expression, canonicalization output and problems,
/// a fresh `Subs`, the expression's type variable, the final constraint,
/// the home `ModuleId`, and the `Interns`.
pub fn uniq_expr_with(
    arena: &Bump,
    expr_str: &str,
    declared_idents: &ImMap<Ident, (Symbol, Region)>,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    let home = test_home();
    let CanExprOut {
        loc_expr,
        output,
        problems,
        var_store: old_var_store,
        var,
        interns,
        ..
    } = can_expr_with(arena, home, expr_str);
    // Start a new VarStore that continues numbering after the one used during
    // canonicalization, so variables created below don't collide with it.
    let var_store = VarStore::new(old_var_store.fresh());
    let expected2 = Expected::NoExpectation(Type::Variable(var));
    let constraint = roc_constrain::uniq::constrain_declaration(
        home,
        &var_store,
        Region::zero(),
        &loc_expr,
        declared_idents,
        expected2,
    );
    let stdlib = uniq_stdlib();
    // Turn every builtin value's solved type into an `Import` for constraining.
    let types = stdlib.types;
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            solved_type: solved_type,
        })
        .collect();
    // Load builtin values into the constraint.
    // TODO what to do with those rigids?
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);
    // Load builtin types (aliases) into the constraint.
    let mut constraint = load_builtin_aliases(&stdlib.aliases, constraint, &var_store);
    constraint.instantiate_aliases(&var_store);
    let subs2 = Subs::new(var_store.into());
    (
        loc_expr, output, problems, subs2, var, constraint, home, interns,
    )
}
/// Everything produced by canonicalizing a single test expression
/// (see `can_expr_with`).
pub struct CanExprOut {
    /// The canonicalized expression, with its source region.
    pub loc_expr: Located<Expr>,
    /// Canonicalization output (references, tail calls, etc.).
    pub output: Output,
    /// Problems reported during canonicalization.
    pub problems: Vec<Problem>,
    /// The home module the expression was canonicalized in.
    pub home: ModuleId,
    /// Interned identifiers, including the exposed builtins.
    pub interns: Interns,
    /// The variable store used during canonicalization/constraining.
    pub var_store: VarStore,
    /// The type variable standing for the whole expression's type.
    pub var: Variable,
    /// The constraint built for the expression (builtins included).
    pub constraint: Constraint,
}
#[allow(dead_code)]
/// Parse, desugar, and canonicalize `expr_str` as a single expression in the
/// `home` module, then build its (standard, non-uniqueness) type constraint
/// with the builtin values and aliases loaded in.
///
/// Panics if `expr_str` fails to parse.
pub fn can_expr_with(arena: &Bump, home: ModuleId, expr_str: &str) -> CanExprOut {
    let loc_expr = parse_loc_with(&arena, expr_str).unwrap_or_else(|e| {
        panic!(
            "can_expr_with() got a parse error when attempting to canonicalize:\n\n{:?} {:?}",
            expr_str, e
        )
    });
    let var_store = VarStore::default();
    // `var` stands for the type of the whole expression; it is returned in
    // `CanExprOut.var` so callers can look up the inferred type.
    let var = var_store.fresh();
    let expected = Expected::NoExpectation(Type::Variable(var));
    let module_ids = ModuleIds::default();
    // Desugar operators (convert them to Apply calls, taking into account
    // operator precedence and associativity rules), before doing other canonicalization.
    //
    // If we did this *during* canonicalization, then each time we
    // visited a BinOp node we'd recursively try to apply this to each of its nested
    // operators, and then again on *their* nested operators, ultimately applying the
    // rules multiple times unnecessarily.
    let loc_expr = operator::desugar_expr(arena, &loc_expr);
    let mut scope = Scope::new(home);
    let dep_idents = IdentIds::exposed_builtins(0);
    let mut env = Env::new(home, dep_idents, &module_ids, IdentIds::default());
    let (loc_expr, output) = canonicalize_expr(
        &mut env,
        &var_store,
        &mut scope,
        Region::zero(),
        &loc_expr.value,
    );
    // Constrain with no rigids in scope: a top-level test expression starts fresh.
    let constraint = constrain_expr(
        &roc_constrain::expr::Env {
            rigids: ImMap::default(),
            home,
        },
        loc_expr.region,
        &loc_expr.value,
        expected,
    );
    // Turn every builtin value's solved type into an `Import` for constraining.
    let types = roc_builtins::std::types();
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            solved_type: solved_type,
        })
        .collect();
    // Load builtin values into the constraint.
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);
    // TODO determine what to do with those rigids
    // for var in introduced_rigids {
    //     output.ftv.insert(var, format!("internal_{:?}", var).into());
    // }
    // Load builtin types (aliases) into the constraint.
    let mut constraint =
        load_builtin_aliases(&roc_builtins::std::aliases(), constraint, &var_store);
    constraint.instantiate_aliases(&var_store);
    let mut all_ident_ids = MutMap::default();
    // When pretty printing types, we may need the exposed builtins,
    // so include them in the Interns we'll ultimately return.
    for (module_id, ident_ids) in IdentIds::exposed_builtins(0) {
        all_ident_ids.insert(module_id, ident_ids);
    }
    all_ident_ids.insert(home, env.ident_ids);
    let interns = Interns {
        module_ids: env.module_ids.clone(),
        all_ident_ids,
    };
    CanExprOut {
        loc_expr,
        output,
        problems: env.problems,
        home: env.home,
        var_store,
        interns,
        var,
        constraint,
    }
}
#[allow(dead_code)]
/// Build a `MutMap` from any iterator of key/value pairs.
pub fn mut_map_from_pairs<K, V, I>(pairs: I) -> MutMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq,
{
    let mut map = MutMap::default();

    for (k, v) in pairs {
        map.insert(k, v);
    }

    map
}
#[allow(dead_code)]
/// Build an `ImMap` from any iterator of key/value pairs.
pub fn im_map_from_pairs<K, V, I>(pairs: I) -> ImMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq + Clone,
    V: Clone,
{
    let mut map = ImMap::default();

    for (k, v) in pairs {
        map.insert(k, v);
    }

    map
}
#[allow(dead_code)]
/// Build a `SendSet` from any iterator of elements.
pub fn send_set_from<V, I>(elems: I) -> SendSet<V>
where
    I: IntoIterator<Item = V>,
    V: Hash + Eq + Clone,
{
    let mut set = SendSet::default();

    for elem in elems {
        set.insert(elem);
    }

    set
}
#[allow(dead_code)]
/// Path to the build test fixtures directory, relative to the crate root:
/// `tests/fixtures/build`.
///
/// Note: the original signature declared an unused generic lifetime `<'a>`
/// (flagged by clippy's `extra_unused_lifetimes`); it has been removed, which
/// is backward-compatible for all call sites.
pub fn fixtures_dir() -> PathBuf {
    Path::new("tests").join("fixtures").join("build")
}
#[allow(dead_code)]
/// Path to the builtins directory (`builtins`), relative to the crate root.
///
/// Uses `PathBuf::from` instead of `PathBuf::new().join(...)` (same result,
/// one fewer allocation step), and drops the original's unused generic
/// lifetime `<'a>` (clippy `extra_unused_lifetimes`) — both changes are
/// backward-compatible for all call sites.
pub fn builtins_dir() -> PathBuf {
    PathBuf::from("builtins")
}
// Check constraints
//
// Verifies that the variables used in a constraint (in types or expectations)
// and the variables declared by it (in the `flex_vars` / `rigid_vars` fields
// of `LetConstraint`) match up: no duplicates, and no variable may be used
// without being declared.
//
// One exception: the initial variable (which stores the type of the whole
// expression) is used but never declared.
#[allow(dead_code)]
pub fn assert_correct_variable_usage(constraint: &Constraint) {
    // Variables declared by the constraint vs. variables it actually uses.
    let (declared, used) = variable_usage(constraint);

    let used_set: ImSet<Variable> = used.clone().into();

    let mut declared_set: ImSet<Variable> = declared.rigid_vars.clone().into();
    for var in declared.flex_vars.clone() {
        declared_set.insert(var);
    }

    // NOTE: this checks whether we're using variables that are not declared. For recursive type
    // definitions, their rigid types are declared twice, which is correct!
    let undeclared = used_set.clone().relative_complement(declared_set);

    if !undeclared.is_empty() {
        println!("VARIABLE USAGE PROBLEM");
        println!("used: {:?}", &used_set);
        println!("rigids: {:?}", &declared.rigid_vars);
        println!("flexs: {:?}", &declared.flex_vars);
        println!("difference: {:?}", &undeclared);

        panic!("variable usage problem (see stdout for details)");
    }
}
/// Variables declared by a constraint, collected by `variable_usage_help`:
/// rigid vars and flex vars from every `Let` constraint encountered.
#[derive(Default)]
pub struct SeenVariables {
    // Rigid variables declared by `Let` constraints.
    pub rigid_vars: Vec<Variable>,
    // Flex variables declared by `Let` constraints.
    pub flex_vars: Vec<Variable>,
}
/// Walk `con` and return the variables it declares (via `Let`) and the
/// sorted, de-duplicated list of variables it uses (in types/expectations).
pub fn variable_usage(con: &Constraint) -> (SeenVariables, Vec<Variable>) {
    let mut declared = SeenVariables::default();
    let mut used = ImSet::default();
    variable_usage_help(con, &mut declared, &mut used);
    // The initial variable (the type of the whole expression) is used but
    // never declared, so drop it before callers compare the two sets.
    // NOTE(review): assumes variable id 1 is that initial variable — confirm
    // against `Variable::unsafe_test_debug_variable`'s contract.
    used.remove(unsafe { &Variable::unsafe_test_debug_variable(1) });
    let mut used_vec: Vec<Variable> = used.into_iter().collect();
    used_vec.sort();
    declared.rigid_vars.sort();
    declared.flex_vars.sort();
    (declared, used_vec)
}
/// Recursively walk `con`, recording every variable declared by a `Let`
/// constraint into `declared` and every variable mentioned in a type or
/// expectation into `used`.
fn variable_usage_help(con: &Constraint, declared: &mut SeenVariables, used: &mut ImSet<Variable>) {
    use Constraint::*;
    match con {
        // No variables involved.
        True | SaveTheEnvironment => (),
        Eq(tipe, expectation, _) => {
            // Both sides of the equality can mention variables.
            for v in tipe.variables() {
                used.insert(v);
            }
            for v in expectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Lookup(_, expectation, _) => {
            for v in expectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Pattern(_, _, tipe, pexpectation) => {
            for v in tipe.variables() {
                used.insert(v);
            }
            for v in pexpectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Let(letcon) => {
            // `Let` is the only constructor that *declares* variables.
            declared.rigid_vars.extend(letcon.rigid_vars.clone());
            declared.flex_vars.extend(letcon.flex_vars.clone());
            variable_usage_help(&letcon.defs_constraint, declared, used);
            variable_usage_help(&letcon.ret_constraint, declared, used);
        }
        And(constraints) => {
            for sub in constraints {
                variable_usage_help(sub, declared, used);
            }
        }
    }
}

View file

@ -5,7 +5,7 @@ extern crate indoc;
extern crate bumpalo;
extern crate inkwell;
extern crate roc;
extern crate roc_gen;
mod helpers;
@ -24,14 +24,14 @@ mod test_gen {
use inkwell::passes::PassManager;
use inkwell::types::BasicType;
use inkwell::OptimizationLevel;
use roc::crane::build::{declare_proc, define_proc_body, ScopeEntry};
use roc::crane::convert::type_from_layout;
use roc::crane::imports::define_malloc;
use roc::llvm::build::{build_proc, build_proc_header};
use roc::llvm::convert::basic_type_from_layout;
use roc::mono::expr::Expr;
use roc::mono::layout::Layout;
use roc_collections::all::{ImMap, MutMap};
use roc_gen::crane::build::{declare_proc, define_proc_body, ScopeEntry};
use roc_gen::crane::convert::type_from_layout;
use roc_gen::crane::imports::define_malloc;
use roc_gen::llvm::build::{build_proc, build_proc_header};
use roc_gen::llvm::convert::basic_type_from_layout;
use roc_mono::expr::Expr;
use roc_mono::layout::Layout;
use roc_types::subs::Subs;
use std::ffi::{CStr, CString};
use std::mem;
@ -63,7 +63,7 @@ mod test_gen {
// Compile and add all the Procs before adding main
let mut procs = MutMap::default();
let mut env = roc::crane::build::Env {
let mut env = roc_gen::crane::build::Env {
arena: &arena,
subs,
interns,
@ -135,7 +135,7 @@ mod test_gen {
builder.append_block_params_for_function_params(block);
let main_body =
roc::crane::build::build_expr(&env, &scope, &mut module, &mut builder, &mono_expr, &procs);
roc_gen::crane::build::build_expr(&env, &scope, &mut module, &mut builder, &mono_expr, &procs);
builder.ins().return_(&[main_body]);
// TODO re-enable this once Switch stops making unsealed blocks, e.g.
@ -210,7 +210,7 @@ mod test_gen {
let pointer_bytes = execution_engine.get_target_data().get_pointer_byte_size(None);
// Compile and add all the Procs before adding main
let mut env = roc::llvm::build::Env {
let mut env = roc_gen::llvm::build::Env {
arena: &arena,
subs,
builder: &builder,
@ -265,7 +265,7 @@ mod test_gen {
builder.position_at_end(basic_block);
let ret = roc::llvm::build::build_expr(
let ret = roc_gen::llvm::build::build_expr(
&env,
&ImMap::default(),
main_fn,
@ -346,7 +346,7 @@ mod test_gen {
let pointer_bytes = execution_engine.get_target_data().get_pointer_byte_size(None);
// Compile and add all the Procs before adding main
let mut env = roc::llvm::build::Env {
let mut env = roc_gen::llvm::build::Env {
arena: &arena,
subs,
builder: &builder,
@ -401,7 +401,7 @@ mod test_gen {
builder.position_at_end(basic_block);
let ret = roc::llvm::build::build_expr(
let ret = roc_gen::llvm::build::build_expr(
&env,
&ImMap::default(),
main_fn,

28
compiler/load/Cargo.toml Normal file
View file

@ -0,0 +1,28 @@
[package]
name = "roc_load"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
[dependencies]
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_types = { path = "../types" }
roc_can = { path = "../can" }
roc_constrain = { path = "../constrain" }
roc_builtins = { path = "../builtins" }
roc_problem = { path = "../problem" }
roc_unify = { path = "../unify" }
roc_parse = { path = "../parse" }
roc_solve = { path = "../solve" }
bumpalo = "2.6"
inlinable_string = "0.1.0"
tokio = { version = "0.2", features = ["blocking", "fs", "sync", "rt-threaded"] }
[dev-dependencies]
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"

13
compiler/load/src/lib.rs Normal file
View file

@ -0,0 +1,13 @@
#![warn(clippy::all, clippy::dbg_macro)]
// I'm skeptical that clippy::large_enum_variant is a good lint to have globally enabled.
//
// It warns about a performance problem where the only quick remediation is
// to allocate more on the heap, which has lots of tradeoffs - including making it
// long-term unclear which allocations *need* to happen for compilation's sake
// (e.g. recursive structures) versus those which were only added to appease clippy.
//
// Effectively optimizing data structure memory layout isn't a quick fix,
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]
pub mod file;

View file

@ -5,11 +5,11 @@ interface AStar
# a port of https://github.com/krisajenkins/elm-astar/blob/2.1.3/src/AStar/Generalised.elm
Model xyz :
{ evaluated : Set xyz
, openSet : Set xyz
, costs : Map.Map xyz Float
, cameFrom : Map.Map xyz xyz
Model position :
{ evaluated : Set position
, openSet : Set position
, costs : Map.Map position Float
, cameFrom : Map.Map position position
}
@ -22,12 +22,12 @@ initialModel = \start ->
}
cheapestOpen : (position -> Float), Model position -> Result position [ KeyNotFound ]*
cheapestOpen : (position -> Float), Model position -> Result position [ KeyNotFound ]*
cheapestOpen = \costFunction, model ->
folder = \position, resSmallestSoFar ->
when Map.get model.costs position is
Err e ->
Err e ->
Err e
Ok cost ->
@ -35,7 +35,7 @@ cheapestOpen = \costFunction, model ->
when resSmallestSoFar is
Err _ -> Ok { position, cost: cost + positionCost }
Ok smallestSoFar ->
Ok smallestSoFar ->
if positionCost + cost < smallestSoFar.cost then
Ok { position, cost: cost + positionCost }
@ -51,16 +51,16 @@ reconstructPath : Map position position, position -> List position
reconstructPath = \cameFrom, goal ->
when Map.get cameFrom goal is
Err KeyNotFound ->
[]
[]
Ok next ->
List.push (reconstructPath cameFrom next) goal
updateCost : position, position, Model position -> Model position
updateCost = \current, neighbour, model ->
newCameFrom = Map.insert model.cameFrom neighbour current
newCameFrom = Map.insert model.cameFrom neighbour current
newCosts = Map.insert model.costs neighbour distanceTo
newCosts = Map.insert model.costs neighbour distanceTo
distanceTo = reconstructPath newCameFrom neighbour
|> List.length
@ -81,7 +81,7 @@ updateCost = \current, neighbour, model ->
findPath : { costFunction: (position, position -> Float), moveFunction: (position -> Set position), start : position, end : position } -> Result (List position) [ KeyNotFound ]*
findPath = \{ costFunction, moveFunction, start, end } ->
findPath = \{ costFunction, moveFunction, start, end } ->
astar costFunction moveFunction end (initialModel start)
@ -105,7 +105,7 @@ astar = \costFn, moveFn, goal, model ->
modelWithNeighbours = { modelPopped & openSet : Set.union modelPopped.openSet newNeighbours }
modelWithCosts = Set.foldl newNeighbours (\nb, md -> updateCost current nb md) modelWithNeighbours
modelWithCosts = Set.foldl newNeighbours (\nb, md -> updateCost current nb md) modelWithNeighbours
astar costFn moveFn goal modelWithCosts
astar costFn moveFn goal modelWithCosts

View file

@ -1,17 +1,16 @@
extern crate bumpalo;
use self::bumpalo::Bump;
use roc_builtins::unique;
use roc_builtins::unique::uniq_stdlib;
use roc_can::constraint::Constraint;
use roc_can::env::Env;
use roc_can::expected::Expected;
use roc_can::expr::Output;
use roc_can::expr::{canonicalize_expr, Expr};
use roc_can::expr::{canonicalize_expr, Expr, Output};
use roc_can::operator;
use roc_can::scope::Scope;
use roc_collections::all::{ImMap, ImSet, MutMap, SendMap, SendSet};
use roc_constrain::expr::constrain_expr;
use roc_constrain::module::Import;
use roc_constrain::module::{constrain_imported_values, load_builtin_aliases, Import};
use roc_module::ident::Ident;
use roc_module::symbol::{IdentIds, Interns, ModuleId, ModuleIds, Symbol};
use roc_parse::ast::{self, Attempting};
@ -131,7 +130,7 @@ pub fn uniq_expr_with(
let var_store = VarStore::new(old_var_store.fresh());
let expected2 = Expected::NoExpectation(Type::Variable(var));
let constraint = roc_constrain::uniqueness::constrain_declaration(
let constraint = roc_constrain::uniq::constrain_declaration(
home,
&var_store,
Region::zero(),
@ -140,7 +139,7 @@ pub fn uniq_expr_with(
expected2,
);
let stdlib = unique::uniqueness_stdlib();
let stdlib = uniq_stdlib();
let types = stdlib.types;
let imports: Vec<_> = types
@ -152,14 +151,11 @@ pub fn uniq_expr_with(
.collect();
// load builtin values
// TODO what to do with those rigids?
let (_introduced_rigids, constraint) =
roc_constrain::module::constrain_imported_values(imports, constraint, &var_store);
constrain_imported_values(imports, constraint, &var_store);
// load builtin types
let mut constraint =
roc_constrain::module::load_builtin_aliases(&stdlib.aliases, constraint, &var_store);
let mut constraint = load_builtin_aliases(&stdlib.aliases, constraint, &var_store);
constraint.instantiate_aliases(&var_store);
@ -237,19 +233,11 @@ pub fn can_expr_with(arena: &Bump, home: ModuleId, expr_str: &str) -> CanExprOut
//load builtin values
let (_introduced_rigids, constraint) =
roc_constrain::module::constrain_imported_values(imports, constraint, &var_store);
// TODO determine what to do with those rigids
// for var in introduced_rigids {
// output.ftv.insert(var, format!("internal_{:?}", var).into());
// }
constrain_imported_values(imports, constraint, &var_store);
//load builtin types
let mut constraint = roc_constrain::module::load_builtin_aliases(
&roc_builtins::std::aliases(),
constraint,
&var_store,
);
let mut constraint =
load_builtin_aliases(&roc_builtins::std::aliases(), constraint, &var_store);
constraint.instantiate_aliases(&var_store);

View file

@ -4,10 +4,9 @@ extern crate pretty_assertions;
extern crate maplit;
extern crate bumpalo;
extern crate inkwell;
extern crate roc;
extern crate inlinable_string;
extern crate roc_collections;
extern crate roc_load;
extern crate roc_module;
mod helpers;
@ -16,10 +15,10 @@ mod helpers;
mod test_load {
use crate::helpers::fixtures_dir;
use inlinable_string::InlinableString;
use roc::load::{load, LoadedModule};
use roc_can::def::Declaration::*;
use roc_can::def::Def;
use roc_collections::all::MutMap;
use roc_load::file::{load, LoadedModule};
use roc_module::symbol::{Interns, ModuleId};
use roc_solve::solve::SubsByModule;
use roc_types::pretty_print::{content_to_string, name_all_type_vars};

View file

@ -4,22 +4,22 @@ extern crate pretty_assertions;
extern crate maplit;
extern crate bumpalo;
extern crate inkwell;
extern crate roc;
extern crate inlinable_string;
extern crate roc_collections;
extern crate roc_load;
extern crate roc_module;
mod helpers;
#[cfg(test)]
mod test_uniqueness_load {
mod test_uniq_load {
use crate::helpers::fixtures_dir;
use inlinable_string::InlinableString;
use roc::load::{load, LoadedModule};
use roc_builtins::unique;
use roc_can::def::Declaration::*;
use roc_can::def::Def;
use roc_collections::all::MutMap;
use roc_load::file::{load, LoadedModule};
use roc_module::symbol::{Interns, ModuleId};
use roc_solve::solve::SubsByModule;
use roc_types::pretty_print::{content_to_string, name_all_type_vars};
@ -45,13 +45,7 @@ mod test_uniqueness_load {
) -> LoadedModule {
let src_dir = fixtures_dir().join(dir_name);
let filename = src_dir.join(format!("{}.roc", module_name));
let loaded = load(
&unique::uniqueness_stdlib(),
src_dir,
filename,
subs_by_module,
)
.await;
let loaded = load(&unique::uniq_stdlib(), src_dir, filename, subs_by_module).await;
let loaded_module = loaded.expect("Test module failed to load");
assert_eq!(loaded_module.can_problems, Vec::new());
@ -222,6 +216,26 @@ mod test_uniqueness_load {
});
}
#[test]
fn load_astar() {
test_async(async {
let subs_by_module = MutMap::default();
let loaded_module = load_fixture("interface_with_deps", "AStar", subs_by_module).await;
expect_types(
loaded_module,
hashmap! {
"findPath" => "Attr * (Attr * { costFunction : (Attr Shared (Attr Shared position, Attr Shared position -> Attr Shared Float)), end : (Attr Shared position), moveFunction : (Attr Shared (Attr Shared position -> Attr * (Set (Attr Shared position)))), start : (Attr Shared position) } -> Attr * (Result (Attr * (List (Attr Shared position))) (Attr * [ KeyNotFound ]*)))",
"initialModel" => "Attr * (Attr Shared position -> Attr * (Model (Attr Shared position)))",
"reconstructPath" => "Attr Shared (Attr Shared (Map (Attr Shared position) (Attr Shared position)), Attr Shared position -> Attr * (List (Attr Shared position)))",
"updateCost" => "Attr * (Attr Shared position, Attr Shared position, Attr Shared (Model (Attr Shared position)) -> Attr Shared (Model (Attr Shared position)))",
"cheapestOpen" => "Attr * (Attr * (Attr Shared position -> Attr Shared Float), Attr * (Model (Attr Shared position)) -> Attr * (Result (Attr Shared position) (Attr * [ KeyNotFound ]*)))",
"astar" => "Attr Shared (Attr Shared (Attr Shared position, Attr Shared position -> Attr Shared Float), Attr Shared (Attr Shared position -> Attr * (Set (Attr Shared position))), Attr Shared position, Attr Shared (Model (Attr Shared position)) -> Attr * [ Err (Attr * [ KeyNotFound ]*), Ok (Attr * (List (Attr Shared position))) ]*)",
},
);
});
}
#[test]
fn load_and_typecheck_quicksort() {
test_async(async {

26
compiler/mono/Cargo.toml Normal file
View file

@ -0,0 +1,26 @@
[package]
name = "roc_mono"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
[dependencies]
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_types = { path = "../types" }
roc_can = { path = "../can" }
roc_unify = { path = "../unify" }
bumpalo = "2.6"
[dev-dependencies]
roc_constrain = { path = "../constrain" }
roc_builtins = { path = "../builtins" }
roc_problem = { path = "../problem" }
roc_parse = { path = "../parse" }
roc_solve = { path = "../solve" }
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"

View file

@ -1,8 +1,8 @@
use crate::mono::layout::{Builtin, Layout};
use crate::layout::{Builtin, Layout};
use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_can;
use roc_can::pattern::Pattern;
use roc_can::{self};
use roc_collections::all::MutMap;
use roc_module::ident::{Lowercase, TagName};
use roc_module::symbol::{IdentIds, ModuleId, Symbol};

14
compiler/mono/src/lib.rs Normal file
View file

@ -0,0 +1,14 @@
#![warn(clippy::all, clippy::dbg_macro)]
// I'm skeptical that clippy::large_enum_variant is a good lint to have globally enabled.
//
// It warns about a performance problem where the only quick remediation is
// to allocate more on the heap, which has lots of tradeoffs - including making it
// long-term unclear which allocations *need* to happen for compilation's sake
// (e.g. recursive structures) versus those which were only added to appease clippy.
//
// Effectively optimizing data structure memory layout isn't a quick fix,
// and encouraging shortcuts here creates bad incentives. I would rather temporarily
// re-enable this when working on performance optimizations than have it block PRs.
#![allow(clippy::large_enum_variant)]
pub mod expr;
pub mod layout;

View file

@ -13,8 +13,14 @@ roc_can = { path = "../can" }
roc_unify = { path = "../unify" }
[dev-dependencies]
roc_constrain = { path = "../constrain" }
roc_builtins = { path = "../builtins" }
roc_problem = { path = "../problem" }
roc_parse = { path = "../parse" }
roc_solve = { path = "../solve" }
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
bumpalo = "2.6"

View file

@ -605,7 +605,7 @@ fn check_for_infinite_type(
) {
let var = loc_var.value;
let is_uniqueness_infer = match subs.get(var).content {
let is_uniq_infer = match subs.get(var).content {
Content::Alias(Symbol::ATTR_ATTR, _, _) => true,
_ => false,
};
@ -617,7 +617,7 @@ fn check_for_infinite_type(
// try to make a tag union recursive, see if that helps
match content {
Content::Structure(FlatType::TagUnion(tags, ext_var)) => {
if !is_uniqueness_infer {
if !is_uniq_infer {
let rec_var = subs.fresh_unnamed_flex_var();
subs.set_rank(rec_var, description.rank);

View file

@ -0,0 +1,455 @@
extern crate bumpalo;
use self::bumpalo::Bump;
use roc_builtins::unique::uniq_stdlib;
use roc_can::constraint::Constraint;
use roc_can::env::Env;
use roc_can::expected::Expected;
use roc_can::expr::{canonicalize_expr, Expr, Output};
use roc_can::operator;
use roc_can::scope::Scope;
use roc_collections::all::{ImMap, ImSet, MutMap, SendMap, SendSet};
use roc_constrain::expr::constrain_expr;
use roc_constrain::module::{constrain_imported_values, load_builtin_aliases, Import};
use roc_module::ident::Ident;
use roc_module::symbol::{IdentIds, Interns, ModuleId, ModuleIds, Symbol};
use roc_parse::ast::{self, Attempting};
use roc_parse::blankspace::space0_before;
use roc_parse::parser::{loc, Fail, Parser, State};
use roc_problem::can::Problem;
use roc_region::all::{Located, Region};
use roc_solve::solve;
use roc_types::subs::{Content, Subs, VarStore, Variable};
use roc_types::types::Type;
use std::hash::Hash;
use std::path::{Path, PathBuf};
/// The ModuleId used as the `home` module for expressions under test.
pub fn test_home() -> ModuleId {
    let mut module_ids = ModuleIds::default();

    module_ids.get_or_insert(&"Test".into())
}
/// Solve `constraint` against `subs`, appending any type problems to
/// `problems`, and return the solved content of `expr_var` together with the
/// solved substitutions.
#[allow(dead_code)]
pub fn infer_expr(
    subs: Subs,
    problems: &mut Vec<roc_types::types::Problem>,
    constraint: &Constraint,
    expr_var: Variable,
) -> (Content, Subs) {
    let solve_env = solve::Env {
        aliases: MutMap::default(),
        vars_by_symbol: SendMap::default(),
    };

    let (solved, _) = solve::run(&solve_env, problems, subs, constraint);

    let content = solved.inner().get_without_compacting(expr_var).content;
    let solved_subs = solved.into_inner();

    (content, solved_subs)
}
/// Used in the with_larger_debug_stack() function, for tests that otherwise
/// run out of stack space in debug builds (but don't in --release builds)
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 4 * 1024 * 1024;

/// Without this, some tests pass in `cargo test --release` but fail without
/// the --release flag because they run out of stack space. This increases
/// stack size for debug builds only, while leaving the stack space at the default
/// amount for release builds.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    // `FnOnce() -> ()` is redundant; `FnOnce()` already means "returns unit".
    F: FnOnce() + Send + 'static,
{
    std::thread::Builder::new()
        .stack_size(EXPANDED_STACK_SIZE)
        .spawn(run_test)
        .expect("Error while spawning expanded dev stack size thread")
        .join()
        .expect("Error while joining expanded dev stack size thread")
}

/// In --release builds, don't increase the stack size. Run the test normally.
/// This way, we find out if any of our tests are blowing the stack even after
/// optimizations in release builds.
#[allow(dead_code)]
#[cfg(not(debug_assertions))]
#[inline(always)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() + Send + 'static,
{
    run_test()
}
/// Parse `input` as an expression, discarding the source region.
#[allow(dead_code)]
pub fn parse_with<'a>(arena: &'a Bump, input: &'a str) -> Result<ast::Expr<'a>, Fail> {
    let loc_expr = parse_loc_with(arena, input)?;

    Ok(loc_expr.value)
}
/// Parse `input` as an expression, keeping its source region.
///
/// Leading whitespace is consumed before the expression is parsed; the parser
/// state and progress info are stripped from both the `Ok` and `Err` results.
#[allow(dead_code)]
pub fn parse_loc_with<'a>(arena: &'a Bump, input: &'a str) -> Result<Located<ast::Expr<'a>>, Fail> {
    let state = State::new(&input, Attempting::Module);
    let parser = space0_before(loc(roc_parse::expr::expr(0)), 0);

    // Return the parse result directly rather than binding it to a local
    // first (clippy::let_and_return).
    parser
        .parse(&arena, state)
        .map(|(loc_expr, _)| loc_expr)
        .map_err(|(fail, _)| fail)
}
/// Canonicalize `expr_str` in the default test module with a fresh arena.
#[allow(dead_code)]
pub fn can_expr(expr_str: &str) -> CanExprOut {
    let arena = Bump::new();
    let home = test_home();

    can_expr_with(&arena, home, expr_str)
}
/// Run uniqueness inference over `expr_str` with no predeclared idents and a
/// fresh arena.
#[allow(dead_code)]
pub fn uniq_expr(
    expr_str: &str,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    // No identifiers are predeclared for the plain test harness.
    let no_idents: ImMap<Ident, (Symbol, Region)> = ImMap::default();

    uniq_expr_with(&Bump::new(), expr_str, &no_idents)
}
/// Like `can_expr_with`, but additionally runs uniqueness-inference constraint
/// generation over the canonicalized expression.
///
/// Returns the canonicalized expression plus everything needed to solve its
/// uniqueness types: canonicalization output, problems, a fresh `Subs`, the
/// expression's type variable, the generated constraint, the home module, and
/// the interned identifiers.
#[allow(dead_code)]
pub fn uniq_expr_with(
    arena: &Bump,
    expr_str: &str,
    declared_idents: &ImMap<Ident, (Symbol, Region)>,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    let home = test_home();
    let CanExprOut {
        loc_expr,
        output,
        problems,
        var_store: old_var_store,
        var,
        interns,
        ..
    } = can_expr_with(arena, home, expr_str);
    // double check
    // NOTE(review): presumably this seeds the new store past the old store's
    // variables so uniqueness inference gets non-colliding fresh variables —
    // confirm against VarStore::new.
    let var_store = VarStore::new(old_var_store.fresh());
    let expected2 = Expected::NoExpectation(Type::Variable(var));
    let constraint = roc_constrain::uniq::constrain_declaration(
        home,
        &var_store,
        Region::zero(),
        &loc_expr,
        declared_idents,
        expected2,
    );
    let stdlib = uniq_stdlib();
    let types = stdlib.types;
    // Turn every builtin from the uniqueness stdlib into an Import for the
    // constraint solver.
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            solved_type: solved_type,
        })
        .collect();
    // load builtin values
    // TODO what to do with those rigids?
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);
    // load builtin types
    let mut constraint = load_builtin_aliases(&stdlib.aliases, constraint, &var_store);
    constraint.instantiate_aliases(&var_store);
    let subs2 = Subs::new(var_store.into());
    (
        loc_expr, output, problems, subs2, var, constraint, home, interns,
    )
}
/// Everything produced by canonicalizing a single test expression.
pub struct CanExprOut {
    /// The canonicalized expression, with its source region.
    pub loc_expr: Located<Expr>,
    /// Canonicalization output (e.g. introduced type variables).
    pub output: Output,
    /// Problems reported during canonicalization.
    pub problems: Vec<Problem>,
    /// The module the expression was canonicalized in.
    pub home: ModuleId,
    /// Interned identifiers, including the exposed builtins.
    pub interns: Interns,
    /// The store used to generate fresh type variables.
    pub var_store: VarStore,
    /// The type variable standing for the whole expression's type.
    pub var: Variable,
    /// The type constraint generated for the expression.
    pub constraint: Constraint,
}
/// Parse, desugar, canonicalize, and constrain a single expression in module
/// `home`, returning everything a test needs to solve and inspect its type.
///
/// # Panics
///
/// Panics if `expr_str` fails to parse.
#[allow(dead_code)]
pub fn can_expr_with(arena: &Bump, home: ModuleId, expr_str: &str) -> CanExprOut {
    let loc_expr = parse_loc_with(&arena, expr_str).unwrap_or_else(|e| {
        panic!(
            "can_expr_with() got a parse error when attempting to canonicalize:\n\n{:?} {:?}",
            expr_str, e
        )
    });
    let var_store = VarStore::default();
    // `var` stands for the type of the whole expression.
    let var = var_store.fresh();
    let expected = Expected::NoExpectation(Type::Variable(var));
    let module_ids = ModuleIds::default();
    // Desugar operators (convert them to Apply calls, taking into account
    // operator precedence and associativity rules), before doing other canonicalization.
    //
    // If we did this *during* canonicalization, then each time we
    // visited a BinOp node we'd recursively try to apply this to each of its nested
    // operators, and then again on *their* nested operators, ultimately applying the
    // rules multiple times unnecessarily.
    let loc_expr = operator::desugar_expr(arena, &loc_expr);
    let mut scope = Scope::new(home);
    let dep_idents = IdentIds::exposed_builtins(0);
    let mut env = Env::new(home, dep_idents, &module_ids, IdentIds::default());
    let (loc_expr, output) = canonicalize_expr(
        &mut env,
        &var_store,
        &mut scope,
        Region::zero(),
        &loc_expr.value,
    );
    let constraint = constrain_expr(
        &roc_constrain::expr::Env {
            rigids: ImMap::default(),
            home,
        },
        loc_expr.region,
        &loc_expr.value,
        expected,
    );
    let types = roc_builtins::std::types();
    // Turn every builtin from the standard stdlib into an Import for the solver.
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            solved_type: solved_type,
        })
        .collect();
    //load builtin values
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);
    //load builtin types
    let mut constraint =
        load_builtin_aliases(&roc_builtins::std::aliases(), constraint, &var_store);
    constraint.instantiate_aliases(&var_store);
    let mut all_ident_ids = MutMap::default();
    // When pretty printing types, we may need the exposed builtins,
    // so include them in the Interns we'll ultimately return.
    for (module_id, ident_ids) in IdentIds::exposed_builtins(0) {
        all_ident_ids.insert(module_id, ident_ids);
    }
    all_ident_ids.insert(home, env.ident_ids);
    let interns = Interns {
        module_ids: env.module_ids.clone(),
        all_ident_ids,
    };
    CanExprOut {
        loc_expr,
        output,
        problems: env.problems,
        home: env.home,
        var_store,
        interns,
        var,
        constraint,
    }
}
/// Build a `MutMap` from an iterator of key/value pairs.
///
/// Later pairs with duplicate keys overwrite earlier ones, as with repeated
/// `insert` calls.
#[allow(dead_code)]
pub fn mut_map_from_pairs<K, V, I>(pairs: I) -> MutMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq,
{
    let mut answer = MutMap::default();

    // `extend` reserves capacity from the iterator's size hint instead of
    // growing one insert at a time.
    answer.extend(pairs);

    answer
}
/// Build an `ImMap` from an iterator of key/value pairs.
///
/// Later pairs with duplicate keys overwrite earlier ones.
#[allow(dead_code)]
pub fn im_map_from_pairs<K, V, I>(pairs: I) -> ImMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq + Clone,
    V: Clone,
{
    let mut map = ImMap::default();

    for (k, v) in pairs {
        map.insert(k, v);
    }

    map
}
/// Build a `SendSet` from an iterator of elements; duplicates collapse.
#[allow(dead_code)]
pub fn send_set_from<V, I>(elems: I) -> SendSet<V>
where
    I: IntoIterator<Item = V>,
    V: Hash + Eq + Clone,
{
    let mut set = SendSet::default();

    for item in elems {
        set.insert(item);
    }

    set
}
/// Directory containing the test fixture modules (`tests/fixtures/build`).
// The original declared an unused `<'a>` lifetime parameter; `PathBuf` is
// owned, so no lifetime is needed.
#[allow(dead_code)]
pub fn fixtures_dir() -> PathBuf {
    Path::new("tests").join("fixtures").join("build")
}
/// Directory containing the builtins (`builtins`).
// The original declared an unused `<'a>` lifetime parameter and built the path
// via `PathBuf::new().join(..)`; `PathBuf::from` is the direct equivalent.
#[allow(dead_code)]
pub fn builtins_dir() -> PathBuf {
    PathBuf::from("builtins")
}
// Check constraints
//
// Keep track of the used (in types or expectations) variables, and the declared variables (in
// flex_vars or rigid_vars fields of LetConstraint). These collections should match: no duplicates
// and no variables that are used but not declared are allowed.
//
// There is one exception: the initial variable (that stores the type of the whole expression) is
// never declared, but is used.
/// Panic (with diagnostics on stdout) if `constraint` uses any variable it
/// never declares.
#[allow(dead_code)]
pub fn assert_correct_variable_usage(constraint: &Constraint) {
    // variables declared in constraint (flex_vars or rigid_vars)
    // and variables actually used in constraints
    let (declared, used) = variable_usage(constraint);

    let used_set: ImSet<Variable> = used.clone().into();

    let mut declared_set: ImSet<Variable> = declared.rigid_vars.clone().into();
    for var in declared.flex_vars.clone() {
        declared_set.insert(var);
    }

    // NOTE: this checks whether we're using variables that are not declared. For recursive type
    // definitions, their rigid types are declared twice, which is correct!
    let undeclared = used_set.clone().relative_complement(declared_set);

    if !undeclared.is_empty() {
        println!("VARIABLE USAGE PROBLEM");

        println!("used: {:?}", &used_set);
        println!("rigids: {:?}", &declared.rigid_vars);
        println!("flexs: {:?}", &declared.flex_vars);

        println!("difference: {:?}", &undeclared);

        panic!("variable usage problem (see stdout for details)");
    }
}
/// Variables declared by `Let` constraints, split by how they were introduced.
#[derive(Default)]
pub struct SeenVariables {
    /// Variables declared via the `rigid_vars` field of a `LetConstraint`.
    pub rigid_vars: Vec<Variable>,
    /// Variables declared via the `flex_vars` field of a `LetConstraint`.
    pub flex_vars: Vec<Variable>,
}
/// Walk `con`, collecting the variables it declares and the variables it uses.
///
/// The returned used-variable list is deduplicated (it is built from a set)
/// and sorted; the declared-variable lists are sorted in place.
pub fn variable_usage(con: &Constraint) -> (SeenVariables, Vec<Variable>) {
    let mut declared = SeenVariables::default();
    let mut used = ImSet::default();
    variable_usage_help(con, &mut declared, &mut used);

    // The initial variable (holding the type of the whole expression) is used
    // without ever being declared, so it is exempted from the check.
    // NOTE(review): assumes variable 1 is that initial/root variable — confirm.
    used.remove(unsafe { &Variable::unsafe_test_debug_variable(1) });

    let mut used_vec: Vec<Variable> = used.into_iter().collect();
    used_vec.sort();

    declared.rigid_vars.sort();
    declared.flex_vars.sort();

    (declared, used_vec)
}
/// Recursively record, for `con` and every nested constraint, which variables
/// are declared (into `declared`) and which are used in types or expectations
/// (into `used`).
fn variable_usage_help(con: &Constraint, declared: &mut SeenVariables, used: &mut ImSet<Variable>) {
    use Constraint::*;

    match con {
        // No types involved, so nothing to record.
        True | SaveTheEnvironment => (),
        Eq(tipe, expectation, _) => {
            // Both the actual type and the expected type use variables.
            for v in tipe.variables() {
                used.insert(v);
            }

            for v in expectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Lookup(_, expectation, _) => {
            for v in expectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Pattern(_, _, tipe, pexpectation) => {
            for v in tipe.variables() {
                used.insert(v);
            }

            for v in pexpectation.get_type_ref().variables() {
                used.insert(v);
            }
        }
        Let(letcon) => {
            // `Let` is the only constraint variant here that *declares* variables.
            declared.rigid_vars.extend(letcon.rigid_vars.clone());
            declared.flex_vars.extend(letcon.flex_vars.clone());

            variable_usage_help(&letcon.defs_constraint, declared, used);
            variable_usage_help(&letcon.ret_constraint, declared, used);
        }
        And(constraints) => {
            for sub in constraints {
                variable_usage_help(sub, declared, used);
            }
        }
    }
}

View file

@ -4,12 +4,11 @@ extern crate pretty_assertions;
extern crate indoc;
extern crate bumpalo;
extern crate roc;
mod helpers;
#[cfg(test)]
mod test_infer {
mod test_solve {
use crate::helpers::{assert_correct_variable_usage, can_expr, infer_expr, CanExprOut};
use roc_types::pretty_print::{content_to_string, name_all_type_vars};
use roc_types::subs::Subs;
@ -37,7 +36,7 @@ mod test_infer {
assert_correct_variable_usage(&constraint);
for (var, name) in output.ftv {
for (var, name) in output.introduced_variables.name_by_var {
subs.rigid_var(var, name);
}

View file

@ -4,13 +4,14 @@ extern crate pretty_assertions;
extern crate indoc;
extern crate bumpalo;
extern crate roc;
mod helpers;
#[cfg(test)]
mod test_infer_uniq {
use crate::helpers::{assert_correct_variable_usage, infer_expr, uniq_expr};
mod test_uniq_solve {
use crate::helpers::{
assert_correct_variable_usage, infer_expr, uniq_expr, with_larger_debug_stack,
};
use roc_types::pretty_print::{content_to_string, name_all_type_vars};
// HELPERS
@ -21,7 +22,7 @@ mod test_infer_uniq {
assert_correct_variable_usage(&constraint);
for (var, name) in output.ftv {
for (var, name) in output.introduced_variables.name_by_var {
subs.rigid_var(var, name);
}
@ -2082,7 +2083,7 @@ mod test_infer_uniq {
reverse
"#
),
"Attr * (Attr * (List (Attr (a | b) c)) -> Attr (* | a | b) (List (Attr a c)))",
"Attr * (Attr * (List (Attr (a | b) c)) -> Attr (* | a | b) (List (Attr b c)))",
);
}
@ -2099,7 +2100,7 @@ mod test_infer_uniq {
}
#[test]
fn update_cost() {
fn use_correct_ext_var() {
infer_eq(
indoc!(
r#"
@ -2115,4 +2116,250 @@ mod test_infer_uniq {
"Attr * (Attr (* | a | b) { p : (Attr b *), q : (Attr a *) }* -> Attr * Int)",
);
}
#[test]
fn reconstruct_path() {
infer_eq(
indoc!(
r#"
reconstructPath : Map position position, position -> List position
reconstructPath = \cameFrom, goal ->
when Map.get cameFrom goal is
Err KeyNotFound ->
[]
Ok next ->
List.push (reconstructPath cameFrom next) goal
reconstructPath
"#
),
"Attr Shared (Attr Shared (Map (Attr Shared position) (Attr Shared position)), Attr Shared position -> Attr * (List (Attr Shared position)))"
);
}
#[test]
fn cheapest_open() {
infer_eq(
indoc!(
r#"
Model position : { evaluated : Set position
, openSet : Set position
, costs : Map.Map position Float
, cameFrom : Map.Map position position
}
cheapestOpen : (position -> Float), Model position -> Result position [ KeyNotFound ]*
cheapestOpen = \costFunction, model ->
folder = \position, resSmallestSoFar ->
when Map.get model.costs position is
Err e ->
Err e
Ok cost ->
positionCost = costFunction position
when resSmallestSoFar is
Err _ -> Ok { position, cost: cost + positionCost }
Ok smallestSoFar ->
if positionCost + cost < smallestSoFar.cost then
Ok { position, cost: cost + positionCost }
else
Ok smallestSoFar
Set.foldl model.openSet folder (Err KeyNotFound)
|> Result.map (\x -> x.position)
cheapestOpen
"#
),
"Attr * (Attr * (Attr Shared position -> Attr Shared Float), Attr * (Model (Attr Shared position)) -> Attr * (Result (Attr Shared position) (Attr * [ KeyNotFound ]*)))"
);
}
#[test]
fn update_cost() {
infer_eq(
indoc!(
r#"
Model position : { evaluated : Set position
, openSet : Set position
, costs : Map.Map position Float
, cameFrom : Map.Map position position
}
reconstructPath : Map position position, position -> List position
reconstructPath = \cameFrom, goal ->
when Map.get cameFrom goal is
Err KeyNotFound ->
[]
Ok next ->
List.push (reconstructPath cameFrom next) goal
updateCost : position, position, Model position -> Model position
updateCost = \current, neighbour, model ->
newCameFrom = Map.insert model.cameFrom neighbour current
newCosts = Map.insert model.costs neighbour distanceTo
distanceTo = reconstructPath newCameFrom neighbour
|> List.length
|> Num.toFloat
newModel = { model & costs : newCosts , cameFrom : newCameFrom }
when Map.get model.costs neighbour is
Err KeyNotFound ->
newModel
Ok previousDistance ->
if distanceTo < previousDistance then
newModel
else
model
updateCost
"#
),
"Attr * (Attr Shared position, Attr Shared position, Attr Shared (Model (Attr Shared position)) -> Attr Shared (Model (Attr Shared position)))"
);
}
#[test]
fn astar_full_code() {
with_larger_debug_stack(|| {
infer_eq(
indoc!(
r#"
Model position : { evaluated : Set position
, openSet : Set position
, costs : Map.Map position Float
, cameFrom : Map.Map position position
}
initialModel : position -> Model position
initialModel = \start ->
{ evaluated : Set.empty
, openSet : Set.singleton start
, costs : Map.singleton start 0.0
, cameFrom : Map.empty
}
cheapestOpen : (position -> Float), Model position -> Result position [ KeyNotFound ]*
cheapestOpen = \costFunction, model ->
folder = \position, resSmallestSoFar ->
when Map.get model.costs position is
Err e ->
Err e
Ok cost ->
positionCost = costFunction position
when resSmallestSoFar is
Err _ -> Ok { position, cost: cost + positionCost }
Ok smallestSoFar ->
if positionCost + cost < smallestSoFar.cost then
Ok { position, cost: cost + positionCost }
else
Ok smallestSoFar
Set.foldl model.openSet folder (Err KeyNotFound)
|> Result.map (\x -> x.position)
reconstructPath : Map position position, position -> List position
reconstructPath = \cameFrom, goal ->
when Map.get cameFrom goal is
Err KeyNotFound ->
[]
Ok next ->
List.push (reconstructPath cameFrom next) goal
updateCost : position, position, Model position -> Model position
updateCost = \current, neighbour, model ->
newCameFrom = Map.insert model.cameFrom neighbour current
newCosts = Map.insert model.costs neighbour distanceTo
distanceTo = reconstructPath newCameFrom neighbour
|> List.length
|> Num.toFloat
newModel = { model & costs : newCosts , cameFrom : newCameFrom }
when Map.get model.costs neighbour is
Err KeyNotFound ->
newModel
Ok previousDistance ->
if distanceTo < previousDistance then
newModel
else
model
findPath : { costFunction: (position, position -> Float), moveFunction: (position -> Set position), start : position, end : position } -> Result (List position) [ KeyNotFound ]*
findPath = \{ costFunction, moveFunction, start, end } ->
astar costFunction moveFunction end (initialModel start)
astar : (position, position -> Float), (position -> Set position), position, Model position -> [ Err [ KeyNotFound ]*, Ok (List position) ]*
astar = \costFn, moveFn, goal, model ->
when cheapestOpen (\position -> costFn goal position) model is
Err _ ->
Err KeyNotFound
Ok current ->
if current == goal then
Ok (reconstructPath model.cameFrom goal)
else
modelPopped = { model & openSet : Set.remove model.openSet current, evaluated : Set.insert model.evaluated current }
neighbours = moveFn current
newNeighbours = Set.diff neighbours modelPopped.evaluated
modelWithNeighbours = { modelPopped & openSet : Set.union modelPopped.openSet newNeighbours }
modelWithCosts = Set.foldl newNeighbours (\nb, md -> updateCost current nb md) modelWithNeighbours
astar costFn moveFn goal modelWithCosts
findPath
"#
),
"Attr * (Attr * { costFunction : (Attr Shared (Attr Shared position, Attr Shared position -> Attr Shared Float)), end : (Attr Shared position), moveFunction : (Attr Shared (Attr Shared position -> Attr * (Set (Attr Shared position)))), start : (Attr Shared position) } -> Attr * (Result (Attr * (List (Attr Shared position))) (Attr * [ KeyNotFound ]*)))"
)
});
}
#[test]
fn instantiated_alias() {
// Applying the `Model` alias at `Int` should produce `Model (Attr * Int)`
// in the inferred uniqueness-annotated type.
// NOTE(review): whitespace inside the indoc! raw string is semantic and
// must stay exactly as-is.
infer_eq(
indoc!(
r#"
Model a : { foo : Set a }

initialModel : position -> Model Int
initialModel = \_ -> { foo : Set.empty }

initialModel
"#
),
"Attr * (Attr * position -> Attr * (Model (Attr * Int)))",
);
}
}

View file

@ -1,5 +0,0 @@
pub mod def;
pub mod expr;
pub mod module;
pub mod pattern;
pub mod spaces;

View file

@ -1,2 +0,0 @@
pub mod expr;
pub mod layout;

View file

@ -1,36 +0,0 @@
// PHILOSOPHY
//
// Focus on optimizations which are only safe in the absence of side effects, and leave the rest to LLVM.
//
// This focus may lead to some optimizations becoming transitively in scope. For example, some deforestation
// examples in the MSR paper benefit from multiple rounds of interleaved deforestation, beta-reduction, and inlining.
// To get those benefits, we'd have to do some inlining and beta-reduction that we could otherwise leave to LLVM's
// inlining and constant propagation/folding.
//
// Even if we're doing those things, it may still make sense to have LLVM do a pass for them as well, since
// early LLVM optimization passes may unlock later opportunities for inlining and constant propagation/folding.
//
// INLINING
//
// If a function is called exactly once (it's a helper function), presumably we always want to inline those.
// If a function is "small enough" it's probably worth inlining too.
//
// FUSION
//
// https://www.microsoft.com/en-us/research/wp-content/uploads/2016/07/deforestation-short-cut.pdf
//
// Basic approach:
//
// Do list stuff using `build` passing Cons Nil (like a cons list) and then do foldr/build substitution/reduction.
// Afterwards, we can do a separate pass to flatten nested Cons structures into properly initialized RRBTs.
// This way we get both deforestation and efficient RRBT construction. Should work for the other collection types too.
//
// It looks like we need to do some amount of inlining and beta reductions on the Roc side, rather than
// leaving all of those to LLVM.
//
// Advanced approach:
//
// Express operations like map and filter in terms of toStream and fromStream, to unlock more deforestation.
// More info here:
//
// https://wiki.haskell.org/GHC_optimisations#Fusion

View file

@ -1,309 +0,0 @@
use std::alloc::{self, Layout};
use std::fmt;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::slice;
use std::str;
/// An immutable string whose maximum length is `isize::MAX`. (For convenience,
/// it still returns its length as `usize` since it can't be negative.)
///
/// For larger strings, under the hood this is a struct which stores a
/// pointer and a usize for length (so 16 bytes on a 64-bit system).
///
/// For smaller strings (lengths 0-15 on 64-bit systems, and 0-7 on 32-bit),
/// this uses a "short string optimization" where it stores the entire string
/// in this struct and does not bother allocating on the heap at all.
///
/// The short-string capacity follows from `InnerStr` being 16 bytes: one
/// byte holds the length + flag, leaving 15 (or 7 on 32-bit) for content.
pub struct RocStr(InnerStr);
/// Roc strings are optimized not to do heap allocations when they are between
/// 0-15 bytes in length on 64-bit little endian systems,
/// and 0-7 bytes on systems that are 32-bit, big endian, or both.
///
/// This optimization relies on the assumption that string lengths are always
/// less than isize::MAX as opposed to usize::MAX. It relies on this because
/// it uses the most significant bit in the most significant byte in the length
/// as a flag for whether it is a short string or a long string. This bit is
/// unused if lengths are below isize::MAX.
///
/// Roc integers are i64, so on 64-bit systems this guarantee necessarily holds
/// from the roc side. On a 32-bit system it might not though. Rust historically
/// had this guarantee, but it might get relaxed. For more on the Rust side, see
/// https://github.com/rust-lang/unsafe-code-guidelines/issues/102
///
/// Since Roc will interpret them as i64, it's important that on 64-bit systems,
/// Rust never sends Roc any length values outside isize::MAX because they'll
/// be interpreted as negative i64s!
///
/// Anyway, this "is this a short string?" bit is in a convenient location on
/// 64-bit little endian systems. This is because of how Rust's &str is
/// laid out, and memory alignment.
///
/// Rust's &str is laid out as a slice, namely:
///
/// struct RustStr { ptr: *const [u8], length: usize }
///
/// In little endian systems, the bit for detecting short vs long length is
/// the most significant bit of the length field, which is the very last byte
/// in the struct.
///
/// This means if we detect that we are a short string, we can pass a pointer
/// to the entire struct (which is necessarily aligned already), and its first
/// contiguous N bytes represent the bytes in the string, where N is 15 on
/// 64-bit systems and 7 on 32-bit ones. The final byte is the msbyte where
/// we stored the flag, but it doesn't matter what's in that memory because the
/// str's length will be too low to encounter that anyway.
///
/// Which field of this union is active is determined by the flag bit; reading
/// the inactive interpretation is done deliberately (e.g. reading `long.length`
/// of a short string just to inspect the flag byte).
union InnerStr {
    // Inline storage for short strings (content + final length/flag byte).
    raw: [u8; 16],
    // Heap pointer + length for long strings.
    long: LongStr,
}
/// The "long string" representation: a raw pointer to heap-allocated bytes
/// plus a length.
///
/// NOTE(review): `Copy` here performs a shallow bitwise copy of the pointer,
/// while the manual `Clone` impl below deep-copies the bytes. Implicit copies
/// therefore alias the same buffer — confirm that this asymmetry is intended.
#[derive(Copy)]
#[repr(C)]
struct LongStr {
    /// It is *crucial* that we have exactly this memory layout!
    /// This is the same layout that Rust uses for string slices in memory,
    /// which lets us mem::transmute long strings directly into them.
    ///
    /// https://pramode.in/2016/09/13/using-unsafe-tricks-in-rust/
    bytes: MaybeUninit<*const u8>,
    length: usize,
}
// The bit pattern for an empty string: the "short string" flag (the most
// significant bit of the usize length field) set, and all other bits 0.
// Any other bit pattern means this is not an empty string!
#[cfg(target_pointer_width = "64")]
const EMPTY_STRING: usize =
    0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;

// BUGFIX: this previously was 0b1000_0000_0000_0000 (bit 15 set), but usize
// is 32 bits wide under this cfg, so the short-string flag — which
// len_msbyte() reads from the *most significant* byte — lives in bit 31.
#[cfg(target_pointer_width = "32")]
const EMPTY_STRING: usize = 0b1000_0000_0000_0000_0000_0000_0000_0000;
impl RocStr {
    /// True iff this is exactly the canonical empty-string bit pattern.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        unsafe { self.0.long.length == EMPTY_STRING }
    }

    /// The canonical empty string. Only the length field (with the short
    /// string flag set) is initialized; the bytes are left uninitialized
    /// because an empty string never reads them.
    #[inline(always)]
    pub fn empty() -> RocStr {
        RocStr(InnerStr {
            long: LongStr {
                length: EMPTY_STRING,
                // empty strings only ever have length set.
                bytes: MaybeUninit::uninit(),
            },
        })
    }

    /// Returns the length in bytes, for both short and long strings.
    pub fn len(&self) -> usize {
        let len_msbyte = self.len_msbyte();

        if flagged_as_short_string(len_msbyte) {
            // Drop the "is this a short string?" flag
            let length: u8 = len_msbyte & 0b0111_1111;

            length as usize
        } else {
            unsafe { self.0.long.length }
        }
    }

    /// The most significant byte in the length. We use the last bit of this
    /// byte to determine if we are a short string or a long string.
    /// If this is a short string, we intentionally set that bit to 1.
    ///
    /// BUGFIX: the three non-(64-bit little-endian) cfg variants previously
    /// read `self.long.length`, but `RocStr` is a tuple struct wrapping the
    /// union — the field is `self.0.long.length`. Those variants could never
    /// have compiled on their target platforms.
    #[inline(always)]
    #[cfg(all(target_pointer_width = "64", target_endian = "little"))]
    fn len_msbyte(&self) -> u8 {
        (unsafe { mem::transmute::<usize, [u8; 8]>(self.0.long.length) })[7]
    }

    #[inline(always)]
    #[cfg(all(target_pointer_width = "32", target_endian = "little"))]
    fn len_msbyte(&self) -> u8 {
        (unsafe { mem::transmute::<usize, [u8; 4]>(self.0.long.length) })[3]
    }

    #[inline(always)]
    #[cfg(all(target_pointer_width = "64", target_endian = "big"))]
    fn len_msbyte(&self) -> u8 {
        (unsafe { mem::transmute::<usize, [u8; 8]>(self.0.long.length) })[0]
    }

    #[inline(always)]
    #[cfg(all(target_pointer_width = "32", target_endian = "big"))]
    fn len_msbyte(&self) -> u8 {
        (unsafe { mem::transmute::<usize, [u8; 4]>(self.0.long.length) })[0]
    }
}
/// Whether the given most-significant length byte carries the
/// "short string" flag (i.e. its high bit is set).
#[inline(always)]
fn flagged_as_short_string(len_msbyte: u8) -> bool {
    // The flag lives in the top bit of the msbyte.
    (len_msbyte >> 7) == 1
}
/// Returns `len_msbyte` with the "short string" flag (the high bit) set.
#[inline(always)]
fn with_short_string_flag_enabled(len_msbyte: u8) -> u8 {
    const SHORT_STRING_FLAG: u8 = 0b1000_0000;

    len_msbyte | SHORT_STRING_FLAG
}
impl fmt::Debug for RocStr {
    /// Debug-formats by converting to an owned String first.
    // TODO do this without getting a cloned String involved
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let owned: String = self.clone().into();

        fmt::Debug::fmt(&owned, f)
    }
}
impl fmt::Display for RocStr {
    /// Display-formats by converting to an owned String first.
    // TODO do this without getting a cloned String involved
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let owned: String = self.clone().into();

        fmt::Display::fmt(&owned, f)
    }
}
impl Clone for LongStr {
    /// Deep-clones a long string: allocates a fresh buffer of the same
    /// length and copies the bytes into it. (Contrast with the derived
    /// `Copy`, which is a shallow bitwise copy of the pointer.)
    fn clone(&self) -> Self {
        let length = self.length;
        // NOTE(review): assumes `length` is nonzero and that align 8 matches
        // the allocation site — a zero-size Layout here would be invalid for
        // alloc(). TODO confirm callers never clone an empty LongStr.
        let layout = unsafe { Layout::from_size_align_unchecked(length, 8) };
        let old_bytes_ptr = unsafe { self.bytes.assume_init() };

        // Allocate memory for the new bytes. (We'll manually drop them later.)
        let new_bytes_ptr = unsafe { alloc::alloc(layout) };

        unsafe {
            ptr::copy_nonoverlapping(old_bytes_ptr, new_bytes_ptr, length);
        }

        LongStr {
            bytes: MaybeUninit::new(new_bytes_ptr),
            length,
        }
    }
}
impl Into<String> for RocStr {
    /// Converts into an owned `String` by copying the bytes out.
    ///
    /// Short strings are read straight from the inline buffer; long strings
    /// are copied out of their heap allocation, which is then freed by `Drop`.
    #[cfg(all(target_pointer_width = "64", target_endian = "little"))]
    fn into(self) -> String {
        let len_msbyte = self.len_msbyte();

        // TODO I'm not sure this works the way we want it to. Need to review.
        if flagged_as_short_string(len_msbyte) {
            // Drop the "is this a short string?" flag
            let length: u8 = len_msbyte & 0b0111_1111;
            let bytes_ptr = unsafe { &self.0.raw } as *const u8;

            // These bytes are already aligned, so we can use them directly.
            let bytes_slice: &[u8] = unsafe { slice::from_raw_parts(bytes_ptr, length as usize) };

            (unsafe { str::from_utf8_unchecked(bytes_slice) }).to_string()
        } else {
            // If it's a long string, we already have the exact
            // same memory layout as a Rust &str slice.
            let str_slice = unsafe { mem::transmute::<[u8; 16], &str>(self.0.raw) };

            // BUGFIX: `to_string()` copies the bytes into a *new* allocation,
            // so the previous `mem::forget(self)` here leaked the RocStr's
            // heap buffer on every conversion. Letting `self` drop normally
            // deallocates the original buffer after the copy.
            str_slice.to_string()
        }
    }
}
impl From<String> for RocStr {
    /// Converts a `String` into a `RocStr`, using the inline short-string
    /// representation (no heap allocation) when it fits in 15 bytes.
    #[cfg(all(target_pointer_width = "64", target_endian = "little"))]
    fn from(string: String) -> RocStr {
        if string.is_empty() {
            RocStr::empty()
        } else {
            let str_len = string.len();

            if str_len <= 15 {
                let mut buffer: [u8; 16] = [0; 16];

                // Copy the raw bytes from the string into the buffer.
                //
                // BUGFIX: use `as_mut_ptr()`; the previous
                // `buffer.as_ptr() as *mut u8` wrote through a pointer
                // derived from a shared borrow, which is undefined behavior.
                unsafe {
                    ptr::copy_nonoverlapping(string.as_ptr(), buffer.as_mut_ptr(), str_len);
                }

                // Set the last byte in the buffer to be the length (with flag).
                buffer[15] = with_short_string_flag_enabled(str_len as u8);

                RocStr(InnerStr { raw: buffer })
            } else {
                panic!("TODO: use mem::forget on the string and steal its bytes!");
                // let bytes_ptr = string.as_bytes().clone().as_ptr();
                // let long = LongStr {
                //     bytes: MaybeUninit::new(bytes_ptr),
                //     length: str_len,
                // };

                // RocStr(InnerStr { long })
            }
        }
    }
}
impl Clone for RocStr {
    /// Clones by bitwise-copying the active union representation.
    ///
    /// NOTE(review): for long strings this copies the *pointer* (LongStr is
    /// `Copy`), not the bytes, so the clone aliases the same heap buffer
    /// while `Drop` deallocates it — this looks like a double-free hazard.
    /// Confirm whether a deep copy (LongStr's manual `clone`) was intended.
    fn clone(&self) -> Self {
        let inner = if flagged_as_short_string(self.len_msbyte()) {
            InnerStr {
                // Short string: bytes live inline in `raw`.
                raw: (unsafe { self.0.raw }),
            }
        } else {
            InnerStr {
                // Long string: copy the pointer + length pair.
                long: (unsafe { self.0.long }),
            }
        };

        RocStr(inner)
    }
}
impl Drop for RocStr {
    fn drop(&mut self) {
        // If this is a LongStr, we need to deallocate its bytes.
        // Otherwise we would have a memory leak!
        if !flagged_as_short_string(self.len_msbyte()) {
            let bytes_ptr = unsafe { self.0.long.bytes.assume_init() };

            // If this was already dropped previously (most likely because the
            // bytes were moved into a String), we shouldn't deallocate them.
            if !bytes_ptr.is_null() {
                let length = unsafe { self.0.long.length };
                // NOTE(review): must mirror the Layout used at allocation
                // time (size = length, align = 8), or dealloc is UB.
                let layout = unsafe { Layout::from_size_align_unchecked(length, 8) };

                // We don't need to call drop_in_place. We know bytes_ptr points to
                // a plain u8 array, so there will for sure be no destructor to run.
                unsafe {
                    alloc::dealloc(bytes_ptr as *mut u8, layout);
                }
            }
        }
    }
}
/// Smoke tests for the RocStr short-string representation.
#[cfg(test)]
mod test_roc_str {
    use super::RocStr;

    #[test]
    fn empty_str() {
        let empty = RocStr::empty();

        assert!(empty.is_empty());
        assert_eq!(empty.len(), 0);
    }

    #[test]
    fn fmt() {
        let displayed = format!("{}", RocStr::empty());

        assert_eq!(displayed, "".to_string());
    }
}

27
compiler/uniq/Cargo.toml Normal file
View file

@ -0,0 +1,27 @@
[package]
name = "roc_uniq"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
edition = "2018"
[dependencies]
roc_collections = { path = "../collections" }
roc_region = { path = "../region" }
roc_module = { path = "../module" }
roc_types = { path = "../types" }
roc_can = { path = "../can" }
im = "14" # im and im-rc should always have the same version!
im-rc = "14" # im and im-rc should always have the same version!
[dev-dependencies]
roc_constrain = { path = "../constrain" }
roc_builtins = { path = "../builtins" }
roc_problem = { path = "../problem" }
roc_parse = { path = "../parse" }
roc_solve = { path = "../solve" }
pretty_assertions = "0.5.1"
maplit = "1.0.1"
indoc = "0.3.3"
quickcheck = "0.8"
quickcheck_macros = "0.8"
bumpalo = "2.6"

View file

@ -0,0 +1,455 @@
extern crate bumpalo;
use self::bumpalo::Bump;
use roc_builtins::unique::uniq_stdlib;
use roc_can::constraint::Constraint;
use roc_can::env::Env;
use roc_can::expected::Expected;
use roc_can::expr::{canonicalize_expr, Expr, Output};
use roc_can::operator;
use roc_can::scope::Scope;
use roc_collections::all::{ImMap, ImSet, MutMap, SendMap, SendSet};
use roc_constrain::expr::constrain_expr;
use roc_constrain::module::{constrain_imported_values, load_builtin_aliases, Import};
use roc_module::ident::Ident;
use roc_module::symbol::{IdentIds, Interns, ModuleId, ModuleIds, Symbol};
use roc_parse::ast::{self, Attempting};
use roc_parse::blankspace::space0_before;
use roc_parse::parser::{loc, Fail, Parser, State};
use roc_problem::can::Problem;
use roc_region::all::{Located, Region};
use roc_solve::solve;
use roc_types::subs::{Content, Subs, VarStore, Variable};
use roc_types::types::Type;
use std::hash::Hash;
use std::path::{Path, PathBuf};
/// The ModuleId used as the "home" module by tests.
pub fn test_home() -> ModuleId {
    let module_ids = ModuleIds::default();

    module_ids.get_or_insert(&"Test".into())
}
/// Solves `constraint` against `subs` in an empty environment, then returns
/// the inferred content of `expr_var` (the variable standing for the whole
/// expression) along with the solved substitutions.
///
/// Type errors encountered during solving are appended to `problems`.
#[allow(dead_code)]
pub fn infer_expr(
    subs: Subs,
    problems: &mut Vec<roc_types::types::Problem>,
    constraint: &Constraint,
    expr_var: Variable,
) -> (Content, Subs) {
    let env = solve::Env {
        aliases: MutMap::default(),
        vars_by_symbol: SendMap::default(),
    };
    let (solved, _) = solve::run(&env, problems, subs, constraint);

    let content = solved.inner().get_without_compacting(expr_var).content;

    (content, solved.into_inner())
}
/// Used in the with_larger_debug_stack() function, for tests that otherwise
/// run out of stack space in debug builds (but don't in --release builds).
///
/// 4 MiB (vs. the typically smaller per-thread default).
#[allow(dead_code)]
const EXPANDED_STACK_SIZE: usize = 4 * 1024 * 1024;
/// Without this, some tests pass in `cargo test --release` but fail without
/// the --release flag because they run out of stack space. This increases
/// stack size for debug builds only, while leaving the stack space at the default
/// amount for release builds.
#[allow(dead_code)]
#[cfg(debug_assertions)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() + Send + 'static,
{
    // Spawn the test on a thread with an enlarged stack, then block on it.
    let builder = std::thread::Builder::new().stack_size(EXPANDED_STACK_SIZE);

    builder
        .spawn(run_test)
        .expect("Error while spawning expanded dev stack size thread")
        .join()
        .expect("Error while joining expanded dev stack size thread")
}
/// In --release builds, don't increase the stack size. Run the test normally.
/// This way, we find out if any of our tests are blowing the stack even after
/// optimizations in release builds.
#[allow(dead_code)]
#[cfg(not(debug_assertions))]
#[inline(always)]
pub fn with_larger_debug_stack<F>(run_test: F)
where
    F: FnOnce() + Send + 'static,
{
    run_test()
}
/// Parses `input` and returns just the expression, dropping its region.
#[allow(dead_code)]
pub fn parse_with<'a>(arena: &'a Bump, input: &'a str) -> Result<ast::Expr<'a>, Fail> {
    let loc_expr = parse_loc_with(arena, input)?;

    Ok(loc_expr.value)
}
/// Parses `input` as a module-level expression, returning it with its region.
#[allow(dead_code)]
pub fn parse_loc_with<'a>(arena: &'a Bump, input: &'a str) -> Result<Located<ast::Expr<'a>>, Fail> {
    let state = State::new(&input, Attempting::Module);
    let parser = space0_before(loc(roc_parse::expr::expr(0)), 0);

    match parser.parse(&arena, state) {
        Ok((loc_expr, _)) => Ok(loc_expr),
        Err((fail, _)) => Err(fail),
    }
}
/// Canonicalizes `expr_str` in a fresh arena under the test home module.
#[allow(dead_code)]
pub fn can_expr(expr_str: &str) -> CanExprOut {
    let arena = Bump::new();

    can_expr_with(&arena, test_home(), expr_str)
}
/// Like `uniq_expr_with`, but with a fresh arena and no pre-declared idents.
#[allow(dead_code)]
pub fn uniq_expr(
    expr_str: &str,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    // No idents are declared up front by default.
    let no_idents: ImMap<Ident, (Symbol, Region)> = ImMap::default();

    uniq_expr_with(&Bump::new(), expr_str, &no_idents)
}
/// Like `can_expr_with`, but additionally constrains the expression under the
/// uniqueness-inference rules and loads the unique (Attr) stdlib.
#[allow(dead_code)]
pub fn uniq_expr_with(
    arena: &Bump,
    expr_str: &str,
    declared_idents: &ImMap<Ident, (Symbol, Region)>,
) -> (
    Located<Expr>,
    Output,
    Vec<Problem>,
    Subs,
    Variable,
    Constraint,
    ModuleId,
    Interns,
) {
    let home = test_home();
    let CanExprOut {
        loc_expr,
        output,
        problems,
        var_store: old_var_store,
        var,
        interns,
        ..
    } = can_expr_with(arena, home, expr_str);

    // Continue from the variables canonicalization already allocated.
    let var_store = VarStore::new(old_var_store.fresh());

    let expected2 = Expected::NoExpectation(Type::Variable(var));
    let constraint = roc_constrain::uniq::constrain_declaration(
        home,
        &var_store,
        Region::zero(),
        &loc_expr,
        declared_idents,
        expected2,
    );

    let stdlib = uniq_stdlib();
    let types = stdlib.types;
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            // (was `solved_type: solved_type` — clippy: redundant_field_names)
            solved_type,
        })
        .collect();

    // Load builtin values.
    // TODO what to do with those rigids?
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);

    // Load builtin types.
    let mut constraint = load_builtin_aliases(&stdlib.aliases, constraint, &var_store);

    constraint.instantiate_aliases(&var_store);

    let subs2 = Subs::new(var_store.into());

    (
        loc_expr, output, problems, subs2, var, constraint, home, interns,
    )
}
/// Everything `can_expr_with` produces: the canonicalized expression plus the
/// state needed to constrain/solve it and pretty-print the resulting type.
pub struct CanExprOut {
    pub loc_expr: Located<Expr>,
    pub output: Output,
    pub problems: Vec<Problem>,
    pub home: ModuleId,
    pub interns: Interns,
    pub var_store: VarStore,
    // The variable standing for the type of the whole expression.
    pub var: Variable,
    pub constraint: Constraint,
}
/// Parses, desugars, and canonicalizes `expr_str`, returning everything
/// needed to constrain and solve it (see `CanExprOut`).
///
/// Panics if the input fails to parse — this is a test helper, so a parse
/// error is a broken test.
#[allow(dead_code)]
pub fn can_expr_with(arena: &Bump, home: ModuleId, expr_str: &str) -> CanExprOut {
    let loc_expr = parse_loc_with(arena, expr_str).unwrap_or_else(|e| {
        panic!(
            "can_expr_with() got a parse error when attempting to canonicalize:\n\n{:?} {:?}",
            expr_str, e
        )
    });

    let var_store = VarStore::default();
    let var = var_store.fresh();
    let expected = Expected::NoExpectation(Type::Variable(var));
    let module_ids = ModuleIds::default();

    // Desugar operators (convert them to Apply calls, taking into account
    // operator precedence and associativity rules), before doing other canonicalization.
    //
    // If we did this *during* canonicalization, then each time we
    // visited a BinOp node we'd recursively try to apply this to each of its nested
    // operators, and then again on *their* nested operators, ultimately applying the
    // rules multiple times unnecessarily.
    let loc_expr = operator::desugar_expr(arena, &loc_expr);

    let mut scope = Scope::new(home);
    let dep_idents = IdentIds::exposed_builtins(0);
    let mut env = Env::new(home, dep_idents, &module_ids, IdentIds::default());
    let (loc_expr, output) = canonicalize_expr(
        &mut env,
        &var_store,
        &mut scope,
        Region::zero(),
        &loc_expr.value,
    );

    let constraint = constrain_expr(
        &roc_constrain::expr::Env {
            rigids: ImMap::default(),
            home,
        },
        loc_expr.region,
        &loc_expr.value,
        expected,
    );

    let types = roc_builtins::std::types();
    let imports: Vec<_> = types
        .iter()
        .map(|(symbol, (solved_type, region))| Import {
            loc_symbol: Located::at(*region, *symbol),
            // (was `solved_type: solved_type` — clippy: redundant_field_names)
            solved_type,
        })
        .collect();

    // Load builtin values.
    let (_introduced_rigids, constraint) =
        constrain_imported_values(imports, constraint, &var_store);

    // Load builtin types.
    let mut constraint =
        load_builtin_aliases(&roc_builtins::std::aliases(), constraint, &var_store);

    constraint.instantiate_aliases(&var_store);

    let mut all_ident_ids = MutMap::default();

    // When pretty printing types, we may need the exposed builtins,
    // so include them in the Interns we'll ultimately return.
    for (module_id, ident_ids) in IdentIds::exposed_builtins(0) {
        all_ident_ids.insert(module_id, ident_ids);
    }

    all_ident_ids.insert(home, env.ident_ids);

    let interns = Interns {
        module_ids: env.module_ids.clone(),
        all_ident_ids,
    };

    CanExprOut {
        loc_expr,
        output,
        problems: env.problems,
        home: env.home,
        var_store,
        interns,
        var,
        constraint,
    }
}
/// Builds a MutMap from an iterator of key/value pairs.
#[allow(dead_code)]
pub fn mut_map_from_pairs<K, V, I>(pairs: I) -> MutMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq,
{
    let mut map = MutMap::default();

    for (k, v) in pairs {
        map.insert(k, v);
    }

    map
}
/// Builds an ImMap from an iterator of key/value pairs.
#[allow(dead_code)]
pub fn im_map_from_pairs<K, V, I>(pairs: I) -> ImMap<K, V>
where
    I: IntoIterator<Item = (K, V)>,
    K: Hash + Eq + Clone,
    V: Clone,
{
    let mut map = ImMap::default();

    for (k, v) in pairs {
        map.insert(k, v);
    }

    map
}
/// Builds a SendSet from an iterator of elements.
#[allow(dead_code)]
pub fn send_set_from<V, I>(elems: I) -> SendSet<V>
where
    I: IntoIterator<Item = V>,
    V: Hash + Eq + Clone,
{
    let mut set = SendSet::default();

    for elem in elems {
        set.insert(elem);
    }

    set
}
/// Directory containing build-test fixtures, relative to the crate root.
/// (Removed the previously declared — and entirely unused — lifetime
/// parameter `'a`; clippy/rustc flag unused generic lifetimes.)
#[allow(dead_code)]
pub fn fixtures_dir() -> PathBuf {
    Path::new("tests").join("fixtures").join("build")
}
/// Directory containing the builtins, relative to the current directory.
/// (Removed the previously declared — and entirely unused — lifetime
/// parameter `'a`.)
#[allow(dead_code)]
pub fn builtins_dir() -> PathBuf {
    PathBuf::new().join("builtins")
}
// Check constraints
//
// Keep track of the used (in types or expectations) variables, and the declared variables (in
// flex_vars or rigid_vars fields of LetConstraint. These roc_collections should match: no duplicates
// and no variables that are used but not declared are allowed.
//
// There is one exception: the initial variable (that stores the type of the whole expression) is
// never declared, but is used.
#[allow(dead_code)]
pub fn assert_correct_variable_usage(constraint: &Constraint) {
    // variables declared in constraint (flex_vars or rigid_vars)
    // and variables actually used in constraints
    let (declared, used) = variable_usage(constraint);

    let used: ImSet<Variable> = used.clone().into();
    let mut decl: ImSet<Variable> = declared.rigid_vars.clone().into();

    // Flex vars count as declared too.
    for var in declared.flex_vars.clone() {
        decl.insert(var);
    }

    // Variables that are used but never declared.
    let diff = used.clone().relative_complement(decl);

    // NOTE: this checks whether we're using variables that are not declared. For recursive type
    // definitions, their rigid types are declared twice, which is correct!
    if !diff.is_empty() {
        println!("VARIABLE USAGE PROBLEM");

        println!("used: {:?}", &used);
        println!("rigids: {:?}", &declared.rigid_vars);
        println!("flexs: {:?}", &declared.flex_vars);

        println!("difference: {:?}", &diff);

        panic!("variable usage problem (see stdout for details)");
    }
}
/// Variables declared by `Let` constraints, split by kind.
#[derive(Default)]
pub struct SeenVariables {
    // Declared via LetConstraint.rigid_vars.
    pub rigid_vars: Vec<Variable>,
    // Declared via LetConstraint.flex_vars.
    pub flex_vars: Vec<Variable>,
}
/// Collects the declared (rigid/flex) and used variables of `con`, both sorted.
///
/// The special test debug variable 1 is excluded from the used list —
/// presumably the initial whole-expression variable, which the comment above
/// `assert_correct_variable_usage` notes is used but never declared.
pub fn variable_usage(con: &Constraint) -> (SeenVariables, Vec<Variable>) {
    let mut declared = SeenVariables::default();
    let mut used = ImSet::default();
    variable_usage_help(con, &mut declared, &mut used);

    used.remove(unsafe { &Variable::unsafe_test_debug_variable(1) });

    let mut used_vec: Vec<Variable> = used.into_iter().collect();
    used_vec.sort();

    declared.rigid_vars.sort();
    declared.flex_vars.sort();

    (declared, used_vec)
}
/// Recursively walks `con`, recording declared rigid/flex variables and
/// every variable used in a type or expectation.
fn variable_usage_help(con: &Constraint, declared: &mut SeenVariables, used: &mut ImSet<Variable>) {
    use Constraint::*;

    match con {
        True | SaveTheEnvironment => (),
        Eq(tipe, expectation, _) => {
            // Both the type and its expectation contribute used variables.
            for var in tipe.variables() {
                used.insert(var);
            }

            for var in expectation.get_type_ref().variables() {
                used.insert(var);
            }
        }
        Lookup(_, expectation, _) => {
            for var in expectation.get_type_ref().variables() {
                used.insert(var);
            }
        }
        Pattern(_, _, tipe, pexpectation) => {
            for var in tipe.variables() {
                used.insert(var);
            }

            for var in pexpectation.get_type_ref().variables() {
                used.insert(var);
            }
        }
        Let(letcon) => {
            // Let is the only constructor that declares variables.
            declared.rigid_vars.extend(letcon.rigid_vars.clone());
            declared.flex_vars.extend(letcon.flex_vars.clone());

            variable_usage_help(&letcon.defs_constraint, declared, used);
            variable_usage_help(&letcon.ret_constraint, declared, used);
        }
        And(constraints) => {
            for nested in constraints {
                variable_usage_help(nested, declared, used);
            }
        }
    }
}

View file

@ -6,9 +6,9 @@ extern crate pretty_assertions;
extern crate indoc;
extern crate bumpalo;
extern crate roc;
extern crate roc_collections;
extern crate roc_module;
extern crate roc_uniq;
mod helpers;
@ -18,10 +18,10 @@ mod test_usage_analysis {
use roc_collections::all::{ImMap, ImSet};
use roc_module::ident::Lowercase;
use roc_module::symbol::{Interns, Symbol};
use roc_uniqueness::sharing;
use roc_uniqueness::sharing::FieldAccess;
use roc_uniqueness::sharing::VarUsage;
use roc_uniqueness::sharing::{Container, Mark, Usage};
use roc_uniq::sharing;
use roc_uniq::sharing::FieldAccess;
use roc_uniq::sharing::VarUsage;
use roc_uniq::sharing::{Container, Mark, Usage};
use Container::*;
use Mark::*;
@ -196,7 +196,7 @@ mod test_usage_analysis {
loc_expr, interns, ..
} = can_expr(src);
use roc_uniqueness::sharing::annotate_usage;
use roc_uniq::sharing::annotate_usage;
let mut usage = VarUsage::default();
annotate_usage(&loc_expr.value, &mut usage);

View file

@ -1,18 +0,0 @@
Def {
loc_pattern: |L 0-0, C 0-1| Identifier('Test.Blah$q'),
loc_expr: |L 0-3, C 4-26| Closure(30, 'Test.Blah$2', Recursive, [(18, |L 0-0, C 5-6| Identifier('Test.Blah$x'))], (|L 1-3, C 8-26| When { cond_var: 19, expr_var: 29, loc_cond: |L 1-3, C 8-26| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$x', resolved_symbol: 'Test.Blah$x' }, branches: [(|L 2-2, C 12-13| IntLiteral(0), |L 1-3, C 8-26| Int(20, 0)), (|L 3-3, C 12-13| Underscore, |L 1-3, C 8-26| Call((27, |L 3-3, C 17-18| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$p', resolved_symbol: 'Test.Blah$p' }, 28), [(26, |L 3-3, C 20-25| Call((24, |L 3-3, C 22-23| Var { module: ModuleName("Num"), symbol_for_lookup: 'Num.sub', resolved_symbol: 'Num.sub' }, 25), [(21, |L 3-3, C 20-21| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$x', resolved_symbol: 'Test.Blah$x' }), (23, |L 3-3, C 24-25| Int(22, 1))], BinOp(Minus)))], Space))] }, 31)),
expr_var: 17,
pattern_vars: {
'Test.Blah$q': 17,
},
annotation: None,
},
Def {
loc_pattern: |L 5-5, C 0-1| Identifier('Test.Blah$p'),
loc_expr: |L 5-8, C 4-26| Closure(15, 'Test.Blah$1', Recursive, [(3, |L 5-5, C 5-6| Identifier('Test.Blah$x'))], (|L 6-8, C 8-26| When { cond_var: 4, expr_var: 14, loc_cond: |L 6-8, C 8-26| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$x', resolved_symbol: 'Test.Blah$x' }, branches: [(|L 7-7, C 12-13| IntLiteral(0), |L 6-8, C 8-26| Int(5, 0)), (|L 8-8, C 12-13| Underscore, |L 6-8, C 8-26| Call((12, |L 8-8, C 17-18| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$q', resolved_symbol: 'Test.Blah$q' }, 13), [(11, |L 8-8, C 20-25| Call((9, |L 8-8, C 22-23| Var { module: ModuleName("Num"), symbol_for_lookup: 'Num.sub', resolved_symbol: 'Num.sub' }, 10), [(6, |L 8-8, C 20-21| Var { module: ModuleName(""), symbol_for_lookup: 'Test.Blah$x', resolved_symbol: 'Test.Blah$x' }), (8, |L 8-8, C 24-25| Int(7, 1))], BinOp(Minus)))], Space))] }, 16)),
expr_var: 2,
pattern_vars: {
'Test.Blah$p': 2,
},
annotation: None,
},

View file

@ -1,9 +0,0 @@
Def {
loc_pattern: |L 0-0, C 0-1| Identifier(`Test.q`),
loc_expr: |L 0-3, C 4-26| Closure(30, `Test.1`, Recursive, [(18, |L 0-0, C 5-6| Identifier(`Test.x`))], (|L 1-3, C 8-26| When { cond_var: 19, expr_var: 29, loc_cond: |L 1-3, C 8-26| Var(`Test.x`), branches: [(WhenPattern { pattern: |L 2-2, C 12-13| IntLiteral(0), guard: None }, |L 1-3, C 8-26| Int(20, 0)), (WhenPattern { pattern: |L 3-3, C 12-13| Underscore, guard: None }, |L 1-3, C 8-26| Call((27, |L 3-3, C 17-18| Var(`Test.p`), 28), [(26, |L 3-3, C 20-25| Call((24, |L 3-3, C 22-23| Var(`Num.sub`), 25), [(21, |L 3-3, C 20-21| Var(`Test.x`)), (23, |L 3-3, C 24-25| Int(22, 1))], BinOp(Minus)))], Space))] }, 31)),
expr_var: 17,
pattern_vars: {
`Test.q`: 17,
},
annotation: None,
},

108
stdlib/Cargo.lock generated
View file

@ -1,108 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "difference"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "im-rc"
version = "13.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sized-chunks 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "pretty_assertions"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "roc-std"
version = "0.1.0"
dependencies = [
"im-rc 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "sized-chunks"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "typenum"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
"checksum im-rc 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0a0197597d095c0d11107975d3175173f810ee572c2501ff4de64f4f3f119806"
"checksum pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a029430f0d744bc3d15dd474d591bed2402b645d024583082b9f63bb936dac6"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum sized-chunks 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a2a2eb3fe454976eefb479f78f9b394d34d661b647c6326a3a6e66f68bb12c26"
"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View file

@ -1,11 +0,0 @@
[package]
name = "roc-std"
version = "0.1.0"
authors = ["Richard Feldman <oss@rtfeldman.com>"]
[dependencies]
im-rc = "13.0.0"
fxhash = "0.2.1"
[dev-dependencies]
pretty_assertions = "0.5.1"

View file

@ -1,150 +0,0 @@
use std::marker::PhantomData;
use std::hash::{Hash, Hasher};
use std::ops::{Add, Sub, Mul, Neg};
/// Approx is stored as an f64 under the hood.
///
/// However, part of Roc's design is that Roc users never encounter Infinity,
/// -Infinity, NaN, or -0. To Roc application authors, the only difference between
/// Approx and Frac is that Approx supports a few more operations (sqrt,
/// trigonometry, etc) and is potentially imprecise.
///
/// To achieve this, Roc maps all invalid Float values (NaN, Infinity, -Infinity) to
/// Err values. This means that any value which could contain one of these bit patterns
/// is typed as Result { ok: Approx, err: InvalidApprox }, including any Approx values
/// passed into Roc.
///
/// Roc code does not have the expressive power to create NaN, Infinity, or -Infinity,
/// so the Approx type inside Roc represents an f64 that is guaranteed not to be NaN,
/// Infinity, or -Infinity.
///
/// Additionally, the implementation detail of 0 and -0 being different f64 values does
/// not reach Roc code because there is no way to convert an Approx directly to a String.
/// Instead, Approx must go through conversion to either a Frac or an Int, neither of
/// which is capable of representing -0. In f64 operations, 0 and -0 are considered
/// equivalent, so the distinction does not matter there either.
pub struct Approximation<T> {
    // The wrapped float. Validity (finiteness) is tracked by the phantom type below.
    value: f64,
    // Zero-sized marker recording (at the type level) whether this value is known valid.
    phantom: PhantomData<T>
}
/// Type-level marker: an `Approximation<Valid>` is known to be finite (not NaN/±Infinity).
pub struct Valid;
/// Shorthand for a known-valid approximation.
pub type Approx = Approximation<Valid>;
impl Hash for Approx {
    /// Hashing is not yet implemented: this panics unconditionally.
    /// The sketched plan (commented out below) is to hash the float's
    /// integer decomposition, collapsing +0 and -0 into one representation.
    fn hash<H: Hasher>(&self, state: &mut H) {
        panic!("TODO: implement using integer_decode");
        // let (man, exp, sign) = f.integer_decode();
        // if man == 0 {
        //     // Consolidate the representation of zero, whether signed or not
        //     // The IEEE standard considers positive and negative zero to be equal
        //     0
        // } else {
        //     (man ^ ((exp as u64) << 48) ^ sign as u64)
        // }.hash(state)
    }
}
impl<T> Approximation<T> {
    /// Absolute value of the wrapped f64.
    /// Note this is an associated function taking `&Self`, not a method on `self`.
    pub fn abs(num: &Self) -> Self {
        Approximation { value: num.value.abs(), phantom: PhantomData }
    }

    /// Returns whether the approximation is valid (i.e. the f64 is finite).
    /// For an Approx<Valid>, this will always return true.
    #[inline(always)]
    pub fn is_valid(&self) -> bool {
        self.value.is_finite()
    }

    /// If the approximation is valid, return it wrapped in Some.
    /// Otherwise, return None.
    // NOTE(review): the type parameter `V` is unused and cannot be inferred,
    // so callers must write `into_valid::<Something>()`. Removing it would be
    // cleaner but would break existing turbofish call sites - confirm intent.
    pub fn into_valid<V>(self) -> Option<Approximation<Valid>> {
        if self.is_valid() {
            Some(Approximation {value: self.value, phantom: PhantomData})
        } else {
            None
        }
    }

    /// If the approximation is valid, return it with the type variable set accordingly.
    /// Otherwise, return the fallback value.
    /// Note: `fallback` is evaluated eagerly, even when the value is valid.
    pub fn valid_or<V>(self, fallback: V) -> Result<Approximation<Valid>, V> {
        if self.is_valid() {
            Ok(Approximation {value: self.value, phantom: PhantomData})
        } else {
            Err(fallback)
        }
    }

    /// Lazy variant of `valid_or`: `fallback_fn` is only called when invalid.
    pub fn valid_or_else<F, V>(self, fallback_fn: F) -> Result<Approximation<Valid>, V>
        where F: Fn() -> V
    {
        if self.is_valid() {
            Ok(Approximation {value: self.value, phantom: PhantomData})
        } else {
            Err(fallback_fn())
        }
    }
}
impl<Valid> From<f64> for Approximation<Valid> {
fn from(num: f64) -> Self {
if num.is_finite() {
Approximation { value: num, phantom: PhantomData }
} else {
panic!("Tried to convert {:?} to Float.", num)
}
}
}
impl<Valid> From<f32> for Approximation<Valid> {
fn from(num: f32) -> Self {
(num as f64).into()
}
}
impl<T> Into<f64> for Approximation<T> {
fn into(self) -> f64 {
self.value
}
}
impl Add for Approximation<Valid> {
    type Output = Approximation<Valid>;

    /// Sum two valid approximations, panicking if the result is non-finite.
    fn add(self, other: Self) -> Self {
        let sum = self.value + other.value;
        assert_no_overflow(sum, "addition")
    }
}
impl Mul for Approximation<Valid> {
    type Output = Approximation<Valid>;

    /// Multiply two valid approximations, panicking if the result is non-finite.
    fn mul(self, other: Self) -> Self {
        let product = self.value * other.value;
        assert_no_overflow(product, "multiplication")
    }
}
impl Sub for Approximation<Valid> {
    type Output = Approximation<Valid>;

    /// Subtract two valid approximations, panicking if the result is non-finite.
    fn sub(self, other: Self) -> Self {
        let difference = self.value - other.value;
        assert_no_overflow(difference, "subtraction")
    }
}
/// Wrap `num` as a valid Approximation, panicking with an op-specific
/// message if the arithmetic produced a non-finite value.
///
/// Fixed two compile errors in the original:
/// * `f64` has no `is_infinity()` / `is_negative_infinity()` methods; the
///   checks now compare against `std::f64::INFINITY` / `NEG_INFINITY`.
/// * `panic!("Float overflowed during ", op)` passed an argument with no
///   `{}` placeholder in the format string; placeholders added.
#[inline(always)]
fn assert_no_overflow(num: f64, op: &'static str) -> Approximation<Valid> {
    if num.is_finite() {
        Approximation { value: num, phantom: PhantomData }
    } else if num == std::f64::INFINITY {
        panic!("Float overflowed during {}", op)
    } else if num == std::f64::NEG_INFINITY {
        panic!("Float underflowed during {}", op)
    } else {
        // Not finite and not ±Infinity: must be NaN.
        panic!("Float was NaN during {}", op)
    }
}

View file

@ -1,2 +0,0 @@
/// A plain old bool. Roc's Bool maps directly onto Rust's `bool` with no wrapping.
pub type Bool = bool;

View file

@ -1,940 +0,0 @@
use std::cmp::{Eq, Ordering, PartialEq, PartialOrd};
use std::hash::{Hash, Hasher};
use std::ops::{Add, Sub, Mul, Neg};
use std::mem;
use std::fmt;
use std::marker::PhantomData;
/// Shorthand for a known-valid fraction (positive denominator).
pub type Frac = Fraction<Valid>;
#[derive(Clone, Copy)]
pub struct Fraction<T> {
    // Sign of the whole fraction lives here; the denominator is always positive when valid.
    numerator: i64,
    /// A positive denominator represents a valid, rational Fraction.
    /// A denominator of zero or lower represents an invalid Fraction. All operations
    /// on invalid Fractions have undefined behavior, so be careful not to perform
    /// operations on them if they get into that state!
    ///
    /// This design optimizes for runtime efficiency of Roc code, with the cost
    /// that it makes Frac more error-prone to use outside of Roc.
    ///
    /// * Roc will accept any pattern of bits in this struct, and will type them as
    ///   Result { ok: Frac, err: DivByZero }. Behind the scenes, a pattern match
    ///   on this result will map all positive denominators to Ok and all zero or negative
    ///   denominators to Err, so there is no extra memory cost to this being typed as Result.
    /// * Roc's reciprocal function and division operator return Result values as well. Because
    ///   of the previous point, these operations have no memory overhead either. If the denominator
    ///   ends up being zero after these operations, that will map to an Err value as normal.
    /// * Roc code does not have the expressive power to construct a Frac with a negative
    ///   denominator, so it is safe to assume within Roc code that this will never happen.
    ///
    /// Putting these together, it becomes efficient for Roc to assume any value of type Frac has a positive
    /// denominator, because it cannot be negative, and if it were zero it would have been wrapped in a Result.
    ///
    /// Outside Roc, it is possible to (for example) temporarily have a negative denominator and then later
    /// transition to a positive one. If you know about this and are planning for it, that's fine.
    /// However, it's important to remember if you pass a negative denominator to Roc, it will map to an Err value.
    ///
    /// The single source of truth for whether the fraction is positive or negative lies with the numerator.
    /// This is for two reasons:
    ///
    /// 1. It means the numerator is exactly an i64, so converting from Int -> Frac will never fail or lose precision.
    /// 2. It means checking for an error only needs to involve the denominator. It's possible to ask "is the denominator positive?"
    ///    and immediately know if the fraction is valid or not.
    ///
    /// Denominator is stored as an i64 because it supports the same range of positive numbers as i64.
    /// This prevents the need for conversions between u64.
    denominator: i64,
    /// The phantom type records whether the Frac is valid.
    /// A Frac with a positive denominator is valid, and all others are invalid.
    phantom: PhantomData<T>
}
/// Type-level marker: a `Fraction<Valid>` is known to have a positive denominator.
pub struct Valid;
/// Returns a new fraction.
///
/// Panics if given a denominator larger than the largest possible i64.
/// (Note: the `assert!` below runs in all build profiles, not only debug builds.)
///
/// Internally, Fraction stores the denominator as an i64 that must be positive.
/// The positivity guarantee allows Fraction to do comparisons faster, because
/// it can avoid accounting for the case where one fraction has both its
/// numerator and denominator negated, and the other has neither negated.
///
/// The denominator cannot be higher than std::i64::MAX because certain operations
/// (e.g. `reciprocal` and `div`) require swapping numerator and denominator,
/// and numerator is an i64.
///
/// A Fraction with a denominator of zero is invalid, and this function returns
/// a Fraction<U> because it is unknown whether the given denominator is zero.
/// To convert to a Fraction<Valid>, see `valid_or` and `valid_or_else`.
/// (Since `U` is unconstrained, callers typically need a type annotation.)
pub fn new<U>(numerator: i64, denominator: u64) -> Fraction<U> {
    assert!(denominator <= std::i64::MAX as u64);
    Fraction { numerator, denominator: denominator as i64, phantom: PhantomData }
}
/// Returns a new fraction. This is `unsafe` because it assumes, without checking,
/// that it was given a nonzero denominator. Never pass this a zero denominator!
///
/// NOTE(review): despite the wording above, the body *does* check - the
/// `assert_ne!`/`assert!` below run in all build profiles, so a zero or
/// oversized denominator panics even in release builds. Confirm whether these
/// were meant to be `debug_assert!`s (which would match the docs).
///
/// Panics if given a denominator of zero, or if the denominator is larger
/// than the largest possible i64.
///
/// Internally, Fraction stores the denominator as an i64 that must be positive.
/// The positivity guarantee allows Fraction to do comparisons faster, because
/// it can avoid accounting for the case where one fraction has both its
/// numerator and denominator negated, and the other has neither negated.
///
/// The denominator cannot be higher than std::i64::MAX because certain operations
/// (e.g. `reciprocal` and `div`) require swapping numerator and denominator,
/// and numerator is an i64.
///
/// A Fraction with a denominator of zero is invalid, and this function returns
/// a Fraction<Valid>, so it is important that this function never return a Fraction<Valid>
/// with a zero denominator. That would lead to undefined behavior!
pub unsafe fn unchecked_new(numerator: i64, denominator: u64) -> Fraction<Valid> {
    assert_ne!(denominator, 0);
    assert!(denominator <= std::i64::MAX as u64);
    Fraction { numerator, denominator: denominator as i64, phantom: PhantomData }
}
impl<T> Fraction<T> {
    #[inline]
    /// Returns a reduced copy of the fraction (numerator and denominator
    /// divided by their gcd). Note: despite the original comment saying
    /// "in place", this does NOT mutate self - it returns a new Fraction.
    pub fn reduced(&self) -> Self {
        let common_divisor = gcd(self.numerator, self.denominator);
        let numerator = self.numerator / common_divisor;
        let denominator = self.denominator / common_divisor;
        Fraction { numerator, denominator, phantom: PhantomData }
    }

    /// Returns true iff the denominator is positive, i.e. the fraction is valid.
    pub fn is_rational(&self) -> bool {
        self.denominator.is_positive()
    }

    #[inline]
    /// Reduces the fraction, then returns true iff the denominator is 1.
    pub fn is_integer(&self) -> bool {
        let common_divisor = gcd(self.numerator, self.denominator);
        let denominator = self.denominator / common_divisor;
        denominator == 1
    }

    #[inline]
    /// Returns true iff the numerator is zero, without reducing first.
    /// This is more efficient than getting the reduced numerator and then
    /// comparing it to 0, because that has to execute the reduce operation.
    pub fn is_zero(self) -> bool {
        self.numerator == 0
    }

    /// Absolute value. The sign lives in the numerator, so this takes
    /// `numerator.abs()`, with a reduce-and-retry fallback when the raw
    /// numerator is i64::MIN (whose absolute value overflows an i64).
    pub fn abs(&self) -> Self {
        match self.numerator.overflowing_abs() {
            (numerator, false) =>
                Fraction { numerator, denominator: self.denominator, phantom: PhantomData },
            (_, true) => {
                // We overflowed, so reduce and try again.
                // NOTE(review): if the reduced numerator is still i64::MIN
                // (e.g. i64::MIN / 1), this `.abs()` still overflows - confirm
                // whether callers in that situation should use checked_abs.
                let reduced_self = self.reduced();
                Fraction {
                    numerator: reduced_self.numerator.abs(),
                    denominator: reduced_self.denominator,
                    phantom: PhantomData
                }
            }
        }
    }

    /// Like `abs`, but returns None when the absolute value cannot be
    /// represented even after reducing.
    pub fn checked_abs(&self) -> Option<Self> {
        match self.numerator.overflowing_abs() {
            (numerator, false) =>
                Some(Fraction { numerator, denominator: self.denominator, phantom: PhantomData }),
            (_, true) => {
                // We overflowed, so reduce and try again.
                let reduced_self = self.reduced();
                match reduced_self.numerator.overflowing_abs() {
                    (numerator, false) => {
                        Some(Fraction { numerator, denominator: reduced_self.denominator, phantom: PhantomData })
                    },
                    (_, true) => None
                }
            }
        }
    }

    /// Like `abs`, but reports overflow via the returned bool
    /// (true means the reduced numerator still overflowed).
    pub fn overflowing_abs(&self) -> (Self, bool) {
        match self.numerator.overflowing_abs() {
            (numerator, false) =>
                (Fraction { numerator, denominator: self.denominator, phantom: PhantomData }, false),
            (_, true) => {
                let reduced_self = self.reduced();
                let (numerator, overflowed) = reduced_self.numerator.overflowing_abs();
                (Fraction { numerator, denominator: reduced_self.denominator, phantom: PhantomData }, overflowed)
            }
        }
    }

    /// Add two fractions, returning None on overflow. Note: overflow can occur on more than just large numerators!
    pub fn checked_add(&self, other: &Self) -> Option<Self> {
        if self.denominator == other.denominator {
            // Happy path - we get to skip calculating a common denominator!
            match self.numerator.overflowing_add(other.numerator) {
                (numerator, false) => Some(Fraction { numerator, denominator: self.denominator, phantom: PhantomData }),
                (_, true) => self.reducing_checked_add(other)
            }
        } else {
            // Fixed: the common denominator was previously computed with a
            // plain `*`, which panics in debug builds and silently wraps in
            // release builds (producing a wrong answer) when it overflows.
            // Detect overflow and fall back to the reducing path instead.
            let (common_denom, denom_overflowed) = self.denominator.overflowing_mul(other.denominator);
            if denom_overflowed {
                return self.reducing_checked_add(other);
            }
            match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                (self_numer, false) => {
                    match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                        (other_numer, false) => {
                            match self_numer.overflowing_add(other_numer) {
                                (numerator, false) =>
                                    Some(Fraction { numerator, denominator: common_denom, phantom: PhantomData }),
                                (_, true) =>
                                    None
                            }
                        }
                        // Scaled numerator overflowed - try reducing the inputs first.
                        (_, true) => self.reducing_checked_add(other)
                    }
                },
                // Scaled numerator overflowed - try reducing the inputs first.
                (_, true) => self.reducing_checked_add(other)
            }
        }
    }

    /// Add while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_add(&self, other: &Self) -> Self {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        let numerator =
            (reduced_self.numerator * (denominator / reduced_self.denominator))
            + (reduced_other.numerator * (denominator / reduced_other.denominator));
        Fraction { numerator, denominator, phantom: PhantomData }
    }

    /// Add while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_checked_add(&self, other: &Self) -> Option<Self> {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        match (denominator / reduced_self.denominator).overflowing_mul(reduced_self.numerator) {
            (self_numer, false) => {
                match (denominator / reduced_other.denominator).overflowing_mul(reduced_other.numerator) {
                    (other_numer, false) => {
                        match self_numer.overflowing_add(other_numer) {
                            (numerator, false) =>
                                Some(Fraction { numerator, denominator, phantom: PhantomData }),
                            (_, true) =>
                                None
                        }
                    },
                    (_, true) => None
                }
            },
            (_, true) => None
        }
    }

    /// Subtract two fractions, returning None on overflow. Note: overflow can occur on more than just large numerators!
    pub fn checked_sub(&self, other: &Self) -> Option<Self> {
        if self.denominator == other.denominator {
            // Happy path - we get to skip calculating a common denominator!
            match self.numerator.overflowing_sub(other.numerator) {
                (numerator, false) => Some(Fraction { numerator, denominator: self.denominator, phantom: PhantomData }),
                (_, true) => self.reducing_checked_sub(other)
            }
        } else {
            // Fixed: same unchecked-`*` overflow hazard as checked_add; see there.
            let (common_denom, denom_overflowed) = self.denominator.overflowing_mul(other.denominator);
            if denom_overflowed {
                return self.reducing_checked_sub(other);
            }
            match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                (self_numer, false) => {
                    match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                        (other_numer, false) => {
                            match self_numer.overflowing_sub(other_numer) {
                                (numerator, false) =>
                                    Some(Fraction { numerator, denominator: common_denom, phantom: PhantomData }),
                                (_, true) =>
                                    None
                            }
                        }
                        // Scaled numerator overflowed - try reducing the inputs first.
                        (_, true) => self.reducing_checked_sub(other)
                    }
                },
                // Scaled numerator overflowed - try reducing the inputs first.
                (_, true) => self.reducing_checked_sub(other)
            }
        }
    }

    /// Subtract while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    ///
    /// Fixed: the original ADDED the scaled numerators (a copy-paste of
    /// `reducing_add`), so `a - b` computed `a + b` whenever the fast path
    /// overflowed. It now subtracts, matching `reducing_checked_sub`.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_sub(&self, other: &Self) -> Self {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        let numerator =
            (reduced_self.numerator * (denominator / reduced_self.denominator))
            - (reduced_other.numerator * (denominator / reduced_other.denominator));
        Fraction { numerator, denominator, phantom: PhantomData }
    }

    /// Subtract while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_checked_sub(&self, other: &Self) -> Option<Self> {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        match (denominator / reduced_self.denominator).overflowing_mul(reduced_self.numerator) {
            (self_numer, false) => {
                match (denominator / reduced_other.denominator).overflowing_mul(reduced_other.numerator) {
                    (other_numer, false) => {
                        match self_numer.overflowing_sub(other_numer) {
                            (numerator, false) =>
                                Some(Fraction { numerator, denominator, phantom: PhantomData }),
                            (_, true) =>
                                None
                        }
                    },
                    (_, true) => None
                }
            },
            (_, true) => None
        }
    }

    /// Multiply two fractions, returning None on overflow. Note: overflow can occur on more than just large numerators!
    pub fn checked_mul(&self, other: &Self) -> Option<Self> {
        match self.numerator.checked_mul(other.numerator) {
            Some(numerator) => {
                // Common denominator is valuable. If we have it, try to preserve it!
                if self.denominator == other.denominator
                    // See if the denominator is evenly divisible by the new numerator.
                    // If it is, we can "pre-reduce" to the original denominator!
                    && numerator.checked_rem(self.denominator)
                        .map(|rem| rem == 0)
                        .unwrap_or(false)
                {
                    // TODO There's probably an optimization opportunity here. Check the
                    // generated instructions - there might be a way to use a division-with-remainder
                    // instruction, grab the remainder value out of the register, then
                    // do the test, and if it passes, grab the existing result of the division
                    // out of the other register without issuing a second division instruction.
                    Some(Fraction {
                        numerator: numerator / self.denominator,
                        denominator: self.denominator,
                        phantom: PhantomData
                    })
                } else {
                    match self.denominator.checked_mul(other.denominator) {
                        Some(denominator) => Some(Fraction { numerator, denominator, phantom: PhantomData }),
                        // Denominator overflowed. See if reducing the inputs helps!
                        None => self.reducing_checked_mul(other)
                    }
                }
            },
            // Numerator overflowed. See if reducing the inputs helps!
            None => self.reducing_checked_mul(other)
        }
    }

    /// Return the fraction with numerator and denominator swapped.
    ///
    /// Returns a Fraction with an unbound type variable because this can make it invalid;
    /// if the numerator was 0 before (which is valid), now the denominator will be 0 (which is invalid).
    pub fn reciprocal<V>(&self) -> Fraction<V> {
        let denominator = self.numerator;
        let numerator = self.denominator;
        // Make sure we don't end up with a negative denominator!
        if denominator.is_negative() {
            Fraction { numerator: -numerator, denominator: -denominator, phantom: PhantomData }
        } else {
            Fraction { numerator, denominator, phantom: PhantomData }
        }
    }

    /// Divide two fractions, returning None when dividing by zero or when
    /// overflow cannot be avoided even after reducing.
    // NOTE(review): the type parameter `U` is unused; it is kept so existing
    // turbofish call sites (`checked_div::<A, B>`) continue to compile.
    pub fn checked_div<U, V>(&self, other: &Self) -> Option<Fraction<V>> {
        // We're going to multiply by the reciprocal of `other`, so if its numerator
        // was 0, then the resulting fraction will have 0 for a denominator, so we're done.
        if other.numerator == 0 {
            return None;
        }
        match self.numerator.overflowing_mul(other.denominator) {
            (numerator, false) => {
                match self.denominator.overflowing_mul(other.numerator) {
                    (denominator, false) => {
                        // Make sure we don't end up with a negative denominator!
                        if denominator.is_negative() {
                            Some(Fraction { numerator: -numerator, denominator: -denominator, phantom: PhantomData })
                        } else {
                            Some(Fraction { numerator, denominator, phantom: PhantomData })
                        }
                    },
                    // Denominator overflowed. See if reducing the inputs helps!
                    (_, true) => self.reducing_checked_div(other)
                }
            },
            // Numerator overflowed. See if reducing the inputs helps!
            (_, true) => self.reducing_checked_div(other)
        }
    }

    /// Returns whether the fraction is valid.
    /// For a Frac<Valid>, this will always return true.
    #[inline(always)]
    pub fn is_valid(&self) -> bool {
        self.denominator.is_positive()
    }

    /// If the fraction is valid, return it wrapped in Some.
    /// Otherwise, return None.
    // NOTE(review): `V` is unused but kept for caller compatibility.
    pub fn into_valid<V>(self) -> Option<Fraction<Valid>> {
        if self.is_valid() {
            Some(Fraction {numerator: self.numerator, denominator: self.denominator, phantom: PhantomData})
        } else {
            None
        }
    }

    /// If the fraction is valid, return it with the type variable set accordingly.
    /// Otherwise, return the fallback value.
    /// Note: `fallback` is evaluated eagerly, even when the fraction is valid.
    pub fn valid_or<V>(self, fallback: V) -> Result<Fraction<Valid>, V> {
        if self.is_valid() {
            Ok(Fraction {numerator: self.numerator, denominator: self.denominator, phantom: PhantomData})
        } else {
            Err(fallback)
        }
    }

    /// Lazy variant of `valid_or`: `fallback_fn` is only called when invalid.
    pub fn valid_or_else<F, V>(self, fallback_fn: F) -> Result<Fraction<Valid>, V>
        where F: Fn() -> V
    {
        if self.is_valid() {
            Ok(Fraction {numerator: self.numerator, denominator: self.denominator, phantom: PhantomData})
        } else {
            Err(fallback_fn())
        }
    }

    /// Divide two fractions.
    ///
    /// This returns a Frac with an unbound type parameter because the result may not be valid.
    pub fn div<U, V>(&self, other: &Fraction<U>) -> Fraction<V> {
        match self.numerator.checked_mul(other.denominator) {
            Some(numerator) => {
                match self.denominator.checked_mul(other.numerator) {
                    Some(denominator) => {
                        // Make sure we don't end up with a negative denominator!
                        if denominator.is_negative() {
                            Fraction { numerator: -numerator, denominator: -denominator, phantom: PhantomData }
                        } else {
                            Fraction { numerator, denominator, phantom: PhantomData }
                        }
                    },
                    // Denominator overflowed. See if reducing the inputs helps!
                    None => self.reducing_div(other)
                }
            },
            // Numerator overflowed. See if reducing the inputs helps!
            None => self.reducing_div(other)
        }
    }

    /// Multiply while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_mul(&self, other: &Self) -> Self {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        // Preserving common denominator is out the window at this point.
        let numerator = reduced_self.numerator * reduced_other.numerator;
        let denominator = reduced_self.denominator * reduced_other.denominator;
        Fraction { numerator, denominator, phantom: PhantomData }
    }

    /// Multiply while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_checked_mul(&self, other: &Self) -> Option<Self> {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        // Preserving common denominator is out the window at this point.
        match reduced_self.numerator.overflowing_mul(reduced_other.numerator) {
            (numerator, false) => {
                match reduced_self.denominator.overflowing_mul(reduced_other.denominator) {
                    (denominator, false) =>
                        Some(Fraction { numerator, denominator, phantom: PhantomData }),
                    (_, true) =>
                        None
                }
            },
            (_, true) => None
        }
    }

    /// Divide while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    ///
    /// Fixed: the original multiplied numerator*numerator and
    /// denominator*denominator, computing (b*d)/(a*c) instead of the correct
    /// (a/b) / (c/d) == (a*d)/(b*c), and never normalized a negative
    /// denominator. It now mirrors `checked_div`'s happy path.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_checked_div<U, V>(&self, other: &Fraction<U>) -> Option<Fraction<V>> {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        // Preserving common denominator is out the window at this point.
        match reduced_self.numerator.overflowing_mul(reduced_other.denominator) {
            (numerator, false) => {
                match reduced_self.denominator.overflowing_mul(reduced_other.numerator) {
                    (denominator, false) => {
                        // Make sure we don't end up with a negative denominator!
                        if denominator.is_negative() {
                            Some(Fraction { numerator: -numerator, denominator: -denominator, phantom: PhantomData })
                        } else {
                            Some(Fraction { numerator, denominator, phantom: PhantomData })
                        }
                    },
                    (_, true) =>
                        None
                }
            },
            (_, true) => None
        }
    }

    /// Divide while sacrificing performance to avoid overflow.
    /// This should only be used as a fallback after an overflow was caught in a higher-perf arithmetic operation.
    ///
    /// Fixed: same numerator/denominator swap and missing sign normalization
    /// as `reducing_checked_div`; see the note there.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_div<U, V>(&self, other: &Fraction<U>) -> Fraction<V> {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let numerator = reduced_self.numerator * reduced_other.denominator;
        let denominator = reduced_self.denominator * reduced_other.numerator;
        // Make sure we don't end up with a negative denominator!
        if denominator.is_negative() {
            Fraction { numerator: -numerator, denominator: -denominator, phantom: PhantomData }
        } else {
            Fraction { numerator, denominator, phantom: PhantomData }
        }
    }

    /// Equality check that reduces both sides first; fallback for when the
    /// fast paths in PartialEq overflow.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_eq(&self, other: &Self) -> bool {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        let self_numerator = reduced_self.numerator * (denominator / reduced_self.denominator);
        let other_numerator = reduced_other.numerator * (denominator / reduced_other.denominator);
        self_numerator == other_numerator
    }

    /// Ordering that reduces both sides first; fallback for when the
    /// fast paths in Ord overflow.
    #[inline(never)] // We don't want to inline this because it should be almost never invoked in practice.
    fn reducing_cmp(&self, other: &Self) -> Ordering {
        let reduced_self = self.reduced();
        let reduced_other = other.reduced();
        let denominator = lcm(reduced_self.denominator, reduced_other.denominator);
        let self_numerator = reduced_self.numerator * (denominator / reduced_self.denominator);
        let other_numerator = reduced_other.numerator * (denominator / reduced_other.denominator);
        self_numerator.cmp(&other_numerator)
    }

    /// Raw (non-reduced) numerator.
    #[inline(always)]
    pub fn numerator(&self) -> i64 {
        self.numerator
    }

    /// Raw (non-reduced) denominator. Positive iff the fraction is valid.
    #[inline(always)]
    pub fn denominator(&self) -> i64 {
        self.denominator
    }
}
impl<T> fmt::Debug for Fraction<T> {
    /// Render as `numerator/denominator`, e.g. `3/4`, without reducing first.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{n}/{d}", n = self.numerator, d = self.denominator)
    }
}
impl<T> PartialEq for Fraction<T> {
    /// Structural-then-arithmetic equality: equal denominators compare
    /// numerators directly; otherwise both numerators are scaled to a common
    /// denominator, falling back to `reducing_eq` on any overflow.
    // NOTE(review): for invalid fractions (denominator == 0) with unequal
    // denominators, the `common_denom / denominator` divisions below divide
    // by zero and panic - confirm callers never compare invalid fractions.
    fn eq(&self, other: &Self) -> bool {
        if self.denominator == other.denominator {
            self.numerator.eq(&other.numerator)
        } else if self.numerator == 0 {
            // If numerator is 0, the whole fraction is 0.
            other.numerator == 0
        } else if other.numerator == 0 {
            // We couldn't have reached this branch if self.numerator == 0
            false
        } else {
            match self.denominator.overflowing_mul(other.denominator) {
                (common_denom, false) => {
                    match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                        (self_numer, false) => {
                            match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                                (other_numer, false) => self_numer.eq(&other_numer),
                                // other.numerator overflowed - try reducing the inputs first.
                                (_, true) => self.reducing_eq(other)
                            }
                        },
                        // self.numerator overflowed - try reducing the inputs first.
                        (_, true) => self.reducing_eq(other)
                    }
                }
                // Common denominator overflowed - try reducing the inputs first.
                (_, true) => self.reducing_eq(other)
            }
        }
    }
}
// Marker impl promoting PartialEq to a full equivalence relation.
// NOTE(review): PartialEq::eq can panic (division by zero) for invalid
// fractions with a zero denominator - confirm Eq's contract is acceptable there.
impl<T> Eq for Fraction<T> {}
/// We only have Ord for valid Fracs because potentially invalid ones are essentially equivalent
/// to Result<Frac, ()>. Defining Ord for that case too would mean all Ord implementations would
/// have to do extra checking for situations where either denominator is 0, which does not
/// seem worth the cost.
impl PartialOrd for Fraction<Valid> {
    /// Valid fractions are totally ordered, so this always returns Some.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let ordering = Ord::cmp(self, other);
        Some(ordering)
    }
}
impl Ord for Fraction<Valid> {
    /// Compare by scaling both numerators to a common denominator (both
    /// denominators are positive here, so scaling preserves order), with
    /// zero-numerator shortcuts and a `reducing_cmp` fallback on overflow.
    fn cmp(&self, other: &Self) -> Ordering {
        if self.denominator == other.denominator {
            self.numerator.cmp(&other.numerator)
        } else if self.numerator == 0 {
            // If numerator is 0, the whole fraction is 0.
            // Just compare numerators to see if the other one is 0, positive, or negative.
            0.cmp(&other.numerator)
        } else if other.numerator == 0 {
            self.numerator.cmp(&0)
        } else {
            match self.denominator.overflowing_mul(other.denominator) {
                (common_denom, false) => {
                    match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                        (self_numer, false) => {
                            match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                                (other_numer, false) => self_numer.cmp(&other_numer),
                                // other.numerator overflowed - try reducing the inputs first.
                                (_, true) => self.reducing_cmp(other)
                            }
                        },
                        // self.numerator overflowed - try reducing the inputs first.
                        (_, true) => self.reducing_cmp(other)
                    }
                }
                // Common denominator overflowed - try reducing the inputs first.
                (_, true) => self.reducing_cmp(other)
            }
        }
    }
}
impl Add for Fraction<Valid> {
    type Output = Fraction<Valid>;

    /// Add two fractions. Fast path: identical denominators add numerators
    /// directly. Otherwise both numerators are scaled to a (non-reduced)
    /// common denominator; any detected overflow falls back to `reducing_add`.
    fn add(self, other: Self) -> Self {
        if self.denominator == other.denominator {
            // Happy path - we get to skip calculating a common denominator!
            match self.numerator.overflowing_add(other.numerator) {
                (numerator, false) => Fraction { numerator, denominator: self.denominator, phantom: PhantomData },
                (_, true) => self.reducing_add(&other)
            }
        } else {
            // NOTE(review): this plain `*` can itself overflow (panic in
            // debug, silent wrap in release) before the overflowing_* checks
            // below run - confirm whether it should use overflowing_mul like
            // PartialEq::eq does.
            let common_denom: i64 = self.denominator * other.denominator;
            // This code would look nicer with checked_ instead of overflowing_, but
            // seems likely the perf would be worse.
            match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                (self_numer, false) => {
                    match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                        (other_numer, false) => {
                            match self_numer.overflowing_add(other_numer) {
                                (numerator, false) => Fraction { numerator, denominator: common_denom, phantom: PhantomData },
                                (_, true) => self.reducing_add(&other)
                            }
                        },
                        (_, true) => self.reducing_add(&other)
                    }
                },
                (_, true) => self.reducing_add(&other)
            }
        }
    }
}
impl Mul for Fraction<Valid> {
    type Output = Fraction<Valid>;

    /// Multiply two fractions. When both share a denominator and the product
    /// of numerators is divisible by it, the result is "pre-reduced" back to
    /// that shared denominator. Overflow falls back to `reducing_mul`.
    fn mul(self, other: Self) -> Self {
        match self.numerator.overflowing_mul(other.numerator) {
            (numerator, false) => {
                // Common denominator is valuable. If we have it, try to preserve it!
                if self.denominator == other.denominator
                    // See if the denominator is evenly divisible by the new numerator.
                    // If it is, we can "pre-reduce" to the original denominator!
                    // (Safe: a Valid denominator is positive, hence nonzero.)
                    && (numerator.overflowing_rem(self.denominator) == (0, false))
                {
                    // TODO There's probably an optimization opportunity here. Check the
                    // generated instructions - there might be a way to use a division-with-remainder
                    // instruction, grab the remainder value out of the register, then
                    // do the test, and if it passes, grab the existing result of the division
                    // out of the other register without issuing a second division instruction.
                    Fraction {
                        numerator: numerator / self.denominator,
                        denominator: self.denominator,
                        phantom: PhantomData
                    }
                } else {
                    match self.denominator.overflowing_mul(other.denominator) {
                        (denominator, false) => Fraction { numerator, denominator, phantom: PhantomData },
                        // Denominator overflowed. See if reducing the inputs helps!
                        (_, true) => self.reducing_mul(&other)
                    }
                }
            },
            // Numerator overflowed. See if reducing the inputs helps!
            (_, true) => self.reducing_mul(&other)
        }
    }
}
impl Sub for Fraction<Valid> {
    type Output = Fraction<Valid>;

    /// Subtract two fractions. Fast path: identical denominators subtract
    /// numerators directly. Otherwise both numerators are scaled to a common
    /// denominator; detected overflow falls back to `reducing_sub`.
    fn sub(self, other: Self) -> Self {
        if self.denominator == other.denominator {
            // Happy path - we get to skip calculating a common denominator!
            match self.numerator.overflowing_sub(other.numerator) {
                (numerator, false) => Fraction { numerator, denominator: self.denominator, phantom: PhantomData },
                (_, true) => self.reducing_sub(&other)
            }
        } else {
            // NOTE(review): this plain `*` can itself overflow (panic in
            // debug, silent wrap in release) before the overflowing_* checks
            // below run - confirm whether it should use overflowing_mul.
            let common_denom: i64 = self.denominator * other.denominator;
            // This code would look nicer with checked_ instead of overflowing_, but
            // seems likely the perf would be worse.
            match (common_denom / self.denominator).overflowing_mul(self.numerator) {
                (self_numer, false) => {
                    match (common_denom / other.denominator).overflowing_mul(other.numerator) {
                        (other_numer, false) => {
                            match self_numer.overflowing_sub(other_numer) {
                                (numerator, false) => Fraction { numerator, denominator: common_denom, phantom: PhantomData },
                                (_, true) => self.reducing_sub(&other)
                            }
                        },
                        (_, true) => self.reducing_sub(&other)
                    }
                },
                (_, true) => self.reducing_sub(&other)
            }
        }
    }
}
impl Hash for Fraction<Valid> {
fn hash<H: Hasher>(&self, state: &mut H) {
let reduced_self = self.reduced();
reduced_self.numerator.hash(state);
reduced_self.denominator.hash(state);
}
}
impl<T> Neg for Fraction<T> {
    type Output = Fraction<T>;
    /// Negate a fraction by flipping the sign of its numerator.
    ///
    /// NOTE(review): `-numerator` overflows when the numerator is `i64::MIN`
    /// (panic in debug, wrap in release) — confirm callers can't hit that.
    fn neg(self) -> Self {
        let Fraction { numerator, denominator, .. } = self;
        Fraction { numerator: -numerator, denominator, phantom: PhantomData }
    }
}
impl<T> From<u8> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from a u8.
    fn from (numerator: u8) -> Fraction<T> {
        // i64::from is a guaranteed-lossless widening conversion; preferred
        // over an `as` cast, which can silently truncate if types change.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<i8> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from an i8.
    fn from (numerator: i8) -> Fraction<T> {
        // Lossless widening via i64::from instead of an `as` cast.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<u16> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from a u16.
    fn from (numerator: u16) -> Fraction<T> {
        // Lossless widening via i64::from instead of an `as` cast.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<i16> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from an i16.
    fn from (numerator: i16) -> Fraction<T> {
        // Lossless widening via i64::from instead of an `as` cast.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<u32> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from a u32.
    fn from (numerator: u32) -> Fraction<T> {
        // Lossless widening via i64::from instead of an `as` cast.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<i32> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from an i32.
    fn from (numerator: i32) -> Fraction<T> {
        // Lossless widening via i64::from instead of an `as` cast.
        Fraction { numerator: i64::from(numerator), denominator: 1, phantom: PhantomData }
    }
}
impl<T> From<i64> for Fraction<T> {
    /// Build a whole-number fraction (denominator 1) from an i64.
    fn from (numerator: i64) -> Fraction<T> {
        Fraction { denominator: 1, numerator, phantom: PhantomData }
    }
}
/// This function was adapted from v0.1.39 of the num-integer crate. Licensed under the
/// Apache License, version 2.0. A full copy of the License can be found here:
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// The original code can be found at:
/// https://docs.rs/num-integer/0.1.41/src/num_integer/lib.rs.html#456-500
///
///
/// Calculates the Greatest Common Divisor (GCD) of the number and
/// `other`. The result is always positive.
#[inline]
fn gcd(me: i64, other: i64) -> i64 {
    // Stein's (binary GCD) algorithm.
    let mut a = me;
    let mut b = other;
    // gcd(x, 0) == |x|, which also covers gcd(0, 0) == 0.
    if a == 0 || b == 0 { return (a | b).abs() }
    // Count the factors of two shared by both inputs.
    let common_twos = (a | b).trailing_zeros();
    // The rest of the algorithm needs positive values, but i64::MIN has no
    // positive counterpart (.abs() on it panics in debug builds). It is a
    // power of two, though, so in that case the gcd is just the shared
    // power-of-two factor computed by shifting.
    if a == i64::min_value() || b == i64::min_value() {
        return ((1 << common_twos) as i64).abs()
    }
    // Both values are now safely representable as positives; proceed as in
    // the unsigned algorithm.
    a = a.abs();
    b = b.abs();
    // Strip remaining factors of two until odd; `a` is stripped at the top
    // of each loop iteration.
    b >>= b.trailing_zeros();
    while a != 0 {
        a >>= a.trailing_zeros();
        if b > a { mem::swap(&mut b, &mut a) }
        a -= b;
    }
    // Restore the shared factors of two.
    b << common_twos
}
/// Lowest common multiple of `me` and `other`.
fn lcm(me: i64, other: i64) -> i64 {
    // BUGFIX: lcm(0, 0) is 0 by convention; without this guard, gcd(0, 0) == 0
    // makes the division below panic with a divide-by-zero. (This matches the
    // fix num-integer shipped in 0.1.40.)
    if me == 0 && other == 0 {
        return 0;
    }
    // Dividing `other` by the gcd before multiplying keeps the intermediate
    // value smaller, reducing (but not eliminating) overflow risk.
    me * (other / gcd(me, other))
}
#[cfg(test)]
mod test_fast_fraction {
    use super::Frac;
    /// Test helper: build a `Frac` from a signed numerator and an unsigned
    /// denominator via the module-level constructor.
    pub fn frac(numerator: i64, denominator: u64) -> Frac {
        super::new(numerator, denominator)
    }
    #[test]
    fn one_plus_one() {
        assert_eq!(
            frac(1, 1) + frac(1, 1),
            frac(2, 1)
        );
    }
    #[test]
    fn point_one_plus_point_two() {
        // Same-denominator addition: 1/10 + 2/10 = 3/10.
        assert_eq!(
            frac(1, 10) + frac(2, 10),
            frac(3, 10)
        );
    }
    #[test]
    fn one_minus_one() {
        // NOTE(review): 0/1 is asserted equal to 0/9999, so equality presumably
        // compares reduced forms — confirm against the PartialEq impl.
        assert_eq!(
            frac(1, 1) - frac(1, 1),
            frac(0, 9999)
        );
    }
    #[test]
    fn multiply() {
        // Coprime inputs: 2/3 * 5/7 = 10/21, no reduction expected.
        assert_eq!(
            frac(2, 3) * frac(5, 7),
            frac(10, 21)
        );
    }
    #[test]
    fn divide() {
        // Division multiplies by the reciprocal: (2/3) / (5/7) = 14/15.
        assert_eq!(
            frac(2, 3).div(&frac(5, 7)),
            frac(14, 15)
        );
    }
}

View file

@ -1,20 +0,0 @@
/// An i64 that always panics on overflow.
pub struct Int(i64);

impl Int {
    /// Absolute value.
    ///
    /// # Panics
    ///
    /// Panics when the value is `i64::MIN`, whose absolute value is not
    /// representable in an i64.
    pub fn abs(&self) -> Self {
        let Int(int_self) = self;
        // BUGFIX (terminology): `overflowing_abs` reports *overflow* — the
        // result i64::MIN.abs() exceeds i64::MAX — not underflow. The helper
        // and its message previously said "underflow".
        let (output, overflowed) = int_self.overflowing_abs();
        if overflowed {
            overflow_panic();
        }
        Int(output)
    }
}

/// Diverging helper: unconditionally panics to report an overflow.
fn overflow_panic() -> ! {
    panic!("Overflow!");
}

View file

@ -1,9 +0,0 @@
pub mod approx;
pub mod default;
pub mod frac;
pub mod int;
pub mod map;
pub mod set;
extern crate im_rc;
extern crate wyhash;

View file

@ -1,15 +0,0 @@
use std::hash::BuildHasherDefault;

use im_rc::hashmap::HashMap;
use im_rc::vector::Vector;
use wyhash::WyHash;
/// A persistent HashMap which records insertion order and iterates in that order.
pub struct Map<K, V> {
store: HashMap<K, V, BuildHasherDefault<WyHash>>;
order: Vector<K>
}
impl<K, V> Map<K, V> {
    /// Returns true if the map holds no entries.
    ///
    /// BUGFIX: takes `&self` instead of `self` — a read-only query should not
    /// consume the map. Method-call syntax keeps existing callers working.
    pub fn is_empty(&self) -> bool {
        self.store.is_empty()
    }
}

View file

@ -1,42 +0,0 @@
use im_rc::hashset::HashSet;
use std::hash::{Hash};
use im_rc::vector::Vector;
/// A persistent Set which records insertion order and iterates in that order.
pub struct Set<K> {
    // Membership store. NOTE(review): unlike `Map`, this uses the default
    // hasher rather than WyHash — confirm whether that inconsistency is intended.
    store: HashSet<K>,
    // Elements in insertion order.
    order: Vector<K>
}
impl<K> Set<K>
where K : std::hash::Hash
{
pub fn is_empty(self) -> bool {
self.store.is_empty()
}
pub fn insert<B>(self, elem: K) -> Set<B>
{
let mut new_set: Set<K> = self.clone();
new_set.store.insert(elem);
new_set.order.insert(elem);
new_set
}
pub fn map<F, B>(self, transform: F) -> Set<B>
where F: Fn(K) -> B,
B: Hash
{
let mut new_set: Set<B> = Set::new();
for elem in self.order.iter() {
if self.store.contains(elem) {
new_set.insert(transform(elem))
}
}
new_set
}
}

4
vendor/README.md vendored Normal file
View file

@ -0,0 +1,4 @@
## Vendored code
These are files that were originally obtained somewhere else (e.g. crates.io)
but which we needed to fork for some Roc-specific reason.