Mirror of https://github.com/roc-lang/roc.git, synced 2025-11-02 05:48:17 +00:00

Commit 1ba4d9d735 (parent c622cebea6): WIP

6 changed files with 356 additions and 203 deletions
@@ -3399,6 +3399,7 @@ fn finish_specialization<'a>(
    let module_id = platform_data.as_ref().unwrap().module_id;

    for (_name, proc_layout) in exposed_to_host.iter() {
        dbg!(_name);
        let ret = &proc_layout.result;
        for in_layout in proc_layout.arguments.iter().chain([ret]) {
            let layout = layout_interner.get(*in_layout);
@@ -36,6 +36,7 @@ use roc_types::subs::{
    instantiate_rigids, storage_copy_var_to, Content, ExhaustiveMark, FlatType, RedundantMark,
    StorageSubs, Subs, Variable, VariableSubsSlice,
};
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use ven_pretty::{BoxAllocator, DocAllocator, DocBuilder};
@@ -11059,9 +11060,12 @@ pub struct GlueProc<'a> {

pub struct GlueProcs<'a> {
    pub getters: Vec<'a, (Layout<'a>, Vec<'a, GlueProc<'a>>)>,
    pub extern_names: Vec<'a, (LambdaSet<'a>, String)>,
    pub extern_names: Vec<'a, (LambdaSetPathHash, String)>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct LambdaSetPathHash(u64);

pub fn generate_glue_procs<'a, 'i, I>(
    home: ModuleId,
    interns: &mut Interns,
@@ -11072,16 +11076,21 @@ pub fn generate_glue_procs<'a, 'i, I>(
where
    I: LayoutInterner<'a>,
{
    use std::hash::Hasher;

    let mut answer = GlueProcs {
        getters: Vec::new_in(arena),
        extern_names: Vec::new_in(arena),
    };

    let mut stack: Vec<'a, Layout<'a>> = Vec::from_iter_in([*layout], arena);
    let mut hasher = DefaultHasher::default();

    let mut stack: Vec<'a, (Layout<'a>, DefaultHasher)> =
        Vec::from_iter_in([(*layout, hasher)], arena);
    let mut next_unique_id = 0;

    macro_rules! handle_struct_field_layouts {
        ($field_layouts: expr) => {{
        ($hasher:expr, $field_layouts: expr) => {{
            if $field_layouts.iter().any(|l| {
                layout_interner
                    .get(*l)
@@ -11100,12 +11109,16 @@ where
                answer.getters.push((*layout, procs));
            }

            stack.extend($field_layouts.iter().map(|i| layout_interner.get(*i)));
            for (i, in_layout) in $field_layouts.iter().enumerate() {
                let mut hasher = $hasher.clone();
                hasher.write(i.to_ne_bytes().as_slice());
                stack.push((layout_interner.get(*in_layout), hasher));
            }
        }};
    }

    macro_rules! handle_tag_field_layouts {
        ($tag_id:expr, $union_layout:expr, $field_layouts: expr) => {{
        ($hasher:expr, $tag_id:expr, $union_layout:expr, $field_layouts: expr) => {{
            if $field_layouts.iter().any(|l| {
                layout_interner
                    .get(*l)
@@ -11126,11 +11139,15 @@ where
                answer.getters.push((*layout, procs));
            }

            stack.extend($field_layouts.iter().map(|i| layout_interner.get(*i)));
            for (i, in_layout) in $field_layouts.iter().enumerate() {
                let mut hasher = $hasher.clone();
                hasher.write(i.to_ne_bytes().as_slice());
                stack.push((layout_interner.get(*in_layout), hasher));
            }
        }};
    }

    while let Some(layout) = stack.pop() {
    while let Some((layout, mut hasher)) = stack.pop() {
        match layout {
            Layout::Builtin(builtin) => match builtin {
                Builtin::Int(_)
@@ -11138,42 +11155,78 @@ where
                | Builtin::Bool
                | Builtin::Decimal
                | Builtin::Str => { /* do nothing */ }
                Builtin::List(element) => stack.push(layout_interner.get(element)),
                Builtin::List(element) => {
                    hasher.write(b"List");
                    stack.push((layout_interner.get(element), hasher))
                }
            },
            Layout::Struct { field_layouts, .. } => {
                handle_struct_field_layouts!(field_layouts);
                handle_struct_field_layouts!(hasher, field_layouts);
            }
            Layout::Boxed(boxed) => {
                hasher.write(b"Boxed");
                stack.push((layout_interner.get(boxed), hasher));
            }
            Layout::Boxed(boxed) => stack.push(layout_interner.get(boxed)),
            Layout::Union(union_layout) => match union_layout {
                UnionLayout::NonRecursive(tags) | UnionLayout::Recursive(tags) => {
                    for tag in tags.iter() {
                        stack.extend(tag.iter().map(|i| layout_interner.get(*i)));
                UnionLayout::NonRecursive(tags) => {
                    hasher.write(b"NonRecursive");
                    for (i, in_layout) in tags.iter().flat_map(|e| e.iter()).enumerate() {
                        let mut hasher = hasher.clone();
                        hasher.write(i.to_ne_bytes().as_slice());
                        stack.push((layout_interner.get(*in_layout), hasher));
                    }
                }
                UnionLayout::Recursive(tags) => {
                    hasher.write(b"Recursive");
                    for (i, in_layout) in tags.iter().flat_map(|e| e.iter()).enumerate() {
                        let mut hasher = hasher.clone();
                        hasher.write(i.to_ne_bytes().as_slice());
                        stack.push((layout_interner.get(*in_layout), hasher));
                    }
                }
                UnionLayout::NonNullableUnwrapped(field_layouts) => {
                    handle_tag_field_layouts!(0, union_layout, field_layouts);
                    hasher.write(b"NonNullableUnwrapped");
                    handle_tag_field_layouts!(hasher, 0, union_layout, field_layouts);
                }
                UnionLayout::NullableWrapped {
                    other_tags,
                    nullable_id,
                } => {
                    hasher.write(b"NullableWrapped");
                    let tag_ids =
                        (0..nullable_id).chain(nullable_id + 1..other_tags.len() as u16 + 1);
                    for (i, field_layouts) in tag_ids.zip(other_tags) {
                        handle_tag_field_layouts!(i, union_layout, *field_layouts);
                        handle_tag_field_layouts!(hasher, i, union_layout, *field_layouts);
                    }
                }
                UnionLayout::NullableUnwrapped { other_fields, .. } => {
                    stack.extend(other_fields.iter().map(|i| layout_interner.get(*i)));
                    hasher.write(b"NullableUnwrapped");
                    for (i, in_layout) in other_fields.iter().enumerate() {
                        let mut hasher = hasher.clone();
                        hasher.write(i.to_ne_bytes().as_slice());
                        stack.push((layout_interner.get(*in_layout), hasher));
                    }
                }
            },
            Layout::LambdaSet(lambda_set) => {
                hasher.write(b"LambdaSet");

                let symbol = unique_glue_symbol(arena, &mut next_unique_id, home, interns);
                let string = String::from(symbol.as_str(interns));
                answer.extern_names.push((lambda_set, string));

                let path_hash = LambdaSetPathHash(hasher.finish());
                dbg!(path_hash);
                answer.extern_names.push((path_hash, string));

                // let alloc = ven_pretty::Arena::<()>::new();
                // let doc = layout.to_doc(&alloc, layout_interner, Parens::NotNeeded);
                // dbg!(doc.1.pretty(100).to_string());

                // TODO generate closure caller
                stack.push(layout_interner.get(lambda_set.runtime_representation()))
                stack.push((
                    layout_interner.get(lambda_set.runtime_representation()),
                    hasher,
                ));
            }
            Layout::RecursivePointer => {
                /* do nothing, we've already generated for this type through the Union(_) */
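The hunks above thread a DefaultHasher through the layout traversal so that each lambda set can be keyed by a hash of the structural path that reaches it (a byte tag per layout kind plus the index of each field along the way) instead of by the LambdaSet value itself: the hasher is cloned for every child layout, the child's index or kind is written into the clone, and finish() is called once a Layout::LambdaSet is reached. The following standalone sketch illustrates only that path-hashing idea; the Step enum and path_hash helper are illustrative and are not part of this commit.

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// Illustrative stand-in for "one step along the path into a layout".
enum Step<'a> {
    Kind(&'a [u8]), // e.g. b"List", b"Boxed", b"NonRecursive", b"LambdaSet"
    Index(usize),   // position of a field within a struct or tag payload
}

// Hash a whole path the way the traversal above does: write each step into
// the hasher and call finish() when a lambda set is reached. Equal paths
// produce equal u64 keys; sibling fields diverge because their indices differ.
fn path_hash(steps: &[Step]) -> u64 {
    let mut hasher = DefaultHasher::default();
    for step in steps {
        match step {
            Step::Kind(tag) => hasher.write(tag),
            Step::Index(i) => hasher.write(i.to_ne_bytes().as_slice()),
        }
    }
    hasher.finish()
}

fn main() {
    let a = path_hash(&[Step::Kind(b"NonRecursive"), Step::Index(0), Step::Kind(b"LambdaSet")]);
    let b = path_hash(&[Step::Kind(b"NonRecursive"), Step::Index(1), Step::Kind(b"LambdaSet")]);
    assert_ne!(a, b); // two different paths get distinct identifiers
}

Note that DefaultHasher's output is only stable within a single process and Rust version, so these hashes identify lambda sets within one compilation run rather than serving as persistent keys.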
@@ -8,6 +8,7 @@ use roc_mono::layout::{GlobalLayoutInterner, LayoutCache, LayoutInterner};
use roc_packaging::cache::{self, RocCacheDir};
use roc_reporting::report::{RenderTarget, DEFAULT_PALETTE};
use roc_target::{Architecture, TargetInfo};
use roc_types::subs::{Subs, Variable};
use std::fs::File;
use std::io::{self, ErrorKind, Write};
use std::path::{Path, PathBuf};
@@ -79,6 +80,119 @@ pub fn generate(input_path: &Path, output_path: &Path) -> io::Result<i32> {
    }
}

fn number_lambda_sets(subs: &Subs, initial: Variable) -> Vec<Variable> {
    let mut lambda_sets = vec![];
    let mut stack = vec![initial];

    macro_rules! var_slice {
        ($variable_subs_slice:expr) => {{
            let slice = $variable_subs_slice;
            subs.variables[slice.indices()].iter().rev()
        }};
    }

    while let Some(var) = stack.pop() {
        use roc_types::subs::Content::*;
        use roc_types::subs::FlatType::*;

        use roc_types::subs::GetSubsSlice;
        use roc_types::types::Uls;

        match subs.get_content_without_compacting(var) {
            RigidVar(_) | RigidAbleVar(_, _) | FlexVar(_) | FlexAbleVar(_, _) | Error => (),

            RecursionVar { structure, .. } => {
                // can we skip this?
                // stack.push(*structure);
            }

            Structure(flat_type) => match flat_type {
                Apply(_, args) => {
                    stack.extend(var_slice!(*args));
                }

                Func(arg_vars, closure_var, ret_var) => {
                    lambda_sets.push(subs.get_root_key_without_compacting(*closure_var));

                    stack.push(*ret_var);
                    stack.push(*closure_var);
                    stack.extend(var_slice!(arg_vars));
                }

                EmptyRecord => (),
                EmptyTagUnion => (),

                Record(fields, ext) => {
                    let fields = *fields;
                    let ext = *ext;

                    stack.push(ext);
                    stack.extend(var_slice!(fields.variables()));
                }
                TagUnion(tags, ext) => {
                    let tags = *tags;
                    let ext = *ext;

                    stack.push(ext.var());

                    for slice_index in tags.variables() {
                        let slice = subs.variable_slices[slice_index.index as usize];
                        stack.extend(var_slice!(slice));
                    }
                }
                FunctionOrTagUnion(_, _, ext) => {
                    stack.push(ext.var());
                }

                RecursiveTagUnion(rec_var, tags, ext) => {
                    let tags = *tags;
                    let ext = *ext;
                    let rec_var = *rec_var;

                    stack.push(ext.var());

                    for slice_index in tags.variables() {
                        let slice = subs.variable_slices[slice_index.index as usize];
                        stack.extend(var_slice!(slice));
                    }

                    stack.push(rec_var);
                }
            },
            Alias(_, args, var, _) => {
                let var = *var;
                let args = *args;

                stack.extend(var_slice!(args.all_variables()));

                stack.push(var);
            }
            LambdaSet(roc_types::subs::LambdaSet {
                solved,
                recursion_var,
                unspecialized,
                ambient_function: _,
            }) => {
                for slice_index in solved.variables() {
                    let slice = subs.variable_slices[slice_index.index as usize];
                    stack.extend(var_slice!(slice));
                }

                if let Some(rec_var) = recursion_var.into_variable() {
                    stack.push(rec_var);
                }

                for Uls(var, _, _) in subs.get_subs_slice(*unspecialized) {
                    stack.push(*var);
                }
            }
            &RangedNumber(_) => {}
        }
    }

    lambda_sets
}

pub fn load_types(
    full_file_path: PathBuf,
    threading: Threading,
@@ -155,16 +269,23 @@ pub fn load_types(
    };
    let mut layout_cache = LayoutCache::new(layout_interner.fork(), target_info);
    let mut glue_procs_by_layout = MutMap::default();

    let mut extern_names = MutMap::default();

    // Populate glue getters/setters for all relevant variables
    for var in variables.clone() {
        for (i, v) in number_lambda_sets(subs, var).iter().enumerate() {
            extern_names.insert(*v, i.to_string());
        }

        let in_layout = layout_cache
            .from_var(arena, var, subs)
            .expect("Something weird ended up in the content");

        let layout = layout_cache.interner.get(in_layout);

        // dbg!(layout);

        if layout.has_varying_stack_size(&layout_cache.interner, arena) {
            let answer = generate_glue_procs(
                home,
@@ -174,8 +295,6 @@ pub fn load_types(
                arena.alloc(layout),
            );

            extern_names.extend(answer.extern_names);

            // Even though generate_glue_procs does more work than we need it to,
            // it's important that we use it in order to make sure we get exactly
            // the same names that mono::ir did for code gen!
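The comment above is the key constraint on this refactor: whatever naming scheme glue settles on for the closure callers, it must reproduce exactly the symbol names that mono::ir emitted during code generation, otherwise the extern "C" declarations written into the host bindings will not resolve at link time. A hypothetical check expressing that invariant (check_name_agreement and both name lists are illustrative only, not part of this commit):

use std::collections::HashSet;

// The extern symbols glue writes into the host bindings must all be symbols
// that code gen actually produced for the app, or linking fails.
fn check_name_agreement(glue_names: &[String], codegen_names: &[String]) {
    let glue: HashSet<&str> = glue_names.iter().map(String::as_str).collect();
    let emitted: HashSet<&str> = codegen_names.iter().map(String::as_str).collect();
    assert!(
        glue.is_subset(&emitted),
        "glue declared extern symbols that code gen never emitted"
    );
}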
@@ -11,9 +11,12 @@ use roc_module::{
    ident::TagName,
    symbol::{Interns, Symbol},
};
use roc_mono::layout::{
    cmp_fields, ext_var_is_empty_tag_union, round_up_to_alignment, Builtin, Discriminant, InLayout,
    LambdaSet, Layout, LayoutCache, LayoutInterner, TLLayoutInterner, UnionLayout,
use roc_mono::{
    ir::LambdaSetPathHash,
    layout::{
        cmp_fields, ext_var_is_empty_tag_union, round_up_to_alignment, Builtin, Discriminant,
        InLayout, Layout, LayoutCache, LayoutInterner, TLLayoutInterner, UnionLayout,
    },
};
use roc_target::TargetInfo;
use roc_types::{
@@ -72,7 +75,7 @@ impl Types {
        variables: I,
        interns: &'a Interns,
        glue_procs_by_layout: MutMap<Layout<'a>, &'a [String]>,
        extern_names: MutMap<LambdaSet<'a>, String>,
        extern_names: MutMap<Variable, String>,
        layout_cache: LayoutCache<'a>,
        target: TargetInfo,
    ) -> Self {
@@ -823,7 +826,7 @@ struct Env<'a> {
    subs: &'a Subs,
    layout_cache: LayoutCache<'a>,
    glue_procs_by_layout: MutMap<Layout<'a>, &'a [String]>,
    extern_names: MutMap<LambdaSet<'a>, String>,
    extern_names: MutMap<Variable, String>,
    interns: &'a Interns,
    struct_names: Structs,
    enum_names: Enums,
@@ -839,7 +842,7 @@ impl<'a> Env<'a> {
        interns: &'a Interns,
        layout_interner: TLLayoutInterner<'a>,
        glue_procs_by_layout: MutMap<Layout<'a>, &'a [String]>,
        extern_names: MutMap<LambdaSet<'a>, String>,
        extern_names: MutMap<Variable, String>,
        target: TargetInfo,
    ) -> Self {
        Env {
@@ -872,17 +875,6 @@ impl<'a> Env<'a> {
        types
    }

    fn add_type(&mut self, var: Variable, types: &mut Types) -> TypeId {
        roc_tracing::debug!(content=?roc_types::subs::SubsFmtContent(self.subs.get_content_without_compacting(var), self.subs), "adding type");

        let layout = self
            .layout_cache
            .from_var(self.arena, var, self.subs)
            .expect("Something weird ended up in the content");

        add_type_help(self, layout, var, None, types)
    }

    fn resolve_pending_recursive_types(&mut self, types: &mut Types) {
        // TODO if VecMap gets a drain() method, use that instead of doing take() and into_iter
        let pending = core::mem::take(&mut self.pending_recursive_types);
@@ -909,6 +901,17 @@ impl<'a> Env<'a> {
            types.replace(type_id, RocType::RecursivePointer(actual_type_id));
        }
    }

    fn add_type(&mut self, var: Variable, types: &mut Types) -> TypeId {
        roc_tracing::debug!(content=?roc_types::subs::SubsFmtContent(self.subs.get_content_without_compacting(var), self.subs), "adding type");

        let layout = self
            .layout_cache
            .from_var(self.arena, var, self.subs)
            .expect("Something weird ended up in the content");

        add_type_help(self, layout, var, None, types)
    }
}

fn add_type_help<'a>(
@@ -1002,8 +1005,9 @@ fn add_type_help<'a>(
                _ => unreachable!(),
            };

            let extern_name = String::from("roc__mainForHost_1__Fx2_caller");
            // let extern_name = env.extern_names.get(&lambda_set).cloned().unwrap();
            let extern_name = env.extern_names.get(closure_var).cloned().unwrap();
            // let extern_name = String::from("roc__mainForHost_1__Fx1_caller");
            let extern_name = format!("roc__mainForHost_1__Fx{}_caller", extern_name);

            for arg_var in args {
                let arg_layout = env
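For context, the lookup above pairs the closure variable with the index string that number_lambda_sets assigned to it in load_types, then splices that index into the caller symbol. A tiny worked example of just the formatting step, assuming the closure variable was numbered 0:

// env.extern_names maps the closure Variable to an index string such as "0".
let index = String::from("0");
let extern_name = format!("roc__mainForHost_1__Fx{}_caller", index);
assert_eq!(extern_name, "roc__mainForHost_1__Fx0_caller");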
@@ -6,7 +6,7 @@ platform "echo-in-rust"
    provides [mainForHost]


mainForHost : [StdoutWrite Str (({} -> Op) as Fx1), StderrWrite Str (({} -> Op) as Fx2), Done] as Op
mainForHost : [StdoutWrite Str (({} -> Op) as Fx0), StderrWrite Str (({} -> Op) as Fx1), Done] as Op
mainForHost = main

# mainForHost : { x: Str, y: {} -> Str }
@@ -59,7 +59,6 @@ pub struct Op {
    pointer: *mut union_Op,
}


#[cfg(any(
    target_arch = "arm",
    target_arch = "aarch64",
@@ -87,7 +86,6 @@ union union_Op {
    target_arch = "x86_64"
))]
//TODO HAS CLOSURE 2

#[cfg(any(
    target_arch = "arm",
    target_arch = "aarch64",
@@ -95,7 +93,6 @@ union union_Op {
    target_arch = "x86",
    target_arch = "x86_64"
))]

#[repr(C)]
pub struct RocFunction_65 {
    pub closure_data: *mut u8,
@@ -104,11 +101,21 @@ pub struct RocFunction_65 {
impl RocFunction_65 {
    pub fn force_thunk(self, arg_0: ()) -> Op {
        extern "C" {
            fn roc__mainForHost_1__Fx2_caller(output: *mut Op, arg_0: (), closure_data: *mut u8);
            fn roc__mainForHost_1__Fx0_caller(
                arg_0: *mut u8,
                closure_data: *mut u8,
                output: *mut Op,
            );
        }

        let mut output = std::mem::MaybeUninit::uninit();
        unsafe { roc__mainForHost_1__Fx2_caller(output.as_mut_ptr(), arg_0, self.closure_data) };
        unsafe {
            roc__mainForHost_1__Fx0_caller(
                self.closure_data,
                self.closure_data,
                output.as_mut_ptr(),
            )
        };
        unsafe { output.assume_init() }
    }
}
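A hedged sketch of how a host would use the generated wrapper above: the RocFunction_65 value is assumed to arrive as part of a tag payload (for example inside an Op variant), and force_thunk hands its closure_data to the roc__mainForHost_1__Fx0_caller extern and returns the next Op. The run helper is illustrative only:

// Illustrative host-side call; `f` is assumed to come from an Op payload.
fn run(f: RocFunction_65) -> Op {
    // Internally this calls the extern caller with the captured closure_data
    // and reads the written-back Op out of a MaybeUninit.
    f.force_thunk(())
}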
@@ -120,7 +127,6 @@ impl RocFunction_65 {
    target_arch = "x86",
    target_arch = "x86_64"
))]

#[repr(C)]
pub struct RocFunction_67 {
    pub closure_data: *mut u8,
@@ -129,11 +135,11 @@ pub struct RocFunction_67 {
impl RocFunction_67 {
    pub fn force_thunk(self, arg_0: ()) -> Op {
        extern "C" {
            fn roc__mainForHost_1__Fx2_caller(output: *mut Op, arg_0: (), closure_data: *mut u8);
            fn roc__mainForHost_1__Fx1_caller(output: *mut Op, arg_0: (), closure_data: *mut u8);
        }

        let mut output = std::mem::MaybeUninit::uninit();
        unsafe { roc__mainForHost_1__Fx2_caller(output.as_mut_ptr(), arg_0, self.closure_data) };
        unsafe { roc__mainForHost_1__Fx1_caller(output.as_mut_ptr(), arg_0, self.closure_data) };
        unsafe { output.assume_init() }
    }
}
@@ -161,28 +167,18 @@ impl Op {
        if untagged.is_null() {
            None
        } else {
            unsafe {
                Some(&*untagged.sub(1))
            }
            unsafe { Some(&*untagged.sub(1)) }
        }
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Returns which variant this tag union holds. Note that this never includes a payload!
    pub fn discriminant(&self) -> discriminant_Op {
        // The discriminant is stored in the unused bytes at the end of the recursive pointer
        unsafe { core::mem::transmute::<u8, discriminant_Op>((self.pointer as u8) & 0b11) }
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Internal helper
    fn tag_discriminant(pointer: *mut union_Op, discriminant: discriminant_Op) -> *mut union_Op {
        // The discriminant is stored in the unused bytes at the end of the union pointer
@@ -192,11 +188,7 @@ impl Op {
        tagged as *mut union_Op
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Internal helper
    fn union_pointer(&self) -> *mut union_Op {
        // The discriminant is stored in the unused bytes at the end of the union pointer
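These helpers depend on pointer tagging: union_Op has at least 4-byte alignment on 32-bit targets and 8-byte alignment on 64-bit targets, so the pointer's low bits are always zero and can carry the discriminant (mask 0b11 or 0b111 respectively, as seen in discriminant() and the payload accessors). A standalone sketch of the same trick using plain usize arithmetic rather than the generated types:

// Minimal illustration of the tag/untag scheme behind discriminant(),
// tag_discriminant(), and union_pointer(). 0b111 is the 64-bit mask;
// 32-bit targets use 0b11.
const MASK: usize = 0b111;

fn tag(pointer: usize, discriminant: u8) -> usize {
    debug_assert_eq!(pointer & MASK, 0, "pointer must be sufficiently aligned");
    pointer | discriminant as usize
}

fn untag(tagged: usize) -> (usize, u8) {
    (tagged & !MASK, (tagged & MASK) as u8)
}

fn main() {
    let (ptr, disc) = untag(tag(0x1000, 2));
    assert_eq!((ptr, disc), (0x1000, 2));
}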
@@ -274,32 +266,28 @@ impl Op {
    ))]
    /// Construct a tag named `StderrWrite`, with the appropriate payload
    pub fn StderrWrite(arg: Op_StderrWrite) -> Self {
        let size = core::mem::size_of::<union_Op>();
        let align = core::mem::align_of::<union_Op>() as u32;
        let size = core::mem::size_of::<union_Op>();
        let align = core::mem::align_of::<union_Op>() as u32;

        unsafe {
            let ptr = roc_std::roc_alloc_refcounted::<union_Op>();
        unsafe {
            let ptr = roc_std::roc_alloc_refcounted::<union_Op>();

            *ptr = union_Op {
                StderrWrite: core::mem::ManuallyDrop::new(arg)
            };
            *ptr = union_Op {
                StderrWrite: core::mem::ManuallyDrop::new(arg),
            };

            Self {
                pointer: Self::tag_discriminant(ptr, discriminant_Op::StderrWrite),
            }
            Self {
                pointer: Self::tag_discriminant(ptr, discriminant_Op::StderrWrite),
            }
        }
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StderrWrite` and convert it to `StderrWrite`'s payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn into_StderrWrite(mut self) -> Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn into_StderrWrite(mut self) -> Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b11) as *mut union_Op;
            let mut uninitialized = core::mem::MaybeUninit::uninit();
@@ -318,16 +306,12 @@ impl Op {
        payload
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StderrWrite` and return its payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn as_StderrWrite(&self) -> &Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn as_StderrWrite(&self) -> &Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b11) as *mut union_Op;
@@ -396,32 +380,28 @@ impl Op {
    ))]
    /// Construct a tag named `StdoutWrite`, with the appropriate payload
    pub fn StdoutWrite(arg: Op_StdoutWrite) -> Self {
        let size = core::mem::size_of::<union_Op>();
        let align = core::mem::align_of::<union_Op>() as u32;
        let size = core::mem::size_of::<union_Op>();
        let align = core::mem::align_of::<union_Op>() as u32;

        unsafe {
            let ptr = roc_std::roc_alloc_refcounted::<union_Op>();
        unsafe {
            let ptr = roc_std::roc_alloc_refcounted::<union_Op>();

            *ptr = union_Op {
                StdoutWrite: core::mem::ManuallyDrop::new(arg)
            };
            *ptr = union_Op {
                StdoutWrite: core::mem::ManuallyDrop::new(arg),
            };

            Self {
                pointer: Self::tag_discriminant(ptr, discriminant_Op::StdoutWrite),
            }
            Self {
                pointer: Self::tag_discriminant(ptr, discriminant_Op::StdoutWrite),
            }
        }
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StdoutWrite` and convert it to `StdoutWrite`'s payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn into_StdoutWrite(mut self) -> Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn into_StdoutWrite(mut self) -> Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b11) as *mut union_Op;
            let mut uninitialized = core::mem::MaybeUninit::uninit();
@@ -440,16 +420,12 @@ impl Op {
        payload
    }

    #[cfg(any(
        target_arch = "arm",
        target_arch = "wasm32",
        target_arch = "x86"
    ))]
    #[cfg(any(target_arch = "arm", target_arch = "wasm32", target_arch = "x86"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StdoutWrite` and return its payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn as_StdoutWrite(&self) -> &Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn as_StdoutWrite(&self) -> &Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b11) as *mut union_Op;
@@ -459,20 +435,14 @@ impl Op {
        &payload
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Returns which variant this tag union holds. Note that this never includes a payload!
    pub fn discriminant(&self) -> discriminant_Op {
        // The discriminant is stored in the unused bytes at the end of the recursive pointer
        unsafe { core::mem::transmute::<u8, discriminant_Op>((self.pointer as u8) & 0b111) }
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Internal helper
    fn tag_discriminant(pointer: *mut union_Op, discriminant: discriminant_Op) -> *mut union_Op {
        // The discriminant is stored in the unused bytes at the end of the union pointer
@@ -482,25 +452,19 @@ impl Op {
        tagged as *mut union_Op
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Internal helper
    fn union_pointer(&self) -> *mut union_Op {
        // The discriminant is stored in the unused bytes at the end of the union pointer
        ((self.pointer as usize) & (!0b111 as usize)) as *mut union_Op
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StderrWrite` and convert it to `StderrWrite`'s payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn into_StderrWrite(mut self) -> Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn into_StderrWrite(mut self) -> Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b111) as *mut union_Op;
            let mut uninitialized = core::mem::MaybeUninit::uninit();
@@ -519,15 +483,12 @@ impl Op {
        payload
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StderrWrite` and return its payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn as_StderrWrite(&self) -> &Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StderrWrite`.
    pub unsafe fn as_StderrWrite(&self) -> &Op_StderrWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StderrWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b111) as *mut union_Op;
@@ -537,15 +498,12 @@ impl Op {
        &payload
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StdoutWrite` and convert it to `StdoutWrite`'s payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn into_StdoutWrite(mut self) -> Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn into_StdoutWrite(mut self) -> Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b111) as *mut union_Op;
            let mut uninitialized = core::mem::MaybeUninit::uninit();
@@ -564,15 +522,12 @@ impl Op {
        payload
    }

    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "x86_64"
    ))]
    #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
    /// Unsafely assume this `Op` has a `.discriminant()` of `StdoutWrite` and return its payload.
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn as_StdoutWrite(&self) -> &Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
    /// (Always examine `.discriminant()` first to make sure this is the correct variant!)
    /// Panics in debug builds if the `.discriminant()` doesn't return `StdoutWrite`.
    pub unsafe fn as_StdoutWrite(&self) -> &Op_StdoutWrite {
        debug_assert_eq!(self.discriminant(), discriminant_Op::StdoutWrite);
        let payload = {
            let ptr = (self.pointer as usize & !0b111) as *mut union_Op;
@ -601,17 +556,23 @@ impl Drop for Op {
|
|||
|
||||
if needs_dealloc {
|
||||
// Drop the payload first.
|
||||
match self.discriminant() {
|
||||
discriminant_Op::Done => {}
|
||||
discriminant_Op::StderrWrite => unsafe { core::mem::ManuallyDrop::drop(&mut (&mut *self.union_pointer()).StderrWrite) },
|
||||
discriminant_Op::StdoutWrite => unsafe { core::mem::ManuallyDrop::drop(&mut (&mut *self.union_pointer()).StdoutWrite) },
|
||||
}
|
||||
|
||||
match self.discriminant() {
|
||||
discriminant_Op::Done => {}
|
||||
discriminant_Op::StderrWrite => unsafe {
|
||||
core::mem::ManuallyDrop::drop(&mut (&mut *self.union_pointer()).StderrWrite)
|
||||
},
|
||||
discriminant_Op::StdoutWrite => unsafe {
|
||||
core::mem::ManuallyDrop::drop(&mut (&mut *self.union_pointer()).StdoutWrite)
|
||||
},
|
||||
}
|
||||
|
||||
// Dealloc the pointer
|
||||
let alignment = core::mem::align_of::<Self>().max(core::mem::align_of::<roc_std::Storage>());
|
||||
let alignment =
|
||||
core::mem::align_of::<Self>().max(core::mem::align_of::<roc_std::Storage>());
|
||||
|
||||
unsafe { crate::roc_dealloc(storage.as_ptr().cast(), alignment as u32); }
|
||||
unsafe {
|
||||
crate::roc_dealloc(storage.as_ptr().cast(), alignment as u32);
|
||||
}
|
||||
} else {
|
||||
// Write the storage back.
|
||||
storage.set(new_storage);
|
||||
|
|
@@ -631,15 +592,19 @@ impl PartialEq for Op {
        target_arch = "x86_64"
    ))]
    fn eq(&self, other: &Self) -> bool {
        if self.discriminant() != other.discriminant() {
            return false;
        }
        if self.discriminant() != other.discriminant() {
            return false;
        }

        unsafe {
        unsafe {
            match self.discriminant() {
                discriminant_Op::Done => true,
                discriminant_Op::StderrWrite => (&*self.union_pointer()).StderrWrite == (&*other.union_pointer()).StderrWrite,
                discriminant_Op::StdoutWrite => (&*self.union_pointer()).StdoutWrite == (&*other.union_pointer()).StdoutWrite,
                discriminant_Op::StderrWrite => {
                    (&*self.union_pointer()).StderrWrite == (&*other.union_pointer()).StderrWrite
                }
                discriminant_Op::StdoutWrite => {
                    (&*self.union_pointer()).StdoutWrite == (&*other.union_pointer()).StdoutWrite
                }
            }
        }
    }
@@ -662,8 +627,12 @@ impl PartialOrd for Op {
        unsafe {
            match self.discriminant() {
                discriminant_Op::Done => Some(core::cmp::Ordering::Equal),
                discriminant_Op::StderrWrite => (&*self.union_pointer()).StderrWrite.partial_cmp(&(&*other.union_pointer()).StderrWrite),
                discriminant_Op::StdoutWrite => (&*self.union_pointer()).StdoutWrite.partial_cmp(&(&*other.union_pointer()).StdoutWrite),
                discriminant_Op::StderrWrite => (&*self.union_pointer())
                    .StderrWrite
                    .partial_cmp(&(&*other.union_pointer()).StderrWrite),
                discriminant_Op::StdoutWrite => (&*self.union_pointer())
                    .StdoutWrite
                    .partial_cmp(&(&*other.union_pointer()).StdoutWrite),
            }
        }
    }
@@ -678,16 +647,20 @@ impl Ord for Op {
        target_arch = "x86_64"
    ))]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        match self.discriminant().cmp(&other.discriminant()) {
            core::cmp::Ordering::Equal => {}
            not_eq => return not_eq,
        }
        match self.discriminant().cmp(&other.discriminant()) {
            core::cmp::Ordering::Equal => {}
            not_eq => return not_eq,
        }

        unsafe {
        unsafe {
            match self.discriminant() {
                discriminant_Op::Done => core::cmp::Ordering::Equal,
                discriminant_Op::StderrWrite => (&*self.union_pointer()).StderrWrite.cmp(&(&*other.union_pointer()).StderrWrite),
                discriminant_Op::StdoutWrite => (&*self.union_pointer()).StdoutWrite.cmp(&(&*other.union_pointer()).StdoutWrite),
                discriminant_Op::StderrWrite => (&*self.union_pointer())
                    .StderrWrite
                    .cmp(&(&*other.union_pointer()).StderrWrite),
                discriminant_Op::StdoutWrite => (&*self.union_pointer())
                    .StdoutWrite
                    .cmp(&(&*other.union_pointer()).StdoutWrite),
            }
        }
    }
@@ -711,7 +684,7 @@ impl Clone for Op {
        }

        Self {
            pointer: self.pointer
            pointer: self.pointer,
        }
    }
}
@@ -724,16 +697,17 @@ impl core::hash::Hash for Op {
        target_arch = "x86",
        target_arch = "x86_64"
    ))]
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) { match self.discriminant() {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        match self.discriminant() {
            discriminant_Op::Done => discriminant_Op::Done.hash(state),
            discriminant_Op::StderrWrite => unsafe {
                discriminant_Op::StderrWrite.hash(state);
                (&*self.union_pointer()).StderrWrite.hash(state);
            },
                discriminant_Op::StderrWrite.hash(state);
                (&*self.union_pointer()).StderrWrite.hash(state);
            },
            discriminant_Op::StdoutWrite => unsafe {
                discriminant_Op::StdoutWrite.hash(state);
                (&*self.union_pointer()).StdoutWrite.hash(state);
            },
                discriminant_Op::StdoutWrite.hash(state);
                (&*self.union_pointer()).StdoutWrite.hash(state);
            },
        }
    }
}
@@ -752,12 +726,14 @@ impl core::fmt::Debug for Op {
        unsafe {
            match self.discriminant() {
                discriminant_Op::Done => f.write_str("Done"),
                discriminant_Op::StderrWrite => f.debug_tuple("StderrWrite")
                    // TODO HAS CLOSURE
                    .finish(),
                discriminant_Op::StdoutWrite => f.debug_tuple("StdoutWrite")
                    // TODO HAS CLOSURE
                    .finish(),
                discriminant_Op::StderrWrite => f
                    .debug_tuple("StderrWrite")
                    // TODO HAS CLOSURE
                    .finish(),
                discriminant_Op::StdoutWrite => f
                    .debug_tuple("StdoutWrite")
                    // TODO HAS CLOSURE
                    .finish(),
            }
        }
    }