Remove unsafe code and clean up the code base in general (#1263)

* Remove unsafe code

* Make node graph test synchronous

* Add miri step to ci

* Remove unsafe from node graph evaluation

* Replace operation pseudo_hash with a hash based on the enum discriminant (sketched just after this list)

* Fix test

* Move memo module to core and make it safe

* Fix formatting

* Remove unused stuff from gstd

* Use safe casting for creating key variants

* Fix memo node types

* Fix ref node

* "fix" ub

* Use correct input types for ExtractImageFrame

* Fix types for async nodes

* Fix missing implementation

* Manually override output type for async nodes

* Fix types for EditorApi

* Fix output type for WasmSurfaceHandle

* Remove unused miri.yml

* Fix incorrect type for cache node
Dennis Kobert 2023-06-02 11:05:32 +02:00 committed by Keavon Chambers
parent 259dcdc628
commit 4e1bfddcd8
43 changed files with 520 additions and 1252 deletions


@@ -9,8 +9,7 @@ license = "MIT OR Apache-2.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [features]
-memoization = ["once_cell"]
-default = ["memoization", "wgpu"]
+default = ["wgpu"]
 gpu = [
 	"graphene-core/gpu",
 	"gpu-compiler-bin-wrapper",
@@ -31,7 +30,6 @@ graphene-core = { path = "../gcore", features = [
 	"std",
 	"serde",
 ], default-features = false }
-borrow_stack = { path = "../borrow_stack" }
 dyn-any = { path = "../../libraries/dyn-any", features = ["derive"] }
 graph-craft = { path = "../graph-craft" }
 vulkan-executor = { path = "../vulkan-executor", optional = true }
@@ -41,16 +39,6 @@ gpu-compiler-bin-wrapper = { path = "../gpu-compiler/gpu-compiler-bin-wrapper",
 compilation-client = { path = "../compilation-client", optional = true }
 bytemuck = { version = "1.8" }
 tempfile = "3"
-once_cell = { version = "1.10", optional = true }
-#pretty-token-stream = {path = "../../pretty-token-stream"}
-syn = { version = "1.0", default-features = false, features = [
-	"parsing",
-	"printing",
-] }
-proc-macro2 = { version = "1.0", default-features = false, features = [
-	"proc-macro",
-] }
-quote = { version = "1.0", default-features = false }
 image = { version = "*", default-features = false }
 dyn-clone = "1.0"
@@ -61,7 +49,6 @@ kurbo = { git = "https://github.com/linebender/kurbo.git", features = [
 ] }
 glam = { version = "0.22", features = ["serde"] }
 node-macro = { path = "../node-macro" }
-boxcar = "0.1.0"
 xxhash-rust = { workspace = true }
 serde_json = "1.0.96"
 reqwest = { version = "0.11.17", features = ["rustls", "rustls-tls"] }


@@ -29,9 +29,8 @@ where
 		Box::pin(output)
 	}
-	fn reset(self: std::pin::Pin<&mut Self>) {
-		let wrapped_node = unsafe { self.map_unchecked_mut(|e| &mut e.node) };
-		Node::reset(wrapped_node);
+	fn reset(&self) {
+		self.node.reset();
 	}
 	fn serialize(&self) -> Option<std::sync::Arc<dyn core::any::Any>> {
@@ -67,9 +66,8 @@ where
 		let output = async move { Box::new(result) as Any<'input> };
 		Box::pin(output)
 	}
-	fn reset(self: std::pin::Pin<&mut Self>) {
-		let wrapped_node = unsafe { self.map_unchecked_mut(|e| &mut e.node) };
-		Node::reset(wrapped_node);
+	fn reset(&self) {
+		self.node.reset();
 	}
 }
@@ -114,9 +112,8 @@ where
 	fn eval(&'i self, input: T) -> Self::Output {
 		Box::pin(async move { self.node.eval(input) })
 	}
-	fn reset(self: std::pin::Pin<&mut Self>) {
-		let wrapped_node = unsafe { self.map_unchecked_mut(|e| &mut e.node) };
-		Node::reset(wrapped_node);
+	fn reset(&self) {
+		self.node.reset();
 	}
 }
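
The three hunks above replace the pinned `reset(self: std::pin::Pin<&mut Self>)` with a plain `&self` method, removing the unsafe pin projection. With `&self`, any state a node clears on reset has to live behind interior mutability. A minimal sketch of that pattern, using a hypothetical pared-down `Node` trait rather than the full graphene-core one:

use std::sync::Mutex;

// Hypothetical, simplified trait; the real one also has eval and lifetimes.
trait Node {
	fn reset(&self) {}
}

struct CachingNode<N> {
	cache: Mutex<Option<u64>>, // interior mutability lets &self clear it
	node: N,
}

impl<N: Node> Node for CachingNode<N> {
	fn reset(&self) {
		*self.cache.lock().unwrap() = None; // clear own state safely
		self.node.reset(); // delegate without any unsafe pin projection
	}
}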
@@ -207,7 +204,7 @@ impl<'n: 'input, 'input, O: 'input + StaticType, I: 'input + StaticType> Node<'i
 		let node_name = self.node.node_name();
 		let input = Box::new(input);
 		Box::pin(async move {
-			let out: Box<&_> = dyn_any::downcast::<&O>(self.node.eval(input).await).unwrap_or_else(|e| panic!("DowncastBothRefNode Input {e}"));
+			let out: Box<&_> = dyn_any::downcast::<&O>(self.node.eval(input).await).unwrap_or_else(|e| panic!("DowncastBothRefNode Input {e} in {node_name}"));
 			*out
 		})
 	}
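
The only change in this hunk threads the already-captured `node_name` into the panic message, so a failed downcast identifies the offending node. The same error-reporting pattern in isolation, as a hypothetical helper:

use std::any::Any;

// Panic with the node's name when a type downcast fails.
fn downcast_or_panic<T: 'static>(value: Box<dyn Any>, node_name: &str) -> Box<T> {
	value.downcast::<T>().unwrap_or_else(|_| panic!("downcast failed in {node_name}"))
}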


@@ -1,17 +1,13 @@
-use glam::{DAffine2, DMat2, DVec2, Mat2, UVec3, Vec2};
+use glam::{DAffine2, DVec2, Mat2, Vec2};
 use gpu_executor::{Bindgroup, ComputePassDimensions, PipelineLayout, StorageBufferOptions};
 use gpu_executor::{GpuExecutor, ShaderIO, ShaderInput};
 use graph_craft::document::value::TaggedValue;
 use graph_craft::document::*;
 use graph_craft::proto::*;
 use graphene_core::raster::bbox::{AxisAlignedBbox, Bbox};
 use graphene_core::raster::*;
 use graphene_core::*;
 use wgpu_executor::NewExecutor;
 
-use bytemuck::Pod;
-use core::marker::PhantomData;
-use dyn_any::StaticTypeSized;
-use std::sync::Arc;
 
 pub struct GpuCompiler<TypingContext, ShaderIO> {


@@ -5,9 +5,6 @@ extern crate log;
 //pub mod value;
 //#![feature(const_type_name)]
 
-#[cfg(feature = "memoization")]
-pub mod memo;
-
 pub mod raster;
 pub mod http;


@@ -1,148 +0,0 @@
//#![feature(generic_associated_types)]

// use borrow_stack::BorrowStack;
// use dyn_any::{DynAny, StaticType};
// use graphene_std::value::{AnyRefNode, AnyValueNode, StorageNode, ValueNode};
// use graphene_std::*;

/*fn mul(#[dyn_any(default)] a: f32, b: f32) -> f32 {
	a * b
}*/

/*
mod mul {
	use dyn_any::downcast_ref;
	use graphene_std::{DynAnyNode, DynNode, DynamicInput, Node};

	pub struct MulNodeInput<'n> {
		pub a: &'n f32,
		pub b: &'n f32,
	}

	#[derive(Copy, Clone)]
	pub struct MulNodeAnyProxy<'n> {
		pub a: Option<DynAnyNode<'n>>,
		pub b: Option<DynAnyNode<'n>>,
	}

	#[derive(Copy, Clone)]
	pub struct MulNodeTypedProxy<'n> {
		pub a: Option<DynNode<'n, &'n f32>>,
		pub b: Option<DynNode<'n, &'n f32>>,
	}

	impl<'n> Node<'n> for MulNodeAnyProxy<'n> {
		type Output = MulNodeInput<'n>;
		fn eval(&'n self) -> <Self as graphene_std::Node<'n>>::Output {
			// let a = self.a.unwrap().eval();
			let a: &f32 = self.a.map(|v| downcast_ref(v.eval()).unwrap()).unwrap_or(&1.);
			/*let b: &f32 = self
				.b
				.map(|v| v.eval(&()).downcast_ref::<&'n f32, &'n f32>().unwrap())
				.unwrap_or(&&2.);
			a * b*/
			MulNodeInput { a, b: a }
		}
	}

	impl<'n> Node<'n> for MulNodeTypedProxy<'n> {
		type Output = MulNodeInput<'n>;
		fn eval(&'n self) -> <Self as graphene_std::Node<'n>>::Output {
			let a = self.a.unwrap().eval();
			let b = self.b.unwrap().eval();
			MulNodeInput { a, b }
		}
	}

	/*macro_rules! new {
		() => {
			mul::MulNode { a: None, b: None }
		};
	}*/
	//pub(crate) use new;

	impl<'n> DynamicInput<'n> for MulNodeAnyProxy<'n> {
		fn set_kwarg_by_name(&mut self, _name: &str, _value: DynAnyNode<'n>) {
			todo!()
		}
		fn set_arg_by_index(&mut self, index: usize, value: DynAnyNode<'n>) {
			match index {
				0 => {
					self.a = Some(value);
				}
				_ => todo!(),
			}
		}
	}
}

// type SNode<'n> = dyn Node<'n, Output = &'n dyn DynAny<'n>>;
*/

// struct NodeStore<'n>(borrow_stack::FixedSizeStack<'n, Box<SNode<'n>>>);
// impl<'n> NodeStore<'n> {
// 	fn len(&self) -> usize {
// 		self.0.len()
// 	}
//
// 	fn push(&'n mut self, f: fn(&'n [Box<SNode>]) -> Box<SNode<'n>>) {
// 		unsafe { self.0.push(f(self.0.get())) };
// 	}
//
// 	/*fn get_index(&'n self, index: usize) -> &'n SNode<'n> {
// 		assert!(index < self.0.len());
// 		&unsafe { self.0.get()[index] }
// 	}*/
// }

fn main() {
	// use syn::parse::Parse;
	/*let nodes = vec![
		NodeKind::Input,
		NodeKind::Value(syn::parse_quote!(1u32)),
		NodeKind::Node(syn::parse_quote!(graphene_core::ops::AddNode), vec![0, 0]),
	];
	//println!("{}", node_graph(1));
	//
	let _nodegraph = NodeGraph {
		nodes,
		input: syn::Type::Verbatim(quote! {u32}),
		output: syn::Type::Verbatim(quote! {u32}),
	};*/

	//let pretty = pretty_token_stream::Pretty::new(nodegraph.serialize_gpu("add"));
	//pretty.print();

	/*
	use dyn_any::{downcast_ref, DynAny, StaticType};
	//let mut mul = mul::MulNode::new();
	let mut stack: borrow_stack::FixedSizeStack<Box<dyn Node<'_, Output = &dyn DynAny>>> =
		borrow_stack::FixedSizeStack::new(42);
	unsafe { stack.push(Box::new(AnyValueNode::new(1_f32))) };
	//let node = unsafe { stack.get(0) };
	//let boxed = Box::new(StorageNode::new(node));
	//unsafe { stack.push(boxed) };
	let result = unsafe { &stack.get()[0] }.eval();
	dbg!(downcast_ref::<f32>(result));
	/*unsafe {
		stack
			.push(Box::new(AnyRefNode::new(stack.get(0).as_ref()))
				as Box<dyn Node<(), Output = &dyn DynAny>>)
	};*/
	let f = (3.2_f32, 3.1_f32);
	let a = ValueNode::new(1.);
	let id = std::any::TypeId::of::<&f32>();
	let any_a = AnyRefNode::new(&a);
	/*let _mul2 = mul::MulNodeInput {
		a: None,
		b: Some(&any_a),
	};
	let mut mul2 = mul::new!();
	//let cached = memo::CacheNode::new(&mul1);
	//let foo = value::AnyRefNode::new(&cached);
	mul2.set_arg_by_index(0, &any_a);*/
	let int = value::IntNode::<32>;
	Node::eval(&int);
	println!("{}", Node::eval(&int));
	//let _add: u32 = ops::AddNode::<u32>::default().eval((int.exec(), int.exec()));
	//let fnode = generic::FnNode::new(|(a, b): &(i32, i32)| a - b);
	//let sub = fnode.any(&("a", 2));
	//let cache = memo::CacheNode::new(&fnode);
	//let cached_result = cache.eval(&(2, 3));
	*/
	//println!("{}", cached_result)
}


@@ -1,177 +0,0 @@
use futures::Future;
use graphene_core::Node;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use xxhash_rust::xxh3::Xxh3;

/// Caches the output of a given Node and acts as a proxy
#[derive(Default)]
pub struct CacheNode<T, CachedNode> {
	// We have to use an append-only data structure to make sure the references
	// to the cache entries are always valid
	cache: boxcar::Vec<(u64, T, AtomicBool)>,
	node: CachedNode,
}

impl<'i, T: 'i + Clone, I: 'i + Hash, CachedNode: 'i> Node<'i, I> for CacheNode<T, CachedNode>
where
	CachedNode: for<'any_input> Node<'any_input, I>,
	for<'a> <CachedNode as Node<'a, I>>::Output: core::future::Future<Output = T> + 'a,
{
	// TODO: This should return a reference to the cached value,
	// but that requires a lot of lifetime magic <- This was suggested by copilot but is pretty accurate xD
	type Output = Pin<Box<dyn Future<Output = T> + 'i>>;
	fn eval(&'i self, input: I) -> Self::Output {
		Box::pin(async move {
			let mut hasher = Xxh3::new();
			input.hash(&mut hasher);
			let hash = hasher.finish();

			if let Some((_, cached_value, keep)) = self.cache.iter().find(|(h, _, _)| *h == hash) {
				keep.store(true, std::sync::atomic::Ordering::Relaxed);
				cached_value.clone()
			} else {
				trace!("Cache miss");
				let output = self.node.eval(input).await;
				let index = self.cache.push((hash, output, AtomicBool::new(true)));
				self.cache[index].1.clone()
			}
		})
	}

	fn reset(mut self: Pin<&mut Self>) {
		let old_cache = std::mem::take(&mut self.cache);
		self.cache = old_cache.into_iter().filter(|(_, _, keep)| keep.swap(false, std::sync::atomic::Ordering::Relaxed)).collect();
	}
}

impl<T, CachedNode> std::marker::Unpin for CacheNode<T, CachedNode> {}

impl<T, CachedNode> CacheNode<T, CachedNode> {
	pub fn new(node: CachedNode) -> CacheNode<T, CachedNode> {
		CacheNode { cache: boxcar::Vec::new(), node }
	}
}
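
For reference, the deleted `CacheNode` above (the commit message says the memo module moved to graphene-core) implements hash-keyed memoization: hash the input with xxh3, return a clone of the cached output on a hit, evaluate and append on a miss. A standalone sketch of that core, simplified to `&mut self` over a plain `Vec`; the real node needs the append-only `boxcar::Vec` precisely because `eval` only receives `&self`:

use std::hash::{Hash, Hasher};
use xxhash_rust::xxh3::Xxh3;

// Append-only list of (input hash, cached output) pairs.
struct Cache<T> {
	entries: Vec<(u64, T)>,
}

impl<T: Clone> Cache<T> {
	// Return the cached value for `input`, computing and storing it on a miss.
	fn get_or_insert_with<I: Hash>(&mut self, input: &I, compute: impl FnOnce() -> T) -> T {
		let mut hasher = Xxh3::new();
		input.hash(&mut hasher);
		let hash = hasher.finish();
		if let Some((_, value)) = self.entries.iter().find(|(h, _)| *h == hash) {
			return value.clone(); // cache hit
		}
		let value = compute(); // cache miss: evaluate once
		self.entries.push((hash, value.clone()));
		value
	}
}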
/// Caches the output of the last graph evaluation for introspection
#[derive(Default)]
pub struct MonitorNode<T> {
	output: Mutex<Option<Arc<T>>>,
}

impl<'i, T: 'static + Clone> Node<'i, T> for MonitorNode<T> {
	type Output = T;
	fn eval(&'i self, input: T) -> Self::Output {
		*self.output.lock().unwrap() = Some(Arc::new(input.clone()));
		input
	}

	fn serialize(&self) -> Option<Arc<dyn core::any::Any>> {
		let output = self.output.lock().unwrap();
		(*output).as_ref().map(|output| output.clone() as Arc<dyn core::any::Any>)
	}
}

impl<T> MonitorNode<T> {
	pub const fn new() -> MonitorNode<T> {
		MonitorNode { output: Mutex::new(None) }
	}
}
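
`MonitorNode::serialize` exposes the last evaluated value as an `Arc<dyn Any>`, so introspection code has to downcast it back to a concrete type. A small caller-side sketch; the `u32` here is purely illustrative:

use std::any::Any;
use std::sync::Arc;

// Hypothetical consumer of whatever `serialize` returned.
fn last_output_as_u32(serialized: Option<Arc<dyn Any>>) -> Option<u32> {
	serialized.and_then(|any| any.downcast_ref::<u32>().copied())
}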
/// Caches the output of a given Node and acts as a proxy.
/// It provides two modes of operation: the value can be set
/// by calling the node with a `Some(T)` input, and the last
/// value that was added is returned when calling it with `None`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct LetNode<T> {
	// We have to use an append-only data structure to make sure the references
	// to the cache entries are always valid.
	// TODO: We only ever access the last value, so there is not really a reason for us
	// to store the previous entries. This should be reworked in the future.
	cache: boxcar::Vec<(u64, T)>,
}

impl<'i, T: 'i + Hash> Node<'i, Option<T>> for LetNode<T> {
	type Output = &'i T;
	fn eval(&'i self, input: Option<T>) -> Self::Output {
		match input {
			Some(input) => {
				let mut hasher = Xxh3::new();
				input.hash(&mut hasher);
				let hash = hasher.finish();

				if let Some((cached_hash, cached_value)) = self.cache.iter().last() {
					if hash == *cached_hash {
						return cached_value;
					}
				}
				trace!("Cache miss");
				let index = self.cache.push((hash, input));
				&self.cache[index].1
			}
			None => &self.cache.iter().last().expect("Let node was not initialized").1,
		}
	}

	fn reset(mut self: Pin<&mut Self>) {
		if let Some(last) = std::mem::take(&mut self.cache).into_iter().last() {
			self.cache = boxcar::vec![last];
		}
	}
}

impl<T> std::marker::Unpin for LetNode<T> {}

impl<T> LetNode<T> {
	pub fn new() -> LetNode<T> {
		LetNode { cache: boxcar::Vec::new() }
	}
}
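
`LetNode` thus has two calling modes: `Some(value)` stores the value (keyed by its hash) and returns a reference to it, while `None` replays the most recently stored value. A hypothetical driver, assuming the pre-deletion `graphene_std::memo` path:

use graphene_core::Node; // `eval` comes from the Node trait
use graphene_std::memo::LetNode; // pre-deletion path, for illustration

fn demo(let_node: &LetNode<String>) {
	// Some(_) stores the value and hands back a reference to it.
	let stored = let_node.eval(Some("hello".to_string()));
	assert_eq!(stored.as_str(), "hello");
	// None replays the last stored value.
	let replayed = let_node.eval(None);
	assert_eq!(replayed.as_str(), "hello");
}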
/// Ends a let-binding scope: evaluates its inner `input` node,
/// ignoring the let-bound value it is called with
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct EndLetNode<Input> {
	input: Input,
}

impl<'i, T: 'i, Input> Node<'i, &'i T> for EndLetNode<Input>
where
	Input: Node<'i, ()>,
{
	type Output = <Input>::Output;
	fn eval(&'i self, _: &'i T) -> Self::Output {
		self.input.eval(())
	}
}

impl<Input> EndLetNode<Input> {
	pub const fn new(input: Input) -> EndLetNode<Input> {
		EndLetNode { input }
	}
}

pub use graphene_core::ops::SomeNode as InitNode;

#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct RefNode<T, Let> {
	let_node: Let,
	_t: PhantomData<T>,
}

impl<'i, T: 'i, Let> Node<'i, ()> for RefNode<T, Let>
where
	Let: for<'a> Node<'a, Option<T>>,
{
	type Output = <Let as Node<'i, Option<T>>>::Output;
	fn eval(&'i self, _: ()) -> Self::Output {
		self.let_node.eval(None)
	}
}

impl<Let, T> RefNode<T, Let> {
	pub const fn new(let_node: Let) -> RefNode<T, Let> {
		RefNode { let_node, _t: PhantomData }
	}
}
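
Taken together, these types form a small let-binding scheme for the graph: `InitNode`/`LetNode` establish a binding, `RefNode` reads it back by evaluating the let node with `None`, and `EndLetNode` runs its body node against that scope. A hypothetical end-to-end use of the binding and its reader, again assuming the pre-deletion paths:

use graphene_core::Node;
use graphene_std::memo::{LetNode, RefNode}; // pre-deletion paths, for illustration

fn demo() {
	let let_node = LetNode::new();
	let_node.eval(Some(7_u32)); // bind a value
	let reference = RefNode::<u32, _>::new(let_node); // move the binding into a reader
	assert_eq!(*reference.eval(()), 7); // RefNode replays it via eval(None)
}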