diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97a0c7fab..128c1ebf4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,8 +3,6 @@ name: Continuous Integration on: push: - pull_request: - env: CARGO_TERM_COLOR: always diff --git a/Cargo.lock b/Cargo.lock index 1c5324519..e9aae5ad5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1671,6 +1671,7 @@ dependencies = [ "proc-macro2", "quote", "serde", + "serde_json", "syn 1.0.109", "tempfile", "vulkan-executor", diff --git a/node-graph/gcore/src/lib.rs b/node-graph/gcore/src/lib.rs index defcba9cc..f368a3fb2 100644 --- a/node-graph/gcore/src/lib.rs +++ b/node-graph/gcore/src/lib.rs @@ -37,6 +37,10 @@ pub trait Node<'i, Input: 'i>: 'i { type Output: 'i; fn eval(&'i self, input: Input) -> Self::Output; fn reset(self: Pin<&mut Self>) {} + #[cfg(feature = "alloc")] + fn serialize(&self) -> Option<String> { + None + } } #[cfg(feature = "alloc")] diff --git a/node-graph/gstd/Cargo.toml b/node-graph/gstd/Cargo.toml index 10c519c1b..539fc5ddf 100644 --- a/node-graph/gstd/Cargo.toml +++ b/node-graph/gstd/Cargo.toml @@ -11,12 +11,7 @@ license = "MIT OR Apache-2.0" [features] memoization = ["once_cell"] default = ["memoization"] -gpu = [ - "graphene-core/gpu", - "gpu-compiler-bin-wrapper", - "compilation-client", - "gpu-executor", -] +gpu = ["graphene-core/gpu", "gpu-compiler-bin-wrapper", "compilation-client", "gpu-executor"] vulkan = ["gpu", "vulkan-executor"] wgpu = ["gpu", "wgpu-executor"] quantization = ["autoquant"] @@ -63,6 +58,7 @@ glam = { version = "0.22", features = ["serde"] } node-macro = { path = "../node-macro" } boxcar = "0.1.0" xxhash-rust = { workspace = true } +serde_json = "1.0.96" [dependencies.serde] version = "1.0" diff --git a/node-graph/gstd/src/memo.rs b/node-graph/gstd/src/memo.rs index 822102e51..825f726ab 100644 --- a/node-graph/gstd/src/memo.rs +++ b/node-graph/gstd/src/memo.rs @@ -1,9 +1,11 @@ use graphene_core::Node; +use serde::Serialize; use std::hash::{Hash,
Hasher}; use std::marker::PhantomData; use std::pin::Pin; use std::sync::atomic::AtomicBool; +use std::sync::Mutex; use xxhash_rust::xxh3::Xxh3; /// Caches the output of a given Node and acts as a proxy @@ -49,6 +51,37 @@ impl<T, CachedNode> CacheNode<T, CachedNode> { } } +/// Caches the output of the last graph evaluation for introspection +#[derive(Default)] +pub struct MonitorNode<T, CachedNode> { + output: Mutex<Option<T>>, + node: CachedNode, +} +impl<'i, T: 'i + Serialize + Clone, I: 'i + Hash, CachedNode: 'i> Node<'i, I> for MonitorNode<T, CachedNode> +where + CachedNode: for<'any_input> Node<'any_input, I, Output = T>, +{ + type Output = T; + fn eval(&'i self, input: I) -> Self::Output { + let output = self.node.eval(input); + *self.output.lock().unwrap() = Some(output.clone()); + output + } + + fn serialize(&self) -> Option<String> { + let output = self.output.lock().unwrap(); + (&*output).as_ref().map(|output| serde_json::to_string(output).ok()).flatten() + } +} + +impl<T, CachedNode> std::marker::Unpin for MonitorNode<T, CachedNode> {} + +impl<T, CachedNode> MonitorNode<T, CachedNode> { + pub const fn new(node: CachedNode) -> MonitorNode<T, CachedNode> { + MonitorNode { output: Mutex::new(None), node } + } +} + /// Caches the output of a given Node and acts as a proxy /// It provides two modes of operation, it can either be set /// when calling the node with a `Some` variant or the last diff --git a/node-graph/interpreted-executor/src/node_registry.rs b/node-graph/interpreted-executor/src/node_registry.rs index 5f1b48ab7..c6426898b 100644 --- a/node-graph/interpreted-executor/src/node_registry.rs +++ b/node-graph/interpreted-executor/src/node_registry.rs @@ -153,6 +153,7 @@ fn node_registry() -> HashMap<NodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>> { register_node!(graphene_std::raster::BlendImageNode<_, _, _>, input: ImageFrame, params: [ImageFrame]), register_node!(graphene_std::raster::MaskImageNode<_, _, _>, input: ImageFrame, params: [ImageFrame]), register_node!(graphene_std::raster::EmptyImageNode<_, _>, input: DAffine2, params: [Color]), + register_node!(graphene_std::memo::MonitorNode<_, _>, input: (), params: [ImageFrame]), #[cfg(feature = "gpu")]
register_node!(graphene_std::executor::MapGpuSingleImageNode<_>, input: Image, params: [String]), vec![(