Downscale Images to document resolution (#1077)

* Add DownscaleNode

* Add lambda (call argument) input type + fix caching

* Add comment explaining Lambda input

* Automatically insert cache node after downscale node

* Implement sparse hashing of images
Dennis Kobert 2023-03-15 12:49:56 +01:00 committed by Keavon Chambers
parent 0a775fe9be
commit fe233504ca
13 changed files with 209 additions and 224 deletions

View file

@@ -329,10 +329,9 @@ mod image {
use alloc::vec::Vec;
use core::hash::{Hash, Hasher};
use dyn_any::{DynAny, StaticType};
use glam::DAffine2;
use glam::DVec2;
use glam::{DAffine2, DVec2};
#[derive(Clone, Debug, PartialEq, DynAny, Default, specta::Type, Hash)]
#[derive(Clone, Debug, PartialEq, DynAny, Default, specta::Type)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Image {
pub width: u32,
@@ -340,6 +339,17 @@ mod image {
pub data: Vec<Color>,
}
impl Hash for Image {
fn hash<H: Hasher>(&self, state: &mut H) {
const HASH_SAMPLES: usize = 1000;
self.width.hash(state);
self.height.hash(state);
for i in 0..HASH_SAMPLES.min(self.data.len()) {
self.data[i * self.data.len() / HASH_SAMPLES].hash(state);
}
}
}
impl Image {
pub const fn empty() -> Self {
Self {
@@ -362,7 +372,7 @@ mod image {
}
/// Flattens the image data, casting each channel to a u8
pub fn as_flat_u8(self) -> (Vec<u8>, u32, u32) {
pub fn into_flat_u8(self) -> (Vec<u8>, u32, u32) {
let Image { width, height, data } = self;
let result_bytes = data.into_iter().flat_map(|color| color.to_rgba8()).collect();
@@ -431,11 +441,12 @@ mod image {
&mut self.image.data[y * (self.image.width as usize) + x]
}
pub fn sample(&self, x: f64, y: f64) -> Color {
let x = x.clamp(0.0, self.image.width as f64 - 1.0) as usize;
let y = y.clamp(0.0, self.image.height as f64 - 1.0) as usize;
/// Clamps the provided point to the image bounds, from (0, 0) to the image size, and returns the closest pixel
pub fn sample(&self, position: DVec2) -> Color {
let x = position.x.clamp(0., self.image.width as f64 - 1.) as usize;
let y = position.y.clamp(0., self.image.height as f64 - 1.) as usize;
self.image.data[y * (self.image.width as usize) + x]
self.image.data[x + y * self.image.width as usize]
}
}
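
The sparse Hash implementation above trades exactness for speed: rather than hashing every pixel, it hashes the dimensions plus up to 1000 evenly spaced pixels. A minimal standalone sketch of that sampling scheme, not part of the commit (sparse_hash is a hypothetical helper):

use std::hash::{Hash, Hasher};

/// Hashes the length plus up to `samples` evenly spaced elements of `data`,
/// mirroring `i * self.data.len() / HASH_SAMPLES` in the impl above.
/// Images that differ only in unsampled pixels will collide, which is an
/// accepted trade-off for a cache key.
fn sparse_hash<T: Hash, H: Hasher>(data: &[T], samples: usize, state: &mut H) {
    data.len().hash(state);
    for i in 0..samples.min(data.len()) {
        data[i * data.len() / samples].hash(state);
    }
}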

View file

@@ -69,6 +69,7 @@ impl DocumentNode {
(ProtoNodeInput::Node(node_id, lambda), ConstructionArgs::Nodes(vec![]))
}
NodeInput::Network(ty) => (ProtoNodeInput::Network(ty), ConstructionArgs::Nodes(vec![])),
NodeInput::ShortCircut(ty) => (ProtoNodeInput::ShortCircut(ty), ConstructionArgs::Nodes(vec![])),
};
assert!(!self.inputs.iter().any(|input| matches!(input, NodeInput::Network(_))), "received unresolved parameter");
assert!(
@@ -121,12 +122,52 @@ impl DocumentNode {
}
}
/// Represents the possible inputs to a node.
/// # Short-circuiting
/// In Graphite, nodes are functions and, by default, they are composed into a single function
/// by inserting Compose nodes.
///
///
///
///
/// ┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐
/// │ │◄──────────────┤ │◄───────────────┤ │
/// │ A │ │ B │ │ C │
/// │ ├──────────────►│ ├───────────────►│ │
/// └─────────────────┘ └──────────────────┘ └──────────────────┘
///
///
///
/// This is equivalent to calling c(b(a(input))) when evaluating c with an input (`c.eval(input)`).
/// But sometimes we might want to have a little more control over the order of execution.
/// This is why we allow nodes to opt out of the input forwarding by consuming the input directly.
///
///
///
/// ┌─────────────────────┐ ┌─────────────┐
/// │ │◄───────────────┤ │
/// │ Cache Node │ │ C │
/// │ ├───────────────►│ │
/// ┌──────────────────┐ ├─────────────────────┤ └─────────────┘
/// │ │◄──────────────┤ │
/// │ A │ │ * Cached Node │
/// │ ├──────────────►│ │
/// └──────────────────┘ └─────────────────────┘
///
///
///
///
/// In this case the Cache node actually consumes its input and then manually forwards it to its parameter
/// node. This is necessary because the Cache node needs to short-circuit the actual node evaluation.
#[derive(Debug, Clone, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum NodeInput {
Node { node_id: NodeId, output_index: usize, lambda: bool },
Value { tagged_value: crate::document::value::TaggedValue, exposed: bool },
Network(Type),
// A short-circuiting input represents an input that is not resolved through function composition;
// instead, the node consumes the provided input directly rather than passing it to its predecessor.
ShortCircut(Type),
}
impl NodeInput {
@@ -153,6 +194,7 @@ impl NodeInput {
NodeInput::Node { .. } => true,
NodeInput::Value { exposed, .. } => *exposed,
NodeInput::Network(_) => false,
NodeInput::ShortCircut(_) => false,
}
}
pub fn ty(&self) -> Type {
@@ -160,6 +202,7 @@ impl NodeInput {
NodeInput::Node { .. } => unreachable!("ty() called on NodeInput::Node"),
NodeInput::Value { tagged_value, .. } => tagged_value.ty(),
NodeInput::Network(ty) => ty.clone(),
NodeInput::ShortCircut(ty) => ty.clone(),
}
}
}
@@ -397,6 +440,7 @@ impl NodeNetwork {
self.inputs[index] = *network_input;
}
}
NodeInput::ShortCircut(_) => (),
}
}
node.implementation = DocumentNodeImplementation::Unresolved("graphene_core::ops::IdNode".into());
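
The short-circuiting doc comment above boils down to two call patterns. A minimal sketch using plain closures in place of Graphite's Node trait (composed and CachingWrapper are illustrative, not part of the commit):

use std::cell::RefCell;

// Ordinary composition: the graph compiler threads the input through a, b,
// and c, so `c.eval(input)` behaves like c(b(a(input))).
fn composed(input: u32) -> u32 {
    let a = |x: u32| x + 1;
    let b = |x: u32| x * 2;
    let c = |x: u32| x.wrapping_sub(3);
    c(b(a(input)))
}

// Short-circuiting: the wrapper consumes the input itself and decides whether
// its upstream node (modelled here as a stored closure) runs at all.
struct CachingWrapper<F: Fn(u32) -> u32> {
    upstream: F,
    cached: RefCell<Option<(u32, u32)>>, // (input, output)
}

impl<F: Fn(u32) -> u32> CachingWrapper<F> {
    fn eval(&self, input: u32) -> u32 {
        if let Some((cached_input, cached_output)) = *self.cached.borrow() {
            if cached_input == input {
                return cached_output; // cache hit: the upstream node is never evaluated
            }
        }
        let output = (self.upstream)(input);
        *self.cached.borrow_mut() = Some((input, output));
        output
    }
}

fn main() {
    assert_eq!(composed(4), 7); // ((4 + 1) * 2) - 3
    let cache = CachingWrapper { upstream: |x: u32| x * x, cached: RefCell::new(None) };
    assert_eq!(cache.eval(3), 9);
    assert_eq!(cache.eval(3), 9); // second call short-circuits the upstream evaluation
}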

View file

@@ -44,6 +44,7 @@ impl core::fmt::Display for ProtoNetwork {
match &node.input {
ProtoNodeInput::None => f.write_str("None")?,
ProtoNodeInput::Network(ty) => f.write_fmt(format_args!("Network (type = {:?})", ty))?,
ProtoNodeInput::ShortCircut(ty) => f.write_fmt(format_args!("Lambda (type = {:?})", ty))?,
ProtoNodeInput::Node(_, _) => f.write_str("Node")?,
}
f.write_str("\n")?;
@@ -116,11 +117,19 @@ pub struct ProtoNode {
pub identifier: NodeIdentifier,
}
/// A ProtoNodeInput represents the input of a node in a ProtoNetwork.
/// For documentation on the meaning of the variants, see the documentation of the `NodeInput` enum
/// in the `document` module
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum ProtoNodeInput {
None,
Network(Type),
// the bool indicates whether to treat the node as a lambda node
/// A ShortCircut input represents an input that is not resolved through function composition;
/// instead, the node consumes the provided input directly rather than passing it to its predecessor.
ShortCircut(Type),
/// The bool indicates whether to treat the node as a lambda node.
/// When treating it as a lambda, only the connected node itself is fed as input.
/// Otherwise, the entire network of which the node is the output is fed as input.
Node(NodeId, bool),
}
@@ -142,6 +151,10 @@ impl ProtoNode {
self.construction_args.hash(&mut hasher);
match self.input {
ProtoNodeInput::None => "none".hash(&mut hasher),
ProtoNodeInput::ShortCircut(ref ty) => {
"lambda".hash(&mut hasher);
ty.hash(&mut hasher);
}
ProtoNodeInput::Network(ref ty) => {
"network".hash(&mut hasher);
ty.hash(&mut hasher);
@@ -422,6 +435,7 @@ impl TypingContext {
// Get the node input type from the proto node declaration
let input = match node.input {
ProtoNodeInput::None => concrete!(()),
ProtoNodeInput::ShortCircut(ref ty) => ty.clone(),
ProtoNodeInput::Network(ref ty) => ty.clone(),
ProtoNodeInput::Node(id, _) => {
let input = self
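
The lambda flag documented above changes what a downstream node receives: the connected node itself rather than the evaluated output of the network it terminates. A rough sketch of the distinction, with closures standing in for nodes (both functions are illustrative, not part of the commit):

// Lambda input: the downstream node is handed the upstream node itself
// (modelled as a closure) and controls when and how often it runs.
fn eval_with_lambda(upstream: impl Fn(u32) -> u32, input: u32) -> u32 {
    // A caching or looping node may call it zero, one, or many times.
    upstream(input) + upstream(input)
}

// Regular input: the upstream network is evaluated first and the downstream
// node only ever sees the resulting value.
fn eval_with_value(value: u32) -> u32 {
    value + value
}

fn main() {
    let upstream = |x: u32| x + 1;
    assert_eq!(eval_with_lambda(upstream, 2), 6);
    assert_eq!(eval_with_value(upstream(2)), 6);
}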

View file

@@ -34,7 +34,7 @@ once_cell = {version= "1.10", optional = true}
syn = {version = "1.0", default-features = false, features = ["parsing", "printing"]}
proc-macro2 = {version = "1.0", default-features = false, features = ["proc-macro"]}
quote = {version = "1.0", default-features = false }
image = "*"
image = { version = "*", default-features = false }
dyn-clone = "1.0"
log = "0.4"

View file

@@ -6,14 +6,18 @@ use graphene_core::Node;
/// Caches the output of a given Node and acts as a proxy
#[derive(Default)]
pub struct CacheNode<T> {
pub struct CacheNode<T, CachedNode> {
// We have to use an append-only data structure to make sure the references
// to the cache entries are always valid
cache: boxcar::Vec<(u64, T)>,
node: CachedNode,
}
impl<'i, T: 'i + Hash> Node<'i, T> for CacheNode<T> {
impl<'i, T: 'i, I: 'i + Hash, CachedNode: 'i> Node<'i, I> for CacheNode<T, CachedNode>
where
CachedNode: for<'any_input> Node<'any_input, I, Output = T>,
{
type Output = &'i T;
fn eval<'s: 'i>(&'s self, input: T) -> Self::Output {
fn eval<'s: 'i>(&'s self, input: I) -> Self::Output {
let mut hasher = Xxh3::new();
input.hash(&mut hasher);
let hash = hasher.finish();
@@ -22,15 +26,16 @@ impl<'i, T: 'i + Hash> Node<'i, T> for CacheNode<T> {
return cached_value;
} else {
trace!("Cache miss");
let index = self.cache.push((hash, input));
let output = self.node.eval(input);
let index = self.cache.push((hash, output));
return &self.cache[index].1;
}
}
}
impl<T> CacheNode<T> {
pub fn new() -> CacheNode<T> {
CacheNode { cache: boxcar::Vec::new() }
impl<T, CachedNode> CacheNode<T, CachedNode> {
pub fn new(node: CachedNode) -> CacheNode<T, CachedNode> {
CacheNode { cache: boxcar::Vec::new(), node }
}
}
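
The append-only boxcar::Vec is what lets eval hand out references into the cache: pushes never move existing entries, whereas a std Vec could reallocate and invalidate them. A small sketch of that property, not part of the commit (append_only_demo is illustrative):

// Requires the `boxcar` crate used by CacheNode above.
fn append_only_demo() {
    let cache: boxcar::Vec<(u64, String)> = boxcar::Vec::new();
    // `push` takes &self and returns the index of the new entry.
    let first = cache.push((1, "first".to_string()));
    let first_ref: &String = &cache[first].1;
    // Later pushes append new storage instead of reallocating,
    // so `first_ref` stays valid.
    cache.push((2, "second".to_string()));
    assert_eq!(first_ref, "first");
}

fn main() {
    append_only_demo();
}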

View file

@@ -90,6 +90,33 @@ pub fn export_image_node<'i, 's: 'i>() -> impl Node<'i, 's, (Image, &'i str), Ou
}
*/
pub struct DownscaleNode;
#[node_macro::node_fn(DownscaleNode)]
fn downscale(image_frame: ImageFrame) -> ImageFrame {
let target_width = image_frame.transform.transform_vector2((1., 0.).into()).length() as usize;
let target_height = image_frame.transform.transform_vector2((0., 1.).into()).length() as usize;
let mut image = Image {
width: target_width as u32,
height: target_height as u32,
data: Vec::with_capacity(target_width * target_height),
};
let scale_factor = DVec2::new(image_frame.image.width as f64, image_frame.image.height as f64) / DVec2::new(target_width as f64, target_height as f64);
for y in 0..target_height {
for x in 0..target_width {
let pixel = image_frame.sample(DVec2::new(x as f64, y as f64) * scale_factor);
image.data.push(pixel);
}
}
ImageFrame {
image,
transform: image_frame.transform,
}
}
#[derive(Debug, Clone, Copy)]
pub struct MapImageNode<MapFn> {
map_fn: MapFn,
@@ -168,8 +195,8 @@ fn compute_transformed_bounding_box(transform: DAffine2) -> Bbox {
}
#[derive(Debug, Clone, Copy)]
pub struct BlendImageNode<background, MapFn> {
background: background,
pub struct BlendImageNode<Background, MapFn> {
background: Background,
map_fn: MapFn,
}
@@ -202,7 +229,7 @@ where
}
let dst_pixel = background.get_mut(x as usize, y as usize);
let src_pixel = foreground.sample(fg_point.x, fg_point.y);
let src_pixel = foreground.sample(fg_point);
*dst_pixel = map_fn.eval((src_pixel, *dst_pixel));
}
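
DownscaleNode above derives its target resolution from the frame's transform: the lengths of the transformed unit basis vectors give the image's extent in document pixels, and the per-axis scale factor then maps each target pixel back into the source image for sampling. A short sketch of the size calculation, not part of the commit (target_size is a hypothetical helper):

use glam::{DAffine2, DVec2};

// The target resolution is the length of each transformed basis vector,
// matching `transform_vector2((1., 0.).into()).length()` in the node above.
fn target_size(transform: DAffine2) -> (usize, usize) {
    let width = transform.transform_vector2(DVec2::new(1., 0.)).length() as usize;
    let height = transform.transform_vector2(DVec2::new(0., 1.)).length() as usize;
    (width, height)
}

fn main() {
    // A frame covering 200x100 document units downscales to a 200x100 image.
    let transform = DAffine2::from_scale(DVec2::new(200., 100.));
    assert_eq!(target_size(transform), (200, 100));
}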

View file

@@ -135,6 +135,7 @@ fn node_registry() -> HashMap<NodeIdentifier, HashMap<NodeIOTypes, NodeConstruct
register_node!(graphene_core::ops::AddParameterNode<_>, input: f64, params: [&f64]),
register_node!(graphene_core::ops::AddParameterNode<_>, input: &f64, params: [&f64]),
register_node!(graphene_core::ops::SomeNode, input: ImageFrame, params: []),
register_node!(graphene_std::raster::DownscaleNode, input: ImageFrame, params: []),
#[cfg(feature = "gpu")]
register_node!(graphene_std::executor::MapGpuSingleImageNode<_>, input: Image, params: [String]),
vec![(
@@ -289,21 +290,43 @@ fn node_registry() -> HashMap<NodeIdentifier, HashMap<NodeIOTypes, NodeConstruct
//register_node!(graphene_std::memo::CacheNode<_>, input: Image, params: []),
(
NodeIdentifier::new("graphene_std::memo::CacheNode"),
|_| {
let node: CacheNode<Image> = graphene_std::memo::CacheNode::new();
|args| {
let input: DowncastBothNode<(), Image> = DowncastBothNode::new(args[0]);
let node: CacheNode<Image, _> = graphene_std::memo::CacheNode::new(input);
let any = DynAnyRefNode::new(node);
any.into_type_erased()
},
NodeIOTypes::new(concrete!(Image), concrete!(&Image), vec![]),
NodeIOTypes::new(concrete!(()), concrete!(&Image), vec![(concrete!(()), concrete!(Image))]),
),
(
NodeIdentifier::new("graphene_std::memo::CacheNode"),
|_| {
let node: CacheNode<QuantizationChannels> = graphene_std::memo::CacheNode::new();
|args| {
let input: DowncastBothNode<(), ImageFrame> = DowncastBothNode::new(args[0]);
let node: CacheNode<ImageFrame, _> = graphene_std::memo::CacheNode::new(input);
let any = DynAnyRefNode::new(node);
any.into_type_erased()
},
NodeIOTypes::new(concrete!(QuantizationChannels), concrete!(&QuantizationChannels), vec![]),
NodeIOTypes::new(concrete!(()), concrete!(&ImageFrame), vec![(concrete!(()), concrete!(ImageFrame))]),
),
(
NodeIdentifier::new("graphene_std::memo::CacheNode"),
|args| {
let input: DowncastBothNode<ImageFrame, ImageFrame> = DowncastBothNode::new(args[0]);
let node: CacheNode<ImageFrame, _> = graphene_std::memo::CacheNode::new(input);
let any = DynAnyRefNode::new(node);
any.into_type_erased()
},
NodeIOTypes::new(concrete!(ImageFrame), concrete!(&ImageFrame), vec![(concrete!(ImageFrame), concrete!(ImageFrame))]),
),
(
NodeIdentifier::new("graphene_std::memo::CacheNode"),
|args| {
let input: DowncastBothNode<(), QuantizationChannels> = DowncastBothNode::new(args[0]);
let node: CacheNode<QuantizationChannels, _> = graphene_std::memo::CacheNode::new(input);
let any = DynAnyRefNode::new(node);
any.into_type_erased()
},
NodeIOTypes::new(concrete!(()), concrete!(&QuantizationChannels), vec![(concrete!(()), concrete!(QuantizationChannels))]),
),
],
register_node!(graphene_core::structural::ConsNode<_, _>, input: Image, params: [&str]),