Instance tables refactor part 1: wrap graphical data in the new Instances<T> struct (#2230)

* Port VectorData to Instances<VectorData>

* Port ImageFrame<P> and TextureFrame to Instances<ImageFrame<P>> and Instances<TextureFrame>

* Avoid mutation with the TransformMut trait

* Port GraphicGroup to Instances<GraphicGroup>

* It compiles!

* Organize debugging

* Document upgrading

* Fix Brush node

* Restore TransformMut in lieu of TransformSet trait

* Fix tests

* Final code review
Keavon Chambers 2025-01-28 23:51:12 -08:00 committed by GitHub
parent 408f9bffa1
commit eb0ff20d3c
43 changed files with 1855 additions and 1221 deletions
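The diffs below repeat one mechanical pattern: node signatures swap bare graphical types for their table aliases, inputs are unwrapped with one_item(), and results are rewrapped with *Table::new(...). The wrapper type itself does not appear in this section, so here is a minimal sketch inferred from those call sites (the instances field and its Vec representation are assumptions; only new, one_item, Default, and the three aliases are attested below):

/// Sketch of the table wrapper this commit introduces, inferred from its call
/// sites in the diffs below; the field layout and derives are assumptions.
#[derive(Clone, Debug)]
pub struct Instances<T> {
	instances: Vec<T>,
}

impl<T> Instances<T> {
	/// Wrap a single value, as every `*Table::new(result)` call site does.
	pub fn new(instance: T) -> Self {
		Self { instances: vec![instance] }
	}

	/// Part 1 of the refactor still assumes one instance per table, so ported
	/// nodes begin by borrowing that single item back out.
	pub fn one_item(&self) -> &T {
		self.instances.first().expect("Instances table is empty")
	}
}

impl<T> Default for Instances<T> {
	/// Error paths below return ImageFrameTable::default() where they used to
	/// return ImageFrame::empty(); whether that is an empty table (as sketched
	/// here) or a table holding one default instance is not visible in the diff.
	fn default() -> Self {
		Self { instances: Vec::new() }
	}
}

// The aliases used throughout, per the commit title and bullet list:
// type VectorDataTable = Instances<VectorData>;
// type ImageFrameTable<P> = Instances<ImageFrame<P>>;
// type GraphicGroupTable = Instances<GraphicGroup>;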

View file

@@ -3,18 +3,21 @@ use crate::raster::{blend_image_closure, BlendImageTupleNode, EmptyImageNode, Ex
use graphene_core::raster::adjustments::blend_colors;
use graphene_core::raster::bbox::{AxisAlignedBbox, Bbox};
use graphene_core::raster::brush_cache::BrushCache;
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::BlendMode;
use graphene_core::raster::{Alpha, BlendColorPairNode, Color, Image, ImageFrame, Pixel, Sample};
use graphene_core::raster::{Alpha, BlendColorPairNode, Color, Image, Pixel, Sample};
use graphene_core::transform::{Footprint, Transform, TransformMut};
use graphene_core::value::{ClonedNode, CopiedNode, ValueNode};
use graphene_core::vector::brush_stroke::{BrushStroke, BrushStyle};
use graphene_core::vector::VectorData;
use graphene_core::vector::VectorDataTable;
use graphene_core::Node;
use glam::{DAffine2, DVec2};
#[node_macro::node(category("Debug"))]
fn vector_points(_: (), vector_data: VectorData) -> Vec<DVec2> {
fn vector_points(_: (), vector_data: VectorDataTable) -> Vec<DVec2> {
let vector_data = vector_data.one_item();
vector_data.point_domain.positions().to_vec()
}
@@ -199,7 +202,9 @@ pub fn blend_with_mode(background: ImageFrame<Color>, foreground: ImageFrame<Col
}
#[node_macro::node(category(""))]
fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec<BrushStroke>, cache: BrushCache) -> ImageFrame<Color> {
fn brush(_: Footprint, image: ImageFrameTable<Color>, bounds: ImageFrameTable<Color>, strokes: Vec<BrushStroke>, cache: BrushCache) -> ImageFrameTable<Color> {
let image = image.one_item().clone();
let stroke_bbox = strokes.iter().map(|s| s.bounding_box()).reduce(|a, b| a.union(&b)).unwrap_or(AxisAlignedBbox::ZERO);
let image_bbox = Bbox::from_transform(image.transform).to_axis_aligned_bbox();
let bbox = if image_bbox.size().length() < 0.1 { stroke_bbox } else { stroke_bbox.union(&image_bbox) };
@@ -211,8 +216,8 @@ fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Col
let mut background_bounds = bbox.to_transform();
if bounds.transform != DAffine2::ZERO {
background_bounds = bounds.transform;
if bounds.transform() != DAffine2::ZERO {
background_bounds = bounds.transform();
}
let mut actual_image = ExtendImageToBoundsNode::new(ClonedNode::new(background_bounds)).eval(brush_plan.background);
@@ -236,8 +241,7 @@ fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Col
bbox.start = bbox.start.floor();
bbox.end = bbox.end.floor();
let stroke_size = bbox.size() + DVec2::splat(stroke.style.diameter);
// For numerical stability we want to place the first blit point at a stable, integer offset
// in layer space.
// For numerical stability we want to place the first blit point at a stable, integer offset in layer space.
let snap_offset = positions[0].floor() - positions[0];
let stroke_origin_in_layer = bbox.start - snap_offset - DVec2::splat(stroke.style.diameter / 2.);
let stroke_to_layer = DAffine2::from_translation(stroke_origin_in_layer) * DAffine2::from_scale(stroke_size);
@@ -250,6 +254,7 @@ fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Col
} else {
EmptyImageNode::new(CopiedNode::new(stroke_to_layer), CopiedNode::new(Color::TRANSPARENT)).eval(())
};
blit_node.eval(blit_target)
};
@@ -301,7 +306,8 @@ fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Col
let blend_executor = BlendImageTupleNode::new(ValueNode::new(blend_params));
actual_image = blend_executor.eval((actual_image, erase_restore_mask));
}
actual_image
ImageFrameTable::new(actual_image)
}
#[cfg(test)]
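Worth noting in the hunks above: bounds.transform, a public field on ImageFrame, becomes bounds.transform(), a trait method on the table. That matches the Transform/TransformMut imports and the "Avoid mutation with the TransformMut trait" bullet. A hedged sketch of the delegation, reusing the Instances<T> sketch above (the trait shapes are inferred from the call sites; the blanket impl is an assumption):

use glam::DAffine2;

// Inferred from `use graphene_core::transform::{Transform, TransformMut}` and
// the `.transform()` call sites; the exact definitions may differ.
pub trait Transform {
	fn transform(&self) -> DAffine2;
}

pub trait TransformMut: Transform {
	fn transform_mut(&mut self) -> &mut DAffine2;
}

// Assumed delegation: a one-row table reports its single instance's transform,
// which is what lets `bounds.transform != DAffine2::ZERO` become
// `bounds.transform() != DAffine2::ZERO` in the brush node above.
impl<T: Transform> Transform for Instances<T> {
	fn transform(&self) -> DAffine2 {
		self.one_item().transform()
	}
}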

View file

@@ -1,5 +1,6 @@
use graph_craft::proto::types::Percentage;
use graphene_core::raster::{Image, ImageFrame};
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::Image;
use graphene_core::transform::Footprint;
use graphene_core::Color;
@@ -15,18 +16,19 @@ async fn dehaze<F: 'n + Send + Sync>(
)]
footprint: F,
#[implementations(
() -> ImageFrame<Color>,
Footprint -> ImageFrame<Color>,
() -> ImageFrameTable<Color>,
Footprint -> ImageFrameTable<Color>,
)]
image_frame: impl Node<F, Output = ImageFrame<Color>>,
image_frame: impl Node<F, Output = ImageFrameTable<Color>>,
strength: Percentage,
) -> ImageFrame<Color> {
) -> ImageFrameTable<Color> {
let image_frame = image_frame.eval(footprint).await;
let image_frame = image_frame.one_item();
// Prepare the image data for processing
let image = image_frame.image;
let image_data = bytemuck::cast_vec(image.data);
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, image_data).expect("Failed to convert internal ImageFrame into image-rs data type.");
let image = &image_frame.image;
let image_data = bytemuck::cast_vec(image.data.clone());
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, image_data).expect("Failed to convert internal image format into image-rs data type.");
let dynamic_image: image::DynamicImage = image_buffer.into();
// Run the dehaze algorithm
@@ -42,11 +44,13 @@ async fn dehaze<F: 'n + Send + Sync>(
base64_string: None,
};
ImageFrame {
let result = ImageFrame {
image: dehazed_image,
transform: image_frame.transform,
alpha_blending: image_frame.alpha_blending,
}
};
ImageFrameTable::new(result)
}
// There is no real point in modifying these values because they do not change the final result all that much.
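The move-to-borrow change above (image_frame.image becoming &image_frame.image, plus the added image.data.clone()) falls straight out of one_item() returning a shared reference: the node no longer owns its input frame, so payloads that were previously moved into bytemuck::cast_vec must now be cloned first. The same unwrap, clone, rewrap recipe recurs in the files below; a hypothetical minimal node showing its shape (only the types are taken from the diff, the node itself is invented):

fn passthrough(frames: ImageFrameTable<Color>) -> ImageFrameTable<Color> {
	let frame = frames.one_item(); // &ImageFrame<Color>: a shared borrow
	let pixels = frame.image.data.clone(); // Clone out whatever must be owned
	// ... process `pixels` here ...
	let result = ImageFrame {
		image: Image { data: pixels, ..frame.image.clone() },
		transform: frame.transform,
		alpha_blending: frame.alpha_blending,
	};
	ImageFrameTable::new(result)
}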

View file

@@ -4,7 +4,8 @@ use graph_craft::document::value::TaggedValue;
use graph_craft::document::*;
use graph_craft::proto::*;
use graphene_core::application_io::ApplicationIo;
use graphene_core::raster::*;
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::{BlendMode, Image, Pixel};
use graphene_core::*;
use wgpu_executor::{Bindgroup, PipelineLayout, Shader, ShaderIO, ShaderInput, WgpuExecutor, WgpuShaderInput};
@@ -61,7 +62,10 @@ impl Clone for ComputePass {
}
#[node_macro::old_node_impl(MapGpuNode)]
async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, editor_api: &'a graphene_core::application_io::EditorApi<WasmApplicationIo>) -> ImageFrame<Color> {
async fn map_gpu<'a: 'input>(image: ImageFrameTable<Color>, node: DocumentNode, editor_api: &'a graphene_core::application_io::EditorApi<WasmApplicationIo>) -> ImageFrameTable<Color> {
let image_frame_table = &image;
let image = image.one_item();
log::debug!("Executing gpu node");
let executor = &editor_api.application_io.as_ref().and_then(|io| io.gpu_executor()).unwrap();
@@ -75,9 +79,9 @@ async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, edito
self.cache.lock().as_ref().unwrap().get("placeholder").unwrap().clone()
} else {
let name = "placeholder".to_string();
let Ok(compute_pass_descriptor) = create_compute_pass_descriptor(node, &image, executor).await else {
let Ok(compute_pass_descriptor) = create_compute_pass_descriptor(node, image_frame_table, executor).await else {
log::error!("Error creating compute pass descriptor in 'map_gpu()");
return ImageFrame::empty();
return ImageFrameTable::default();
};
self.cache.lock().as_mut().unwrap().insert(name, compute_pass_descriptor.clone());
log::error!("created compute pass");
@@ -105,7 +109,7 @@ async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, edito
#[cfg(feature = "image-compare")]
log::debug!("score: {:?}", score.score);
ImageFrame {
let result = ImageFrame {
image: Image {
data: colors,
width: image.image.width,
@@ -114,7 +118,9 @@ async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, edito
},
transform: image.transform,
alpha_blending: image.alpha_blending,
}
};
ImageFrameTable::new(result)
}
impl<Node, EditorApi> MapGpuNode<Node, EditorApi> {
@@ -127,7 +133,13 @@ impl<Node, EditorApi> MapGpuNode<Node, EditorApi> {
}
}
async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node: DocumentNode, image: &ImageFrame<T>, executor: &&WgpuExecutor) -> Result<ComputePass, String> {
async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node: DocumentNode, image: &ImageFrameTable<T>, executor: &&WgpuExecutor) -> Result<ComputePass, String>
where
GraphicElement: From<ImageFrame<T>>,
T::Static: Pixel,
{
let image = image.one_item();
let compiler = graph_craft::graphene_compiler::Compiler {};
let inner_network = NodeNetwork::value_network(node);
@@ -145,40 +157,37 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::ops::IdentityNode".into()),
..Default::default()
},
/*DocumentNode {
name: "Index".into(),
// inputs: vec![NodeInput::Network(concrete!(UVec3))],
inputs: vec![NodeInput::Inline(InlineRust::new("i1.x as usize".into(), concrete![u32]))],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::value::CopiedNode".into()),
..Default::default()
},*/
/*
DocumentNode {
name: "Get Node".into(),
inputs: vec![NodeInput::node(NodeId(1), 0), NodeInput::node(NodeId(0), 0)],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::storage::GetNode".into()),
..Default::default()
},*/
// DocumentNode {
// name: "Index".into(),
// // inputs: vec![NodeInput::Network(concrete!(UVec3))],
// inputs: vec![NodeInput::Inline(InlineRust::new("i1.x as usize".into(), concrete![u32]))],
// implementation: DocumentNodeImplementation::ProtoNode("graphene_core::value::CopiedNode".into()),
// ..Default::default()
// },
// DocumentNode {
// name: "Get Node".into(),
// inputs: vec![NodeInput::node(NodeId(1), 0), NodeInput::node(NodeId(0), 0)],
// implementation: DocumentNodeImplementation::ProtoNode("graphene_core::storage::GetNode".into()),
// ..Default::default()
// },
DocumentNode {
inputs: vec![NodeInput::node(NodeId(0), 0)],
implementation: DocumentNodeImplementation::Network(inner_network),
..Default::default()
},
/*
DocumentNode {
name: "Save Node".into(),
inputs: vec![
NodeInput::node(NodeId(5), 0),
NodeInput::Inline(InlineRust::new(
"|x| o0[(_global_index.y * i1 + _global_index.x) as usize] = x".into(),
// "|x|()".into(),
Type::Fn(Box::new(concrete!(PackedPixel)), Box::new(concrete!(()))),
)),
],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::generic::FnMutNode".into()),
..Default::default()
},
*/
// DocumentNode {
// name: "Save Node".into(),
// inputs: vec![
// NodeInput::node(NodeId(5), 0),
// NodeInput::Inline(InlineRust::new(
// "|x| o0[(_global_index.y * i1 + _global_index.x) as usize] = x".into(),
// // "|x|()".into(),
// Type::Fn(Box::new(concrete!(PackedPixel)), Box::new(concrete!(()))),
// )),
// ],
// implementation: DocumentNodeImplementation::ProtoNode("graphene_core::generic::FnMutNode".into()),
// ..Default::default()
// },
]
.into_iter()
.enumerate()
@@ -204,7 +213,7 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node
)
.await
.unwrap();
// return ImageFrame::empty();
let len: usize = image.image.data.len();
let storage_buffer = executor
@@ -218,21 +227,22 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node
},
)
.unwrap();
/*
let canvas = editor_api.application_io.create_surface();
let surface = unsafe { executor.create_surface(canvas) }.unwrap();
let surface_id = surface.surface_id;
// let canvas = editor_api.application_io.create_surface();
let texture = executor.create_texture_buffer(image.image.clone(), TextureBufferOptions::Texture).unwrap();
// let surface = unsafe { executor.create_surface(canvas) }.unwrap();
// let surface_id = surface.surface_id;
// executor.create_render_pass(texture, surface).unwrap();
// let texture = executor.create_texture_buffer(image.image.clone(), TextureBufferOptions::Texture).unwrap();
// // executor.create_render_pass(texture, surface).unwrap();
// let frame = SurfaceFrame {
// surface_id,
// transform: image.transform,
// };
// return frame;
let frame = SurfaceFrame {
surface_id,
transform: image.transform,
};
return frame;*/
log::debug!("creating buffer");
let width_uniform = executor.create_uniform_buffer(image.image.width).unwrap();
@@ -269,9 +279,13 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node
}
#[node_macro::node(category("Debug: GPU"))]
async fn blend_gpu_image(_: (), foreground: ImageFrame<Color>, background: ImageFrame<Color>, blend_mode: BlendMode, opacity: f64) -> ImageFrame<Color> {
async fn blend_gpu_image(_: (), foreground: ImageFrameTable<Color>, background: ImageFrameTable<Color>, blend_mode: BlendMode, opacity: f64) -> ImageFrameTable<Color> {
let foreground = foreground.one_item();
let background = background.one_item();
let foreground_size = DVec2::new(foreground.image.width as f64, foreground.image.height as f64);
let background_size = DVec2::new(background.image.width as f64, background.image.height as f64);
// Transforms a point from the background image to the foreground image
let bg_to_fg = DAffine2::from_scale(foreground_size) * foreground.transform.inverse() * background.transform * DAffine2::from_scale(1. / background_size);
@@ -320,7 +334,7 @@ async fn blend_gpu_image(_: (), foreground: ImageFrame<Color>, background: Image
let proto_networks: Result<Vec<_>, _> = compiler.compile(network.clone()).collect();
let Ok(proto_networks_result) = proto_networks else {
log::error!("Error compiling network in 'blend_gpu_image()");
return ImageFrame::empty();
return ImageFrameTable::default();
};
let proto_networks = proto_networks_result;
log::debug!("compiling shader");
@@ -430,7 +444,7 @@ async fn blend_gpu_image(_: (), foreground: ImageFrame<Color>, background: Image
let result = executor.read_output_buffer(readback_buffer).await.unwrap();
let colors = bytemuck::pod_collect_to_vec::<u8, Color>(result.as_slice());
ImageFrame {
let result = ImageFrame {
image: Image {
data: colors,
width: background.image.width,
@@ -439,5 +453,7 @@ async fn blend_gpu_image(_: (), foreground: ImageFrame<Color>, background: Image
},
transform: background.transform,
alpha_blending: background.alpha_blending,
}
};
ImageFrameTable::new(result)
}
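One non-mechanical change in this file: once create_compute_pass_descriptor accepts a table, it also gains the bounds GraphicElement: From<ImageFrame<T>> and T::Static: Pixel. That suggests ImageFrameTable<T> is only usable for pixel types convertible into a GraphicElement, so generic callers must now repeat the bound. A hypothetical generic helper showing the shape such code takes (the function is invented; the bounds are copied from the hunk above):

// Any function generic over the pixel type that touches ImageFrameTable<T>
// now threads the same conversion bound through its where clause.
fn buffer_len<T: Clone + Pixel + StaticTypeSized>(frames: &ImageFrameTable<T>) -> usize
where
	GraphicElement: From<ImageFrame<T>>,
	T::Static: Pixel,
{
	let frame = frames.one_item();
	frame.image.data.len() // E.g. to size a GPU storage buffer
}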

View file

@@ -1,4 +1,4 @@
use graphene_core::raster::ImageFrame;
use graphene_core::raster::image::ImageFrameTable;
use graphene_core::transform::Footprint;
use graphene_core::Color;
@@ -10,10 +10,10 @@ async fn image_color_palette<F: 'n + Send>(
)]
footprint: F,
#[implementations(
() -> ImageFrame<Color>,
Footprint -> ImageFrame<Color>,
() -> ImageFrameTable<Color>,
Footprint -> ImageFrameTable<Color>,
)]
image: impl Node<F, Output = ImageFrame<Color>>,
image: impl Node<F, Output = ImageFrameTable<Color>>,
#[min(1.)]
#[max(28.)]
max_size: u32,
@@ -26,6 +26,8 @@ async fn image_color_palette<F: 'n + Send>(
let mut colors: Vec<Vec<Color>> = vec![vec![]; (bins + 1.) as usize];
let image = image.eval(footprint).await;
let image = image.one_item();
for pixel in image.image.data.iter() {
let r = pixel.r() * GRID;
let g = pixel.g() * GRID;
@@ -74,7 +76,10 @@ mod test {
use super::*;
use graph_craft::generic::FnNode;
use graphene_core::{raster::Image, value::CopiedNode, Node};
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::Image;
use graphene_core::value::CopiedNode;
use graphene_core::Node;
#[test]
fn test_image_color_palette() {
@@ -82,7 +87,7 @@ mod test {
max_size: CopiedNode(1u32),
image: FnNode::new(|_| {
Box::pin(async move {
ImageFrame {
ImageFrameTable::new(ImageFrame {
image: Image {
width: 100,
height: 100,
@@ -90,7 +95,7 @@ mod test {
base64_string: None,
},
..Default::default()
}
})
})
}),
};

View file

@@ -1,12 +1,8 @@
use crate::wasm_application_io::WasmEditorApi;
use dyn_any::DynAny;
use graph_craft::imaginate_input::{ImaginateController, ImaginateMaskStartingFill, ImaginateSamplingMethod};
use graph_craft::proto::DynFuture;
use graphene_core::raster::bbox::Bbox;
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::{
Alpha, Bitmap, BitmapMut, CellularDistanceFunction, CellularReturnType, DomainWarpType, FractalType, Image, ImageFrame, Linear, LinearChannel, Luminance, NoiseType, Pixel, RGBMut, RedGreenBlue,
Sample,
Alpha, Bitmap, BitmapMut, CellularDistanceFunction, CellularReturnType, DomainWarpType, FractalType, Image, Linear, LinearChannel, Luminance, NoiseType, Pixel, RGBMut, RedGreenBlue, Sample,
};
use graphene_core::transform::{Footprint, Transform};
use graphene_core::{AlphaBlending, Color, Node};
@@ -15,7 +11,6 @@ use fastnoise_lite;
use glam::{DAffine2, DVec2, Vec2};
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
@@ -33,10 +28,12 @@ impl From<std::io::Error> for Error {
}
#[node_macro::node(category("Debug: Raster"))]
fn sample_image(footprint: Footprint, image_frame: ImageFrame<Color>) -> ImageFrame<Color> {
fn sample_image(footprint: Footprint, image_frame: ImageFrameTable<Color>) -> ImageFrameTable<Color> {
let image_frame = image_frame.one_item();
// Resize the image using the image crate
let image = image_frame.image;
let data = bytemuck::cast_vec(image.data);
let image = &image_frame.image;
let data = bytemuck::cast_vec(image.data.clone());
let viewport_bounds = footprint.viewport_bounds_in_local_space();
let image_bounds = Bbox::from_transform(image_frame.transform).to_axis_aligned_bbox();
@@ -47,10 +44,10 @@ fn sample_image(footprint: Footprint, image_frame: ImageFrame<Color>) -> ImageFr
// If the image would not be visible, return an empty image
if size.x <= 0. || size.y <= 0. {
return ImageFrame::empty();
return ImageFrameTable::default();
}
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, data).expect("Failed to convert internal ImageFrame into image-rs data type.");
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, data).expect("Failed to convert internal image format into image-rs data type.");
let dynamic_image: image::DynamicImage = image_buffer.into();
let offset = (intersection.start - image_bounds.start).max(DVec2::ZERO);
@@ -83,11 +80,14 @@ fn sample_image(footprint: Footprint, image_frame: ImageFrame<Color>) -> ImageFr
// we need to adjust the offset if we truncate the offset calculation
let new_transform = image_frame.transform * DAffine2::from_translation(offset) * DAffine2::from_scale(size);
ImageFrame {
let result = ImageFrame {
image,
transform: new_transform,
alpha_blending: image_frame.alpha_blending,
}
};
ImageFrameTable::new(result)
}
#[derive(Debug, Clone, Copy)]
@@ -244,6 +244,7 @@ where
MapFn: Fn(_P, _P) -> _P,
{
let background_size = DVec2::new(background.width() as f64, background.height() as f64);
// Transforms a point from the background image to the foreground image
let bg_to_fg = background.transform() * DAffine2::from_scale(1. / background_size);
@@ -331,104 +332,104 @@ fn empty_image<P: Pixel>(_: (), transform: DAffine2, #[implementations(Color)] c
}
}
#[cfg(feature = "serde")]
macro_rules! generate_imaginate_node {
($($val:ident: $t:ident: $o:ty,)*) => {
pub struct ImaginateNode<P: Pixel, E, C, G, $($t,)*> {
editor_api: E,
controller: C,
generation_id: G,
$($val: $t,)*
cache: std::sync::Arc<std::sync::Mutex<HashMap<u64, Image<P>>>>,
last_generation: std::sync::atomic::AtomicU64,
}
// #[cfg(feature = "serde")]
// macro_rules! generate_imaginate_node {
// ($($val:ident: $t:ident: $o:ty,)*) => {
// pub struct ImaginateNode<P: Pixel, E, C, G, $($t,)*> {
// editor_api: E,
// controller: C,
// generation_id: G,
// $($val: $t,)*
// cache: std::sync::Arc<std::sync::Mutex<HashMap<u64, Image<P>>>>,
// last_generation: std::sync::atomic::AtomicU64,
// }
impl<'e, P: Pixel, E, C, G, $($t,)*> ImaginateNode<P, E, C, G, $($t,)*>
where $($t: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, $o>>,)*
E: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, &'e WasmEditorApi>>,
C: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, ImaginateController>>,
G: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, u64>>,
{
#[allow(clippy::too_many_arguments)]
pub fn new(editor_api: E, controller: C, $($val: $t,)* generation_id: G ) -> Self {
Self { editor_api, controller, generation_id, $($val,)* cache: Default::default(), last_generation: std::sync::atomic::AtomicU64::new(u64::MAX) }
}
}
// impl<'e, P: Pixel, E, C, G, $($t,)*> ImaginateNode<P, E, C, G, $($t,)*>
// where $($t: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, $o>>,)*
// E: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, &'e WasmEditorApi>>,
// C: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, ImaginateController>>,
// G: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, u64>>,
// {
// #[allow(clippy::too_many_arguments)]
// pub fn new(editor_api: E, controller: C, $($val: $t,)* generation_id: G ) -> Self {
// Self { editor_api, controller, generation_id, $($val,)* cache: Default::default(), last_generation: std::sync::atomic::AtomicU64::new(u64::MAX) }
// }
// }
impl<'i, 'e: 'i, P: Pixel + 'i + Hash + Default + Send, E: 'i, C: 'i, G: 'i, $($t: 'i,)*> Node<'i, ImageFrame<P>> for ImaginateNode<P, E, C, G, $($t,)*>
where $($t: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, $o>>,)*
E: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, &'e WasmEditorApi>>,
C: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, ImaginateController>>,
G: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, u64>>,
{
type Output = DynFuture<'i, ImageFrame<P>>;
// impl<'i, 'e: 'i, P: Pixel + 'i + Hash + Default + Send, E: 'i, C: 'i, G: 'i, $($t: 'i,)*> Node<'i, ImageFrame<P>> for ImaginateNode<P, E, C, G, $($t,)*>
// where $($t: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, $o>>,)*
// E: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, &'e WasmEditorApi>>,
// C: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, ImaginateController>>,
// G: for<'any_input> Node<'any_input, (), Output = DynFuture<'any_input, u64>>,
// {
// type Output = DynFuture<'i, ImageFrame<P>>;
fn eval(&'i self, frame: ImageFrame<P>) -> Self::Output {
let controller = self.controller.eval(());
$(let $val = self.$val.eval(());)*
// fn eval(&'i self, frame: ImageFrame<P>) -> Self::Output {
// let controller = self.controller.eval(());
// $(let $val = self.$val.eval(());)*
use std::hash::Hasher;
let mut hasher = rustc_hash::FxHasher::default();
frame.image.hash(&mut hasher);
let hash = hasher.finish();
let editor_api = self.editor_api.eval(());
let cache = self.cache.clone();
let generation_future = self.generation_id.eval(());
let last_generation = &self.last_generation;
// use std::hash::Hasher;
// let mut hasher = rustc_hash::FxHasher::default();
// frame.image.hash(&mut hasher);
// let hash = hasher.finish();
// let editor_api = self.editor_api.eval(());
// let cache = self.cache.clone();
// let generation_future = self.generation_id.eval(());
// let last_generation = &self.last_generation;
Box::pin(async move {
let controller: ImaginateController = controller.await;
let generation_id = generation_future.await;
if generation_id != last_generation.swap(generation_id, std::sync::atomic::Ordering::SeqCst) {
let image = super::imaginate::imaginate(frame.image, editor_api, controller, $($val,)*).await;
// Box::pin(async move {
// let controller: ImaginateController = controller.await;
// let generation_id = generation_future.await;
// if generation_id != last_generation.swap(generation_id, std::sync::atomic::Ordering::SeqCst) {
// let image = super::imaginate::imaginate(frame.image, editor_api, controller, $($val,)*).await;
cache.lock().unwrap().insert(hash, image.clone());
// cache.lock().unwrap().insert(hash, image.clone());
return wrap_image_frame(image, frame.transform);
}
let image = cache.lock().unwrap().get(&hash).cloned().unwrap_or_default();
// return wrap_image_frame(image, frame.transform);
// }
// let image = cache.lock().unwrap().get(&hash).cloned().unwrap_or_default();
return wrap_image_frame(image, frame.transform);
})
}
}
}
}
// return wrap_image_frame(image, frame.transform);
// })
// }
// }
// }
// }
fn wrap_image_frame<P: Pixel>(image: Image<P>, transform: DAffine2) -> ImageFrame<P> {
if !transform.decompose_scale().abs_diff_eq(DVec2::ZERO, 0.00001) {
ImageFrame {
image,
transform,
alpha_blending: AlphaBlending::default(),
}
} else {
let resolution = DVec2::new(image.height as f64, image.width as f64);
ImageFrame {
image,
transform: DAffine2::from_scale_angle_translation(resolution, 0., transform.translation),
alpha_blending: AlphaBlending::default(),
}
}
}
// fn wrap_image_frame<P: Pixel>(image: Image<P>, transform: DAffine2) -> ImageFrame<P> {
// if !transform.decompose_scale().abs_diff_eq(DVec2::ZERO, 0.00001) {
// ImageFrame {
// image,
// transform,
// alpha_blending: AlphaBlending::default(),
// }
// } else {
// let resolution = DVec2::new(image.height as f64, image.width as f64);
// ImageFrame {
// image,
// transform: DAffine2::from_scale_angle_translation(resolution, 0., transform.translation),
// alpha_blending: AlphaBlending::default(),
// }
// }
// }
#[cfg(feature = "serde")]
generate_imaginate_node! {
seed: Seed: f64,
res: Res: Option<DVec2>,
samples: Samples: u32,
sampling_method: SamplingMethod: ImaginateSamplingMethod,
prompt_guidance: PromptGuidance: f64,
prompt: Prompt: String,
negative_prompt: NegativePrompt: String,
adapt_input_image: AdaptInputImage: bool,
image_creativity: ImageCreativity: f64,
inpaint: Inpaint: bool,
mask_blur: MaskBlur: f64,
mask_starting_fill: MaskStartingFill: ImaginateMaskStartingFill,
improve_faces: ImproveFaces: bool,
tiling: Tiling: bool,
}
// #[cfg(feature = "serde")]
// generate_imaginate_node! {
// seed: Seed: f64,
// res: Res: Option<DVec2>,
// samples: Samples: u32,
// sampling_method: SamplingMethod: ImaginateSamplingMethod,
// prompt_guidance: PromptGuidance: f64,
// prompt: Prompt: String,
// negative_prompt: NegativePrompt: String,
// adapt_input_image: AdaptInputImage: bool,
// image_creativity: ImageCreativity: f64,
// inpaint: Inpaint: bool,
// mask_blur: MaskBlur: f64,
// mask_starting_fill: MaskStartingFill: ImaginateMaskStartingFill,
// improve_faces: ImproveFaces: bool,
// tiling: Tiling: bool,
// }
#[node_macro::node(category("Raster: Generator"))]
#[allow(clippy::too_many_arguments)]
@@ -450,7 +451,7 @@ fn noise_pattern(
cellular_distance_function: CellularDistanceFunction,
cellular_return_type: CellularReturnType,
cellular_jitter: f64,
) -> ImageFrame<Color> {
) -> ImageFrameTable<Color> {
let viewport_bounds = footprint.viewport_bounds_in_local_space();
let mut size = viewport_bounds.size();
@@ -467,7 +468,7 @@ fn noise_pattern(
// If the image would not be visible, return an empty image
if size.x <= 0. || size.y <= 0. {
return ImageFrame::empty();
return ImageFrameTable::default();
}
let footprint_scale = footprint.scale();
@@ -511,11 +512,13 @@ fn noise_pattern(
}
}
return ImageFrame::<Color> {
let result = ImageFrame {
image,
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
alpha_blending: AlphaBlending::default(),
};
return ImageFrameTable::new(result);
}
};
noise.set_noise_type(Some(noise_type));
@@ -573,16 +576,17 @@ fn noise_pattern(
}
}
// Return the coherent noise image
ImageFrame::<Color> {
let result = ImageFrame {
image,
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
alpha_blending: AlphaBlending::default(),
}
};
ImageFrameTable::new(result)
}
#[node_macro::node(category("Raster: Generator"))]
fn mandelbrot(footprint: Footprint) -> ImageFrame<Color> {
fn mandelbrot(footprint: Footprint) -> ImageFrameTable<Color> {
let viewport_bounds = footprint.viewport_bounds_in_local_space();
let image_bounds = Bbox::from_transform(DAffine2::IDENTITY).to_axis_aligned_bbox();
@@ -593,7 +597,7 @@ fn mandelbrot(footprint: Footprint) -> ImageFrame<Color> {
// If the image would not be visible, return an empty image
if size.x <= 0. || size.y <= 0. {
return ImageFrame::empty();
return ImageFrameTable::default();
}
let scale = footprint.scale();
@@ -614,7 +618,8 @@ fn mandelbrot(footprint: Footprint) -> ImageFrame<Color> {
data.push(map_color(iter, max_iter));
}
}
ImageFrame {
let result = ImageFrame {
image: Image {
width,
height,
@@ -623,7 +628,9 @@ fn mandelbrot(footprint: Footprint) -> ImageFrame<Color> {
},
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
..Default::default()
}
};
ImageFrameTable::new(result)
}
#[inline(always)]

View file

@@ -1,3 +1,5 @@
use crate::vector::{VectorData, VectorDataTable};
use graph_craft::wasm_application_io::WasmEditorApi;
use graphene_core::text::TypesettingConfig;
pub use graphene_core::text::{bounding_box, load_face, to_path, Font, FontCache};
@@ -13,7 +15,7 @@ fn text<'i: 'n>(
#[default(1.)] character_spacing: f64,
#[default(None)] max_width: Option<f64>,
#[default(None)] max_height: Option<f64>,
) -> crate::vector::VectorData {
) -> VectorDataTable {
let buzz_face = editor.font_cache.get(&font_name).map(|data| load_face(data));
let typesetting = TypesettingConfig {
@@ -23,5 +25,8 @@ fn text<'i: 'n>(
max_width,
max_height,
};
crate::vector::VectorData::from_subpaths(to_path(&text, buzz_face, typesetting), false)
let result = VectorData::from_subpaths(to_path(&text, buzz_face, typesetting), false);
VectorDataTable::new(result)
}
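The text node shows the port recipe for generator nodes at its simplest: build the bare VectorData exactly as before, then wrap it once at the boundary. A minimal hypothetical node in the same shape (new_rect and from_subpath are constructors visible elsewhere in this commit; the node itself is invented):

#[node_macro::node(category("Debug"))]
fn unit_square(_: ()) -> VectorDataTable {
	// Build the bare value just as before the refactor...
	let subpath = bezier_rs::Subpath::new_rect(glam::DVec2::ZERO, glam::DVec2::ONE);
	let square = VectorData::from_subpath(subpath);
	// ...and wrap it exactly once on the way out.
	VectorDataTable::new(square)
}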

View file

@@ -1,13 +1,13 @@
use crate::transform::Footprint;
use bezier_rs::{ManipulatorGroup, Subpath};
use graphene_core::transform::Transform;
use graphene_core::vector::misc::BooleanOperation;
use graphene_core::vector::style::Fill;
pub use graphene_core::vector::*;
use graphene_core::{Color, GraphicElement, GraphicGroup};
use graphene_core::{transform::Transform, GraphicGroup};
use graphene_core::{Color, GraphicElement, GraphicGroupTable};
pub use path_bool as path_bool_lib;
use path_bool::FillRule;
use path_bool::PathBooleanOperation;
use path_bool::{FillRule, PathBooleanOperation};
use glam::{DAffine2, DVec2};
use std::ops::Mul;
@@ -20,41 +20,49 @@ async fn boolean_operation<F: 'n + Send>(
)]
footprint: F,
#[implementations(
() -> GraphicGroup,
Footprint -> GraphicGroup,
() -> GraphicGroupTable,
Footprint -> GraphicGroupTable,
)]
group_of_paths: impl Node<F, Output = GraphicGroup>,
group_of_paths: impl Node<F, Output = GraphicGroupTable>,
operation: BooleanOperation,
) -> VectorData {
) -> VectorDataTable {
let group_of_paths = group_of_paths.eval(footprint).await;
let group_of_paths = group_of_paths.one_item();
fn vector_from_image<T: Transform>(image_frame: T) -> VectorData {
let corner1 = DVec2::ZERO;
let corner2 = DVec2::new(1., 1.);
let mut subpath = Subpath::new_rect(corner1, corner2);
subpath.apply_transform(image_frame.transform());
let mut vector_data = VectorData::from_subpath(subpath);
vector_data
.style
.set_fill(graphene_core::vector::style::Fill::Solid(Color::from_rgb_str("777777").unwrap().to_gamma_srgb()));
vector_data.style.set_fill(Fill::Solid(Color::from_rgb_str("777777").unwrap().to_gamma_srgb()));
vector_data
}
fn union_vector_data(graphic_element: &GraphicElement) -> VectorData {
match graphic_element {
GraphicElement::VectorData(vector_data) => *vector_data.clone(),
GraphicElement::VectorData(vector_data) => {
let vector_data = vector_data.one_item();
vector_data.clone()
}
// Union all vector data in the graphic group into a single vector
GraphicElement::GraphicGroup(graphic_group) => {
let graphic_group = graphic_group.one_item();
let vector_data = collect_vector_data(graphic_group);
boolean_operation_on_vector_data(&vector_data, BooleanOperation::Union)
}
GraphicElement::Raster(image) => vector_from_image(image),
GraphicElement::RasterFrame(image) => vector_from_image(image),
}
}
fn collect_vector_data(graphic_group: &GraphicGroup) -> Vec<VectorData> {
// Ensure all non vector data in the graphic group is converted to vector data
let vector_data = graphic_group.iter().map(|(element, _)| union_vector_data(element));
// Apply the transform from the parent graphic group
let transformed_vector_data = vector_data.map(|mut vector_data| {
vector_data.transform = graphic_group.transform * vector_data.transform;
@@ -186,15 +194,15 @@ async fn boolean_operation<F: 'n + Send>(
}
// The first index is the bottom of the stack
let mut boolean_operation_result = boolean_operation_on_vector_data(&collect_vector_data(&group_of_paths), operation);
let mut boolean_operation_result = boolean_operation_on_vector_data(&collect_vector_data(group_of_paths), operation);
let transform = boolean_operation_result.transform;
VectorData::transform(&mut boolean_operation_result, transform);
boolean_operation_result.style.set_stroke_transform(DAffine2::IDENTITY);
boolean_operation_result.transform = DAffine2::IDENTITY;
boolean_operation_result.upstream_graphic_group = Some(group_of_paths);
boolean_operation_result.upstream_graphic_group = Some(GraphicGroupTable::new(group_of_paths.clone()));
boolean_operation_result
VectorDataTable::new(boolean_operation_result)
}
fn to_path(vector: &VectorData, transform: DAffine2) -> Vec<path_bool::PathSegment> {

View file

@@ -6,13 +6,13 @@ use graphene_core::application_io::SurfaceHandle;
use graphene_core::application_io::{ApplicationIo, ExportFormat, RenderConfig};
#[cfg(target_arch = "wasm32")]
use graphene_core::raster::bbox::Bbox;
use graphene_core::raster::image::{ImageFrame, ImageFrameTable};
use graphene_core::raster::Image;
use graphene_core::raster::ImageFrame;
use graphene_core::renderer::RenderMetadata;
use graphene_core::renderer::{format_transform_matrix, GraphicElementRendered, ImageRenderMode, RenderParams, RenderSvgSegmentList, SvgRender};
use graphene_core::transform::Footprint;
use graphene_core::vector::VectorData;
use graphene_core::GraphicGroup;
use graphene_core::vector::VectorDataTable;
use graphene_core::GraphicGroupTable;
use graphene_core::{Color, WasmNotSend};
#[cfg(target_arch = "wasm32")]
@@ -22,8 +22,6 @@ use glam::DAffine2;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::Clamped;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::JsCast;
#[cfg(target_arch = "wasm32")]
use web_sys::{CanvasRenderingContext2d, HtmlCanvasElement};
@@ -33,25 +31,33 @@ async fn create_surface<'a: 'n>(_: (), editor: &'a WasmEditorApi) -> Arc<WasmSur
Arc::new(editor.application_io.as_ref().unwrap().create_window())
}
#[node_macro::node(category("Debug: GPU"))]
#[cfg(target_arch = "wasm32")]
async fn draw_image_frame(_: (), image: ImageFrame<graphene_core::raster::SRGBA8>, surface_handle: Arc<WasmSurfaceHandle>) -> graphene_core::application_io::SurfaceHandleFrame<HtmlCanvasElement> {
let image_data = image.image.data;
let array: Clamped<&[u8]> = Clamped(bytemuck::cast_slice(image_data.as_slice()));
if image.image.width > 0 && image.image.height > 0 {
let canvas = &surface_handle.surface;
canvas.set_width(image.image.width);
canvas.set_height(image.image.height);
// TODO: replace "2d" with "bitmaprenderer" once we switch to ImageBitmap (lives on gpu) from ImageData (lives on cpu)
let context = canvas.get_context("2d").unwrap().unwrap().dyn_into::<CanvasRenderingContext2d>().unwrap();
let image_data = web_sys::ImageData::new_with_u8_clamped_array_and_sh(array, image.image.width, image.image.height).expect("Failed to construct ImageData");
context.put_image_data(&image_data, 0., 0.).unwrap();
}
graphene_core::application_io::SurfaceHandleFrame {
surface_handle,
transform: image.transform,
}
}
// #[cfg(target_arch = "wasm32")]
// use wasm_bindgen::Clamped;
//
// #[node_macro::node(category("Debug: GPU"))]
// #[cfg(target_arch = "wasm32")]
// async fn draw_image_frame(
// _: (),
// image: ImageFrameTable<graphene_core::raster::SRGBA8>,
// surface_handle: Arc<WasmSurfaceHandle>,
// ) -> graphene_core::application_io::SurfaceHandleFrame<HtmlCanvasElement> {
// let image = image.one_item();
// let image_data = image.image.data;
// let array: Clamped<&[u8]> = Clamped(bytemuck::cast_slice(image_data.as_slice()));
// if image.image.width > 0 && image.image.height > 0 {
// let canvas = &surface_handle.surface;
// canvas.set_width(image.image.width);
// canvas.set_height(image.image.height);
// // TODO: replace "2d" with "bitmaprenderer" once we switch to ImageBitmap (lives on gpu) from ImageData (lives on cpu)
// let context = canvas.get_context("2d").unwrap().unwrap().dyn_into::<CanvasRenderingContext2d>().unwrap();
// let image_data = web_sys::ImageData::new_with_u8_clamped_array_and_sh(array, image.image.width, image.image.height).expect("Failed to construct ImageData");
// context.put_image_data(&image_data, 0., 0.).unwrap();
// }
// graphene_core::application_io::SurfaceHandleFrame {
// surface_handle,
// transform: image.transform,
// }
// }
#[node_macro::node(category("Network"))]
async fn load_resource<'a: 'n>(_: (), _primary: (), #[scope("editor-api")] editor: &'a WasmEditorApi, url: String) -> Arc<[u8]> {
@@ -69,8 +75,10 @@ async fn load_resource<'a: 'n>(_: (), _primary: (), #[scope("editor-api")] edito
}
#[node_macro::node(category("Raster"))]
fn decode_image(_: (), data: Arc<[u8]>) -> ImageFrame<Color> {
let Some(image) = image::load_from_memory(data.as_ref()).ok() else { return ImageFrame::default() };
fn decode_image(_: (), data: Arc<[u8]>) -> ImageFrameTable<Color> {
let Some(image) = image::load_from_memory(data.as_ref()).ok() else {
return ImageFrameTable::default();
};
let image = image.to_rgba32f();
let image = ImageFrame {
image: Image {
@@ -81,7 +89,8 @@ fn decode_image(_: (), data: Arc<[u8]>) -> ImageFrame<Color> {
},
..Default::default()
};
image
ImageFrameTable::new(image)
}
fn render_svg(data: impl GraphicElementRendered, mut render: SvgRender, render_params: RenderParams, footprint: Footprint) -> RenderOutputType {
@@ -144,17 +153,17 @@ async fn render_canvas(render_config: RenderConfig, data: impl GraphicElementRen
async fn rasterize<T: GraphicElementRendered + graphene_core::transform::TransformMut + WasmNotSend + 'n>(
_: (),
#[implementations(
Footprint -> VectorData,
Footprint -> ImageFrame<Color>,
Footprint -> GraphicGroup,
Footprint -> VectorDataTable,
Footprint -> ImageFrameTable<Color>,
Footprint -> GraphicGroupTable,
)]
data: impl Node<Footprint, Output = T>,
footprint: Footprint,
surface_handle: Arc<SurfaceHandle<HtmlCanvasElement>>,
) -> ImageFrame<Color> {
) -> ImageFrameTable<Color> {
if footprint.transform.matrix2.determinant() == 0. {
log::trace!("Invalid footprint received for rasterization");
return ImageFrame::default();
return ImageFrameTable::default();
}
let mut data = data.eval(footprint).await;
@@ -192,12 +201,13 @@ async fn rasterize<T: GraphicElementRendered + graphene_core::transform::Transfo
let rasterized = context.get_image_data(0., 0., resolution.x as f64, resolution.y as f64).unwrap();
let image = Image::from_image_data(&rasterized.data().0, resolution.x as u32, resolution.y as u32);
ImageFrame {
image,
let result = ImageFrame {
image: Image::from_image_data(&rasterized.data().0, resolution.x as u32, resolution.y as u32),
transform: footprint.transform,
..Default::default()
}
};
ImageFrameTable::new(result)
}
#[node_macro::node(category(""))]
@@ -205,9 +215,9 @@ async fn render<'a: 'n, T: 'n + GraphicElementRendered + WasmNotSend>(
render_config: RenderConfig,
editor_api: &'a WasmEditorApi,
#[implementations(
Footprint -> VectorData,
Footprint -> ImageFrame<Color>,
Footprint -> GraphicGroup,
Footprint -> VectorDataTable,
Footprint -> ImageFrameTable<Color>,
Footprint -> GraphicGroupTable,
Footprint -> graphene_core::Artboard,
Footprint -> graphene_core::ArtboardGroup,
Footprint -> Option<Color>,