Mirror of https://github.com/GraphiteEditor/Graphite.git, synced 2025-08-04 05:18:19 +00:00
Refactor the node macro and simplify most of the node implementations (#1942)
* Add support structure for new node macro to gcore
* Fix compile issues and code generation
* Implement new node_fn macro
* Implement property translation
* Fix NodeIO type generation
* Start translating math nodes
* Move node implementation to outer scope to allow usage of local imports
* Add expose attribute to allow controlling the parameter exposure
* Add rust analyzer support for #[implementations] attribute
* Migrate logic nodes
* Handle where clause properly
* Implement argument ident pattern preservation
* Implement adjustment layer mapping
* Fix node registry types
* Fix module paths
* Improve demo artwork compatibility
* Improve macro error reporting
* Fix handling of impl node implementations
* Fix nodeio type computation
* Fix opacity node and graph type resolution
* Fix loading of demo artworks
* Fix eslint
* Fix typo in macro test
* Remove node definitions for Adjustment Nodes
* Fix type alias property generation and make adjustments footprint aware
* Convert vector nodes
* Implement path overrides
* Fix stroke node
* Fix painted dreams
* Implement experimental type level specialization
* Fix poisson disk sampling -> all demo artworks should work again
* Port text node + make node macro more robust by implementing lifetime substitution
* Fix vector node tests
* Fix red dress demo + ci
* Fix clippy warnings
* Code review
* Fix primary input issues
* Improve math nodes and audit others
* Set no_properties when no automatic properties are derived
* Port vector generator nodes (could not derive all definitions yet)
* Various QA changes and add min/max/mode_range to number parameters
* Add min and max for f64 and u32
* Convert gpu nodes and clean up unused nodes
* Partially port transform node
* Allow implementations on call arg
* Port path modify node
* Start porting graphic element nodes
* Transform nodes in graphic_element.rs
* Port brush node
* Port nodes in wasm_executior
* Rename node macro
* Fix formatting
* Fix Mandelbrot node
* Formatting
* Fix Load Image and Load Resource nodes, add scope input to node macro
* Remove unnecessary underscores
* Begin attempting to make nodes resolution-aware
* Infer a generic manual composition type on generic call arg
* Various fixes and work towards merging
* Final changes for merge!
* Fix tests, probably
* More free line removals!

--------

Co-authored-by: Keavon Chambers <keavon@keavon.com>
parent ca0d102296
commit e352c7fa71
92 changed files with 4255 additions and 7275 deletions
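The change list above describes the migration pattern that the diff below applies file by file: hand-written node structs with `#[node_macro::node_fn(...)]` implementations are collapsed into a single function annotated with the new `#[node_macro::node(...)]` attribute, which derives the node's struct, properties, and registry entry. A minimal before/after sketch of that pattern, distilled from the vector_points change in the diff below (illustrative only, not a full description of the macro's options):

```rust
// Old style (removed): a dedicated node struct plus the node_fn macro.
#[derive(Clone, Debug, PartialEq)]
pub struct VectorPointsNode;

#[node_macro::node_fn(VectorPointsNode)]
fn vector_points(vector: VectorData) -> Vec<DVec2> {
	vector.point_domain.positions().to_vec()
}

// New style (added): one annotated function. The first parameter is the node's
// call argument (here the unit type), and attributes such as category("...")
// control how the node is surfaced in the editor.
#[node_macro::node(category("Debug"))]
fn vector_points(_: (), vector_data: VectorData) -> Vec<DVec2> {
	vector_data.point_domain.positions().to_vec()
}
```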
@ -15,7 +15,6 @@ gpu = [
"gpu-executor",
]
wgpu = ["gpu", "dep:wgpu", "graph-craft/wgpu"]
quantization = ["autoquant"]
wasm = ["wasm-bindgen", "web-sys", "js-sys"]
imaginate = ["image/png", "base64", "js-sys", "web-sys", "wasm-bindgen-futures"]
image-compare = ["dep:image-compare"]
@ -25,7 +24,7 @@ wayland = ["graph-craft/wayland"]
[dependencies]
# Local dependencies
dyn-any = { path = "../../libraries/dyn-any", features = ["derive"] }
dyn-any = { path = "../../libraries/dyn-any", features = ["derive", "reqwest"] }
graph-craft = { path = "../graph-craft", features = ["serde"] }
wgpu-executor = { path = "../wgpu-executor" }
graphene-core = { path = "../gcore", default-features = false, features = [
@ -85,9 +84,6 @@ web-sys = { workspace = true, optional = true, features = [
"HtmlImageElement",
"ImageBitmapRenderingContext",
] }
autoquant = { git = "https://github.com/truedoctor/autoquant", optional = true, features = [
"fitting",
] }
# Optional dependencies
image-compare = { version = "0.4.1", optional = true }
@ -6,141 +6,7 @@ pub use graphene_core::{generic, ops, Node};
use dyn_any::StaticType;
use std::marker::PhantomData;
pub struct DynAnyNode<I, O, Node> {
node: Node,
_i: PhantomData<I>,
_o: PhantomData<O>,
}
impl<'input, _I: 'input + StaticType + WasmNotSend, _O: 'input + StaticType + WasmNotSend, N: 'input> Node<'input, Any<'input>> for DynAnyNode<_I, _O, N>
where
N: Node<'input, _I, Output = DynFuture<'input, _O>>,
{
type Output = FutureAny<'input>;
#[inline]
fn eval(&'input self, input: Any<'input>) -> Self::Output {
let node_name = core::any::type_name::<N>();
let output = |input| {
let result = self.node.eval(input);
async move { Box::new(result.await) as Any<'input> }
};
match dyn_any::downcast(input) {
Ok(input) => Box::pin(output(*input)),
// If the input type of the node is `()` and we supply an invalid type, we can still call the
// node and just ignore the input and call it with the unit type instead.
Err(_) if core::any::TypeId::of::<_I::Static>() == core::any::TypeId::of::<()>() => {
assert_eq!(std::mem::size_of::<_I>(), 0);
// Rust can't know, that `_I` and `()` are the same size, so we have to use a `transmute_copy()` here
Box::pin(output(unsafe { std::mem::transmute_copy(&()) }))
}
Err(e) => panic!("DynAnyNode Input, {0} in:\n{1}", e, node_name),
}
}
fn reset(&self) {
self.node.reset();
}
fn serialize(&self) -> Option<std::sync::Arc<dyn core::any::Any>> {
self.node.serialize()
}
}
impl<'input, _I: 'input + StaticType, _O: 'input + StaticType, N: 'input> DynAnyNode<_I, _O, N>
where
N: Node<'input, _I, Output = DynFuture<'input, _O>>,
{
pub const fn new(node: N) -> Self {
Self {
node,
_i: core::marker::PhantomData,
_o: core::marker::PhantomData,
}
}
}
pub struct DynAnyRefNode<I, O, Node> {
node: Node,
_i: PhantomData<(I, O)>,
}
impl<'input, _I: 'input + StaticType, _O: 'input + StaticType + WasmNotSend + Sync, N: 'input> Node<'input, Any<'input>> for DynAnyRefNode<_I, _O, N>
where
N: for<'any_input> Node<'any_input, _I, Output = &'any_input _O>,
{
type Output = FutureAny<'input>;
fn eval(&'input self, input: Any<'input>) -> Self::Output {
let node_name = core::any::type_name::<N>();
let input: Box<_I> = dyn_any::downcast(input).unwrap_or_else(|e| panic!("DynAnyRefNode Input, {e} in:\n{node_name}"));
let result = self.node.eval(*input);
let output = async move { Box::new(result) as Any<'input> };
Box::pin(output)
}
fn reset(&self) {
self.node.reset();
}
fn serialize(&self) -> Option<std::sync::Arc<dyn core::any::Any>> {
self.node.serialize()
}
}
impl<_I, _O, S0> DynAnyRefNode<_I, _O, S0> {
pub const fn new(node: S0) -> Self {
Self { node, _i: core::marker::PhantomData }
}
}
pub struct DynAnyInRefNode<I, O, Node> {
node: Node,
_i: PhantomData<(I, O)>,
}
impl<'input, _I: 'input + StaticType, _O: 'input + StaticType + WasmNotSend, N: 'input> Node<'input, Any<'input>> for DynAnyInRefNode<_I, _O, N>
where
N: for<'any_input> Node<'any_input, &'any_input _I, Output = DynFuture<'any_input, _O>>,
{
type Output = FutureAny<'input>;
fn eval(&'input self, input: Any<'input>) -> Self::Output {
{
let node_name = core::any::type_name::<N>();
let input: Box<&_I> = dyn_any::downcast(input).unwrap_or_else(|e| panic!("DynAnyInRefNode Input, {e} in:\n{node_name}"));
let result = self.node.eval(*input);
Box::pin(async move { Box::new(result.await) as Any<'_> })
}
}
}
impl<_I, _O, S0> DynAnyInRefNode<_I, _O, S0> {
pub const fn new(node: S0) -> Self {
Self { node, _i: core::marker::PhantomData }
}
}
pub struct FutureWrapperNode<Node> {
node: Node,
}
impl<'i, T: 'i + WasmNotSend, N> Node<'i, T> for FutureWrapperNode<N>
where
N: Node<'i, T, Output: WasmNotSend> + WasmNotSend,
{
type Output = DynFuture<'i, N::Output>;
fn eval(&'i self, input: T) -> Self::Output {
let result = self.node.eval(input);
Box::pin(async move { result })
}
fn reset(&self) {
self.node.reset();
}
fn serialize(&self) -> Option<std::sync::Arc<dyn core::any::Any>> {
self.node.serialize()
}
}
impl<N> FutureWrapperNode<N> {
pub const fn new(node: N) -> Self {
Self { node }
}
}
pub use graphene_core::registry::{DowncastBothNode, DynAnyNode, FutureWrapperNode, PanicNode};
pub trait IntoTypeErasedNode<'n> {
fn into_type_erased(self) -> TypeErasedBox<'n>;
@ -155,60 +21,6 @@ where
}
}
pub struct DowncastNode<O, Node> {
node: Node,
_o: PhantomData<O>,
}
impl<N: Clone, O: StaticType> Clone for DowncastNode<O, N> {
fn clone(&self) -> Self {
Self { node: self.node.clone(), _o: self._o }
}
}
impl<N: Copy, O: StaticType> Copy for DowncastNode<O, N> {}
#[node_macro::node_fn(DowncastNode<_O>)]
fn downcast<N: 'input, _O: StaticType>(input: Any<'input>, node: &'input N) -> _O
where
N: for<'any_input> Node<'any_input, Any<'any_input>, Output = Any<'any_input>> + 'input,
{
let node_name = core::any::type_name::<N>();
let out = dyn_any::downcast(node.eval(input)).unwrap_or_else(|e| panic!("DowncastNode Input {e} in:\n{node_name}"));
*out
}
/// Boxes the input and downcasts the output.
/// Wraps around a node taking Box<dyn DynAny> and returning Box<dyn DynAny>
#[derive(Clone)]
pub struct DowncastBothNode<I, O> {
node: SharedNodeContainer,
_i: PhantomData<I>,
_o: PhantomData<O>,
}
impl<'input, O: 'input + StaticType + WasmNotSend, I: 'input + StaticType + WasmNotSend> Node<'input, I> for DowncastBothNode<I, O> {
type Output = DynFuture<'input, O>;
#[inline]
fn eval(&'input self, input: I) -> Self::Output {
{
let node_name = self.node.node_name();
let input = Box::new(input);
let future = self.node.eval(input);
Box::pin(async move {
let out = dyn_any::downcast(future.await).unwrap_or_else(|e| panic!("DowncastBothNode Input {e} in: \n{node_name}"));
*out
})
}
}
}
impl<I, O> DowncastBothNode<I, O> {
pub const fn new(node: SharedNodeContainer) -> Self {
Self {
node,
_i: core::marker::PhantomData,
_o: core::marker::PhantomData,
}
}
}
pub struct ComposeTypeErased {
first: SharedNodeContainer,
second: SharedNodeContainer,
@ -236,71 +48,3 @@ pub fn input_node<O: StaticType>(n: SharedNodeContainer) -> DowncastBothNode<(),
pub fn downcast_node<I: StaticType, O: StaticType>(n: SharedNodeContainer) -> DowncastBothNode<I, O> {
DowncastBothNode::new(n)
}
pub struct PanicNode<I: WasmNotSend, O: WasmNotSend>(PhantomData<I>, PhantomData<O>);
impl<'i, I: 'i + WasmNotSend, O: 'i + WasmNotSend> Node<'i, I> for PanicNode<I, O> {
type Output = O;
fn eval(&'i self, _: I) -> Self::Output {
unimplemented!("This node should never be evaluated")
}
}
impl<I: WasmNotSend, O: WasmNotSend> PanicNode<I, O> {
pub const fn new() -> Self {
Self(PhantomData, PhantomData)
}
}
impl<I: WasmNotSend, O: WasmNotSend> Default for PanicNode<I, O> {
fn default() -> Self {
Self::new()
}
}
// TODO: Evaluate safety
unsafe impl<I: WasmNotSend, O: WasmNotSend> Sync for PanicNode<I, O> {}
#[cfg(test)]
mod test {
use super::*;
use graphene_core::{ops::AddPairNode, ops::IdentityNode};
#[test]
#[should_panic]
pub fn dyn_input_invalid_eval_panic() {
// let add = DynAnyNode::new(AddPairNode::new()).into_type_erased();
// add.eval(Box::new(&("32", 32_u32)));
let dyn_any = DynAnyNode::<(u32, u32), u32, _>::new(FutureWrapperNode { node: AddPairNode::new() });
let type_erased = Box::new(dyn_any) as TypeErasedBox;
let _ref_type_erased = type_erased.as_ref();
// let type_erased = Box::pin(dyn_any) as TypeErasedBox<'_>;
futures::executor::block_on(type_erased.eval(Box::new(&("32", 32_u32))));
}
#[test]
pub fn dyn_input_compose() {
// let add = DynAnyNode::new(AddPairNode::new()).into_type_erased();
// add.eval(Box::new(&("32", 32_u32)));
let dyn_any = DynAnyNode::<(u32, u32), u32, _>::new(FutureWrapperNode { node: AddPairNode::new() });
let type_erased = Box::new(dyn_any) as TypeErasedBox<'_>;
futures::executor::block_on(type_erased.eval(Box::new((4_u32, 2_u32))));
let id_node = FutureWrapperNode::new(IdentityNode::new());
let any_id = DynAnyNode::<u32, u32, _>::new(id_node);
let type_erased_id = Box::new(any_id) as TypeErasedBox;
let type_erased = ComposeTypeErased::new(NodeContainer::new(type_erased), NodeContainer::new(type_erased_id));
futures::executor::block_on(type_erased.eval(Box::new((4_u32, 2_u32))));
// let downcast: DowncastBothNode<(u32, u32), u32> = DowncastBothNode::new(type_erased.as_ref());
// downcast.eval((4_u32, 2_u32));
}
// TODO: Fix this test
// #[test]
// pub fn dyn_input_storage_composition() {
// // todo readd test
// let node = <graphene_core::ops::IdentityNode>::new();
// let any: DynAnyNode<Any<'_>, Any<'_>, _> = DynAnyNode::new(ValueNode::new(node));
// any.into_type_erased();
// }
}
@ -3,65 +3,19 @@ use crate::raster::{blend_image_closure, BlendImageTupleNode, EmptyImageNode, Ex
use graphene_core::raster::adjustments::blend_colors;
use graphene_core::raster::bbox::{AxisAlignedBbox, Bbox};
use graphene_core::raster::brush_cache::BrushCache;
use graphene_core::raster::{Alpha, Color, Image, ImageFrame, Pixel, Sample};
use graphene_core::raster::{BlendMode, BlendNode};
use graphene_core::transform::{Transform, TransformMut};
use graphene_core::raster::BlendMode;
use graphene_core::raster::{Alpha, BlendColorPairNode, Color, Image, ImageFrame, Pixel, Sample};
use graphene_core::transform::{Footprint, Transform, TransformMut};
use graphene_core::value::{ClonedNode, CopiedNode, ValueNode};
use graphene_core::vector::brush_stroke::{BrushStroke, BrushStyle};
use graphene_core::vector::VectorData;
use graphene_core::{Node, WasmNotSend};
use node_macro::node_fn;
use graphene_core::Node;
use glam::{DAffine2, DVec2};
use std::marker::PhantomData;
#[derive(Clone, Debug, PartialEq)]
pub struct ReduceNode<Initial, Lambda> {
pub initial: Initial,
pub lambda: Lambda,
}
#[node_fn(ReduceNode)]
fn reduce<I: Iterator, Lambda, T>(iter: I, initial: T, lambda: &'input Lambda) -> T
where
Lambda: for<'a> Node<'a, (T, I::Item), Output = T>,
{
iter.fold(initial, |a, x| lambda.eval((a, x)))
}
#[derive(Clone, Debug, PartialEq)]
pub struct ChainApplyNode<Value> {
pub value: Value,
}
#[node_fn(ChainApplyNode)]
async fn chain_apply<I: Iterator + WasmNotSend, T: WasmNotSend>(iter: I, value: T) -> T
where
I::Item: for<'a> Node<'a, T, Output = T>,
{
let mut value = value;
for lambda in iter {
value = lambda.eval(value);
}
value
}
#[derive(Clone, Debug, PartialEq)]
pub struct IntoIterNode<T> {
_t: PhantomData<T>,
}
#[node_fn(IntoIterNode<_T>)]
fn into_iter<'i: 'input, _T: Send + Sync>(vec: &'i Vec<_T>) -> Box<dyn Iterator<Item = &'i _T> + Send + Sync + 'i> {
Box::new(vec.iter())
}
#[derive(Clone, Debug, PartialEq)]
pub struct VectorPointsNode;
#[node_fn(VectorPointsNode)]
fn vector_points(vector: VectorData) -> Vec<DVec2> {
vector.point_domain.positions().to_vec()
#[node_macro::node(category("Debug"))]
fn vector_points(_: (), vector_data: VectorData) -> Vec<DVec2> {
vector_data.point_domain.positions().to_vec()
}
#[derive(Clone, Copy, Debug, PartialEq)]
@ -110,27 +64,8 @@ impl<P: Pixel + Alpha> Sample for BrushStampGenerator<P> {
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct BrushStampGeneratorNode<ColorNode, Hardness, Flow> {
pub color: ColorNode,
pub hardness: Hardness,
pub flow: Flow,
}
#[derive(Clone, Debug, PartialEq)]
pub struct EraseNode<Flow> {
flow: Flow,
}
#[node_fn(EraseNode)]
fn erase(input: (Color, Color), flow: f64) -> Color {
let (input, brush) = input;
let alpha = input.a() * (1. - flow as f32 * brush.a());
Color::from_unassociated_alpha(input.r(), input.g(), input.b(), alpha)
}
#[node_fn(BrushStampGeneratorNode)]
fn brush_stamp_generator_node(diameter: f64, color: Color, hardness: f64, flow: f64) -> BrushStampGenerator<Color> {
#[node_macro::node(skip_impl)]
fn brush_stamp_generator(diameter: f64, color: Color, hardness: f64, flow: f64) -> BrushStampGenerator<Color> {
// Diameter
let radius = diameter / 2.;
@ -148,29 +83,10 @@ fn brush_stamp_generator_node(diameter: f64, color: Color, hardness: f64, flow:
BrushStampGenerator { color, feather_exponent, transform }
}
#[derive(Clone, Debug, PartialEq)]
pub struct TranslateNode<Translatable> {
translatable: Translatable,
}
#[node_fn(TranslateNode)]
fn translate_node<Data: TransformMut>(offset: DVec2, mut translatable: Data) -> Data {
translatable.translate(offset);
translatable
}
#[derive(Debug, Clone, Copy)]
pub struct BlitNode<P, Texture, Positions, BlendFn> {
texture: Texture,
positions: Positions,
blend_mode: BlendFn,
_p: PhantomData<P>,
}
#[node_fn(BlitNode<_P>)]
fn blit_node<_P: Alpha + Pixel + std::fmt::Debug, BlendFn>(mut target: ImageFrame<_P>, texture: Image<_P>, positions: Vec<DVec2>, blend_mode: BlendFn) -> ImageFrame<_P>
#[node_macro::node(skip_impl)]
fn blit<P: Alpha + Pixel + std::fmt::Debug, BlendFn>(mut target: ImageFrame<P>, texture: Image<P>, positions: Vec<DVec2>, blend_mode: BlendFn) -> ImageFrame<P>
where
BlendFn: for<'any_input> Node<'any_input, (_P, _P), Output = _P>,
BlendFn: for<'any_input> Node<'any_input, (P, P), Output = P>,
{
if positions.is_empty() {
return target;
@ -216,8 +132,8 @@ pub fn create_brush_texture(brush_style: &BrushStyle) -> Image<Color> {
let stamp = BrushStampGeneratorNode::new(CopiedNode::new(brush_style.color), CopiedNode::new(brush_style.hardness), CopiedNode::new(brush_style.flow));
let stamp = stamp.eval(brush_style.diameter);
let transform = DAffine2::from_scale_angle_translation(DVec2::splat(brush_style.diameter), 0., -DVec2::splat(brush_style.diameter / 2.));
let blank_texture = EmptyImageNode::new(CopiedNode::new(Color::TRANSPARENT)).eval(transform);
let normal_blend = BlendNode::new(CopiedNode::new(BlendMode::Normal), CopiedNode::new(100.));
let blank_texture = EmptyImageNode::new(CopiedNode::new(transform), CopiedNode::new(Color::TRANSPARENT)).eval(());
let normal_blend = BlendColorPairNode::new(CopiedNode::new(BlendMode::Normal), CopiedNode::new(100.));
let blend_executor = BlendImageTupleNode::new(ValueNode::new(normal_blend));
blend_executor.eval((blank_texture, stamp)).image
}
@ -282,14 +198,8 @@ pub fn blend_with_mode(background: ImageFrame<Color>, foreground: ImageFrame<Col
)
}
pub struct BrushNode<Bounds, Strokes, Cache> {
bounds: Bounds,
strokes: Strokes,
cache: Cache,
}
#[node_macro::node_fn(BrushNode)]
async fn brush(image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec<BrushStroke>, cache: BrushCache) -> ImageFrame<Color> {
#[node_macro::node(category(""))]
fn brush(_footprint: Footprint, image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec<BrushStroke>, cache: BrushCache) -> ImageFrame<Color> {
let stroke_bbox = strokes.iter().map(|s| s.bounding_box()).reduce(|a, b| a.union(&b)).unwrap_or(AxisAlignedBbox::ZERO);
let image_bbox = Bbox::from_transform(image.transform).to_axis_aligned_bbox();
let bbox = if image_bbox.size().length() < 0.1 { stroke_bbox } else { stroke_bbox.union(&image_bbox) };
@ -332,13 +242,13 @@ async fn brush(image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec
let stroke_origin_in_layer = bbox.start - snap_offset - DVec2::splat(stroke.style.diameter / 2.0);
let stroke_to_layer = DAffine2::from_translation(stroke_origin_in_layer) * DAffine2::from_scale(stroke_size);
let normal_blend = BlendNode::new(CopiedNode::new(BlendMode::Normal), CopiedNode::new(100.));
let normal_blend = BlendColorPairNode::new(CopiedNode::new(BlendMode::Normal), CopiedNode::new(100.));
let blit_node = BlitNode::new(ClonedNode::new(brush_texture), ClonedNode::new(positions), ClonedNode::new(normal_blend));
let blit_target = if idx == 0 {
let target = core::mem::take(&mut brush_plan.first_stroke_texture);
ExtendImageToBoundsNode::new(CopiedNode::new(stroke_to_layer)).eval(target)
} else {
EmptyImageNode::new(CopiedNode::new(Color::TRANSPARENT)).eval(stroke_to_layer)
EmptyImageNode::new(CopiedNode::new(stroke_to_layer), CopiedNode::new(Color::TRANSPARENT)).eval(())
};
blit_node.eval(blit_target)
};
@ -371,14 +281,14 @@ async fn brush(image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec
match stroke.style.blend_mode {
BlendMode::Erase => {
let blend_params = BlendNode::new(CopiedNode::new(BlendMode::Erase), CopiedNode::new(100.));
let blend_params = BlendColorPairNode::new(CopiedNode::new(BlendMode::Erase), CopiedNode::new(100.));
let blit_node = BlitNode::new(ClonedNode::new(brush_texture), ClonedNode::new(positions), ClonedNode::new(blend_params));
erase_restore_mask = blit_node.eval(erase_restore_mask);
}
// Yes, this is essentially the same as the above, but we duplicate to inline the blend mode.
BlendMode::Restore => {
let blend_params = BlendNode::new(CopiedNode::new(BlendMode::Restore), CopiedNode::new(100.));
let blend_params = BlendColorPairNode::new(CopiedNode::new(BlendMode::Restore), CopiedNode::new(100.));
let blit_node = BlitNode::new(ClonedNode::new(brush_texture), ClonedNode::new(positions), ClonedNode::new(blend_params));
erase_restore_mask = blit_node.eval(erase_restore_mask);
}
@ -387,7 +297,7 @@ async fn brush(image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec
}
}
let blend_params = BlendNode::new(CopiedNode::new(BlendMode::MultiplyAlpha), CopiedNode::new(100.0));
let blend_params = BlendColorPairNode::new(CopiedNode::new(BlendMode::MultiplyAlpha), CopiedNode::new(100.0));
let blend_executor = BlendImageTupleNode::new(ValueNode::new(blend_params));
actual_image = blend_executor.eval((actual_image, erase_restore_mask));
}
@ -397,34 +307,12 @@ async fn brush(image: ImageFrame<Color>, bounds: ImageFrame<Color>, strokes: Vec
#[cfg(test)]
mod test {
use super::*;
use crate::raster::*;
#[allow(unused_imports)]
use graphene_core::ops::{AddPairNode, CloneNode};
use graphene_core::raster::*;
use graphene_core::structural::Then;
use graphene_core::transform::{Transform, TransformMut};
use graphene_core::value::{ClonedNode, ValueNode};
use graphene_core::transform::Transform;
use graphene_core::value::ClonedNode;
use glam::DAffine2;
#[test]
fn test_translate_node() {
let image = Image::new(10, 10, Color::TRANSPARENT);
let mut image = ImageFrame { image, ..Default::default() };
image.translate(DVec2::new(1., 2.));
let translate_node = TranslateNode::new(ClonedNode::new(image));
let image = translate_node.eval(DVec2::new(1., 2.));
assert_eq!(image.transform(), DAffine2::from_translation(DVec2::new(2., 4.)));
}
#[test]
fn test_reduce() {
let reduce_node = ReduceNode::new(ClonedNode::new(0u32), ValueNode::new(AddPairNode));
let sum = reduce_node.eval(vec![1, 2, 3, 4, 5].into_iter());
assert_eq!(sum, 15);
}
#[test]
fn test_brush_texture() {
let brush_texture_node = BrushStampGeneratorNode::new(ClonedNode::new(Color::BLACK), ClonedNode::new(100.), ClonedNode::new(100.));
@ -434,26 +322,4 @@ mod test {
// center pixel should be BLACK
assert_eq!(image.sample(DVec2::splat(0.), DVec2::ONE), Some(Color::BLACK));
}
#[test]
fn test_brush() {
let brush_texture_node = BrushStampGeneratorNode::new(ClonedNode::new(Color::BLACK), ClonedNode::new(1.), ClonedNode::new(1.));
let image = brush_texture_node.eval(20.);
let trace = vec![DVec2::new(0., 0.), DVec2::new(10., 0.)];
let trace = ClonedNode::new(trace.into_iter());
let translate_node = TranslateNode::new(ClonedNode::new(image));
let frames = MapNode::new(ValueNode::new(translate_node));
let frames = trace.then(frames).eval(()).collect::<Vec<_>>();
assert_eq!(frames.len(), 2);
let background_bounds = ReduceNode::new(ClonedNode::new(None), ValueNode::new(MergeBoundingBoxNode::new()));
let background_bounds = background_bounds.eval(frames.clone().into_iter());
let background_bounds = ClonedNode::new(background_bounds.unwrap().to_transform());
let background_image = background_bounds.then(EmptyImageNode::new(ClonedNode::new(Color::TRANSPARENT)));
let blend_node = graphene_core::raster::BlendNode::new(ClonedNode::new(BlendMode::Normal), ClonedNode::new(1.));
let final_image = ReduceNode::new(background_image, ValueNode::new(BlendImageTupleNode::new(ValueNode::new(blend_node))));
let final_image = final_image.eval(frames.into_iter());
assert_eq!(final_image.image.height, 20);
assert_eq!(final_image.image.width, 30);
drop(final_image);
}
}
@ -4,30 +4,21 @@ use graph_craft::document::value::TaggedValue;
use graph_craft::document::*;
use graph_craft::proto::*;
use graphene_core::application_io::ApplicationIo;
use graphene_core::quantization::QuantizationChannels;
use graphene_core::raster::*;
use graphene_core::*;
use wgpu_executor::{Bindgroup, PipelineLayout, Shader, ShaderIO, ShaderInput, WgpuExecutor, WgpuShaderInput};
use glam::{DAffine2, DVec2, Mat2, Vec2};
#[cfg(feature = "quantization")]
use graphene_core::quantization::PackedPixel;
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
use crate::wasm_application_io::WasmApplicationIo;
pub struct GpuCompiler<TypingContext, ShaderIO> {
typing_context: TypingContext,
io: ShaderIO,
}
// TODO: Move to graph-craft
#[node_macro::node_fn(GpuCompiler)]
async fn compile_gpu(node: &'input DocumentNode, typing_context: TypingContext, io: ShaderIO) -> Result<compilation_client::Shader, String> {
#[node_macro::node(category("Debug: GPU"))]
async fn compile_gpu<'a: 'n>(_: (), node: &'a DocumentNode, typing_context: TypingContext, io: ShaderIO) -> Result<compilation_client::Shader, String> {
let mut typing_context = typing_context;
let compiler = graph_craft::graphene_compiler::Compiler {};
let DocumentNodeImplementation::Network(ref network) = node.implementation else { panic!() };
@ -69,40 +60,22 @@ impl Clone for ComputePass {
}
}
#[node_macro::node_impl(MapGpuNode)]
#[node_macro::old_node_impl(MapGpuNode)]
async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, editor_api: &'a graphene_core::application_io::EditorApi<WasmApplicationIo>) -> ImageFrame<Color> {
log::debug!("Executing gpu node");
let executor = &editor_api.application_io.as_ref().and_then(|io| io.gpu_executor()).unwrap();
#[cfg(feature = "quantization")]
let quantization = crate::quantization::generate_quantization_from_image_frame(&image);
#[cfg(not(feature = "quantization"))]
let quantization = QuantizationChannels::default();
log::debug!("quantization: {quantization:?}");
#[cfg(feature = "image-compare")]
let img: image::DynamicImage = image::Rgba32FImage::from_raw(image.image.width, image.image.height, bytemuck::cast_vec(image.image.data.clone()))
.unwrap()
.into();
#[cfg(feature = "quantization")]
let image = ImageFrame {
image: Image {
data: image.image.data.iter().map(|c| quantization::quantize_color(*c, quantization)).collect(),
width: image.image.width,
height: image.image.height,
base64_string: None,
},
transform: image.transform,
alpha_blending: image.alpha_blending,
};
// TODO: The cache should be based on the network topology not the node name
let compute_pass_descriptor = if self.cache.lock().as_ref().unwrap().contains_key("placeholder") {
self.cache.lock().as_ref().unwrap().get("placeholder").unwrap().clone()
} else {
let name = "placeholder".to_string();
let Ok(compute_pass_descriptor) = create_compute_pass_descriptor(node, &image, executor, quantization).await else {
let Ok(compute_pass_descriptor) = create_compute_pass_descriptor(node, &image, executor).await else {
log::error!("Error creating compute pass descriptor in 'map_gpu()");
return ImageFrame::empty();
};
@ -122,13 +95,6 @@ async fn map_gpu<'a: 'input>(image: ImageFrame<Color>, node: DocumentNode, edito
log::debug!("executed pipeline");
log::debug!("reading buffer");
let result = executor.read_output_buffer(compute_pass_descriptor.readback_buffer.clone().unwrap()).await.unwrap();
#[cfg(feature = "quantization")]
let colors = bytemuck::pod_collect_to_vec::<u8, PackedPixel>(result.as_slice());
#[cfg(feature = "quantization")]
log::debug!("first color: {:b}", colors[0].0);
#[cfg(feature = "quantization")]
let colors: Vec<_> = colors.iter().map(|c| quantization::dequantize_color(*c, quantization)).collect();
#[cfg(not(feature = "quantization"))]
let colors = bytemuck::pod_collect_to_vec::<u8, Color>(result.as_slice());
log::debug!("first color: {:?}", colors[0]);
@ -161,32 +127,19 @@ impl<Node, EditorApi> MapGpuNode<Node, EditorApi> {
}
}
async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
node: DocumentNode,
image: &ImageFrame<T>,
executor: &&WgpuExecutor,
quantization: QuantizationChannels,
) -> Result<ComputePass, String> {
async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(node: DocumentNode, image: &ImageFrame<T>, executor: &&WgpuExecutor) -> Result<ComputePass, String> {
let compiler = graph_craft::graphene_compiler::Compiler {};
let inner_network = NodeNetwork::value_network(node);
log::debug!("inner_network: {inner_network:?}");
let network = NodeNetwork {
#[cfg(feature = "quantization")]
exports: vec![NodeInput::node(NodeId(5), 0)],
#[cfg(not(feature = "quantization"))]
exports: vec![NodeInput::node(NodeId(3), 0)],
exports: vec![NodeInput::node(NodeId(2), 0)],
nodes: [
DocumentNode {
inputs: vec![NodeInput::Inline(InlineRust::new("i1[(_global_index.y * i0 + _global_index.x) as usize]".into(), concrete![Color]))],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::value::CopiedNode".into()),
..Default::default()
},
DocumentNode {
inputs: vec![NodeInput::network(concrete!(quantization::Quantization), 1)],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::ops::IdentityNode".into()),
..Default::default()
},
DocumentNode {
inputs: vec![NodeInput::network(concrete!(u32), 0)],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::ops::IdentityNode".into()),
@ -201,34 +154,19 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
},*/
/*
DocumentNode {
name: "GetNode".into(),
name: "Get Node".into(),
inputs: vec![NodeInput::node(NodeId(1), 0), NodeInput::node(NodeId(0), 0)],
implementation: DocumentNodeImplementation::ProtoNode("graphene_core::storage::GetNode".into()),
..Default::default()
},*/
#[cfg(feature = "quantization")]
DocumentNode {
inputs: vec![NodeInput::node(NodeId(0), 0), NodeInput::node(NodeId(1), 0)],
implementation: DocumentNodeImplementation::proto("graphene_core::quantization::DeQuantizeNode"),
..Default::default()
},
DocumentNode {
#[cfg(feature = "quantization")]
inputs: vec![NodeInput::node(NodeId(3), 0)],
#[cfg(not(feature = "quantization"))]
inputs: vec![NodeInput::node(NodeId(0), 0)],
implementation: DocumentNodeImplementation::Network(inner_network),
..Default::default()
},
#[cfg(feature = "quantization")]
DocumentNode {
inputs: vec![NodeInput::node(NodeId(4), 0), NodeInput::node(NodeId(1), 0)],
implementation: DocumentNodeImplementation::proto("graphene_core::quantization::QuantizeNode"),
..Default::default()
},
/*
DocumentNode {
name: "SaveNode".into(),
name: "Save Node".into(),
inputs: vec![
NodeInput::node(NodeId(5), 0),
NodeInput::Inline(InlineRust::new(
@ -256,23 +194,11 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
vec![concrete!(u32), concrete!(Color)],
vec![concrete!(Color)],
ShaderIO {
#[cfg(feature = "quantization")]
inputs: vec![
ShaderInput::UniformBuffer((), concrete!(u32)),
ShaderInput::StorageBuffer((), concrete!(PackedPixel)),
ShaderInput::UniformBuffer((), concrete!(quantization::QuantizationChannels)),
// ShaderInput::Constant(gpu_executor::GPUConstant::GlobalInvocationId),
ShaderInput::OutputBuffer((), concrete!(PackedPixel)),
],
#[cfg(not(feature = "quantization"))]
inputs: vec![
ShaderInput::UniformBuffer((), concrete!(u32)),
ShaderInput::StorageBuffer((), concrete!(Color)),
ShaderInput::OutputBuffer((), concrete!(Color)),
],
#[cfg(feature = "quantization")]
output: ShaderInput::OutputBuffer((), concrete!(PackedPixel)),
#[cfg(not(feature = "quantization"))]
output: ShaderInput::OutputBuffer((), concrete!(Color)),
},
)
@ -281,6 +207,17 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
// return ImageFrame::empty();
let len: usize = image.image.data.len();
let storage_buffer = executor
.create_storage_buffer(
image.image.data.clone(),
StorageBufferOptions {
cpu_writable: false,
gpu_writable: true,
cpu_readable: false,
storage: true,
},
)
.unwrap();
/*
let canvas = editor_api.application_io.create_surface();
@ -298,25 +235,7 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
return frame;*/
log::debug!("creating buffer");
let width_uniform = executor.create_uniform_buffer(image.image.width).unwrap();
#[cfg(not(feature = "quantization"))]
core::hint::black_box(quantization);
#[cfg(feature = "quantization")]
let quantization_uniform = executor.create_uniform_buffer(quantization).unwrap();
let storage_buffer = executor
.create_storage_buffer(
image.image.data.clone(),
StorageBufferOptions {
cpu_writable: false,
gpu_writable: true,
cpu_readable: false,
storage: true,
},
)
.unwrap();
let width_uniform = Arc::new(width_uniform);
#[cfg(feature = "quantization")]
let quantization_uniform = Arc::new(quantization_uniform);
let storage_buffer = Arc::new(storage_buffer);
let output_buffer = executor.create_output_buffer(len, concrete!(Color), false).unwrap();
let output_buffer = Arc::new(output_buffer);
@ -324,10 +243,7 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
let readback_buffer = Arc::new(readback_buffer);
log::debug!("created buffer");
let bind_group = Bindgroup {
#[cfg(feature = "quantization")]
buffers: vec![width_uniform.clone(), storage_buffer.clone(), quantization_uniform.clone()],
#[cfg(not(feature = "quantization"))]
buffers: vec![width_uniform, storage_buffer],
buffers: vec![width_uniform.into(), storage_buffer],
};
let shader = Shader {
@ -351,72 +267,9 @@ async fn create_compute_pass_descriptor<T: Clone + Pixel + StaticTypeSized>(
readback_buffer: Some(readback_buffer),
})
}
/*
#[node_macro::node_fn(MapGpuNode)]
async fn map_gpu(inputs: Vec<ShaderInput<<NewExecutor as GpuExecutor>::BufferHandle>>, shader: &'any_input compilation_client::Shader) {
use graph_craft::executor::Executor;
let executor = NewExecutor::new().unwrap();
for input in shader.io.inputs.iter() {
let buffer = executor.create_storage_buffer(&self, data, options)
let buffer = executor.create_buffer(input.size).unwrap();
executor.write_buffer(buffer, input.data).unwrap();
}
todo!();
/*
let executor: GpuExecutor = GpuExecutor::new(Context::new().await.unwrap(), shader.into(), "gpu::eval".into()).unwrap();
let data: Vec<_> = input.into_iter().collect();
let result = executor.execute(Box::new(data)).unwrap();
let result = dyn_any::downcast::<Vec<_O>>(result).unwrap();
*result
*/
}
pub struct MapGpuSingleImageNode<N> {
node: N,
}
#[node_macro::node_fn(MapGpuSingleImageNode)]
fn map_gpu_single_image(input: Image<Color>, node: String) -> Image<Color> {
use graph_craft::document::*;
use graph_craft::ProtoNodeIdentifier;
let identifier = ProtoNodeIdentifier { name: std::borrow::Cow::Owned(node) };
let network = NodeNetwork {
inputs: vec![NodeId(0)],
disabled: vec![],
previous_outputs: None,
outputs: vec![NodeInput::node(NodeId(0), 0)],
nodes: [(
NodeId(0),
DocumentNode {
name: "Image Filter".into(),
inputs: vec![NodeInput::Network(concrete!(Color))],
implementation: DocumentNodeImplementation::ProtoNode(identifier),
metadata: DocumentNodeMetadata::default(),
..Default::default()
},
)]
.into_iter()
.collect(),
};
let value_network = ValueNode::new(network);
let map_node = MapGpuNode::new(value_network);
let data = map_node.eval(input.data.clone());
Image { data, ..input }
}
*/
#[derive(Debug, Clone, Copy)]
pub struct BlendGpuImageNode<Background, B, O> {
background: Background,
blend_mode: B,
opacity: O,
}
#[node_macro::node_fn(BlendGpuImageNode)]
async fn blend_gpu_image(foreground: ImageFrame<Color>, background: ImageFrame<Color>, blend_mode: BlendMode, opacity: f64) -> ImageFrame<Color> {
#[node_macro::node(category("Debug: GPU"))]
async fn blend_gpu_image(_: (), foreground: ImageFrame<Color>, background: ImageFrame<Color>, blend_mode: BlendMode, opacity: f64) -> ImageFrame<Color> {
let foreground_size = DVec2::new(foreground.image.width as f64, foreground.image.height as f64);
let background_size = DVec2::new(background.image.width as f64, background.image.height as f64);
// Transforms a point from the background image to the foreground image
@ -1,17 +1,9 @@
use crate::Node;
pub struct GetNode;
#[node_macro::node_fn(GetNode)]
async fn get_node(url: String) -> reqwest::Response {
#[node_macro::node(category("Network"))]
async fn get_request(_: (), url: String) -> reqwest::Response {
reqwest::get(url).await.unwrap()
}
pub struct PostNode<Body> {
body: Body,
}
#[node_macro::node_fn(PostNode)]
async fn post_node(url: String, body: String) -> reqwest::Response {
#[node_macro::node(category("Network"))]
async fn post_request(_: (), url: String, body: String) -> reqwest::Response {
reqwest::Client::new().post(url).body(body).send().await.unwrap()
}
@ -1,23 +1,24 @@
use graphene_core::raster::ImageFrame;
use graphene_core::transform::Footprint;
use graphene_core::Color;
use graphene_core::Node;
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ImageColorPaletteNode<MaxSize> {
max_size: MaxSize,
}
#[node_macro::node_fn(ImageColorPaletteNode)]
fn image_color_palette(frame: ImageFrame<Color>, max_size: u32) -> Vec<Color> {
const GRID: f32 = 3.0;
#[node_macro::node(category("Raster"))]
async fn image_color_palette<F: 'n + Send>(
#[implementations((), Footprint)] footprint: F,
#[implementations(((), ImageFrame<Color>), (Footprint, ImageFrame<Color>))] image: impl Node<F, Output = ImageFrame<Color>>,
#[min(1.)]
#[max(28.)]
max_size: u32,
) -> Vec<Color> {
const GRID: f32 = 3.;
let bins = GRID * GRID * GRID;
let mut histogram: Vec<usize> = vec![0; (bins + 1.0) as usize];
let mut colors: Vec<Vec<Color>> = vec![vec![]; (bins + 1.0) as usize];
for pixel in frame.image.data.iter() {
let image = image.eval(footprint).await;
for pixel in image.image.data.iter() {
let r = pixel.r() * GRID;
let g = pixel.g() * GRID;
let b = pixel.b() * GRID;
@ -57,28 +58,34 @@ fn image_color_palette(frame: ImageFrame<Color>, max_size: u32) -> Vec<Color> {
palette.push(color);
}
return palette;
palette
}
#[cfg(test)]
mod test {
use super::*;
use graphene_core::{raster::Image, value::CopiedNode};
use graph_craft::generic::FnNode;
use graphene_core::{raster::Image, value::CopiedNode, Node};
#[test]
fn test_image_color_palette() {
assert_eq!(
ImageColorPaletteNode { max_size: CopiedNode(1u32) }.eval(ImageFrame {
image: Image {
width: 100,
height: 100,
data: vec![Color::from_rgbaf32(0.0, 0.0, 0.0, 1.0).unwrap(); 10000],
base64_string: None,
},
..Default::default()
let node = ImageColorPaletteNode {
max_size: CopiedNode(1u32),
image: FnNode::new(|_| {
Box::pin(async move {
ImageFrame {
image: Image {
width: 100,
height: 100,
data: vec![Color::from_rgbaf32(0.0, 0.0, 0.0, 1.0).unwrap(); 10000],
base64_string: None,
},
..Default::default()
}
})
}),
[Color::from_rgbaf32(0.0, 0.0, 0.0, 1.0).unwrap()]
);
};
assert_eq!(futures::executor::block_on(node.eval(())), [Color::from_rgbaf32(0.0, 0.0, 0.0, 1.0).unwrap()]);
}
}
@ -1,126 +0,0 @@
use std::collections::hash_map::HashMap;
use graphene_core::raster::{Color, ImageFrame};
use graphene_core::Node;
fn apply_mask(image_frame: &mut ImageFrame<Color>, x: usize, y: usize, multiplier: u8) {
let color = &mut image_frame.image.data[y * image_frame.image.width as usize + x];
let color8 = color.to_rgba8_srgb();
*color = Color::from_rgba8_srgb(color8[0] * multiplier, color8[1] * multiplier, color8[2] * multiplier, color8[3] * multiplier);
}
pub struct Mask {
pub data: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl Mask {
fn sample(&self, u: f32, v: f32) -> u8 {
let x = (u * (self.width as f32)) as usize;
let y = (v * (self.height as f32)) as usize;
self.data[y * self.width + x]
}
}
fn image_segmentation(input_image: &ImageFrame<Color>, input_mask: &Mask) -> Vec<ImageFrame<Color>> {
const NUM_LABELS: usize = u8::MAX as usize;
let mut result = Vec::<ImageFrame<Color>>::with_capacity(NUM_LABELS);
let mut current_label = 0_usize;
let mut label_appeared = [false; NUM_LABELS + 1];
let mut max_label = 0_usize;
if input_mask.data.is_empty() {
warn!("The mask for the segmentation node is empty!");
return vec![ImageFrame::empty()];
}
result.push(input_image.clone());
let result_last = result.last_mut().unwrap();
for y in 0..input_image.image.height {
let v = (y as f32) / (input_image.image.height as f32);
for x in 0..input_image.image.width {
let u = (x as f32) / (input_image.image.width as f32);
let label = input_mask.sample(u, v) as usize;
let multiplier = (label == current_label) as u8;
apply_mask(result_last, x as usize, y as usize, multiplier);
if label < NUM_LABELS {
label_appeared[label] = true;
max_label = max_label.max(label);
}
}
}
if !label_appeared[current_label] {
result.pop();
}
for i in 1..=max_label.max(NUM_LABELS) {
current_label = i;
if !label_appeared[current_label] {
continue;
}
result.push(input_image.clone());
let result_last = result.last_mut().unwrap();
for y in 0..input_image.image.height {
let v = (y as f32) / (input_image.image.height as f32);
for x in 0..input_image.image.width {
let u = (x as f32) / (input_image.image.width as f32);
let label = input_mask.sample(u, v) as usize;
let multiplier = (label == current_label) as u8;
apply_mask(result_last, x as usize, y as usize, multiplier);
}
}
}
result
}
fn convert_image_to_mask(input: &ImageFrame<Color>) -> Vec<u8> {
let mut result = vec![0_u8; (input.image.width * input.image.height) as usize];
let mut colors = HashMap::<[u8; 4], usize>::new();
let mut last_value = 0_usize;
for (color, result) in input.image.data.iter().zip(result.iter_mut()) {
let color = color.to_rgba8_srgb();
if let Some(value) = colors.get(&color) {
*result = *value as u8;
} else {
if last_value > u8::MAX as usize {
warn!("The limit for number of segments ({}) has been exceeded!", u8::MAX);
break;
}
*result = last_value as u8;
colors.insert(color, last_value);
last_value += 1;
}
}
result
}
#[derive(Debug)]
pub struct ImageSegmentationNode<MaskImage> {
pub(crate) mask_image: MaskImage,
}
#[node_macro::node_fn(ImageSegmentationNode)]
pub(crate) fn image_segmentation(image: ImageFrame<Color>, mask_image: ImageFrame<Color>) -> Vec<ImageFrame<Color>> {
let mask_data = convert_image_to_mask(&mask_image);
let mask = Mask {
data: mask_data,
width: mask_image.image.width as usize,
height: mask_image.image.height as usize,
};
image_segmentation(&image, &mask)
}
@ -7,6 +7,8 @@ extern crate log;
pub mod raster;
pub mod text;
pub mod vector;
pub mod http;
@ -16,13 +18,8 @@ pub mod any;
#[cfg(feature = "gpu")]
pub mod gpu_nodes;
#[cfg(feature = "quantization")]
pub mod quantization;
pub use graphene_core::*;
pub mod image_segmentation;
pub mod image_color_palette;
pub mod brush;
@ -1,105 +0,0 @@
use autoquant::packing::ErrorFunction;
use graphene_core::quantization::*;
use graphene_core::raster::{Color, ImageFrame};
use graphene_core::Node;
/// The `GenerateQuantizationNode` encodes the brightness of each channel of the image as an integer number
/// signified by the samples parameter. This node is used to asses the loss of visual information when
/// quantizing the image using different fit functions.
pub struct GenerateQuantizationNode<N, M> {
samples: N,
function: M,
}
#[node_macro::node_fn(GenerateQuantizationNode)]
fn generate_quantization_fn(image_frame: ImageFrame<Color>, samples: u32, function: u32) -> [Quantization; 4] {
generate_quantization_from_image_frame(&image_frame)
}
pub fn generate_quantization_from_image_frame(image_frame: &ImageFrame<Color>) -> [Quantization; 4] {
let image = &image_frame.image;
let len = image.data.len().min(10000);
let data = image
.data
.iter()
.enumerate()
.filter(|(i, _)| i % (image.data.len() / len) == 0)
.flat_map(|(_, x)| vec![x.r() as f64, x.g() as f64, x.b() as f64, x.a() as f64])
.collect::<Vec<_>>();
generate_quantization(data, len)
}
fn generate_quantization(data: Vec<f64>, samples: usize) -> [Quantization; 4] {
let red = create_distribution(data.clone(), samples, 0);
let green = create_distribution(data.clone(), samples, 1);
let blue = create_distribution(data.clone(), samples, 2);
let alpha = create_distribution(data, samples, 3);
let fit_red = autoquant::calculate_error_function(&red, 1, &red);
let fit_green = autoquant::calculate_error_function(&green, 1, &green);
let fit_blue = autoquant::calculate_error_function(&blue, 1, &blue);
let fit_alpha = autoquant::calculate_error_function(&alpha, 1, &alpha);
let red_error: ErrorFunction<10> = autoquant::packing::ErrorFunction::new(fit_red.as_slice());
let green_error: ErrorFunction<10> = autoquant::packing::ErrorFunction::new(fit_green.as_slice());
let blue_error: ErrorFunction<10> = autoquant::packing::ErrorFunction::new(fit_blue.as_slice());
let alpha_error: ErrorFunction<10> = autoquant::packing::ErrorFunction::new(fit_alpha.as_slice());
let merged: ErrorFunction<20> = autoquant::packing::merge_error_functions(&red_error, &green_error);
let merged: ErrorFunction<30> = autoquant::packing::merge_error_functions(&merged, &blue_error);
let merged: ErrorFunction<40> = autoquant::packing::merge_error_functions(&merged, &alpha_error);
let bin_size = 8;
let mut distributions = [red, green, blue, alpha].into_iter();
let bits = &merged.bits[bin_size];
core::array::from_fn(|i| {
let fit = autoquant::models::OptimizedLin::new(distributions.next().unwrap(), (1 << bits[i]) - 1);
let parameters = fit.parameters();
Quantization::new(parameters[0] as f32, parameters[1] as f32, bits[i] as u32)
})
}
/*
// TODO: make this work with generic size parameters
fn generate_quantization<const N: usize>(data: Vec<f64>, samples: usize, channels: usize) -> [Quantization; N] {
let mut quantizations = Vec::new();
let mut merged_error: Option<ErrorFunction<10>> = None;
let bin_size = 32;
for i in 0..channels {
let channel_data = create_distribution(data.clone(), samples, i);
let fit = autoquant::calculate_error_function(&channel_data, 0, &channel_data);
let error: ErrorFunction<10> = autoquant::packing::ErrorFunction::new(fit.as_slice());
// Merge current error function with previous ones
merged_error = match merged_error {
Some(prev_error) => Some(autoquant::packing::merge_error_functions(&prev_error, &error)),
None => Some(error.clone()),
};
println!("Merged: {merged_error:?}");
let bits = merged_error.as_ref().unwrap().bits.iter().map(|x| x[i]).collect::<Vec<_>>();
let model_fit = autoquant::models::OptimizedLin::new(channel_data, 1 << bits[bin_size]);
let parameters = model_fit.parameters();
let quantization = Quantization::new(parameters[0] as f32, parameters[1] as u32, bits[bin_size] as u32);
quantizations.push(quantization);
}
core::array::from_fn(|x| quantizations[x])
}*/
fn create_distribution(data: Vec<f64>, samples: usize, channel: usize) -> Vec<(f64, f64)> {
let data: Vec<f64> = data.chunks(4 * (data.len() / (4 * samples.min(data.len() / 4)))).map(|x| x[channel]).collect();
let max = *data.iter().max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)).unwrap();
let data: Vec<f64> = data.iter().map(|x| x / max).collect();
dbg!(max);
// let data = autoquant::generate_normal_distribution(3.0, 1.1, 1000);
// data.iter_mut().for_each(|x| *x = x.abs());
let mut dist = autoquant::integrate_distribution(data);
autoquant::drop_duplicates(&mut dist);
let dist = autoquant::normalize_distribution(dist.as_slice());
dist
}
@ -1,16 +1,15 @@
|
|||
use crate::wasm_application_io::WasmEditorApi;
|
||||
|
||||
use dyn_any::{DynAny, StaticType};
|
||||
use dyn_any::DynAny;
|
||||
use graph_craft::imaginate_input::{ImaginateController, ImaginateMaskStartingFill, ImaginateSamplingMethod};
|
||||
use graph_craft::proto::DynFuture;
|
||||
use graphene_core::raster::bbox::{AxisAlignedBbox, Bbox};
|
||||
use graphene_core::raster::bbox::Bbox;
|
||||
use graphene_core::raster::{
|
||||
Alpha, Bitmap, BitmapMut, BlendMode, BlendNode, CellularDistanceFunction, CellularReturnType, DomainWarpType, FractalType, Image, ImageFrame, Linear, LinearChannel, Luminance, NoiseType, Pixel,
|
||||
RGBMut, RedGreenBlue, Sample,
|
||||
Alpha, Bitmap, BitmapMut, CellularDistanceFunction, CellularReturnType, DomainWarpType, FractalType, Image, ImageFrame, Linear, LinearChannel, Luminance, NoiseType, Pixel, RGBMut, RedGreenBlue,
|
||||
Sample,
|
||||
};
|
||||
use graphene_core::transform::{Footprint, Transform};
|
||||
use graphene_core::value::CopiedNode;
|
||||
use graphene_core::{AlphaBlending, Color, Node, WasmNotSend};
|
||||
use graphene_core::{AlphaBlending, Color, Node};
|
||||
|
||||
use fastnoise_lite;
|
||||
use glam::{DAffine2, DVec2, Vec2};
|
||||
|
@@ -20,7 +19,6 @@ use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::path::Path;

#[derive(Debug, DynAny)]
pub enum Error {
@@ -34,40 +32,9 @@ impl From<std::io::Error> for Error {
	}
}

pub trait FileSystem {
	fn open<P: AsRef<Path>>(&self, path: P) -> Result<Box<dyn std::io::Read>, Error>;
}

#[derive(Clone)]
pub struct StdFs;
impl FileSystem for StdFs {
	fn open<P: AsRef<Path>>(&self, path: P) -> Result<Reader, Error> {
		Ok(Box::new(std::fs::File::open(path)?))
	}
}
type Reader = Box<dyn std::io::Read>;

pub struct FileNode<FileSystem> {
	fs: FileSystem,
}
#[node_macro::node_fn(FileNode)]
fn file_node<P: AsRef<Path>, FS: FileSystem>(path: P, fs: FS) -> Result<Reader, Error> {
	fs.open(path)
}

pub struct BufferNode;
#[node_macro::node_fn(BufferNode)]
fn buffer_node<R: std::io::Read>(reader: R) -> Result<Vec<u8>, Error> {
	Ok(std::io::Read::bytes(reader).collect::<Result<Vec<_>, _>>()?)
}

pub struct SampleNode<ImageFrame> {
	image_frame: ImageFrame,
}

#[node_macro::node_fn(SampleNode)]
fn sample(footprint: Footprint, image_frame: ImageFrame<Color>) -> ImageFrame<Color> {
	// resize the image using the image crate
#[node_macro::node(category("Debug: Raster"))]
fn sample_image(footprint: Footprint, image_frame: ImageFrame<Color>) -> ImageFrame<Color> {
	// Resize the image using the image crate
	let image = image_frame.image;
	let data = bytemuck::cast_vec(image.data);
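The removed FileNode/BufferNode pair did little more than open a path and drain the reader into a byte vector. A minimal std-only sketch of that behavior, outside the node-graph wrapper and with a hypothetical function name:

use std::io::Read;
use std::path::Path;

// Open a file and collect all of its bytes, propagating any I/O error.
fn read_all_bytes<P: AsRef<Path>>(path: P) -> std::io::Result<Vec<u8>> {
	let mut bytes = Vec::new();
	std::fs::File::open(path)?.read_to_end(&mut bytes)?;
	Ok(bytes)
}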
@@ -129,7 +96,7 @@ pub struct MapImageNode<P, MapFn> {
	_p: PhantomData<P>,
}

#[node_macro::node_fn(MapImageNode<_P>)]
#[node_macro::old_node_fn(MapImageNode<_P>)]
fn map_image<MapFn, _P, Img: BitmapMut<Pixel = _P>>(image: Img, map_fn: &'input MapFn) -> Img
where
	MapFn: for<'any_input> Node<'any_input, _P, Output = _P> + 'input,
@@ -148,8 +115,8 @@ pub struct InsertChannelNode<P, S, Insertion, TargetChannel> {
	_s: PhantomData<S>,
}

#[node_macro::node_fn(InsertChannelNode<_P, _S>)]
fn insert_channel_node<
#[node_macro::old_node_fn(InsertChannelNode<_P, _S>)]
fn insert_channel<
	// _P is the color of the input image.
	_P: RGBMut,
	_S: Pixel + Luminance,
@@ -195,7 +162,7 @@ pub struct MaskImageNode<P, S, Stencil> {
	_s: PhantomData<S>,
}

#[node_macro::node_fn(MaskImageNode<_P, _S>)]
#[node_macro::old_node_fn(MaskImageNode<_P, _S>)]
fn mask_image<
	// _P is the color of the input image. It must have an alpha channel because that is going to
	// be modified by the mask
@@ -247,7 +214,7 @@ pub struct BlendImageTupleNode<P, Fg, MapFn> {
	_fg: PhantomData<Fg>,
}

#[node_macro::node_fn(BlendImageTupleNode<_P, _Fg>)]
#[node_macro::old_node_fn(BlendImageTupleNode<_P, _Fg>)]
fn blend_image_tuple<_P: Alpha + Pixel + Debug, MapFn, _Fg: Sample<Pixel = _P> + Transform>(images: (ImageFrame<_P>, _Fg), map_fn: &'input MapFn) -> ImageFrame<_P>
where
	MapFn: for<'any_input> Node<'any_input, (_P, _P), Output = _P> + 'input + Clone,
@@ -257,72 +224,6 @@ where
	blend_image(foreground, background, map_fn)
}

#[derive(Debug, Clone, Copy)]
pub struct BlendImageNode<P, Background, MapFn> {
	background: Background,
	map_fn: MapFn,
	_p: PhantomData<P>,
}

#[node_macro::node_fn(BlendImageNode<_P>)]
async fn blend_image_node<_P: Alpha + Pixel + Debug + WasmNotSend + Sync + 'static, MapFn, Forground: Sample<Pixel = _P> + Transform + Send>(
	foreground: Forground,
	background: ImageFrame<_P>,
	map_fn: &'input MapFn,
) -> ImageFrame<_P>
where
	for<'a> MapFn: Node<'a, (_P, _P), Output = _P> + 'input,
{
	blend_new_image(foreground, background, map_fn)
}

#[derive(Debug, Clone, Copy)]
pub struct BlendReverseImageNode<P, Background, MapFn> {
	background: Background,
	map_fn: MapFn,
	_p: PhantomData<P>,
}

#[node_macro::node_fn(BlendReverseImageNode<_P>)]
fn blend_image_node<_P: Alpha + Pixel + Debug, MapFn, Background: Transform + Sample<Pixel = _P>>(foreground: ImageFrame<_P>, background: Background, map_fn: &'input MapFn) -> ImageFrame<_P>
where
	MapFn: for<'any_input> Node<'any_input, (_P, _P), Output = _P> + 'input,
{
	blend_new_image(background, foreground, map_fn)
}

fn blend_new_image<'input, _P: Alpha + Pixel + Debug, MapFn, Frame: Sample<Pixel = _P> + Transform>(foreground: Frame, background: ImageFrame<_P>, map_fn: &'input MapFn) -> ImageFrame<_P>
where
	MapFn: Node<'input, (_P, _P), Output = _P>,
{
	let foreground_aabb = Bbox::unit().affine_transform(foreground.transform()).to_axis_aligned_bbox();
	let background_aabb = Bbox::unit().affine_transform(background.transform()).to_axis_aligned_bbox();

	let Some(aabb) = foreground_aabb.union_non_empty(&background_aabb) else {
		return ImageFrame::empty();
	};

	if background_aabb.contains(foreground_aabb.start) && background_aabb.contains(foreground_aabb.end) {
		return blend_image(foreground, background, map_fn);
	}

	// Clamp the foreground image to the background image
	let start = aabb.start.as_uvec2();
	let end = aabb.end.as_uvec2();

	let new_background = Image::new(end.x - start.x, end.y - start.y, _P::TRANSPARENT);
	let size = DVec2::new(new_background.width as f64, new_background.height as f64);
	let transfrom = DAffine2::from_scale_angle_translation(size, 0., start.as_dvec2());
	let mut new_background = ImageFrame {
		image: new_background,
		transform: transfrom,
		alpha_blending: background.alpha_blending,
	};

	new_background = blend_image(background, new_background, map_fn);
	blend_image(foreground, new_background, map_fn)
}

fn blend_image<'input, _P: Alpha + Pixel + Debug, MapFn, Frame: Sample<Pixel = _P> + Transform, Background: BitmapMut<Pixel = _P> + Transform + Sample<Pixel = _P>>(
	foreground: Frame,
	background: Background,
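The removed blend_new_image leans on two bounding-box operations: a union that grows the background to cover both layers, and a containment test that skips that work when the background already covers the foreground. A stand-alone sketch of those two operations, using a hypothetical Aabb type rather than Graphite's AxisAlignedBbox:

// Minimal axis-aligned bounding box with the union and containment checks used above.
#[derive(Clone, Copy, Debug)]
struct Aabb {
	min: (f64, f64),
	max: (f64, f64),
}

impl Aabb {
	// True when the point lies inside or on the box boundary.
	fn contains(&self, p: (f64, f64)) -> bool {
		p.0 >= self.min.0 && p.0 <= self.max.0 && p.1 >= self.min.1 && p.1 <= self.max.1
	}

	// Smallest box covering both inputs, the basis for the enlarged background.
	fn union(&self, other: &Aabb) -> Aabb {
		Aabb {
			min: (self.min.0.min(other.min.0), self.min.1.min(other.min.1)),
			max: (self.max.0.max(other.max.0), self.max.1.max(other.max.1)),
		}
	}
}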
@@ -370,30 +271,13 @@ where
	background
}

#[derive(Debug, Clone, Copy)]
pub struct ExtendImageNode<Background> {
	background: Background,
}

#[node_macro::node_fn(ExtendImageNode)]
fn extend_image_node(foreground: ImageFrame<Color>, background: ImageFrame<Color>) -> ImageFrame<Color> {
	let foreground_aabb = Bbox::unit().affine_transform(foreground.transform()).to_axis_aligned_bbox();
	let background_aabb = Bbox::unit().affine_transform(background.transform()).to_axis_aligned_bbox();

	if foreground_aabb.contains(background_aabb.start) && foreground_aabb.contains(background_aabb.end) {
		return foreground;
	}

	blend_image(foreground, background, &BlendNode::new(CopiedNode::new(BlendMode::Normal), CopiedNode::new(100.)))
}

#[derive(Debug, Clone, Copy)]
pub struct ExtendImageToBoundsNode<Bounds> {
	bounds: Bounds,
}

#[node_macro::node_fn(ExtendImageToBoundsNode)]
fn extend_image_to_bounds_node(image: ImageFrame<Color>, bounds: DAffine2) -> ImageFrame<Color> {
#[node_macro::old_node_fn(ExtendImageToBoundsNode)]
fn extend_image_to_bounds(image: ImageFrame<Color>, bounds: DAffine2) -> ImageFrame<Color> {
	let image_aabb = Bbox::unit().affine_transform(image.transform()).to_axis_aligned_bbox();
	let bounds_aabb = Bbox::unit().affine_transform(bounds.transform()).to_axis_aligned_bbox();
	if image_aabb.contains(bounds_aabb.start) && image_aabb.contains(bounds_aabb.end) {
@@ -401,7 +285,7 @@ fn extend_image_to_bounds_node(image: ImageFrame<Color>, bounds: DAffine2) -> Im
	}

	if image.image.width == 0 || image.image.height == 0 {
		return EmptyImageNode::new(CopiedNode::new(Color::TRANSPARENT)).eval(bounds);
		return empty_image((), bounds, Color::TRANSPARENT);
	}

	let orig_image_scale = DVec2::new(image.image.width as f64, image.image.height as f64);
@@ -433,32 +317,8 @@ fn extend_image_to_bounds_node(image: ImageFrame<Color>, bounds: DAffine2) -> Im
	}
}

#[derive(Clone, Debug, PartialEq)]
pub struct MergeBoundingBoxNode<Data> {
	_data: PhantomData<Data>,
}

#[node_macro::node_fn(MergeBoundingBoxNode<_Data>)]
fn merge_bounding_box_node<_Data: Transform>(input: (Option<AxisAlignedBbox>, _Data)) -> Option<AxisAlignedBbox> {
	let (initial_aabb, data) = input;

	let snd_aabb = Bbox::unit().affine_transform(data.transform()).to_axis_aligned_bbox();

	if let Some(fst_aabb) = initial_aabb {
		fst_aabb.union_non_empty(&snd_aabb)
	} else {
		Some(snd_aabb)
	}
}

#[derive(Clone, Debug, PartialEq)]
pub struct EmptyImageNode<P, FillColor> {
	pub color: FillColor,
	_p: PhantomData<P>,
}

#[node_macro::node_fn(EmptyImageNode<_P>)]
fn empty_image<_P: Pixel>(transform: DAffine2, color: _P) -> ImageFrame<_P> {
#[node_macro::node(category("Debug: Raster"))]
fn empty_image<P: Pixel>(_: (), transform: DAffine2, #[implementations(Color)] color: P) -> ImageFrame<P> {
	let width = transform.transform_vector2(DVec2::new(1., 0.)).length() as u32;
	let height = transform.transform_vector2(DVec2::new(0., 1.)).length() as u32;
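The empty_image signature derives the raster dimensions from the transform itself: the width and height are the lengths of the transformed unit axes. The same computation in isolation, assuming only the glam crate and a hypothetical helper name:

use glam::{DAffine2, DVec2};

// An image placed by `transform` covers the transformed unit square, so its pixel
// dimensions are the lengths of the transformed X and Y basis vectors.
fn raster_size(transform: DAffine2) -> (u32, u32) {
	let width = transform.transform_vector2(DVec2::new(1., 0.)).length() as u32;
	let height = transform.transform_vector2(DVec2::new(0., 1.)).length() as u32;
	(width, height)
}

// For a pure scale of 200 by 100 this yields a 200 by 100 pixel image.
fn example() -> (u32, u32) {
	raster_size(DAffine2::from_scale(DVec2::new(200., 100.)))
}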
@@ -575,7 +435,7 @@ pub struct ImageFrameNode<P, Transform> {
	transform: Transform,
	_p: PhantomData<P>,
}
#[node_macro::node_fn(ImageFrameNode<_P>)]
#[node_macro::old_node_fn(ImageFrameNode<_P>)]
fn image_frame<_P: Pixel>(image: Image<_P>, transform: DAffine2) -> graphene_core::raster::ImageFrame<_P> {
	graphene_core::raster::ImageFrame {
		image,
@@ -584,42 +444,7 @@ fn image_frame<_P: Pixel>(image: Image<_P>, transform: DAffine2) -> graphene_cor
	}
}

#[derive(Debug, Clone, Copy)]
pub struct NoisePatternNode<
	Clip,
	Seed,
	Scale,
	NoiseType,
	DomainWarpType,
	DomainWarpAmplitude,
	FractalType,
	FractalOctaves,
	FractalLacunarity,
	FractalGain,
	FractalWeightedStrength,
	FractalPingPongStrength,
	CellularDistanceFunction,
	CellularReturnType,
	CellularJitter,
> {
	clip: Clip,
	seed: Seed,
	scale: Scale,
	noise_type: NoiseType,
	domain_warp_type: DomainWarpType,
	domain_warp_amplitude: DomainWarpAmplitude,
	fractal_type: FractalType,
	fractal_octaves: FractalOctaves,
	fractal_lacunarity: FractalLacunarity,
	fractal_gain: FractalGain,
	fractal_weighted_strength: FractalWeightedStrength,
	fractal_ping_pong_strength: FractalPingPongStrength,
	cellular_distance_function: CellularDistanceFunction,
	cellular_return_type: CellularReturnType,
	cellular_jitter: CellularJitter,
}

#[node_macro::node_fn(NoisePatternNode)]
#[node_macro::node(category("Raster: Generator"))]
#[allow(clippy::too_many_arguments)]
fn noise_pattern(
	footprint: Footprint,
@@ -769,11 +594,8 @@ fn noise_pattern(
	}
}

#[derive(Debug, Clone, Copy)]
pub struct MandelbrotNode;

#[node_macro::node_fn(MandelbrotNode)]
fn mandelbrot_node(footprint: Footprint) -> ImageFrame<Color> {
#[node_macro::node(category("Raster: Generator"))]
fn mandelbrot(footprint: Footprint) -> ImageFrame<Color> {
	let viewport_bounds = footprint.viewport_bounds_in_local_space();

	let image_bounds = Bbox::from_transform(DAffine2::IDENTITY).to_axis_aligned_bbox();
@@ -801,7 +623,7 @@ fn mandelbrot_node(footprint: Footprint) -> ImageFrame<Color> {
			let pos = Vec2::new(x as f32, y as f32);
			let c = pos * scale + coordinate_offset;

			let iter = mandelbrot(c, max_iter);
			let iter = mandelbrot_impl(c, max_iter);
			data.push(map_color(iter, max_iter));
		}
	}
@@ -818,7 +640,7 @@ fn mandelbrot_node(footprint: Footprint) -> ImageFrame<Color> {
}

#[inline(always)]
fn mandelbrot(c: Vec2, max_iter: usize) -> usize {
fn mandelbrot_impl(c: Vec2, max_iter: usize) -> usize {
	let mut z = Vec2::new(0.0, 0.0);
	for i in 0..max_iter {
		z = Vec2::new(z.x * z.x - z.y * z.y, 2.0 * z.x * z.y) + c;
@@ -833,21 +655,3 @@ fn map_color(iter: usize, max_iter: usize) -> Color {
	let v = iter as f32 / max_iter as f32;
	Color::from_rgbaf32_unchecked(v, v, v, 1.)
}

#[cfg(test)]
mod test {

	#[test]
	fn load_image() {
		// TODO: reenable this test
		/*
		let image = image_node::<&str>();

		let grayscale_picture = image.then(MapResultNode::new(&image));
		let export = export_image_node();

		let picture = grayscale_picture.eval("test-image-1.png").expect("Failed to load image");
		export.eval((picture, "test-image-1-result.png")).unwrap();
		*/
	}
}
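Together, mandelbrot_impl and map_color implement a plain escape-time fractal: iterate z to z squared plus c until it escapes, then map the iteration count to a grayscale value. A self-contained sketch without glam or the Color type, assuming the conventional escape radius of 2 (the escape check itself is not visible in the hunk above):

// Escape-time iteration count for the point c = (cx, cy).
fn escape_time(cx: f32, cy: f32, max_iter: usize) -> usize {
	let (mut zx, mut zy) = (0.0_f32, 0.0_f32);
	for i in 0..max_iter {
		let (nx, ny) = (zx * zx - zy * zy + cx, 2.0 * zx * zy + cy);
		if nx * nx + ny * ny > 4.0 {
			return i;
		}
		zx = nx;
		zy = ny;
	}
	max_iter
}

// Map the iteration count to an intensity in [0, 1], as map_color does before
// packing it into a grayscale Color.
fn grayscale(iter: usize, max_iter: usize) -> f32 {
	iter as f32 / max_iter as f32
}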
9	node-graph/gstd/src/text.rs	Normal file
@@ -0,0 +1,9 @@
use graph_craft::wasm_application_io::WasmEditorApi;

pub use graphene_core::text::{bounding_box, load_face, to_path, Font, FontCache};

#[node_macro::node(category(""))]
fn text<'i: 'n>(_: (), editor: &'i WasmEditorApi, text: String, font_name: Font, #[default(24)] font_size: f64) -> crate::vector::VectorData {
	let buzz_face = editor.font_cache.get(&font_name).map(|data| load_face(data));
	crate::vector::VectorData::from_subpaths(to_path(&text, buzz_face, font_size, None), false)
}
@@ -14,8 +14,8 @@ pub struct BooleanOperationNode<BooleanOp> {
	operation: BooleanOp,
}

#[node_macro::node_fn(BooleanOperationNode)]
fn boolean_operation_node(group_of_paths: GraphicGroup, operation: BooleanOperation) -> VectorData {
#[node_macro::old_node_fn(BooleanOperationNode)]
fn boolean_operation(group_of_paths: GraphicGroup, operation: BooleanOperation) -> VectorData {
	fn vector_from_image<T: Transform>(image_frame: T) -> VectorData {
		let corner1 = DVec2::ZERO;
		let corner2 = DVec2::new(1., 1.);
@@ -11,7 +11,8 @@ use graphene_core::raster::ImageFrame;
use graphene_core::renderer::RenderMetadata;
use graphene_core::renderer::{format_transform_matrix, GraphicElementRendered, ImageRenderMode, RenderParams, RenderSvgSegmentList, SvgRender};
use graphene_core::transform::Footprint;
use graphene_core::Node;
use graphene_core::vector::VectorData;
use graphene_core::GraphicGroup;
use graphene_core::{Color, WasmNotSend};

#[cfg(target_arch = "wasm32")]
@@ -27,24 +28,14 @@ use wasm_bindgen::JsCast;
#[cfg(target_arch = "wasm32")]
use web_sys::{CanvasRenderingContext2d, HtmlCanvasElement};

pub struct CreateSurfaceNode {}

#[node_macro::node_fn(CreateSurfaceNode)]
async fn create_surface_node<'a: 'input>(editor: &'a WasmEditorApi) -> Arc<WasmSurfaceHandle> {
#[node_macro::node(category("Debug: GPU"))]
async fn create_surface<'a: 'n>(_: (), editor: &'a WasmEditorApi) -> Arc<WasmSurfaceHandle> {
	Arc::new(editor.application_io.as_ref().unwrap().create_window())
}

#[node_macro::node(category("Debug: GPU"))]
#[cfg(target_arch = "wasm32")]
pub struct DrawImageFrameNode<Surface> {
	surface_handle: Surface,
}

#[node_macro::node_fn(DrawImageFrameNode)]
#[cfg(target_arch = "wasm32")]
async fn draw_image_frame_node<'a: 'input>(
	image: ImageFrame<graphene_core::raster::SRGBA8>,
	surface_handle: Arc<WasmSurfaceHandle>,
) -> graphene_core::application_io::SurfaceHandleFrame<HtmlCanvasElement> {
async fn draw_image_frame(_: (), image: ImageFrame<graphene_core::raster::SRGBA8>, surface_handle: Arc<WasmSurfaceHandle>) -> graphene_core::application_io::SurfaceHandleFrame<HtmlCanvasElement> {
	let image_data = image.image.data;
	let array: Clamped<&[u8]> = Clamped(bytemuck::cast_slice(image_data.as_slice()));
	if image.image.width > 0 && image.image.height > 0 {
@@ -62,19 +53,13 @@ async fn draw_image_frame_node<'a: 'input>(
	}
}

pub struct LoadResourceNode<Url> {
	url: Url,
}

#[node_macro::node_fn(LoadResourceNode)]
async fn load_resource_node<'a: 'input>(editor: &'a WasmEditorApi, url: String) -> Arc<[u8]> {
#[node_macro::node(category("Network"))]
async fn load_resource<'a: 'n>(_: (), _primary: (), #[scope("editor-api")] editor: &'a WasmEditorApi, url: String) -> Arc<[u8]> {
	editor.application_io.as_ref().unwrap().load_resource(url).unwrap().await.unwrap()
}

pub struct DecodeImageNode;

#[node_macro::node_fn(DecodeImageNode)]
fn decode_image_node<'a: 'input>(data: Arc<[u8]>) -> ImageFrame<Color> {
#[node_macro::node(category("Raster"))]
fn decode_image(_: (), data: Arc<[u8]>) -> ImageFrame<Color> {
	let image = image::load_from_memory(data.as_ref()).expect("Failed to decode image");
	let image = image.to_rgba32f();
	let image = ImageFrame {
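decode_image delegates to the image crate: the encoded bytes are loaded into a DynamicImage and converted to RGBA with f32 channels. Roughly the same steps in isolation, assuming a recent version of the image crate and with simplified error handling compared to the node above:

// Decode encoded image bytes (PNG, JPEG, ...) into width, height, and raw RGBA f32 data.
fn decode_rgba_f32(bytes: &[u8]) -> Result<(u32, u32, Vec<f32>), image::ImageError> {
	let decoded = image::load_from_memory(bytes)?;
	let rgba = decoded.to_rgba32f();
	let (width, height) = (rgba.width(), rgba.height());
	Ok((width, height, rgba.into_raw()))
}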
@@ -144,25 +129,21 @@ async fn render_canvas(render_config: RenderConfig, data: impl GraphicElementRen
	RenderOutputType::CanvasFrame(frame)
}

#[node_macro::node(category(""))]
#[cfg(target_arch = "wasm32")]
pub struct RasterizeNode<Footprint, Surface> {
	footprint: Footprint,
	surface_handle: Surface,
}

#[node_macro::node_fn(RasterizeNode)]
#[cfg(target_arch = "wasm32")]
async fn rasterize<_T: GraphicElementRendered + graphene_core::transform::TransformMut + WasmNotSend>(
	mut data: _T,
async fn rasterize<T: GraphicElementRendered + graphene_core::transform::TransformMut + WasmNotSend + 'n>(
	_: (),
	#[implementations((Footprint, VectorData), (Footprint, ImageFrame<Color>), (Footprint, GraphicGroup))] data: impl Node<Footprint, Output = T>,
	footprint: Footprint,
	surface_handle: Arc<SurfaceHandle<HtmlCanvasElement>>,
) -> ImageFrame<Color> {
	let mut render = SvgRender::new();

	if footprint.transform.matrix2.determinant() == 0. {
		log::trace!("Invalid footprint received for rasterization");
		return ImageFrame::default();
	}

	let mut data = data.eval(footprint).await;
	let mut render = SvgRender::new();
	let aabb = Bbox::from_transform(footprint.transform).to_axis_aligned_bbox();
	let size = aabb.size();
	let resolution = footprint.resolution;
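Before evaluating its input, rasterize rejects footprints whose 2 by 2 linear part is singular, since such a transform collapses the region to zero area. The same check in isolation, assuming only the glam crate:

use glam::DAffine2;

// A zero determinant means the transform squashes the footprint onto a line or a point,
// so there is nothing sensible to rasterize.
fn is_degenerate(footprint_transform: DAffine2) -> bool {
	footprint_transform.matrix2.determinant() == 0.
}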
@@ -204,16 +185,23 @@ async fn rasterize<_T: GraphicElementRendered + graphene_core::transform::Transf
	}
}

pub struct RenderNode<EditorApi, Data, Surface> {
	editor_api: EditorApi,
	data: Data,
	_surface_handle: Surface,
}

#[node_macro::node_fn(RenderNode)]
async fn render_node<'a: 'input, T: 'input + GraphicElementRendered + WasmNotSend>(
#[node_macro::node(category(""))]
async fn render<'a: 'n, T: 'n + GraphicElementRendered + WasmNotSend>(
	render_config: RenderConfig,
	editor_api: &'a WasmEditorApi,
	#[implementations(
		(Footprint, VectorData),
		(Footprint, ImageFrame<Color>),
		(Footprint, GraphicGroup),
		(Footprint, graphene_core::Artboard),
		(Footprint, graphene_core::ArtboardGroup),
		(Footprint, Option<Color>),
		(Footprint, Vec<Color>),
		(Footprint, bool),
		(Footprint, f32),
		(Footprint, f64),
		(Footprint, String),
	)]
	data: impl Node<Footprint, Output = T>,
	_surface_handle: impl Node<(), Output = Option<wgpu_executor::WgpuSurface>>,
) -> RenderOutput {
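The reworked render signature takes its data input as impl Node with a Footprint input rather than a stored struct field, so the body can evaluate it directly with the footprint it computes. A toy picture of that shape, using a hypothetical EvalNode trait in place of Graphite's actual Node trait:

// A stand-in for a lazily evaluated graph input: anything that can be evaluated with an input value.
trait EvalNode<Input> {
	type Output;
	fn eval(&self, input: Input) -> Self::Output;
}

// The simplest possible node: it ignores its input and returns a stored value.
struct ConstNode<T: Clone>(T);
impl<Input, T: Clone> EvalNode<Input> for ConstNode<T> {
	type Output = T;
	fn eval(&self, _input: Input) -> T {
		self.0.clone()
	}
}

// A consumer in the style of the render node: it decides the "footprint" itself and
// evaluates its data input with it, instead of receiving pre-evaluated data.
fn consume(data: impl EvalNode<u32, Output = String>) -> String {
	let footprint = 42;
	data.eval(footprint)
}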
@@ -222,9 +210,9 @@ async fn render_node<'a: 'input, T: 'input + GraphicElementRendered + WasmNotSen
	let RenderConfig { hide_artboards, for_export, .. } = render_config;
	let render_params = RenderParams::new(render_config.view_mode, ImageRenderMode::Base64, None, false, hide_artboards, for_export);

	let data = self.data.eval(footprint).await;
	let data = data.eval(footprint).await;
	#[cfg(all(feature = "vello", target_arch = "wasm32"))]
	let surface_handle = self._surface_handle.eval(()).await;
	let surface_handle = _surface_handle.eval(()).await;
	let use_vello = editor_api.editor_preferences.use_vello();
	#[cfg(all(feature = "vello", target_arch = "wasm32"))]
	let use_vello = use_vello && surface_handle.is_some();