Fix the 'Upload Texture' node (#2680)

* Fix upload texture node

* Feature gate GPU node implementations
Dennis Kobert 2025-05-29 13:35:35 +02:00 committed by GitHub
parent 4d2e1d57fd
commit 76ecdc8f1b
4 changed files with 26 additions and 13 deletions

View file

@@ -1781,7 +1781,7 @@ fn static_nodes() -> Vec<DocumentNodeDefinition> {
nodes: [
DocumentNode {
inputs: vec![NodeInput::scope("editor-api")],
-implementation: DocumentNodeImplementation::ProtoNode(ProtoNodeIdentifier::new("graphene_core::ops::IntoNode")),
+implementation: DocumentNodeImplementation::ProtoNode(ProtoNodeIdentifier::new("graphene_core::ops::IntoNode<&WgpuExecutor>")),
..Default::default()
},
DocumentNode {
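
Aside: the definition above has to resolve against the registry built in node_registry.rs. Below is a minimal, self-contained Rust sketch of why the monomorphized name matters; a string-keyed map stands in for the real HashMap<ProtoNodeIdentifier, ...> registry, and the exact string that into_node! registers is an assumption inferred from the new definition string, not taken from the macro itself.

use std::collections::HashMap;

// Stand-in for the real NodeConstructor; only the lookup logic matters here.
type Constructor = fn() -> &'static str;

fn main() {
    let mut registry: HashMap<&'static str, Constructor> = HashMap::new();

    // Assumed: the registration side (into_node!(from: &WasmEditorApi, to: &WgpuExecutor)
    // in node_registry.rs) stores the constructor under the concrete, monomorphized name.
    registry.insert("graphene_core::ops::IntoNode<&WgpuExecutor>", || "wgpu executor node");

    // A HashMap lookup needs the exact key, so the document definition must request the
    // same name; the bare "IntoNode" string used before this commit would find nothing.
    assert!(registry.get("graphene_core::ops::IntoNode<&WgpuExecutor>").is_some());
    assert!(registry.get("graphene_core::ops::IntoNode").is_none());
}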

View file

@@ -925,7 +925,7 @@ impl GraphicElementRendered for RasterFrame {
fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) {
match self {
RasterFrame::ImageFrame(image) => image.render_svg(render, render_params),
-RasterFrame::TextureFrame(_) => unimplemented!(),
+RasterFrame::TextureFrame(_) => log::warn!("tried to render texture as an svg"),
}
}
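
Aside: a compilable sketch of the behavioural difference in this match arm, with a simplified RasterFrame and eprintln! standing in for log::warn!. The old arm aborted the whole SVG render with a panic; the new arm skips the GPU-backed frame so the rest of the document still renders.

// Simplified stand-ins; the real RasterFrame wraps image and texture tables.
enum RasterFrame {
    ImageFrame(String),
    TextureFrame(u32),
}

fn render_svg(frame: &RasterFrame, out: &mut String) {
    match frame {
        RasterFrame::ImageFrame(svg) => out.push_str(svg),
        // Before this commit: unimplemented!(), a panic that aborted the render.
        // Now the texture frame is skipped with a warning instead.
        RasterFrame::TextureFrame(_) => eprintln!("tried to render texture as an svg"),
    }
}

fn main() {
    let mut out = String::new();
    render_svg(&RasterFrame::ImageFrame("<rect/>".into()), &mut out);
    render_svg(&RasterFrame::TextureFrame(0), &mut out); // warns, does not panic
    assert_eq!(out, "<rect/>");
}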

View file

@@ -13,7 +13,7 @@ use graphene_core::{fn_type_fut, future};
use graphene_std::Context;
use graphene_std::GraphicElement;
use graphene_std::any::{ComposeTypeErased, DowncastBothNode, DynAnyNode, IntoTypeErasedNode};
-use graphene_std::application_io::ImageTexture;
+use graphene_std::application_io::{ImageTexture, TextureFrameTable};
use graphene_std::wasm_application_io::*;
use node_registry_macros::{async_node, into_node};
use once_cell::sync::Lazy;
@@ -21,12 +21,11 @@ use std::collections::HashMap;
use std::sync::Arc;
#[cfg(feature = "gpu")]
use wgpu_executor::ShaderInputFrame;
-use wgpu_executor::{WgpuSurface, WindowHandle};
+use wgpu_executor::{WgpuExecutor, WgpuSurface, WindowHandle};
// TODO: turn into hashmap
fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeConstructor>> {
let node_types: Vec<(ProtoNodeIdentifier, NodeConstructor, NodeIOTypes)> = vec![
-into_node!(from: f64, to: f64),
into_node!(from: f64, to: f64),
into_node!(from: u32, to: f64),
into_node!(from: u8, to: u32),
@@ -113,6 +112,12 @@ fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeCons
#[cfg(feature = "gpu")]
async_node!(graphene_core::memo::ImpureMemoNode<_, _, _>, input: Context, fn_params: [Context => ShaderInputFrame]),
+#[cfg(feature = "gpu")]
+async_node!(graphene_core::memo::ImpureMemoNode<_, _, _>, input: Context, fn_params: [Context => TextureFrameTable]),
+#[cfg(feature = "gpu")]
+async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => TextureFrameTable]),
+#[cfg(feature = "gpu")]
+into_node!(from: &WasmEditorApi, to: &WgpuExecutor),
#[cfg(feature = "gpu")]
(
ProtoNodeIdentifier::new(stringify!(wgpu_executor::CreateGpuSurfaceNode<_>)),
|args| {
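
Aside: a compilable sketch of the feature-gating pattern used throughout this registry. The entries are plain strings here so the sketch builds without any of the real crates; the gpu feature name matches the one in the diff. #[cfg(feature = "gpu")] on an individual vec! element removes that element before type checking, so a build without the gpu feature never references WgpuExecutor or the other wgpu types.

// Assumed Cargo.toml feature declaration: [features] gpu = []
fn registry_entries() -> Vec<&'static str> {
    vec![
        // Always compiled.
        "into_node!(from: f64, to: f64)",
        // Only compiled with `cargo build --features gpu`; without it the element
        // (and anything it references) is stripped before type checking.
        #[cfg(feature = "gpu")]
        "into_node!(from: &WasmEditorApi, to: &WgpuExecutor)",
    ]
}

fn main() {
    println!("{} registry entries", registry_entries().len());
}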

View file

@@ -8,7 +8,7 @@ pub use executor::GpuExecutor;
use futures::Future;
use glam::{DAffine2, UVec2};
use gpu_executor::{ComputePassDimensions, GPUConstant, StorageBufferOptions, TextureBufferOptions, TextureBufferType, ToStorageBuffer, ToUniformBuffer};
-use graphene_core::application_io::{ApplicationIo, EditorApi, ImageTexture, SurfaceHandle};
+use graphene_core::application_io::{ApplicationIo, EditorApi, ImageTexture, SurfaceHandle, TextureFrameTable};
use graphene_core::raster::image::ImageFrameTable;
use graphene_core::raster::{Image, SRGBA8};
use graphene_core::transform::{Footprint, Transform};
@@ -910,14 +910,14 @@ async fn render_texture<'a: 'n>(
}
#[node_macro::node(category(""))]
-async fn upload_texture<'a: 'n>(_: impl ExtractFootprint + Ctx, input: ImageFrameTable<Color>, executor: &'a WgpuExecutor) -> ImageTexture {
+async fn upload_texture<'a: 'n>(_: impl ExtractFootprint + Ctx, input: ImageFrameTable<Color>, executor: &'a WgpuExecutor) -> TextureFrameTable {
// let new_data: Vec<RGBA16F> = input.image.data.into_iter().map(|c| c.into()).collect();
-let input = input.one_instance_ref().instance;
-let new_data: Vec<SRGBA8> = input.data.iter().map(|x| (*x).into()).collect();
+let image = input.one_instance_ref().instance;
+let new_data: Vec<SRGBA8> = image.data.iter().map(|x| (*x).into()).collect();
let new_image = Image {
-width: input.width,
-height: input.height,
+width: image.width,
+height: image.height,
data: new_data,
base64_string: None,
};
@@ -929,10 +929,18 @@ async fn upload_texture<'a: 'n>(_: impl ExtractFootprint + Ctx, input: ImageFram
_ => unreachable!("Unsupported ShaderInput type"),
};
-ImageTexture {
+let texture = ImageTexture {
texture: texture.into(),
// TODO: Find an alternate way to encode the transform and alpha_blend now that these fields have been moved up out of ImageTexture
// transform: input.transform,
// alpha_blend: Default::default(),
-}
+};
+let mut result_table = TextureFrameTable::empty();
+result_table.push(graphene_core::instances::Instance {
+instance: texture,
+transform: input.transform(),
+alpha_blending: *input.one_instance_ref().alpha_blending,
+source_node_id: *input.one_instance_ref().source_node_id,
+});
+result_table
}
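
Aside: a self-contained sketch of the instance-table pattern the new return value relies on. Instance, Table and the field types below are simplified stand-ins rather than the real graphene_core::instances types; the point is that the transform and alpha blending live on the table row, not on the texture, so wrapping the uploaded texture in a one-row table is what carries input.transform() and the blending settings to downstream nodes in place of the commented-out ImageTexture fields.

// Simplified stand-ins for the real instance/table types.
#[derive(Clone, Copy, Debug, Default)]
struct Transform; // stand-in for the affine transform (glam::DAffine2)

#[derive(Clone, Copy, Debug, Default)]
struct AlphaBlending; // stand-in for the blending settings

struct Instance<T> {
    instance: T,
    transform: Transform,
    alpha_blending: AlphaBlending,
    source_node_id: Option<u64>,
}

struct Table<T> {
    rows: Vec<Instance<T>>,
}

impl<T> Table<T> {
    fn empty() -> Self {
        Table { rows: Vec::new() }
    }
    fn push(&mut self, row: Instance<T>) {
        self.rows.push(row);
    }
}

fn main() {
    // The GPU upload only produces the texture payload; the table row carries the
    // metadata that ImageTexture itself no longer stores.
    let texture_handle = 42_u32; // stand-in for the uploaded wgpu texture
    let mut result_table = Table::empty();
    result_table.push(Instance {
        instance: texture_handle,
        transform: Transform,          // in the real node: input.transform()
        alpha_blending: AlphaBlending, // copied from the input instance
        source_node_id: None,
    });
    assert_eq!(result_table.rows.len(), 1);
}

Returning a one-row table also keeps upload_texture's output type in line with the Context => TextureFrameTable memo entries added to the node registry above, which appears to be why those entries were registered in the same commit.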