Make the dynamic node graph execution asynchronous (#1218)

* Make node graph execution async

Make node macro generate async node implementations

Start propagating async through the node system

Async checkpoint

Make Any<'i> Send + Sync

Determine node io type using panic node

Fix types for raster_node macro

Finish porting node registry?

Fix lifetime errors

Remove Send + Sync requirements and start making node construction async

Async MVP

Fix tests

Clippy fix

* Fix nodes

* Simplify lifetimes for node macro + make node macro more modular

* Reenable more nodes

* Fix pasting images

* Remove http test from brush node

* Fix output type for cache node

* Fix types for let scope

* Fix formatting
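
The bullets above track one conceptual change: node evaluation no longer returns a value directly, but something the executor can await. As a rough orientation, here is a minimal sketch of that shape in plain Rust; the `AsyncNode` trait, `DynFuture` alias, and `DoubleNode` are hypothetical stand-ins for illustration, not the actual graphene `Node` trait or the macro's real output.

```rust
use core::future::Future;
use core::pin::Pin;

/// Boxed future, so the trait below stays object-safe.
pub type DynFuture<'i, T> = Pin<Box<dyn Future<Output = T> + 'i>>;

/// Hypothetical async node trait: eval hands back a future instead of a value.
pub trait AsyncNode<'i, Input: 'i> {
    type Output: 'i;
    fn eval(&'i self, input: Input) -> DynFuture<'i, Self::Output>;
}

/// A trivial node that doubles its input.
pub struct DoubleNode;

impl<'i> AsyncNode<'i, u32> for DoubleNode {
    type Output = u32;
    fn eval(&'i self, input: u32) -> DynFuture<'i, u32> {
        Box::pin(async move { input * 2 })
    }
}

fn main() {
    // `futures::executor::block_on` requires the `futures` crate.
    let node = DoubleNode;
    let result = futures::executor::block_on(node.eval(21));
    assert_eq!(result, 42);
}
```

Boxing the future keeps the trait object-safe, which is what lets a dynamically built node graph hold heterogeneous nodes behind one common interface and await each output in turn.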
Dennis Kobert 2023-05-27 11:48:57 +02:00 committed by Keavon Chambers
parent 5c7211cb30
commit 4bd9fbd073
40 changed files with 834 additions and 471 deletions
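
One bullet that is easy to gloss over is "Make Any<'i> Send + Sync". On a multithreaded async executor, anything held across an `.await` must be `Send`, so the type-erased values passed between nodes need those bounds as well. The snippet below illustrates the idea with `std::any::Any`; graphene's own `Any<'i>` is a different, dyn-any-based type, so treat this purely as an analogy.

```rust
use std::any::Any;

// Boxed type-erased value with Send + Sync bounds; without them the value
// could not be moved to another thread or held across an .await on a
// work-stealing executor.
type DynBox = Box<dyn Any + Send + Sync>;

fn assert_send<T: Send>(_: &T) {}

fn main() {
    let value: DynBox = Box::new(42_u32);
    assert_send(&value); // compiles only because of the Send + Sync bounds
    let n = value.downcast::<u32>().expect("stored a u32");
    println!("{n}");
}
```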


@@ -168,7 +168,7 @@ pub struct UniformNode<Executor> {
}
#[node_macro::node_fn(UniformNode)]
-fn uniform_node<T: ToUniformBuffer, E: GpuExecutor>(data: T, executor: &'any_input E) -> ShaderInput<E::BufferHandle> {
+fn uniform_node<T: ToUniformBuffer, E: GpuExecutor>(data: T, executor: &'input E) -> ShaderInput<E::BufferHandle> {
executor.create_uniform_buffer(data).unwrap()
}
@@ -177,7 +177,7 @@ pub struct StorageNode<Executor> {
}
#[node_macro::node_fn(StorageNode)]
-fn storage_node<T: ToStorageBuffer, E: GpuExecutor>(data: T, executor: &'any_input E) -> ShaderInput<E::BufferHandle> {
+fn storage_node<T: ToStorageBuffer, E: GpuExecutor>(data: T, executor: &'input E) -> ShaderInput<E::BufferHandle> {
executor
.create_storage_buffer(
data,
@@ -205,7 +205,7 @@ pub struct CreateOutputBufferNode<Executor, Ty> {
}
#[node_macro::node_fn(CreateOutputBufferNode)]
-fn create_output_buffer_node<E: GpuExecutor>(size: usize, executor: &'any_input E, ty: Type) -> ShaderInput<E::BufferHandle> {
+fn create_output_buffer_node<E: GpuExecutor>(size: usize, executor: &'input E, ty: Type) -> ShaderInput<E::BufferHandle> {
executor.create_output_buffer(size, ty, true).unwrap()
}
@@ -216,7 +216,7 @@ pub struct CreateComputePassNode<Executor, Output, Instances> {
}
#[node_macro::node_fn(CreateComputePassNode)]
-fn create_compute_pass_node<E: GpuExecutor>(layout: PipelineLayout<E>, executor: &'any_input E, output: ShaderInput<E::BufferHandle>, instances: u32) -> E::CommandBuffer {
+fn create_compute_pass_node<E: GpuExecutor>(layout: PipelineLayout<E>, executor: &'input E, output: ShaderInput<E::BufferHandle>, instances: u32) -> E::CommandBuffer {
executor.create_compute_pass(&layout, Some(output), instances).unwrap()
}
@@ -242,7 +242,7 @@ pub struct ExecuteComputePipelineNode<Executor> {
}
#[node_macro::node_fn(ExecuteComputePipelineNode)]
-fn execute_compute_pipeline_node<E: GpuExecutor>(encoder: E::CommandBuffer, executor: &'any_input mut E) {
+fn execute_compute_pipeline_node<E: GpuExecutor>(encoder: E::CommandBuffer, executor: &'input mut E) {
executor.execute_compute_pipeline(encoder).unwrap();
}
@@ -251,6 +251,6 @@ fn execute_compute_pipeline_node<E: GpuExecutor>(encoder: E::CommandBuffer, exec
// executor: Executor,
// }
// #[node_macro::node_fn(ReadOutputBufferNode)]
-// fn read_output_buffer_node<E: GpuExecutor>(buffer: E::BufferHandle, executor: &'any_input mut E) -> Vec<u8> {
+// fn read_output_buffer_node<E: GpuExecutor>(buffer: E::BufferHandle, executor: &'input mut E) -> Vec<u8> {
// executor.read_output_buffer(buffer).await.unwrap()
// }
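
For readers skimming the hunks above: every change is the same rename of the borrowed secondary parameter's lifetime from `'any_input` to `'input`, matching the "simplify lifetimes for node macro" bullet in the commit message. The sketch below uses hypothetical names rather than the real macro expansion, but shows the general pattern such generated impls follow after the rename: a single `'input` lifetime ties `&self`, the borrowed executor, and the output together.

```rust
/// Simplified stand-in for the node trait the macro targets (not the real
/// graphene trait): one lifetime, 'input, relates &self to the output.
pub trait Node<'input, In: 'input> {
    type Output: 'input;
    fn eval(&'input self, input: In) -> Self::Output;
}

/// Roughly what a macro-generated struct like StorageNode captures: the
/// secondary `executor` parameter is stored by value.
pub struct StorageNodeSketch<E> {
    executor: E,
}

impl<'input, T: 'input, E: 'input> Node<'input, T> for StorageNodeSketch<E> {
    type Output = (T, &'input E);
    fn eval(&'input self, data: T) -> Self::Output {
        // The wrapped fn body receives `data: T` and `executor: &'input E`,
        // i.e. the borrow now reuses the trait's own lifetime instead of a
        // separate 'any_input lifetime.
        (data, &self.executor)
    }
}

fn main() {
    let node = StorageNodeSketch { executor: String::from("gpu executor stand-in") };
    let (data, executor) = node.eval(7_u32);
    println!("{data} via {executor}");
}
```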