Mirror of https://github.com/GraphiteEditor/Graphite.git, synced 2025-07-07 15:55:00 +00:00
Bulk remove old code for legacy GPU node implementations (#2722)
* fix warning in node-macro
* remove crates `gpu-executor`, `gpu-compiler`, `compilation-client` and `compilation-server`
* remove `wgpu-executor::executor`
* .gitignore .idea/
This commit is contained in:
parent d721bca85f
commit 3489f9ddb1
33 changed files with 11 additions and 7380 deletions
@@ -1,19 +0,0 @@
[package]
name = "compilation-client"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"

[dependencies]
# Local dependencies
graph-craft = { path = "../graph-craft", features = ["serde"] }
gpu-executor = { path = "../gpu-executor" }
wgpu-executor = { path = "../wgpu-executor" }
gpu-compiler-bin-wrapper = { path = "../gpu-compiler/gpu-compiler-bin-wrapper" }

# Workspace dependencies
graphene-core = { workspace = true }
dyn-any = { workspace = true }
anyhow = { workspace = true }
serde_json = { workspace = true }
reqwest = { workspace = true, features = ["blocking", "json", "rustls-tls"] }
@@ -1,28 +0,0 @@
use gpu_compiler_bin_wrapper::CompileRequest;
use graph_craft::Type;
use graph_craft::proto::ProtoNetwork;
use wgpu_executor::ShaderIO;

pub async fn compile(networks: Vec<ProtoNetwork>, inputs: Vec<Type>, outputs: Vec<Type>, io: ShaderIO) -> Result<Shader, reqwest::Error> {
    let client = reqwest::Client::new();

    let compile_request = CompileRequest::new(networks, inputs.clone(), outputs.clone(), io.clone());
    let response = client.post("http://localhost:3000/compile/spirv").json(&compile_request).send();
    let response = response.await?;
    response.bytes().await.map(|b| Shader {
        spirv_binary: b.chunks(4).map(|x| u32::from_le_bytes(x.try_into().unwrap())).collect(),
        input_types: inputs,
        output_types: outputs,
        io,
    })
}

// TODO: should we add the entry point as a field?
/// A compiled shader with type annotations.
#[derive(dyn_any::DynAny)]
pub struct Shader {
    pub spirv_binary: Vec<u32>,
    pub input_types: Vec<Type>,
    pub output_types: Vec<Type>,
    pub io: ShaderIO,
}
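The removed client above turns the HTTP response bytes into SPIR-V words with `chunks(4)` and `u32::from_le_bytes`. A minimal standalone sketch of that conversion, assuming a little-endian SPIR-V blob whose length is a multiple of four (the helper name and test values here are illustrative, not part of the removed crate):

fn spirv_words(bytes: &[u8]) -> Vec<u32> {
    // SPIR-V modules are a sequence of 32-bit little-endian words.
    assert!(bytes.len() % 4 == 0, "SPIR-V binaries are a whole number of 32-bit words");
    bytes.chunks(4).map(|chunk| u32::from_le_bytes(chunk.try_into().unwrap())).collect()
}

fn main() {
    // 0x07230203 is the SPIR-V magic number; stored little-endian it is the bytes 03 02 23 07.
    let blob = [0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00];
    let words = spirv_words(&blob);
    assert_eq!(words, vec![0x0723_0203, 0x0001_0000]);
    println!("{words:x?}");
}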
@@ -1,68 +0,0 @@
use gpu_compiler_bin_wrapper::CompileRequest;
use graph_craft::concrete;
use graph_craft::document::value::TaggedValue;
use graph_craft::document::*;
use graphene_core::Color;
use graphene_core::raster::adjustments::BlendMode;
use std::time::Duration;
use wgpu_executor::{ShaderIO, ShaderInput};

fn main() {
    let client = reqwest::blocking::Client::new();

    let network = add_network();
    let compiler = graph_craft::graphene_compiler::Compiler {};
    let proto_network = compiler.compile_single(network).unwrap();

    let io = ShaderIO {
        inputs: vec![
            ShaderInput::StorageBuffer((), concrete!(Color)), // background image
            ShaderInput::StorageBuffer((), concrete!(Color)), // foreground image
            ShaderInput::StorageBuffer((), concrete!(u32)),   // width/height of the foreground image
            ShaderInput::OutputBuffer((), concrete!(Color)),
        ],
        output: ShaderInput::OutputBuffer((), concrete!(Color)),
    };

    let compile_request = CompileRequest::new(vec![proto_network], vec![concrete!(Color), concrete!(Color), concrete!(u32)], vec![concrete!(Color)], io);
    let response = client
        .post("http://localhost:3000/compile/spirv")
        .timeout(Duration::from_secs(30))
        .json(&compile_request)
        .send()
        .unwrap();
    println!("response: {response:?}");
}

fn add_network() -> NodeNetwork {
    NodeNetwork {
        exports: vec![NodeInput::node(NodeId(0), 0)],
        nodes: [DocumentNode {
            inputs: vec![NodeInput::Inline(InlineRust::new(
                format!(
                    r#"graphene_core::raster::adjustments::BlendNode::new(
                        graphene_core::value::CopiedNode::new({}),
                        graphene_core::value::CopiedNode::new({}),
                    ).eval((
                        i1[_global_index.x as usize],
                        if _global_index.x < i2[2] {{
                            i0[_global_index.x as usize]
                        }} else {{
                            Color::from_rgbaf32_unchecked(0., 0., 0., 0.)
                        }},
                    ))"#,
                    TaggedValue::BlendMode(BlendMode::Normal).to_primitive_string(),
                    TaggedValue::F64(1.).to_primitive_string(),
                ),
                concrete![Color],
            ))],
            implementation: DocumentNodeImplementation::ProtoNode("graphene_core::value::CopiedNode".into()),
            ..Default::default()
        }]
        .into_iter()
        .enumerate()
        .map(|(id, node)| (NodeId(id as u64), node))
        .collect(),
        ..Default::default()
    }
}
@@ -1,20 +0,0 @@
[package]
name = "compilation-server"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"

[dependencies]
# Local dependencies
graph-craft = { path = "../graph-craft", features = ["serde"] }
gpu-compiler-bin-wrapper = { path = "../gpu-compiler/gpu-compiler-bin-wrapper" }
tokio = { workspace = true, features = ["full"] }
axum = { workspace = true }
serde_json = { workspace = true }
serde = { workspace = true, features = ["derive"] }
tempfile = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }

# Required dependencies
tower-http = { version = "0.6", features = ["cors"] }
@@ -1,48 +0,0 @@
use axum::Router;
use axum::extract::{Json, State};
use axum::http::StatusCode;
use axum::routing::{get, post};
use gpu_compiler_bin_wrapper::CompileRequest;
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::RwLock;
use tower_http::cors::CorsLayer;

struct AppState {
    compile_dir: tempfile::TempDir,
    cache: RwLock<HashMap<CompileRequest, Result<Vec<u8>, StatusCode>>>,
}

#[tokio::main]
async fn main() {
    let shared_state = Arc::new(AppState {
        compile_dir: tempfile::tempdir().expect("failed to create tempdir"),
        cache: Default::default(),
    });

    // build our application with a single route
    let app = Router::new()
        .route("/", get(|| async { "Hello from compilation server!" }))
        .route("/compile", get(|| async { "Supported targets: spirv" }))
        .route("/compile/spirv", post(post_compile_spirv))
        .layer(CorsLayer::permissive())
        .with_state(shared_state);

    // run it with hyper on localhost:3000
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}

async fn post_compile_spirv(State(state): State<Arc<AppState>>, Json(compile_request): Json<CompileRequest>) -> Result<Vec<u8>, StatusCode> {
    if let Some(result) = state.cache.read().unwrap().get(&compile_request) {
        return result.clone();
    }

    let path = std::env::var("CARGO_MANIFEST_DIR").unwrap() + "/../gpu-compiler/Cargo.toml";
    let result = compile_request.compile(state.compile_dir.path().to_str().expect("non utf8 tempdir path"), &path).map_err(|e| {
        eprintln!("compilation failed: {e}");
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    state.cache.write().unwrap().insert(compile_request, Ok(result.clone()));
    Ok(result)
}
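The removed server memoizes compilation results in an `RwLock<HashMap<CompileRequest, …>>` keyed by the request. A minimal sketch of that read-through caching pattern, with `String` keys and byte vectors standing in for `CompileRequest` and the compiled SPIR-V (the names and stand-in "compilation" step are illustrative):

use std::collections::HashMap;
use std::sync::RwLock;

// Return a cached result if present, otherwise compute it and remember it.
fn cached_compile(cache: &RwLock<HashMap<String, Vec<u8>>>, request: &str) -> Vec<u8> {
    if let Some(hit) = cache.read().unwrap().get(request) {
        return hit.clone();
    }
    let result = request.as_bytes().to_vec(); // stand-in for the expensive compilation step
    cache.write().unwrap().insert(request.to_owned(), result.clone());
    result
}

fn main() {
    let cache = RwLock::new(HashMap::new());
    let first = cached_compile(&cache, "shader-a");
    let second = cached_compile(&cache, "shader-a"); // served from the cache on the second call
    assert_eq!(first, second);
}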
4866 node-graph/gpu-compiler/Cargo.lock (generated)
File diff suppressed because it is too large
@@ -1,40 +0,0 @@
[package]
name = "gpu-compiler"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"

[features]
default = []
profiling = ["nvtx"]
serde = ["graphene-core/serde", "glam/serde"]

# NOTE: We can't use workspace dependencies in this crate because it uses a different toolchain
[dependencies]
# Local dependencies
graph-craft = { path = "../graph-craft", features = ["serde"] }
gpu-executor = { path = "../gpu-executor" }
graphene-core = { path = "../gcore", features = ["std", "alloc"] }
dyn-any = { path = "../../libraries/dyn-any", features = [
    "log-bad-types",
    "rc",
    "glam",
] }

# Required dependencies
num-traits = { version = "0.2", default-features = false, features = ["i128"] }
log = "0.4"
serde = { version = "1.0", features = ["derive", "rc"] }
glam = { version = "0.29", default-features = false, features = ["serde"] }
base64 = "0.22"
bytemuck = { version = "1.13", features = ["derive"] }
tempfile = "3.6"
anyhow = "1.0"
serde_json = "1.0"
tera = { version = "1.17.1" }
spirv-builder = { version = "0.9", default-features = false, features = [
    "use-installed-tools",
] }

# Optional dependencies
nvtx = { version = "1.3", optional = true }
@@ -1,21 +0,0 @@
[package]
name = "gpu-compiler-bin-wrapper"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"

[features]
default = []
profiling = []

[dependencies]
# Local dependencies
graph-craft = { path = "../../graph-craft", features = ["serde", "wgpu"] }
gpu-executor = { path = "../../gpu-executor" }
wgpu-executor = { path = "../../wgpu-executor" }

# Workspace dependencies
log = { workspace = true }
anyhow = { workspace = true }
serde_json = { workspace = true }
serde = { workspace = true, features = ["derive"] }
@@ -1,68 +0,0 @@
use graph_craft::Type;
use graph_craft::proto::ProtoNetwork;
use std::io::Write;
use wgpu_executor::ShaderIO;

pub fn compile_spirv(request: &CompileRequest, compile_dir: Option<&str>, manifest_path: &str) -> anyhow::Result<Vec<u8>> {
    let serialized_graph = serde_json::to_string(&graph_craft::graphene_compiler::CompileRequest {
        networks: request.networks.clone(),
        io: request.shader_io.clone(),
    })?;

    #[cfg(not(feature = "profiling"))]
    let features = "";
    #[cfg(feature = "profiling")]
    let features = "profiling";

    println!("calling cargo run!");
    let non_cargo_env_vars = std::env::vars().filter(|(k, _)| k.starts_with("PATH")).collect::<Vec<_>>();
    let mut cargo_command = std::process::Command::new("cargo")
        .arg("run")
        .arg("--release")
        .arg("--manifest-path")
        .arg(manifest_path)
        .current_dir(manifest_path.replace("Cargo.toml", ""))
        .env_clear()
        .envs(non_cargo_env_vars)
        .arg("--features")
        .arg(features)
        // TODO: handle None case properly
        .arg(compile_dir.unwrap())
        .stdin(std::process::Stdio::piped())
        .stdout(std::process::Stdio::piped())
        .spawn()?;

    cargo_command.stdin.as_mut().unwrap().write_all(serialized_graph.as_bytes())?;
    let output = cargo_command.wait_with_output()?;
    if !output.status.success() {
        return Err(anyhow::anyhow!("cargo failed: {}", String::from_utf8_lossy(&output.stderr)));
    }
    Ok(std::fs::read(compile_dir.unwrap().to_owned() + "/shader.spv")?)
}

#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Hash, Eq)]
pub struct CompileRequest {
    networks: Vec<graph_craft::proto::ProtoNetwork>,
    input_types: Vec<Type>,
    output_types: Vec<Type>,
    shader_io: ShaderIO,
}

impl CompileRequest {
    pub fn new(networks: Vec<ProtoNetwork>, input_types: Vec<Type>, output_types: Vec<Type>, io: ShaderIO) -> Self {
        // TODO: add type checking
        // for (input, buffer) in input_types.iter().zip(io.inputs.iter()) {
        //     assert_eq!(input, &buffer.ty());
        // }
        // assert_eq!(output_type, io.output.ty());
        Self {
            networks,
            input_types,
            output_types,
            shader_io: io,
        }
    }
    pub fn compile(&self, compile_dir: &str, manifest_path: &str) -> anyhow::Result<Vec<u8>> {
        compile_spirv(self, Some(compile_dir), manifest_path)
    }
}
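The removed wrapper drives the actual compiler by spawning `cargo run` with piped stdin/stdout and streaming the serialized graph into it. A minimal sketch of that spawn-and-pipe pattern, assuming a Unix-like system and using `rev` as a stand-in child process instead of `cargo run` (everything here is illustrative):

use std::io::Write;
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    // Spawn the child with piped stdin/stdout, as compile_spirv does for `cargo run`.
    let mut child = Command::new("rev").stdin(Stdio::piped()).stdout(Stdio::piped()).spawn()?;
    child.stdin.as_mut().unwrap().write_all(b"serialized graph\n")?;
    drop(child.stdin.take()); // close stdin so the child sees EOF and exits
    let output = child.wait_with_output()?;
    assert!(output.status.success());
    print!("{}", String::from_utf8_lossy(&output.stdout));
    Ok(())
}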
@@ -1,10 +0,0 @@
[toolchain]
channel = "nightly-2023-05-27"
components = [
    "rust-src",
    "rustc-dev",
    "llvm-tools-preview",
    "clippy",
    "rustfmt",
    "rustc",
]
@ -1,259 +0,0 @@
|
|||
use gpu_executor::{GPUConstant, ShaderIO, ShaderInput, SpirVCompiler};
|
||||
use graph_craft::proto::*;
|
||||
use graphene_core::Cow;
|
||||
use std::path::{Path, PathBuf};
|
||||
use tera::Context;
|
||||
|
||||
fn create_cargo_toml(metadata: &Metadata) -> Result<String, tera::Error> {
|
||||
let mut tera = tera::Tera::default();
|
||||
tera.add_raw_template("cargo_toml", include_str!("templates/Cargo-template.toml"))?;
|
||||
let mut context = Context::new();
|
||||
context.insert("name", &metadata.name);
|
||||
context.insert("authors", &metadata.authors);
|
||||
context.insert("gcore_path", &format!("{}{}", env!("CARGO_MANIFEST_DIR"), "/../gcore"));
|
||||
tera.render("cargo_toml", &context)
|
||||
}
|
||||
|
||||
pub struct Metadata {
|
||||
name: String,
|
||||
authors: Vec<String>,
|
||||
}
|
||||
|
||||
impl Metadata {
|
||||
pub fn new(name: String, authors: Vec<String>) -> Self {
|
||||
Self { name, authors }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_files(metadata: &Metadata, networks: &[ProtoNetwork], compile_dir: &Path, io: &ShaderIO) -> anyhow::Result<()> {
|
||||
let src = compile_dir.join("src");
|
||||
let cargo_file = compile_dir.join("Cargo.toml");
|
||||
let cargo_toml = create_cargo_toml(metadata)?;
|
||||
std::fs::write(cargo_file, cargo_toml)?;
|
||||
|
||||
let toolchain_file = compile_dir.join("rust-toolchain.toml");
|
||||
let toolchain = include_str!("templates/rust-toolchain.toml");
|
||||
std::fs::write(toolchain_file, toolchain)?;
|
||||
|
||||
// create src dir
|
||||
match std::fs::create_dir(&src) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
if e.kind() != std::io::ErrorKind::AlreadyExists {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
let lib = src.join("lib.rs");
|
||||
let shader = serialize_gpu(networks, io)?;
|
||||
eprintln!("{shader}");
|
||||
std::fs::write(lib, shader)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn constant_attribute(constant: &GPUConstant) -> &'static str {
|
||||
match constant {
|
||||
GPUConstant::SubGroupId => "subgroup_id",
|
||||
GPUConstant::SubGroupInvocationId => "subgroup_local_invocation_id",
|
||||
GPUConstant::SubGroupSize => todo!(),
|
||||
GPUConstant::NumSubGroups => "num_subgroups",
|
||||
GPUConstant::WorkGroupId => "workgroup_id",
|
||||
GPUConstant::WorkGroupInvocationId => "local_invocation_id",
|
||||
GPUConstant::WorkGroupSize => todo!(),
|
||||
GPUConstant::NumWorkGroups => "num_workgroups",
|
||||
GPUConstant::GlobalInvocationId => "global_invocation_id",
|
||||
GPUConstant::GlobalSize => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn construct_argument<T: gpu_executor::GpuExecutor>(input: &ShaderInput<T>, position: u32, binding_offset: u32) -> String {
|
||||
let line = match input {
|
||||
ShaderInput::Constant(constant) => format!("#[spirv({})] i{}: {}", constant_attribute(constant), position, constant.ty()),
|
||||
ShaderInput::UniformBuffer(_, ty) => {
|
||||
format!("#[spirv(uniform, descriptor_set = 0, binding = {})] i{}: &{}", position + binding_offset, position, ty,)
|
||||
}
|
||||
ShaderInput::StorageBuffer(_, ty) | ShaderInput::ReadBackBuffer(_, ty) => {
|
||||
format!("#[spirv(storage_buffer, descriptor_set = 0, binding = {})] i{}: &[{}]", position + binding_offset, position, ty,)
|
||||
}
|
||||
ShaderInput::StorageTextureBuffer(_, ty) => {
|
||||
format!("#[spirv(storage_buffer, descriptor_set = 0, binding = {})] i{}: &mut [{}]]", position + binding_offset, position, ty,)
|
||||
}
|
||||
ShaderInput::TextureView(_, _) => {
|
||||
format!(
|
||||
"#[spirv(texture, descriptor_set = 0, binding = {})] i{}: spirv_std::image::Image2d",
|
||||
position + binding_offset,
|
||||
position,
|
||||
)
|
||||
}
|
||||
ShaderInput::TextureBuffer(_, _) => {
|
||||
panic!("Texture Buffers cannot be used as inputs use TextureView instead")
|
||||
}
|
||||
ShaderInput::OutputBuffer(_, ty) => {
|
||||
format!("#[spirv(storage_buffer, descriptor_set = 0, binding = {})] o{}: &mut[{}]", position + binding_offset, position, ty,)
|
||||
}
|
||||
ShaderInput::WorkGroupMemory(_, ty) => format!("#[spirv(workgroup_memory] i{}: {}", position, ty,),
|
||||
};
|
||||
line.replace("glam::u32::uvec3::UVec3", "spirv_std::glam::UVec3")
|
||||
}
|
||||
|
||||
struct GpuCompiler {
|
||||
compile_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl SpirVCompiler for GpuCompiler {
|
||||
fn compile(&self, networks: &[ProtoNetwork], io: &ShaderIO) -> anyhow::Result<gpu_executor::Shader> {
|
||||
let metadata = Metadata::new("project".to_owned(), vec!["test@example.com".to_owned()]);
|
||||
|
||||
create_files(&metadata, networks, &self.compile_dir, io)?;
|
||||
let result = compile(&self.compile_dir)?;
|
||||
|
||||
let bytes = std::fs::read(result.module.unwrap_single())?;
|
||||
let words = bytes.chunks(4).map(|chunk| u32::from_ne_bytes(chunk.try_into().unwrap())).collect::<Vec<_>>();
|
||||
|
||||
Ok(gpu_executor::Shader {
|
||||
source: Cow::Owned(words),
|
||||
name: "",
|
||||
io: io.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn serialize_gpu(networks: &[ProtoNetwork], io: &ShaderIO) -> anyhow::Result<String> {
|
||||
fn nid(id: &u64) -> String {
|
||||
format!("n{id:0x}")
|
||||
}
|
||||
|
||||
dbg!(&io);
|
||||
let mut inputs = io
|
||||
.inputs
|
||||
.iter()
|
||||
.filter(|x| !x.is_output())
|
||||
.enumerate()
|
||||
.map(|(i, input)| construct_argument(input, i as u32, 0))
|
||||
.collect::<Vec<_>>();
|
||||
let offset = inputs.len() as u32;
|
||||
|
||||
inputs.extend(io.inputs.iter().filter(|x| x.is_output()).enumerate().map(|(i, input)| construct_argument(input, i as u32, offset)));
|
||||
|
||||
let mut nodes = Vec::new();
|
||||
let mut input_nodes = Vec::new();
|
||||
let mut output_nodes = Vec::new();
|
||||
for network in networks {
|
||||
dbg!(&network);
|
||||
// assert_eq!(network.inputs.len(), io.inputs.iter().filter(|x| !x.is_output()).count());
|
||||
#[derive(serde::Serialize, Debug)]
|
||||
struct Node {
|
||||
id: String,
|
||||
index: usize,
|
||||
fqn: String,
|
||||
args: Vec<String>,
|
||||
}
|
||||
for (i, id) in network.inputs.iter().enumerate() {
|
||||
let Some((_, node)) = network.nodes.iter().find(|(i, _)| i == id) else {
|
||||
anyhow::bail!("Input node not found");
|
||||
};
|
||||
let fqn = &node.identifier.name;
|
||||
let id = nid(id);
|
||||
let node = Node {
|
||||
id: id.clone(),
|
||||
index: i + 2,
|
||||
fqn: fqn.to_string().split('<').next().unwrap().to_owned(),
|
||||
args: node.construction_args.new_function_args(),
|
||||
};
|
||||
dbg!(&node);
|
||||
if !io.inputs[i].is_output() {
|
||||
if input_nodes.iter().any(|x: &Node| x.id == id) {
|
||||
continue;
|
||||
}
|
||||
input_nodes.push(node);
|
||||
}
|
||||
}
|
||||
|
||||
for (ref id, node) in network.nodes.iter() {
|
||||
if network.inputs.contains(id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let fqn = &node.identifier.name;
|
||||
let id = nid(id);
|
||||
|
||||
if nodes.iter().any(|x: &Node| x.id == id) {
|
||||
continue;
|
||||
}
|
||||
nodes.push(Node {
|
||||
id,
|
||||
index: 0,
|
||||
fqn: fqn.to_string().split("<").next().unwrap().to_owned(),
|
||||
args: node.construction_args.new_function_args(),
|
||||
});
|
||||
}
|
||||
|
||||
let output = nid(&network.output);
|
||||
output_nodes.push(output);
|
||||
}
|
||||
dbg!(&input_nodes);
|
||||
|
||||
let template = include_str!("templates/spirv-template.rs");
|
||||
let mut tera = tera::Tera::default();
|
||||
tera.add_raw_template("spirv", template)?;
|
||||
let mut context = Context::new();
|
||||
context.insert("inputs", &inputs);
|
||||
context.insert("input_nodes", &input_nodes);
|
||||
context.insert("output_nodes", &output_nodes);
|
||||
context.insert("nodes", &nodes);
|
||||
context.insert("compute_threads", "12, 8");
|
||||
Ok(tera.render("spirv", &context)?)
|
||||
}
|
||||
|
||||
use spirv_builder::{MetadataPrintout, SpirvBuilder, SpirvMetadata};
|
||||
pub fn compile(dir: &Path) -> Result<spirv_builder::CompileResult, spirv_builder::SpirvBuilderError> {
|
||||
dbg!(&dir);
|
||||
let result = SpirvBuilder::new(dir, "spirv-unknown-vulkan1.2")
|
||||
.print_metadata(MetadataPrintout::DependencyOnly)
|
||||
.multimodule(false)
|
||||
.preserve_bindings(true)
|
||||
.release(true)
|
||||
.spirv_metadata(SpirvMetadata::Full)
|
||||
// .scalar_block_layout(true)
|
||||
.relax_logical_pointer(true)
|
||||
// .capability(spirv_builder::Capability::Float64)
|
||||
// .capability(spirv_builder::Capability::VariablePointersStorageBuffer)
|
||||
.extra_arg("no-early-report-zombies")
|
||||
.extra_arg("no-infer-storage-classes")
|
||||
.extra_arg("spirt-passes=qptr")
|
||||
.build()?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
#[test]
|
||||
fn test_create_cargo_toml() {
|
||||
let cargo_toml = super::create_cargo_toml(&super::Metadata {
|
||||
name: "project".to_owned(),
|
||||
authors: vec!["Example <john.smith@example.com>".to_owned(), "smith.john@example.com".to_owned()],
|
||||
});
|
||||
let cargo_toml = cargo_toml.expect("Failed to build cargo toml template");
|
||||
let lines = cargo_toml.split('\n').collect::<Vec<_>>();
|
||||
let cargo_toml = lines[..lines.len() - 2].join("\n");
|
||||
let reference = r#"[package]
|
||||
name = "project-node"
|
||||
version = "0.1.0"
|
||||
authors = ["Example <john.smith@example.com>", "smith.john@example.com", ]
|
||||
edition = "2024"
|
||||
license = "MIT OR Apache-2.0"
|
||||
publish = false
|
||||
|
||||
[lib]
|
||||
crate-type = ["dylib", "lib"]
|
||||
|
||||
[patch.crates-io]
|
||||
libm = { git = "https://github.com/rust-lang/libm", tag = "0.2.5" }
|
||||
|
||||
[dependencies]
|
||||
spirv-std = { git = "https://github.com/EmbarkStudios/rust-gpu" , features= ["glam"]}"#;
|
||||
|
||||
assert_eq!(cargo_toml, reference);
|
||||
}
|
||||
}
|
|
@@ -1,25 +0,0 @@
use gpu_compiler as compiler;
use gpu_executor::CompileRequest;
use graph_craft::document::NodeNetwork;
use std::io::Write;

fn main() -> anyhow::Result<()> {
    println!("Starting GPU Compiler!");
    let mut stdin = std::io::stdin();
    let mut stdout = std::io::stdout();
    let compile_dir = std::env::args().nth(1).map(|x| std::path::PathBuf::from(&x)).unwrap_or(tempfile::tempdir()?.into_path());
    let request: CompileRequest = serde_json::from_reader(&mut stdin)?;
    dbg!(&compile_dir);

    let metadata = compiler::Metadata::new("project".to_owned(), vec!["test@example.com".to_owned()]);

    compiler::create_files(&metadata, &request.networks, &compile_dir, &request.io)?;
    let result = compiler::compile(&compile_dir)?;

    let bytes = std::fs::read(result.module.unwrap_single())?;
    // TODO: properly resolve this
    let spirv_path = compile_dir.join("shader.spv");
    std::fs::write(&spirv_path, &bytes)?;

    Ok(())
}
@@ -1,19 +0,0 @@
[package]
authors = [{% for author in authors %}"{{author}}", {% endfor %}]
name = "{{name}}-node"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"
publish = false

[lib]
crate-type = ["dylib", "lib"]

[patch.crates-io]
libm = { git = "https://github.com/rust-lang/libm", tag = "0.2.5" }

[dependencies]
spirv-std = { version = "0.9" }
graphene-core = { path = "{{gcore_path}}", default-features = false, features = [
    "gpu",
] }
@@ -1,10 +0,0 @@
[toolchain]
channel = "nightly-2023-05-27"
components = [
    "rust-src",
    "rustc-dev",
    "llvm-tools-preview",
    "clippy",
    "rustfmt",
    "rustc",
]
@@ -1,44 +0,0 @@
#![no_std]
#![feature(unchecked_math)]

#[cfg(target_arch = "spirv")]
extern crate spirv_std;

// #[cfg(target_arch = "spirv")]
// pub mod gpu {
// use super::*;

use spirv_std::spirv;
use spirv_std::glam;
use spirv_std::glam::{UVec3, Vec2, Mat2, BVec2};

#[allow(unused)]
#[spirv(compute(threads({{compute_threads}})))]
pub fn eval (
    #[spirv(global_invocation_id)] _global_index: UVec3,
    {% for input in inputs %}
    {{input}},
    {% endfor %}
) {
    use graphene_core::{Node, NodeMut};
    use graphene_core::raster::adjustments::{BlendMode, BlendNode};
    use graphene_core::Color;

    {% for input in input_nodes %}
    let _i{{input.index}} = graphene_core::value::CopiedNode::new(*i{{input.index}});
    let _{{input.id}} = {{input.fqn}}::new({% for arg in input.args %}{{arg}}, {% endfor %});
    let {{input.id}} = graphene_core::structural::ComposeNode::new(_i{{input.index}}, _{{input.id}});
    {% endfor %}

    {% for node in nodes %}
    let mut {{node.id}} = {{node.fqn}}::new({% for arg in node.args %}{{arg}}, {% endfor %});
    {% endfor %}

    {% for output in output_nodes %}
    let v = {{output}}.eval(());
    o{{loop.index0}}[(_global_index.y * i0 + _global_index.x) as usize] = v;
    {% endfor %}
    // TODO: Write output to buffer
}

// }
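Both this shader template and Cargo-template.toml were rendered with Tera by the removed gpu-compiler. A minimal sketch of that rendering flow, assuming the `tera` crate is available; the template string is a one-line excerpt and the `compute_threads` value mirrors the context value the removed `serialize_gpu` inserted:

use tera::{Context, Tera};

fn main() -> tera::Result<()> {
    // Register a raw template string, then render it against a Context.
    let mut tera = Tera::default();
    tera.add_raw_template("threads", "#[spirv(compute(threads({{compute_threads}})))]")?;
    let mut context = Context::new();
    context.insert("compute_threads", "12, 8");
    let rendered = tera.render("threads", &context)?;
    assert_eq!(rendered, "#[spirv(compute(threads(12, 8)))]");
    println!("{rendered}");
    Ok(())
}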
@@ -1,28 +0,0 @@
[package]
name = "gpu-executor"
version = "0.1.0"
edition = "2024"
license = "MIT OR Apache-2.0"

[features]
default = []

[dependencies]
# Local dependencies
node-macro = { path = "../node-macro" }

# Workspace dependencies
graphene-core = { workspace = true, features = ["std", "alloc", "gpu"] }
dyn-any = { workspace = true, features = ["log-bad-types", "rc", "glam"] }
num-traits = { workspace = true }
log = { workspace = true }
serde = { workspace = true }
glam = { workspace = true }
base64 = { workspace = true }
bytemuck = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
web-sys = { workspace = true, features = [
    "HtmlCanvasElement",
    "ImageBitmapRenderingContext",
] }
@ -1,152 +0,0 @@
|
|||
use bytemuck::{Pod, Zeroable};
|
||||
use dyn_any::{StaticType, StaticTypeSized};
|
||||
use glam::UVec3;
|
||||
use graphene_core::raster::color::RGBA16F;
|
||||
use graphene_core::raster::{Image, Pixel, SRGBA8};
|
||||
use graphene_core::*;
|
||||
use std::borrow::Cow;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, dyn_any::DynAny)]
|
||||
pub enum ComputePassDimensions {
|
||||
X(u32),
|
||||
XY(u32, u32),
|
||||
XYZ(u32, u32, u32),
|
||||
}
|
||||
|
||||
impl ComputePassDimensions {
|
||||
pub fn get(&self) -> (u32, u32, u32) {
|
||||
match self {
|
||||
ComputePassDimensions::X(x) => (*x, 1, 1),
|
||||
ComputePassDimensions::XY(x, y) => (*x, *y, 1),
|
||||
ComputePassDimensions::XYZ(x, y, z) => (*x, *y, *z),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait Texture {
|
||||
fn width(&self) -> u32;
|
||||
fn height(&self) -> u32;
|
||||
fn format(&self) -> TextureBufferType;
|
||||
fn view<TextureView>(&self) -> TextureView;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
|
||||
/// GPU constants that can be used as inputs to a shader.
|
||||
pub enum GPUConstant {
|
||||
SubGroupId,
|
||||
SubGroupInvocationId,
|
||||
SubGroupSize,
|
||||
NumSubGroups,
|
||||
WorkGroupId,
|
||||
WorkGroupInvocationId,
|
||||
WorkGroupSize,
|
||||
NumWorkGroups,
|
||||
GlobalInvocationId,
|
||||
GlobalSize,
|
||||
}
|
||||
|
||||
impl GPUConstant {
|
||||
pub fn ty(&self) -> Type {
|
||||
match self {
|
||||
GPUConstant::SubGroupId => concrete!(u32),
|
||||
GPUConstant::SubGroupInvocationId => concrete!(u32),
|
||||
GPUConstant::SubGroupSize => concrete!(u32),
|
||||
GPUConstant::NumSubGroups => concrete!(u32),
|
||||
GPUConstant::WorkGroupId => concrete!(UVec3),
|
||||
GPUConstant::WorkGroupInvocationId => concrete!(UVec3),
|
||||
GPUConstant::WorkGroupSize => concrete!(u32),
|
||||
GPUConstant::NumWorkGroups => concrete!(u32),
|
||||
GPUConstant::GlobalInvocationId => concrete!(UVec3),
|
||||
GPUConstant::GlobalSize => concrete!(UVec3),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StorageBufferOptions {
|
||||
pub cpu_writable: bool,
|
||||
pub gpu_writable: bool,
|
||||
pub cpu_readable: bool,
|
||||
pub storage: bool,
|
||||
}
|
||||
|
||||
pub enum TextureBufferOptions {
|
||||
Storage,
|
||||
Texture,
|
||||
Surface,
|
||||
}
|
||||
|
||||
pub trait ToUniformBuffer: StaticType {
|
||||
fn to_bytes(&self) -> Cow<[u8]>;
|
||||
}
|
||||
|
||||
impl<T: StaticType + Pod + Zeroable> ToUniformBuffer for T {
|
||||
fn to_bytes(&self) -> Cow<[u8]> {
|
||||
Cow::Owned(bytemuck::bytes_of(self).into())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ToStorageBuffer: StaticType {
|
||||
fn to_bytes(&self) -> Cow<[u8]>;
|
||||
fn ty(&self) -> Type;
|
||||
}
|
||||
|
||||
impl<T: Pod + Zeroable + StaticTypeSized> ToStorageBuffer for Vec<T> {
|
||||
fn to_bytes(&self) -> Cow<[u8]> {
|
||||
Cow::Borrowed(bytemuck::cast_slice(self.as_slice()))
|
||||
}
|
||||
fn ty(&self) -> Type {
|
||||
concrete!(T)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait TextureFormat {
|
||||
fn format() -> TextureBufferType;
|
||||
}
|
||||
|
||||
impl TextureFormat for Color {
|
||||
fn format() -> TextureBufferType {
|
||||
TextureBufferType::Rgba32Float
|
||||
}
|
||||
}
|
||||
impl TextureFormat for SRGBA8 {
|
||||
fn format() -> TextureBufferType {
|
||||
TextureBufferType::Rgba8Srgb
|
||||
}
|
||||
}
|
||||
impl TextureFormat for RGBA16F {
|
||||
fn format() -> TextureBufferType {
|
||||
TextureBufferType::Rgba16Float
|
||||
}
|
||||
}
|
||||
|
||||
// TODO use wgpu type
|
||||
pub enum TextureBufferType {
|
||||
Rgba32Float,
|
||||
Rgba16Float,
|
||||
Rgba8Srgb,
|
||||
}
|
||||
|
||||
pub trait ToTextureBuffer: StaticType {
|
||||
fn to_bytes(&self) -> Cow<[u8]>;
|
||||
fn ty() -> Type;
|
||||
fn format() -> TextureBufferType;
|
||||
fn size(&self) -> (u32, u32);
|
||||
}
|
||||
|
||||
impl<T: Pod + Zeroable + StaticTypeSized + Pixel + TextureFormat> ToTextureBuffer for Image<T>
|
||||
where
|
||||
T::Static: Pixel,
|
||||
{
|
||||
fn to_bytes(&self) -> Cow<[u8]> {
|
||||
Cow::Borrowed(bytemuck::cast_slice(self.data.as_slice()))
|
||||
}
|
||||
fn ty() -> Type {
|
||||
concrete!(T)
|
||||
}
|
||||
fn format() -> TextureBufferType {
|
||||
T::format()
|
||||
}
|
||||
fn size(&self) -> (u32, u32) {
|
||||
(self.width, self.height)
|
||||
}
|
||||
}
|
|
@@ -34,9 +34,3 @@ impl Compiler {
pub trait Executor<I, O> {
    fn execute(&self, input: I) -> LocalFuture<Result<O, Box<dyn Error>>>;
}
#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)]
#[cfg(feature = "wgpu")]
pub struct CompileRequest {
    pub networks: Vec<ProtoNetwork>,
    pub io: wgpu_executor::ShaderIO,
}

|
@ -1,6 +1,4 @@
|
|||
use crate::document::NodeNetwork;
|
||||
use crate::graphene_compiler::Compiler;
|
||||
use crate::proto::ProtoNetwork;
|
||||
|
||||
pub fn load_network(document_string: &str) -> NodeNetwork {
|
||||
let document: serde_json::Value = serde_json::from_str(document_string).expect("Failed to parse document");
|
||||
|
@ -8,11 +6,6 @@ pub fn load_network(document_string: &str) -> NodeNetwork {
|
|||
serde_json::from_str::<NodeNetwork>(&document).expect("Failed to parse document")
|
||||
}
|
||||
|
||||
pub fn compile(network: NodeNetwork) -> ProtoNetwork {
|
||||
let compiler = Compiler {};
|
||||
compiler.compile_single(network).unwrap()
|
||||
}
|
||||
|
||||
pub fn load_from_name(name: &str) -> NodeNetwork {
|
||||
let content = std::fs::read(format!("../../demo-artwork/{name}.graphite")).expect("failed to read file");
|
||||
let content = std::str::from_utf8(&content).unwrap();
|
||||
|
|
|
@ -17,7 +17,6 @@ gpu = [
|
|||
"graphene-std/gpu",
|
||||
"graphene-core/gpu",
|
||||
"wgpu-executor",
|
||||
"gpu-executor",
|
||||
]
|
||||
|
||||
[dependencies]
|
||||
|
@ -50,7 +49,6 @@ clap = { version = "4.5.31", features = ["cargo", "derive"] }
|
|||
|
||||
# Optional local dependencies
|
||||
wgpu-executor = { path = "../wgpu-executor", optional = true }
|
||||
gpu-executor = { path = "../gpu-executor", optional = true }
|
||||
|
||||
# Optional workspace dependencies
|
||||
wasm-bindgen = { workspace = true, optional = true }
|
||||
|
|
|
@ -8,12 +8,7 @@ license = "MIT OR Apache-2.0"
|
|||
|
||||
[features]
|
||||
default = ["wasm", "imaginate"]
|
||||
gpu = [
|
||||
"graphene-core/gpu",
|
||||
"gpu-compiler-bin-wrapper",
|
||||
"compilation-client",
|
||||
"gpu-executor",
|
||||
]
|
||||
gpu = [ "graphene-core/gpu" ]
|
||||
wgpu = ["gpu", "dep:wgpu", "graph-craft/wgpu"]
|
||||
wasm = ["wasm-bindgen", "web-sys", "js-sys"]
|
||||
imaginate = ["image/png", "base64", "js-sys", "web-sys", "wasm-bindgen-futures"]
|
||||
|
@ -59,11 +54,6 @@ image = { workspace = true, default-features = false, features = [
|
|||
"jpeg",
|
||||
] }
|
||||
|
||||
# Optional local dependencies
|
||||
gpu-executor = { path = "../gpu-executor", optional = true }
|
||||
gpu-compiler-bin-wrapper = { path = "../gpu-compiler/gpu-compiler-bin-wrapper", optional = true }
|
||||
compilation-client = { path = "../compilation-client", optional = true }
|
||||
|
||||
# Optional workspace dependencies
|
||||
base64 = { workspace = true, optional = true }
|
||||
wgpu = { workspace = true, optional = true }
|
||||
|
|
|
@ -13,7 +13,6 @@ gpu = ["graphene-std/gpu", "graphene-core/gpu", "graphene-std/wgpu"]
|
|||
# Local dependencies
|
||||
graphene-std = { path = "../gstd", features = ["serde"] }
|
||||
graph-craft = { path = "../graph-craft" }
|
||||
gpu-executor = { path = "../gpu-executor" }
|
||||
wgpu-executor = { path = "../wgpu-executor" }
|
||||
|
||||
# Workspace dependencies
|
||||
|
|
|
@ -19,8 +19,6 @@ use node_registry_macros::{async_node, into_node};
|
|||
use once_cell::sync::Lazy;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
#[cfg(feature = "gpu")]
|
||||
use wgpu_executor::ShaderInputFrame;
|
||||
use wgpu_executor::{WgpuExecutor, WgpuSurface, WindowHandle};
|
||||
|
||||
// TODO: turn into hashmap
|
||||
|
@ -110,12 +108,8 @@ fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeCons
|
|||
),
|
||||
),
|
||||
#[cfg(feature = "gpu")]
|
||||
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => ShaderInputFrame]),
|
||||
#[cfg(feature = "gpu")]
|
||||
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => wgpu_executor::WgpuSurface]),
|
||||
#[cfg(feature = "gpu")]
|
||||
async_node!(graphene_core::memo::ImpureMemoNode<_, _, _>, input: Context, fn_params: [Context => ShaderInputFrame]),
|
||||
#[cfg(feature = "gpu")]
|
||||
async_node!(graphene_core::memo::ImpureMemoNode<_, _, _>, input: Context, fn_params: [Context => RasterDataTable<GPU>]),
|
||||
#[cfg(feature = "gpu")]
|
||||
async_node!(graphene_core::memo::MemoNode<_, _>, input: Context, fn_params: [Context => RasterDataTable<GPU>]),
|
||||
|
|
|
@ -474,7 +474,7 @@ fn parse_field(pat_ident: PatIdent, ty: Type, attrs: &[Attribute]) -> syn::Resul
|
|||
}
|
||||
|
||||
let unit = extract_attribute(attrs, "unit")
|
||||
.map(|attr| attr.parse_args::<LitStr>().map_err(|e| Error::new_spanned(attr, format!("Expected a unit type as string"))))
|
||||
.map(|attr| attr.parse_args::<LitStr>().map_err(|_e| Error::new_spanned(attr, format!("Expected a unit type as string"))))
|
||||
.transpose()?;
|
||||
|
||||
let number_display_decimal_places = extract_attribute(attrs, "display_decimal_places")
|
||||
|
|
|
@ -10,9 +10,6 @@ profiling = ["nvtx"]
|
|||
passthrough = []
|
||||
|
||||
[dependencies]
|
||||
# Local dependencies
|
||||
gpu-executor = { path = "../gpu-executor" }
|
||||
|
||||
# Workspace dependencies
|
||||
graphene-core = { workspace = true, features = ["std", "alloc", "gpu", "wgpu"] }
|
||||
dyn-any = { workspace = true, features = ["log-bad-types", "rc", "glam"] }
|
||||
|
|
|
@ -1,250 +0,0 @@
|
|||
use super::context::Context;
|
||||
use bytemuck::Pod;
|
||||
use dyn_any::StaticTypeSized;
|
||||
use std::borrow::Cow;
|
||||
use std::error::Error;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
pub type LocalFuture<'n, T> = Pin<Box<dyn core::future::Future<Output = T> + 'n>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct GpuExecutor<'a, I: StaticTypeSized, O> {
|
||||
context: Context,
|
||||
entry_point: String,
|
||||
shader: Cow<'a, [u32]>,
|
||||
_phantom: std::marker::PhantomData<(I, O)>,
|
||||
}
|
||||
|
||||
impl<'a, I: StaticTypeSized + Sync + Pod + Send, O: StaticTypeSized + Send + Sync + Pod> GpuExecutor<'a, I, O> {
|
||||
pub fn new(context: Context, shader: Cow<'a, [u32]>, entry_point: String) -> anyhow::Result<Self> {
|
||||
Ok(Self {
|
||||
context,
|
||||
entry_point,
|
||||
shader,
|
||||
_phantom: std::marker::PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn execute(&self, input: Vec<I>) -> LocalFuture<Result<Vec<O>, Box<dyn Error>>> {
|
||||
let context = &self.context;
|
||||
let future = execute_shader(context.device.clone(), context.queue.clone(), self.shader.to_vec(), input, self.entry_point.clone());
|
||||
Box::pin(async move {
|
||||
let result = future.await;
|
||||
|
||||
let result: Vec<O> = result.ok_or_else(|| String::from("Failed to execute shader"))?;
|
||||
Ok(result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_shader<I: Pod + Send + Sync, O: Pod + Send + Sync>(device: Arc<wgpu::Device>, queue: Arc<wgpu::Queue>, shader: Vec<u32>, data: Vec<I>, entry_point: String) -> Option<Vec<O>> {
|
||||
// Loads the shader from WGSL
|
||||
dbg!(&shader);
|
||||
// write shader to file
|
||||
use std::io::Write;
|
||||
let mut file = std::fs::File::create("/tmp/shader.spv").unwrap();
|
||||
file.write_all(bytemuck::cast_slice(&shader)).unwrap();
|
||||
let cs_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: None,
|
||||
source: wgpu::ShaderSource::SpirV(shader.into()),
|
||||
});
|
||||
|
||||
// Gets the size in bytes of the buffer.
|
||||
let slice_size = data.len() * std::mem::size_of::<O>();
|
||||
let size = slice_size as wgpu::BufferAddress;
|
||||
|
||||
// Instantiates buffer without data.
|
||||
// `usage` of buffer specifies how it can be used:
|
||||
// `BufferUsages::MAP_READ` allows it to be read (outside the shader).
|
||||
// `BufferUsages::COPY_DST` allows it to be the destination of the copy.
|
||||
let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: None,
|
||||
size,
|
||||
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
// Instantiates buffer with data (`numbers`).
|
||||
// Usage allowing the buffer to be:
|
||||
// A storage buffer (can be bound within a bind group and thus available to a shader).
|
||||
// The destination of a copy.
|
||||
// The source of a copy.
|
||||
let storage_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Storage Buffer"),
|
||||
contents: bytemuck::cast_slice(&data),
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::COPY_SRC,
|
||||
});
|
||||
|
||||
// Instantiates empty buffer for the result.
|
||||
// Usage allowing the buffer to be:
|
||||
// A storage buffer (can be bound within a bind group and thus available to a shader).
|
||||
// The destination of a copy.
|
||||
// The source of a copy.
|
||||
let dest_buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("Destination Buffer"),
|
||||
size,
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
// A bind group defines how buffers are accessed by shaders.
|
||||
// It is to WebGPU what a descriptor set is to Vulkan.
|
||||
// `binding` here refers to the `binding` of a buffer in the shader (`layout(set = 0, binding = 0) buffer`).
|
||||
|
||||
// A pipeline specifies the operation of a shader
|
||||
|
||||
// Instantiates the pipeline.
|
||||
let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: None,
|
||||
layout: None,
|
||||
module: &cs_module,
|
||||
entry_point: Some(entry_point.as_str()),
|
||||
compilation_options: Default::default(),
|
||||
cache: None,
|
||||
});
|
||||
|
||||
// Instantiates the bind group, once again specifying the binding of buffers.
|
||||
let bind_group_layout = compute_pipeline.get_bind_group_layout(0);
|
||||
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: None,
|
||||
layout: &bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: storage_buffer.as_entire_binding(),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: dest_buffer.as_entire_binding(),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// A command encoder executes one or many pipelines.
|
||||
// It is to WebGPU what a command buffer is to Vulkan.
|
||||
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
|
||||
{
|
||||
let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None, timestamp_writes: None });
|
||||
cpass.set_pipeline(&compute_pipeline);
|
||||
cpass.set_bind_group(0, Some(&bind_group), &[]);
|
||||
cpass.insert_debug_marker("compute node network evaluation");
|
||||
cpass.dispatch_workgroups(data.len().min(65535) as u32, 1, 1); // Number of cells to run, the (x,y,z) size of item being processed
|
||||
}
|
||||
// Sets adds copy operation to command encoder.
|
||||
// Will copy data from storage buffer on GPU to staging buffer on CPU.
|
||||
encoder.copy_buffer_to_buffer(&dest_buffer, 0, &staging_buffer, 0, size);
|
||||
|
||||
// Submits command encoder for processing
|
||||
queue.submit(Some(encoder.finish()));
|
||||
|
||||
// Note that we're not calling `.await` here.
|
||||
let buffer_slice = staging_buffer.slice(..);
|
||||
// Sets the buffer up for mapping, sending over the result of the mapping back to us when it is finished.
|
||||
let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
|
||||
buffer_slice.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());
|
||||
|
||||
// Poll the device in a blocking manner so that our future resolves.
|
||||
// In an actual application, `device.poll(...)` should
|
||||
// be called in an event loop or on another thread.
|
||||
device.poll(wgpu::Maintain::Wait);
|
||||
|
||||
// Awaits until `buffer_future` can be read from
|
||||
#[cfg(feature = "profiling")]
|
||||
nvtx::range_push!("compute");
|
||||
let result = receiver.receive().await;
|
||||
#[cfg(feature = "profiling")]
|
||||
nvtx::range_pop!();
|
||||
if let Some(Ok(())) = result {
|
||||
// Gets contents of buffer
|
||||
let data = buffer_slice.get_mapped_range();
|
||||
// Since contents are got in bytes, this converts these bytes back to u32
|
||||
let result = bytemuck::cast_slice(&data).to_vec();
|
||||
|
||||
// With the current interface, we have to make sure all mapped views are dropped before we unmap the buffer
|
||||
drop(data);
|
||||
// Unmaps buffer from memory
|
||||
staging_buffer.unmap();
|
||||
// If you are familiar with C++ these 2 lines can be thought of similarly to `delete myPointer; myPointer = NULL;`.
|
||||
// It effectively frees the memory.
|
||||
|
||||
// Returns data from buffer
|
||||
Some(result)
|
||||
} else {
|
||||
panic!("failed to run compute on gpu!")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Fix this test
|
||||
// #[cfg(test)]
|
||||
// mod test {
|
||||
// use super::*;
|
||||
//
|
||||
// use graph_craft::concrete;
|
||||
// use graph_craft::generic;
|
||||
// use graph_craft::proto::*;
|
||||
|
||||
// #[test]
|
||||
// fn add_on_gpu() {
|
||||
// use crate::executor::Executor;
|
||||
// let m = compiler::Metadata::new("project".to_owned(), vec!["test@example.com".to_owned()]);
|
||||
// let network = inc_network();
|
||||
// let temp_dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
|
||||
// let executor: GpuExecutor<u32, u32> = GpuExecutor::new(Context::new(), network, m, temp_dir.path()).unwrap();
|
||||
|
||||
// let data: Vec<_> = (0..1024).map(|x| x as u32).collect();
|
||||
// let result = executor.execute(Box::new(data)).unwrap();
|
||||
// let result = dyn_any::downcast::<Vec<u32>>(result).unwrap();
|
||||
// for (i, r) in result.iter().enumerate() {
|
||||
// assert_eq!(*r, i as u32 + 3);
|
||||
// }
|
||||
// }
|
||||
|
||||
// fn inc_network() -> ProtoNetwork {
|
||||
// let mut construction_network = ProtoNetwork {
|
||||
// inputs: vec![NodeId(10)],
|
||||
// output: NodeId(1),
|
||||
// nodes: [
|
||||
// (
|
||||
// NodeId(1),
|
||||
// ProtoNode {
|
||||
// identifier: ProtoNodeIdentifier::new("graphene_core::ops::IdentityNode", &[generic!("u32")]),
|
||||
// input: ProtoNodeInput::Node(11),
|
||||
// construction_args: ConstructionArgs::Nodes(vec![]),
|
||||
// },
|
||||
// ),
|
||||
// (
|
||||
// NodeId(10),
|
||||
// ProtoNode {
|
||||
// identifier: ProtoNodeIdentifier::new("graphene_core::structural::ConsNode", &[generic!("&ValueNode<u32>"), generic!("()")]),
|
||||
// input: ProtoNodeInput::Network,
|
||||
// construction_args: ConstructionArgs::Nodes(vec![14]),
|
||||
// },
|
||||
// ),
|
||||
// (
|
||||
// NodeId(11),
|
||||
// ProtoNode {
|
||||
// identifier: ProtoNodeIdentifier::new("graphene_core::ops::AddPairNode", &[generic!("u32"), generic!("u32")]),
|
||||
// input: ProtoNodeInput::Node(10),
|
||||
// construction_args: ConstructionArgs::Nodes(vec![]),
|
||||
// },
|
||||
// ),
|
||||
// (
|
||||
// NodeId(14),
|
||||
// ProtoNode {
|
||||
// identifier: ProtoNodeIdentifier::new("graphene_core::value::ValueNode", &[concrete!("u32")]),
|
||||
// input: ProtoNodeInput::None,
|
||||
// construction_args: ConstructionArgs::Value(Box::new(3_u32)),
|
||||
// },
|
||||
// ),
|
||||
// ]
|
||||
// .into_iter()
|
||||
// .collect(),
|
||||
// };
|
||||
// construction_network.resolve_inputs();
|
||||
// construction_network.reorder_ids();
|
||||
// construction_network
|
||||
// }
|
||||
// }
|
|
@ -1,41 +1,24 @@
|
|||
mod context;
|
||||
mod executor;
|
||||
|
||||
use anyhow::{Result, bail};
|
||||
use anyhow::Result;
|
||||
pub use context::Context;
|
||||
use dyn_any::{DynAny, StaticType};
|
||||
pub use executor::GpuExecutor;
|
||||
use futures::Future;
|
||||
use glam::{DAffine2, UVec2};
|
||||
use gpu_executor::{ComputePassDimensions, GPUConstant, StorageBufferOptions, TextureBufferOptions, TextureBufferType, ToStorageBuffer, ToUniformBuffer};
|
||||
use dyn_any::StaticType;
|
||||
use glam::UVec2;
|
||||
use graphene_core::application_io::{ApplicationIo, EditorApi, SurfaceHandle};
|
||||
use graphene_core::instances::Instance;
|
||||
use graphene_core::raster::{Image, SRGBA8};
|
||||
use graphene_core::raster_types::{CPU, GPU, Raster, RasterDataTable};
|
||||
use graphene_core::transform::{Footprint, Transform};
|
||||
use graphene_core::{Color, Cow, Ctx, ExtractFootprint, Node, SurfaceFrame, Type};
|
||||
use std::pin::Pin;
|
||||
use graphene_core::{Color, Ctx};
|
||||
use std::sync::Arc;
|
||||
use vello::{AaConfig, AaSupport, RenderParams, Renderer, RendererOptions, Scene};
|
||||
use wgpu::util::DeviceExt;
|
||||
use wgpu::{Buffer, BufferDescriptor, Origin3d, ShaderModule, SurfaceConfiguration, SurfaceError, Texture, TextureAspect, TextureView};
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
use web_sys::HtmlCanvasElement;
|
||||
use wgpu::{Origin3d, SurfaceConfiguration, TextureAspect};
|
||||
|
||||
#[derive(dyn_any::DynAny)]
|
||||
pub struct WgpuExecutor {
|
||||
pub context: Context,
|
||||
render_configuration: RenderConfiguration,
|
||||
vello_renderer: futures::lock::Mutex<vello::Renderer>,
|
||||
vello_renderer: futures::lock::Mutex<Renderer>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for WgpuExecutor {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("WgpuExecutor")
|
||||
.field("context", &self.context)
|
||||
.field("render_configuration", &self.render_configuration)
|
||||
.finish()
|
||||
f.debug_struct("WgpuExecutor").field("context", &self.context).finish()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -54,72 +37,12 @@ impl graphene_core::application_io::Size for Surface {
|
|||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
struct Vertex {
|
||||
position: [f32; 3],
|
||||
tex_coords: [f32; 2],
|
||||
}
|
||||
|
||||
impl Vertex {
|
||||
fn desc() -> wgpu::VertexBufferLayout<'static> {
|
||||
use std::mem;
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
shader_location: 0,
|
||||
format: wgpu::VertexFormat::Float32x3,
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
|
||||
shader_location: 1,
|
||||
format: wgpu::VertexFormat::Float32x2,
|
||||
},
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const VERTICES: &[Vertex] = &[
|
||||
Vertex {
|
||||
position: [-1., 1., 0.],
|
||||
tex_coords: [0., 0.],
|
||||
}, // A
|
||||
Vertex {
|
||||
position: [-1., -1., 0.],
|
||||
tex_coords: [0., 1.],
|
||||
}, // B
|
||||
Vertex {
|
||||
position: [1., 1., 0.],
|
||||
tex_coords: [1., 0.],
|
||||
}, // C
|
||||
Vertex {
|
||||
position: [1., -1., 0.],
|
||||
tex_coords: [1., 1.],
|
||||
}, // D
|
||||
];
|
||||
|
||||
const INDICES: &[u16] = &[0, 1, 2, 2, 1, 3];
|
||||
|
||||
#[derive(Debug, DynAny)]
|
||||
#[repr(transparent)]
|
||||
pub struct CommandBuffer(wgpu::CommandBuffer);
|
||||
|
||||
#[derive(Debug, DynAny)]
|
||||
#[repr(transparent)]
|
||||
pub struct ShaderModuleWrapper(ShaderModule);
|
||||
pub type ShaderHandle = ShaderModuleWrapper;
|
||||
pub type BufferHandle = Buffer;
|
||||
pub type TextureHandle = Texture;
|
||||
pub struct Surface {
|
||||
pub inner: wgpu::Surface<'static>,
|
||||
resolution: UVec2,
|
||||
}
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
pub type Window = HtmlCanvasElement;
|
||||
pub type Window = web_sys::HtmlCanvasElement;
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub type Window = Arc<winit::window::Window>;
|
||||
|
||||
|
@ -129,10 +52,6 @@ unsafe impl StaticType for Surface {
|
|||
|
||||
pub use graphene_core::renderer::RenderContext;
|
||||
|
||||
// pub trait SpirVCompiler {
|
||||
// fn compile(&self, network: &[ProtoNetwork], io: &ShaderIO) -> Result<Shader>;
|
||||
// }
|
||||
|
||||
impl WgpuExecutor {
|
||||
pub async fn render_vello_scene(&self, scene: &Scene, surface: &WgpuSurface, width: u32, height: u32, context: &RenderContext, background: Color) -> Result<()> {
|
||||
let surface = &surface.surface.inner;
|
||||
|
@ -184,338 +103,6 @@ impl WgpuExecutor {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn load_shader(&self, shader: Shader) -> Result<ShaderHandle> {
|
||||
#[cfg(not(feature = "passthrough"))]
|
||||
let shader_module = self.context.device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some(shader.name),
|
||||
source: wgpu::ShaderSource::SpirV(shader.source),
|
||||
});
|
||||
#[cfg(feature = "passthrough")]
|
||||
let shader_module = unsafe {
|
||||
self.context.device.create_shader_module_spirv(&wgpu::ShaderModuleDescriptorSpirV {
|
||||
label: Some(shader.name),
|
||||
source: shader.source,
|
||||
})
|
||||
};
|
||||
Ok(ShaderModuleWrapper(shader_module))
|
||||
}
|
||||
|
||||
pub fn create_uniform_buffer<T: ToUniformBuffer>(&self, data: T) -> Result<WgpuShaderInput> {
|
||||
let bytes = data.to_bytes();
|
||||
let buffer = self.context.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: None,
|
||||
contents: bytes.as_ref(),
|
||||
usage: wgpu::BufferUsages::UNIFORM,
|
||||
});
|
||||
Ok(ShaderInput::UniformBuffer(buffer, Type::new::<T>()))
|
||||
}
|
||||
|
||||
pub fn create_storage_buffer<T: ToStorageBuffer>(&self, data: T, options: StorageBufferOptions) -> Result<WgpuShaderInput> {
|
||||
let bytes = data.to_bytes();
|
||||
let mut usage = wgpu::BufferUsages::empty();
|
||||
|
||||
if options.storage {
|
||||
usage |= wgpu::BufferUsages::STORAGE;
|
||||
}
|
||||
if options.gpu_writable {
|
||||
usage |= wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::COPY_DST;
|
||||
}
|
||||
if options.cpu_readable {
|
||||
usage |= wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST;
|
||||
}
|
||||
if options.cpu_writable {
|
||||
usage |= wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC;
|
||||
}
|
||||
|
||||
log::warn!("Creating storage buffer with usage {:?} and len: {}", usage, bytes.len());
|
||||
let buffer = self.context.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: None,
|
||||
contents: bytes.as_ref(),
|
||||
usage,
|
||||
});
|
||||
Ok(ShaderInput::StorageBuffer(buffer, data.ty()))
|
||||
}
|
||||
pub fn create_texture_buffer<T: gpu_executor::ToTextureBuffer>(&self, data: T, options: TextureBufferOptions) -> Result<WgpuShaderInput> {
|
||||
let bytes = data.to_bytes();
|
||||
let usage = match options {
|
||||
TextureBufferOptions::Storage => wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC,
|
||||
TextureBufferOptions::Texture => wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC,
|
||||
TextureBufferOptions::Surface => wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
};
|
||||
|
||||
let format = match T::format() {
|
||||
TextureBufferType::Rgba32Float => wgpu::TextureFormat::Rgba32Float,
|
||||
TextureBufferType::Rgba16Float => wgpu::TextureFormat::Rgba16Float,
|
||||
TextureBufferType::Rgba8Srgb => wgpu::TextureFormat::Rgba8UnormSrgb,
|
||||
};
|
||||
|
||||
let buffer = self.context.device.create_texture_with_data(
|
||||
self.context.queue.as_ref(),
|
||||
&wgpu::TextureDescriptor {
|
||||
label: None,
|
||||
size: wgpu::Extent3d {
|
||||
width: data.size().0,
|
||||
height: data.size().1,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format,
|
||||
usage,
|
||||
view_formats: &[format],
|
||||
},
|
||||
wgpu::util::TextureDataOrder::LayerMajor,
|
||||
bytes.as_ref(),
|
||||
);
|
||||
match options {
|
||||
TextureBufferOptions::Storage => Ok(ShaderInput::StorageTextureBuffer(buffer, T::ty())),
|
||||
TextureBufferOptions::Texture => Ok(ShaderInput::TextureBuffer(buffer, T::ty())),
|
||||
TextureBufferOptions::Surface => Ok(ShaderInput::TextureBuffer(buffer, T::ty())),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_output_buffer(&self, len: usize, ty: Type, cpu_readable: bool) -> Result<WgpuShaderInput> {
|
||||
log::warn!("Creating output buffer with len: {len}");
|
||||
let create_buffer = |usage| {
|
||||
Ok::<_, anyhow::Error>(self.context.device.create_buffer(&BufferDescriptor {
|
||||
label: None,
|
||||
size: len as u64 * ty.size().ok_or_else(|| anyhow::anyhow!("Cannot create buffer of type {ty:?}"))? as u64,
|
||||
usage,
|
||||
mapped_at_creation: false,
|
||||
}))
|
||||
};
|
||||
let buffer = match cpu_readable {
|
||||
true => ShaderInput::ReadBackBuffer(create_buffer(wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ)?, ty),
|
||||
false => ShaderInput::OutputBuffer(create_buffer(wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC)?, ty),
|
||||
};
|
||||
Ok(buffer)
|
||||
}
|
||||
    pub fn create_compute_pass(&self, layout: &PipelineLayout, read_back: Option<Arc<WgpuShaderInput>>, instances: ComputePassDimensions) -> Result<CommandBuffer> {
        let compute_pipeline = self.context.device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: None,
            layout: None,
            module: &layout.shader.0,
            entry_point: Some(layout.entry_point.as_str()),
            compilation_options: Default::default(),
            cache: None,
        });
        let bind_group_layout = compute_pipeline.get_bind_group_layout(0);

        let entries = layout
            .bind_group
            .buffers
            .iter()
            .chain(std::iter::once(&layout.output_buffer))
            .flat_map(|input| input.binding())
            .enumerate()
            .map(|(i, buffer)| wgpu::BindGroupEntry {
                binding: i as u32,
                resource: match buffer {
                    BindingType::UniformBuffer(buf) => buf.as_entire_binding(),
                    BindingType::StorageBuffer(buf) => buf.as_entire_binding(),
                    BindingType::TextureView(buf) => wgpu::BindingResource::TextureView(buf),
                },
            })
            .collect::<Vec<_>>();

        let bind_group = self.context.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: None,
            layout: &bind_group_layout,
            entries: entries.as_slice(),
        });

        let mut encoder = self.context.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("compute encoder") });
        {
            let dimensions = instances.get();
            let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None, timestamp_writes: None });
            cpass.set_pipeline(&compute_pipeline);
            cpass.set_bind_group(0, Some(&bind_group), &[]);
            cpass.insert_debug_marker("compute node network evaluation");
            cpass.push_debug_group("compute shader");
            cpass.dispatch_workgroups(dimensions.0, dimensions.1, dimensions.2); // Number of cells to run, the (x, y, z) size of the item being processed
            cpass.pop_debug_group();
        }
        // Adds a copy operation to the command encoder, which will copy data from the storage buffer on the GPU to the staging buffer readable by the CPU.
        if let Some(buffer) = read_back {
            let ShaderInput::ReadBackBuffer(output, _ty) = buffer.as_ref() else {
                bail!("Tried to read back from a non read back buffer");
            };
            let size = output.size();
            let ShaderInput::OutputBuffer(output_buffer, ty) = layout.output_buffer.as_ref() else {
                bail!("Tried to read back from a non output buffer");
            };
            assert_eq!(size, output_buffer.size());
            assert_eq!(ty, &layout.output_buffer.ty());
            encoder.copy_buffer_to_buffer(output_buffer, 0, output, 0, size);
        }

        // Finishes the command encoder; the resulting command buffer is submitted for processing by the caller.
        Ok(CommandBuffer(encoder.finish()))
    }

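    /// Presents the given texture to the window surface: configures the surface, binds the texture and sampler, and draws a textured quad with the blit pipeline.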
    pub fn create_render_pass(&self, _footprint: Footprint, texture: ShaderInputFrame, canvas: Arc<SurfaceHandle<Surface>>) -> Result<()> {
        let transform = texture.transform;
        let texture = texture.shader_input.texture().expect("Expected texture input");
        let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
            format: Some(wgpu::TextureFormat::Rgba16Float),
            ..Default::default()
        });

        let surface = &canvas.as_ref().surface.inner;
        let surface_caps = surface.get_capabilities(&self.context.adapter);
        if surface_caps.formats.is_empty() {
            log::warn!("No surface formats available");
            return Ok(());
        }
        // TODO:
        let resolution = transform.decompose_scale().as_uvec2();
        let surface_format = wgpu::TextureFormat::Bgra8Unorm;
        let config = SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: resolution.x,
            height: resolution.y,
            present_mode: surface_caps.present_modes[0],
            alpha_mode: surface_caps.alpha_modes[0],
            view_formats: vec![],
            desired_maximum_frame_latency: 2,
        };
        surface.configure(&self.context.device, &config);
        let result = surface.get_current_texture();

        let output = match result {
            Err(SurfaceError::Timeout) => {
                log::warn!("Timeout when getting current texture");
                return Ok(());
            }
            Err(SurfaceError::Lost) => {
                log::warn!("Surface lost");

                // surface.configure(&self.context.device, &new_config);
                return Ok(());
            }
            Err(SurfaceError::OutOfMemory) => {
                log::warn!("Out of memory");
                return Ok(());
            }
            Err(SurfaceError::Outdated) => {
                log::warn!("Surface outdated");
                // surface.configure(&self.context.device, &new_config);
                return Ok(());
            }
            Ok(surface) => surface,
        };
        let view = output.texture.create_view(&wgpu::TextureViewDescriptor {
            format: Some(wgpu::TextureFormat::Bgra8Unorm),
            ..Default::default()
        });
        let output_texture_bind_group = self.context.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &self.render_configuration.texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&texture_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&self.render_configuration.sampler),
                },
            ],
            label: Some("output_texture_bind_group"),
        });

        let mut encoder = self.context.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("Render Encoder") });

        {
            let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::RED),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                timestamp_writes: None,
                occlusion_query_set: None,
            });

            render_pass.set_pipeline(&self.render_configuration.render_pipeline);
            render_pass.set_bind_group(0, Some(&output_texture_bind_group), &[]);
            render_pass.set_vertex_buffer(0, self.render_configuration.vertex_buffer.slice(..));
            render_pass.set_index_buffer(self.render_configuration.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            render_pass.draw_indexed(0..self.render_configuration.num_indices, 0, 0..1);
            render_pass.insert_debug_marker("render node network");
        }

        let encoder = encoder.finish();
        #[cfg(feature = "profiling")]
        nvtx::range_push!("render");
        self.context.queue.submit(Some(encoder));
        #[cfg(feature = "profiling")]
        nvtx::range_pop!();
        log::trace!("Submitted render pass");
        output.present();

        Ok(())
    }

    pub fn execute_compute_pipeline(&self, encoder: CommandBuffer) -> Result<()> {
        self.context.queue.submit(Some(encoder.0));

        Ok(())
    }

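    /// Asynchronously maps a read-back buffer and returns its contents as raw bytes.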
    pub fn read_output_buffer(&self, buffer: Arc<WgpuShaderInput>) -> Pin<Box<dyn Future<Output = Result<Vec<u8>>> + Send>> {
        Box::pin(async move {
            let ShaderInput::ReadBackBuffer(buffer, _) = buffer.as_ref() else {
                bail!("Tried to read a non readback buffer")
            };

            let buffer_slice = buffer.slice(..);

            // Sets the buffer up for mapping, sending the result of the mapping back to us when it is finished.
            let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
            buffer_slice.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());

            // Wait for the mapping to finish.
            #[cfg(feature = "profiling")]
            nvtx::range_push!("compute");
            let result = receiver.receive().await;
            #[cfg(feature = "profiling")]
            nvtx::range_pop!();

            if result.is_none_or(|x| x.is_err()) {
                bail!("failed to run compute on gpu!")
            }
            // Gets the contents of the buffer
            let data = buffer_slice.get_mapped_range();
            // The mapped contents are raw bytes, so copy them out into an owned Vec
            let result = bytemuck::cast_slice(&data).to_vec();

            // With the current interface, we have to make sure all mapped views are
            // dropped before we unmap the buffer.
            drop(data);
            buffer.unmap(); // Unmaps buffer from memory

            // Returns data from buffer
            Ok(result)
        })
    }

    pub fn create_texture_view(&self, texture: WgpuShaderInput) -> Result<WgpuShaderInput> {
        // Ok(ShaderInput::TextureView(texture.create_view(&wgpu::TextureViewDescriptor::default()), ) )
        let ShaderInput::TextureBuffer(texture, ty) = &texture else {
            bail!("Tried to create a texture view from a non texture");
        };
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        Ok(ShaderInput::TextureView(view, ty.clone()))
    }

    #[cfg(target_arch = "wasm32")]
    pub fn create_surface(&self, canvas: graphene_core::WasmSurfaceHandle) -> Result<SurfaceHandle<Surface>> {
        let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Canvas(canvas.surface))?;
@@ -544,117 +131,6 @@ impl WgpuExecutor {
impl WgpuExecutor {
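    /// Creates the executor: acquires a wgpu device via `Context::new()`, then builds the sampler, texture bind group layout, and blit render pipeline used to present textures, plus the Vello renderer.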
    pub async fn new() -> Option<Self> {
        let context = Context::new().await?;
        println!("wgpu executor created");

        let texture_bind_group_layout = context.device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
                    count: None,
                },
            ],
            label: Some("texture_bind_group_layout"),
        });

        let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });

        let shader = context.device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
        });

        let render_pipeline_layout = context.device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Render Pipeline Layout"),
            bind_group_layouts: &[&texture_bind_group_layout],
            push_constant_ranges: &[],
        });

        let render_pipeline = context.device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[Vertex::desc()],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format: wgpu::TextureFormat::Bgra8Unorm,
                    blend: Some(wgpu::BlendState {
                        color: wgpu::BlendComponent::REPLACE,
                        alpha: wgpu::BlendComponent::REPLACE,
                    }),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: None,
                // Setting this to anything other than Fill requires Features::POLYGON_MODE_LINE
                // or Features::POLYGON_MODE_POINT
                polygon_mode: wgpu::PolygonMode::Fill,
                // Requires Features::DEPTH_CLIP_CONTROL
                unclipped_depth: false,
                // Requires Features::CONSERVATIVE_RASTERIZATION
                conservative: false,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            // If the pipeline will be used with a multiview render pass, this
            // indicates how many array layers the attachments will have.
            multiview: None,
            cache: None,
        });

        let vertex_buffer = context.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::cast_slice(VERTICES),
            usage: wgpu::BufferUsages::VERTEX,
        });
        let index_buffer = context.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Index Buffer"),
            contents: bytemuck::cast_slice(INDICES),
            usage: wgpu::BufferUsages::INDEX,
        });
        let num_indices = INDICES.len() as u32;
        let render_configuration = RenderConfiguration {
            vertex_buffer,
            index_buffer,
            num_indices,
            render_pipeline,
            texture_bind_group_layout,
            sampler,
        };

        let vello_renderer = Renderer::new(
            &context.device,
@@ -670,209 +146,11 @@ impl WgpuExecutor {

        Some(Self {
            context,
            render_configuration,
            vello_renderer: vello_renderer.into(),
        })
    }
}

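/// GPU resources used to draw a full-surface textured quad when presenting a rendered frame.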
#[derive(Debug)]
struct RenderConfiguration {
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    num_indices: u32,
    render_pipeline: wgpu::RenderPipeline,
    texture_bind_group_layout: wgpu::BindGroupLayout,
    sampler: wgpu::Sampler,
}

pub type WgpuShaderInput = ShaderInput<BufferHandle, TextureHandle, TextureView>;
pub type AbstractShaderInput = ShaderInput<(), (), ()>;

#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
/// All the possible inputs to a shader.
pub enum ShaderInput<BufferHandle, TextureHandle, TextureView> {
    UniformBuffer(BufferHandle, Type),
    StorageBuffer(BufferHandle, Type),
    TextureBuffer(TextureHandle, Type),
    StorageTextureBuffer(TextureHandle, Type),
    TextureView(TextureView, Type),
    /// A struct representing a work group memory buffer. This cannot be accessed by the CPU.
    WorkGroupMemory(usize, Type),
    Constant(GPUConstant),
    OutputBuffer(BufferHandle, Type),
    ReadBackBuffer(BufferHandle, Type),
}

unsafe impl<T: 'static, U: 'static, V: 'static> StaticType for ShaderInput<T, U, V> {
    type Static = ShaderInput<T, U, V>;
}

pub enum BindingType<'a> {
    UniformBuffer(&'a BufferHandle),
    StorageBuffer(&'a BufferHandle),
    TextureView(&'a TextureView),
}

/// Extract the buffer handle from a shader input.
impl ShaderInput<BufferHandle, TextureHandle, TextureView> {
    pub fn binding(&self) -> Option<BindingType> {
        match self {
            ShaderInput::UniformBuffer(buffer, _) => Some(BindingType::UniformBuffer(buffer)),
            ShaderInput::StorageBuffer(buffer, _) => Some(BindingType::StorageBuffer(buffer)),
            ShaderInput::WorkGroupMemory(_, _) => None,
            ShaderInput::Constant(_) => None,
            ShaderInput::TextureBuffer(_, _) => None,
            ShaderInput::StorageTextureBuffer(_, _) => None,
            ShaderInput::TextureView(tex, _) => Some(BindingType::TextureView(tex)),
            ShaderInput::OutputBuffer(buffer, _) => Some(BindingType::StorageBuffer(buffer)),
            ShaderInput::ReadBackBuffer(buffer, _) => Some(BindingType::StorageBuffer(buffer)),
        }
    }
    pub fn buffer(&self) -> Option<&BufferHandle> {
        match self {
            ShaderInput::UniformBuffer(buffer, _) => Some(buffer),
            ShaderInput::StorageBuffer(buffer, _) => Some(buffer),
            ShaderInput::WorkGroupMemory(_, _) => None,
            ShaderInput::Constant(_) => None,
            ShaderInput::TextureBuffer(_, _) => None,
            ShaderInput::StorageTextureBuffer(_, _) => None,
            ShaderInput::TextureView(_tex, _) => None,
            ShaderInput::OutputBuffer(buffer, _) => Some(buffer),
            ShaderInput::ReadBackBuffer(buffer, _) => Some(buffer),
        }
    }
    pub fn texture(&self) -> Option<&TextureHandle> {
        match self {
            ShaderInput::UniformBuffer(_, _) => None,
            ShaderInput::StorageBuffer(_, _) => None,
            ShaderInput::WorkGroupMemory(_, _) => None,
            ShaderInput::Constant(_) => None,
            ShaderInput::TextureBuffer(tex, _) => Some(tex),
            ShaderInput::StorageTextureBuffer(tex, _) => Some(tex),
            ShaderInput::TextureView(_, _) => None,
            ShaderInput::OutputBuffer(_, _) => None,
            ShaderInput::ReadBackBuffer(_, _) => None,
        }
    }
}
impl<T, U, V> ShaderInput<T, U, V> {
    pub fn ty(&self) -> Type {
        match self {
            ShaderInput::UniformBuffer(_, ty) => ty.clone(),
            ShaderInput::StorageBuffer(_, ty) => ty.clone(),
            ShaderInput::WorkGroupMemory(_, ty) => ty.clone(),
            ShaderInput::Constant(c) => c.ty(),
            ShaderInput::TextureBuffer(_, ty) => ty.clone(),
            ShaderInput::StorageTextureBuffer(_, ty) => ty.clone(),
            ShaderInput::TextureView(_, ty) => ty.clone(),
            ShaderInput::OutputBuffer(_, ty) => ty.clone(),
            ShaderInput::ReadBackBuffer(_, ty) => ty.clone(),
        }
    }

    pub fn is_output(&self) -> bool {
        matches!(self, ShaderInput::OutputBuffer(_, _))
    }
}

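/// A compiled shader: its SPIR-V words, an entry point name, and the I/O layout it expects.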
pub struct Shader<'a> {
    pub source: Cow<'a, [u32]>,
    pub name: &'a str,
    pub io: ShaderIO,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize, dyn_any::DynAny)]
pub struct ShaderIO {
    pub inputs: Vec<AbstractShaderInput>,
    pub output: AbstractShaderInput,
}

/// Collection of all arguments that are passed to the shader.
#[derive(DynAny)]
pub struct Bindgroup {
    pub buffers: Vec<Arc<WgpuShaderInput>>,
}

/// A struct representing a compute pipeline.
#[derive(DynAny, Clone)]
pub struct PipelineLayout {
    pub shader: Arc<ShaderHandle>,
    pub entry_point: String,
    pub bind_group: Arc<Bindgroup>,
    pub output_buffer: Arc<WgpuShaderInput>,
}

/// Extracts arguments from the function arguments and wraps them in a node.
pub struct ShaderInputNode<T> {
    data: T,
}

impl<'i, T: 'i> Node<'i, ()> for ShaderInputNode<T> {
    type Output = &'i T;

    fn eval(&'i self, _: ()) -> Self::Output {
        &self.data
    }
}

impl<T> ShaderInputNode<T> {
    pub fn new(data: T) -> Self {
        Self { data }
    }
}

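// Node graph wrappers around the executor primitives above (buffer creation, pipeline layout, compute pass, and readback).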
#[node_macro::node(category(""))]
async fn uniform<'a: 'n, T: ToUniformBuffer + Send + 'n>(_: impl Ctx, #[implementations(f32, DAffine2)] data: T, executor: &'a WgpuExecutor) -> WgpuShaderInput {
    executor.create_uniform_buffer(data).unwrap()
}

#[node_macro::node(category(""))]
async fn storage<'a: 'n, T: ToStorageBuffer + Send + 'n>(_: impl Ctx, #[implementations(Vec<u8>)] data: T, executor: &'a WgpuExecutor) -> WgpuShaderInput {
    executor
        .create_storage_buffer(
            data,
            StorageBufferOptions {
                cpu_writable: false,
                gpu_writable: true,
                cpu_readable: false,
                storage: true,
            },
        )
        .unwrap()
}

#[node_macro::node(category(""))]
async fn create_output_buffer<'a: 'n>(_: impl Ctx + 'a, size: usize, executor: &'a WgpuExecutor, ty: Type) -> Arc<WgpuShaderInput> {
    Arc::new(executor.create_output_buffer(size, ty, true).unwrap())
}

#[node_macro::node(skip_impl)]
async fn create_compute_pass<'a: 'n>(_: impl Ctx + 'a, layout: PipelineLayout, executor: &'a WgpuExecutor, output: WgpuShaderInput, instances: ComputePassDimensions) -> CommandBuffer {
    executor.create_compute_pass(&layout, Some(output.into()), instances).unwrap()
}

#[node_macro::node(category("Debug: GPU"))]
async fn create_pipeline_layout(
    _: impl Ctx,
    shader: impl Node<(), Output = ShaderHandle>,
    entry_point: String,
    bind_group: impl Node<(), Output = Bindgroup>,
    output_buffer: Arc<WgpuShaderInput>,
) -> PipelineLayout {
    PipelineLayout {
        shader: shader.eval(()).await.into(),
        entry_point,
        bind_group: bind_group.eval(()).await.into(),
        output_buffer,
    }
}

#[node_macro::node(category(""))]
async fn read_output_buffer<'a: 'n>(_: impl Ctx + 'a, buffer: Arc<WgpuShaderInput>, executor: &'a WgpuExecutor, _compute_pass: ()) -> Vec<u8> {
    executor.read_output_buffer(buffer).await.unwrap()
}

pub type WindowHandle = Arc<SurfaceHandle<Window>>;

#[node_macro::node(skip_impl)]
@@ -881,63 +159,3 @@ fn create_gpu_surface<'a: 'n, Io: ApplicationIo<Executor = WgpuExecutor, Surface
    let executor = editor_api.application_io.as_ref()?.gpu_executor()?;
    Some(Arc::new(executor.create_surface(canvas).ok()?))
}

#[derive(DynAny, Clone, Debug)]
pub struct ShaderInputFrame {
    shader_input: Arc<WgpuShaderInput>,
    transform: DAffine2,
}

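// Renders the evaluated `ShaderInputFrame` onto the given window surface and returns the resulting `SurfaceFrame`.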
#[node_macro::node(category(""))]
async fn render_texture<'a: 'n>(
    _: impl Ctx + 'a,
    footprint: Footprint,
    image: impl Node<Footprint, Output = ShaderInputFrame>,
    surface: Option<WgpuSurface>,
    executor: &'a WgpuExecutor,
) -> SurfaceFrame {
    let surface = surface.unwrap();
    let surface_id = surface.window_id;
    let image = image.eval(footprint).await;
    let transform = image.transform;

    executor.create_render_pass(footprint, image, surface).unwrap();

    SurfaceFrame {
        surface_id,
        transform,
        resolution: footprint.resolution,
    }
}

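// Converts each CPU raster instance to SRGBA8 and uploads it as a GPU texture, carrying over the transform, blending, and source node metadata.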
#[node_macro::node(category(""))]
async fn upload_texture<'a: 'n>(_: impl ExtractFootprint + Ctx, input: RasterDataTable<CPU>, executor: &'a WgpuExecutor) -> RasterDataTable<GPU> {
    let mut result_table = RasterDataTable::<GPU>::default();

    for instance in input.instance_ref_iter() {
        let image = instance.instance;
        let new_data: Vec<SRGBA8> = image.data.iter().map(|x| (*x).into()).collect();
        let new_image = Image {
            width: image.width,
            height: image.height,
            data: new_data,
            base64_string: None,
        };

        let shader_input = executor.create_texture_buffer(new_image, TextureBufferOptions::Texture).unwrap();
        let texture = match shader_input {
            ShaderInput::TextureBuffer(buffer, _) => buffer,
            ShaderInput::StorageTextureBuffer(buffer, _) => buffer,
            _ => unreachable!("Unsupported ShaderInput type"),
        };

        result_table.push(Instance {
            instance: Raster::new_gpu(texture.into()),
            transform: *instance.transform,
            alpha_blending: *instance.alpha_blending,
            source_node_id: *instance.source_node_id,
        });
    }

    result_table
}
@@ -1,43 +0,0 @@
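// shader.wgsl (loaded above via include_str!("shader.wgsl")): fullscreen textured quad with a linear-to-sRGB conversion in the fragment stage.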
// Vertex shader

struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) tex_coords: vec2<f32>,
}

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) tex_coords: vec2<f32>,
}

@vertex
fn vs_main(
    model: VertexInput,
) -> VertexOutput {
    var out: VertexOutput;
    out.tex_coords = model.tex_coords;
    out.clip_position = vec4<f32>(model.position, 1.0);
    return out;
}

// Fragment shader

@group(0) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;

fn linearToSRGB(color: vec3<f32>) -> vec3<f32> {
    let a = 0.055;
    return select(pow(color, vec3<f32>(1.0 / 2.2)) * (1.0 + a) - a,
        color / 12.92,
        color <= vec3<f32>(0.0031308));
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    var color = textureSample(t_diffuse, s_diffuse, in.tex_coords);
    var linearColor = color.rgb;
    var srgbColor = linearToSRGB(linearColor);
    return vec4<f32>(srgbColor, color.a);
}