Merge branch 'master' into fix-range

This commit is contained in:
mTvare 2025-08-02 17:12:36 +05:30 committed by GitHub
commit fb7fa40bd2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
306 changed files with 13655 additions and 7691 deletions

View file

@ -57,6 +57,15 @@ pub struct ImageTexture {
pub texture: (),
}
impl<'a> serde::Deserialize<'a> for ImageTexture {
fn deserialize<D>(_: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
unimplemented!("attempted to serialize a texture")
}
}
impl Hash for ImageTexture {
#[cfg(feature = "wgpu")]
fn hash<H: Hasher>(&self, state: &mut H) {

View file

@ -14,7 +14,7 @@ use graphene_core::registry::FutureWrapperNode;
use graphene_core::transform::Transform;
use graphene_core::value::ClonedNode;
use graphene_core::{Ctx, Node};
use graphene_raster_nodes::adjustments::blend_colors;
use graphene_raster_nodes::blending_nodes::blend_colors;
use graphene_raster_nodes::std_nodes::{empty_image, extend_image_to_bounds};
#[derive(Clone, Copy, Debug, PartialEq)]

View file

@ -0,0 +1,35 @@
[package]
name = "graphene-core-shaders"
version = "0.1.0"
edition = "2024"
description = "no_std API definitions for Graphene"
authors = ["Graphite Authors <contact@graphite.rs>"]
license = "MIT OR Apache-2.0"
[features]
std = ["dep:dyn-any", "dep:serde", "dep:specta", "dep:log"]
[dependencies]
# Local std dependencies
dyn-any = { workspace = true, optional = true }
# Workspace dependencies
bytemuck = { workspace = true }
glam = { version = "0.29", default-features = false, features = ["nostd-libm", "scalar-math"] }
half = { workspace = true }
num-derive = { workspace = true }
num-traits = { workspace = true }
# Workspace std dependencies
serde = { workspace = true, optional = true }
specta = { workspace = true, optional = true }
log = { workspace = true, optional = true }
[dev-dependencies]
graphene-core = { workspace = true }
[lints.rust]
# the spirv target is not in the list of common cfgs so must be added manually
unexpected_cfgs = { level = "warn", check-cfg = [
'cfg(target_arch, values("spirv"))',
] }

View file

@ -1,8 +1,9 @@
use dyn_any::DynAny;
use std::hash::Hash;
use core::fmt::Display;
use core::hash::{Hash, Hasher};
#[derive(Copy, Clone, Debug, PartialEq, DynAny, specta::Type, serde::Serialize, serde::Deserialize)]
#[serde(default)]
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "std", serde(default))]
pub struct AlphaBlending {
pub blend_mode: BlendMode,
pub opacity: f32,
@ -15,14 +16,14 @@ impl Default for AlphaBlending {
}
}
impl Hash for AlphaBlending {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
fn hash<H: Hasher>(&self, state: &mut H) {
self.opacity.to_bits().hash(state);
self.fill.to_bits().hash(state);
self.blend_mode.hash(state);
self.clip.hash(state);
}
}
impl std::fmt::Display for AlphaBlending {
impl Display for AlphaBlending {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let round = |x: f32| (x * 1e3).round() / 1e3;
write!(
@ -56,11 +57,15 @@ impl AlphaBlending {
clip: if t < 0.5 { self.clip } else { other.clip },
}
}
pub fn opacity(&self, mask: bool) -> f32 {
self.opacity * if mask { 1. } else { self.fill }
}
}
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, DynAny, Hash, specta::Type)]
#[repr(i32)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub enum BlendMode {
// Basic group
#[default]
@ -185,18 +190,19 @@ impl BlendMode {
}
/// Renders the blend mode CSS style declaration.
#[cfg(feature = "std")]
pub fn render(&self) -> String {
format!(
r#" mix-blend-mode: {};"#,
self.to_svg_style_name().unwrap_or_else(|| {
warn!("Unsupported blend mode {self:?}");
log::warn!("Unsupported blend mode {self:?}");
"normal"
})
)
}
}
impl std::fmt::Display for BlendMode {
impl Display for BlendMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
// Normal group

View file

@ -0,0 +1,26 @@
pub trait ChoiceTypeStatic: Sized + Copy + crate::AsU32 + Send + Sync {
const WIDGET_HINT: ChoiceWidgetHint;
const DESCRIPTION: Option<&'static str>;
fn list() -> &'static [&'static [(Self, VariantMetadata)]];
}
pub enum ChoiceWidgetHint {
Dropdown,
RadioButtons,
}
/// Translation struct between macro and definition.
#[derive(Clone, Debug)]
pub struct VariantMetadata {
/// Name as declared in source code.
pub name: &'static str,
/// Name to be displayed in UI.
pub label: &'static str,
/// User-facing documentation text.
pub docstring: Option<&'static str>,
/// Name of icon to display in radio buttons and such.
pub icon: Option<&'static str>,
}

View file

@ -1,16 +1,16 @@
use super::color_traits::{Alpha, AlphaMut, AssociatedAlpha, Luminance, LuminanceMut, Pixel, RGB, RGBMut, Rec709Primaries, SRGB};
use super::discrete_srgb::{float_to_srgb_u8, srgb_u8_to_float};
use bytemuck::{Pod, Zeroable};
use dyn_any::DynAny;
use core::hash::Hash;
use half::f16;
#[cfg(target_arch = "spirv")]
use spirv_std::num_traits::Euclid;
#[cfg(target_arch = "spirv")]
use spirv_std::num_traits::float::Float;
use std::hash::Hash;
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, Pod, Zeroable, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, serde::Serialize, serde::Deserialize))]
pub struct RGBA16F {
red: f16,
green: f16,
@ -82,7 +82,8 @@ impl Alpha for RGBA16F {
impl Pixel for RGBA16F {}
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, Pod, Zeroable, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub struct SRGBA8 {
red: u8,
green: u8,
@ -162,7 +163,8 @@ impl Alpha for SRGBA8 {
impl Pixel for SRGBA8 {}
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, Pod, Zeroable, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub struct Luma(pub f32);
impl Luminance for Luma {
@ -202,7 +204,8 @@ impl Pixel for Luma {}
/// The other components (RGB) are stored as `f32` that range from `0.0` up to `f32::MAX`,
/// the values encode the brightness of each channel proportional to the light intensity in cd/m² (nits) in HDR, and `0.0` (black) to `1.0` (white) in SDR color.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, PartialEq, DynAny, Pod, Zeroable, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Pod, Zeroable)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub struct Color {
red: f32,
green: f32,
@ -835,6 +838,20 @@ impl Color {
[(gamma.red * 255.) as u8, (gamma.green * 255.) as u8, (gamma.blue * 255.) as u8, (gamma.alpha * 255.) as u8]
}
/// Return the all RGB components as a u8 slice, first component is red, followed by green, followed by blue. Use this if the [`Color`] is in linear space.
///
/// # Examples
/// ```
/// use graphene_core::color::Color;
/// let color = Color::from_rgbaf32(0.114, 0.103, 0.98, 0.97).unwrap();
/// // TODO: Add test
/// ```
#[inline(always)]
pub fn to_rgb8_srgb(&self) -> [u8; 3] {
let gamma = self.to_gamma_srgb();
[(gamma.red * 255.) as u8, (gamma.green * 255.) as u8, (gamma.blue * 255.) as u8]
}
// https://www.niwa.nu/2013/05/math-behind-colorspace-conversions-rgb-hsl/
/// Convert a [Color] to a hue, saturation, lightness and alpha (all between 0 and 1)
///

View file

@ -1,11 +1,10 @@
use bytemuck::{Pod, Zeroable};
use glam::DVec2;
use std::fmt::Debug;
#[cfg(target_arch = "spirv")]
use spirv_std::num_traits::float::Float;
pub use crate::blending::*;
use bytemuck::{Pod, Zeroable};
use core::fmt::Debug;
use glam::DVec2;
use num_derive::*;
#[cfg(target_arch = "spirv")]
use num_traits::float::Float;
pub trait Linear {
fn from_f32(x: f32) -> Self;
@ -64,7 +63,6 @@ impl<T: Linear + Debug + Copy> Channel for T {
impl<T: Linear + Debug + Copy> LinearChannel for T {}
use num_derive::*;
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Num, NumCast, NumOps, One, Zero, ToPrimitive, FromPrimitive)]
pub struct SRGBGammaFloat(f32);
@ -97,14 +95,6 @@ impl<T: Rec709Primaries> RGBPrimaries for T {
pub trait SRGB: Rec709Primaries {}
pub trait Serde: serde::Serialize + for<'a> serde::Deserialize<'a> {}
#[cfg(not(feature = "serde"))]
pub trait Serde {}
impl<T: serde::Serialize + for<'a> serde::Deserialize<'a>> Serde for T {}
#[cfg(not(feature = "serde"))]
impl<T> Serde for T {}
// TODO: Come up with a better name for this trait
pub trait Pixel: Clone + Pod + Zeroable + Default {
#[cfg(not(target_arch = "spirv"))]

View file

@ -0,0 +1,9 @@
pub trait Ctx: Clone + Send {}
impl<T: Ctx> Ctx for Option<T> {}
impl<T: Ctx + Sync> Ctx for &T {}
impl Ctx for () {}
pub trait ArcCtx: Send + Sync {}
#[cfg(feature = "std")]
impl<T: ArcCtx> Ctx for std::sync::Arc<T> {}

View file

@ -0,0 +1,17 @@
pub mod blending;
pub mod choice_type;
pub mod color;
pub mod context;
pub mod registry;
pub use context::Ctx;
pub use glam;
pub trait AsU32 {
fn as_u32(&self) -> u32;
}
impl AsU32 for u32 {
fn as_u32(&self) -> u32 {
*self
}
}

View file

@ -0,0 +1,24 @@
pub mod types {
/// 0% - 100%
pub type Percentage = f64;
/// -100% - 100%
pub type SignedPercentage = f64;
/// -180° - 180°
pub type Angle = f64;
/// Ends in the unit of x
pub type Multiplier = f64;
/// Non-negative integer with px unit
pub type PixelLength = f64;
/// Non-negative
pub type Length = f64;
/// 0 to 1
pub type Fraction = f64;
/// Unsigned integer
pub type IntegerCount = u32;
/// Unsigned integer to be used for random seeds
pub type SeedValue = u32;
/// DVec2 with px unit
pub type PixelSize = glam::DVec2;
/// String with one or more than one line
pub type TextArea = String;
}

View file

@ -14,10 +14,12 @@ wgpu = ["dep:wgpu"]
dealloc_nodes = []
[dependencies]
# Local dependencies
graphene-core-shaders = { workspace = true, features = ["std"] }
# Workspace dependencies
bytemuck = { workspace = true }
node-macro = { workspace = true }
num-derive = { workspace = true }
num-traits = { workspace = true }
rand = { workspace = true }
glam = { workspace = true }
@ -30,7 +32,6 @@ rand_chacha = { workspace = true }
bezier-rs = { workspace = true }
specta = { workspace = true }
image = { workspace = true }
half = { workspace = true }
tinyvec = { workspace = true }
parley = { workspace = true }
skrifa = { workspace = true }
@ -46,9 +47,3 @@ wgpu = { workspace = true, optional = true }
# Workspace dependencies
tokio = { workspace = true }
serde_json = { workspace = true }
[lints.rust]
# the spirv target is not in the list of common cfgs so must be added manually
unexpected_cfgs = { level = "warn", check-cfg = [
'cfg(target_arch, values("spirv"))',
] }

View file

@ -1,11 +1,10 @@
use crate::transform::Footprint;
pub use graphene_core_shaders::context::{ArcCtx, Ctx};
use std::any::Any;
use std::borrow::Borrow;
use std::panic::Location;
use std::sync::Arc;
pub trait Ctx: Clone + Send {}
pub trait ExtractFootprint {
#[track_caller]
fn try_footprint(&self) -> Option<&Footprint>;
@ -27,7 +26,7 @@ pub trait ExtractAnimationTime {
}
pub trait ExtractIndex {
fn try_index(&self) -> Option<usize>;
fn try_index(&self) -> Option<Vec<usize>>;
}
// Consider returning a slice or something like that
@ -51,9 +50,6 @@ pub enum VarArgsResult {
IndexOutOfBounds,
NoVarArgs,
}
impl<T: Ctx> Ctx for Option<T> {}
impl<T: Ctx + Sync> Ctx for &T {}
impl Ctx for () {}
impl Ctx for Footprint {}
impl ExtractFootprint for () {
fn try_footprint(&self) -> Option<&Footprint> {
@ -91,7 +87,7 @@ impl<T: ExtractAnimationTime + Sync> ExtractAnimationTime for Option<T> {
}
}
impl<T: ExtractIndex> ExtractIndex for Option<T> {
fn try_index(&self) -> Option<usize> {
fn try_index(&self) -> Option<Vec<usize>> {
self.as_ref().and_then(|x| x.try_index())
}
}
@ -122,7 +118,7 @@ impl<T: ExtractAnimationTime + Sync> ExtractAnimationTime for Arc<T> {
}
}
impl<T: ExtractIndex> ExtractIndex for Arc<T> {
fn try_index(&self) -> Option<usize> {
fn try_index(&self) -> Option<Vec<usize>> {
(**self).try_index()
}
}
@ -157,7 +153,7 @@ impl<T: CloneVarArgs + Sync> CloneVarArgs for Arc<T> {
}
impl Ctx for ContextImpl<'_> {}
impl Ctx for Arc<OwnedContextImpl> {}
impl ArcCtx for OwnedContextImpl {}
impl ExtractFootprint for ContextImpl<'_> {
fn try_footprint(&self) -> Option<&Footprint> {
@ -170,8 +166,8 @@ impl ExtractTime for ContextImpl<'_> {
}
}
impl ExtractIndex for ContextImpl<'_> {
fn try_index(&self) -> Option<usize> {
self.index
fn try_index(&self) -> Option<Vec<usize>> {
self.index.clone()
}
}
impl ExtractVarArgs for ContextImpl<'_> {
@ -202,8 +198,8 @@ impl ExtractAnimationTime for OwnedContextImpl {
}
}
impl ExtractIndex for OwnedContextImpl {
fn try_index(&self) -> Option<usize> {
self.index
fn try_index(&self) -> Option<Vec<usize>> {
self.index.clone()
}
}
impl ExtractVarArgs for OwnedContextImpl {
@ -244,7 +240,7 @@ pub struct OwnedContextImpl {
varargs: Option<Arc<[DynBox]>>,
parent: Option<Arc<dyn ExtractVarArgs + Sync + Send>>,
// This could be converted into a single enum to save extra bytes
index: Option<usize>,
index: Option<Vec<usize>>,
real_time: Option<f64>,
animation_time: Option<f64>,
}
@ -334,7 +330,11 @@ impl OwnedContextImpl {
self
}
pub fn with_index(mut self, index: usize) -> Self {
self.index = Some(index);
if let Some(current_index) = &mut self.index {
current_index.push(index);
} else {
self.index = Some(vec![index]);
}
self
}
pub fn into_context(self) -> Option<Arc<Self>> {
@ -346,12 +346,12 @@ impl OwnedContextImpl {
}
}
#[derive(Default, Clone, Copy, dyn_any::DynAny)]
#[derive(Default, Clone, dyn_any::DynAny)]
pub struct ContextImpl<'a> {
pub(crate) footprint: Option<&'a Footprint>,
varargs: Option<&'a [DynRef<'a>]>,
// This could be converted into a single enum to save extra bytes
index: Option<usize>,
index: Option<Vec<usize>>,
time: Option<f64>,
}
@ -363,6 +363,7 @@ impl<'a> ContextImpl<'a> {
ContextImpl {
footprint: Some(new_footprint),
varargs: varargs.map(|x| x.borrow()),
index: self.index.clone(),
..*self
}
}

View file

@ -2,9 +2,9 @@ use crate::Ctx;
use dyn_any::DynAny;
use glam::{DVec2, IVec2, UVec2};
/// Obtains the X or Y component of a coordinate point.
/// Obtains the X or Y component of a vec2.
///
/// The inverse of this node is "Coordinate Value", which can have either or both its X and Y exposed as graph inputs.
/// The inverse of this node is "Vec2 Value", which can have either or both its X and Y parameters exposed as graph inputs.
#[node_macro::node(name("Extract XY"), category("Math: Vector"))]
fn extract_xy<T: Into<DVec2>>(_: impl Ctx, #[implementations(DVec2, IVec2, UVec2)] vector: T, axis: XY) -> f64 {
match axis {
@ -13,7 +13,7 @@ fn extract_xy<T: Into<DVec2>>(_: impl Ctx, #[implementations(DVec2, IVec2, UVec2
}
}
/// The X or Y component of a coordinate.
/// The X or Y component of a vec2.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[widget(Dropdown)]
pub enum XY {

View file

@ -100,6 +100,11 @@ impl From<RasterDataTable<GPU>> for GraphicGroupTable {
Self::new(GraphicElement::RasterDataGPU(raster_data_table))
}
}
impl From<DAffine2> for GraphicGroupTable {
fn from(_: DAffine2) -> Self {
GraphicGroupTable::default()
}
}
/// The possible forms of graphical content held in a Vec by the `elements` field of [`GraphicElement`].
#[derive(Clone, Debug, Hash, PartialEq, DynAny, serde::Serialize, serde::Deserialize)]
@ -118,6 +123,12 @@ impl Default for GraphicElement {
}
}
impl From<DAffine2> for GraphicElement {
fn from(_: DAffine2) -> Self {
GraphicElement::default()
}
}
impl GraphicElement {
pub fn as_group(&self) -> Option<&GraphicGroupTable> {
match self {
@ -201,41 +212,6 @@ impl BoundingBox for GraphicGroupTable {
}
}
impl<'de> serde::Deserialize<'de> for Raster<CPU> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Ok(Raster::new_cpu(Image::deserialize(deserializer)?))
}
}
impl serde::Serialize for Raster<CPU> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.data().serialize(serializer)
}
}
impl<'de> serde::Deserialize<'de> for Raster<GPU> {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
unimplemented!()
}
}
impl serde::Serialize for Raster<GPU> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
unimplemented!()
}
}
/// Some [`ArtboardData`] with some optional clipping bounds that can be exported.
#[derive(Clone, Debug, Hash, PartialEq, DynAny, serde::Serialize, serde::Deserialize)]
pub struct Artboard {
@ -351,6 +327,7 @@ async fn to_element<Data: Into<GraphicElement> + 'n>(
VectorDataTable,
RasterDataTable<CPU>,
RasterDataTable<GPU>,
DAffine2,
)]
data: Data,
) -> GraphicElement {
@ -463,14 +440,18 @@ async fn to_artboard<Data: Into<GraphicGroupTable> + 'n>(
Context -> VectorDataTable,
Context -> RasterDataTable<CPU>,
Context -> RasterDataTable<GPU>,
Context -> DAffine2,
)]
contents: impl Node<Context<'static>, Output = Data>,
label: String,
location: IVec2,
dimensions: IVec2,
location: DVec2,
dimensions: DVec2,
background: Color,
clip: bool,
) -> Artboard {
let location = location.as_ivec2();
let dimensions = dimensions.as_ivec2().max(IVec2::ONE);
let footprint = ctx.try_footprint().copied();
let mut new_ctx = OwnedContextImpl::from(ctx);
if let Some(mut footprint) = footprint {

View file

@ -1,4 +1,5 @@
use crate::AlphaBlending;
use crate::transform::ApplyTransform;
use crate::uuid::NodeId;
use dyn_any::StaticType;
use glam::DAffine2;
@ -26,6 +27,24 @@ impl<T> Instances<T> {
}
}
pub fn new_instance(instance: Instance<T>) -> Self {
Self {
instance: vec![instance.instance],
transform: vec![instance.transform],
alpha_blending: vec![instance.alpha_blending],
source_node_id: vec![instance.source_node_id],
}
}
pub fn with_capacity(capacity: usize) -> Self {
Self {
instance: Vec::with_capacity(capacity),
transform: Vec::with_capacity(capacity),
alpha_blending: Vec::with_capacity(capacity),
source_node_id: Vec::with_capacity(capacity),
}
}
pub fn push(&mut self, instance: Instance<T>) {
self.instance.push(instance.instance);
self.transform.push(instance.transform);
@ -136,6 +155,20 @@ impl<T: Hash> Hash for Instances<T> {
}
}
impl<T> ApplyTransform for Instances<T> {
fn apply_transform(&mut self, modification: &DAffine2) {
for transform in &mut self.transform {
*transform *= *modification;
}
}
fn left_apply_transform(&mut self, modification: &DAffine2) {
for transform in &mut self.transform {
*transform = *modification * *transform;
}
}
}
impl<T: PartialEq> PartialEq for Instances<T> {
fn eq(&self, other: &Self) -> bool {
self.instance.len() == other.instance.len() && { self.instance.iter().zip(other.instance.iter()).all(|(a, b)| a == b) }
@ -146,6 +179,18 @@ unsafe impl<T: StaticType + 'static> StaticType for Instances<T> {
type Static = Instances<T>;
}
impl<T> FromIterator<Instance<T>> for Instances<T> {
fn from_iter<I: IntoIterator<Item = Instance<T>>>(iter: I) -> Self {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
let mut instances = Self::with_capacity(lower);
for instance in iter {
instances.push(instance);
}
instances
}
}
fn one_daffine2_default() -> Vec<DAffine2> {
vec![DAffine2::IDENTITY]
}

View file

@ -2,10 +2,8 @@
extern crate log;
pub mod animation;
pub mod blending;
pub mod blending_nodes;
pub mod bounds;
pub mod color;
pub mod consts;
pub mod context;
pub mod debug;
@ -33,13 +31,17 @@ pub mod vector;
pub use crate as graphene_core;
pub use blending::*;
pub use color::Color;
pub use context::*;
pub use ctor;
pub use dyn_any::{StaticTypeSized, WasmNotSend, WasmNotSync};
pub use graphene_core_shaders::AsU32;
pub use graphene_core_shaders::blending;
pub use graphene_core_shaders::choice_type;
pub use graphene_core_shaders::color;
pub use graphic_element::{Artboard, ArtboardGroupTable, GraphicElement, GraphicGroupTable};
pub use memo::MemoHash;
pub use num_traits;
pub use raster::Color;
use std::any::TypeId;
use std::future::Future;
use std::pin::Pin;
@ -165,12 +167,3 @@ pub trait NodeInputDecleration {
fn identifier() -> ProtoNodeIdentifier;
type Result;
}
pub trait AsU32 {
fn as_u32(&self) -> u32;
}
impl AsU32 for u32 {
fn as_u32(&self) -> u32 {
*self
}
}

View file

@ -10,10 +10,18 @@ use crate::{Context, Ctx};
use glam::{DAffine2, DVec2};
#[node_macro::node(category("Text"))]
fn to_string<T: std::fmt::Debug>(_: impl Ctx, #[implementations(String, bool, f64, u32, u64, DVec2, VectorDataTable, DAffine2)] value: T) -> String {
fn to_string<T: std::fmt::Debug>(_: impl Ctx, #[implementations(String, bool, f64, u32, u64, DVec2, DAffine2, VectorDataTable)] value: T) -> String {
format!("{:?}", value)
}
#[node_macro::node(category("Text"))]
fn serialize<T: serde::Serialize>(
_: impl Ctx,
#[implementations(String, bool, f64, u32, u64, DVec2, DAffine2, Color, Option<Color>, GraphicGroupTable, VectorDataTable, RasterDataTable<CPU>)] value: T,
) -> String {
serde_json::to_string(&value).unwrap_or_else(|_| "Serialization Error".to_string())
}
#[node_macro::node(category("Text"))]
fn string_concatenate(_: impl Ctx, #[implementations(String)] first: String, second: TextArea) -> String {
first.clone() + &second
@ -33,8 +41,8 @@ fn string_slice(_: impl Ctx, #[implementations(String)] string: String, start: f
}
#[node_macro::node(category("Text"))]
fn string_length(_: impl Ctx, #[implementations(String)] string: String) -> usize {
string.len()
fn string_length(_: impl Ctx, #[implementations(String)] string: String) -> u32 {
string.chars().count() as u32
}
#[node_macro::node(category("Math: Logic"))]

View file

@ -1,12 +1,3 @@
use crate::GraphicGroupTable;
pub use crate::color::*;
use crate::raster_types::{CPU, RasterDataTable};
use crate::vector::VectorDataTable;
use std::fmt::Debug;
#[cfg(target_arch = "spirv")]
use spirv_std::num_traits::float::Float;
/// as to not yet rename all references
pub mod color {
pub use super::*;
@ -15,6 +6,11 @@ pub mod color {
pub mod image;
pub use self::image::Image;
use crate::GraphicGroupTable;
pub use crate::color::*;
use crate::raster_types::{CPU, RasterDataTable};
use crate::vector::VectorDataTable;
use std::fmt::Debug;
pub trait Bitmap {
type Pixel: Pixel;

View file

@ -50,6 +50,13 @@ pub struct Image<P: Pixel> {
// TODO: Currently it is always anchored at the top left corner at (0, 0). The bottom right corner of the new origin field would correspond to (1, 1).
}
#[derive(Debug, Clone, dyn_any::DynAny, Default, PartialEq, serde::Serialize, serde::Deserialize, specta::Type)]
pub struct TransformImage(pub DAffine2);
impl Hash for TransformImage {
fn hash<H: std::hash::Hasher>(&self, _: &mut H) {}
}
impl<P: Pixel + Debug> Debug for Image<P> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let length = self.data.len();

View file

@ -6,136 +6,202 @@ use crate::raster::Image;
use core::ops::Deref;
use dyn_any::DynAny;
use glam::{DAffine2, DVec2};
#[cfg(feature = "wgpu")]
use std::sync::Arc;
use std::fmt::Debug;
use std::ops::DerefMut;
#[derive(Clone, Debug, Hash, PartialEq, Eq, Copy)]
pub struct CPU;
#[derive(Clone, Debug, Hash, PartialEq, Eq, Copy)]
pub struct GPU;
mod __private {
pub trait Sealed {}
}
trait Storage: 'static {}
impl Storage for CPU {}
impl Storage for GPU {}
pub trait Storage: __private::Sealed + Clone + Debug + 'static {
fn is_empty(&self) -> bool;
}
#[derive(Clone, Debug, Hash, PartialEq)]
#[allow(private_bounds)]
pub struct Raster<T: Storage> {
data: RasterStorage,
#[derive(Clone, Debug, PartialEq, Hash, Default)]
pub struct Raster<T>
where
Raster<T>: Storage,
{
storage: T,
}
unsafe impl<T: Storage> dyn_any::StaticType for Raster<T> {
unsafe impl<T> dyn_any::StaticType for Raster<T>
where
Raster<T>: Storage,
{
type Static = Raster<T>;
}
#[derive(Clone, Debug, Hash, PartialEq, DynAny)]
pub enum RasterStorage {
Cpu(Image<Color>),
#[cfg(feature = "wgpu")]
Gpu(Arc<wgpu::Texture>),
#[cfg(not(feature = "wgpu"))]
Gpu(()),
impl<T> Raster<T>
where
Raster<T>: Storage,
{
pub fn new(t: T) -> Self {
Self { storage: t }
}
}
impl RasterStorage {}
impl Raster<CPU> {
pub fn new_cpu(image: Image<Color>) -> Self {
Self {
data: RasterStorage::Cpu(image),
storage: CPU,
}
}
pub fn data(&self) -> &Image<Color> {
let RasterStorage::Cpu(cpu) = &self.data else { unreachable!() };
cpu
}
pub fn data_mut(&mut self) -> &mut Image<Color> {
let RasterStorage::Cpu(cpu) = &mut self.data else { unreachable!() };
cpu
}
pub fn into_data(self) -> Image<Color> {
let RasterStorage::Cpu(cpu) = self.data else { unreachable!() };
cpu
}
pub fn is_empty(&self) -> bool {
let data = self.data();
data.height == 0 || data.width == 0
}
}
impl Default for Raster<CPU> {
fn default() -> Self {
Self {
data: RasterStorage::Cpu(Image::default()),
storage: CPU,
}
}
}
impl Deref for Raster<CPU> {
type Target = Image<Color>;
impl<T> Deref for Raster<T>
where
Raster<T>: Storage,
{
type Target = T;
fn deref(&self) -> &Self::Target {
self.data()
}
}
#[cfg(feature = "wgpu")]
impl Raster<GPU> {
pub fn new_gpu(image: Arc<wgpu::Texture>) -> Self {
Self {
data: RasterStorage::Gpu(image),
storage: GPU,
}
}
pub fn data(&self) -> &wgpu::Texture {
let RasterStorage::Gpu(gpu) = &self.data else { unreachable!() };
gpu
}
pub fn data_mut(&mut self) -> &mut Arc<wgpu::Texture> {
let RasterStorage::Gpu(gpu) = &mut self.data else { unreachable!() };
gpu
}
pub fn data_owned(&self) -> Arc<wgpu::Texture> {
let RasterStorage::Gpu(gpu) = &self.data else { unreachable!() };
gpu.clone()
&self.storage
}
}
impl Raster<GPU> {
#[cfg(feature = "wgpu")]
pub fn is_empty(&self) -> bool {
let data = self.data();
data.width() == 0 || data.height() == 0
}
#[cfg(not(feature = "wgpu"))]
pub fn is_empty(&self) -> bool {
true
}
}
#[cfg(feature = "wgpu")]
impl Deref for Raster<GPU> {
type Target = wgpu::Texture;
fn deref(&self) -> &Self::Target {
self.data()
impl<T> DerefMut for Raster<T>
where
Raster<T>: Storage,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.storage
}
}
pub type RasterDataTable<Storage> = Instances<Raster<Storage>>;
// TODO: Make this not dupliated
impl BoundingBox for RasterDataTable<CPU> {
fn bounding_box(&self, transform: DAffine2, _include_stroke: bool) -> Option<[DVec2; 2]> {
self.instance_ref_iter()
.filter(|instance| !instance.instance.is_empty()) // Eliminate empty images
.flat_map(|instance| {
let transform = transform * *instance.transform;
(transform.matrix2.determinant() != 0.).then(|| (transform * Quad::from_box([DVec2::ZERO, DVec2::ONE])).bounding_box())
})
.reduce(Quad::combine_bounds)
pub use cpu::CPU;
mod cpu {
use super::*;
use crate::raster_types::__private::Sealed;
#[derive(Clone, Debug, Default, PartialEq, Hash, DynAny)]
pub struct CPU(Image<Color>);
impl Sealed for Raster<CPU> {}
impl Storage for Raster<CPU> {
fn is_empty(&self) -> bool {
self.0.height == 0 || self.0.width == 0
}
}
impl Raster<CPU> {
pub fn new_cpu(image: Image<Color>) -> Self {
Self::new(CPU(image))
}
pub fn data(&self) -> &Image<Color> {
self
}
pub fn data_mut(&mut self) -> &mut Image<Color> {
self
}
pub fn into_data(self) -> Image<Color> {
self.storage.0
}
}
impl Deref for CPU {
type Target = Image<Color>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for CPU {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<'de> serde::Deserialize<'de> for Raster<CPU> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Ok(Raster::new_cpu(Image::deserialize(deserializer)?))
}
}
impl serde::Serialize for Raster<CPU> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.0.serialize(serializer)
}
}
}
impl BoundingBox for RasterDataTable<GPU> {
pub use gpu::GPU;
#[cfg(feature = "wgpu")]
mod gpu {
use super::*;
use crate::raster_types::__private::Sealed;
#[derive(Clone, Debug, PartialEq, Hash)]
pub struct GPU {
texture: wgpu::Texture,
}
impl Sealed for Raster<GPU> {}
impl Storage for Raster<GPU> {
fn is_empty(&self) -> bool {
self.texture.width() == 0 || self.texture.height() == 0
}
}
impl Raster<GPU> {
pub fn new_gpu(texture: wgpu::Texture) -> Self {
Self::new(GPU { texture })
}
pub fn data(&self) -> &wgpu::Texture {
&self.texture
}
}
}
#[cfg(not(feature = "wgpu"))]
mod gpu {
use super::*;
#[derive(Clone, Debug)]
pub struct GPU;
impl Storage for Raster<GPU> {
fn is_empty(&self) -> bool {
true
}
}
}
mod gpu_common {
use super::*;
impl<'de> serde::Deserialize<'de> for Raster<GPU> {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
unimplemented!()
}
}
impl serde::Serialize for Raster<GPU> {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
unimplemented!()
}
}
}
impl<T> BoundingBox for RasterDataTable<T>
where
Raster<T>: Storage,
{
fn bounding_box(&self, transform: DAffine2, _include_stroke: bool) -> Option<[DVec2; 2]> {
self.instance_ref_iter()
.filter(|instance| !instance.instance.is_empty()) // Eliminate empty images

View file

@ -1,38 +1,12 @@
use crate::{Node, NodeIO, NodeIOTypes, ProtoNodeIdentifier, Type, WasmNotSend};
use dyn_any::{DynAny, StaticType};
use std::borrow::Cow;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::ops::Deref;
use std::pin::Pin;
use std::sync::{LazyLock, Mutex};
pub mod types {
/// 0% - 100%
pub type Percentage = f64;
/// -100% - 100%
pub type SignedPercentage = f64;
/// -180° - 180°
pub type Angle = f64;
/// Ends in the unit of x
pub type Multiplier = f64;
/// Non-negative integer with px unit
pub type PixelLength = f64;
/// Non-negative
pub type Length = f64;
/// 0 to 1
pub type Fraction = f64;
/// Unsigned integer
pub type IntegerCount = u32;
/// Unsigned integer to be used for random seeds
pub type SeedValue = u32;
/// Non-negative integer coordinate with px unit
pub type Resolution = glam::UVec2;
/// DVec2 with px unit
pub type PixelSize = glam::DVec2;
/// String with one or more than one line
pub type TextArea = String;
}
pub use graphene_core_shaders::registry::types;
// Translation struct between macro and definition
#[derive(Clone)]
@ -63,33 +37,6 @@ pub struct FieldMetadata {
pub unit: Option<&'static str>,
}
pub trait ChoiceTypeStatic: Sized + Copy + crate::AsU32 + Send + Sync {
const WIDGET_HINT: ChoiceWidgetHint;
const DESCRIPTION: Option<&'static str>;
fn list() -> &'static [&'static [(Self, VariantMetadata)]];
}
pub enum ChoiceWidgetHint {
Dropdown,
RadioButtons,
}
/// Translation struct between macro and definition.
#[derive(Clone, Debug)]
pub struct VariantMetadata {
/// Name as declared in source code.
pub name: Cow<'static, str>,
/// Name to be displayed in UI.
pub label: Cow<'static, str>,
/// User-facing documentation text.
pub docstring: Option<Cow<'static, str>>,
/// Name of icon to display in radio buttons and such.
pub icon: Option<Cow<'static, str>>,
}
#[derive(Clone, Debug)]
pub enum RegistryWidgetOverride {
None,

View file

@ -1,5 +1,31 @@
mod font_cache;
mod to_path;
use dyn_any::DynAny;
pub use font_cache::*;
pub use to_path::*;
/// Alignment of lines of type within a text block.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)]
#[widget(Radio)]
pub enum TextAlign {
#[default]
Left,
Center,
Right,
#[label("Justify")]
JustifyLeft,
// TODO: JustifyCenter, JustifyRight, JustifyAll
}
/// Converts the user-facing [`TextAlign`] choice into parley's layout alignment.
impl From<TextAlign> for parley::Alignment {
	fn from(val: TextAlign) -> Self {
		match val {
			TextAlign::Left => parley::Alignment::Left,
			TextAlign::Center => parley::Alignment::Middle,
			TextAlign::Right => parley::Alignment::Right,
			// Only left-justification is exposed for now, so it maps to parley's single `Justified` mode.
			TextAlign::JustifyLeft => parley::Alignment::Justified,
		}
	}
}

View file

@ -1,9 +1,11 @@
use crate::vector::PointId;
use super::TextAlign;
use crate::instances::Instance;
use crate::vector::{PointId, VectorData, VectorDataTable};
use bezier_rs::{ManipulatorGroup, Subpath};
use core::cell::RefCell;
use glam::{DAffine2, DVec2};
use parley::fontique::Blob;
use parley::{Alignment, AlignmentOptions, FontContext, GlyphRun, Layout, LayoutContext, LineHeight, PositionedLayoutItem, StyleProperty};
use parley::{AlignmentOptions, FontContext, GlyphRun, Layout, LayoutContext, LineHeight, PositionedLayoutItem, StyleProperty};
use skrifa::GlyphId;
use skrifa::instance::{LocationRef, NormalizedCoord, Size};
use skrifa::outline::{DrawSettings, OutlinePen};
@ -20,24 +22,20 @@ thread_local! {
struct PathBuilder {
current_subpath: Subpath<PointId>,
glyph_subpaths: Vec<Subpath<PointId>>,
other_subpaths: Vec<Subpath<PointId>>,
origin: DVec2,
glyph_subpaths: Vec<Subpath<PointId>>,
vector_table: VectorDataTable,
scale: f64,
id: PointId,
}
impl PathBuilder {
fn point(&self, x: f32, y: f32) -> DVec2 {
// Y-axis inversion converts from font coordinate system (Y-up) to graphics coordinate system (Y-down)
DVec2::new(self.origin.x + x as f64, self.origin.y - y as f64) * self.scale
}
fn set_origin(&mut self, x: f64, y: f64) {
self.origin = DVec2::new(x, y);
}
fn draw_glyph(&mut self, glyph: &OutlineGlyph<'_>, size: f32, normalized_coords: &[NormalizedCoord], style_skew: Option<DAffine2>, skew: DAffine2) {
#[allow(clippy::too_many_arguments)]
fn draw_glyph(&mut self, glyph: &OutlineGlyph<'_>, size: f32, normalized_coords: &[NormalizedCoord], glyph_offset: DVec2, style_skew: Option<DAffine2>, skew: DAffine2, per_glyph_instances: bool) {
let location_ref = LocationRef::new(normalized_coords);
let settings = DrawSettings::unhinted(Size::new(size), location_ref);
glyph.draw(settings, self).unwrap();
@ -52,8 +50,17 @@ impl PathBuilder {
glyph_subpath.apply_transform(skew);
}
if !self.glyph_subpaths.is_empty() {
self.other_subpaths.extend(core::mem::take(&mut self.glyph_subpaths));
if per_glyph_instances {
self.vector_table.push(Instance {
instance: VectorData::from_subpaths(core::mem::take(&mut self.glyph_subpaths), false),
transform: DAffine2::from_translation(glyph_offset),
..Default::default()
});
} else {
for subpath in self.glyph_subpaths.drain(..) {
// Unwrapping here is ok because `self.vector_table` is initialized with a single `VectorData`
self.vector_table.get_mut(0).unwrap().instance.append_subpath(subpath, false);
}
}
}
}
@ -97,6 +104,7 @@ pub struct TypesettingConfig {
pub max_width: Option<f64>,
pub max_height: Option<f64>,
pub tilt: f64,
pub align: TextAlign,
}
impl Default for TypesettingConfig {
@ -108,11 +116,12 @@ impl Default for TypesettingConfig {
max_width: None,
max_height: None,
tilt: 0.,
align: TextAlign::default(),
}
}
}
fn render_glyph_run(glyph_run: &GlyphRun<'_, ()>, path_builder: &mut PathBuilder, tilt: f64) {
fn render_glyph_run(glyph_run: &GlyphRun<'_, ()>, path_builder: &mut PathBuilder, tilt: f64, per_glyph_instances: bool) {
let mut run_x = glyph_run.offset();
let run_y = glyph_run.baseline();
@ -120,18 +129,26 @@ fn render_glyph_run(glyph_run: &GlyphRun<'_, ()>, path_builder: &mut PathBuilder
// User-requested tilt applied around baseline to avoid vertical displacement
// Translation ensures rotation point is at the baseline, not origin
let skew = DAffine2::from_translation(DVec2::new(0., run_y as f64))
* DAffine2::from_cols_array(&[1., 0., -tilt.to_radians().tan(), 1., 0., 0.])
* DAffine2::from_translation(DVec2::new(0., -run_y as f64));
let skew = if per_glyph_instances {
DAffine2::from_cols_array(&[1., 0., -tilt.to_radians().tan(), 1., 0., 0.])
} else {
DAffine2::from_translation(DVec2::new(0., run_y as f64))
* DAffine2::from_cols_array(&[1., 0., -tilt.to_radians().tan(), 1., 0., 0.])
* DAffine2::from_translation(DVec2::new(0., -run_y as f64))
};
let synthesis = run.synthesis();
// Font synthesis (e.g., synthetic italic) applied separately from user transforms
// This preserves the distinction between font styling and user transformations
let style_skew = synthesis.skew().map(|angle| {
DAffine2::from_translation(DVec2::new(0., run_y as f64))
* DAffine2::from_cols_array(&[1., 0., -angle.to_radians().tan() as f64, 1., 0., 0.])
* DAffine2::from_translation(DVec2::new(0., -run_y as f64))
if per_glyph_instances {
DAffine2::from_cols_array(&[1., 0., -angle.to_radians().tan() as f64, 1., 0., 0.])
} else {
DAffine2::from_translation(DVec2::new(0., run_y as f64))
* DAffine2::from_cols_array(&[1., 0., -angle.to_radians().tan() as f64, 1., 0., 0.])
* DAffine2::from_translation(DVec2::new(0., -run_y as f64))
}
});
let font = run.font();
@ -145,14 +162,15 @@ fn render_glyph_run(glyph_run: &GlyphRun<'_, ()>, path_builder: &mut PathBuilder
let outlines = font_ref.outline_glyphs();
for glyph in glyph_run.glyphs() {
let glyph_x = run_x + glyph.x;
let glyph_y = run_y - glyph.y;
let glyph_offset = DVec2::new((run_x + glyph.x) as f64, (run_y - glyph.y) as f64);
run_x += glyph.advance;
let glyph_id = GlyphId::from(glyph.id);
if let Some(glyph_outline) = outlines.get(glyph_id) {
path_builder.set_origin(glyph_x as f64, glyph_y as f64);
path_builder.draw_glyph(&glyph_outline, font_size, &normalized_coords, style_skew, skew);
if !per_glyph_instances {
path_builder.origin = glyph_offset;
}
path_builder.draw_glyph(&glyph_outline, font_size, &normalized_coords, glyph_offset, style_skew, skew, per_glyph_instances);
}
}
}
@ -172,7 +190,7 @@ fn layout_text(str: &str, font_data: Option<Blob<u8>>, typesetting: TypesettingC
})?;
const DISPLAY_SCALE: f32 = 1.;
let mut builder = layout_cx.ranged_builder(&mut font_cx, str, DISPLAY_SCALE, true);
let mut builder = layout_cx.ranged_builder(&mut font_cx, str, DISPLAY_SCALE, false);
builder.push_default(StyleProperty::FontSize(typesetting.font_size as f32));
builder.push_default(StyleProperty::LetterSpacing(typesetting.character_spacing as f32));
@ -182,32 +200,42 @@ fn layout_text(str: &str, font_data: Option<Blob<u8>>, typesetting: TypesettingC
let mut layout: Layout<()> = builder.build(str);
layout.break_all_lines(typesetting.max_width.map(|mw| mw as f32));
layout.align(typesetting.max_width.map(|max_w| max_w as f32), Alignment::Left, AlignmentOptions::default());
layout.align(typesetting.max_width.map(|max_w| max_w as f32), typesetting.align.into(), AlignmentOptions::default());
Some(layout)
}
pub fn to_path(str: &str, font_data: Option<Blob<u8>>, typesetting: TypesettingConfig) -> Vec<Subpath<PointId>> {
let Some(layout) = layout_text(str, font_data, typesetting) else { return Vec::new() };
pub fn to_path(str: &str, font_data: Option<Blob<u8>>, typesetting: TypesettingConfig, per_glyph_instances: bool) -> VectorDataTable {
let Some(layout) = layout_text(str, font_data, typesetting) else {
return VectorDataTable::new(VectorData::default());
};
let mut path_builder = PathBuilder {
current_subpath: Subpath::new(Vec::new(), false),
glyph_subpaths: Vec::new(),
other_subpaths: Vec::new(),
origin: DVec2::ZERO,
vector_table: if per_glyph_instances {
VectorDataTable::default()
} else {
VectorDataTable::new(VectorData::default())
},
scale: layout.scale() as f64,
id: PointId::ZERO,
origin: DVec2::default(),
};
for line in layout.lines() {
for item in line.items() {
if let PositionedLayoutItem::GlyphRun(glyph_run) = item {
render_glyph_run(&glyph_run, &mut path_builder, typesetting.tilt);
render_glyph_run(&glyph_run, &mut path_builder, typesetting.tilt, per_glyph_instances);
}
}
}
path_builder.other_subpaths
if path_builder.vector_table.is_empty() {
path_builder.vector_table = VectorDataTable::new(VectorData::default());
}
path_builder.vector_table
}
pub fn bounding_box(str: &str, font_data: Option<Blob<u8>>, typesetting: TypesettingConfig, for_clipping_test: bool) -> DVec2 {

View file

@ -6,14 +6,20 @@ use glam::{DAffine2, DMat2, DVec2};
pub trait Transform {
fn transform(&self) -> DAffine2;
fn local_pivot(&self, pivot: DVec2) -> DVec2 {
pivot
}
fn decompose_scale(&self) -> DVec2 {
DVec2::new(
self.transform().transform_vector2((1., 0.).into()).length(),
self.transform().transform_vector2((0., 1.).into()).length(),
)
DVec2::new(self.transform().transform_vector2(DVec2::X).length(), self.transform().transform_vector2(DVec2::Y).length())
}
/// Requires that the transform does not contain any skew.
fn decompose_rotation(&self) -> f64 {
let rotation_matrix = (self.transform() * DAffine2::from_scale(self.decompose_scale().recip())).matrix2;
let rotation = -rotation_matrix.mul_vec2(DVec2::X).angle_to(DVec2::X);
if rotation == -0. { 0. } else { rotation }
}
}
@ -141,12 +147,21 @@ impl std::hash::Hash for Footprint {
pub trait ApplyTransform {
fn apply_transform(&mut self, modification: &DAffine2);
fn left_apply_transform(&mut self, modification: &DAffine2);
}
impl<T: TransformMut> ApplyTransform for T {
fn apply_transform(&mut self, &modification: &DAffine2) {
*self.transform_mut() = self.transform() * modification
}
fn left_apply_transform(&mut self, &modification: &DAffine2) {
*self.transform_mut() = modification * self.transform()
}
}
impl ApplyTransform for () {
fn apply_transform(&mut self, &_modification: &DAffine2) {}
impl ApplyTransform for DVec2 {
fn apply_transform(&mut self, modification: &DAffine2) {
*self = modification.transform_point2(*self);
}
fn left_apply_transform(&mut self, modification: &DAffine2) {
*self = modification.inverse().transform_point2(*self);
}
}

View file

@ -7,20 +7,22 @@ use core::f64;
use glam::{DAffine2, DVec2};
#[node_macro::node(category(""))]
async fn transform<T: 'n + 'static>(
async fn transform<T: ApplyTransform + 'n + 'static>(
ctx: impl Ctx + CloneVarArgs + ExtractAll,
#[implementations(
Context -> DAffine2,
Context -> DVec2,
Context -> VectorDataTable,
Context -> GraphicGroupTable,
Context -> RasterDataTable<CPU>,
Context -> RasterDataTable<GPU>,
)]
transform_target: impl Node<Context<'static>, Output = Instances<T>>,
value: impl Node<Context<'static>, Output = T>,
translate: DVec2,
rotate: f64,
scale: DVec2,
skew: DVec2,
) -> Instances<T> {
) -> T {
let matrix = DAffine2::from_scale_angle_translation(scale, rotate, translate) * DAffine2::from_cols_array(&[1., skew.y, skew.x, 1., 0., 0.]);
let footprint = ctx.try_footprint().copied();
@ -31,11 +33,9 @@ async fn transform<T: 'n + 'static>(
ctx = ctx.with_footprint(footprint);
}
let mut transform_target = transform_target.eval(ctx.into_context()).await;
let mut transform_target = value.eval(ctx.into_context()).await;
for data_transform in transform_target.instance_mut_iter() {
*data_transform.transform = matrix * *data_transform.transform;
}
transform_target.left_apply_transform(&matrix);
transform_target
}
@ -52,6 +52,40 @@ fn replace_transform<Data, TransformInput: Transform>(
data
}
#[node_macro::node(category("Math: Transform"), path(graphene_core::vector))]
async fn extract_transform<T>(
_: impl Ctx,
#[implementations(
GraphicGroupTable,
VectorDataTable,
RasterDataTable<CPU>,
RasterDataTable<GPU>,
)]
vector_data: Instances<T>,
) -> DAffine2 {
vector_data.instance_ref_iter().next().map(|vector_data| *vector_data.transform).unwrap_or_default()
}
#[node_macro::node(category("Math: Transform"))]
fn invert_transform(_: impl Ctx, transform: DAffine2) -> DAffine2 {
transform.inverse()
}
#[node_macro::node(category("Math: Transform"))]
fn decompose_translation(_: impl Ctx, transform: DAffine2) -> DVec2 {
transform.translation
}
#[node_macro::node(category("Math: Transform"))]
fn decompose_rotation(_: impl Ctx, transform: DAffine2) -> f64 {
transform.decompose_rotation()
}
#[node_macro::node(category("Math: Transform"))]
fn decompose_scale(_: impl Ctx, transform: DAffine2) -> DVec2 {
transform.decompose_scale()
}
#[node_macro::node(category("Debug"))]
async fn boundless_footprint<T: 'n + 'static>(
ctx: impl Ctx + CloneVarArgs + ExtractAll,

View file

@ -120,7 +120,6 @@ impl<'i, T: Clone + 'i> Node<'i, ()> for DebugClonedNode<T> {
type Output = T;
#[inline(always)]
fn eval(&'i self, _input: ()) -> Self::Output {
#[cfg(not(target_arch = "spirv"))]
// KEEP THIS `debug!()` - It acts as the output for the debug node itself
log::debug!("DebugClonedNode::eval");

View file

@ -1,17 +1,20 @@
use super::intersection::bezpath_intersections;
use super::poisson_disk::poisson_disk_sample;
use crate::vector::misc::{PointSpacingType, dvec2_to_point};
use glam::DVec2;
use kurbo::{BezPath, DEFAULT_ACCURACY, Line, ParamCurve, ParamCurveDeriv, PathEl, PathSeg, Point, Rect, Shape};
use super::util::segment_tangent;
use crate::vector::algorithms::offset_subpath::MAX_ABSOLUTE_DIFFERENCE;
use crate::vector::misc::{PointSpacingType, dvec2_to_point, point_to_dvec2};
use glam::{DMat2, DVec2};
use kurbo::{BezPath, CubicBez, DEFAULT_ACCURACY, Line, ParamCurve, ParamCurveDeriv, PathEl, PathSeg, Point, QuadBez, Rect, Shape};
use std::f64::consts::{FRAC_PI_2, PI};
/// Splits the [`BezPath`] at `t` value which lie in the range of [0, 1].
/// Splits the [`BezPath`] at segment index at `t` value which lie in the range of [0, 1].
/// Returns [`None`] if the given [`BezPath`] has no segments or `t` is within f64::EPSILON of 0 or 1.
pub fn split_bezpath(bezpath: &BezPath, t: f64, euclidian: bool) -> Option<(BezPath, BezPath)> {
pub fn split_bezpath_at_segment(bezpath: &BezPath, segment_index: usize, t: f64) -> Option<(BezPath, BezPath)> {
if t <= f64::EPSILON || (1. - t) <= f64::EPSILON || bezpath.segments().count() == 0 {
return None;
}
// Get the segment which lies at the split.
let (segment_index, t) = t_value_to_parametric(bezpath, t, euclidian, None);
let segment = bezpath.get_seg(segment_index + 1).unwrap();
// Divide the segment.
@ -52,14 +55,27 @@ pub fn split_bezpath(bezpath: &BezPath, t: f64, euclidian: bool) -> Option<(BezP
Some((first_bezpath, second_bezpath))
}
pub fn position_on_bezpath(bezpath: &BezPath, t: f64, euclidian: bool, segments_length: Option<&[f64]>) -> Point {
let (segment_index, t) = t_value_to_parametric(bezpath, t, euclidian, segments_length);
/// Splits the [`BezPath`] at a `t` value which lies in the range of [0, 1].
/// Returns [`None`] if the given [`BezPath`] has no segments.
pub fn split_bezpath(bezpath: &BezPath, t_value: TValue) -> Option<(BezPath, BezPath)> {
if bezpath.segments().count() == 0 {
return None;
}
// Get the segment which lies at the split.
let (segment_index, t) = eval_bezpath(bezpath, t_value, None);
split_bezpath_at_segment(bezpath, segment_index, t)
}
pub fn evaluate_bezpath(bezpath: &BezPath, t_value: TValue, segments_length: Option<&[f64]>) -> Point {
let (segment_index, t) = eval_bezpath(bezpath, t_value, segments_length);
bezpath.get_seg(segment_index + 1).unwrap().eval(t)
}
pub fn tangent_on_bezpath(bezpath: &BezPath, t: f64, euclidian: bool, segments_length: Option<&[f64]>) -> Point {
let (segment_index, t) = t_value_to_parametric(bezpath, t, euclidian, segments_length);
pub fn tangent_on_bezpath(bezpath: &BezPath, t_value: TValue, segments_length: Option<&[f64]>) -> Point {
let (segment_index, t) = eval_bezpath(bezpath, t_value, segments_length);
let segment = bezpath.get_seg(segment_index + 1).unwrap();
match segment {
PathSeg::Line(line) => line.deriv().eval(t),
PathSeg::Quad(quad_bez) => quad_bez.deriv().eval(t),
@ -165,23 +181,35 @@ pub fn sample_polyline_on_bezpath(
Some(sample_bezpath)
}
pub fn t_value_to_parametric(bezpath: &BezPath, t: f64, euclidian: bool, segments_length: Option<&[f64]>) -> (usize, f64) {
if euclidian {
let (segment_index, t) = bezpath_t_value_to_parametric(bezpath, BezPathTValue::GlobalEuclidean(t), segments_length);
let segment = bezpath.get_seg(segment_index + 1).unwrap();
return (segment_index, eval_pathseg_euclidean(segment, t, DEFAULT_ACCURACY));
#[derive(Debug, Clone, Copy)]
pub enum TValue {
Parametric(f64),
Euclidean(f64),
}
/// Return the subsegment for the given [TValue] range. Returns None if parametric value of `t1` is greater than `t2`.
pub fn trim_pathseg(segment: PathSeg, t1: TValue, t2: TValue) -> Option<PathSeg> {
let t1 = eval_pathseg(segment, t1);
let t2 = eval_pathseg(segment, t2);
if t1 > t2 { None } else { Some(segment.subsegment(t1..t2)) }
}
pub fn eval_pathseg(segment: PathSeg, t_value: TValue) -> f64 {
match t_value {
TValue::Parametric(t) => t,
TValue::Euclidean(t) => eval_pathseg_euclidean(segment, t, DEFAULT_ACCURACY),
}
bezpath_t_value_to_parametric(bezpath, BezPathTValue::GlobalParametric(t), segments_length)
}
/// Finds the t value of point on the given path segment i.e fractional distance along the segment's total length.
/// It uses a binary search to find the value `t` such that the ratio `length_up_to_t / total_length` approximates the input `distance`.
pub fn eval_pathseg_euclidean(path_segment: PathSeg, distance: f64, accuracy: f64) -> f64 {
pub fn eval_pathseg_euclidean(segment: PathSeg, distance: f64, accuracy: f64) -> f64 {
let mut low_t = 0.;
let mut mid_t = 0.5;
let mut high_t = 1.;
let total_length = path_segment.perimeter(accuracy);
let total_length = segment.perimeter(accuracy);
if !total_length.is_finite() || total_length <= f64::EPSILON {
return 0.;
@ -190,7 +218,7 @@ pub fn eval_pathseg_euclidean(path_segment: PathSeg, distance: f64, accuracy: f6
let distance = distance.clamp(0., 1.);
while high_t - low_t > accuracy {
let current_length = path_segment.subsegment(0.0..mid_t).perimeter(accuracy);
let current_length = segment.subsegment(0.0..mid_t).perimeter(accuracy);
let current_distance = current_length / total_length;
if current_distance > distance {
@ -207,7 +235,7 @@ pub fn eval_pathseg_euclidean(path_segment: PathSeg, distance: f64, accuracy: f6
/// Converts from a bezpath (composed of multiple segments) to a point along a certain segment represented.
/// The returned tuple represents the segment index and the `t` value along that segment.
/// Both the input global `t` value and the output `t` value are in euclidean space, meaning there is a constant rate of change along the arc length.
fn global_euclidean_to_local_euclidean(bezpath: &BezPath, global_t: f64, lengths: &[f64], total_length: f64) -> (usize, f64) {
fn eval_bazpath_to_euclidean(bezpath: &BezPath, global_t: f64, lengths: &[f64], total_length: f64) -> (usize, f64) {
let mut accumulator = 0.;
for (index, length) in lengths.iter().enumerate() {
let length_ratio = length / total_length;
@ -219,19 +247,14 @@ fn global_euclidean_to_local_euclidean(bezpath: &BezPath, global_t: f64, lengths
(bezpath.segments().count() - 1, 1.)
}
enum BezPathTValue {
GlobalEuclidean(f64),
GlobalParametric(f64),
}
/// Convert a [BezPathTValue] to a parametric `(segment_index, t)` tuple.
/// - Asserts that `t` values contained within the `SubpathTValue` argument lie in the range [0, 1].
fn bezpath_t_value_to_parametric(bezpath: &BezPath, t: BezPathTValue, precomputed_segments_length: Option<&[f64]>) -> (usize, f64) {
/// Convert a [TValue] to a parametric `(segment_index, t)` tuple.
/// - Asserts that `t` values contained within the `TValue` argument lie in the range [0, 1].
fn eval_bezpath(bezpath: &BezPath, t: TValue, precomputed_segments_length: Option<&[f64]>) -> (usize, f64) {
let segment_count = bezpath.segments().count();
assert!(segment_count >= 1);
match t {
BezPathTValue::GlobalEuclidean(t) => {
TValue::Euclidean(t) => {
let computed_segments_length;
let segments_length = if let Some(segments_length) = precomputed_segments_length {
@ -243,16 +266,18 @@ fn bezpath_t_value_to_parametric(bezpath: &BezPath, t: BezPathTValue, precompute
let total_length = segments_length.iter().sum();
global_euclidean_to_local_euclidean(bezpath, t, segments_length, total_length)
let (segment_index, t) = eval_bazpath_to_euclidean(bezpath, t, segments_length, total_length);
let segment = bezpath.get_seg(segment_index + 1).unwrap();
(segment_index, eval_pathseg_euclidean(segment, t, DEFAULT_ACCURACY))
}
BezPathTValue::GlobalParametric(global_t) => {
assert!((0.0..=1.).contains(&global_t));
TValue::Parametric(t) => {
assert!((0.0..=1.).contains(&t));
if global_t == 1. {
if t == 1. {
return (segment_count - 1, 1.);
}
let scaled_t = global_t * segment_count as f64;
let scaled_t = t * segment_count as f64;
let segment_index = scaled_t.floor() as usize;
let t = scaled_t - segment_index as f64;
@ -314,3 +339,130 @@ pub fn poisson_disk_points(bezpath_index: usize, bezpaths: &[(BezPath, Rect)], s
poisson_disk_sample(offset, width, height, separation_disk_diameter, point_in_shape_checker, line_intersect_shape_checker, rng)
}
/// Returns true if the Bezier curve is equivalent to a line.
///
/// **NOTE**: This is different from simply checking whether the segment is a [`PathSeg::Line`], [`PathSeg::Quad`], or [`PathSeg::Cubic`] variant.
/// A quadratic or cubic segment is also a line whenever its control points are collinear with the start and end points, even if the handles extend beyond the endpoints.
pub fn is_linear(segment: &PathSeg) -> bool {
	// Cross product of (b - a) with (c - a); a magnitude near zero means the three points are collinear.
	fn collinear(a: Point, b: Point, c: Point) -> bool {
		((b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)).abs() < MAX_ABSOLUTE_DIFFERENCE
	}

	match *segment {
		// A line segment is trivially linear.
		PathSeg::Line(_) => true,
		// A quadratic is linear when its single control point lies on the chord.
		PathSeg::Quad(QuadBez { p0, p1, p2 }) => collinear(p0, p1, p2),
		// A cubic is linear when both control points lie on the chord.
		PathSeg::Cubic(CubicBez { p0, p1, p2, p3 }) => collinear(p0, p1, p3) && collinear(p0, p2, p3),
	}
}
// TODO: If a segment curls back on itself tightly enough it could intersect again at the portion that should be trimmed. This could cause the Subpaths to be clipped
// TODO: at the incorrect location. This can be avoided by first trimming the two Subpaths at any extrema, effectively ignoring loopbacks.
/// Helper function to clip overlap of two intersecting open BezPaths. Returns an Option because intersections may not exist for certain arrangements and distances.
/// Assumes that the BezPaths represent simple Bezier segments, and clips the BezPaths at the last intersection of the first BezPath, and first intersection of the last BezPath.
pub fn clip_simple_bezpaths(bezpath1: &BezPath, bezpath2: &BezPath) -> Option<(BezPath, BezPath)> {
	// Split the first subpath at its last intersection with the second.
	// `.last()?` already returns None when there are no intersections, so no separate emptiness check is needed.
	let (segment_index, t) = *bezpath_intersections(bezpath1, bezpath2, None, None).last()?;
	let (clipped_subpath1, _) = split_bezpath_at_segment(bezpath1, segment_index, t)?;

	// Split the second subpath at its first intersection with the first.
	// `.first()?` replaces the previous `is_empty()` guard plus `[0]` indexing, removing the panic path.
	let (segment_index, t) = *bezpath_intersections(bezpath2, bezpath1, None, None).first()?;
	let (_, clipped_subpath2) = split_bezpath_at_segment(bezpath2, segment_index, t)?;

	Some((clipped_subpath1, clipped_subpath2))
}
/// Returns the [`PathEl`] that is needed for a miter join if it is possible.
///
/// `miter_limit` defines a limit for the ratio between the miter length and the stroke width.
/// Alternatively, this can be interpreted as limiting the angle that the miter can form.
/// When the limit is exceeded, no [`PathEl`] will be returned.
/// This value should be greater than 0. If not, the default of 4 will be used.
pub fn miter_line_join(bezpath1: &BezPath, bezpath2: &BezPath, miter_limit: Option<f64>) -> Option<[PathEl; 2]> {
	// Fall back to the conventional default miter limit of 4 for missing or non-positive values.
	let miter_limit = match miter_limit {
		Some(miter_limit) if miter_limit > f64::EPSILON => miter_limit,
		_ => 4.,
	};

	// TODO: Besides returning None using the `?` operator, is there a more appropriate way to handle a `None` result from `get_segment`?
	// Outgoing end of the first path and incoming start of the second path form the corner.
	let in_segment = bezpath1.segments().last()?;
	let out_segment = bezpath2.segments().next()?;

	let in_tangent = segment_tangent(in_segment, 1.);
	let out_tangent = segment_tangent(out_segment, 0.);

	if in_tangent == DVec2::ZERO || out_tangent == DVec2::ZERO {
		// Avoid panic from normalizing zero vectors
		// TODO: Besides returning None, is there a more appropriate way to handle this?
		return None;
	}

	// Angle between the reversed incoming tangent and the outgoing tangent; small angles produce long miters.
	let angle = (in_tangent * -1.).angle_to(out_tangent).abs();
	// NOTE(review): the corner angle in *degrees* is compared directly against the miter limit, which the doc above
	// describes as a length *ratio* — confirm this comparison matches the intended miter-limit semantics.
	if angle.to_degrees() < miter_limit {
		return None;
	}

	// Build a line through each corner endpoint along its tangent; their crossing is the miter tip.
	let p1 = in_segment.end();
	let p2 = point_to_dvec2(p1) + in_tangent.normalize();
	let line1 = Line::new(p1, dvec2_to_point(p2));

	let p1 = out_segment.start();
	let p2 = point_to_dvec2(p1) + out_tangent.normalize();
	let line2 = Line::new(p1, dvec2_to_point(p2));

	// If we don't find the intersection point to draw the miter join, we instead default to a bevel join.
	// Otherwise, we return the element to create the join.
	let intersection = line1.crossing_point(line2)?;

	Some([PathEl::LineTo(intersection), PathEl::LineTo(out_segment.start())])
}
/// Computes the [`PathEl`] to form a circular join from `left` to `right`, along a circle around `center`.
/// By default, the angle is assumed to be 180 degrees.
pub fn compute_circular_subpath_details(left: DVec2, arc_point: DVec2, right: DVec2, center: DVec2, angle: Option<f64>) -> [PathEl; 2] {
	let radial = arc_point - center;

	// Cubic Bezier handle length for approximating a circular arc, based on https://pomax.github.io/bezierinfo/#circles_cubic
	// When no angle is supplied, the conventional handle factor constant is used.
	let handle_factor = match angle {
		Some(angle) => 4. / 3. * (angle / 4.).tan(),
		None => 0.551784777779014,
	};

	// First arc: from `left` to `arc_point`.
	let first_half = PathEl::CurveTo(
		dvec2_to_point(left - (left - center).perp() * handle_factor),
		dvec2_to_point(arc_point + radial.perp() * handle_factor),
		dvec2_to_point(arc_point),
	);

	// Second arc: from `arc_point` to `right`.
	let second_half = PathEl::CurveTo(
		dvec2_to_point(arc_point - radial.perp() * handle_factor),
		dvec2_to_point(right + (right - center).perp() * handle_factor),
		dvec2_to_point(right),
	);

	[first_half, second_half]
}
/// Returns two [`PathEl`] to create a round join with the provided center.
///
/// NOTE(review): panics if either bezpath has no segments (the `unwrap()`s below) — confirm callers guarantee non-empty paths.
pub fn round_line_join(bezpath1: &BezPath, bezpath2: &BezPath, center: DVec2) -> [PathEl; 2] {
	// Corner endpoints: end of the incoming path and start of the outgoing path.
	let left = point_to_dvec2(bezpath1.segments().last().unwrap().end());
	let right = point_to_dvec2(bezpath2.segments().next().unwrap().start());

	let center_to_right = right - center;
	let center_to_left = left - center;

	// Tangent at the end of the incoming path, used to pick the arc direction.
	let in_segment = bezpath1.segments().last();
	let in_tangent = in_segment.map(|in_segment| segment_tangent(in_segment, 1.));

	// Midpoint of the arc: rotate the `center -> right` vector by half the angle between the two radial vectors.
	let mut angle = center_to_right.angle_to(center_to_left) / 2.;
	let mut arc_point = center + DMat2::from_angle(angle).mul_vec2(center_to_right);

	// If the arc midpoint departs `left` at more than 90 degrees from the incoming tangent, the arc bulges the wrong
	// way; flip the sweep to the complementary angle and recompute the midpoint.
	if in_tangent.map(|in_tangent| (arc_point - left).angle_to(in_tangent).abs()).unwrap_or_default() > FRAC_PI_2 {
		angle = angle - PI * (if angle < 0. { -1. } else { 1. });
		arc_point = center + DMat2::from_angle(angle).mul_vec2(center_to_right);
	}

	compute_circular_subpath_details(left, arc_point, right, center, Some(angle))
}

View file

@ -0,0 +1,6 @@
/// Minimum allowable separation between adjacent `t` values when calculating curve intersections
pub const MIN_SEPARATION_VALUE: f64 = 5. * 1e-3;

/// Constant used to determine if `f64`s are equivalent.
// NOTE(review): test-only copy; non-test code appears to import a same-named constant from
// `algorithms::offset_subpath` — confirm the two values are kept in sync.
#[cfg(test)]
pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-3;

View file

@ -88,12 +88,10 @@ async fn instance_position(ctx: impl Ctx + ExtractVarArgs) -> DVec2 {
// TODO: Make this return a u32 instead of an f64, but we ned to improve math-related compatibility with integer types first.
#[node_macro::node(category("Instancing"), path(graphene_core::vector))]
async fn instance_index(ctx: impl Ctx + ExtractIndex) -> f64 {
match ctx.try_index() {
Some(index) => return index as f64,
None => warn!("Extracted value of incorrect type"),
}
0.
async fn instance_index(ctx: impl Ctx + ExtractIndex, _primary: (), loop_level: u32) -> f64 {
ctx.try_index()
.and_then(|indexes| indexes.get(indexes.len().wrapping_sub(1).wrapping_sub(loop_level as usize)).copied())
.unwrap_or_default() as f64
}
#[cfg(test)]

View file

@ -0,0 +1,365 @@
use super::contants::MIN_SEPARATION_VALUE;
use kurbo::{BezPath, DEFAULT_ACCURACY, ParamCurve, PathSeg, Shape};
/// Calculates the intersection points the bezpath has with a given segment and returns a list of `(usize, f64)` tuples,
/// where the `usize` represents the index of the segment in the bezpath, and the `f64` represents the `t`-value local to
/// that segment where the intersection occurred.
///
/// `minimum_separation` is the minimum difference that two adjacent `t`-values must have when comparing adjacent `t`-values in sorted order.
pub fn bezpath_and_segment_intersections(bezpath: &BezPath, segment: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(usize, f64)> {
	let mut intersections = Vec::new();

	// Intersect every segment of the bezpath with the given segment, tagging each `t` value with its segment index.
	for (index, this_segment) in bezpath.segments().enumerate() {
		let t_values = filtered_segment_intersections(this_segment, segment, accuracy, minimum_separation);
		intersections.extend(t_values.into_iter().map(|t| (index, t)));
	}

	intersections
}
/// Calculates the intersection points the bezpath has with another given bezpath and returns a list of parametric `t`-values.
///
/// Each entry is `(segment_index, t)` with respect to `bezpath1`, sorted by segment index and then by `t`.
pub fn bezpath_intersections(bezpath1: &BezPath, bezpath2: &BezPath, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(usize, f64)> {
	let mut intersection_t_values: Vec<(usize, f64)> = bezpath2
		.segments()
		.flat_map(|segment| bezpath_and_segment_intersections(bezpath1, segment, accuracy, minimum_separation))
		.collect();

	// `total_cmp` avoids the panic that `partial_cmp().unwrap()` would hit on a NaN `t` value, and an unstable sort
	// is safe (and allocation-free) because equal keys are indistinguishable tuples.
	intersection_t_values.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(a.1.total_cmp(&b.1)));
	intersection_t_values
}
/// Calculates the intersection points the segment has with another given segment and returns a list of parametric `t`-values with given accuracy.
///
/// Each returned pair is `(t1, t2)`: the `t`-value on `segment1` and the corresponding `t`-value on `segment2`.
pub fn segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>) -> Vec<(f64, f64)> {
	// `accuracy` bounds the bounding-box size in the recursive subdivision fallback; defaults to kurbo's DEFAULT_ACCURACY.
	let accuracy = accuracy.unwrap_or(DEFAULT_ACCURACY);
	match (segment1, segment2) {
		// Fast path: kurbo intersects any segment with a line analytically. The tuple order is swapped per arm so the
		// first value is always the `t` on `segment1`.
		(PathSeg::Line(line), segment2) => segment2.intersect_line(line).iter().map(|i| (i.line_t, i.segment_t)).collect(),
		(segment1, PathSeg::Line(line)) => segment1.intersect_line(line).iter().map(|i| (i.segment_t, i.line_t)).collect(),
		// General case: recursive bounding-box subdivision of both curves.
		(segment1, segment2) => {
			let mut intersections = Vec::new();
			segment_intersections_inner(segment1, 0., 1., segment2, 0., 1., accuracy, &mut intersections);
			intersections
		}
	}
}
/// Implements [https://pomax.github.io/bezierinfo/#curveintersection] to find intersection between two Bezier segments
/// by splitting the segment recursively until the size of the subsegment's bounding box is smaller than the accuracy.
#[allow(clippy::too_many_arguments)]
fn segment_intersections_inner(segment1: PathSeg, min_t1: f64, max_t1: f64, segment2: PathSeg, min_t2: f64, max_t2: f64, accuracy: f64, intersections: &mut Vec<(f64, f64)>) {
	let bbox1 = segment1.bounding_box();
	let bbox2 = segment2.bounding_box();

	// Disjoint bounding boxes cannot contain an intersection, so prune this branch immediately.
	if !bbox1.overlaps(bbox2) {
		return;
	}

	// Midpoints of the parameter ranges serve both as the reported `t` values and as the recursion split points.
	let mid_t1 = (min_t1 + max_t1) / 2.;
	let mid_t2 = (min_t2 + max_t2) / 2.;

	// When both overlapping boxes are smaller than the accuracy, record the midpoint `t` values as an intersection.
	if bbox1.width() < accuracy && bbox1.height() < accuracy && bbox2.width() < accuracy && bbox2.height() < accuracy {
		intersections.push((mid_t1, mid_t2));
		return;
	}

	// Otherwise split both curves in half and recurse on all four half-pair combinations.
	let (seg11, seg12) = segment1.subdivide();
	let (seg21, seg22) = segment2.subdivide();

	segment_intersections_inner(seg11, min_t1, mid_t1, seg21, min_t2, mid_t2, accuracy, intersections);
	segment_intersections_inner(seg11, min_t1, mid_t1, seg22, mid_t2, max_t2, accuracy, intersections);
	segment_intersections_inner(seg12, mid_t1, max_t1, seg21, min_t2, mid_t2, accuracy, intersections);
	segment_intersections_inner(seg12, mid_t1, max_t1, seg22, mid_t2, max_t2, accuracy, intersections);
}
// TODO: Use an `impl Iterator` return type instead of a `Vec`
/// Returns a list of filtered parametric `t` values that correspond to intersection points between the current bezier segment and the provided one
/// such that the difference between adjacent `t` values in sorted order is greater than some minimum separation value. If the difference
/// between 2 adjacent `t` values is less than the minimum difference, the filtering takes the larger `t` value and discards the smaller `t` value.
/// The returned `t` values are with respect to the current bezier segment, not the provided parameter.
/// If the provided segment is linear, then zero intersection points will be returned along colinear segments.
///
/// `accuracy` defines, for intersections where the provided bezier segment is non-linear, the maximum size of the bounding boxes to be considered an intersection point.
///
/// `minimum_separation` is the minimum difference between adjacent `t` values in sorted order.
pub fn filtered_segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<f64> {
	// Resolve the default once instead of re-evaluating `unwrap_or` on every fold iteration.
	let minimum_separation = minimum_separation.unwrap_or(MIN_SEPARATION_VALUE);

	let mut intersection_t_values = segment_intersections(segment1, segment2, accuracy);
	// `total_cmp` avoids the panic that `partial_cmp().unwrap()` would hit on NaN values.
	intersection_t_values.sort_unstable_by(|a, b| a.0.total_cmp(&b.0).then(a.1.total_cmp(&b.1)));

	intersection_t_values.iter().map(|x| x.0).fold(Vec::new(), |mut accumulator, t| {
		// When two adjacent sorted `t` values are closer than the minimum separation, keep only the larger one.
		if accumulator.last().is_some_and(|last| (last - t).abs() < minimum_separation) {
			accumulator.pop();
		}
		accumulator.push(t);
		accumulator
	})
}
// TODO: Use an `impl Iterator` return type instead of a `Vec`
/// Returns a list of pairs of filtered parametric `t` values that correspond to intersection points between the current bezier curve and the provided
/// one such that the difference between adjacent `t` values in sorted order is greater than some minimum separation value. If the difference between
/// two adjacent `t` values is less than the minimum difference, the filtering takes the larger `t` value and discards the smaller `t` value.
/// The first value in pair is with respect to the current bezier and the second value in pair is with respect to the provided parameter.
/// If the provided curve is linear, then zero intersection points will be returned along colinear segments.
///
/// `error`, for intersections where the provided bezier is non-linear, defines the threshold for bounding boxes to be considered an intersection point.
///
/// `minimum_separation` is the minimum difference between adjacent `t` values in sorted order
pub fn filtered_all_segment_intersections(segment1: PathSeg, segment2: PathSeg, accuracy: Option<f64>, minimum_separation: Option<f64>) -> Vec<(f64, f64)> {
	// Resolve the separation threshold once instead of re-evaluating the `Option` twice per fold iteration.
	let min_separation = minimum_separation.unwrap_or(MIN_SEPARATION_VALUE);

	let mut intersection_t_values = segment_intersections(segment1, segment2, accuracy);
	// Sort by the sum of both parameters so pairs that are close on both curves end up adjacent.
	intersection_t_values.sort_by(|a, b| (a.0 + a.1).partial_cmp(&(b.0 + b.1)).unwrap());

	intersection_t_values.iter().fold(Vec::new(), |mut accumulator, t| {
		// Drop the previous pair only when BOTH of its components are within the threshold of the current pair.
		if !accumulator.is_empty() && (accumulator.last().unwrap().0 - t.0).abs() < min_separation && (accumulator.last().unwrap().1 - t.1).abs() < min_separation {
			accumulator.pop();
		}
		accumulator.push(*t);
		accumulator
	})
}
// Tests for segment/segment and bezpath/segment intersection, checked against
// hand-computed geometry from the SVG paths quoted in each test's comments.
#[cfg(test)]
mod tests {
	use super::{bezpath_and_segment_intersections, filtered_segment_intersections};
	use crate::vector::algorithms::{
		contants::MAX_ABSOLUTE_DIFFERENCE,
		util::{compare_points, compare_vec_of_points, dvec2_compare},
	};
	use kurbo::{BezPath, CubicBez, Line, ParamCurve, PathEl, PathSeg, Point, QuadBez};

	// A line touching the quadratic's start point and a line crossing its interior
	// should each be reported as a single intersection.
	#[test]
	fn test_intersect_line_segment_quadratic() {
		let p1 = Point::new(30., 50.);
		let p2 = Point::new(140., 30.);
		let p3 = Point::new(160., 170.);

		// Intersection at edge of curve
		let bezier = PathSeg::Quad(QuadBez::new(p1, p2, p3));
		let line1 = PathSeg::Line(Line::new(Point::new(20., 50.), Point::new(40., 50.)));
		let intersections1 = filtered_segment_intersections(bezier, line1, None, None);
		assert!(intersections1.len() == 1);
		assert!(compare_points(bezier.eval(intersections1[0]), p1));

		// Intersection in the middle of curve
		let line2 = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(30., 30.)));
		let intersections2 = filtered_segment_intersections(bezier, line2, None, None);
		assert!(compare_points(bezier.eval(intersections2[0]), Point::new(47.77355, 47.77354)));
	}

	// A self-bending cubic crossed by a diagonal line: the minimum-separation filter
	// should collapse nearby duplicate hits into exactly one intersection.
	#[test]
	fn test_intersect_curve_cubic_edge_case() {
		// M34 107 C40 40 120 120 102 29
		let p1 = Point::new(34., 107.);
		let p2 = Point::new(40., 40.);
		let p3 = Point::new(120., 120.);
		let p4 = Point::new(102., 29.);
		let cubic_segment = PathSeg::Cubic(CubicBez::new(p1, p2, p3, p4));

		let linear_segment = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.)));

		let intersections = filtered_segment_intersections(cubic_segment, linear_segment, None, None);
		assert_eq!(intersections.len(), 1);
	}

	// Cubic/quadratic intersection must be symmetric: evaluating each curve at its own
	// `t` values yields the same points (up to tolerance, with one list reversed).
	#[test]
	fn test_intersect_curve() {
		let p0 = Point::new(30., 30.);
		let p1 = Point::new(60., 140.);
		let p2 = Point::new(150., 30.);
		let p3 = Point::new(160., 160.);
		let cubic_segment = PathSeg::Cubic(CubicBez::new(p0, p1, p2, p3));

		let p0 = Point::new(175., 140.);
		let p1 = Point::new(20., 20.);
		let p2 = Point::new(120., 20.);
		let quadratic_segment = PathSeg::Quad(QuadBez::new(p0, p1, p2));

		let intersections1 = filtered_segment_intersections(cubic_segment, quadratic_segment, None, None);
		let intersections2 = filtered_segment_intersections(quadratic_segment, cubic_segment, None, None);

		let intersections1_points: Vec<Point> = intersections1.iter().map(|&t| cubic_segment.eval(t)).collect();
		let intersections2_points: Vec<Point> = intersections2.iter().map(|&t| quadratic_segment.eval(t)).rev().collect();

		assert!(compare_vec_of_points(intersections1_points, intersections2_points, 2.));
	}

	// Path-level intersections must match the per-segment results at each hit,
	// using the (segment index, t) pairs returned for the whole bezpath.
	#[test]
	fn intersection_linear_multiple_subpath_curves_test_one() {
		// M 35 125 C 40 40 120 120 43 43 Q 175 90 145 150 Q 70 185 35 125 Z
		let cubic_start = Point::new(35., 125.);
		let cubic_handle_1 = Point::new(40., 40.);
		let cubic_handle_2 = Point::new(120., 120.);
		let cubic_end = Point::new(43., 43.);

		let quadratic_1_handle = Point::new(175., 90.);
		let quadratic_end = Point::new(145., 150.);

		let quadratic_2_handle = Point::new(70., 185.);

		let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end));
		let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end));

		let bezpath = BezPath::from_vec(vec![
			PathEl::MoveTo(cubic_start),
			PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end),
			PathEl::QuadTo(quadratic_1_handle, quadratic_end),
			PathEl::QuadTo(quadratic_2_handle, cubic_start),
			PathEl::ClosePath,
		]);

		let linear_segment = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.)));

		let cubic_intersections = filtered_segment_intersections(cubic_segment, linear_segment, None, None);
		let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, linear_segment, None, None);
		let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, linear_segment, None, None);

		// Hit on the cubic segment (path segment 0).
		assert!(
			dvec2_compare(
				cubic_segment.eval(cubic_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);

		// First hit on the first quadratic segment.
		assert!(
			dvec2_compare(
				quadratic_segment.eval(quadratic_1_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);

		// Second hit on the first quadratic segment.
		assert!(
			dvec2_compare(
				quadratic_segment.eval(quadratic_1_intersections[1]),
				bezpath.segments().nth(bezpath_intersections[2].0).unwrap().eval(bezpath_intersections[2].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);
	}

	// Same cross-check as test_one but for a shape where the line crosses
	// the cubic once and the first quadratic once.
	#[test]
	fn intersection_linear_multiple_subpath_curves_test_two() {
		// M34 107 C40 40 120 120 102 29 Q175 90 129 171 Q70 185 34 107 Z
		// M150 150 L 20 20
		let cubic_start = Point::new(34., 107.);
		let cubic_handle_1 = Point::new(40., 40.);
		let cubic_handle_2 = Point::new(120., 120.);
		let cubic_end = Point::new(102., 29.);

		let quadratic_1_handle = Point::new(175., 90.);
		let quadratic_end = Point::new(129., 171.);

		let quadratic_2_handle = Point::new(70., 185.);

		let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end));
		let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end));

		let bezpath = BezPath::from_vec(vec![
			PathEl::MoveTo(cubic_start),
			PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end),
			PathEl::QuadTo(quadratic_1_handle, quadratic_end),
			PathEl::QuadTo(quadratic_2_handle, cubic_start),
			PathEl::ClosePath,
		]);

		let line = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.)));

		let cubic_intersections = filtered_segment_intersections(cubic_segment, line, None, None);
		let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, line, None, None);
		let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, line, None, None);

		assert!(
			dvec2_compare(
				cubic_segment.eval(cubic_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);

		assert!(
			dvec2_compare(
				quadratic_segment.eval(quadratic_1_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);
	}

	// Variant of test_one with the cubic's end point nudged (44,44), again expecting
	// one cubic hit and two quadratic hits matching the path-level results.
	#[test]
	fn intersection_linear_multiple_subpath_curves_test_three() {
		// M35 125 C40 40 120 120 44 44 Q175 90 145 150 Q70 185 35 125 Z
		let cubic_start = Point::new(35., 125.);
		let cubic_handle_1 = Point::new(40., 40.);
		let cubic_handle_2 = Point::new(120., 120.);
		let cubic_end = Point::new(44., 44.);

		let quadratic_1_handle = Point::new(175., 90.);
		let quadratic_end = Point::new(145., 150.);

		let quadratic_2_handle = Point::new(70., 185.);

		let cubic_segment = PathSeg::Cubic(CubicBez::new(cubic_start, cubic_handle_1, cubic_handle_2, cubic_end));
		let quadratic_segment = PathSeg::Quad(QuadBez::new(cubic_end, quadratic_1_handle, quadratic_end));

		let bezpath = BezPath::from_vec(vec![
			PathEl::MoveTo(cubic_start),
			PathEl::CurveTo(cubic_handle_1, cubic_handle_2, cubic_end),
			PathEl::QuadTo(quadratic_1_handle, quadratic_end),
			PathEl::QuadTo(quadratic_2_handle, cubic_start),
			PathEl::ClosePath,
		]);

		let line = PathSeg::Line(Line::new(Point::new(150., 150.), Point::new(20., 20.)));

		let cubic_intersections = filtered_segment_intersections(cubic_segment, line, None, None);
		let quadratic_1_intersections = filtered_segment_intersections(quadratic_segment, line, None, None);
		let bezpath_intersections = bezpath_and_segment_intersections(&bezpath, line, None, None);

		assert!(
			dvec2_compare(
				cubic_segment.eval(cubic_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[0].0).unwrap().eval(bezpath_intersections[0].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);

		assert!(
			dvec2_compare(
				quadratic_segment.eval(quadratic_1_intersections[0]),
				bezpath.segments().nth(bezpath_intersections[1].0).unwrap().eval(bezpath_intersections[1].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);

		assert!(
			dvec2_compare(
				quadratic_segment.eval(quadratic_1_intersections[1]),
				bezpath.segments().nth(bezpath_intersections[2].0).unwrap().eval(bezpath_intersections[2].1),
				MAX_ABSOLUTE_DIFFERENCE
			)
			.all()
		);
	}
}

View file

@ -1,6 +1,9 @@
pub mod bezpath_algorithms;
mod contants;
pub mod instance;
pub mod intersection;
pub mod merge_by_distance;
pub mod offset_subpath;
pub mod poisson_disk;
pub mod spline;
pub mod util;

View file

@ -1,173 +1,137 @@
use crate::vector::PointId;
use bezier_rs::{Bezier, BezierHandles, Join, Subpath, TValue};
use super::bezpath_algorithms::{clip_simple_bezpaths, miter_line_join, round_line_join};
use crate::vector::misc::point_to_dvec2;
use kurbo::{BezPath, Join, ParamCurve, PathEl, PathSeg};
/// Value to control smoothness and mathematical accuracy to offset a cubic Bezier.
const CUBIC_REGULARIZATION_ACCURACY: f64 = 0.5;
/// Accuracy of fitting offset curve to Bezier paths.
const CUBIC_TO_BEZPATH_ACCURACY: f64 = 1e-3;
/// Constant used to determine if `f64`s are equivalent.
pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-3;
pub const MAX_ABSOLUTE_DIFFERENCE: f64 = 1e-7;
fn segment_to_bezier(seg: kurbo::PathSeg) -> Bezier {
match seg {
kurbo::PathSeg::Line(line) => Bezier::from_linear_coordinates(line.p0.x, line.p0.y, line.p1.x, line.p1.y),
kurbo::PathSeg::Quad(quad_bez) => Bezier::from_quadratic_coordinates(quad_bez.p0.x, quad_bez.p0.y, quad_bez.p1.x, quad_bez.p1.y, quad_bez.p1.x, quad_bez.p1.y),
kurbo::PathSeg::Cubic(cubic_bez) => Bezier::from_cubic_coordinates(
cubic_bez.p0.x,
cubic_bez.p0.y,
cubic_bez.p1.x,
cubic_bez.p1.y,
cubic_bez.p2.x,
cubic_bez.p2.y,
cubic_bez.p3.x,
cubic_bez.p3.y,
),
}
}
// TODO: Replace the implementation to use only Kurbo API.
/// Reduces the segments of the subpath into simple subcurves, then offset each subcurve a set `distance` away.
/// Reduces the segments of the bezpath into simple subcurves, then offset each subcurve a set `distance` away.
/// The intersections of segments of the subpath are joined using the method specified by the `join` argument.
pub fn offset_subpath(subpath: &Subpath<PointId>, distance: f64, join: Join) -> Subpath<PointId> {
pub fn offset_bezpath(bezpath: &BezPath, distance: f64, join: Join, miter_limit: Option<f64>) -> BezPath {
// An offset at a distance 0 from the curve is simply the same curve.
// An offset of a single point is not defined.
if distance == 0. || subpath.len() <= 1 || subpath.len_segments() < 1 {
return subpath.clone();
if distance == 0. || bezpath.get_seg(1).is_none() {
return bezpath.clone();
}
let mut subpaths = subpath
.iter()
.filter(|bezier| !bezier.is_point())
let mut bezpaths = bezpath
.segments()
.map(|bezier| bezier.to_cubic())
.map(|cubic| {
let Bezier { start, end, handles } = cubic;
let BezierHandles::Cubic { handle_start, handle_end } = handles else { unreachable!()};
let cubic_bez = kurbo::CubicBez::new((start.x, start.y), (handle_start.x, handle_start.y), (handle_end.x, handle_end.y), (end.x, end.y));
.map(|cubic_bez| {
let cubic_offset = kurbo::offset::CubicOffset::new_regularized(cubic_bez, distance, CUBIC_REGULARIZATION_ACCURACY);
let offset_bezpath = kurbo::fit_to_bezpath(&cubic_offset, CUBIC_TO_BEZPATH_ACCURACY);
let beziers = offset_bezpath.segments().fold(Vec::new(), |mut acc, seg| {
acc.push(segment_to_bezier(seg));
acc
});
Subpath::from_beziers(&beziers, false)
kurbo::fit_to_bezpath(&cubic_offset, CUBIC_TO_BEZPATH_ACCURACY)
})
.filter(|subpath| subpath.len() >= 2) // In some cases the reduced and scaled bézier is marked by is_point (so the subpath is empty).
.collect::<Vec<Subpath<PointId>>>();
let mut drop_common_point = vec![true; subpath.len()];
.filter(|bezpath| bezpath.get_seg(1).is_some()) // In some cases the reduced and scaled bézier is marked by is_point (so the subpath is empty).
.collect::<Vec<BezPath>>();
// Clip or join consecutive Subpaths
for i in 0..subpaths.len() - 1 {
for i in 0..bezpaths.len() - 1 {
let j = i + 1;
let subpath1 = &subpaths[i];
let subpath2 = &subpaths[j];
let bezpath1 = &bezpaths[i];
let bezpath2 = &bezpaths[j];
let last_segment = subpath1.get_segment(subpath1.len_segments() - 1).unwrap();
let first_segment = subpath2.get_segment(0).unwrap();
let last_segment_end = point_to_dvec2(bezpath1.segments().last().unwrap().end());
let first_segment_start = point_to_dvec2(bezpath2.segments().next().unwrap().start());
// If the anchors are approximately equal, there is no need to clip / join the segments
if last_segment.end().abs_diff_eq(first_segment.start(), MAX_ABSOLUTE_DIFFERENCE) {
if last_segment_end.abs_diff_eq(first_segment_start, MAX_ABSOLUTE_DIFFERENCE) {
continue;
}
// Calculate the angle formed between two consecutive Subpaths
let out_tangent = subpath.get_segment(i).unwrap().tangent(TValue::Parametric(1.));
let in_tangent = subpath.get_segment(j).unwrap().tangent(TValue::Parametric(0.));
let angle = out_tangent.angle_to(in_tangent);
// The angle is concave. The Subpath overlap and must be clipped
let mut apply_join = true;
if (angle > 0. && distance > 0.) || (angle < 0. && distance < 0.) {
// If the distance is large enough, there may still be no intersections. Also, if the angle is close enough to zero,
// subpath intersections may find no intersections. In this case, the points are likely close enough that we can approximate
// the points as being on top of one another.
if let Some((clipped_subpath1, clipped_subpath2)) = Subpath::clip_simple_subpaths(subpath1, subpath2) {
subpaths[i] = clipped_subpath1;
subpaths[j] = clipped_subpath2;
apply_join = false;
}
if let Some((clipped_subpath1, clipped_subpath2)) = clip_simple_bezpaths(bezpath1, bezpath2) {
bezpaths[i] = clipped_subpath1;
bezpaths[j] = clipped_subpath2;
apply_join = false;
}
// The angle is convex. The Subpath must be joined using the specified join type
if apply_join {
drop_common_point[j] = false;
match join {
Join::Bevel => {}
Join::Miter(miter_limit) => {
let miter_manipulator_group = subpaths[i].miter_line_join(&subpaths[j], miter_limit);
if let Some(miter_manipulator_group) = miter_manipulator_group {
subpaths[i].manipulator_groups_mut().push(miter_manipulator_group);
Join::Bevel => {
let element = PathEl::LineTo(bezpaths[j].segments().next().unwrap().start());
bezpaths[i].push(element);
}
Join::Miter => {
let element = miter_line_join(&bezpaths[i], &bezpaths[j], miter_limit);
if let Some(element) = element {
bezpaths[i].push(element[0]);
bezpaths[i].push(element[1]);
} else {
let element = PathEl::LineTo(bezpaths[j].segments().next().unwrap().start());
bezpaths[i].push(element);
}
}
Join::Round => {
let (out_handle, round_point, in_handle) = subpaths[i].round_line_join(&subpaths[j], subpath.manipulator_groups()[j].anchor);
let last_index = subpaths[i].manipulator_groups().len() - 1;
subpaths[i].manipulator_groups_mut()[last_index].out_handle = Some(out_handle);
subpaths[i].manipulator_groups_mut().push(round_point);
subpaths[j].manipulator_groups_mut()[0].in_handle = Some(in_handle);
let center = point_to_dvec2(bezpath.get_seg(i + 1).unwrap().end());
let elements = round_line_join(&bezpaths[i], &bezpaths[j], center);
bezpaths[i].push(elements[0]);
bezpaths[i].push(elements[1]);
}
}
}
}
// Clip any overlap in the last segment
if subpath.closed {
let out_tangent = subpath.get_segment(subpath.len_segments() - 1).unwrap().tangent(TValue::Parametric(1.));
let in_tangent = subpath.get_segment(0).unwrap().tangent(TValue::Parametric(0.));
let angle = out_tangent.angle_to(in_tangent);
let is_bezpath_closed = bezpath.elements().last().is_some_and(|element| *element == PathEl::ClosePath);
if is_bezpath_closed {
let mut apply_join = true;
if (angle > 0. && distance > 0.) || (angle < 0. && distance < 0.) {
if let Some((clipped_subpath1, clipped_subpath2)) = Subpath::clip_simple_subpaths(&subpaths[subpaths.len() - 1], &subpaths[0]) {
// Merge the clipped subpaths
let last_index = subpaths.len() - 1;
subpaths[last_index] = clipped_subpath1;
subpaths[0] = clipped_subpath2;
apply_join = false;
}
if let Some((clipped_subpath1, clipped_subpath2)) = clip_simple_bezpaths(&bezpaths[bezpaths.len() - 1], &bezpaths[0]) {
// Merge the clipped subpaths
let last_index = bezpaths.len() - 1;
bezpaths[last_index] = clipped_subpath1;
bezpaths[0] = clipped_subpath2;
apply_join = false;
}
if apply_join {
drop_common_point[0] = false;
match join {
Join::Bevel => {}
Join::Miter(miter_limit) => {
let last_subpath_index = subpaths.len() - 1;
let miter_manipulator_group = subpaths[last_subpath_index].miter_line_join(&subpaths[0], miter_limit);
if let Some(miter_manipulator_group) = miter_manipulator_group {
subpaths[last_subpath_index].manipulator_groups_mut().push(miter_manipulator_group);
Join::Bevel => {
let last_subpath_index = bezpaths.len() - 1;
let element = PathEl::LineTo(bezpaths[0].segments().next().unwrap().start());
bezpaths[last_subpath_index].push(element);
}
Join::Miter => {
let last_subpath_index = bezpaths.len() - 1;
let element = miter_line_join(&bezpaths[last_subpath_index], &bezpaths[0], miter_limit);
if let Some(element) = element {
bezpaths[last_subpath_index].push(element[0]);
bezpaths[last_subpath_index].push(element[1]);
} else {
let element = PathEl::LineTo(bezpaths[0].segments().next().unwrap().start());
bezpaths[last_subpath_index].push(element);
}
}
Join::Round => {
let last_subpath_index = subpaths.len() - 1;
let (out_handle, round_point, in_handle) = subpaths[last_subpath_index].round_line_join(&subpaths[0], subpath.manipulator_groups()[0].anchor);
let last_index = subpaths[last_subpath_index].manipulator_groups().len() - 1;
subpaths[last_subpath_index].manipulator_groups_mut()[last_index].out_handle = Some(out_handle);
subpaths[last_subpath_index].manipulator_groups_mut().push(round_point);
subpaths[0].manipulator_groups_mut()[0].in_handle = Some(in_handle);
let last_subpath_index = bezpaths.len() - 1;
let center = point_to_dvec2(bezpath.get_seg(1).unwrap().start());
let elements = round_line_join(&bezpaths[last_subpath_index], &bezpaths[0], center);
bezpaths[last_subpath_index].push(elements[0]);
bezpaths[last_subpath_index].push(elements[1]);
}
}
}
}
// Merge the subpaths. Drop points which overlap with one another.
let mut manipulator_groups = subpaths[0].manipulator_groups().to_vec();
for i in 1..subpaths.len() {
if drop_common_point[i] {
let last_group = manipulator_groups.pop().unwrap();
let mut manipulators_copy = subpaths[i].manipulator_groups().to_vec();
manipulators_copy[0].in_handle = last_group.in_handle;
manipulator_groups.append(&mut manipulators_copy);
} else {
manipulator_groups.append(&mut subpaths[i].manipulator_groups().to_vec());
// Merge the bezpaths and its segments. Drop points which overlap with one another.
let segments = bezpaths.iter().flat_map(|bezpath| bezpath.segments().collect::<Vec<PathSeg>>()).collect::<Vec<PathSeg>>();
let mut offset_bezpath = segments.iter().fold(BezPath::new(), |mut acc, segment| {
if acc.elements().is_empty() {
acc.move_to(segment.start());
}
}
if subpath.closed && drop_common_point[0] {
let last_group = manipulator_groups.pop().unwrap();
manipulator_groups[0].in_handle = last_group.in_handle;
acc.push(segment.as_path_el());
acc
});
if is_bezpath_closed {
offset_bezpath.close_path();
}
Subpath::new(manipulator_groups, subpath.closed)
offset_bezpath
}

View file

@ -182,7 +182,7 @@ where
A::Item: Clone,
B::Item: Clone,
{
a.flat_map(move |i| (b.clone().map(move |j| (i.clone(), j))))
a.flat_map(move |i| b.clone().map(move |j| (i.clone(), j)))
}
/// A square (represented by its top left corner position and width/height of `square_size`) that is currently a candidate for targeting by the dart throwing process.

View file

@ -0,0 +1,44 @@
use glam::DVec2;
use kurbo::{ParamCurve, ParamCurveDeriv, PathSeg};
/// Returns the tangent (first derivative) of `segment` at parameter `t` as a [`DVec2`].
pub fn segment_tangent(segment: PathSeg, t: f64) -> DVec2 {
	// NOTE: .deriv() method gives inaccurate result when it is 1, so nudge just inside the domain.
	let t = if t == 1. { 1. - f64::EPSILON } else { t };
	let derivative = match segment {
		PathSeg::Line(line) => line.deriv().eval(t),
		PathSeg::Quad(quad) => quad.deriv().eval(t),
		PathSeg::Cubic(cubic) => cubic.deriv().eval(t),
	};
	DVec2::new(derivative.x, derivative.y)
}
// Compare two f64s with some maximum absolute difference to account for floating point errors
#[cfg(test)]
pub fn compare_f64s(f1: f64, f2: f64) -> bool {
	let difference = (f1 - f2).abs();
	difference < super::contants::MAX_ABSOLUTE_DIFFERENCE
}
/// Compare points by allowing some maximum absolute difference to account for floating point errors
#[cfg(test)]
pub fn compare_points(p1: kurbo::Point, p2: kurbo::Point) -> bool {
	let a = crate::vector::misc::point_to_dvec2(p1);
	let b = crate::vector::misc::point_to_dvec2(p2);
	a.abs_diff_eq(b, super::contants::MAX_ABSOLUTE_DIFFERENCE)
}
/// Compare vectors of points by allowing some maximum absolute difference to account for floating point errors
#[cfg(test)]
pub fn compare_vec_of_points(a: Vec<kurbo::Point>, b: Vec<kurbo::Point>, max_absolute_difference: f64) -> bool {
	// Different lengths can never match element-for-element.
	if a.len() != b.len() {
		return false;
	}
	a.into_iter().zip(b).all(|(p1, p2)| {
		let (p1, p2) = (crate::vector::misc::point_to_dvec2(p1), crate::vector::misc::point_to_dvec2(p2));
		p1.abs_diff_eq(p2, max_absolute_difference)
	})
}
/// Compare the x and y components of two [`kurbo::Point`]s independently with a provided max absolute
/// difference, returning the per-component results as a [`glam::BVec2`].
#[cfg(test)]
pub fn dvec2_compare(a: kurbo::Point, b: kurbo::Point, max_abs_diff: f64) -> glam::BVec2 {
	glam::BVec2::new((a.x - b.x).abs() < max_abs_diff, (a.y - b.y).abs() < max_abs_diff)
}

View file

@ -1,6 +1,10 @@
use super::PointId;
use super::algorithms::offset_subpath::MAX_ABSOLUTE_DIFFERENCE;
use bezier_rs::{BezierHandles, ManipulatorGroup, Subpath};
use dyn_any::DynAny;
use glam::DVec2;
use kurbo::Point;
use kurbo::{BezPath, CubicBez, Line, ParamCurve, PathSeg, Point, QuadBez};
use std::ops::Sub;
/// Represents different ways of calculating the centroid.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize, Hash, DynAny, specta::Type, node_macro::ChoiceType)]
@ -64,7 +68,7 @@ pub enum GridType {
#[widget(Radio)]
pub enum ArcType {
#[default]
Open,
Open = 0,
Closed,
PieSlice,
}
@ -96,3 +100,140 @@ pub fn point_to_dvec2(point: Point) -> DVec2 {
pub fn dvec2_to_point(value: DVec2) -> Point {
Point { x: value.x, y: value.y }
}
/// Extracts the control handles of a [`PathSeg`] as bezier-rs [`BezierHandles`], discarding the endpoints.
pub fn segment_to_handles(segment: &PathSeg) -> BezierHandles {
	match segment {
		PathSeg::Line(_) => BezierHandles::Linear,
		// A quadratic has a single control point.
		PathSeg::Quad(quad) => BezierHandles::Quadratic { handle: point_to_dvec2(quad.p1) },
		// A cubic carries one handle for each endpoint.
		PathSeg::Cubic(cubic) => BezierHandles::Cubic {
			handle_start: point_to_dvec2(cubic.p1),
			handle_end: point_to_dvec2(cubic.p2),
		},
	}
}
pub fn handles_to_segment(start: DVec2, handles: BezierHandles, end: DVec2) -> PathSeg {
match handles {
bezier_rs::BezierHandles::Linear => {
let p0 = dvec2_to_point(start);
let p1 = dvec2_to_point(end);
PathSeg::Line(Line::new(p0, p1))
}
bezier_rs::BezierHandles::Quadratic { handle } => {
let p0 = dvec2_to_point(start);
let p1 = dvec2_to_point(handle);
let p2 = dvec2_to_point(end);
PathSeg::Quad(QuadBez::new(p0, p1, p2))
}
bezier_rs::BezierHandles::Cubic { handle_start, handle_end } => {
let p0 = dvec2_to_point(start);
let p1 = dvec2_to_point(handle_start);
let p2 = dvec2_to_point(handle_end);
let p3 = dvec2_to_point(end);
PathSeg::Cubic(CubicBez::new(p0, p1, p2, p3))
}
}
}
/// Converts a bezier-rs [`Subpath`] into a kurbo [`BezPath`].
pub fn subpath_to_kurbo_bezpath(subpath: Subpath<PointId>) -> BezPath {
	// Fixed misspelled local (`maniputor_groups`).
	let manipulator_groups = subpath.manipulator_groups();
	let closed = subpath.closed();
	bezpath_from_manipulator_groups(manipulator_groups, closed)
}
pub fn bezpath_from_manipulator_groups(manipulator_groups: &[ManipulatorGroup<PointId>], closed: bool) -> BezPath {
let mut bezpath = kurbo::BezPath::new();
let mut out_handle;
let Some(first) = manipulator_groups.first() else { return bezpath };
bezpath.move_to(dvec2_to_point(first.anchor));
out_handle = first.out_handle;
for manipulator in manipulator_groups.iter().skip(1) {
match (out_handle, manipulator.in_handle) {
(Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(manipulator.anchor)),
(None, None) => bezpath.line_to(dvec2_to_point(manipulator.anchor)),
(None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)),
(Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(manipulator.anchor)),
}
out_handle = manipulator.out_handle;
}
if closed {
match (out_handle, first.in_handle) {
(Some(handle_start), Some(handle_end)) => bezpath.curve_to(dvec2_to_point(handle_start), dvec2_to_point(handle_end), dvec2_to_point(first.anchor)),
(None, None) => bezpath.line_to(dvec2_to_point(first.anchor)),
(None, Some(handle)) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)),
(Some(handle), None) => bezpath.quad_to(dvec2_to_point(handle), dvec2_to_point(first.anchor)),
}
bezpath.close_path();
}
bezpath
}
/// Decomposes a [`BezPath`] into bezier-rs [`ManipulatorGroup`]s, returning the groups
/// and whether the path ended with a [`kurbo::PathEl::ClosePath`].
pub fn bezpath_to_manipulator_groups(bezpath: &BezPath) -> (Vec<ManipulatorGroup<PointId>>, bool) {
	let mut manipulator_groups = Vec::<ManipulatorGroup<PointId>>::new();
	let mut is_closed = false;
	for element in bezpath.elements() {
		let manipulator_group = match *element {
			kurbo::PathEl::MoveTo(point) => ManipulatorGroup::new(point_to_dvec2(point), None, None),
			kurbo::PathEl::LineTo(point) => ManipulatorGroup::new(point_to_dvec2(point), None, None),
			// A quadratic's single control point becomes the incoming handle of the new group.
			kurbo::PathEl::QuadTo(point, point1) => ManipulatorGroup::new(point_to_dvec2(point1), Some(point_to_dvec2(point)), None),
			kurbo::PathEl::CurveTo(point, point1, point2) => {
				// The first cubic control point belongs to the previous anchor as its outgoing handle.
				if let Some(last_manipulator_group) = manipulator_groups.last_mut() {
					last_manipulator_group.out_handle = Some(point_to_dvec2(point));
				}
				ManipulatorGroup::new(point_to_dvec2(point2), Some(point_to_dvec2(point1)), None)
			}
			kurbo::PathEl::ClosePath => {
				// The final group duplicates the first anchor, so drop it and fold its handle into the first group.
				// NOTE(review): this assigns the closing segment's incoming handle to the first group's
				// `out_handle`, overwriting any out-handle set by the first CurveTo; `in_handle` looks like
				// the intended target — confirm against the round-trip with `bezpath_from_manipulator_groups`.
				if let Some(last_group) = manipulator_groups.pop() {
					if let Some(first_group) = manipulator_groups.first_mut() {
						first_group.out_handle = last_group.in_handle;
					}
				}
				is_closed = true;
				break;
			}
		};
		manipulator_groups.push(manipulator_group);
	}
	(manipulator_groups, is_closed)
}
/// Returns true if the [`PathSeg`] is equivalent to a line.
///
/// This is different from simply checking if the segment is [`PathSeg::Line`] or [`PathSeg::Quad`] or [`PathSeg::Cubic`]. Bezier curve can also be a line if the control points are colinear to the start and end points. Therefore if the handles exceed the start and end point, it will still be considered as a line.
pub fn is_linear(segment: PathSeg) -> bool {
	// Cross product of (b - a) and (c - a); near zero means the three points lie on one line.
	fn colinear(a: Point, b: Point, c: Point) -> bool {
		((b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)).abs() < MAX_ABSOLUTE_DIFFERENCE
	}
	match segment {
		PathSeg::Line(_) => true,
		PathSeg::Quad(QuadBez { p0, p1, p2 }) => colinear(p0, p1, p2),
		// Both cubic handles must be colinear with the endpoints.
		PathSeg::Cubic(CubicBez { p0, p1, p2, p3 }) => colinear(p0, p1, p3) && colinear(p0, p2, p3),
	}
}
/// Returns all control points of a path segment (including the endpoints) as a `Vec`.
pub fn get_segment_points(segment: PathSeg) -> Vec<Point> {
	match segment {
		PathSeg::Line(line) => vec![line.p0, line.p1],
		PathSeg::Quad(quad) => vec![quad.p0, quad.p1, quad.p2],
		PathSeg::Cubic(cubic) => vec![cubic.p0, cubic.p1, cubic.p2, cubic.p3],
	}
}
/// Returns true if the corresponding points of the two [`PathSeg`]s are within the provided absolute value difference from each other.
pub fn pathseg_abs_diff_eq(seg1: PathSeg, seg2: PathSeg, max_abs_diff: f64) -> bool {
	// Normalize degenerate (line-shaped) curves of any degree down to actual `Line`s so they compare point-for-point.
	let normalize = |seg: PathSeg| if is_linear(seg) { PathSeg::Line(Line::new(seg.start(), seg.end())) } else { seg };
	let points1 = get_segment_points(normalize(seg1));
	let points2 = get_segment_points(normalize(seg2));

	let close = |a: f64, b: f64| (a - b).abs() < max_abs_diff;
	points1.len() == points2.len() && points1.into_iter().zip(points2).all(|(a, b)| close(a.x, b.x) && close(a.y, b.y))
}

View file

@ -17,7 +17,7 @@ use core::hash::Hash;
use dyn_any::DynAny;
use glam::{DAffine2, DVec2};
pub use indexed::VectorDataIndex;
use kurbo::{Affine, Rect, Shape};
use kurbo::{Affine, BezPath, Rect, Shape};
pub use modification::*;
use std::collections::HashMap;
@ -195,6 +195,13 @@ impl VectorData {
Self::from_subpaths([subpath], false)
}
	/// Construct some new vector data from a single [`BezPath`] with an identity transform and black fill.
	pub fn from_bezpath(bezpath: BezPath) -> Self {
		// Start from the default (identity transform, black fill) and append the path's geometry.
		let mut vector_data = Self::default();
		vector_data.append_bezpath(bezpath);
		vector_data
	}
/// Construct some new vector data from subpaths with an identity transform and black fill.
pub fn from_subpaths(subpaths: impl IntoIterator<Item = impl Borrow<bezier_rs::Subpath<PointId>>>, preserve_id: bool) -> Self {
let mut vector_data = Self::default();
@ -226,10 +233,10 @@ impl VectorData {
pub fn close_subpaths(&mut self) {
let segments_to_add: Vec<_> = self
.stroke_bezier_paths()
.filter(|subpath| !subpath.closed)
.filter_map(|subpath| {
let (first, last) = subpath.manipulator_groups().first().zip(subpath.manipulator_groups().last())?;
.build_stroke_path_iter()
.filter(|(_, closed)| !closed)
.filter_map(|(manipulator_groups, _)| {
let (first, last) = manipulator_groups.first().zip(manipulator_groups.last())?;
let (start, end) = self.point_domain.resolve_id(first.id).zip(self.point_domain.resolve_id(last.id))?;
Some((start, end))
})
@ -337,7 +344,7 @@ impl VectorData {
/// Returns the number of linear segments connected to the given point.
pub fn connected_linear_segments(&self, point_id: PointId) -> usize {
self.segment_bezier_iter()
.filter(|(_, bez, start, end)| ((*start == point_id || *end == point_id) && matches!(bez.handles, BezierHandles::Linear)))
.filter(|(_, bez, start, end)| (*start == point_id || *end == point_id) && matches!(bez.handles, BezierHandles::Linear))
.count()
}
@ -370,7 +377,7 @@ impl VectorData {
}
pub fn check_point_inside_shape(&self, vector_data_transform: DAffine2, point: DVec2) -> bool {
let bez_paths: Vec<_> = self
let number = self
.stroke_bezpath_iter()
.map(|mut bezpath| {
// TODO: apply transform to points instead of modifying the paths
@ -379,19 +386,9 @@ impl VectorData {
let bbox = bezpath.bounding_box();
(bezpath, bbox)
})
.collect();
// Check against all paths the point is contained in to compute the correct winding number
let mut number = 0;
for (shape, bbox) in bez_paths {
if bbox.x0 > point.x || bbox.y0 > point.y || bbox.x1 < point.x || bbox.y1 < point.y {
continue;
}
let winding = shape.winding(dvec2_to_point(point));
number += winding;
}
.filter(|(_, bbox)| bbox.contains(dvec2_to_point(point)))
.map(|(bezpath, _)| bezpath.winding(dvec2_to_point(point)))
.sum::<i32>();
// Non-zero fill rule
number != 0
@ -571,6 +568,30 @@ impl ManipulatorPointId {
}
}
/// Finds all the connected handles of a point.
/// For an anchor it is all the connected handles.
/// For a handle it is all the handles connected to its corresponding anchor other than the current handle.
pub fn get_all_connected_handles(self, vector_data: &VectorData) -> Option<Vec<HandleId>> {
	// An anchor keeps every connected handle; a handle resolves its own anchor and then
	// excludes itself from that anchor's connected handles.
	let (anchor, excluded) = match self {
		ManipulatorPointId::Anchor(point) => return Some(vector_data.all_connected(point).collect()),
		ManipulatorPointId::PrimaryHandle(segment) => (vector_data.segment_domain.segment_start_from_id(segment)?, HandleId::primary(segment)),
		ManipulatorPointId::EndHandle(segment) => (vector_data.segment_domain.segment_end_from_id(segment)?, HandleId::end(segment)),
	};
	Some(vector_data.segment_domain.all_connected(anchor).filter(|&handle| handle != excluded).collect())
}
/// Attempt to find the closest anchor. If self is already an anchor then it is just self. If it is a start or end handle, then the start or end point is chosen.
#[must_use]
pub fn get_anchor(self, vector_data: &VectorData) -> Option<PointId> {

View file

@ -3,6 +3,7 @@ use crate::vector::vector_data::{HandleId, VectorData};
use bezier_rs::{BezierHandles, ManipulatorGroup};
use dyn_any::DynAny;
use glam::{DAffine2, DVec2};
use kurbo::{CubicBez, Line, PathSeg, QuadBez};
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::iter::zip;
@ -440,6 +441,35 @@ impl SegmentDomain {
let handles = self.handles.iter_mut();
zip(ids, zip(start_point, zip(end_point, handles))).map(|(id, (start_point, (end_point, handles)))| (id, start_point, end_point, handles))
}
/// Returns mutable references to the handles, start point index, and end point index of the
/// two segments at `index1` and `index2`, in that order.
///
/// Each backing slice is split at the larger of the two indices so that two disjoint mutable
/// borrows can be handed out without unsafe code. The indices must be distinct and in bounds.
pub(crate) fn pair_handles_and_points_mut_by_index(
	&mut self,
	index1: usize,
	index2: usize,
) -> (&mut bezier_rs::BezierHandles, &mut usize, &mut usize, &mut bezier_rs::BezierHandles, &mut usize, &mut usize) {
	// Split `slice` at the larger index and return mutable references to both requested elements.
	fn split_pair<T>(slice: &mut [T], index1: usize, index2: usize) -> (&mut T, &mut T) {
		let (lower, upper) = slice.split_at_mut(index1.max(index2));
		// The larger index lands at the start of `upper`; the smaller one stays in `lower`.
		if index1 < index2 { (&mut lower[index1], &mut upper[0]) } else { (&mut upper[0], &mut lower[index2]) }
	}
	let (h1, h2) = split_pair(&mut self.handles, index1, index2);
	let (sp1, sp2) = split_pair(&mut self.start_point, index1, index2);
	let (ep1, ep2) = split_pair(&mut self.end_point, index1, index2);
	(h1, sp1, ep1, h2, sp2, ep2)
}
}
#[derive(Clone, Debug, Default, PartialEq, Hash, DynAny, serde::Serialize, serde::Deserialize)]
@ -644,6 +674,18 @@ impl FoundSubpath {
}
impl VectorData {
/// Construct a [`kurbo::PathSeg`] by resolving the start and end point indices into positions.
fn path_segment_from_index(&self, start: usize, end: usize, handles: BezierHandles) -> PathSeg {
	let positions = self.point_domain.positions();
	let (start, end) = (dvec2_to_point(positions[start]), dvec2_to_point(positions[end]));
	// The handle variant determines the degree of the resulting kurbo segment.
	match handles {
		BezierHandles::Linear => PathSeg::Line(Line::new(start, end)),
		BezierHandles::Quadratic { handle } => PathSeg::Quad(QuadBez::new(start, dvec2_to_point(handle), end)),
		BezierHandles::Cubic { handle_start, handle_end } => PathSeg::Cubic(CubicBez::new(start, dvec2_to_point(handle_start), dvec2_to_point(handle_end), end)),
	}
}
/// Construct a [`bezier_rs::Bezier`] curve spanning from the resolved position of the start and end points with the specified handles.
fn segment_to_bezier_with_index(&self, start: usize, end: usize, handles: BezierHandles) -> bezier_rs::Bezier {
let start = self.point_domain.positions()[start];
@ -670,6 +712,19 @@ impl VectorData {
(start_id, end_id, self.segment_to_bezier_with_index(start, end, self.segment_domain.handles[index]))
}
/// Iterator over all of the [`kurbo::PathSeg`]s following the order that they are stored in the segment domain, skipping invalid segments.
/// (See [`Self::segment_bezier_iter`] for the equivalent iterator yielding [`bezier_rs::Bezier`] values.)
pub fn segment_iter(&self) -> impl Iterator<Item = (SegmentId, PathSeg, PointId, PointId)> {
	// Resolve each (handles, id, start index, end index) tuple into a kurbo segment plus the start/end point ids.
	let to_segment = |(((&handles, &id), &start), &end)| (id, self.path_segment_from_index(start, end, handles), self.point_domain.ids()[start], self.point_domain.ids()[end]);
	self.segment_domain
		.handles
		.iter()
		.zip(&self.segment_domain.id)
		.zip(self.segment_domain.start_point())
		.zip(self.segment_domain.end_point())
		.map(to_segment)
}
/// Iterator over all of the [`bezier_rs::Bezier`] following the order that they are stored in the segment domain, skipping invalid segments.
pub fn segment_bezier_iter(&self) -> impl Iterator<Item = (SegmentId, bezier_rs::Bezier, PointId, PointId)> + '_ {
let to_bezier = |(((&handles, &id), &start), &end)| (id, self.segment_to_bezier_with_index(start, end, handles), self.point_domain.ids()[start], self.point_domain.ids()[end]);
@ -790,48 +845,8 @@ impl VectorData {
Some(bezier_rs::Subpath::new(groups, closed))
}
/// Construct a [`bezier_rs::Subpath`] from an iterator of segments given as (handles, start point index, end point index).
/// Returns `None` if the segments are not continuous, i.e. if any segment does not begin where the previous one ended.
fn subpath_from_segments(&self, segments: impl Iterator<Item = (BezierHandles, usize, usize)>) -> Option<bezier_rs::Subpath<PointId>> {
	// Index of the very first start point, used to detect whether the path closes into a loop.
	let mut first_point = None;
	let mut groups = Vec::new();
	// The previous segment's (end point index, handles); its handles supply the next group's in-handle.
	let mut last: Option<(usize, BezierHandles)> = None;
	for (handle, start, end) in segments {
		// Discontinuity: this segment does not begin at the previous segment's end point.
		if last.is_some_and(|(previous_end, _)| previous_end != start) {
			warn!("subpath_from_segments that were not continuous");
			return None;
		}
		first_point = Some(first_point.unwrap_or(start));
		// One manipulator group per segment start: in-handle comes from the previous segment, out-handle from this one.
		groups.push(ManipulatorGroup {
			anchor: self.point_domain.positions()[start],
			in_handle: last.and_then(|(_, handle)| handle.end()),
			out_handle: handle.start(),
			id: self.point_domain.ids()[start],
		});
		last = Some((end, handle));
	}
	// The subpath is closed when the final segment ends where the first began (requires more than one group).
	let closed = groups.len() > 1 && last.map(|(point, _)| point) == first_point;
	if let Some((end, last_handle)) = last {
		if closed {
			// Close the loop: the last segment's end handle becomes the first group's in-handle.
			groups[0].in_handle = last_handle.end();
		} else {
			// Open path: append a final group for the last segment's end point.
			groups.push(ManipulatorGroup {
				anchor: self.point_domain.positions()[end],
				in_handle: last_handle.end(),
				out_handle: None,
				id: self.point_domain.ids()[end],
			});
		}
	}
	Some(bezier_rs::Subpath::new(groups, closed))
}
/// Construct a [`bezier_rs::Bezier`] curve for each region, skipping invalid regions.
pub fn region_bezier_paths(&self) -> impl Iterator<Item = (RegionId, bezier_rs::Subpath<PointId>)> + '_ {
pub fn region_manipulator_groups(&self) -> impl Iterator<Item = (RegionId, Vec<ManipulatorGroup<PointId>>)> + '_ {
self.region_domain
.id
.iter()
@ -847,7 +862,29 @@ impl VectorData {
.zip(self.segment_domain.end_point.get(range)?)
.map(|((&handles, &start), &end)| (handles, start, end));
self.subpath_from_segments(segments_iter).map(|subpath| (id, subpath))
let mut manipulator_groups = Vec::new();
let mut in_handle = None;
for segment in segments_iter {
let (handles, start_point_index, _end_point_index) = segment;
let start_point_id = self.point_domain.id[start_point_index];
let start_point = self.point_domain.position[start_point_index];
let (manipulator_group, next_in_handle) = match handles {
BezierHandles::Linear => (ManipulatorGroup::new_with_id(start_point, in_handle, None, start_point_id), None),
BezierHandles::Quadratic { handle } => (ManipulatorGroup::new_with_id(start_point, in_handle, Some(handle), start_point_id), None),
BezierHandles::Cubic { handle_start, handle_end } => (ManipulatorGroup::new_with_id(start_point, in_handle, Some(handle_start), start_point_id), Some(handle_end)),
};
in_handle = next_in_handle;
manipulator_groups.push(manipulator_group);
}
if let Some(first) = manipulator_groups.first_mut() {
first.in_handle = in_handle;
}
Some((id, manipulator_groups))
})
}

View file

@ -418,7 +418,7 @@ impl Hash for VectorModification {
}
}
/// A node that applies a procedural modification to some [`VectorData`].
/// Applies a diff modification to a vector path.
#[node_macro::node(category(""))]
async fn path_modify(_ctx: impl Ctx, mut vector_data: VectorDataTable, modification: Box<VectorModification>, node_path: Vec<NodeId>) -> VectorDataTable {
if vector_data.is_empty() {
@ -437,6 +437,23 @@ async fn path_modify(_ctx: impl Ctx, mut vector_data: VectorDataTable, modificat
vector_data
}
/// Applies the vector path's local transformation to its geometry and resets it to the identity.
#[node_macro::node(category("Vector"))]
async fn apply_transform(_ctx: impl Ctx, mut vector_data: VectorDataTable) -> VectorDataTable {
	for row in vector_data.instance_mut_iter() {
		// Bake the row's transform into every point position...
		let transform = *row.transform;
		for (_, position) in row.instance.point_domain.positions_mut() {
			*position = transform.transform_point2(*position);
		}
		// ...then reset the transform so the net placement of the geometry is unchanged.
		*row.transform = DAffine2::IDENTITY;
	}
	vector_data
}
// Do we want to enforce that all serialized/deserialized hashmaps are a vec of tuples?
// TODO: Eventually remove this document upgrade code
use serde::de::{SeqAccess, Visitor};

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,7 @@
use glam::DVec2;
use glam::{DAffine2, DVec2};
use graphene_core::gradient::GradientStops;
use graphene_core::registry::types::{Fraction, Percentage, TextArea};
use graphene_core::registry::types::{Fraction, Percentage, PixelSize, TextArea};
use graphene_core::transform::Footprint;
use graphene_core::{Color, Ctx, num_traits};
use log::warn;
use math_parser::ast;
@ -107,11 +108,11 @@ fn subtract<U: Sub<T>, T>(
fn multiply<U: Mul<T>, T>(
_: impl Ctx,
/// The left-hand side of the multiplication operation.
#[implementations(f64, f32, u32, DVec2, f64, DVec2)]
#[implementations(f64, f32, u32, f64, DVec2, DVec2, DAffine2)]
multiplier: U,
/// The right-hand side of the multiplication operation.
#[default(1.)]
#[implementations(f64, f32, u32, DVec2, DVec2, f64)]
#[implementations(f64, f32, u32, DVec2, f64, DVec2, DAffine2)]
multiplicand: T,
) -> <U as Mul<T>>::Output {
multiplier * multiplicand
@ -285,7 +286,7 @@ fn cosine_inverse<U: num_traits::float::Float>(
/// The inverse tangent trigonometric function (atan or atan2, depending on input type) calculates:
/// atan: the angle whose tangent is the specified scalar number.
/// atan2: the angle of a ray from the origin to the specified coordinate.
/// atan2: the angle of a ray from the origin to the specified vec2.
///
/// The resulting angle is always in the range [0°, 180°] or, in radians, [-π/2, π/2].
#[node_macro::node(category("Math: Trig"))]
@ -462,15 +463,89 @@ fn clamp<T: std::cmp::PartialOrd>(
}
}
/// The greatest common divisor (GCD) calculates the largest positive integer that divides both of the two input numbers without leaving a remainder.
#[node_macro::node(category("Math: Numeric"))]
fn greatest_common_divisor<T: num_traits::int::PrimInt + std::ops::ShrAssign<i32> + std::ops::SubAssign>(
	_: impl Ctx,
	/// One of the two numbers for which the GCD will be calculated.
	#[implementations(u32, u64, i32)]
	value: T,
	/// The other of the two numbers for which the GCD will be calculated.
	#[implementations(u32, u64, i32)]
	other_value: T,
) -> T {
	// `binary_gcd` already returns the other operand when either input is zero, so the node simply delegates.
	binary_gcd(value, other_value)
}
/// The least common multiple (LCM) calculates the smallest positive integer that is a multiple of both of the two input numbers.
#[node_macro::node(category("Math: Numeric"))]
fn least_common_multiple<T: num_traits::ToPrimitive + num_traits::FromPrimitive + num_traits::identities::Zero>(
	_: impl Ctx,
	/// One of the two numbers for which the LCM will be calculated.
	#[implementations(u32, u64, i32)]
	value: T,
	/// The other of the two numbers for which the LCM will be calculated.
	#[implementations(u32, u64, i32)]
	other_value: T,
) -> T {
	// Work on non-negative magnitudes in i128 so that every supported input type fits, and so
	// `binary_gcd` — whose shift-and-subtract loop does not terminate for negative operands —
	// is only ever given non-negative arguments (e.g. an i32 input of -6 previously hung).
	let value = value.to_i128().unwrap().abs();
	let other_value = other_value.to_i128().unwrap().abs();
	// By convention lcm(0, x) = lcm(x, 0) = 0.
	if value == 0 || other_value == 0 {
		return T::zero();
	}
	let gcd = binary_gcd(value, other_value);
	// Divide before multiplying to keep the intermediate value small: `value` is divisible by
	// `gcd`, and the raw product of two large u64 inputs could overflow even i128.
	T::from_i128(value / gcd * other_value).unwrap()
}
/// Stein's binary GCD algorithm over any primitive integer type.
///
/// Operates on magnitudes: negative inputs are first normalized to their absolute value,
/// because the shift-and-subtract loop below does not terminate for negative operands
/// (arithmetic right shift keeps the sign bit set, and `b -= a` grows `b` when `a` is negative).
/// Returns the other operand when either input is zero. For signed types, passing
/// `T::min_value()` is unsupported since its magnitude is not representable.
fn binary_gcd<T: num_traits::int::PrimInt + std::ops::ShrAssign<i32> + std::ops::SubAssign>(mut a: T, mut b: T) -> T {
	// gcd(0, b) = b and gcd(a, 0) = a.
	if a == T::zero() {
		return b;
	}
	if b == T::zero() {
		return a;
	}
	// Normalize signed inputs to non-negative magnitudes (a no-op for unsigned types).
	if a < T::zero() {
		a = T::zero() - a;
	}
	if b < T::zero() {
		b = T::zero() - b;
	}
	// Factor out the largest power of two common to both operands; it is restored at the end.
	let mut shift = 0;
	while (a | b) & T::one() == T::zero() {
		a >>= 1;
		b >>= 1;
		shift += 1;
	}
	// Make `a` odd; any remaining factors of two in `a` are not shared with `b`.
	while a & T::one() == T::zero() {
		a >>= 1;
	}
	// Invariant: `a` is odd. Strip factors of two from `b`, then subtract the smaller
	// from the larger until `b` reaches zero; `a` then holds the odd part of the gcd.
	while b != T::zero() {
		while b & T::one() == T::zero() {
			b >>= 1;
		}
		if a > b {
			std::mem::swap(&mut a, &mut b);
		}
		b -= a;
	}
	// Reapply the common power of two.
	a << shift
}
/// The equality operation (==) compares two values and returns true if they are equal, or false if they are not.
#[node_macro::node(category("Math: Logic"))]
fn equals<U: std::cmp::PartialEq<T>, T>(
_: impl Ctx,
/// One of the two numbers to compare for equality.
#[implementations(f64, f32, u32, DVec2, &str)]
#[implementations(f64, f32, u32, DVec2, &str, String)]
value: T,
/// The other of the two numbers to compare for equality.
#[implementations(f64, f32, u32, DVec2, &str)]
#[implementations(f64, f32, u32, DVec2, &str, String)]
other_value: U,
) -> bool {
other_value == value
@ -576,9 +651,9 @@ fn percentage_value(_: impl Ctx, _primary: (), percentage: Percentage) -> f64 {
percentage
}
/// Constructs a two-dimensional vector value which may be set to any XY coordinate.
#[node_macro::node(category("Value"))]
fn coordinate_value(_: impl Ctx, _primary: (), x: f64, y: f64) -> DVec2 {
/// Constructs a two-dimensional vector value which may be set to any XY pair.
#[node_macro::node(category("Value"), name("Vec2 Value"))]
fn vec2_value(_: impl Ctx, _primary: (), x: f64, y: f64) -> DVec2 {
DVec2::new(x, y)
}
@ -607,6 +682,16 @@ fn string_value(_: impl Ctx, _primary: (), string: TextArea) -> String {
string
}
/// Constructs a footprint value which may be set to any transformation of a unit square describing a render area, and a render resolution at least 1x1 integer pixels.
#[node_macro::node(category("Value"))]
fn footprint_value(_: impl Ctx, _primary: (), transform: DAffine2, #[default(100., 100.)] resolution: PixelSize) -> Footprint {
	// Clamp each axis to at least one pixel before converting to integer pixel counts.
	let resolution = resolution.max(DVec2::ONE).as_uvec2();
	Footprint { transform, resolution, ..Default::default() }
}
#[node_macro::node(category("Math: Vector"))]
fn dot_product(_: impl Ctx, vector_a: DVec2, vector_b: DVec2) -> f64 {
vector_a.dot(vector_b)

View file

@ -80,8 +80,7 @@ fn union<'a>(vector_data: impl DoubleEndedIterator<Item = InstanceRef<'a, Vector
// Reverse vector data so that the result style is the style of the first vector data
let mut vector_data_reversed = vector_data.rev();
let mut result_vector_data_table = VectorDataTable::default();
result_vector_data_table.push(vector_data_reversed.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut result_vector_data_table = VectorDataTable::new_instance(vector_data_reversed.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut first_instance = result_vector_data_table.instance_mut_iter().next().expect("Expected the one instance we just pushed");
// Loop over all vector data and union it with the result
@ -113,8 +112,7 @@ fn union<'a>(vector_data: impl DoubleEndedIterator<Item = InstanceRef<'a, Vector
fn subtract<'a>(vector_data: impl Iterator<Item = InstanceRef<'a, VectorData>>) -> VectorDataTable {
let mut vector_data = vector_data.into_iter();
let mut result_vector_data_table = VectorDataTable::default();
result_vector_data_table.push(vector_data.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut result_vector_data_table = VectorDataTable::new_instance(vector_data.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut first_instance = result_vector_data_table.instance_mut_iter().next().expect("Expected the one instance we just pushed");
let mut next_vector_data = vector_data.next();
@ -145,8 +143,7 @@ fn subtract<'a>(vector_data: impl Iterator<Item = InstanceRef<'a, VectorData>>)
fn intersect<'a>(vector_data: impl DoubleEndedIterator<Item = InstanceRef<'a, VectorData>>) -> VectorDataTable {
let mut vector_data = vector_data.rev();
let mut result_vector_data_table = VectorDataTable::default();
result_vector_data_table.push(vector_data.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut result_vector_data_table = VectorDataTable::new_instance(vector_data.next().map(|x| x.to_instance_cloned()).unwrap_or_default());
let mut first_instance = result_vector_data_table.instance_mut_iter().next().expect("Expected the one instance we just pushed");
let default = Instance::default();
@ -225,71 +222,67 @@ fn difference<'a>(vector_data: impl DoubleEndedIterator<Item = InstanceRef<'a, V
}
fn flatten_vector_data(graphic_group_table: &GraphicGroupTable) -> VectorDataTable {
let mut result_table = VectorDataTable::default();
graphic_group_table
.instance_ref_iter()
.flat_map(|element| {
match element.instance.clone() {
GraphicElement::VectorData(vector_data) => {
// Apply the parent group's transform to each element of vector data
vector_data
.instance_iter()
.map(|mut sub_vector_data| {
sub_vector_data.transform = *element.transform * sub_vector_data.transform;
for element in graphic_group_table.instance_ref_iter() {
match element.instance.clone() {
GraphicElement::VectorData(vector_data) => {
// Apply the parent group's transform to each element of vector data
for mut sub_vector_data in vector_data.instance_iter() {
sub_vector_data.transform = *element.transform * sub_vector_data.transform;
sub_vector_data
})
.collect::<Vec<_>>()
}
GraphicElement::RasterDataCPU(image) => {
let make_instance = |transform| {
// Convert the image frame into a rectangular subpath with the image's transform
let mut subpath = Subpath::new_rect(DVec2::ZERO, DVec2::ONE);
subpath.apply_transform(transform);
result_table.push(sub_vector_data);
// Create a vector data table row from the rectangular subpath, with a default black fill
let mut instance = VectorData::from_subpath(subpath);
instance.style.set_fill(Fill::Solid(Color::BLACK));
Instance { instance, ..Default::default() }
};
// Apply the parent group's transform to each element of raster data
image.instance_ref_iter().map(|instance| make_instance(*element.transform * *instance.transform)).collect::<Vec<_>>()
}
GraphicElement::RasterDataGPU(image) => {
let make_instance = |transform| {
// Convert the image frame into a rectangular subpath with the image's transform
let mut subpath = Subpath::new_rect(DVec2::ZERO, DVec2::ONE);
subpath.apply_transform(transform);
// Create a vector data table row from the rectangular subpath, with a default black fill
let mut instance = VectorData::from_subpath(subpath);
instance.style.set_fill(Fill::Solid(Color::BLACK));
Instance { instance, ..Default::default() }
};
// Apply the parent group's transform to each element of raster data
image.instance_ref_iter().map(|instance| make_instance(*element.transform * *instance.transform)).collect::<Vec<_>>()
}
GraphicElement::GraphicGroup(mut graphic_group) => {
// Apply the parent group's transform to each element of inner group
for sub_element in graphic_group.instance_mut_iter() {
*sub_element.transform = *element.transform * *sub_element.transform;
}
// Recursively flatten the inner group into vector data
let unioned = boolean_operation_on_vector_data_table(flatten_vector_data(&graphic_group).instance_ref_iter(), BooleanOperation::Union);
unioned.instance_iter().collect::<Vec<_>>()
}
}
GraphicElement::RasterDataCPU(image) => {
let make_instance = |transform| {
// Convert the image frame into a rectangular subpath with the image's transform
let mut subpath = Subpath::new_rect(DVec2::ZERO, DVec2::ONE);
subpath.apply_transform(transform);
// Create a vector data table row from the rectangular subpath, with a default black fill
let mut instance = VectorData::from_subpath(subpath);
instance.style.set_fill(Fill::Solid(Color::BLACK));
Instance { instance, ..Default::default() }
};
// Apply the parent group's transform to each element of raster data
for instance in image.instance_ref_iter() {
result_table.push(make_instance(*element.transform * *instance.transform));
}
}
GraphicElement::RasterDataGPU(image) => {
let make_instance = |transform| {
// Convert the image frame into a rectangular subpath with the image's transform
let mut subpath = Subpath::new_rect(DVec2::ZERO, DVec2::ONE);
subpath.apply_transform(transform);
// Create a vector data table row from the rectangular subpath, with a default black fill
let mut instance = VectorData::from_subpath(subpath);
instance.style.set_fill(Fill::Solid(Color::BLACK));
Instance { instance, ..Default::default() }
};
// Apply the parent group's transform to each element of raster data
for instance in image.instance_ref_iter() {
result_table.push(make_instance(*element.transform * *instance.transform));
}
}
GraphicElement::GraphicGroup(mut graphic_group) => {
// Apply the parent group's transform to each element of inner group
for sub_element in graphic_group.instance_mut_iter() {
*sub_element.transform = *element.transform * *sub_element.transform;
}
// Recursively flatten the inner group into vector data
let unioned = boolean_operation_on_vector_data_table(flatten_vector_data(&graphic_group).instance_ref_iter(), BooleanOperation::Union);
for element in unioned.instance_iter() {
result_table.push(element);
}
}
}
}
result_table
})
.collect()
}
fn to_path(vector: &VectorData, transform: DAffine2) -> Vec<path_bool::PathSegment> {

View file

@ -4,9 +4,10 @@ use crate::wasm_application_io::WasmEditorApi;
use dyn_any::DynAny;
pub use dyn_any::StaticType;
pub use glam::{DAffine2, DVec2, IVec2, UVec2};
use graphene_application_io::SurfaceFrame;
use graphene_application_io::{ImageTexture, SurfaceFrame};
use graphene_brush::brush_cache::BrushCache;
use graphene_brush::brush_stroke::BrushStroke;
use graphene_core::raster::Image;
use graphene_core::raster_types::CPU;
use graphene_core::transform::ReferencePoint;
use graphene_core::uuid::NodeId;
@ -165,8 +166,7 @@ tagged_value! {
U64(u64),
Bool(bool),
String(String),
UVec2(UVec2),
IVec2(IVec2),
#[serde(alias = "IVec2", alias = "UVec2")]
DVec2(DVec2),
DAffine2(DAffine2),
OptionalF64(Option<f64>),
@ -248,6 +248,7 @@ tagged_value! {
ReferencePoint(graphene_core::transform::ReferencePoint),
CentroidType(graphene_core::vector::misc::CentroidType),
BooleanOperation(graphene_path_bool::BooleanOperation),
TextAlign(graphene_core::text::TextAlign),
}
impl TaggedValue {
@ -425,10 +426,15 @@ pub struct RenderOutput {
pub metadata: RenderMetadata,
}
#[derive(Debug, Clone, PartialEq, dyn_any::DynAny, Hash, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Hash, PartialEq, dyn_any::DynAny, serde::Serialize, serde::Deserialize)]
pub enum RenderOutputType {
CanvasFrame(SurfaceFrame),
Svg(String),
#[serde(skip)]
Texture(ImageTexture),
Svg {
svg: String,
image_data: Vec<(u64, Image<Color>)>,
},
Image(Vec<u8>),
}

View file

@ -3,6 +3,7 @@ use graphene_application_io::{ApplicationError, ApplicationIo, ResourceFuture, S
#[cfg(target_arch = "wasm32")]
use js_sys::{Object, Reflect};
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Arc;
#[cfg(target_arch = "wasm32")]
use std::sync::atomic::AtomicU64;
@ -39,7 +40,7 @@ impl Drop for WindowWrapper {
let wrapper = || {
if let Ok(canvases) = Reflect::get(&window, &image_canvases_key) {
// Convert key and value to JsValue
let js_key = JsValue::from_str(format!("canvas{}", self.window.window_id).as_str());
let js_key = JsValue::from_str(self.window.window_id.to_string().as_str());
// Use Reflect API to set property
Reflect::delete_property(&canvases.into(), &js_key)?;
@ -69,14 +70,6 @@ pub struct WasmApplicationIo {
static WGPU_AVAILABLE: std::sync::atomic::AtomicI8 = std::sync::atomic::AtomicI8::new(-1);
pub fn wgpu_available() -> Option<bool> {
// Always enable wgpu when running with Tauri
#[cfg(target_arch = "wasm32")]
if let Some(window) = web_sys::window() {
if js_sys::Reflect::get(&window, &wasm_bindgen::JsValue::from_str("__TAURI__")).is_ok() {
return Some(true);
}
}
match WGPU_AVAILABLE.load(Ordering::SeqCst) {
-1 => None,
0 => Some(false),
@ -136,7 +129,6 @@ impl WasmApplicationIo {
let wgpu_available = executor.is_some();
WGPU_AVAILABLE.store(wgpu_available as i8, Ordering::SeqCst);
// Always enable wgpu when running with Tauri
let mut io = Self {
#[cfg(target_arch = "wasm32")]
ids: AtomicU64::new(0),
@ -148,6 +140,27 @@ impl WasmApplicationIo {
io.resources.insert("null".to_string(), Arc::from(include_bytes!("null.png").to_vec()));
io
}
/// Construct the application IO backed by an existing [`wgpu_executor::Context`]
/// (native, wgpu-enabled builds only), rather than creating a fresh executor.
#[cfg(all(not(target_arch = "wasm32"), feature = "wgpu"))]
pub fn new_with_context(context: wgpu_executor::Context) -> Self {
	#[cfg(feature = "wgpu")]
	let executor = WgpuExecutor::with_context(context);
	// NOTE(review): this branch looks unreachable — the whole function is already gated on
	// `feature = "wgpu"` above, so the `not(feature = "wgpu")` binding can never compile in.
	#[cfg(not(feature = "wgpu"))]
	let wgpu_available = false;
	#[cfg(feature = "wgpu")]
	let wgpu_available = executor.is_some();
	// Record global wgpu availability for later `wgpu_available()` queries.
	WGPU_AVAILABLE.store(wgpu_available as i8, Ordering::SeqCst);
	let mut io = Self {
		gpu_executor: executor,
		windows: Vec::new(),
		resources: HashMap::new(),
	};
	// Bundled placeholder image exposed to graphs as the "null" resource.
	io.resources.insert("null".to_string(), Arc::from(include_bytes!("null.png").to_vec()));
	io
}
}
@ -200,7 +213,7 @@ impl ApplicationIo for WasmApplicationIo {
}
// Convert key and value to JsValue
let js_key = JsValue::from_str(format!("canvas{}", id).as_str());
let js_key = JsValue::from_str(id.to_string().as_str());
let js_value = JsValue::from(canvas.clone());
let canvases = Object::from(canvases.unwrap());
@ -217,26 +230,30 @@ impl ApplicationIo for WasmApplicationIo {
}
#[cfg(not(target_arch = "wasm32"))]
fn create_window(&self) -> SurfaceHandle<Self::Surface> {
log::trace!("Spawning window");
todo!("winit api changed, calling create_window on EventLoop is deprecated");
#[cfg(all(not(test), target_os = "linux", feature = "wayland"))]
use winit::platform::wayland::EventLoopBuilderExtWayland;
// log::trace!("Spawning window");
#[cfg(all(not(test), target_os = "linux", feature = "wayland"))]
let event_loop = winit::event_loop::EventLoopBuilder::new().with_any_thread(true).build().unwrap();
#[cfg(not(all(not(test), target_os = "linux", feature = "wayland")))]
let event_loop = winit::event_loop::EventLoop::new().unwrap();
// #[cfg(all(not(test), target_os = "linux", feature = "wayland"))]
// use winit::platform::wayland::EventLoopBuilderExtWayland;
let window = winit::window::WindowBuilder::new()
.with_title("Graphite")
.with_inner_size(winit::dpi::PhysicalSize::new(800, 600))
.build(&event_loop)
.unwrap();
// #[cfg(all(not(test), target_os = "linux", feature = "wayland"))]
// let event_loop = winit::event_loop::EventLoopBuilder::new().with_any_thread(true).build().unwrap();
// #[cfg(not(all(not(test), target_os = "linux", feature = "wayland")))]
// let event_loop = winit::event_loop::EventLoop::new().unwrap();
SurfaceHandle {
window_id: SurfaceId(window.id().into()),
surface: Arc::new(window),
}
// let window = event_loop
// .create_window(
// winit::window::WindowAttributes::default()
// .with_title("Graphite")
// .with_inner_size(winit::dpi::PhysicalSize::new(800, 600)),
// )
// .unwrap();
// SurfaceHandle {
// window_id: SurfaceId(window.id().into()),
// surface: Arc::new(window),
// }
}
#[cfg(target_arch = "wasm32")]
@ -249,7 +266,7 @@ impl ApplicationIo for WasmApplicationIo {
let wrapper = || {
if let Ok(canvases) = Reflect::get(&window, &image_canvases_key) {
// Convert key and value to JsValue
let js_key = JsValue::from_str(format!("canvas{}", surface_id.0).as_str());
let js_key = JsValue::from_str(surface_id.0.to_string().as_str());
// Use Reflect API to set property
Reflect::delete_property(&canvases.into(), &js_key)?;

View file

@ -111,7 +111,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
std::thread::spawn(move || {
loop {
std::thread::sleep(std::time::Duration::from_nanos(10));
device.poll(wgpu::Maintain::Poll);
device.poll(wgpu::PollType::Poll).unwrap();
}
});
let executor = create_executor(proto_graph)?;
@ -123,7 +123,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
println!("{:?}", result);
break;
}
std::thread::sleep(std::time::Duration::from_millis(16));
tokio::time::sleep(std::time::Duration::from_millis(16)).await;
}
}
}

View file

@ -7,28 +7,44 @@ authors = ["Graphite Authors <contact@graphite.rs>"]
license = "MIT OR Apache-2.0"
[features]
default = ["serde"]
serde = ["dep:serde"]
default = ["std"]
std = [
"dep:graphene-core",
"dep:dyn-any",
"dep:image",
"dep:ndarray",
"dep:bezier-rs",
"dep:rand",
"dep:rand_chacha",
"dep:fastnoise-lite",
"dep:serde",
"dep:specta",
"dep:glam"
]
[dependencies]
# Local dependencies
dyn-any = { workspace = true }
graphene-core = { workspace = true }
graphene-core-shaders = { workspace = true }
node-macro = { workspace = true }
# Workspace dependencies
glam = { workspace = true }
specta = { workspace = true }
image = { workspace = true }
bytemuck = { workspace = true }
ndarray = { workspace = true }
bezier-rs = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
fastnoise-lite = { workspace = true }
# Local std dependencies
dyn-any = { workspace = true, optional = true }
graphene-core = { workspace = true, optional = true }
# Optional workspace dependencies
serde = { workspace = true, optional = true, features = ["derive"] }
# Workspace dependencies
bytemuck = { workspace = true }
# glam is reexported from gcore-shaders in no_std mode
glam = { workspace = true, optional = true }
# Workspace std dependencies
specta = { workspace = true, optional = true }
image = { workspace = true, optional = true }
ndarray = { workspace = true, optional = true }
bezier-rs = { workspace = true, optional = true }
rand = { workspace = true, optional = true }
rand_chacha = { workspace = true, optional = true }
fastnoise-lite = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
[dev-dependencies]
tokio = { workspace = true }

View file

@ -0,0 +1,40 @@
use graphene_core_shaders::color::Color;
/// Applies a color-mapping function to every color a value contains.
pub trait Adjust<P> {
	/// Replace each contained color with the result of `map_fn` applied to it.
	fn adjust(&mut self, map_fn: impl Fn(&P) -> P);
}

impl Adjust<Color> for Color {
	// A bare color is simply replaced in place.
	fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
		*self = map_fn(self);
	}
}

impl Adjust<Color> for Option<Color> {
	// `None` is left untouched; a `Some` color is replaced in place.
	fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
		if let Some(v) = self {
			*v = map_fn(v)
		}
	}
}

/// Implementations that require std-only types (gradients and CPU raster tables).
#[cfg(feature = "std")]
mod adjust_std {
	use super::*;
	use graphene_core::gradient::GradientStops;
	use graphene_core::raster_types::{CPU, RasterDataTable};

	impl Adjust<Color> for GradientStops {
		// Remap the color of every gradient stop; stop positions are left unchanged.
		fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
			for (_pos, c) in self.iter_mut() {
				*c = map_fn(c);
			}
		}
	}

	impl Adjust<Color> for RasterDataTable<CPU> {
		// Remap every pixel of every raster instance in the table.
		fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
			for instance in self.instance_mut_iter() {
				for c in instance.instance.data_mut().data.iter_mut() {
					*c = map_fn(c);
				}
			}
		}
	}
}

View file

@ -1,18 +1,15 @@
#![allow(clippy::too_many_arguments)]
use crate::curve::CubicSplines;
use dyn_any::DynAny;
use graphene_core::Node;
use graphene_core::blending::BlendMode;
use graphene_core::color::Color;
use graphene_core::color::Pixel;
use graphene_core::context::Ctx;
use crate::adjust::Adjust;
use crate::cubic_spline::CubicSplines;
use core::fmt::Debug;
#[cfg(feature = "std")]
use graphene_core::gradient::GradientStops;
use graphene_core::raster::image::Image;
use graphene_core::raster_types::{CPU, Raster, RasterDataTable};
use graphene_core::registry::types::{Angle, Percentage, SignedPercentage};
use std::cmp::Ordering;
use std::fmt::Debug;
#[cfg(feature = "std")]
use graphene_core::raster_types::{CPU, RasterDataTable};
use graphene_core_shaders::color::Color;
use graphene_core_shaders::context::Ctx;
use graphene_core_shaders::registry::types::{Angle, Percentage, SignedPercentage};
// TODO: Implement the following:
// Color Balance
@ -29,7 +26,8 @@ use std::fmt::Debug;
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27clrL%27%20%3D%20Color%20Lookup
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=Color%20Lookup%20(Photoshop%20CS6
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, DynAny, Hash, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Dropdown)]
pub enum LuminanceCalculation {
#[default]
@ -41,7 +39,7 @@ pub enum LuminanceCalculation {
MaximumChannels,
}
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
fn luminance<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -65,7 +63,27 @@ fn luminance<T: Adjust<Color>>(
input
}
#[node_macro::node(category("Raster: Channels"))]
/// Applies per-channel gamma correction (`color.gamma(exponent)`) to the input.
/// With `inverse` set, the reciprocal exponent `1 / gamma` is used instead, undoing a
/// prior correction performed with the same `gamma` value.
#[node_macro::node(category("Raster"), shader_node(PerPixelAdjust))]
fn gamma_correction<T: Adjust<Color>>(
	_: impl Ctx,
	#[implementations(
		Color,
		RasterDataTable<CPU>,
		GradientStops,
	)]
	mut input: T,
	// Default of 2.2 matches the common display gamma; `hard_min` keeps the exponent
	// strictly positive so `1. / gamma` stays finite.
	#[default(2.2)]
	#[range((0.01, 10.))]
	#[hard_min(0.0001)]
	gamma: f64,
	inverse: bool,
) -> T {
	let exponent = if inverse { 1. / gamma } else { gamma };
	input.adjust(|color| color.gamma(exponent as f32));
	input
}
#[node_macro::node(category("Raster: Channels"), shader_node(PerPixelAdjust))]
fn extract_channel<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -88,7 +106,7 @@ fn extract_channel<T: Adjust<Color>>(
input
}
#[node_macro::node(category("Raster: Channels"))]
#[node_macro::node(category("Raster: Channels"), shader_node(PerPixelAdjust))]
fn make_opaque<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -113,14 +131,14 @@ fn make_opaque<T: Adjust<Color>>(
//
// Some further analysis available at:
// https://geraldbakker.nl/psnumbers/brightness-contrast.html
#[node_macro::node(name("Brightness/Contrast"), category("Raster: Adjustment"), properties("brightness_contrast_properties"))]
#[node_macro::node(name("Brightness/Contrast"), category("Raster: Adjustment"), properties("brightness_contrast_properties"), shader_node(PerPixelAdjust))]
fn brightness_contrast<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
Color,
RasterDataTable<CPU>,
GradientStops,
)]
Color,
RasterDataTable<CPU>,
GradientStops,
)]
mut input: T,
brightness: SignedPercentage,
contrast: SignedPercentage,
@ -130,7 +148,7 @@ fn brightness_contrast<T: Adjust<Color>>(
let brightness = brightness as f32 / 255.;
let contrast = contrast as f32 / 100.;
let contrast = if contrast > 0. { (contrast * std::f32::consts::FRAC_PI_2 - 0.01).tan() } else { contrast };
let contrast = if contrast > 0. { (contrast * core::f32::consts::FRAC_PI_2 - 0.01).tan() } else { contrast };
let offset = brightness * contrast + brightness - contrast / 2.;
@ -152,13 +170,13 @@ fn brightness_contrast<T: Adjust<Color>>(
y: [0., 130. + brightness * 51., 233. + brightness * 10., 255.].map(|x| x / 255.),
};
let brightness_curve_solutions = brightness_curve_points.solve();
let mut brightness_lut: [f32; WINDOW_SIZE] = std::array::from_fn(|i| {
let mut brightness_lut: [f32; WINDOW_SIZE] = core::array::from_fn(|i| {
let x = i as f32 / (WINDOW_SIZE as f32 - 1.);
brightness_curve_points.interpolate(x, &brightness_curve_solutions)
});
// Special handling for when brightness is negative
if brightness_is_negative {
brightness_lut = std::array::from_fn(|i| {
brightness_lut = core::array::from_fn(|i| {
let mut x = i;
while x > 1 && brightness_lut[x] > i as f32 / WINDOW_SIZE as f32 {
x -= 1;
@ -177,7 +195,7 @@ fn brightness_contrast<T: Adjust<Color>>(
y: [0., 64. - contrast * 30., 192. + contrast * 30., 255.].map(|x| x / 255.),
};
let contrast_curve_solutions = contrast_curve_points.solve();
let contrast_lut: [f32; WINDOW_SIZE] = std::array::from_fn(|i| {
let contrast_lut: [f32; WINDOW_SIZE] = core::array::from_fn(|i| {
let x = i as f32 / (WINDOW_SIZE as f32 - 1.);
contrast_curve_points.interpolate(x, &contrast_curve_solutions)
});
@ -202,7 +220,7 @@ fn brightness_contrast<T: Adjust<Color>>(
//
// Some further analysis available at:
// https://geraldbakker.nl/psnumbers/levels.html
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
fn levels<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -269,7 +287,7 @@ fn levels<T: Adjust<Color>>(
// Algorithm from:
// https://stackoverflow.com/a/55233732/775283
// Works the same for gamma and linear color
#[node_macro::node(name("Black & White"), category("Raster: Adjustment"))]
#[node_macro::node(name("Black & White"), category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn black_and_white<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -341,7 +359,7 @@ async fn black_and_white<T: Adjust<Color>>(
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27hue%20%27%20%3D%20Old,saturation%2C%20Photoshop%205.0
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=0%20%3D%20Use%20other.-,Hue/Saturation,-Hue/Saturation%20settings
#[node_macro::node(name("Hue/Saturation"), category("Raster: Adjustment"))]
#[node_macro::node(name("Hue/Saturation"), category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn hue_saturation<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -375,7 +393,7 @@ async fn hue_saturation<T: Adjust<Color>>(
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27%20%3D%20Color%20Lookup-,%27nvrt%27%20%3D%20Invert,-%27post%27%20%3D%20Posterize
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn invert<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -397,7 +415,7 @@ async fn invert<T: Adjust<Color>>(
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=post%27%20%3D%20Posterize-,%27thrs%27%20%3D%20Threshold,-%27grdm%27%20%3D%20Gradient
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn threshold<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -427,202 +445,6 @@ async fn threshold<T: Adjust<Color>>(
image
}
trait Blend<P: Pixel> {
fn blend(&self, under: &Self, blend_fn: impl Fn(P, P) -> P) -> Self;
}
impl Blend<Color> for Color {
fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
blend_fn(*self, *under)
}
}
impl Blend<Color> for Option<Color> {
fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
match (self, under) {
(Some(a), Some(b)) => Some(blend_fn(*a, *b)),
(a, None) => *a,
(None, b) => *b,
}
}
}
impl Blend<Color> for RasterDataTable<CPU> {
fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
let mut result_table = self.clone();
for (over, under) in result_table.instance_mut_iter().zip(under.instance_ref_iter()) {
let data = over.instance.data.iter().zip(under.instance.data.iter()).map(|(a, b)| blend_fn(*a, *b)).collect();
*over.instance = Raster::new_cpu(Image {
data,
width: over.instance.width,
height: over.instance.height,
base64_string: None,
});
}
result_table
}
}
impl Blend<Color> for GradientStops {
fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
let mut combined_stops = self.iter().map(|(position, _)| position).chain(under.iter().map(|(position, _)| position)).collect::<Vec<_>>();
combined_stops.dedup_by(|&mut a, &mut b| (a - b).abs() < 1e-6);
combined_stops.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
let stops = combined_stops
.into_iter()
.map(|&position| {
let over_color = self.evaluate(position);
let under_color = under.evaluate(position);
let color = blend_fn(over_color, under_color);
(position, color)
})
.collect::<Vec<_>>();
GradientStops::new(stops)
}
}
#[node_macro::node(category("Raster"))]
async fn blend<T: Blend<Color> + Send>(
_: impl Ctx,
#[implementations(
Color,
RasterDataTable<CPU>,
GradientStops,
)]
over: T,
#[expose]
#[implementations(
Color,
RasterDataTable<CPU>,
GradientStops,
)]
under: T,
blend_mode: BlendMode,
#[default(100.)] opacity: Percentage,
) -> T {
over.blend(&under, |a, b| blend_colors(a, b, blend_mode, opacity / 100.))
}
#[node_macro::node(category(""), skip_impl)]
fn blend_color_pair<BlendModeNode, OpacityNode>(input: (Color, Color), blend_mode: &'n BlendModeNode, opacity: &'n OpacityNode) -> Color
where
BlendModeNode: Node<'n, (), Output = BlendMode> + 'n,
OpacityNode: Node<'n, (), Output = Percentage> + 'n,
{
let blend_mode = blend_mode.eval(());
let opacity = opacity.eval(());
blend_colors(input.0, input.1, blend_mode, opacity / 100.)
}
pub fn apply_blend_mode(foreground: Color, background: Color, blend_mode: BlendMode) -> Color {
match blend_mode {
// Normal group
BlendMode::Normal => background.blend_rgb(foreground, Color::blend_normal),
// Darken group
BlendMode::Darken => background.blend_rgb(foreground, Color::blend_darken),
BlendMode::Multiply => background.blend_rgb(foreground, Color::blend_multiply),
BlendMode::ColorBurn => background.blend_rgb(foreground, Color::blend_color_burn),
BlendMode::LinearBurn => background.blend_rgb(foreground, Color::blend_linear_burn),
BlendMode::DarkerColor => background.blend_darker_color(foreground),
// Lighten group
BlendMode::Lighten => background.blend_rgb(foreground, Color::blend_lighten),
BlendMode::Screen => background.blend_rgb(foreground, Color::blend_screen),
BlendMode::ColorDodge => background.blend_rgb(foreground, Color::blend_color_dodge),
BlendMode::LinearDodge => background.blend_rgb(foreground, Color::blend_linear_dodge),
BlendMode::LighterColor => background.blend_lighter_color(foreground),
// Contrast group
BlendMode::Overlay => foreground.blend_rgb(background, Color::blend_hardlight),
BlendMode::SoftLight => background.blend_rgb(foreground, Color::blend_softlight),
BlendMode::HardLight => background.blend_rgb(foreground, Color::blend_hardlight),
BlendMode::VividLight => background.blend_rgb(foreground, Color::blend_vivid_light),
BlendMode::LinearLight => background.blend_rgb(foreground, Color::blend_linear_light),
BlendMode::PinLight => background.blend_rgb(foreground, Color::blend_pin_light),
BlendMode::HardMix => background.blend_rgb(foreground, Color::blend_hard_mix),
// Inversion group
BlendMode::Difference => background.blend_rgb(foreground, Color::blend_difference),
BlendMode::Exclusion => background.blend_rgb(foreground, Color::blend_exclusion),
BlendMode::Subtract => background.blend_rgb(foreground, Color::blend_subtract),
BlendMode::Divide => background.blend_rgb(foreground, Color::blend_divide),
// Component group
BlendMode::Hue => background.blend_hue(foreground),
BlendMode::Saturation => background.blend_saturation(foreground),
BlendMode::Color => background.blend_color(foreground),
BlendMode::Luminosity => background.blend_luminosity(foreground),
// Other utility blend modes (hidden from the normal list) - do not have alpha blend
_ => panic!("Used blend mode without alpha blend"),
}
}
trait Adjust<P> {
fn adjust(&mut self, map_fn: impl Fn(&P) -> P);
}
impl Adjust<Color> for Color {
fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
*self = map_fn(self);
}
}
impl Adjust<Color> for Option<Color> {
fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
if let Some(v) = self {
*v = map_fn(v)
}
}
}
impl Adjust<Color> for GradientStops {
fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
for (_pos, c) in self.iter_mut() {
*c = map_fn(c);
}
}
}
impl Adjust<Color> for RasterDataTable<CPU> {
fn adjust(&mut self, map_fn: impl Fn(&Color) -> Color) {
for instance in self.instance_mut_iter() {
for c in instance.instance.data_mut().data.iter_mut() {
*c = map_fn(c);
}
}
}
}
#[inline(always)]
pub fn blend_colors(foreground: Color, background: Color, blend_mode: BlendMode, opacity: f64) -> Color {
let target_color = match blend_mode {
// Other utility blend modes (hidden from the normal list) - do not have alpha blend
BlendMode::Erase => return background.alpha_subtract(foreground),
BlendMode::Restore => return background.alpha_add(foreground),
BlendMode::MultiplyAlpha => return background.alpha_multiply(foreground),
blend_mode => apply_blend_mode(foreground, background, blend_mode),
};
background.alpha_blend(target_color.to_associated_alpha(opacity as f32))
}
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27grdm%27%20%3D%20Gradient%20Map
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=Gradient%20settings%20(Photoshop%206.0)
#[node_macro::node(category("Raster: Adjustment"))]
async fn gradient_map<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
Color,
RasterDataTable<CPU>,
GradientStops,
)]
mut image: T,
gradient: GradientStops,
reverse: bool,
) -> T {
image.adjust(|color| {
let intensity = color.luminance_srgb();
let intensity = if reverse { 1. - intensity } else { intensity };
gradient.evaluate(intensity as f64).to_linear_srgb()
});
image
}
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27-,vibA%27%20%3D%20Vibrance,-%27hue%20%27%20%3D%20Old
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=Vibrance%20(Photoshop%20CS3)
@ -638,7 +460,7 @@ async fn gradient_map<T: Adjust<Color>>(
// It's not the same as the saturation component of Hue/Saturation/Value. Vibrance and Saturation are both separable.
// When both parameters are set, it is equivalent to running this adjustment twice, with only vibrance set and then only saturation set.
// (Except for some noise probably due to rounding error.)
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn vibrance<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -700,7 +522,8 @@ async fn vibrance<T: Adjust<Color>>(
}
/// Color Channel
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Radio)]
pub enum RedGreenBlue {
#[default]
@ -710,7 +533,8 @@ pub enum RedGreenBlue {
}
/// Color Channel
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Radio)]
pub enum RedGreenBlueAlpha {
#[default]
@ -721,7 +545,8 @@ pub enum RedGreenBlueAlpha {
}
/// Style of noise pattern
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Dropdown)]
pub enum NoiseType {
#[default]
@ -736,7 +561,8 @@ pub enum NoiseType {
WhiteNoise,
}
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
/// Style of layered levels of the noise pattern
pub enum FractalType {
#[default]
@ -752,7 +578,8 @@ pub enum FractalType {
}
/// Distance function used by the cellular noise
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub enum CellularDistanceFunction {
#[default]
Euclidean,
@ -762,7 +589,8 @@ pub enum CellularDistanceFunction {
Hybrid,
}
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub enum CellularReturnType {
CellValue,
#[default]
@ -781,7 +609,8 @@ pub enum CellularReturnType {
}
/// Type of domain warp
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Dropdown)]
pub enum DomainWarpType {
#[default]
@ -796,7 +625,7 @@ pub enum DomainWarpType {
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27mixr%27%20%3D%20Channel%20Mixer
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=Lab%20color%20only-,Channel%20Mixer,-Key%20is%20%27mixr
#[node_macro::node(category("Raster: Adjustment"), properties("channel_mixer_properties"))]
#[node_macro::node(category("Raster: Adjustment"), properties("channel_mixer_properties"), shader_node(PerPixelAdjust))]
async fn channel_mixer<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -891,7 +720,8 @@ async fn channel_mixer<T: Adjust<Color>>(
image
}
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
#[widget(Radio)]
pub enum RelativeAbsolute {
#[default]
@ -900,7 +730,8 @@ pub enum RelativeAbsolute {
}
#[repr(C)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DynAny, node_macro::ChoiceType, specta::Type, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, node_macro::ChoiceType)]
#[cfg_attr(feature = "std", derive(dyn_any::DynAny, specta::Type, serde::Serialize, serde::Deserialize))]
pub enum SelectiveColorChoice {
#[default]
Reds,
@ -922,7 +753,7 @@ pub enum SelectiveColorChoice {
//
// Algorithm based on:
// https://blog.pkh.me/p/22-understanding-selective-coloring-in-adobe-photoshop.html
#[node_macro::node(category("Raster: Adjustment"), properties("selective_color_properties"))]
#[node_macro::node(category("Raster: Adjustment"), properties("selective_color_properties"), shader_node(PerPixelAdjust))]
async fn selective_color<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -1064,7 +895,7 @@ async fn selective_color<T: Adjust<Color>>(
// Algorithm based on:
// https://www.axiomx.com/posterize.htm
// This algorithm produces fully accurate output in relation to the industry standard.
#[node_macro::node(category("Raster: Adjustment"))]
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
async fn posterize<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -1097,7 +928,7 @@ async fn posterize<T: Adjust<Color>>(
//
// Algorithm based on:
// https://geraldbakker.nl/psnumbers/exposure.html
#[node_macro::node(category("Raster: Adjustment"), properties("exposure_properties"))]
#[node_macro::node(category("Raster: Adjustment"), properties("exposure_properties"), shader_node(PerPixelAdjust))]
async fn exposure<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
@ -1126,79 +957,3 @@ async fn exposure<T: Adjust<Color>>(
});
input
}
#[node_macro::node(category("Raster: Adjustment"))]
fn color_overlay<T: Adjust<Color>>(
_: impl Ctx,
#[implementations(
Color,
RasterDataTable<CPU>,
GradientStops,
)]
mut image: T,
#[default(Color::BLACK)] color: Color,
blend_mode: BlendMode,
#[default(100.)] opacity: Percentage,
) -> T {
let opacity = (opacity as f32 / 100.).clamp(0., 1.);
image.adjust(|pixel| {
let image = pixel.map_rgb(|channel| channel * (1. - opacity));
// The apply blend mode function divides rgb by the alpha channel for the background. This undoes that.
let associated_pixel = Color::from_rgbaf32_unchecked(pixel.r() * pixel.a(), pixel.g() * pixel.a(), pixel.b() * pixel.a(), pixel.a());
let overlay = apply_blend_mode(color, associated_pixel, blend_mode).map_rgb(|channel| channel * opacity);
Color::from_rgbaf32_unchecked(image.r() + overlay.r(), image.g() + overlay.g(), image.b() + overlay.b(), pixel.a())
});
image
}
// pub use index_node::IndexNode;
// mod index_node {
// use crate::raster::{Color, Image};
// use crate::Ctx;
// #[node_macro::node(category(""))]
// pub fn index<T: Default + Clone>(
// _: impl Ctx,
// #[implementations(Vec<Image<Color>>, Vec<Color>)]
// #[widget(ParsedWidgetOverride::Hidden)]
// input: Vec<T>,
// index: u32,
// ) -> T {
// if (index as usize) < input.len() {
// input[index as usize].clone()
// } else {
// warn!("The number of segments is {} but the requested segment is {}!", input.len(), index);
// Default::default()
// }
// }
// }
#[cfg(test)]
mod test {
use graphene_core::blending::BlendMode;
use graphene_core::color::Color;
use graphene_core::raster::image::Image;
use graphene_core::raster_types::{Raster, RasterDataTable};
#[tokio::test]
async fn color_overlay_multiply() {
let image_color = Color::from_rgbaf32_unchecked(0.7, 0.6, 0.5, 0.4);
let image = Image::new(1, 1, image_color);
// Color { red: 0., green: 1., blue: 0., alpha: 1. }
let overlay_color = Color::GREEN;
// 100% of the output should come from the multiplied value
let opacity = 100_f64;
let result = super::color_overlay((), RasterDataTable::new(Raster::new_cpu(image.clone())), overlay_color, BlendMode::Multiply, opacity);
let result = result.instance_ref_iter().next().unwrap().instance;
// The output should just be the original green and alpha channels (as we multiply them by 1 and other channels by 0)
assert_eq!(result.data[0], Color::from_rgbaf32_unchecked(0., image_color.g(), 0., image_color.a()));
}
}

View file

@ -0,0 +1,207 @@
use crate::adjust::Adjust;
#[cfg(feature = "std")]
use graphene_core::gradient::GradientStops;
#[cfg(feature = "std")]
use graphene_core::raster_types::{CPU, RasterDataTable};
use graphene_core_shaders::Ctx;
use graphene_core_shaders::blending::BlendMode;
use graphene_core_shaders::color::{Color, Pixel};
use graphene_core_shaders::registry::types::Percentage;
/// Pairwise blending of two like-shaped color containers: `self` is the "over" layer,
/// `under` the layer beneath, and `blend_fn` combines each `(over, under)` pixel pair.
pub trait Blend<P: Pixel> {
	fn blend(&self, under: &Self, blend_fn: impl Fn(P, P) -> P) -> Self;
}
impl Blend<Color> for Color {
	/// Two bare colors blend by applying `blend_fn` directly.
	fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
		blend_fn(*self, *under)
	}
}
impl Blend<Color> for Option<Color> {
	/// Blends two optional colors: when both are present the pair is blended; when only
	/// one side is present that color passes through unchanged; two `None`s stay `None`.
	fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
		match (self, under) {
			(Some(over), Some(under)) => Some(blend_fn(*over, *under)),
			(Some(over), None) => Some(*over),
			(None, under) => *under,
		}
	}
}
// `Blend` implementations that need std-only container types from `graphene_core`.
#[cfg(feature = "std")]
mod blend_std {
	use super::*;
	use core::cmp::Ordering;
	use graphene_core::raster::Image;
	use graphene_core::raster_types::Raster;

	impl Blend<Color> for RasterDataTable<CPU> {
		/// Blends paired raster instances pixel-by-pixel. Instances are matched up by
		/// zipping the two tables, so trailing instances of the longer table are dropped
		/// from the pairing (the result keeps `self`'s instances via the initial clone).
		fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
			let mut result_table = self.clone();

			for (over, under) in result_table.instance_mut_iter().zip(under.instance_ref_iter()) {
				// NOTE(review): pixels are zipped without checking that the two rasters have
				// the same dimensions — a smaller `under` truncates the output data. Confirm
				// callers guarantee matching sizes.
				let data = over.instance.data.iter().zip(under.instance.data.iter()).map(|(a, b)| blend_fn(*a, *b)).collect();

				*over.instance = Raster::new_cpu(Image {
					data,
					width: over.instance.width,
					height: over.instance.height,
					base64_string: None,
				});
			}

			result_table
		}
	}

	impl Blend<Color> for GradientStops {
		/// Blends two gradients by evaluating both at the union of their stop positions
		/// and blending the sampled colors at each position.
		fn blend(&self, under: &Self, blend_fn: impl Fn(Color, Color) -> Color) -> Self {
			let mut combined_stops = self.iter().map(|(position, _)| position).chain(under.iter().map(|(position, _)| position)).collect::<Vec<_>>();
			// Sort BEFORE deduplicating: `Vec::dedup_by` only removes *consecutive* matching
			// elements, so near-equal positions contributed by the two different gradients
			// would survive deduplication if the list were still unsorted.
			combined_stops.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
			combined_stops.dedup_by(|&mut a, &mut b| (a - b).abs() < 1e-6);

			let stops = combined_stops
				.into_iter()
				.map(|&position| {
					let over_color = self.evaluate(position);
					let under_color = under.evaluate(position);
					let color = blend_fn(over_color, under_color);
					(position, color)
				})
				.collect::<Vec<_>>();

			GradientStops::new(stops)
		}
	}
}
/// Composites `foreground` over `background` with the given blend mode and opacity.
/// `opacity` is a 0–1 fraction here (callers divide their percentage by 100).
/// The alpha-manipulation modes (`Erase`, `Restore`, `MultiplyAlpha`) return early and
/// ignore `opacity`; every other mode goes through `apply_blend_mode` and is then
/// alpha-blended over the background at the requested opacity.
#[inline(always)]
pub fn blend_colors(foreground: Color, background: Color, blend_mode: BlendMode, opacity: f64) -> Color {
	let target_color = match blend_mode {
		// Other utility blend modes (hidden from the normal list) - do not have alpha blend
		BlendMode::Erase => return background.alpha_subtract(foreground),
		BlendMode::Restore => return background.alpha_add(foreground),
		BlendMode::MultiplyAlpha => return background.alpha_multiply(foreground),
		blend_mode => apply_blend_mode(foreground, background, blend_mode),
	};
	background.alpha_blend(target_color.to_associated_alpha(opacity as f32))
}
/// Dispatches a blend mode to the corresponding `Color` blending method, producing the
/// blended color without any opacity/alpha compositing (that is `blend_colors`' job).
///
/// # Panics
/// Panics on the alpha-only utility modes (e.g. `Erase`, `Restore`, `MultiplyAlpha`) —
/// those are handled by `blend_colors` before this function is reached.
pub fn apply_blend_mode(foreground: Color, background: Color, blend_mode: BlendMode) -> Color {
	match blend_mode {
		// Normal group
		BlendMode::Normal => background.blend_rgb(foreground, Color::blend_normal),
		// Darken group
		BlendMode::Darken => background.blend_rgb(foreground, Color::blend_darken),
		BlendMode::Multiply => background.blend_rgb(foreground, Color::blend_multiply),
		BlendMode::ColorBurn => background.blend_rgb(foreground, Color::blend_color_burn),
		BlendMode::LinearBurn => background.blend_rgb(foreground, Color::blend_linear_burn),
		BlendMode::DarkerColor => background.blend_darker_color(foreground),
		// Lighten group
		BlendMode::Lighten => background.blend_rgb(foreground, Color::blend_lighten),
		BlendMode::Screen => background.blend_rgb(foreground, Color::blend_screen),
		BlendMode::ColorDodge => background.blend_rgb(foreground, Color::blend_color_dodge),
		BlendMode::LinearDodge => background.blend_rgb(foreground, Color::blend_linear_dodge),
		BlendMode::LighterColor => background.blend_lighter_color(foreground),
		// Contrast group
		// Overlay swaps the operand order: it is hard-light with the layers exchanged.
		BlendMode::Overlay => foreground.blend_rgb(background, Color::blend_hardlight),
		BlendMode::SoftLight => background.blend_rgb(foreground, Color::blend_softlight),
		BlendMode::HardLight => background.blend_rgb(foreground, Color::blend_hardlight),
		BlendMode::VividLight => background.blend_rgb(foreground, Color::blend_vivid_light),
		BlendMode::LinearLight => background.blend_rgb(foreground, Color::blend_linear_light),
		BlendMode::PinLight => background.blend_rgb(foreground, Color::blend_pin_light),
		BlendMode::HardMix => background.blend_rgb(foreground, Color::blend_hard_mix),
		// Inversion group
		BlendMode::Difference => background.blend_rgb(foreground, Color::blend_difference),
		BlendMode::Exclusion => background.blend_rgb(foreground, Color::blend_exclusion),
		BlendMode::Subtract => background.blend_rgb(foreground, Color::blend_subtract),
		BlendMode::Divide => background.blend_rgb(foreground, Color::blend_divide),
		// Component group
		BlendMode::Hue => background.blend_hue(foreground),
		BlendMode::Saturation => background.blend_saturation(foreground),
		BlendMode::Color => background.blend_color(foreground),
		BlendMode::Luminosity => background.blend_luminosity(foreground),
		// Other utility blend modes (hidden from the normal list) - do not have alpha blend
		_ => panic!("Used blend mode without alpha blend"),
	}
}
/// Node: blends the `over` input onto the `under` input using the selected blend mode
/// and opacity percentage (0–100, converted to a 0–1 fraction before blending).
#[node_macro::node(category("Raster"), shader_node(PerPixelAdjust))]
async fn blend<T: Blend<Color> + Send>(
	_: impl Ctx,
	#[implementations(
		Color,
		RasterDataTable<CPU>,
		GradientStops,
	)]
	over: T,
	#[expose]
	#[implementations(
		Color,
		RasterDataTable<CPU>,
		GradientStops,
	)]
	under: T,
	blend_mode: BlendMode,
	#[default(100.)] opacity: Percentage,
) -> T {
	over.blend(&under, |a, b| blend_colors(a, b, blend_mode, opacity / 100.))
}
/// Node: overlays a solid `color` onto the image using the given blend mode, then
/// linearly mixes the blended result with the original pixel by `opacity` (0–100%).
/// Each pixel's alpha channel is preserved.
#[node_macro::node(category("Raster: Adjustment"), shader_node(PerPixelAdjust))]
fn color_overlay<T: Adjust<Color>>(
	_: impl Ctx,
	#[implementations(
		Color,
		RasterDataTable<CPU>,
		GradientStops,
	)]
	mut image: T,
	#[default(Color::BLACK)] color: Color,
	blend_mode: BlendMode,
	#[default(100.)] opacity: Percentage,
) -> T {
	// Convert the percentage to a 0–1 mix factor.
	let opacity = (opacity as f32 / 100.).clamp(0., 1.);

	image.adjust(|pixel| {
		// The original pixel's contribution, scaled down by the overlay's opacity.
		let image = pixel.map_rgb(|channel| channel * (1. - opacity));

		// The apply blend mode function divides rgb by the alpha channel for the background. This undoes that.
		let associated_pixel = Color::from_rgbaf32_unchecked(pixel.r() * pixel.a(), pixel.g() * pixel.a(), pixel.b() * pixel.a(), pixel.a());
		let overlay = apply_blend_mode(color, associated_pixel, blend_mode).map_rgb(|channel| channel * opacity);

		// Sum the two contributions channel-wise, keeping the pixel's original alpha.
		Color::from_rgbaf32_unchecked(image.r() + overlay.r(), image.g() + overlay.g(), image.b() + overlay.b(), pixel.a())
	});

	image
}
/// Blends a `(foreground, background)` color pair, pulling the blend mode and opacity
/// percentage from the two supplied parameter nodes (each evaluated with `()`), then
/// delegating to `blend_colors`. `skip_impl` — presumably registered manually elsewhere;
/// confirm against the node registry.
#[cfg(feature = "std")]
#[node_macro::node(category(""), skip_impl)]
fn blend_color_pair<BlendModeNode, OpacityNode>(input: (Color, Color), blend_mode: &'n BlendModeNode, opacity: &'n OpacityNode) -> Color
where
	BlendModeNode: graphene_core::Node<'n, (), Output = BlendMode> + 'n,
	OpacityNode: graphene_core::Node<'n, (), Output = Percentage> + 'n,
{
	let blend_mode = blend_mode.eval(());
	let opacity = opacity.eval(());
	blend_colors(input.0, input.1, blend_mode, opacity / 100.)
}
#[cfg(all(feature = "std", test))]
mod test {
	use graphene_core::blending::BlendMode;
	use graphene_core::color::Color;
	use graphene_core::raster::image::Image;
	use graphene_core::raster_types::{Raster, RasterDataTable};

	/// Multiplying a 1×1 image by pure green at 100% opacity should zero the red and
	/// blue channels while leaving green and alpha unchanged.
	#[tokio::test]
	async fn color_overlay_multiply() {
		let image_color = Color::from_rgbaf32_unchecked(0.7, 0.6, 0.5, 0.4);
		let image = Image::new(1, 1, image_color);

		// Color { red: 0., green: 1., blue: 0., alpha: 1. }
		let overlay_color = Color::GREEN;

		// 100% of the output should come from the multiplied value
		let opacity = 100_f64;

		let result = super::color_overlay((), RasterDataTable::new(Raster::new_cpu(image.clone())), overlay_color, BlendMode::Multiply, opacity);
		let result = result.instance_ref_iter().next().unwrap().instance;

		// The output should just be the original green and alpha channels (as we multiply them by 1 and other channels by 0)
		assert_eq!(result.data[0], Color::from_rgbaf32_unchecked(0., image_color.g(), 0., image_color.a()));
	}
}

View file

@ -0,0 +1,123 @@
/// Four (x, y) control points of a cubic spline, stored as parallel arrays.
/// `solve` assumes the x values are strictly increasing (distinct knots); equal
/// neighbors would divide by zero when building the system of equations.
#[derive(Debug)]
pub struct CubicSplines {
	pub x: [f32; 4],
	pub y: [f32; 4],
}
impl CubicSplines {
	/// Solves the 4×4 linear system derived from the control points, returning one
	/// solution value per knot (consumed by `interpolate` alongside the points).
	/// Uses Gaussian elimination with partial pivoting on an augmented matrix whose
	/// 5th column is the right-hand side.
	///
	/// # Panics
	/// Asserts that every pivot stays above `f32::EPSILON`, so a degenerate system
	/// (e.g. coincident x values) panics rather than producing NaNs.
	pub fn solve(&self) -> [f32; 4] {
		let (x, y) = (&self.x, &self.y);

		// Build an augmented matrix to solve the system of equations using Gaussian elimination
		// Rows couple each knot to its neighbors; entries are scaled by the reciprocal
		// widths of the surrounding x intervals.
		let mut augmented_matrix = [
			[
				2. / (x[1] - x[0]),
				1. / (x[1] - x[0]),
				0.,
				0.,
				// | (right-hand side)
				3. * (y[1] - y[0]) / ((x[1] - x[0]) * (x[1] - x[0])),
			],
			[
				1. / (x[1] - x[0]),
				2. * (1. / (x[1] - x[0]) + 1. / (x[2] - x[1])),
				1. / (x[2] - x[1]),
				0.,
				// |
				3. * ((y[1] - y[0]) / ((x[1] - x[0]) * (x[1] - x[0])) + (y[2] - y[1]) / ((x[2] - x[1]) * (x[2] - x[1]))),
			],
			[
				0.,
				1. / (x[2] - x[1]),
				2. * (1. / (x[2] - x[1]) + 1. / (x[3] - x[2])),
				1. / (x[3] - x[2]),
				// |
				3. * ((y[2] - y[1]) / ((x[2] - x[1]) * (x[2] - x[1])) + (y[3] - y[2]) / ((x[3] - x[2]) * (x[3] - x[2]))),
			],
			[
				0.,
				0.,
				1. / (x[3] - x[2]),
				2. / (x[3] - x[2]),
				// |
				3. * (y[3] - y[2]) / ((x[3] - x[2]) * (x[3] - x[2])),
			],
		];

		// Gaussian elimination: forward elimination
		for row in 0..4 {
			// Partial pivoting: among the remaining rows, pick the one with the largest
			// magnitude in the current column to minimize numerical error.
			let pivot_row_index = (row..4)
				.max_by(|&a_row, &b_row| {
					augmented_matrix[a_row][row]
						.abs()
						.partial_cmp(&augmented_matrix[b_row][row].abs())
						.unwrap_or(core::cmp::Ordering::Equal)
				})
				.unwrap();

			// Swap the current row with the row that has the largest pivot element
			augmented_matrix.swap(row, pivot_row_index);

			// Eliminate the current column in all rows below the current one
			for row_below_current in row + 1..4 {
				// A ~zero pivot means the system is singular; fail loudly.
				assert!(augmented_matrix[row][row].abs() > f32::EPSILON);
				let scale_factor = augmented_matrix[row_below_current][row] / augmented_matrix[row][row];
				// Columns before `row` are already zero, so start the update there (5 = 4 + RHS).
				for col in row..5 {
					augmented_matrix[row_below_current][col] -= augmented_matrix[row][col] * scale_factor
				}
			}
		}

		// Gaussian elimination: back substitution
		let mut solutions = [0.; 4];
		for col in (0..4).rev() {
			assert!(augmented_matrix[col][col].abs() > f32::EPSILON);
			solutions[col] = augmented_matrix[col][4] / augmented_matrix[col][col];

			// Fold the solved value into the right-hand side of every earlier row.
			for row in (0..col).rev() {
				augmented_matrix[row][4] -= augmented_matrix[row][col] * solutions[col];
				augmented_matrix[row][col] = 0.;
			}
		}

		solutions
	}
pub fn interpolate(&self, input: f32, solutions: &[f32]) -> f32 {
if input <= self.x[0] {
return self.y[0];
}
if input >= self.x[self.x.len() - 1] {
return self.y[self.x.len() - 1];
}
// Find the segment that the input falls between
let mut segment = 1;
while self.x[segment] < input {
segment += 1;
}
let segment_start = segment - 1;
let segment_end = segment;
// Calculate the output value using quadratic interpolation
let input_value = self.x[segment_start];
let input_value_prev = self.x[segment_end];
let output_value = self.y[segment_start];
let output_value_prev = self.y[segment_end];
let solutions_value = solutions[segment_start];
let solutions_value_prev = solutions[segment_end];
let output_delta = solutions_value_prev * (input_value - input_value_prev) - (output_value - output_value_prev);
let solution_delta = (output_value - output_value_prev) - solutions_value * (input_value - input_value_prev);
let input_ratio = (input - input_value_prev) / (input_value - input_value_prev);
let prev_output_ratio = (1. - input_ratio) * output_value_prev;
let output_ratio = input_ratio * output_value;
let quadratic_ratio = input_ratio * (1. - input_ratio) * (output_delta * (1. - input_ratio) + solution_delta * input_ratio);
let result = prev_output_ratio + output_ratio + quadratic_ratio;
result.clamp(0., 1.)
}
}

View file

@ -45,125 +45,6 @@ impl Hash for CurveManipulatorGroup {
}
}
#[derive(Debug)]
pub struct CubicSplines {
pub x: [f32; 4],
pub y: [f32; 4],
}
impl CubicSplines {
pub fn solve(&self) -> [f32; 4] {
let (x, y) = (&self.x, &self.y);
// Build an augmented matrix to solve the system of equations using Gaussian elimination
let mut augmented_matrix = [
[
2. / (x[1] - x[0]),
1. / (x[1] - x[0]),
0.,
0.,
// |
3. * (y[1] - y[0]) / ((x[1] - x[0]) * (x[1] - x[0])),
],
[
1. / (x[1] - x[0]),
2. * (1. / (x[1] - x[0]) + 1. / (x[2] - x[1])),
1. / (x[2] - x[1]),
0.,
// |
3. * ((y[1] - y[0]) / ((x[1] - x[0]) * (x[1] - x[0])) + (y[2] - y[1]) / ((x[2] - x[1]) * (x[2] - x[1]))),
],
[
0.,
1. / (x[2] - x[1]),
2. * (1. / (x[2] - x[1]) + 1. / (x[3] - x[2])),
1. / (x[3] - x[2]),
// |
3. * ((y[2] - y[1]) / ((x[2] - x[1]) * (x[2] - x[1])) + (y[3] - y[2]) / ((x[3] - x[2]) * (x[3] - x[2]))),
],
[
0.,
0.,
1. / (x[3] - x[2]),
2. / (x[3] - x[2]),
// |
3. * (y[3] - y[2]) / ((x[3] - x[2]) * (x[3] - x[2])),
],
];
// Gaussian elimination: forward elimination
for row in 0..4 {
let pivot_row_index = (row..4)
.max_by(|&a_row, &b_row| augmented_matrix[a_row][row].abs().partial_cmp(&augmented_matrix[b_row][row].abs()).unwrap_or(std::cmp::Ordering::Equal))
.unwrap();
// Swap the current row with the row that has the largest pivot element
augmented_matrix.swap(row, pivot_row_index);
// Eliminate the current column in all rows below the current one
for row_below_current in row + 1..4 {
assert!(augmented_matrix[row][row].abs() > f32::EPSILON);
let scale_factor = augmented_matrix[row_below_current][row] / augmented_matrix[row][row];
for col in row..5 {
augmented_matrix[row_below_current][col] -= augmented_matrix[row][col] * scale_factor
}
}
}
// Gaussian elimination: back substitution
let mut solutions = [0.; 4];
for col in (0..4).rev() {
assert!(augmented_matrix[col][col].abs() > f32::EPSILON);
solutions[col] = augmented_matrix[col][4] / augmented_matrix[col][col];
for row in (0..col).rev() {
augmented_matrix[row][4] -= augmented_matrix[row][col] * solutions[col];
augmented_matrix[row][col] = 0.;
}
}
solutions
}
pub fn interpolate(&self, input: f32, solutions: &[f32]) -> f32 {
if input <= self.x[0] {
return self.y[0];
}
if input >= self.x[self.x.len() - 1] {
return self.y[self.x.len() - 1];
}
// Find the segment that the input falls between
let mut segment = 1;
while self.x[segment] < input {
segment += 1;
}
let segment_start = segment - 1;
let segment_end = segment;
// Calculate the output value using quadratic interpolation
let input_value = self.x[segment_start];
let input_value_prev = self.x[segment_end];
let output_value = self.y[segment_start];
let output_value_prev = self.y[segment_end];
let solutions_value = solutions[segment_start];
let solutions_value_prev = solutions[segment_end];
let output_delta = solutions_value_prev * (input_value - input_value_prev) - (output_value - output_value_prev);
let solution_delta = (output_value - output_value_prev) - solutions_value * (input_value - input_value_prev);
let input_ratio = (input - input_value_prev) / (input_value - input_value_prev);
let prev_output_ratio = (1. - input_ratio) * output_value_prev;
let output_ratio = input_ratio * output_value;
let quadratic_ratio = input_ratio * (1. - input_ratio) * (output_delta * (1. - input_ratio) + solution_delta * input_ratio);
let result = prev_output_ratio + output_ratio + quadratic_ratio;
result.clamp(0., 1.)
}
}
/// Node data holding a lookup table (LUT) of `C` values.
/// NOTE(review): the mapping implementation lives outside this view — presumably
/// input values index into `lut`; confirm against the `impl` block.
pub struct ValueMapperNode<C> {
	// Lookup table of output values.
	lut: Vec<C>,
}

View file

@ -8,34 +8,32 @@ use std::cmp::{max, min};
#[node_macro::node(category("Raster: Filter"))]
async fn dehaze(_: impl Ctx, image_frame: RasterDataTable<CPU>, strength: Percentage) -> RasterDataTable<CPU> {
let mut result_table = RasterDataTable::default();
image_frame
.instance_iter()
.map(|mut image_frame_instance| {
let image = image_frame_instance.instance;
// Prepare the image data for processing
let image_data = bytemuck::cast_vec(image.data.clone());
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, image_data).expect("Failed to convert internal image format into image-rs data type.");
let dynamic_image: DynamicImage = image_buffer.into();
for mut image_frame_instance in image_frame.instance_iter() {
let image = image_frame_instance.instance;
// Prepare the image data for processing
let image_data = bytemuck::cast_vec(image.data.clone());
let image_buffer = image::Rgba32FImage::from_raw(image.width, image.height, image_data).expect("Failed to convert internal image format into image-rs data type.");
let dynamic_image: DynamicImage = image_buffer.into();
// Run the dehaze algorithm
let dehazed_dynamic_image = dehaze_image(dynamic_image, strength / 100.);
// Run the dehaze algorithm
let dehazed_dynamic_image = dehaze_image(dynamic_image, strength / 100.);
// Prepare the image data for returning
let buffer = dehazed_dynamic_image.to_rgba32f().into_raw();
let color_vec = bytemuck::cast_vec(buffer);
let dehazed_image = Image {
width: image.width,
height: image.height,
data: color_vec,
base64_string: None,
};
// Prepare the image data for returning
let buffer = dehazed_dynamic_image.to_rgba32f().into_raw();
let color_vec = bytemuck::cast_vec(buffer);
let dehazed_image = Image {
width: image.width,
height: image.height,
data: color_vec,
base64_string: None,
};
image_frame_instance.instance = Raster::new_cpu(dehazed_image);
image_frame_instance.source_node_id = None;
result_table.push(image_frame_instance);
}
result_table
image_frame_instance.instance = Raster::new_cpu(dehazed_image);
image_frame_instance
})
.collect()
}
// There is no real point in modifying these values because they do not change the final result all that much.

View file

@ -20,27 +20,25 @@ async fn blur(
/// Opt to incorrectly apply the filter with color calculations in gamma space for compatibility with the results from other software.
gamma: bool,
) -> RasterDataTable<CPU> {
let mut result_table = RasterDataTable::default();
image_frame
.instance_iter()
.map(|mut image_instance| {
let image = image_instance.instance.clone();
for mut image_instance in image_frame.instance_iter() {
let image = image_instance.instance.clone();
// Run blur algorithm
let blurred_image = if radius < 0.1 {
// Minimum blur radius
image.clone()
} else if box_blur {
Raster::new_cpu(box_blur_algorithm(image.into_data(), radius, gamma))
} else {
Raster::new_cpu(gaussian_blur_algorithm(image.into_data(), radius, gamma))
};
// Run blur algorithm
let blurred_image = if radius < 0.1 {
// Minimum blur radius
image.clone()
} else if box_blur {
Raster::new_cpu(box_blur_algorithm(image.into_data(), radius, gamma))
} else {
Raster::new_cpu(gaussian_blur_algorithm(image.into_data(), radius, gamma))
};
image_instance.instance = blurred_image;
image_instance.source_node_id = None;
result_table.push(image_instance);
}
result_table
image_instance.instance = blurred_image;
image_instance
})
.collect()
}
// 1D gaussian kernel

View file

@ -0,0 +1,30 @@
//! Not immediately shader compatible due to needing [`GradientStops`] as a param, which needs [`Vec`]
use crate::adjust::Adjust;
use graphene_core::gradient::GradientStops;
use graphene_core::raster_types::{CPU, RasterDataTable};
use graphene_core::{Color, Ctx};
// Aims for interoperable compatibility with:
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=%27grdm%27%20%3D%20Gradient%20Map
// https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#:~:text=Gradient%20settings%20(Photoshop%206.0)
/// Recolors the input by looking up each pixel's sRGB luminance along a gradient,
/// optionally reversing the gradient direction.
#[node_macro::node(category("Raster: Adjustment"))]
async fn gradient_map<T: Adjust<Color>>(
	_: impl Ctx,
	#[implementations(
		Color,
		RasterDataTable<CPU>,
		GradientStops,
	)]
	mut image: T,
	gradient: GradientStops,
	reverse: bool,
) -> T {
	image.adjust(|color| {
		// The luminance becomes the sample position along the gradient, flipped when requested.
		let luminance = color.luminance_srgb();
		let position = if reverse { 1. - luminance } else { luminance };
		gradient.evaluate(position as f64).to_linear_srgb()
	});
	image
}

View file

@ -1,7 +1,24 @@
#![cfg_attr(not(feature = "std"), no_std)]
// Without std (e.g. when compiled for a shader target), re-export the no_std
// `glam` from the shared shader crate so all raster code uses one glam source.
#[cfg(not(feature = "std"))]
pub use graphene_core_shaders::glam;
// These modules build both with and without std (no feature gate).
pub mod adjust;
pub mod adjustments;
pub mod blending_nodes;
pub mod cubic_spline;
// The remaining modules are gated behind the `std` feature.
#[cfg(feature = "std")]
pub mod curve;
#[cfg(feature = "std")]
pub mod dehaze;
#[cfg(feature = "std")]
pub mod filter;
#[cfg(feature = "std")]
pub mod generate_curves;
#[cfg(feature = "std")]
pub mod gradient_map;
#[cfg(feature = "std")]
pub mod image_color_palette;
#[cfg(feature = "std")]
pub mod std_nodes;

View file

@ -31,69 +31,67 @@ impl From<std::io::Error> for Error {
#[node_macro::node(category("Debug: Raster"))]
pub fn sample_image(ctx: impl ExtractFootprint + Clone + Send, image_frame: RasterDataTable<CPU>) -> RasterDataTable<CPU> {
let mut result_table = RasterDataTable::default();
image_frame
.instance_iter()
.filter_map(|mut image_frame_instance| {
let image_frame_transform = image_frame_instance.transform;
let image = image_frame_instance.instance;
for mut image_frame_instance in image_frame.instance_iter() {
let image_frame_transform = image_frame_instance.transform;
let image = image_frame_instance.instance;
// Resize the image using the image crate
let data = bytemuck::cast_vec(image.data.clone());
// Resize the image using the image crate
let data = bytemuck::cast_vec(image.data.clone());
let footprint = ctx.footprint();
let viewport_bounds = footprint.viewport_bounds_in_local_space();
let image_bounds = Bbox::from_transform(image_frame_transform).to_axis_aligned_bbox();
let intersection = viewport_bounds.intersect(&image_bounds);
let image_size = DAffine2::from_scale(DVec2::new(image.width as f64, image.height as f64));
let size = intersection.size();
let size_px = image_size.transform_vector2(size).as_uvec2();
let footprint = ctx.footprint();
let viewport_bounds = footprint.viewport_bounds_in_local_space();
let image_bounds = Bbox::from_transform(image_frame_transform).to_axis_aligned_bbox();
let intersection = viewport_bounds.intersect(&image_bounds);
let image_size = DAffine2::from_scale(DVec2::new(image.width as f64, image.height as f64));
let size = intersection.size();
let size_px = image_size.transform_vector2(size).as_uvec2();
// If the image would not be visible, add nothing.
if size.x <= 0. || size.y <= 0. {
return None;
}
// If the image would not be visible, add nothing.
if size.x <= 0. || size.y <= 0. {
continue;
}
let image_buffer = ::image::Rgba32FImage::from_raw(image.width, image.height, data).expect("Failed to convert internal image format into image-rs data type.");
let image_buffer = ::image::Rgba32FImage::from_raw(image.width, image.height, data).expect("Failed to convert internal image format into image-rs data type.");
let dynamic_image: ::image::DynamicImage = image_buffer.into();
let offset = (intersection.start - image_bounds.start).max(DVec2::ZERO);
let offset_px = image_size.transform_vector2(offset).as_uvec2();
let cropped = dynamic_image.crop_imm(offset_px.x, offset_px.y, size_px.x, size_px.y);
let dynamic_image: ::image::DynamicImage = image_buffer.into();
let offset = (intersection.start - image_bounds.start).max(DVec2::ZERO);
let offset_px = image_size.transform_vector2(offset).as_uvec2();
let cropped = dynamic_image.crop_imm(offset_px.x, offset_px.y, size_px.x, size_px.y);
let viewport_resolution_x = footprint.transform.transform_vector2(DVec2::X * size.x).length();
let viewport_resolution_y = footprint.transform.transform_vector2(DVec2::Y * size.y).length();
let mut new_width = size_px.x;
let mut new_height = size_px.y;
let viewport_resolution_x = footprint.transform.transform_vector2(DVec2::X * size.x).length();
let viewport_resolution_y = footprint.transform.transform_vector2(DVec2::Y * size.y).length();
let mut new_width = size_px.x;
let mut new_height = size_px.y;
// Only downscale the image for now
let resized = if new_width < image.width || new_height < image.height {
new_width = viewport_resolution_x as u32;
new_height = viewport_resolution_y as u32;
// TODO: choose filter based on quality requirements
cropped.resize_exact(new_width, new_height, ::image::imageops::Triangle)
} else {
cropped
};
let buffer = resized.to_rgba32f();
let buffer = buffer.into_raw();
let vec = bytemuck::cast_vec(buffer);
let image = Image {
width: new_width,
height: new_height,
data: vec,
base64_string: None,
};
// we need to adjust the offset if we truncate the offset calculation
// Only downscale the image for now
let resized = if new_width < image.width || new_height < image.height {
new_width = viewport_resolution_x as u32;
new_height = viewport_resolution_y as u32;
// TODO: choose filter based on quality requirements
cropped.resize_exact(new_width, new_height, ::image::imageops::Triangle)
} else {
cropped
};
let buffer = resized.to_rgba32f();
let buffer = buffer.into_raw();
let vec = bytemuck::cast_vec(buffer);
let image = Image {
width: new_width,
height: new_height,
data: vec,
base64_string: None,
};
// we need to adjust the offset if we truncate the offset calculation
let new_transform = image_frame_transform * DAffine2::from_translation(offset) * DAffine2::from_scale(size);
let new_transform = image_frame_transform * DAffine2::from_translation(offset) * DAffine2::from_scale(size);
image_frame_instance.transform = new_transform;
image_frame_instance.source_node_id = None;
image_frame_instance.instance = Raster::new_cpu(image);
result_table.push(image_frame_instance)
}
result_table
image_frame_instance.transform = new_transform;
image_frame_instance.instance = Raster::new_cpu(image);
Some(image_frame_instance)
})
.collect()
}
#[node_macro::node(category("Raster: Channels"))]
@ -105,84 +103,84 @@ pub fn combine_channels(
#[expose] blue: RasterDataTable<CPU>,
#[expose] alpha: RasterDataTable<CPU>,
) -> RasterDataTable<CPU> {
let mut result_table = RasterDataTable::default();
let max_len = red.len().max(green.len()).max(blue.len()).max(alpha.len());
let red = red.instance_iter().map(Some).chain(std::iter::repeat(None)).take(max_len);
let green = green.instance_iter().map(Some).chain(std::iter::repeat(None)).take(max_len);
let blue = blue.instance_iter().map(Some).chain(std::iter::repeat(None)).take(max_len);
let alpha = alpha.instance_iter().map(Some).chain(std::iter::repeat(None)).take(max_len);
for (((red, green), blue), alpha) in red.zip(green).zip(blue).zip(alpha) {
// Turn any default zero-sized image instances into None
let red = red.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let green = green.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let blue = blue.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let alpha = alpha.filter(|i| i.instance.width > 0 && i.instance.height > 0);
red.zip(green)
.zip(blue)
.zip(alpha)
.filter_map(|(((red, green), blue), alpha)| {
// Turn any default zero-sized image instances into None
let red = red.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let green = green.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let blue = blue.filter(|i| i.instance.width > 0 && i.instance.height > 0);
let alpha = alpha.filter(|i| i.instance.width > 0 && i.instance.height > 0);
// Get this instance's transform and alpha blending mode from the first non-empty channel
let Some((transform, alpha_blending)) = [&red, &green, &blue, &alpha].iter().find_map(|i| i.as_ref()).map(|i| (i.transform, i.alpha_blending)) else {
continue;
};
// Get the common width and height of the channels, which must have equal dimensions
let channel_dimensions = [
red.as_ref().map(|r| (r.instance.width, r.instance.height)),
green.as_ref().map(|g| (g.instance.width, g.instance.height)),
blue.as_ref().map(|b| (b.instance.width, b.instance.height)),
alpha.as_ref().map(|a| (a.instance.width, a.instance.height)),
];
if channel_dimensions.iter().all(Option::is_none)
|| channel_dimensions
// Get this instance's transform and alpha blending mode from the first non-empty channel
let (transform, alpha_blending, source_node_id) = [&red, &green, &blue, &alpha]
.iter()
.flatten()
.any(|&(x, y)| channel_dimensions.iter().flatten().any(|&(other_x, other_y)| x != other_x || y != other_y))
{
continue;
}
let Some(&(width, height)) = channel_dimensions.iter().flatten().next() else { continue };
.find_map(|i| i.as_ref())
.map(|i| (i.transform, i.alpha_blending, i.source_node_id))?;
// Create a new image for this instance output
let mut image = Image::new(width, height, Color::TRANSPARENT);
// Get the common width and height of the channels, which must have equal dimensions
let channel_dimensions = [
red.as_ref().map(|r| (r.instance.width, r.instance.height)),
green.as_ref().map(|g| (g.instance.width, g.instance.height)),
blue.as_ref().map(|b| (b.instance.width, b.instance.height)),
alpha.as_ref().map(|a| (a.instance.width, a.instance.height)),
];
if channel_dimensions.iter().all(Option::is_none)
|| channel_dimensions
.iter()
.flatten()
.any(|&(x, y)| channel_dimensions.iter().flatten().any(|&(other_x, other_y)| x != other_x || y != other_y))
{
return None;
}
let &(width, height) = channel_dimensions.iter().flatten().next()?;
// Iterate over all pixels in the image and set the color channels
for y in 0..image.height() {
for x in 0..image.width() {
let image_pixel = image.get_pixel_mut(x, y).unwrap();
// Create a new image for this instance output
let mut image = Image::new(width, height, Color::TRANSPARENT);
if let Some(r) = red.as_ref().and_then(|r| r.instance.get_pixel(x, y)) {
image_pixel.set_red(r.l().cast_linear_channel());
} else {
image_pixel.set_red(Channel::from_linear(0.));
}
if let Some(g) = green.as_ref().and_then(|g| g.instance.get_pixel(x, y)) {
image_pixel.set_green(g.l().cast_linear_channel());
} else {
image_pixel.set_green(Channel::from_linear(0.));
}
if let Some(b) = blue.as_ref().and_then(|b| b.instance.get_pixel(x, y)) {
image_pixel.set_blue(b.l().cast_linear_channel());
} else {
image_pixel.set_blue(Channel::from_linear(0.));
}
if let Some(a) = alpha.as_ref().and_then(|a| a.instance.get_pixel(x, y)) {
image_pixel.set_alpha(a.l().cast_linear_channel());
} else {
image_pixel.set_alpha(Channel::from_linear(1.));
// Iterate over all pixels in the image and set the color channels
for y in 0..image.height() {
for x in 0..image.width() {
let image_pixel = image.get_pixel_mut(x, y).unwrap();
if let Some(r) = red.as_ref().and_then(|r| r.instance.get_pixel(x, y)) {
image_pixel.set_red(r.l().cast_linear_channel());
} else {
image_pixel.set_red(Channel::from_linear(0.));
}
if let Some(g) = green.as_ref().and_then(|g| g.instance.get_pixel(x, y)) {
image_pixel.set_green(g.l().cast_linear_channel());
} else {
image_pixel.set_green(Channel::from_linear(0.));
}
if let Some(b) = blue.as_ref().and_then(|b| b.instance.get_pixel(x, y)) {
image_pixel.set_blue(b.l().cast_linear_channel());
} else {
image_pixel.set_blue(Channel::from_linear(0.));
}
if let Some(a) = alpha.as_ref().and_then(|a| a.instance.get_pixel(x, y)) {
image_pixel.set_alpha(a.l().cast_linear_channel());
} else {
image_pixel.set_alpha(Channel::from_linear(1.));
}
}
}
}
// Add this instance to the result table
result_table.push(Instance {
instance: Raster::new_cpu(image),
transform,
alpha_blending,
source_node_id: None,
});
}
result_table
Some(Instance {
instance: Raster::new_cpu(image),
transform,
alpha_blending,
source_node_id,
})
})
.collect()
}
#[node_macro::node(category("Raster"))]
@ -201,91 +199,84 @@ pub fn mask(
};
let stencil_size = DVec2::new(stencil_instance.instance.width as f64, stencil_instance.instance.height as f64);
let mut result_table = RasterDataTable::default();
image
.instance_iter()
.filter_map(|mut image_instance| {
let image_size = DVec2::new(image_instance.instance.width as f64, image_instance.instance.height as f64);
let mask_size = stencil_instance.transform.decompose_scale();
for mut image_instance in image.instance_iter() {
let image_size = DVec2::new(image_instance.instance.width as f64, image_instance.instance.height as f64);
let mask_size = stencil_instance.transform.decompose_scale();
if mask_size == DVec2::ZERO {
continue;
}
// Transforms a point from the background image to the foreground image
let bg_to_fg = image_instance.transform * DAffine2::from_scale(1. / image_size);
let stencil_transform_inverse = stencil_instance.transform.inverse();
for y in 0..image_instance.instance.height {
for x in 0..image_instance.instance.width {
let image_point = DVec2::new(x as f64, y as f64);
let mask_point = bg_to_fg.transform_point2(image_point);
let local_mask_point = stencil_transform_inverse.transform_point2(mask_point);
let mask_point = stencil_instance.transform.transform_point2(local_mask_point.clamp(DVec2::ZERO, DVec2::ONE));
let mask_point = (DAffine2::from_scale(stencil_size) * stencil_instance.transform.inverse()).transform_point2(mask_point);
let image_pixel = image_instance.instance.data_mut().get_pixel_mut(x, y).unwrap();
let mask_pixel = stencil_instance.instance.sample(mask_point);
*image_pixel = image_pixel.multiplied_alpha(mask_pixel.l().cast_linear_channel());
if mask_size == DVec2::ZERO {
return None;
}
}
result_table.push(image_instance);
}
// Transforms a point from the background image to the foreground image
let bg_to_fg = image_instance.transform * DAffine2::from_scale(1. / image_size);
let stencil_transform_inverse = stencil_instance.transform.inverse();
result_table
for y in 0..image_instance.instance.height {
for x in 0..image_instance.instance.width {
let image_point = DVec2::new(x as f64, y as f64);
let mask_point = bg_to_fg.transform_point2(image_point);
let local_mask_point = stencil_transform_inverse.transform_point2(mask_point);
let mask_point = stencil_instance.transform.transform_point2(local_mask_point.clamp(DVec2::ZERO, DVec2::ONE));
let mask_point = (DAffine2::from_scale(stencil_size) * stencil_instance.transform.inverse()).transform_point2(mask_point);
let image_pixel = image_instance.instance.data_mut().get_pixel_mut(x, y).unwrap();
let mask_pixel = stencil_instance.instance.sample(mask_point);
*image_pixel = image_pixel.multiplied_alpha(mask_pixel.l().cast_linear_channel());
}
}
Some(image_instance)
})
.collect()
}
#[node_macro::node(category(""))]
pub fn extend_image_to_bounds(_: impl Ctx, image: RasterDataTable<CPU>, bounds: DAffine2) -> RasterDataTable<CPU> {
let mut result_table = RasterDataTable::default();
for mut image_instance in image.instance_iter() {
let image_aabb = Bbox::unit().affine_transform(image_instance.transform).to_axis_aligned_bbox();
let bounds_aabb = Bbox::unit().affine_transform(bounds.transform()).to_axis_aligned_bbox();
if image_aabb.contains(bounds_aabb.start) && image_aabb.contains(bounds_aabb.end) {
result_table.push(image_instance);
continue;
}
let image_data = &image_instance.instance.data;
let (image_width, image_height) = (image_instance.instance.width, image_instance.instance.height);
if image_width == 0 || image_height == 0 {
for image_instance in empty_image((), bounds, Color::TRANSPARENT).instance_iter() {
result_table.push(image_instance);
image
.instance_iter()
.map(|mut image_instance| {
let image_aabb = Bbox::unit().affine_transform(image_instance.transform).to_axis_aligned_bbox();
let bounds_aabb = Bbox::unit().affine_transform(bounds.transform()).to_axis_aligned_bbox();
if image_aabb.contains(bounds_aabb.start) && image_aabb.contains(bounds_aabb.end) {
return image_instance;
}
continue;
}
let orig_image_scale = DVec2::new(image_width as f64, image_height as f64);
let layer_to_image_space = DAffine2::from_scale(orig_image_scale) * image_instance.transform.inverse();
let bounds_in_image_space = Bbox::unit().affine_transform(layer_to_image_space * bounds).to_axis_aligned_bbox();
let image_data = &image_instance.instance.data;
let (image_width, image_height) = (image_instance.instance.width, image_instance.instance.height);
if image_width == 0 || image_height == 0 {
return empty_image((), bounds, Color::TRANSPARENT).instance_iter().next().unwrap();
}
let new_start = bounds_in_image_space.start.floor().min(DVec2::ZERO);
let new_end = bounds_in_image_space.end.ceil().max(orig_image_scale);
let new_scale = new_end - new_start;
let orig_image_scale = DVec2::new(image_width as f64, image_height as f64);
let layer_to_image_space = DAffine2::from_scale(orig_image_scale) * image_instance.transform.inverse();
let bounds_in_image_space = Bbox::unit().affine_transform(layer_to_image_space * bounds).to_axis_aligned_bbox();
// Copy over original image into enlarged image.
let mut new_image = Image::new(new_scale.x as u32, new_scale.y as u32, Color::TRANSPARENT);
let offset_in_new_image = (-new_start).as_uvec2();
for y in 0..image_height {
let old_start = y * image_width;
let new_start = (y + offset_in_new_image.y) * new_image.width + offset_in_new_image.x;
let old_row = &image_data[old_start as usize..(old_start + image_width) as usize];
let new_row = &mut new_image.data[new_start as usize..(new_start + image_width) as usize];
new_row.copy_from_slice(old_row);
}
let new_start = bounds_in_image_space.start.floor().min(DVec2::ZERO);
let new_end = bounds_in_image_space.end.ceil().max(orig_image_scale);
let new_scale = new_end - new_start;
// Compute new transform.
// let layer_to_new_texture_space = (DAffine2::from_scale(1. / new_scale) * DAffine2::from_translation(new_start) * layer_to_image_space).inverse();
let new_texture_to_layer_space = image_instance.transform * DAffine2::from_scale(1. / orig_image_scale) * DAffine2::from_translation(new_start) * DAffine2::from_scale(new_scale);
// Copy over original image into enlarged image.
let mut new_image = Image::new(new_scale.x as u32, new_scale.y as u32, Color::TRANSPARENT);
let offset_in_new_image = (-new_start).as_uvec2();
for y in 0..image_height {
let old_start = y * image_width;
let new_start = (y + offset_in_new_image.y) * new_image.width + offset_in_new_image.x;
let old_row = &image_data[old_start as usize..(old_start + image_width) as usize];
let new_row = &mut new_image.data[new_start as usize..(new_start + image_width) as usize];
new_row.copy_from_slice(old_row);
}
image_instance.instance = Raster::new_cpu(new_image);
image_instance.transform = new_texture_to_layer_space;
image_instance.source_node_id = None;
result_table.push(image_instance);
}
// Compute new transform.
// let layer_to_new_texture_space = (DAffine2::from_scale(1. / new_scale) * DAffine2::from_translation(new_start) * layer_to_image_space).inverse();
let new_texture_to_layer_space = image_instance.transform * DAffine2::from_scale(1. / orig_image_scale) * DAffine2::from_translation(new_start) * DAffine2::from_scale(new_scale);
result_table
image_instance.instance = Raster::new_cpu(new_image);
image_instance.transform = new_texture_to_layer_space;
image_instance
})
.collect()
}
#[node_macro::node(category("Debug: Raster"))]
@ -392,14 +383,11 @@ pub fn noise_pattern(
}
}
let mut result = RasterDataTable::default();
result.push(Instance {
return RasterDataTable::new_instance(Instance {
instance: Raster::new_cpu(image),
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
..Default::default()
});
return result;
}
};
noise.set_noise_type(Some(noise_type));
@ -457,14 +445,11 @@ pub fn noise_pattern(
}
}
let mut result = RasterDataTable::default();
result.push(Instance {
RasterDataTable::new_instance(Instance {
instance: Raster::new_cpu(image),
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
..Default::default()
});
result
})
}
#[node_macro::node(category("Raster: Pattern"))]
@ -502,20 +487,16 @@ pub fn mandelbrot(ctx: impl ExtractFootprint + Send) -> RasterDataTable<CPU> {
}
}
let image = Image {
width,
height,
data,
..Default::default()
};
let mut result = RasterDataTable::default();
result.push(Instance {
instance: Raster::new_cpu(image),
RasterDataTable::new_instance(Instance {
instance: Raster::new_cpu(Image {
width,
height,
data,
..Default::default()
}),
transform: DAffine2::from_translation(offset) * DAffine2::from_scale(size),
..Default::default()
});
result
})
}
#[inline(always)]

View file

@ -1,11 +0,0 @@
use graphene_core::Ctx;
/// Performs an HTTP GET request to `url` and returns the raw response.
///
/// The node signature has no error channel (it returns `reqwest::Response`
/// directly), so a failed request can only panic; `expect` at least records
/// the reason instead of panicking without context.
#[node_macro::node(category("Web Request"))]
async fn get_request(_: impl Ctx, url: String) -> reqwest::Response {
	reqwest::get(url).await.expect("HTTP GET request failed")
}
/// Performs an HTTP POST request to `url` with `body` as the request payload
/// and returns the raw response.
///
/// The node signature has no error channel (it returns `reqwest::Response`
/// directly), so a failed request can only panic; `expect` at least records
/// the reason instead of panicking without context.
#[node_macro::node(category("Web Request"))]
async fn post_request(_: impl Ctx, url: String, body: String) -> reqwest::Response {
	reqwest::Client::new().post(url).body(body).send().await.expect("HTTP POST request failed")
}

View file

@ -1,5 +1,4 @@
pub mod any;
pub mod http;
pub mod text;
#[cfg(feature = "wasm")]
pub mod wasm_application_io;

View file

@ -1,4 +1,4 @@
use crate::vector::{VectorData, VectorDataTable};
use crate::vector::VectorDataTable;
use graph_craft::wasm_application_io::WasmEditorApi;
use graphene_core::Ctx;
pub use graphene_core::text::*;
@ -24,9 +24,14 @@ fn text<'i: 'n>(
#[unit(" px")]
#[default(None)]
max_height: Option<f64>,
/// Faux italic.
#[unit("°")]
#[default(0.)]
tilt: f64,
align: TextAlign,
/// Splits each text glyph into its own instance, i.e. row in the table of vector data.
#[default(false)]
per_glyph_instances: bool,
) -> VectorDataTable {
let typesetting = TypesettingConfig {
font_size,
@ -35,11 +40,10 @@ fn text<'i: 'n>(
max_width,
max_height,
tilt,
align,
};
let font_data = editor.font_cache.get(&font_name).map(|f| load_font(f));
let result = VectorData::from_subpaths(to_path(&text, font_data, typesetting), false);
VectorDataTable::new(result)
to_path(&text, font_data, typesetting, per_glyph_instances)
}

View file

@ -30,34 +30,80 @@ async fn create_surface<'a: 'n>(_: impl Ctx, editor: &'a WasmEditorApi) -> Arc<W
Arc::new(editor.application_io.as_ref().unwrap().create_window())
}
// TODO: Fix and reenable in order to get the 'Draw Canvas' node working again.
// #[cfg(target_arch = "wasm32")]
// use wasm_bindgen::Clamped;
//
// #[node_macro::node(category("Debug: GPU"))]
// #[cfg(target_arch = "wasm32")]
// async fn draw_image_frame(
// _: impl Ctx,
// image: RasterDataTable<graphene_core::raster::SRGBA8>,
// surface_handle: Arc<WasmSurfaceHandle>,
// ) -> graphene_core::application_io::SurfaceHandleFrame<HtmlCanvasElement> {
// let image = image.instance_ref_iter().next().unwrap().instance;
// let image_data = image.image.data;
// let array: Clamped<&[u8]> = Clamped(bytemuck::cast_slice(image_data.as_slice()));
// if image.image.width > 0 && image.image.height > 0 {
// let canvas = &surface_handle.surface;
// canvas.set_width(image.image.width);
// canvas.set_height(image.image.height);
// // TODO: replace "2d" with "bitmaprenderer" once we switch to ImageBitmap (lives on gpu) from RasterData (lives on cpu)
// let context = canvas.get_context("2d").unwrap().unwrap().dyn_into::<CanvasRenderingContext2d>().unwrap();
// let image_data = web_sys::ImageData::new_with_u8_clamped_array_and_sh(array, image.image.width, image.image.height).expect("Failed to construct RasterData");
// context.put_image_data(&image_data, 0., 0.).unwrap();
// }
// graphene_core::application_io::SurfaceHandleFrame {
// surface_handle,
// transform: image.transform,
// }
// }
#[node_macro::node(category("Web Request"))]
async fn get_request(_: impl Ctx, _primary: (), #[name("URL")] url: String, discard_result: bool) -> String {
#[cfg(target_arch = "wasm32")]
{
if discard_result {
wasm_bindgen_futures::spawn_local(async move {
let _ = reqwest::get(url).await;
});
return String::new();
}
}
#[cfg(not(target_arch = "wasm32"))]
{
#[cfg(feature = "tokio")]
if discard_result {
tokio::spawn(async move {
let _ = reqwest::get(url).await;
});
return String::new();
}
#[cfg(not(feature = "tokio"))]
if discard_result {
return String::new();
}
}
let Ok(response) = reqwest::get(url).await else { return String::new() };
response.text().await.ok().unwrap_or_default()
}
#[node_macro::node(category("Web Request"))]
async fn post_request(_: impl Ctx, _primary: (), #[name("URL")] url: String, body: Vec<u8>, discard_result: bool) -> String {
#[cfg(target_arch = "wasm32")]
{
if discard_result {
wasm_bindgen_futures::spawn_local(async move {
let _ = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await;
});
return String::new();
}
}
#[cfg(not(target_arch = "wasm32"))]
{
#[cfg(feature = "tokio")]
if discard_result {
let url = url.clone();
let body = body.clone();
tokio::spawn(async move {
let _ = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await;
});
return String::new();
}
#[cfg(not(feature = "tokio"))]
if discard_result {
return String::new();
}
}
let Ok(response) = reqwest::Client::new().post(url).body(body).header("Content-Type", "application/octet-stream").send().await else {
return String::new();
};
response.text().await.ok().unwrap_or_default()
}
#[node_macro::node(category("Web Request"), name("String to Bytes"))]
fn string_to_bytes(_: impl Ctx, string: String) -> Vec<u8> {
string.into_bytes()
}
#[node_macro::node(category("Web Request"), name("Image to Bytes"))]
fn image_to_bytes(_: impl Ctx, image: RasterDataTable<CPU>) -> Vec<u8> {
let Some(image) = image.instance_ref_iter().next() else { return vec![] };
image.instance.data.iter().flat_map(|color| color.to_rgb8_srgb().into_iter()).collect::<Vec<u8>>()
}
#[node_macro::node(category("Web Request"))]
async fn load_resource<'a: 'n>(_: impl Ctx, _primary: (), #[scope("editor-api")] editor: &'a WasmEditorApi, #[name("URL")] url: String) -> Arc<[u8]> {
@ -112,7 +158,10 @@ fn render_svg(data: impl GraphicElementRendered, mut render: SvgRender, render_p
render.wrap_with_transform(footprint.transform, Some(footprint.resolution.as_dvec2()));
RenderOutputType::Svg(render.svg.to_svg_string())
RenderOutputType::Svg {
svg: render.svg.to_svg_string(),
image_data: render.image_data,
}
}
#[cfg(feature = "vello")]
@ -121,10 +170,10 @@ async fn render_canvas(
render_config: RenderConfig,
data: impl GraphicElementRendered,
editor: &WasmEditorApi,
surface_handle: wgpu_executor::WgpuSurface,
surface_handle: Option<wgpu_executor::WgpuSurface>,
render_params: RenderParams,
) -> RenderOutputType {
use graphene_application_io::SurfaceFrame;
use graphene_application_io::{ImageTexture, SurfaceFrame};
let footprint = render_config.viewport;
let Some(exec) = editor.application_io.as_ref().unwrap().gpu_executor() else {
@ -145,17 +194,26 @@ async fn render_canvas(
if !data.contains_artboard() && !render_config.hide_artboards {
background = Color::WHITE;
}
exec.render_vello_scene(&scene, &surface_handle, footprint.resolution.x, footprint.resolution.y, &context, background)
.await
.expect("Failed to render Vello scene");
if let Some(surface_handle) = surface_handle {
exec.render_vello_scene(&scene, &surface_handle, footprint.resolution, &context, background)
.await
.expect("Failed to render Vello scene");
let frame = SurfaceFrame {
surface_id: surface_handle.window_id,
resolution: render_config.viewport.resolution,
transform: glam::DAffine2::IDENTITY,
};
let frame = SurfaceFrame {
surface_id: surface_handle.window_id,
resolution: render_config.viewport.resolution,
transform: glam::DAffine2::IDENTITY,
};
RenderOutputType::CanvasFrame(frame)
RenderOutputType::CanvasFrame(frame)
} else {
let texture = exec
.render_vello_scene_to_texture(&scene, footprint.resolution, &context, background)
.await
.expect("Failed to render Vello scene");
RenderOutputType::Texture(ImageTexture { texture: Arc::new(texture) })
}
}
#[cfg(target_arch = "wasm32")]
@ -187,6 +245,7 @@ where
let resolution = footprint.resolution;
let render_params = RenderParams {
culling_bounds: None,
for_export: true,
..Default::default()
};
@ -217,15 +276,12 @@ where
let rasterized = context.get_image_data(0., 0., resolution.x as f64, resolution.y as f64).unwrap();
let mut result = RasterDataTable::default();
let image = Image::from_image_data(&rasterized.data().0, resolution.x as u32, resolution.y as u32);
result.push(Instance {
RasterDataTable::new_instance(Instance {
instance: Raster::new_cpu(image),
transform: footprint.transform,
..Default::default()
});
result
})
}
#[node_macro::node(category(""))]
@ -270,12 +326,14 @@ async fn render<'a: 'n, T: 'n + GraphicElementRendered + WasmNotSend>(
let data = data.eval(ctx.clone()).await;
let editor_api = editor_api.eval(None).await;
#[cfg(all(feature = "vello", not(test)))]
let surface_handle = _surface_handle.eval(None).await;
#[cfg(all(feature = "vello", not(test), target_arch = "wasm32"))]
let _surface_handle = _surface_handle.eval(None).await;
#[cfg(not(target_arch = "wasm32"))]
let _surface_handle: Option<wgpu_executor::WgpuSurface> = None;
let use_vello = editor_api.editor_preferences.use_vello();
#[cfg(all(feature = "vello", not(test)))]
let use_vello = use_vello && surface_handle.is_some();
#[cfg(all(feature = "vello", not(test), target_arch = "wasm32"))]
let use_vello = use_vello && _surface_handle.is_some();
let mut metadata = RenderMetadata::default();
data.collect_metadata(&mut metadata, footprint, None);
@ -287,7 +345,7 @@ async fn render<'a: 'n, T: 'n + GraphicElementRendered + WasmNotSend>(
if use_vello && editor_api.application_io.as_ref().unwrap().gpu_executor().is_some() {
#[cfg(all(feature = "vello", not(test)))]
return RenderOutput {
data: render_canvas(render_config, data, editor_api, surface_handle.unwrap(), render_params).await,
data: render_canvas(render_config, data, editor_api, _surface_handle, render_params).await,
metadata,
};
#[cfg(any(not(feature = "vello"), test))]

View file

@ -16,10 +16,13 @@ use graphene_core::uuid::{NodeId, generate_uuid};
use graphene_core::vector::VectorDataTable;
use graphene_core::vector::click_target::{ClickTarget, FreePoint};
use graphene_core::vector::style::{Fill, Stroke, StrokeAlign, ViewMode};
use graphene_core::{AlphaBlending, Artboard, ArtboardGroupTable, GraphicElement, GraphicGroupTable};
use graphene_core::{Artboard, ArtboardGroupTable, GraphicElement, GraphicGroupTable};
use num_traits::Zero;
use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use std::hash::{DefaultHasher, Hash, Hasher};
use std::ops::Deref;
use std::sync::{Arc, LazyLock};
#[cfg(feature = "vello")]
use vello::*;
@ -38,10 +41,10 @@ impl MaskType {
}
fn write_to_defs(self, svg_defs: &mut String, uuid: u64, svg_string: String) {
let id = format!("mask-{}", uuid);
let id = format!("mask-{uuid}");
match self {
Self::Clip => write!(svg_defs, r##"<clipPath id="{id}">{}</clipPath>"##, svg_string).unwrap(),
Self::Mask => write!(svg_defs, r##"<mask id="{id}" mask-type="alpha">{}</mask>"##, svg_string).unwrap(),
Self::Clip => write!(svg_defs, r##"<clipPath id="{id}">{svg_string}</clipPath>"##).unwrap(),
Self::Mask => write!(svg_defs, r##"<mask id="{id}" mask-type="alpha">{svg_string}</mask>"##).unwrap(),
}
}
}
@ -89,9 +92,9 @@ impl SvgRender {
.unwrap_or_default();
let matrix = format_transform_matrix(transform);
let transform = if matrix.is_empty() { String::new() } else { format!(r#" transform="{}""#, matrix) };
let transform = if matrix.is_empty() { String::new() } else { format!(r#" transform="{matrix}""#) };
let svg_header = format!(r#"<svg xmlns="http://www.w3.org/2000/svg" {}><defs>{defs}</defs><g{transform}>"#, view_box);
let svg_header = format!(r#"<svg xmlns="http://www.w3.org/2000/svg" {view_box}><defs>{defs}</defs><g{transform}>"#);
self.svg.insert(0, svg_header.into());
self.svg.push("</g></svg>".into());
}
@ -145,7 +148,7 @@ impl Default for SvgRender {
#[derive(Clone, Debug, Default)]
pub struct RenderContext {
#[cfg(feature = "vello")]
pub resource_overrides: HashMap<u64, std::sync::Arc<wgpu::Texture>>,
pub resource_overrides: Vec<(peniko::Image, wgpu::Texture)>,
}
/// Static state used whilst rendering
@ -156,11 +159,11 @@ pub struct RenderParams {
pub thumbnail: bool,
/// Don't render the rectangle for an artboard to allow exporting with a transparent background.
pub hide_artboards: bool,
/// Are we exporting? Causes the text above an artboard to be hidden.
/// Are we exporting as a standalone SVG?
pub for_export: bool,
/// Are we generating a mask in this render pass? Used to see if fill should be multiplied with alpha.
pub for_mask: bool,
/// Are we generating a mask for alignment? Used to prevent unnecesary transforms in masks
/// Are we generating a mask for alignment? Used to prevent unnecessary transforms in masks
pub alignment_parent_transform: Option<DAffine2>,
}
@ -173,6 +176,10 @@ impl RenderParams {
let alignment_parent_transform = Some(transform);
Self { alignment_parent_transform, ..*self }
}
pub fn to_canvas(&self) -> bool {
!self.for_export && !self.thumbnail && !self.for_mask
}
}
pub fn format_transform_matrix(transform: DAffine2) -> String {
@ -241,8 +248,7 @@ impl GraphicElementRendered for GraphicGroupTable {
attributes.push("transform", matrix);
}
let factor = if render_params.for_mask { 1. } else { instance.alpha_blending.fill };
let opacity = instance.alpha_blending.opacity * factor;
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. {
attributes.push("opacity", opacity.to_string());
}
@ -267,7 +273,7 @@ impl GraphicElementRendered for GraphicGroupTable {
mask_state = None;
}
let id = format!("mask-{}", uuid);
let id = format!("mask-{uuid}");
let selector = format!("url(#{id})");
attributes.push(mask_type.to_attribute(), selector);
@ -297,13 +303,9 @@ impl GraphicElementRendered for GraphicGroupTable {
};
let mut bounds = None;
let factor = if render_params.for_mask { 1. } else { alpha_blending.fill };
let opacity = alpha_blending.opacity * factor;
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. || (render_params.view_mode != ViewMode::Outline && alpha_blending.blend_mode != BlendMode::default()) {
bounds = self
.instance_ref_iter()
.filter_map(|element| element.instance.bounding_box(transform, true))
.reduce(Quad::combine_bounds);
bounds = instance.instance.bounding_box(transform, true);
if let Some(bounds) = bounds {
scene.push_layer(
@ -326,10 +328,7 @@ impl GraphicElementRendered for GraphicGroupTable {
mask_instance_state = None;
}
if !layer {
bounds = self
.instance_ref_iter()
.filter_map(|element| element.instance.bounding_box(transform, true))
.reduce(Quad::combine_bounds);
bounds = instance.instance.bounding_box(transform, true);
}
if let Some(bounds) = bounds {
@ -444,18 +443,18 @@ impl GraphicElementRendered for VectorDataTable {
let can_use_order = !instance.instance.style.fill().is_none() && mask_type == MaskType::Mask;
if !can_use_order {
let id = format!("alignment-{}", generate_uuid());
let mut vector_row = VectorDataTable::default();
let mut fill_instance = instance.instance.clone();
let mut fill_instance = instance.instance.clone();
fill_instance.style.clear_stroke();
fill_instance.style.set_fill(Fill::solid(Color::BLACK));
vector_row.push(Instance {
let vector_row = VectorDataTable::new_instance(Instance {
instance: fill_instance,
alpha_blending: *instance.alpha_blending,
transform: *instance.transform,
source_node_id: None,
});
push_id = Some((id, mask_type, vector_row));
}
}
@ -477,7 +476,7 @@ impl GraphicElementRendered for VectorDataTable {
let (x, y) = quad.top_left().into();
let (width, height) = (quad.bottom_right() - quad.top_left()).into();
write!(defs, r##"{}"##, svg.svg_defs).unwrap();
let rect = format!(r##"<rect x="{}" y="{}" width="{width}" height="{height}" fill="white" />"##, x, y);
let rect = format!(r##"<rect x="{x}" y="{y}" width="{width}" height="{height}" fill="white" />"##);
match mask_type {
MaskType::Clip => write!(defs, r##"<clipPath id="{id}">{}</clipPath>"##, svg.svg.to_svg_string()).unwrap(),
MaskType::Mask => write!(defs, r##"<mask id="{id}">{}{}</mask>"##, rect, svg.svg.to_svg_string()).unwrap(),
@ -501,8 +500,7 @@ impl GraphicElementRendered for VectorDataTable {
}
attributes.push_val(fill_and_stroke);
let factor = if render_params.for_mask { 1. } else { instance.alpha_blending.fill };
let opacity = instance.alpha_blending.opacity * factor;
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. {
attributes.push("opacity", opacity.to_string());
}
@ -543,10 +541,13 @@ impl GraphicElementRendered for VectorDataTable {
_ => instance.alpha_blending.blend_mode.to_peniko(),
};
let mut layer = false;
let factor = if render_params.for_mask { 1. } else { instance.alpha_blending.fill };
let opacity = instance.alpha_blending.opacity * factor;
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. || instance.alpha_blending.blend_mode != BlendMode::default() {
layer = true;
let weight = instance.instance.style.stroke().unwrap().weight;
let quad = Quad::from_box(layer_bounds).inflate(weight * element_transform.matrix2.determinant());
let layer_bounds = quad.bounding_box();
scene.push_layer(
peniko::BlendMode::new(blend_mode, peniko::Compose::SrcOver),
opacity,
@ -558,34 +559,29 @@ impl GraphicElementRendered for VectorDataTable {
let can_draw_aligned_stroke = instance.instance.style.stroke().is_some_and(|stroke| stroke.has_renderable_stroke() && stroke.align.is_not_centered())
&& instance.instance.stroke_bezier_paths().all(|path| path.closed());
let reorder_for_outside = instance
.instance
.style
.stroke()
.is_some_and(|stroke| stroke.align == StrokeAlign::Outside && !instance.instance.style.fill().is_none());
if can_draw_aligned_stroke && !reorder_for_outside {
let mut vector_data = VectorDataTable::default();
let reorder_for_outside = instance.instance.style.stroke().is_some_and(|stroke| stroke.align == StrokeAlign::Outside) && !instance.instance.style.fill().is_none();
let use_layer = can_draw_aligned_stroke && !reorder_for_outside;
if use_layer {
let mut fill_instance = instance.instance.clone();
fill_instance.style.clear_stroke();
fill_instance.style.set_fill(Fill::solid(Color::BLACK));
vector_data.push(Instance {
let vector_data = VectorDataTable::new_instance(Instance {
instance: fill_instance,
alpha_blending: *instance.alpha_blending,
transform: *instance.transform,
source_node_id: None,
});
let bounds = instance.instance.bounding_box_with_transform(multiplied_transform).unwrap_or(layer_bounds);
let weight = instance.instance.style.stroke().unwrap().weight;
let quad = Quad::from_box(layer_bounds).inflate(weight * element_transform.matrix2.determinant());
let rect = kurbo::Rect::new(quad.top_left().x, quad.top_left().y, quad.bottom_right().x, quad.bottom_right().y);
let quad = Quad::from_box(bounds).inflate(weight * element_transform.matrix2.determinant());
let bounds = quad.bounding_box();
let rect = kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y);
let inside = instance.instance.style.stroke().unwrap().align == StrokeAlign::Inside;
let compose = if inside { peniko::Compose::SrcIn } else { peniko::Compose::SrcOut };
scene.push_layer(peniko::Mix::Normal, 1., kurbo::Affine::IDENTITY, &rect);
vector_data.render_to_vello(scene, parent_transform, _context, &render_params.for_alignment(applied_stroke_transform));
scene.push_layer(peniko::BlendMode::new(peniko::Mix::Clip, compose), 1., kurbo::Affine::IDENTITY, &rect);
scene.push_layer(peniko::BlendMode::new(peniko::Mix::Clip, peniko::Compose::SrcIn), 1., kurbo::Affine::IDENTITY, &rect);
}
// Render the path
@ -619,6 +615,7 @@ impl GraphicElementRendered for VectorDataTable {
true => [Op::Stroke, Op::Fill],
false => [Op::Fill, Op::Stroke], // Default
};
for operation in order {
match operation {
Op::Fill => {
@ -639,7 +636,11 @@ impl GraphicElementRendered for VectorDataTable {
let bounds = instance.instance.nonzero_bounding_box();
let bound_transform = DAffine2::from_scale_angle_translation(bounds[1] - bounds[0], 0., bounds[0]);
let inverse_parent_transform = (parent_transform.matrix2.determinant() != 0.).then(|| parent_transform.inverse()).unwrap_or_default();
let inverse_parent_transform = if parent_transform.matrix2.determinant() != 0. {
parent_transform.inverse()
} else {
Default::default()
};
let mod_points = inverse_parent_transform * multiplied_transform * bound_transform;
let start = mod_points.transform_point2(gradient.start);
@ -666,7 +667,11 @@ impl GraphicElementRendered for VectorDataTable {
});
// Vello does `element_transform * brush_transform` internally. We don't want element_transform to have any impact so we need to left multiply by the inverse.
// This makes the final internal brush transform equal to `parent_transform`, allowing you to stretch a gradient by transforming the parent folder.
let inverse_element_transform = (element_transform.matrix2.determinant() != 0.).then(|| element_transform.inverse()).unwrap_or_default();
let inverse_element_transform = if element_transform.matrix2.determinant() != 0. {
element_transform.inverse()
} else {
Default::default()
};
let brush_transform = kurbo::Affine::new((inverse_element_transform * parent_transform).to_cols_array());
scene.fill(peniko::Fill::NonZero, kurbo::Affine::new(element_transform.to_cols_array()), &fill, Some(brush_transform), &path);
}
@ -710,7 +715,7 @@ impl GraphicElementRendered for VectorDataTable {
}
}
if can_draw_aligned_stroke {
if use_layer {
scene.pop_layer();
scene.pop_layer();
}
@ -756,7 +761,7 @@ impl GraphicElementRendered for VectorDataTable {
.chain(single_anchors_targets.into_iter())
.collect::<Vec<ClickTarget>>();
metadata.click_targets.insert(element_id, click_targets);
metadata.click_targets.entry(element_id).or_insert(click_targets);
}
if let Some(upstream_graphic_group) = &instance.upstream_graphic_group {
@ -937,56 +942,122 @@ impl GraphicElementRendered for RasterDataTable<CPU> {
fn render_svg(&self, render: &mut SvgRender, render_params: &RenderParams) {
for instance in self.instance_ref_iter() {
let transform = *instance.transform;
let image = &instance.instance;
if image.data.is_empty() {
return;
continue;
}
let base64_string = image.base64_string.clone().unwrap_or_else(|| {
use base64::Engine;
if render_params.to_canvas() {
let id = instance.source_node_id.map(|x| x.0).unwrap_or_else(|| {
let mut state = DefaultHasher::new();
image.data().hash(&mut state);
state.finish()
});
if !render.image_data.iter().any(|(old_id, _)| *old_id == id) {
render.image_data.push((id, image.data().clone()));
}
render.parent_tag(
"foreignObject",
|attributes| {
let mut transform_values = transform.to_scale_angle_translation();
let size = DVec2::new(image.width as f64, image.height as f64);
transform_values.0 /= size;
let output = image.to_png();
let preamble = "data:image/png;base64,";
let mut base64_string = String::with_capacity(preamble.len() + output.len() * 4);
base64_string.push_str(preamble);
base64::engine::general_purpose::STANDARD.encode_string(output, &mut base64_string);
base64_string
});
render.leaf_tag("image", |attributes| {
attributes.push("width", 1.to_string());
attributes.push("height", 1.to_string());
attributes.push("preserveAspectRatio", "none");
attributes.push("href", base64_string);
let matrix = format_transform_matrix(transform);
if !matrix.is_empty() {
attributes.push("transform", matrix);
}
let factor = if render_params.for_mask { 1. } else { instance.alpha_blending.fill };
let opacity = instance.alpha_blending.opacity * factor;
if opacity < 1. {
attributes.push("opacity", opacity.to_string());
}
if instance.alpha_blending.blend_mode != BlendMode::default() {
attributes.push("style", instance.alpha_blending.blend_mode.render());
}
});
let matrix = DAffine2::from_scale_angle_translation(transform_values.0, transform_values.1, transform_values.2);
let matrix = format_transform_matrix(matrix);
if !matrix.is_empty() {
attributes.push("transform", matrix);
}
attributes.push("width", size.x.to_string());
attributes.push("height", size.y.to_string());
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. {
attributes.push("opacity", opacity.to_string());
}
if instance.alpha_blending.blend_mode != BlendMode::default() {
attributes.push("style", instance.alpha_blending.blend_mode.render());
}
},
|render| {
render.leaf_tag(
"img", // Must be a self-closing (void element) tag, so we can't use `div` or `span`, for example
|attributes| {
attributes.push("data-canvas-placeholder", id.to_string());
},
)
},
);
} else {
let base64_string = image.base64_string.clone().unwrap_or_else(|| {
use base64::Engine;
let output = image.to_png();
let preamble = "data:image/png;base64,";
let mut base64_string = String::with_capacity(preamble.len() + output.len() * 4);
base64_string.push_str(preamble);
base64::engine::general_purpose::STANDARD.encode_string(output, &mut base64_string);
base64_string
});
render.leaf_tag("image", |attributes| {
attributes.push("width", "1");
attributes.push("height", "1");
attributes.push("preserveAspectRatio", "none");
attributes.push("href", base64_string);
let matrix = format_transform_matrix(transform);
if !matrix.is_empty() {
attributes.push("transform", matrix);
}
let opacity = instance.alpha_blending.opacity(render_params.for_mask);
if opacity < 1. {
attributes.push("opacity", opacity.to_string());
}
if instance.alpha_blending.blend_mode != BlendMode::default() {
attributes.push("style", instance.alpha_blending.blend_mode.render());
}
});
}
}
}
#[cfg(feature = "vello")]
fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, _: &mut RenderContext, _render_params: &RenderParams) {
fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, _: &mut RenderContext, render_params: &RenderParams) {
use vello::peniko;
for instance in self.instance_ref_iter() {
let image = &instance.instance;
if image.data.is_empty() {
return;
continue;
}
let image = peniko::Image::new(image.to_flat_u8().0.into(), peniko::Format::Rgba8, image.width, image.height).with_extend(peniko::Extend::Repeat);
let transform = transform * *instance.transform * DAffine2::from_scale(1. / DVec2::new(image.width as f64, image.height as f64));
scene.draw_image(&image, kurbo::Affine::new(transform.to_cols_array()));
let alpha_blending = *instance.alpha_blending;
let blend_mode = alpha_blending.blend_mode.to_peniko();
let opacity = alpha_blending.opacity(render_params.for_mask);
let mut layer = false;
if opacity < 1. || alpha_blending.blend_mode != BlendMode::default() {
if let Some(bounds) = self.bounding_box(transform, false) {
let blending = peniko::BlendMode::new(blend_mode, peniko::Compose::SrcOver);
let rect = kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y);
scene.push_layer(blending, opacity, kurbo::Affine::IDENTITY, &rect);
layer = true;
}
}
let image = peniko::Image::new(image.to_flat_u8().0.into(), peniko::ImageFormat::Rgba8, image.width, image.height).with_extend(peniko::Extend::Repeat);
let image_transform = transform * *instance.transform * DAffine2::from_scale(1. / DVec2::new(image.width as f64, image.height as f64));
scene.draw_image(&image, kurbo::Affine::new(image_transform.to_cols_array()));
if layer {
scene.pop_layer();
}
}
}
@ -1008,6 +1079,8 @@ impl GraphicElementRendered for RasterDataTable<CPU> {
}
}
const LAZY_ARC_VEC_ZERO_U8: LazyLock<Arc<Vec<u8>>> = LazyLock::new(|| Arc::new(Vec::new()));
impl GraphicElementRendered for RasterDataTable<GPU> {
fn render_svg(&self, _render: &mut SvgRender, _render_params: &RenderParams) {
log::warn!("tried to render texture as an svg");
@ -1017,30 +1090,32 @@ impl GraphicElementRendered for RasterDataTable<GPU> {
fn render_to_vello(&self, scene: &mut Scene, transform: DAffine2, context: &mut RenderContext, _render_params: &RenderParams) {
use vello::peniko;
let mut render_stuff = |image: peniko::Image, instance_transform: DAffine2, blend_mode: AlphaBlending| {
let image_transform = transform * instance_transform * DAffine2::from_scale(1. / DVec2::new(image.width as f64, image.height as f64));
let layer = blend_mode != Default::default();
let Some(bounds) = self.bounding_box(transform, true) else { return };
let blending = peniko::BlendMode::new(blend_mode.blend_mode.to_peniko(), peniko::Compose::SrcOver);
if layer {
let rect = kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y);
scene.push_layer(blending, blend_mode.opacity, kurbo::Affine::IDENTITY, &rect);
for instance in self.instance_ref_iter() {
let blend_mode = *instance.alpha_blending;
let mut layer = false;
if blend_mode != Default::default() {
if let Some(bounds) = self.bounding_box(transform, true) {
let blending = peniko::BlendMode::new(blend_mode.blend_mode.to_peniko(), peniko::Compose::SrcOver);
let rect = kurbo::Rect::new(bounds[0].x, bounds[0].y, bounds[1].x, bounds[1].y);
scene.push_layer(blending, blend_mode.opacity, kurbo::Affine::IDENTITY, &rect);
layer = true;
}
}
let image = peniko::Image::new(
peniko::Blob::new(LAZY_ARC_VEC_ZERO_U8.deref().clone()),
peniko::ImageFormat::Rgba8,
instance.instance.data().width(),
instance.instance.data().height(),
)
.with_extend(peniko::Extend::Repeat);
let image_transform = transform * *instance.transform * DAffine2::from_scale(1. / DVec2::new(image.width as f64, image.height as f64));
scene.draw_image(&image, kurbo::Affine::new(image_transform.to_cols_array()));
context.resource_overrides.push((image, instance.instance.data().clone()));
if layer {
scene.pop_layer()
}
};
for instance in self.instance_ref_iter() {
let image = peniko::Image::new(vec![].into(), peniko::Format::Rgba8, instance.instance.data().width(), instance.instance.data().height()).with_extend(peniko::Extend::Repeat);
let id = image.data.id();
context.resource_overrides.insert(id, instance.instance.data_owned());
render_stuff(image, *instance.transform, *instance.alpha_blending);
}
}

View file

@ -1,5 +1,5 @@
use dyn_any::StaticType;
use glam::{DVec2, IVec2, UVec2};
use glam::{DAffine2, DVec2, IVec2, UVec2};
use graph_craft::document::value::RenderOutput;
use graph_craft::proto::{NodeConstructor, TypeErasedBox};
use graphene_core::raster::color::Color;
@ -52,6 +52,7 @@ fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeCons
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => String]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => IVec2]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => DVec2]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => DAffine2]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => bool]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => f64]),
async_node!(graphene_core::memo::MonitorNode<_, _, _>, input: Context, fn_params: [Context => u32]),
@ -166,7 +167,7 @@ fn node_registry() -> HashMap<ProtoNodeIdentifier, HashMap<NodeIOTypes, NodeCons
for (id, entry) in graphene_core::registry::NODE_REGISTRY.lock().unwrap().iter() {
for (constructor, types) in entry.iter() {
map.entry(id.clone().into()).or_default().insert(types.clone(), *constructor);
map.entry(id.clone()).or_default().insert(types.clone(), *constructor);
}
}

View file

@ -2,7 +2,7 @@
name = "node-macro"
publish = false
version = "0.0.0"
rust-version = "1.85"
rust-version = "1.88"
authors = ["Graphite Authors <contact@graphite.rs>"]
edition = "2024"
readme = "../../README.md"
@ -19,6 +19,7 @@ syn = { workspace = true }
proc-macro2 = { workspace = true }
quote = { workspace = true }
convert_case = { workspace = true }
strum = { workspace = true }
indoc = "2.0.5"
proc-macro-crate = "3.1.0"
@ -26,4 +27,3 @@ proc-macro-error2 = "2"
[dev-dependencies]
graphene-core = { workspace = true }

View file

@ -362,13 +362,15 @@ pub(crate) fn generate_node_code(parsed: &ParsedNodeFn) -> syn::Result<TokenStre
let properties = &attributes.properties_string.as_ref().map(|value| quote!(Some(#value))).unwrap_or(quote!(None));
let node_input_accessor = generate_node_input_references(parsed, fn_generics, &field_idents, &graphene_core, &identifier);
let cfg = crate::shader_nodes::modify_cfg(&attributes);
let node_input_accessor = generate_node_input_references(parsed, fn_generics, &field_idents, &graphene_core, &identifier, &cfg);
Ok(quote! {
/// Underlying implementation for [#struct_name]
#[inline]
#[allow(clippy::too_many_arguments)]
#vis #async_keyword fn #fn_name <'n, #(#fn_generics,)*> (#input_ident: #input_type #(, #field_idents: #field_types)*) -> #output_type #where_clause #body
#cfg
#[automatically_derived]
impl<'n, #(#fn_generics,)* #(#struct_generics,)* #(#future_idents,)*> #graphene_core::Node<'n, #input_type> for #mod_name::#struct_name<#(#struct_generics,)*>
#struct_where_clause
@ -376,16 +378,19 @@ pub(crate) fn generate_node_code(parsed: &ParsedNodeFn) -> syn::Result<TokenStre
#eval_impl
}
#cfg
const fn #identifier() -> #graphene_core::ProtoNodeIdentifier {
#graphene_core::ProtoNodeIdentifier::new(std::concat!(#identifier_path, "::", std::stringify!(#struct_name)))
}
#cfg
#[doc(inline)]
pub use #mod_name::#struct_name;
#[doc(hidden)]
#node_input_accessor
#cfg
#[doc(hidden)]
mod #mod_name {
use super::*;
@ -453,7 +458,14 @@ pub(crate) fn generate_node_code(parsed: &ParsedNodeFn) -> syn::Result<TokenStre
}
/// Generates strongly typed utilites to access inputs
fn generate_node_input_references(parsed: &ParsedNodeFn, fn_generics: &[crate::GenericParam], field_idents: &[&PatIdent], graphene_core: &TokenStream2, identifier: &Ident) -> TokenStream2 {
fn generate_node_input_references(
parsed: &ParsedNodeFn,
fn_generics: &[crate::GenericParam],
field_idents: &[&PatIdent],
graphene_core: &TokenStream2,
identifier: &Ident,
cfg: &TokenStream2,
) -> TokenStream2 {
let inputs_module_name = format_ident!("{}", parsed.struct_name.to_string().to_case(Case::Snake));
let mut generated_input_accessor = Vec::new();
@ -498,6 +510,7 @@ fn generate_node_input_references(parsed: &ParsedNodeFn, fn_generics: &[crate::G
}
quote! {
#cfg
pub mod #inputs_module_name {
use super::*;

View file

@ -111,13 +111,21 @@ fn derive_enum(enum_attributes: &[Attribute], name: Ident, input: syn::DataEnum)
})
.collect();
let crate_name = proc_macro_crate::crate_name("graphene-core")
.map_err(|e| syn::Error::new(Span::call_site(), format!("Failed to find location of graphene_core. Make sure it is imported as a dependency: {}", e)))?;
let crate_name = match crate_name {
proc_macro_crate::FoundCrate::Itself => quote!(crate),
proc_macro_crate::FoundCrate::Name(name) => {
let identifier = Ident::new(&name, Span::call_site());
quote! { #identifier }
let crate_name = {
let crate_name = proc_macro_crate::crate_name("graphene-core-shaders")
.or_else(|_e| proc_macro_crate::crate_name("graphene-core"))
.map_err(|e| {
syn::Error::new(
Span::call_site(),
format!("Failed to find location of 'graphene_core' or 'graphene-core-shaders'. Make sure it is imported as a dependency: {}", e),
)
})?;
match crate_name {
proc_macro_crate::FoundCrate::Itself => quote!(crate),
proc_macro_crate::FoundCrate::Name(name) => {
let identifier = Ident::new(&name, Span::call_site());
quote! { #identifier }
}
}
};
@ -140,19 +148,19 @@ fn derive_enum(enum_attributes: &[Attribute], name: Ident, input: syn::DataEnum)
let docstring = match &variant.basic_item.description {
Some(s) => {
let s = s.trim();
quote! { Some(::std::borrow::Cow::Borrowed(#s)) }
quote! { Some(#s) }
}
None => quote! { None },
};
let icon = match &variant.basic_item.icon {
Some(s) => quote! { Some(::std::borrow::Cow::Borrowed(#s)) },
Some(s) => quote! { Some(#s) },
None => quote! { None },
};
quote! {
(
#name::#vname, #crate_name::registry::VariantMetadata {
name: ::std::borrow::Cow::Borrowed(#vname_str),
label: ::std::borrow::Cow::Borrowed(#label),
#name::#vname, #crate_name::choice_type::VariantMetadata {
name: #vname_str,
label: #label,
docstring: #docstring,
icon: #icon,
}
@ -174,10 +182,10 @@ fn derive_enum(enum_attributes: &[Attribute], name: Ident, input: syn::DataEnum)
}
}
impl #crate_name::registry::ChoiceTypeStatic for #name {
const WIDGET_HINT: #crate_name::registry::ChoiceWidgetHint = #crate_name::registry::ChoiceWidgetHint::#widget_hint;
impl #crate_name::choice_type::ChoiceTypeStatic for #name {
const WIDGET_HINT: #crate_name::choice_type::ChoiceWidgetHint = #crate_name::choice_type::ChoiceWidgetHint::#widget_hint;
const DESCRIPTION: Option<&'static str> = #enum_description;
fn list() -> &'static [&'static [(Self, #crate_name::registry::VariantMetadata)]] {
fn list() -> &'static [&'static [(Self, #crate_name::choice_type::VariantMetadata)]] {
&[ #(#group)* ]
}
}

View file

@ -5,6 +5,7 @@ use syn::GenericParam;
mod codegen;
mod derive_choice_type;
mod parsing;
mod shader_nodes;
mod validation;
/// Used to create a node definition.

View file

@ -12,6 +12,7 @@ use syn::{
};
use crate::codegen::generate_node_code;
use crate::shader_nodes::ShaderNodeType;
#[derive(Debug)]
pub(crate) struct Implementation {
@ -45,6 +46,10 @@ pub(crate) struct NodeFnAttributes {
pub(crate) path: Option<Path>,
pub(crate) skip_impl: bool,
pub(crate) properties_string: Option<LitStr>,
/// whether to `#[cfg]` gate the node implementation, defaults to None
pub(crate) cfg: Option<TokenStream2>,
/// if this node should get a gpu implementation, defaults to None
pub(crate) shader_node: Option<ShaderNodeType>,
// Add more attributes as needed
}
@ -184,6 +189,8 @@ impl Parse for NodeFnAttributes {
let mut path = None;
let mut skip_impl = false;
let mut properties_string = None;
let mut cfg = None;
let mut shader_node = None;
let content = input;
// let content;
@ -191,8 +198,10 @@ impl Parse for NodeFnAttributes {
let nested = content.call(Punctuated::<Meta, Comma>::parse_terminated)?;
for meta in nested {
match meta {
Meta::List(meta) if meta.path.is_ident("category") => {
let name = meta.path().get_ident().ok_or_else(|| Error::new_spanned(meta.path(), "Node macro expects a known Ident, not a path"))?;
match name.to_string().as_str() {
"category" => {
let meta = meta.require_list()?;
if category.is_some() {
return Err(Error::new_spanned(meta, "Multiple 'category' attributes are not allowed"));
}
@ -201,14 +210,16 @@ impl Parse for NodeFnAttributes {
.map_err(|_| Error::new_spanned(meta, "Expected a string literal for 'category', e.g., category(\"Value\")"))?;
category = Some(lit);
}
Meta::List(meta) if meta.path.is_ident("name") => {
"name" => {
let meta = meta.require_list()?;
if display_name.is_some() {
return Err(Error::new_spanned(meta, "Multiple 'name' attributes are not allowed"));
}
let parsed_name: LitStr = meta.parse_args().map_err(|_| Error::new_spanned(meta, "Expected a string for 'name', e.g., name(\"Memoize\")"))?;
display_name = Some(parsed_name);
}
Meta::List(meta) if meta.path.is_ident("path") => {
"path" => {
let meta = meta.require_list()?;
if path.is_some() {
return Err(Error::new_spanned(meta, "Multiple 'path' attributes are not allowed"));
}
@ -217,13 +228,15 @@ impl Parse for NodeFnAttributes {
.map_err(|_| Error::new_spanned(meta, "Expected a valid path for 'path', e.g., path(crate::MemoizeNode)"))?;
path = Some(parsed_path);
}
Meta::Path(path) if path.is_ident("skip_impl") => {
"skip_impl" => {
let path = meta.require_path_only()?;
if skip_impl {
return Err(Error::new_spanned(path, "Multiple 'skip_impl' attributes are not allowed"));
}
skip_impl = true;
}
Meta::List(meta) if meta.path.is_ident("properties") => {
"properties" => {
let meta = meta.require_list()?;
if properties_string.is_some() {
return Err(Error::new_spanned(path, "Multiple 'properties_string' attributes are not allowed"));
}
@ -233,13 +246,27 @@ impl Parse for NodeFnAttributes {
properties_string = Some(parsed_properties_string);
}
"cfg" => {
if cfg.is_some() {
return Err(Error::new_spanned(path, "Multiple 'feature' attributes are not allowed"));
}
let meta = meta.require_list()?;
cfg = Some(meta.tokens.clone());
}
"shader_node" => {
if shader_node.is_some() {
return Err(Error::new_spanned(path, "Multiple 'feature' attributes are not allowed"));
}
let meta = meta.require_list()?;
shader_node = Some(syn::parse2(meta.tokens.to_token_stream())?);
}
_ => {
return Err(Error::new_spanned(
meta,
indoc!(
r#"
Unsupported attribute in `node`.
Supported attributes are 'category', 'path' and 'name'.
Supported attributes are 'category', 'path' 'name', 'skip_impl', 'cfg' and 'properties'.
Example usage:
#[node_macro::node(category("Value"), name("Test Node"))]
@ -256,6 +283,8 @@ impl Parse for NodeFnAttributes {
path,
skip_impl,
properties_string,
cfg,
shader_node,
})
}
}
@ -758,6 +787,8 @@ mod tests {
path: Some(parse_quote!(graphene_core::TestNode)),
skip_impl: true,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("add", Span::call_site()),
struct_name: Ident::new("Add", Span::call_site()),
@ -819,6 +850,8 @@ mod tests {
path: None,
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("transform", Span::call_site()),
struct_name: Ident::new("Transform", Span::call_site()),
@ -891,6 +924,8 @@ mod tests {
path: None,
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("circle", Span::call_site()),
struct_name: Ident::new("Circle", Span::call_site()),
@ -948,6 +983,8 @@ mod tests {
path: None,
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("levels", Span::call_site()),
struct_name: Ident::new("Levels", Span::call_site()),
@ -1017,6 +1054,8 @@ mod tests {
path: Some(parse_quote!(graphene_core::TestNode)),
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("add", Span::call_site()),
struct_name: Ident::new("Add", Span::call_site()),
@ -1074,6 +1113,8 @@ mod tests {
path: None,
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("load_image", Span::call_site()),
struct_name: Ident::new("LoadImage", Span::call_site()),
@ -1131,6 +1172,8 @@ mod tests {
path: None,
skip_impl: false,
properties_string: None,
cfg: None,
shader_node: None,
},
fn_name: Ident::new("custom_node", Span::call_site()),
struct_name: Ident::new("CustomNode", Span::call_site()),

View file

@ -0,0 +1,32 @@
use crate::parsing::NodeFnAttributes;
use proc_macro2::{Ident, TokenStream};
use quote::quote;
use strum::{EnumString, VariantNames};
use syn::Error;
use syn::parse::{Parse, ParseStream};
/// Name of the Cargo feature used in generated `#[cfg(feature = ...)]` gates for shader nodes (see `modify_cfg`).
pub const STD_FEATURE_GATE: &str = "std";
/// Builds the `#[cfg(...)]` attribute token stream applied to a node's generated items.
///
/// Combines the user-supplied `cfg(...)` condition (if any) with a `feature = "std"`
/// gate that is added whenever the node also requests a GPU shader implementation.
/// Returns an empty stream when no gating is needed.
pub fn modify_cfg(attributes: &NodeFnAttributes) -> TokenStream {
    // Shader nodes must additionally be gated on the "std" feature.
    let gate_on_std = attributes.shader_node.is_some();
    match &attributes.cfg {
        Some(cfg) if gate_on_std => quote!(#[cfg(all(#cfg, feature = #STD_FEATURE_GATE))]),
        Some(cfg) => quote!(#[cfg(#cfg)]),
        None if gate_on_std => quote!(#[cfg(feature = #STD_FEATURE_GATE)]),
        None => quote!(),
    }
}
/// The kind of GPU shader implementation requested for a node via the
/// `shader_node(...)` attribute of `#[node_macro::node]`.
#[derive(Debug, EnumString, VariantNames)]
pub(crate) enum ShaderNodeType {
    // The only supported kind so far; parsed from the literal identifier `PerPixelAdjust`.
    PerPixelAdjust,
}
impl Parse for ShaderNodeType {
fn parse(input: ParseStream) -> syn::Result<Self> {
let ident: Ident = input.parse()?;
Ok(match ident.to_string().as_str() {
"PerPixelAdjust" => ShaderNodeType::PerPixelAdjust,
_ => return Err(Error::new_spanned(&ident, format!("attr 'shader_node' must be one of {:?}", Self::VARIANTS))),
})
}
}

View file

@ -25,3 +25,4 @@ futures = { workspace = true }
web-sys = { workspace = true }
winit = { workspace = true }
vello = { workspace = true }
bytemuck = { workspace = true }

View file

@ -16,7 +16,7 @@ impl Context {
backends: wgpu::Backends::all(),
..Default::default()
};
let instance = Instance::new(instance_descriptor);
let instance = Instance::new(&instance_descriptor);
let adapter_options = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::HighPerformance,
@ -24,34 +24,30 @@ impl Context {
force_fallback_adapter: false,
};
// `request_adapter` instantiates the general connection to the GPU
let adapter = instance.request_adapter(&adapter_options).await?;
let adapter = instance.request_adapter(&adapter_options).await.ok()?;
let required_limits = adapter.limits();
// `request_device` instantiates the feature specific connection to the GPU, defining some parameters,
// `features` being the available features.
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
// #[cfg(not(feature = "passthrough"))]
required_features: wgpu::Features::empty(),
// Currently disabled because not all backend support passthrough.
// TODO: reenable only when vulkan adapter is available
// #[cfg(feature = "passthrough")]
// required_features: wgpu::Features::SPIRV_SHADER_PASSTHROUGH,
required_limits,
memory_hints: Default::default(),
},
None,
)
.request_device(&wgpu::DeviceDescriptor {
label: None,
// #[cfg(not(feature = "passthrough"))]
#[cfg(target_arch = "wasm32")]
required_features: wgpu::Features::empty(),
#[cfg(not(target_arch = "wasm32"))]
required_features: wgpu::Features::PUSH_CONSTANTS,
// Currently disabled because not all backend support passthrough.
// TODO: reenable only when vulkan adapter is available
// #[cfg(feature = "passthrough")]
// required_features: wgpu::Features::SPIRV_SHADER_PASSTHROUGH,
required_limits,
memory_hints: Default::default(),
trace: wgpu::Trace::Off,
})
.await
.unwrap();
.ok()?;
let info = adapter.get_info();
// skip this on LavaPipe temporarily
if info.vendor == 0x10005 {
return None;
}
Some(Self {
device: Arc::new(device),
queue: Arc::new(queue),

View file

@ -1,20 +1,23 @@
mod context;
pub mod texture_upload;
use anyhow::Result;
pub use context::Context;
use dyn_any::StaticType;
use futures::lock::Mutex;
use glam::UVec2;
use graphene_application_io::{ApplicationIo, EditorApi, SurfaceHandle};
use graphene_application_io::{ApplicationIo, EditorApi, SurfaceHandle, SurfaceId};
use graphene_core::{Color, Ctx};
pub use graphene_svg_renderer::RenderContext;
use std::sync::Arc;
use vello::{AaConfig, AaSupport, RenderParams, Renderer, RendererOptions, Scene};
use wgpu::util::TextureBlitter;
use wgpu::{Origin3d, SurfaceConfiguration, TextureAspect};
#[derive(dyn_any::DynAny)]
pub struct WgpuExecutor {
pub context: Context,
vello_renderer: futures::lock::Mutex<Renderer>,
vello_renderer: Mutex<Renderer>,
}
impl std::fmt::Debug for WgpuExecutor {
@ -32,16 +35,17 @@ impl<'a, T: ApplicationIo<Executor = WgpuExecutor>> From<&'a EditorApi<T>> for &
pub type WgpuSurface = Arc<SurfaceHandle<Surface>>;
pub type WgpuWindow = Arc<SurfaceHandle<WindowHandle>>;
impl graphene_application_io::Size for Surface {
fn size(&self) -> UVec2 {
self.resolution
}
}
pub struct Surface {
pub inner: wgpu::Surface<'static>,
resolution: UVec2,
pub target_texture: Mutex<Option<TargetTexture>>,
pub blitter: TextureBlitter,
}
pub struct TargetTexture {
view: wgpu::TextureView,
size: UVec2,
}
#[cfg(target_arch = "wasm32")]
pub type Window = web_sys::HtmlCanvasElement;
#[cfg(not(target_arch = "wasm32"))]
@ -51,78 +55,159 @@ unsafe impl StaticType for Surface {
type Static = Surface;
}
const VELLO_SURFACE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;
impl WgpuExecutor {
pub async fn render_vello_scene(&self, scene: &Scene, surface: &WgpuSurface, width: u32, height: u32, context: &RenderContext, background: Color) -> Result<()> {
let surface = &surface.surface.inner;
let surface_caps = surface.get_capabilities(&self.context.adapter);
surface.configure(
pub async fn render_vello_scene(&self, scene: &Scene, surface: &WgpuSurface, size: UVec2, context: &RenderContext, background: Color) -> Result<()> {
let mut guard = surface.surface.target_texture.lock().await;
let target_texture = if let Some(target_texture) = &*guard
&& target_texture.size == size
{
target_texture
} else {
let texture = self.context.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: wgpu::Extent3d {
width: size.x,
height: size.y,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
format: VELLO_SURFACE_FORMAT,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
*guard = Some(TargetTexture { size, view });
guard.as_ref().unwrap()
};
let surface_inner = &surface.surface.inner;
let surface_caps = surface_inner.get_capabilities(&self.context.adapter);
surface_inner.configure(
&self.context.device,
&SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::STORAGE_BINDING,
format: wgpu::TextureFormat::Rgba8Unorm,
width,
height,
format: VELLO_SURFACE_FORMAT,
width: size.x,
height: size.y,
present_mode: surface_caps.present_modes[0],
alpha_mode: wgpu::CompositeAlphaMode::Opaque,
view_formats: vec![],
desired_maximum_frame_latency: 2,
},
);
let surface_texture = surface.get_current_texture()?;
let [r, g, b, _] = background.to_rgba8_srgb();
let render_params = RenderParams {
// We are using an explicit opaque color here to eliminate the alpha premultiplication step
// which would be required to support a transparent webgpu canvas
base_color: vello::peniko::Color::from_rgba8(r, g, b, 0xff),
width,
height,
width: size.x,
height: size.y,
antialiasing_method: AaConfig::Msaa16,
};
{
let mut renderer = self.vello_renderer.lock().await;
for (id, texture) in context.resource_overrides.iter() {
let texture_view = wgpu::ImageCopyTextureBase {
for (image, texture) in context.resource_overrides.iter() {
let texture_view = wgpu::TexelCopyTextureInfoBase {
texture: texture.clone(),
mip_level: 0,
origin: Origin3d::ZERO,
aspect: TextureAspect::All,
};
renderer.override_image(
&vello::peniko::Image::new(vello::peniko::Blob::from_raw_parts(Arc::new(vec![]), *id), vello::peniko::Format::Rgba8, 0, 0),
Some(texture_view),
);
renderer.override_image(image, Some(texture_view));
}
renderer.render_to_texture(&self.context.device, &self.context.queue, scene, &target_texture.view, &render_params)?;
for (image, _) in context.resource_overrides.iter() {
renderer.override_image(image, None);
}
renderer.render_to_surface(&self.context.device, &self.context.queue, scene, &surface_texture, &render_params).unwrap();
}
let surface_texture = surface_inner.get_current_texture()?;
let mut encoder = self.context.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("Surface Blit") });
surface.surface.blitter.copy(
&self.context.device,
&mut encoder,
&target_texture.view,
&surface_texture.texture.create_view(&wgpu::TextureViewDescriptor::default()),
);
self.context.queue.submit([encoder.finish()]);
surface_texture.present();
Ok(())
}
/// Renders `scene` with Vello into a newly created offscreen texture and returns it.
///
/// Unlike surface rendering, nothing is presented to a window here; the caller owns
/// the returned texture (format `VELLO_SURFACE_FORMAT`, usable as a storage and
/// sampled texture). Textures registered in `context.resource_overrides` are
/// temporarily substituted for their placeholder images during the render.
pub async fn render_vello_scene_to_texture(&self, scene: &Scene, size: UVec2, context: &RenderContext, background: Color) -> Result<wgpu::Texture> {
    // Allocate the render target, clamped to at least 1x1 so a zero-sized request
    // never produces a zero-extent texture.
    let texture = self.context.device.create_texture(&wgpu::TextureDescriptor {
        label: None,
        size: wgpu::Extent3d {
            width: size.x.max(1),
            height: size.y.max(1),
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
        format: VELLO_SURFACE_FORMAT,
        view_formats: &[],
    });
    let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    let [r, g, b, _] = background.to_rgba8_srgb();
    let render_params = RenderParams {
        // We are using an explicit opaque color here to eliminate the alpha premultiplication step
        // which would be required to support a transparent webgpu canvas
        base_color: vello::peniko::Color::from_rgba8(r, g, b, 0xff),
        width: size.x,
        height: size.y,
        antialiasing_method: AaConfig::Msaa16,
    };
    {
        // Scope the lock so the renderer mutex is released as soon as rendering completes.
        let mut renderer = self.vello_renderer.lock().await;
        // Point each overridden image at its backing wgpu texture before rendering.
        for (image, texture) in context.resource_overrides.iter() {
            let texture_view = wgpu::TexelCopyTextureInfoBase {
                texture: texture.clone(),
                mip_level: 0,
                origin: Origin3d::ZERO,
                aspect: TextureAspect::All,
            };
            renderer.override_image(image, Some(texture_view));
        }
        renderer.render_to_texture(&self.context.device, &self.context.queue, scene, &view, &render_params)?;
        // Clear the overrides again so the renderer does not keep references to
        // these textures beyond this render.
        for (image, _) in context.resource_overrides.iter() {
            renderer.override_image(image, None);
        }
    }
    Ok(texture)
}
#[cfg(target_arch = "wasm32")]
pub fn create_surface(&self, canvas: graphene_application_io::WasmSurfaceHandle) -> Result<SurfaceHandle<Surface>> {
let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Canvas(canvas.surface))?;
Ok(SurfaceHandle {
window_id: canvas.window_id,
surface: Surface {
inner: surface,
resolution: UVec2::ZERO,
},
})
self.create_surface_inner(surface, canvas.window_id)
}
#[cfg(not(target_arch = "wasm32"))]
pub fn create_surface(&self, window: SurfaceHandle<Window>) -> Result<SurfaceHandle<Surface>> {
let size = window.surface.inner_size();
let resolution = UVec2::new(size.width, size.height);
let surface = self.context.instance.create_surface(wgpu::SurfaceTarget::Window(Box::new(window.surface)))?;
self.create_surface_inner(surface, window.window_id)
}
pub fn create_surface_inner(&self, surface: wgpu::Surface<'static>, window_id: SurfaceId) -> Result<SurfaceHandle<Surface>> {
let blitter = TextureBlitter::new(&self.context.device, VELLO_SURFACE_FORMAT);
Ok(SurfaceHandle {
window_id: window.window_id,
surface: Surface { inner: surface, resolution },
window_id,
surface: Surface {
inner: surface,
target_texture: Mutex::new(None),
blitter,
},
})
}
}
@ -134,7 +219,26 @@ impl WgpuExecutor {
let vello_renderer = Renderer::new(
&context.device,
RendererOptions {
surface_format: Some(wgpu::TextureFormat::Rgba8Unorm),
// surface_format: Some(wgpu::TextureFormat::Rgba8Unorm),
pipeline_cache: None,
use_cpu: false,
antialiasing_support: AaSupport::all(),
num_init_threads: std::num::NonZeroUsize::new(1),
},
)
.map_err(|e| anyhow::anyhow!("Failed to create Vello renderer: {:?}", e))
.ok()?;
Some(Self {
context,
vello_renderer: vello_renderer.into(),
})
}
pub fn with_context(context: Context) -> Option<Self> {
let vello_renderer = Renderer::new(
&context.device,
RendererOptions {
pipeline_cache: None,
use_cpu: false,
antialiasing_support: AaSupport::all(),
num_init_threads: std::num::NonZeroUsize::new(1),

View file

@ -0,0 +1,51 @@
use crate::WgpuExecutor;
use graphene_core::color::SRGBA8;
use graphene_core::instances::Instance;
use graphene_core::raster_types::{CPU, GPU, Raster, RasterDataTable};
use graphene_core::{Ctx, ExtractFootprint};
use wgpu::util::{DeviceExt, TextureDataOrder};
use wgpu::{Extent3d, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages};
#[node_macro::node(category(""))]
pub async fn upload_texture<'a: 'n>(_: impl ExtractFootprint + Ctx, input: RasterDataTable<CPU>, executor: &'a WgpuExecutor) -> RasterDataTable<GPU> {
    // Uploads every CPU raster instance in the table to a GPU texture, preserving
    // each instance's transform, blending, and source node id.
    let device = &executor.context.device;
    let queue = &executor.context.queue;

    let mut uploaded = Vec::new();
    for row in input.instance_ref_iter() {
        let image = row.instance;
        // Convert the image's pixels to 8-bit sRGB for the Rgba8UnormSrgb texture format.
        let pixels: Vec<SRGBA8> = image.data.iter().map(|&px| px.into()).collect();
        let descriptor = TextureDescriptor {
            label: Some("upload_texture node texture"),
            size: Extent3d {
                width: image.width,
                height: image.height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: TextureDimension::D2,
            format: TextureFormat::Rgba8UnormSrgb,
            // I don't know what usages are actually necessary
            usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST | TextureUsages::COPY_SRC,
            view_formats: &[],
        };
        let texture = device.create_texture_with_data(queue, &descriptor, TextureDataOrder::LayerMajor, bytemuck::cast_slice(pixels.as_slice()));
        uploaded.push(Instance {
            instance: Raster::new_gpu(texture.into()),
            transform: *row.transform,
            alpha_blending: *row.alpha_blending,
            source_node_id: *row.source_node_id,
        });
    }
    // Empty submit — presumably to flush the texture writes staged by
    // `create_texture_with_data` before the textures are consumed downstream.
    queue.submit([]);
    uploaded.into_iter().collect()
}