Include graph runtime benchmarks in CI perf regression runs (#2780)
Some checks are pending
Editor: Dev & CI / build (push) Waiting to run
Editor: Dev & CI / cargo-deny (push) Waiting to run

* Include graph runtime benchmarks in CI regression run

* Update benchmarking workflow

* Add render slowdown

* Fix baseline cache

* Remove benchmark compilation validation run

* Include render node in runtime benchmarks

* Collapse sections without changes

* Re-add rulers between the sections

* Add review suggestions

* Rulers rule

* Fix whitespace
This commit is contained in:
Dennis Kobert 2025-08-07 14:32:19 +02:00 committed by GitHub
parent b491cfcd2b
commit d2ddf94bd0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 176 additions and 87 deletions

View file

@ -10,31 +10,33 @@ jobs:
profile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
uses: dtolnay/rust-toolchain@stable
- name: Install Valgrind
run: |
sudo apt update
sudo apt install -y valgrind
- name: Cache dependencies
uses: actions/cache@v3
- name: Cache Rust dependencies
uses: Swatinem/rust-cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
# Cache on Cargo.lock file
cache-on-failure: true
- name: Cache iai-callgrind binary
id: cache-iai
uses: actions/cache@v4
with:
path: ~/.cargo/bin/iai-callgrind-runner
key: ${{ runner.os }}-iai-callgrind-runner-0.12.3
- name: Install iai-callgrind
if: steps.cache-iai.outputs.cache-hit != 'true'
run: |
cargo install iai-callgrind-runner@0.12.3
@ -43,10 +45,30 @@ jobs:
git fetch origin master:master
git checkout master
- name: Get master commit SHA
id: master-sha
run: echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
- name: Cache benchmark baselines
id: cache-benchmark-baselines
uses: actions/cache@v4
with:
path: target/iai
key: ${{ runner.os }}-benchmark-baselines-master-${{ steps.master-sha.outputs.sha }}
restore-keys: |
${{ runner.os }}-benchmark-baselines-master-
- name: Run baseline benchmarks
if: steps.cache-benchmark-baselines.outputs.cache-hit != 'true'
run: |
# Compile benchmarks
cargo bench --bench compile_demo_art_iai -- --save-baseline=master
# Runtime benchmarks
cargo bench --bench update_executor_iai -- --save-baseline=master
cargo bench --bench run_once_iai -- --save-baseline=master
cargo bench --bench run_cached_iai -- --save-baseline=master
- name: Checkout PR branch
run: |
git checkout ${{ github.event.pull_request.head.sha }}
@ -54,13 +76,33 @@ jobs:
- name: Run PR benchmarks
id: benchmark
run: |
BENCH_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$BENCH_OUTPUT" >> $GITHUB_OUTPUT
# Compile benchmarks
COMPILE_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
# Runtime benchmarks
UPDATE_OUTPUT=$(cargo bench --bench update_executor_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_ONCE_OUTPUT=$(cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_CACHED_OUTPUT=$(cargo bench --bench run_cached_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
# Store outputs
echo "COMPILE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$COMPILE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "UPDATE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$UPDATE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "RUN_ONCE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_ONCE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "RUN_CACHED_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_CACHED_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
- name: Make old comments collapsed by default
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
@ -85,11 +127,15 @@ jobs:
}
- name: Comment PR
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const benchmarkOutput = JSON.parse(`${{ steps.benchmark.outputs.BENCHMARK_OUTPUT }}`);
const compileOutput = JSON.parse(`${{ steps.benchmark.outputs.COMPILE_OUTPUT }}`);
const updateOutput = JSON.parse(`${{ steps.benchmark.outputs.UPDATE_OUTPUT }}`);
const runOnceOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_ONCE_OUTPUT }}`);
const runCachedOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_CACHED_OUTPUT }}`);
let significantChanges = false;
let commentBody = "";
@ -110,58 +156,97 @@ jobs:
return str.padStart(len);
}
for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;
function processBenchmarkOutput(benchmarkOutput, sectionTitle, isLast = false) {
let sectionBody = "";
let hasResults = false;
let hasSignificantChanges = false;
if (irDiff.diff_pct !== null) {
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";
for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;
commentBody += "---\n\n";
commentBody += `${benchmark.module_path} ${benchmark.id}:${benchmark.details}\n`;
commentBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) -> \`${formatNumber(irDiff.new)}\` (HEAD) : `;
commentBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
if (irDiff.diff_pct !== null) {
hasResults = true;
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";
commentBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
commentBody += `Baselines: master| HEAD\n`;
sectionBody += `**${benchmark.module_path} ${benchmark.id}:${benchmark.details}**\n`;
sectionBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) → \`${formatNumber(irDiff.new)}\` (HEAD) : `;
sectionBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
commentBody += `${line}\n`;
sectionBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
sectionBody += `Baselines: master| HEAD\n`;
for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
sectionBody += `${line}\n`;
}
}
}
commentBody += "```\n</details>\n\n";
sectionBody += "```\n</details>\n\n";
if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
hasSignificantChanges = true;
}
}
}
}
if (hasResults) {
// Wrap section in collapsible details, open only if there are significant changes
const openAttribute = hasSignificantChanges ? " open" : "";
const ruler = isLast ? "" : "\n\n---";
return `<details${openAttribute}>\n<summary><h2>${sectionTitle}</h2></summary>\n\n${sectionBody}${ruler}\n</details>`;
}
return "";
}
const output = `
<details open>
// Process each benchmark category
const sections = [
{ output: compileOutput, title: "🔧 Graph Compilation" },
{ output: updateOutput, title: "🔄 Executor Update" },
{ output: runOnceOutput, title: "🚀 Render: Cold Execution" },
{ output: runCachedOutput, title: "⚡ Render: Cached Execution" }
];
<summary>Performance Benchmark Results</summary>
// Generate sections and determine which ones have results
const generatedSections = sections.map(({ output, title }) =>
processBenchmarkOutput(output, title, true) // temporarily mark all as last
).filter(section => section.length > 0);
${commentBody}
// Re-generate with correct isLast flags
let sectionIndex = 0;
const finalSections = sections.map(({ output, title }) => {
const section = processBenchmarkOutput(output, title, true); // check if it has results
if (section.length > 0) {
const isLast = sectionIndex === generatedSections.length - 1;
sectionIndex++;
return processBenchmarkOutput(output, title, isLast);
}
return "";
}).filter(section => section.length > 0);
</details>
`;
// Combine all sections
commentBody = finalSections.join("\n\n");
if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
if (commentBody.length > 0) {
const output = `<details open>\n<summary>Performance Benchmark Results</summary>\n\n${commentBody}\n</details>`;
if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
}
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
console.log("No benchmark results to display.");
}

View file

@ -16,7 +16,6 @@ use graphene_svg_renderer::{Render, RenderParams, RenderSvgSegmentList, SvgRende
#[cfg(target_family = "wasm")]
use base64::Engine;
use glam::DAffine2;
use std::sync::Arc;
#[cfg(target_family = "wasm")]
use wasm_bindgen::JsCast;
@ -196,7 +195,7 @@ async fn render_canvas(render_config: RenderConfig, data: impl Render, editor: &
let frame = SurfaceFrame {
surface_id: surface_handle.window_id,
resolution: render_config.viewport.resolution,
transform: DAffine2::IDENTITY,
transform: glam::DAffine2::IDENTITY,
};
RenderOutputType::CanvasFrame(frame)
@ -244,7 +243,7 @@ where
};
for row in data.iter_mut() {
*row.transform = DAffine2::from_translation(-aabb.start) * *row.transform;
*row.transform = glam::DAffine2::from_translation(-aabb.start) * *row.transform;
}
data.render_svg(&mut render, &render_params);
render.format_svg(glam::DVec2::ZERO, size);

View file

@ -3,10 +3,14 @@ use criterion::measurement::Measurement;
use futures::executor::block_on;
use graph_craft::proto::ProtoNetwork;
use graph_craft::util::{DEMO_ART, compile, load_from_name};
use graphene_std::application_io::EditorApi;
use interpreted_executor::dynamic_executor::DynamicExecutor;
use interpreted_executor::util::wrap_network_in_scope;
pub fn setup_network(name: &str) -> (DynamicExecutor, ProtoNetwork) {
let network = load_from_name(name);
let editor_api = std::sync::Arc::new(EditorApi::default());
let network = wrap_network_in_scope(network, editor_api);
let proto_network = compile(network);
let executor = block_on(DynamicExecutor::new(proto_network.clone())).unwrap();
(executor, proto_network)

View file

@ -2,15 +2,15 @@ mod benchmark_util;
use benchmark_util::{bench_for_each_demo, setup_network};
use criterion::{Criterion, criterion_group, criterion_main};
use graphene_std::Context;
use graphene_std::application_io::RenderConfig;
fn subsequent_evaluations(c: &mut Criterion) {
let mut group = c.benchmark_group("Subsequent Evaluations");
let context: Context = None;
let context = RenderConfig::default();
bench_for_each_demo(&mut group, |name, g| {
let (executor, _) = setup_network(name);
g.bench_function(name, |b| {
b.iter(|| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), criterion::black_box(context.clone()))).unwrap())
b.iter(|| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), criterion::black_box(context))).unwrap())
});
});
group.finish();

View file

@ -1,16 +1,16 @@
use graph_craft::util::*;
use graphene_std::Context;
mod benchmark_util;
use benchmark_util::setup_network;
use graphene_std::application_io::RenderConfig;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
fn setup_run_cached(name: &str) -> DynamicExecutor {
let network = load_from_name(name);
let proto_network = compile(network);
let executor = futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap();
let (executor, _) = setup_network(name);
// Warm up the cache by running once
let context: Context = None;
let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context.clone()));
let context = RenderConfig::default();
let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context));
executor
}
@ -18,7 +18,7 @@ fn setup_run_cached(name: &str) -> DynamicExecutor {
#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_cached)]
pub fn run_cached(executor: DynamicExecutor) {
let context: Context = None;
let context = RenderConfig::default();
black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
}

View file

@ -2,16 +2,16 @@ mod benchmark_util;
use benchmark_util::{bench_for_each_demo, setup_network};
use criterion::{Criterion, criterion_group, criterion_main};
use graphene_std::Context;
use graphene_std::application_io::RenderConfig;
fn run_once(c: &mut Criterion) {
let mut group = c.benchmark_group("Run Once");
let context: Context = None;
let context = RenderConfig::default();
bench_for_each_demo(&mut group, |name, g| {
g.bench_function(name, |b| {
b.iter_batched(
|| setup_network(name),
|(executor, _)| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), criterion::black_box(context.clone()))).unwrap(),
|(executor, _)| futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), criterion::black_box(context))).unwrap(),
criterion::BatchSize::SmallInput,
)
});

View file

@ -1,18 +1,19 @@
use graph_craft::util::*;
use graphene_std::Context;
mod benchmark_util;
use benchmark_util::setup_network;
use graphene_std::application_io;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
fn setup_run_once(name: &str) -> DynamicExecutor {
let network = load_from_name(name);
let proto_network = compile(network);
futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap()
let (executor, _) = setup_network(name);
executor
}
#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_once)]
pub fn run_once(executor: DynamicExecutor) {
let context: Context = None;
let context = application_io::RenderConfig::default();
black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
}

View file

@ -1,11 +1,12 @@
mod benchmark_util;
use benchmark_util::setup_network;
use graph_craft::proto::ProtoNetwork;
use graph_craft::util::*;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
fn setup_update_executor(name: &str) -> (DynamicExecutor, ProtoNetwork) {
let network = load_from_name(name);
let proto_network = compile(network);
let (_, proto_network) = setup_network(name);
let empty = ProtoNetwork::default();
let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap();
(executor, proto_network)

View file

@ -8,7 +8,6 @@ use graphene_std::Context;
use graphene_std::uuid::NodeId;
use std::sync::Arc;
// TODO: this is copy pasta from the editor (and does get out of sync)
pub fn wrap_network_in_scope(mut network: NodeNetwork, editor_api: Arc<WasmEditorApi>) -> NodeNetwork {
network.generate_node_paths(&[]);