Include graph runtime benchmarks in CI regression run

This commit is contained in:
Dennis Kobert 2025-06-28 09:02:14 +02:00
parent 532e913017
commit 81a87cf4de
No known key found for this signature in database
GPG key ID: 5A4358CB9530F933
6 changed files with 190 additions and 46 deletions

View file

@ -45,7 +45,15 @@ jobs:
- name: Run baseline benchmarks
run: |
# Compile benchmarks
cargo bench --bench compile_demo_art_iai -- --save-baseline=master
# Runtime benchmarks
cd node-graph/interpreted-executor
cargo bench --bench update_executor_iai -- --save-baseline=master
cargo bench --bench run_once_iai -- --save-baseline=master
cargo bench --bench run_cached_iai -- --save-baseline=master
cd ../..
- name: Checkout PR branch
run: |
@ -54,9 +62,31 @@ jobs:
- name: Run PR benchmarks
id: benchmark
run: |
BENCH_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$BENCH_OUTPUT" >> $GITHUB_OUTPUT
# Compile benchmarks
COMPILE_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
# Runtime benchmarks
cd node-graph/interpreted-executor
UPDATE_OUTPUT=$(cargo bench --bench update_executor_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_ONCE_OUTPUT=$(cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_CACHED_OUTPUT=$(cargo bench --bench run_cached_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
cd ../..
# Store outputs
echo "COMPILE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$COMPILE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "UPDATE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$UPDATE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "RUN_ONCE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_ONCE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "RUN_CACHED_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_CACHED_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
- name: Make old comments collapsed by default
@ -89,7 +119,11 @@ jobs:
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const benchmarkOutput = JSON.parse(`${{ steps.benchmark.outputs.BENCHMARK_OUTPUT }}`);
const compileOutput = JSON.parse(`${{ steps.benchmark.outputs.COMPILE_OUTPUT }}`);
const updateOutput = JSON.parse(`${{ steps.benchmark.outputs.UPDATE_OUTPUT }}`);
const runOnceOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_ONCE_OUTPUT }}`);
const runCachedOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_CACHED_OUTPUT }}`);
let significantChanges = false;
let commentBody = "";
@ -110,58 +144,83 @@ jobs:
return str.padStart(len);
}
for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;
if (irDiff.diff_pct !== null) {
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";
function processBenchmarkOutput(benchmarkOutput, sectionTitle) {
let sectionBody = "";
let hasResults = false;
for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;
commentBody += "---\n\n";
commentBody += `${benchmark.module_path} ${benchmark.id}:${benchmark.details}\n`;
commentBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) -> \`${formatNumber(irDiff.new)}\` (HEAD) : `;
commentBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
commentBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
commentBody += `Baselines: master| HEAD\n`;
for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
commentBody += `${line}\n`;
if (irDiff.diff_pct !== null) {
hasResults = true;
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";
sectionBody += `**${benchmark.module_path} ${benchmark.id}:${benchmark.details}**\n`;
sectionBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) → \`${formatNumber(irDiff.new)}\` (HEAD) : `;
sectionBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
sectionBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
sectionBody += `Baselines: master| HEAD\n`;
for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
sectionBody += `${line}\n`;
}
}
sectionBody += "```\n</details>\n\n";
if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
}
}
commentBody += "```\n</details>\n\n";
if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
}
}
}
if (hasResults) {
return `## ${sectionTitle}\n\n${sectionBody}`;
}
return "";
}
const output = `
<details open>
// Process each benchmark category
const compileSection = processBenchmarkOutput(compileOutput, "🔧 Compile Time");
const updateSection = processBenchmarkOutput(updateOutput, "🔄 Executor Update");
const runOnceSection = processBenchmarkOutput(runOnceOutput, "🚀 Cold Execution");
const runCachedSection = processBenchmarkOutput(runCachedOutput, "⚡ Cached Execution");
<summary>Performance Benchmark Results</summary>
// Combine all sections
commentBody = [compileSection, updateSection, runOnceSection, runCachedSection]
.filter(section => section.length > 0)
.join("---\n\n");
${commentBody}
if (commentBody.length > 0) {
const output = `
<details open>
</details>
`;
<summary>Performance Benchmark Results</summary>
if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
${commentBody}
</details>
`;
if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
}
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
console.log("No benchmark results to display.");
}

1
Cargo.lock generated
View file

@ -2991,6 +2991,7 @@ dependencies = [
"graphene-core",
"graphene-path-bool",
"graphene-std",
"iai-callgrind",
"log",
"once_cell",
"serde",

View file

@ -28,6 +28,7 @@ serde = { workspace = true }
# Workspace dependencies
graph-craft = { workspace = true, features = ["loading"] }
criterion = { workspace = true }
iai-callgrind = { workspace = true }
# Benchmarks
[[bench]]
@ -42,3 +43,15 @@ harness = false
name = "run_cached"
harness = false
[[bench]]
name = "update_executor_iai"
harness = false
[[bench]]
name = "run_once_iai"
harness = false
[[bench]]
name = "run_cached_iai"
harness = false

View file

@ -0,0 +1,27 @@
use graph_craft::util::*;
use graphene_std::Context;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
/// Build an executor for the named demo document and warm it with one
/// evaluation, so the benchmark below measures cached execution only.
fn setup_run_cached(name: &str) -> DynamicExecutor {
    let network = load_from_name(name);
    let proto_network = compile(network);
    let executor = futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap();
    // Warm up the cache by running once; the result is discarded.
    // `context` is moved directly — it is not used again, so the previous
    // `context.clone()` was redundant (clippy: redundant_clone).
    let context: Context = None;
    let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context));
    executor
}

/// Benchmark a cached evaluation of each demo document.
///
/// The setup has already evaluated the graph once, so this measures the
/// incremental / memoized execution path.
#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_cached)]
pub fn run_cached(executor: DynamicExecutor) {
    let context: Context = None;
    black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
}

library_benchmark_group!(name = run_cached_group; benchmarks = run_cached);
main!(library_benchmark_groups = run_cached_group);

View file

@ -0,0 +1,21 @@
use graph_craft::util::*;
use graphene_std::Context;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
/// Compile the named demo document into a fresh `DynamicExecutor`.
///
/// Construction happens here, outside the measured region, so the benchmark
/// below only times the first (uncached) evaluation.
fn setup_run_once(name: &str) -> DynamicExecutor {
    let proto_network = compile(load_from_name(name));
    futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap()
}

/// Benchmark a cold evaluation: the executor has never run, so nothing is cached.
#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_once)]
pub fn run_once(executor: DynamicExecutor) {
    let context: Context = None;
    let result = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context)));
    black_box(result.unwrap());
}

library_benchmark_group!(name = run_once_group; benchmarks = run_once);
main!(library_benchmark_groups = run_once_group);

View file

@ -0,0 +1,23 @@
use graph_craft::proto::ProtoNetwork;
use graph_craft::util::*;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;
/// Prepare an empty executor plus the compiled proto network for the named
/// demo document, so the benchmark below measures only the `update` call.
fn setup_update_executor(name: &str) -> (DynamicExecutor, ProtoNetwork) {
    let proto_network = compile(load_from_name(name));
    let executor = futures::executor::block_on(DynamicExecutor::new(ProtoNetwork::default())).unwrap();
    (executor, proto_network)
}

/// Benchmark swapping a compiled proto network into an (initially empty) executor.
#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_update_executor)]
pub fn update_executor(setup: (DynamicExecutor, ProtoNetwork)) {
    let (mut executor, proto_network) = setup;
    let outcome = futures::executor::block_on(executor.update(black_box(proto_network)));
    // The update result is intentionally discarded; only the work is measured.
    let _ = black_box(outcome);
}

library_benchmark_group!(name = update_group; benchmarks = update_executor);
main!(library_benchmark_groups = update_group);