feat: benchmarking (#999)

* feat: add benchmarking dashboard, CI hook on PR, and store lifetime results

* refactor: change python env to 3.13 in benchmarks

* refactor: add verbosity, use 3.11 for benchmarking

* fix: OSError: [Errno 7] Argument list too long

* refactor: add debug statements

* refactor: remove extraneous -e

* refactor: fix tests and linter errors

* fix: track main package in coverage

* refactor: fix test coverage testing

* refactor: fix repo owner name in benchmark on pushing comment

* refactor: add asv monkeypatch to docs workflow

* refactor: temporarily allow building docs in forks

* refactor: use py 3.13 for benchmarking

* refactor: run only a single benchmark for PRs to speed them up

* refactor: install asv in the docs build workflow

* refactor: use hatch docs env to generate benchmarks in docs CI

* refactor: more trying

* refactor: move tests

* Add benchmark results for 0.137

* Trigger Build

* Add benchmark results for 0.138

* refactor: set constant machine name when benchmarking

* Add benchmark results for 0.139

* refactor: fix issue with paths too long

* Add benchmark results for 0.140

* docs: update comment

* refactor: remove test benchmarking data

* refactor: fix comment

* refactor: allow the benchmark workflow to write to PRs

* refactor: use personal access token to set up the PR benchmark bot

* refactor: split the benchmark PR flow into two to make it work with PRs from forks

* refactor: update deprecated actions/upload-artifact@v3 to v4

* refactor: fix missing directory in benchmarking workflow

* refactor: fix triggering of second workflow

* refactor: fix workflow finally?

* docs: add comments to cut-offs and direct people to benchmarks PR

---------

Co-authored-by: github-actions <github-actions@github.com>
Juro Oravec 2025-02-23 16:18:57 +01:00 committed by GitHub
parent dcd4203eea
commit f36581ed86
90 changed files with 40817 additions and 443 deletions


@@ -9,11 +9,6 @@ on:
- '[0-9]+.[0-9]+.[0-9]+'
branches:
- master
pull_request:
branches:
- main
release:
types: [published]
workflow_dispatch:
jobs:
@@ -27,30 +22,116 @@ jobs:
# Only run in original repo (not in forks)
if: github.repository == 'django-components/django-components'
steps:
##############################
# SETUP
##############################
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
python-version: "3.13"
cache: 'pip'
- name: Install Hatch
run: |
python -m pip install --upgrade pip wheel
python -m pip install -q hatch pre-commit
python -m pip install -q hatch pre-commit asv
hatch --version
- name: Create Virtual Environment
run: hatch env create docs
- name: Configure git
run: |
# Get the master branch so we can run benchmarks on it
git remote add upstream https://github.com/${{ github.repository }}.git
git fetch origin master:master
git checkout master
# required for "mike deploy" command below which pushes to gh-pages
git config user.name github-actions
git config user.email github-actions@github.com
###########################################
# RECORD BENCHMARK - ONLY ON PUSH TO MASTER
###########################################
- name: Run benchmarks for tag
if: github.ref_type == 'tag' && github.event_name == 'push'
run: |
# Get tag name
TAG=${GITHUB_REF#refs/tags/}
echo "TAG: $TAG"
# TODO: REMOVE ONCE FIXED UPSTREAM
# Fix for https://github.com/airspeed-velocity/asv_runner/issues/45
# Prepare virtual environment
# Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark.
# The problem is that `asv` passes the code to execute via command line, and when the
# code is too big, it fails with `OSError: [Errno 7] Argument list too long`.
# So we have to tweak it to pass the code via STDIN, which doesn't have this limitation.
#
# 1. First create the virtual environment, so that asv generates the directories where
# the monkeypatch can be applied.
echo "Creating virtual environment..."
asv setup -v || true
echo "Virtual environment created."
# 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files.
# First find all `timeraw.py` files
echo "Applying monkeypatch..."
find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do
# Add a newline and then append the monkeypatch contents
echo "" >> "$file"
cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file"
done
echo "Monkeypatch applied."
# END OF MONKEYPATCH
# Prepare the profile under which the benchmarks will be saved.
# We assume that the CI machine has a name that is unique and stable.
# See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794
echo "Preparing benchmarks profile..."
asv machine --yes --machine ci-linux
echo "Benchmarks profile DONE."
# Run benchmarks for the current tag
# - `^` means that we mean the COMMIT of the tag's branch, not the BRANCH itself.
# Without it, we would run benchmarks for the whole branch history.
# With it, we run benchmarks FROM the tag's commit (incl) TO ...
# - `!` means that we want to select range spanning a single commit.
# Without it, we would run benchmarks for all commits FROM the tag's commit
# TO the start of the branch history.
# With it, we run benchmarks ONLY FOR the tag's commit.
echo "Running benchmarks for tag ${TAG}..."
asv run master^! -v
echo "Benchmarks for tag ${TAG} DONE."
# Generate benchmarks site
# This should save it in `docs/benchmarks/`, so we can then use it when
# building docs site with `mkdocs`.
echo "Generating benchmarks site..."
asv publish
echo "Benchmarks site DONE."
# Stage and commit benchmark results
echo "Staging and committing benchmark results..."
git add .asv/results/
git add docs/benchmarks/
git commit -m "Add benchmark results for ${TAG}"
echo "Benchmark results committed."
git push origin master
echo "Benchmark results pushed to master."
##############################
# BUILD & DEPLOY DOCS
##############################
- name: Create Virtual Environment
run: hatch env create docs
# Conditions make sure to select the right step, depending on the job trigger.
# Only one of the steps below will run at a time. The others will be skipped.
@@ -72,9 +153,3 @@ jobs:
run: |
hatch run docs:mike deploy --push --update-aliases ${{ github.ref_name }} latest
hatch run docs:mike set-default latest --push
- name: Build & deploy docs for a new release
if: github.event_name == 'release'
run: |
hatch run docs:mike deploy --push --update-aliases ${{ github.ref_name }} latest
hatch run docs:mike set-default latest --push


@@ -0,0 +1,98 @@
# Run benchmark report on pull requests to master.
# The report is added to the PR as a comment.
#
# NOTE: When making a PR from a fork, the worker doesn't have sufficient
# access to make comments on the target repo's PR. And so, this workflow
# is split into two parts:
#
# 1. Benchmarking and saving results as artifacts
# 2. Downloading the results and commenting on the PR
#
# See https://stackoverflow.com/a/71683208/9788634
name: PR benchmark comment
on:
workflow_run:
# NOTE: The name here MUST match the name of the workflow that generates the data
workflows: [PR benchmarks generate]
types:
- completed
jobs:
download:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
repository-projects: write
steps:
########## USE FOR DEBUGGING ##########
# - name: Debug workflow run info
# uses: actions/github-script@v7
# with:
# script: |
# console.log('Workflow Run ID:', context.payload.workflow_run.id);
# const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
# owner: context.repo.owner,
# repo: context.repo.repo,
# run_id: context.payload.workflow_run.id
# });
# console.log('Available artifacts:');
# console.log(JSON.stringify(artifacts.data, null, 2));
# console.log(`PRs: ` + JSON.stringify(context.payload.workflow_run.pull_requests));
#########################################
# NOTE: The next two steps (download and unzip) are equivalent to using `actions/download-artifact@v4`
# However, `download-artifact` was not picking up the artifact, while the REST client does.
- name: Download benchmark results
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
// Find the artifact that was generated by the "pr-benchmark-generate" workflow
const allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
// Explicitly search the workflow run that generated the results
// (AKA the "pr-benchmark-generate" workflow).
run_id: context.payload.workflow_run.id,
});
const matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "benchmark_results"
})[0];
// Download the artifact
const download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
fs.writeFileSync(
`${process.env.GITHUB_WORKSPACE}/benchmark_results.zip`,
Buffer.from(download.data),
);
- name: Unzip artifact
run: unzip benchmark_results.zip
- name: Comment on PR
# See https://github.com/actions/github-script
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const results = fs.readFileSync('./benchmark_results.md', 'utf8');
const body = `## Performance Benchmark Results\n\nComparing PR changes against master branch:\n\n${results}`;
// See https://octokit.github.io/rest.js/v21/#issues-create-comment
await github.rest.issues.createComment({
body: body,
// See https://github.com/actions/toolkit/blob/662b9d91f584bf29efbc41b86723e0e376010e41/packages/github/src/context.ts#L66
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.payload.workflow_run.pull_requests[0].number,
});


@@ -0,0 +1,102 @@
# Run benchmark report on pull requests to master.
# The report is added to the PR as a comment.
#
# NOTE: When making a PR from a fork, the worker doesn't have sufficient
# access to make comments on the target repo's PR. And so, this workflow
# is split into two parts:
#
# 1. Benchmarking and saving results as artifacts
# 2. Downloading the results and commenting on the PR
#
# See https://stackoverflow.com/a/71683208/9788634
name: PR benchmarks generate
on:
pull_request:
branches: [ master ]
jobs:
benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # Need full history for ASV
- name: Fetch base branch
run: |
git remote add upstream https://github.com/${{ github.repository }}.git
git fetch upstream master
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.13'
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install asv
- name: Run benchmarks
run: |
# TODO: REMOVE ONCE FIXED UPSTREAM
# Fix for https://github.com/airspeed-velocity/asv_runner/issues/45
# Prepare virtual environment
# Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark.
# The problem is that `asv` passes the code to execute via command line, and when the
# code is too big, it fails with `OSError: [Errno 7] Argument list too long`.
# So we have to tweak it to pass the code via STDIN, which doesn't have this limitation.
#
# 1. First create the virtual environment, so that asv generates the directories where
# the monkeypatch can be applied.
echo "Creating virtual environment..."
asv setup -v || true
echo "Virtual environment created."
# 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files.
# First find all `timeraw.py` files
echo "Applying monkeypatch..."
find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do
# Add a newline and then append the monkeypatch contents
echo "" >> "$file"
cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file"
done
echo "Monkeypatch applied."
# END OF MONKEYPATCH
# Prepare the profile under which the benchmarks will be saved.
# We assume that the CI machine has a name that is unique and stable.
# See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794
echo "Preparing benchmarks profile..."
asv machine --yes
echo "Benchmarks profile DONE."
# Generate benchmark data
# - `^` means that we mean the COMMIT of the branch, not the BRANCH itself.
# Without it, we would run benchmarks for the whole branch history.
# With it, we run benchmarks FROM the latest commit (incl) TO ...
# - `!` means that we want to select range spanning a single commit.
# Without it, we would run benchmarks for all commits FROM the latest commit
# TO the start of the branch history.
# With it, we run benchmarks ONLY FOR the latest commit.
echo "Running benchmarks for upstream/master..."
DJC_BENCHMARK_QUICK=1 asv run upstream/master^! -v
echo "Benchmarks for upstream/master DONE."
echo "Running benchmarks for HEAD..."
DJC_BENCHMARK_QUICK=1 asv run HEAD^! -v
echo "Benchmarks for HEAD DONE."
# Compare against master
echo "Comparing benchmarks..."
mkdir -p pr
asv compare upstream/master HEAD --factor 1.1 --split > ./pr/benchmark_results.md
echo "Benchmarks comparison DONE."
- name: Save benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark_results
path: pr/


@@ -17,6 +17,16 @@ jobs:
os: [ubuntu-20.04, windows-latest]
steps:
# Configure git to handle long paths
# See https://stackoverflow.com/questions/22575662/filename-too-long-in-git-for-windows
#
# Paths that exceed the limit are caused by the benchmarking data
# created by asv, which may look like this:
# docs/benchmarks/graphs/arch-x86_64/branch-master/cpu-AMD EPYC 7763 64-Core Processor/django-5.1/djc-core-html-parser/machine-fv-az1693-854/num_cpu-4/os-Linux 6.8.0-1021-azure/python-3.13/ram-16373792/isolated vs django modes.timeraw_render_lg_subsequent.json
- name: Configure git
run: |
git config --global core.longpaths true
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}

asv.conf.json (new file, 210 lines)

@@ -0,0 +1,210 @@
{
// The version of the config file format. Do not change, unless
// you know what you are doing
"version": 1,
// The name of the project being benchmarked
"project": "django-components",
// The project's homepage
// "project_url": "https://django-components.github.io/django-components/",
"project_url": "/django-components/", // Relative path, since benchmarks are nested under the docs site
// The URL or local path of the source code repository for the
// project being benchmarked
"repo": ".",
// The Python project's subdirectory in your repo. If missing or
// the empty string, the project is assumed to be located at the root
// of the repository.
// "repo_subdir": "",
// Customizable commands for building the project.
// See asv.conf.json documentation.
// To build the package using pyproject.toml (PEP518), uncomment the following lines
// "build_command": [
// "python -m pip install build",
// "python -m build",
// "python -mpip wheel -w {build_cache_dir} {build_dir}"
// ],
// To build the package using setuptools and a setup.py file, uncomment the following lines
// "build_command": [
// "python setup.py build",
// "python -mpip wheel -w {build_cache_dir} {build_dir}"
// ],
// Customizable commands for installing and uninstalling the project.
// See asv.conf.json documentation.
// "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
// "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
"install_command": ["in-dir={env_dir} python -mpip install ./project"],
// List of branches to benchmark. If not provided, defaults to "main"
// (for git) or "default" (for mercurial).
// "branches": ["main"], // for git
// "branches": ["default"], // for mercurial
"branches": [
"master"
],
// The DVCS being used. If not set, it will be automatically
// determined from "repo" by looking at the protocol in the URL
// (if remote), or by looking for special directories, such as
// ".git" (if local).
// "dvcs": "git",
// The tool to use to create environments. May be "conda",
// "virtualenv", "mamba" (above 3.8)
// or other value depending on the plugins in use.
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
"environment_type": "virtualenv",
// timeout in seconds for installing any dependencies in environment
// defaults to 10 min
//"install_timeout": 600,
// the base URL to show a commit for the project.
// "show_commit_url": "http://github.com/owner/project/commit/",
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
"pythons": [
"3.13"
],
// The list of conda channel names to be searched for benchmark
// dependency packages in the specified order
// "conda_channels": ["conda-forge", "defaults"],
// A conda environment file that is used for environment creation.
// "conda_environment_file": "environment.yml",
// The matrix of dependencies to test. Each key of the "req"
// requirements dictionary is the name of a package (in PyPI) and
// the values are version numbers. An empty list or empty string
// indicates to just test against the default (latest)
// version. null indicates that the package is to not be
// installed. If the package to be tested is only available from
// PyPI, and the 'environment_type' is conda, then you can preface
// the package name by 'pip+', and the package will be installed
// via pip (with all the conda available packages installed first,
// followed by the pip installed packages).
//
// The ``@env`` and ``@env_nobuild`` keys contain the matrix of
// environment variables to pass to build and benchmark commands.
// An environment will be created for every combination of the
// cartesian product of the "@env" variables in this matrix.
// Variables in "@env_nobuild" will be passed to every environment
// during the benchmark phase, but will not trigger creation of
// new environments. A value of ``null`` means that the variable
// will not be set for the current combination.
//
// "matrix": {
// "req": {
// "numpy": ["1.6", "1.7"],
// "six": ["", null], // test with and without six installed
// "pip+emcee": [""] // emcee is only available for install with pip.
// },
// "env": {"ENV_VAR_1": ["val1", "val2"]},
// "env_nobuild": {"ENV_VAR_2": ["val3", null]},
// },
"matrix": {
"req": {
"django": [
"5.1"
],
"djc-core-html-parser": [""] // Empty string means the latest version
}
},
// Combinations of libraries/python versions can be excluded/included
// from the set to test. Each entry is a dictionary containing additional
// key-value pairs to include/exclude.
//
// An exclude entry excludes entries where all values match. The
// values are regexps that should match the whole string.
//
// An include entry adds an environment. Only the packages listed
// are installed. The 'python' key is required. The exclude rules
// do not apply to includes.
//
// In addition to package names, the following keys are available:
//
// - python
// Python version, as in the *pythons* variable above.
// - environment_type
// Environment type, as above.
// - sys_platform
// Platform, as in sys.platform. Possible values for the common
// cases: 'linux2', 'win32', 'cygwin', 'darwin'.
// - req
// Required packages
// - env
// Environment variables
// - env_nobuild
// Non-build environment variables
//
// "exclude": [
// {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
// {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda
// {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1
// ],
//
// "include": [
// // additional env for python3.12
// {"python": "3.12", "req": {"numpy": "1.26"}, "env_nobuild": {"FOO": "123"}},
// // additional env if run on windows+conda
// {"platform": "win32", "environment_type": "conda", "python": "3.12", "req": {"libpython": ""}},
// ],
// The directory (relative to the current directory) that benchmarks are
// stored in. If not provided, defaults to "benchmarks"
"benchmark_dir": "benchmarks",
// The directory (relative to the current directory) to cache the Python
// environments in. If not provided, defaults to "env"
"env_dir": ".asv/env",
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".
"results_dir": ".asv/results",
// The directory (relative to the current directory) that the html tree
// should be written to. If not provided, defaults to "html".
// "html_dir": ".asv/html",
"html_dir": "docs/benchmarks", // # TODO
// The number of characters to retain in the commit hashes.
// "hash_length": 8,
// `asv` will cache results of the recent builds in each
// environment, making them faster to install next time. This is
// the number of builds to keep, per environment.
// "build_cache_size": 2,
// The commits after which the regression search in `asv publish`
// should start looking for regressions. Dictionary whose keys are
// regexps matching to benchmark names, and values corresponding to
// the commit (exclusive) after which to start looking for
// regressions. The default is to start from the first commit
// with results. If the commit is `null`, regression detection is
// skipped for the matching benchmark.
//
// "regressions_first_commits": {
// "some_benchmark": "352cdf", // Consider regressions only after this commit
// "another_benchmark": null, // Skip regression detection altogether
// },
// The thresholds for relative change in results, after which `asv
// publish` starts reporting regressions. Dictionary of the same
// form as in ``regressions_first_commits``, with values
// indicating the thresholds. If multiple entries match, the
// maximum is taken. If no entry matches, the default is 5%.
//
// "regressions_thresholds": {
// "some_benchmark": 0.01, // Threshold of 1%
// "another_benchmark": 0.5, // Threshold of 50%
// },
}

benchmarks/README.md (new file, 195 lines)

@@ -0,0 +1,195 @@
# Benchmarks
## Overview
[`asv`](https://github.com/airspeed-velocity/) (Airspeed Velocity) is used for benchmarking performance.
`asv` covers the entire benchmarking workflow. We can:
1. Define benchmark tests similarly to writing pytest tests (supports both timing and memory benchmarks)
2. Run the benchmarks and generate results for individual git commits, tags, or entire branches
3. View results as an HTML report (dashboard with charts)
4. Compare performance between two commits / tags / branches for CI integration
![asv dashboard](./assets/asv_dashboard.png)
django-components uses `asv` for these use cases:
- Benchmarking across releases:
1. When a git tag is created and pushed, this triggers a Github Action workflow (see `docs.yml`).
2. The workflow runs the benchmarks with the latest release, and commits the results to the repository.
Thus, we can see how performance changes across releases.
- Displaying performance results on the website:
1. When a git tag is created and pushed, we also update the documentation website (see `docs.yml`).
2. Before we publish the docs website, we generate the HTML report for the benchmark results.
3. The generated report is placed in the `docs/benchmarks/` directory, and is thus
published with the rest of the docs website and available under [`/benchmarks/`](https://django-components.github.io/django-components/benchmarks).
- NOTE: The location where the report is placed is defined in `asv.conf.json`.
- Compare performance between commits on pull requests:
1. When a pull request is made, this triggers a Github Action workflow (see `benchmark.yml`).
2. The workflow compares performance between commits.
3. The report is added to the PR as a comment made by a bot.
## Interpreting benchmarks
The results CANNOT be taken as ABSOLUTE values e.g.:
"This example took 200ms to render, so my page will also take 200ms to render."
Each UI may consist of a different number of Django templates, template tags, and components, and each of these may influence the rendering time differently.
Instead, the results MUST be understood as RELATIVE values.
- If a commit is 10% slower than the master branch, that's valid.
- If Django components are 10% slower than vanilla Django templates, that's valid.
- If "isolated" mode is 10% slower than "django" mode, that's valid.
## Development
Let's say we want to generate results for the last 5 commits.
1. Install `asv`
```bash
pip install asv
```
2. Run benchmarks and generate results
```bash
asv run HEAD --steps 5 -e
```
- `HEAD` means that we want to run benchmarks against the [current branch](https://stackoverflow.com/a/2304106/9788634).
- `--steps 5` limits the run to at most 5 commits, picked evenly spaced across the selected range.
- `-e` to print out any errors.
The results will be stored in `.asv/results/`, as configured in `asv.conf.json`.
3. Generate HTML report
```bash
asv publish
asv preview
```
- `publish` generates the HTML report and stores it in `docs/benchmarks/`, as configured in `asv.conf.json`.
- `preview` starts a local server and opens the report in the browser.
NOTE: Since the HTML report is stored in `docs/benchmarks/`, you can also view the results
with `mkdocs serve` by navigating to `http://localhost:9000/django-components/benchmarks/`.
NOTE 2: Running `publish` will overwrite the existing contents of `docs/benchmarks/`.
## Writing benchmarks
`asv` supports writing different [types of benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#benchmark-types). What's relevant for us is:
- [Raw timing benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#raw-timing-benchmarks)
- [Peak memory benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#peak-memory)
Notes:
- The difference between "raw timing" and "timing" tests is that "raw timing" is ran in a separate process.
And instead of running the logic within the test function itself, we return a script (string)
that will be executed in the separate process.
- The difference between "peak memory" and "memory" tests is that "memory" calculates the memory
of the object returned from the test function. On the other hand, "peak memory" detects the
peak memory usage during the execution of the test function (including the setup function).
You can write the test file anywhere in the `benchmarks/` directory, `asv` will automatically find it.
Inside the file, write a test function. Depending on the type of the benchmark,
prefix the test function name with `timeraw_` or `peakmem_`. See [`benchmarks/benchmark_templating.py`](benchmark_templating.py) for examples.
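For illustration, here is a minimal sketch of the two benchmark types. The function names and benchmarked logic are hypothetical; `asv` picks the functions up purely by their prefix:

```py
# Hypothetical examples, not part of the actual suite.

# Raw timing: return the benchmarked code as a string, or as a
# (code, setup) tuple. asv executes it in a separate process.
def timeraw_join_strings():
    setup = "parts = ['x'] * 10_000"
    stmt = "''.join(parts)"
    return stmt, setup

# Peak memory: asv measures the peak memory usage while the function
# body (including its setup) runs in the current process.
def peakmem_build_list():
    data = [str(i) for i in range(100_000)]
    assert data
```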
### Ensuring that the benchmarked logic is correct
The approach I (Juro) took with benchmarking the overall template rendering is that
I've defined the actual logic in `tests/test_benchmark_*.py` files. So those files
are part of the normal pytest testing, and even contain a section with pytest tests.
This ensures that the benchmarked logic remains functional and error-free.
However, there are some caveats:
1. I wasn't able to import files from `tests/`.
2. When running benchmarks, we don't want to run the pytest tests.
To work around that, the approach I used for loading the files from the `tests/` directory is as follows (see the sketch after this list):
1. Get the file's source code as a string.
2. Cut out unwanted sections (like the pytest tests).
3. Append the benchmark-specific code to the file (e.g. to actually render the templates).
4. In case of "timeraw" benchmarks, we can simply return the remaining code as a string
to be run in a separate process.
5. In case of "peakmem" benchmarks, we need to access this modified source code as Python objects.
So the code is made available as a "virtual" module, which makes it possible to import Python objects like so:
```py
from my_virtual_module import run_my_benchmark
```
## Using `asv`
### Compare latest commit against master
Note: Before comparing, you must run the benchmarks first to generate the results. The `continuous` command does not generate the results by itself.
```bash
asv continuous master^! HEAD^! --factor 1.1
```
- Factor of `1.1` means that the new commit is allowed to be 10% slower/faster than the master commit.
- `^` means that we mean the COMMIT of the branch, not the BRANCH itself.
Without it, we would run benchmarks for the whole branch history.
With it, we run benchmarks FROM the latest commit (incl) TO ...
- `!` means that we want to select range spanning a single commit.
Without it, we would run benchmarks for all commits FROM the latest commit
TO the start of the branch history.
With it, we run benchmarks ONLY FOR the latest commit.
### More Examples
Notes:
- Use `~1` to select the second-latest commit, `~2` for the third-latest, etc.
Generate benchmarks for the latest commit in `master` branch.
```bash
asv run master^!
```
Generate benchmarks for second-latest commit in `master` branch.
```bash
asv run master~1^!
```
Generate benchmarks for all commits in `master` branch.
```bash
asv run master
```
Generate benchmarks for all commits in `master` branch, but exclude the latest commit.
```bash
asv run master~1
```
Generate benchmarks for up to 5 evenly spaced commits in `master` branch, excluding the latest commit.
```bash
asv run master~1 --steps 5
```

benchmarks/__init__.py (new file, empty)

Binary image file not shown (321 KiB).


@@ -0,0 +1,447 @@
# Write the benchmarking functions here
# See "Writing benchmarks" in the asv docs for more information.
import re
from pathlib import Path
from types import ModuleType
from typing import Literal
# Fix for https://github.com/airspeed-velocity/asv_runner/pull/44
import benchmarks.monkeypatch_asv # noqa: F401
from benchmarks.utils import benchmark, create_virtual_module
DJC_VS_DJ_GROUP = "Components vs Django"
DJC_ISOLATED_VS_NON_GROUP = "isolated vs django modes"
OTHER_GROUP = "Other"
DjcContextMode = Literal["isolated", "django"]
TemplatingRenderer = Literal["django", "django-components", "none"]
TemplatingTestSize = Literal["lg", "sm"]
TemplatingTestType = Literal[
"first", # Testing performance of the first time the template is rendered
"subsequent", # Testing performance of the subsequent times the template is rendered
"startup", # Testing performance of the startup time (e.g. defining classes and templates)
]
def _get_templating_filepath(renderer: TemplatingRenderer, size: TemplatingTestSize) -> Path:
if renderer == "none":
raise ValueError("Cannot get filepath for renderer 'none'")
elif renderer not in ["django", "django-components"]:
raise ValueError(f"Invalid renderer: {renderer}")
if size not in ("lg", "sm"):
raise ValueError(f"Invalid size: {size}, must be one of ('lg', 'sm')")
# At this point, we know the renderer is either "django" or "django-components"
root = Path(__file__).parent.parent
if renderer == "django":
if size == "lg":
file_path = root / "tests" / "test_benchmark_django.py"
else:
file_path = root / "tests" / "test_benchmark_django_small.py"
else:
if size == "lg":
file_path = root / "tests" / "test_benchmark_djc.py"
else:
file_path = root / "tests" / "test_benchmark_djc_small.py"
return file_path
def _get_templating_script(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
context_mode: DjcContextMode,
imports_only: bool,
) -> str:
if renderer == "none":
return ""
elif renderer not in ["django", "django-components"]:
raise ValueError(f"Invalid renderer: {renderer}")
# At this point, we know the renderer is either "django" or "django-components"
file_path = _get_templating_filepath(renderer, size)
contents = file_path.read_text()
# The files with benchmarked code also have a section for testing them with pytest.
# We remove that pytest section, so the script is only the benchmark code.
contents = contents.split("# ----------- TESTS START ------------ #")[0]
if imports_only:
# There is a benchmark test for measuring the time it takes to import the module.
# For that, we exclude from the code everything AFTER this line
contents = contents.split("# ----------- IMPORTS END ------------ #")[0]
else:
# Set the context mode by replacing variable in the script
contents = re.sub(r"CONTEXT_MODE.*?\n", f"CONTEXT_MODE = '{context_mode}'\n", contents, count=1)
return contents
def _get_templating_module(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
context_mode: DjcContextMode,
imports_only: bool,
) -> ModuleType:
if renderer not in ("django", "django-components"):
raise ValueError(f"Invalid renderer: {renderer}")
file_path = _get_templating_filepath(renderer, size)
script = _get_templating_script(renderer, size, context_mode, imports_only)
# This makes it possible to import the module in the benchmark function
# as `import test_templating`
module = create_virtual_module("test_templating", script, str(file_path))
return module
# The `timeraw_` tests run in separate processes. But when running memory benchmarks,
# the tested logic runs in the same process as the one where we run the benchmark functions
# (e.g. `peakmem_render_lg_first()`). Thus, the `peakmem_` functions have access to this file
# when the tested logic runs.
#
# Secondly, `asv` doesn't offer any way to pass data from `setup` to actual test.
#
# And so we define this global, which, when running memory benchmarks, the `setup` function
# populates. And then we trigger the actual render from within the test body.
do_render = lambda: None # noqa: E731
def setup_templating_memory_benchmark(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
test_type: TemplatingTestType,
context_mode: DjcContextMode,
imports_only: bool = False,
):
global do_render
module = _get_templating_module(renderer, size, context_mode, imports_only)
data = module.gen_render_data()
render = module.render
do_render = lambda: render(data) # noqa: E731
# Do the first render as part of setup if we're testing the subsequent renders
if test_type == "subsequent":
do_render()
# The timing benchmarks run the actual code in a separate process, by using the `timeraw_` prefix.
# As such, we don't actually load the code in this file. Instead, we only prepare a script (raw string)
# that will be run in the new process.
def prepare_templating_benchmark(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
test_type: TemplatingTestType,
context_mode: DjcContextMode,
imports_only: bool = False,
):
global do_render
setup_script = _get_templating_script(renderer, size, context_mode, imports_only)
# If we're testing the startup time, then the setup is actually the tested code
if test_type == "startup":
return setup_script
else:
# Otherwise include also data generation as part of setup
setup_script += "\n\n" "render_data = gen_render_data()\n"
# Do the first render as part of setup if we're testing the subsequent renders
if test_type == "subsequent":
setup_script += "render(render_data)\n"
benchmark_script = "render(render_data)\n"
return benchmark_script, setup_script
# - Group: django-components vs django
# - time: djc vs django (startup lg)
# - time: djc vs django (lg - FIRST)
# - time: djc vs django (sm - FIRST)
# - time: djc vs django (lg - SUBSEQUENT)
# - time: djc vs django (sm - SUBSEQUENT)
# - mem: djc vs django (lg - FIRST)
# - mem: djc vs django (sm - FIRST)
# - mem: djc vs django (lg - SUBSEQUENT)
# - mem: djc vs django (sm - SUBSEQUENT)
#
# NOTE: While the name suggests we're comparing Django and Django-components, be aware that
# in our "Django" tests, we still install and import django-components. We also use
# django-components's `{% html_attrs %}` tag in the Django scenario. `{% html_attrs %}`
# was used because the original sample code was from django-components.
#
# As such, these tests should be seen not as "Using Django vs Using Components", but rather
# as "What is the relative cost of using Components?".
#
# As an example, the benchmarking for the startup time and memory usage is not comparing
# two independent approaches. Rather, the test is checking if defining Components classes
# is more expensive than vanilla Django templates.
class DjangoComponentsVsDjangoTests:
# Testing startup time (e.g. defining classes and templates)
@benchmark(
pretty_name="startup - large",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
)
def timeraw_startup_lg(self, renderer: TemplatingRenderer):
return prepare_templating_benchmark(renderer, "lg", "startup", "isolated")
@benchmark(
pretty_name="render - small - first render",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
)
def timeraw_render_sm_first(self, renderer: TemplatingRenderer):
return prepare_templating_benchmark(renderer, "sm", "first", "isolated")
@benchmark(
pretty_name="render - small - second render",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
)
def timeraw_render_sm_subsequent(self, renderer: TemplatingRenderer):
return prepare_templating_benchmark(renderer, "sm", "subsequent", "isolated")
@benchmark(
pretty_name="render - large - first render",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
include_in_quick_benchmark=True,
)
def timeraw_render_lg_first(self, renderer: TemplatingRenderer):
return prepare_templating_benchmark(renderer, "lg", "first", "isolated")
@benchmark(
pretty_name="render - large - second render",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
)
def timeraw_render_lg_subsequent(self, renderer: TemplatingRenderer):
return prepare_templating_benchmark(renderer, "lg", "subsequent", "isolated")
@benchmark(
pretty_name="render - small - first render (mem)",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "first", "isolated"),
)
def peakmem_render_sm_first(self, renderer: TemplatingRenderer):
do_render()
@benchmark(
pretty_name="render - small - second render (mem)",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "subsequent", "isolated"),
)
def peakmem_render_sm_subsequent(self, renderer: TemplatingRenderer):
do_render()
@benchmark(
pretty_name="render - large - first render (mem)",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "first", "isolated"),
)
def peakmem_render_lg_first(self, renderer: TemplatingRenderer):
do_render()
@benchmark(
pretty_name="render - large - second render (mem)",
group_name=DJC_VS_DJ_GROUP,
number=1,
rounds=5,
params={
"renderer": ["django", "django-components"],
},
setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "subsequent", "isolated"),
)
def peakmem_render_lg_subsequent(self, renderer: TemplatingRenderer):
do_render()
# - Group: Django-components "isolated" vs "django" modes
# - time: Isolated vs django djc (startup lg)
# - time: Isolated vs django djc (lg - FIRST)
# - time: Isolated vs django djc (sm - FIRST)
# - time: Isolated vs django djc (lg - SUBSEQUENT)
# - time: Isolated vs django djc (sm - SUBSEQUENT)
# - mem: Isolated vs django djc (lg - FIRST)
# - mem: Isolated vs django djc (sm - FIRST)
# - mem: Isolated vs django djc (lg - SUBSEQUENT)
# - mem: Isolated vs django djc (sm - SUBSEQUENT)
class IsolatedVsDjangoContextModesTests:
# Testing startup time (e.g. defining classes and templates)
@benchmark(
pretty_name="startup - large",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
)
def timeraw_startup_lg(self, context_mode: DjcContextMode):
return prepare_templating_benchmark("django-components", "lg", "startup", context_mode)
@benchmark(
pretty_name="render - small - first render",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
)
def timeraw_render_sm_first(self, context_mode: DjcContextMode):
return prepare_templating_benchmark("django-components", "sm", "first", context_mode)
@benchmark(
pretty_name="render - small - second render",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
)
def timeraw_render_sm_subsequent(self, context_mode: DjcContextMode):
return prepare_templating_benchmark("django-components", "sm", "subsequent", context_mode)
@benchmark(
pretty_name="render - large - first render",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
)
def timeraw_render_lg_first(self, context_mode: DjcContextMode):
return prepare_templating_benchmark("django-components", "lg", "first", context_mode)
@benchmark(
pretty_name="render - large - second render",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
)
def timeraw_render_lg_subsequent(self, context_mode: DjcContextMode):
return prepare_templating_benchmark("django-components", "lg", "subsequent", context_mode)
@benchmark(
pretty_name="render - small - first render (mem)",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
setup=lambda context_mode: setup_templating_memory_benchmark("django-components", "sm", "first", context_mode),
)
def peakmem_render_sm_first(self, context_mode: DjcContextMode):
do_render()
@benchmark(
pretty_name="render - small - second render (mem)",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
setup=lambda context_mode: setup_templating_memory_benchmark(
"django-components",
"sm",
"subsequent",
context_mode,
),
)
def peakmem_render_sm_subsequent(self, context_mode: DjcContextMode):
do_render()
@benchmark(
pretty_name="render - large - first render (mem)",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
setup=lambda context_mode: setup_templating_memory_benchmark(
"django-components",
"lg",
"first",
context_mode,
),
)
def peakmem_render_lg_first(self, context_mode: DjcContextMode):
do_render()
@benchmark(
pretty_name="render - large - second render (mem)",
group_name=DJC_ISOLATED_VS_NON_GROUP,
number=1,
rounds=5,
params={
"context_mode": ["isolated", "django"],
},
setup=lambda context_mode: setup_templating_memory_benchmark(
"django-components",
"lg",
"subsequent",
context_mode,
),
)
def peakmem_render_lg_subsequent(self, context_mode: DjcContextMode):
do_render()
class OtherTests:
@benchmark(
pretty_name="import time",
group_name=OTHER_GROUP,
number=1,
rounds=5,
)
def timeraw_import_time(self):
return prepare_templating_benchmark("django-components", "lg", "startup", "isolated", imports_only=True)


@@ -1,174 +0,0 @@
from time import perf_counter
from django.template import Context, Template
from django_components import Component, registry, types
from django_components.dependencies import CSS_DEPENDENCY_PLACEHOLDER, JS_DEPENDENCY_PLACEHOLDER
from tests.django_test_setup import * # NOQA
from tests.testutils import BaseTestCase, create_and_process_template_response
class SlottedComponent(Component):
template: types.django_html = """
{% load component_tags %}
<custom-template>
<header>{% slot "header" %}Default header{% endslot %}</header>
<main>{% slot "main" %}Default main{% endslot %}</main>
<footer>{% slot "footer" %}Default footer{% endslot %}</footer>
</custom-template>
"""
class SimpleComponent(Component):
template: types.django_html = """
Variable: <strong>{{ variable }}</strong>
"""
css_file = "style.css"
js_file = "script.js"
def get_context_data(self, variable, variable2="default"):
return {
"variable": variable,
"variable2": variable2,
}
class BreadcrumbComponent(Component):
template: types.django_html = """
<div class="breadcrumb-container">
<nav class="breadcrumbs">
<ol typeof="BreadcrumbList" vocab="https://schema.org/" aria-label="breadcrumbs">
{% for label, url in links %}
<li property="itemListElement" typeof="ListItem">
<a class="breadcrumb-current-page" property="item" typeof="WebPage" href="{{ url }}">
<span property="name">{{ label }}</span>
</a>
<meta property="position" content="4">
</li>
{% endfor %}
</ol>
</nav>
</div>
"""
css_file = "test.css"
js_file = "test.js"
LINKS = [
(
"https://developer.mozilla.org/en-US/docs/Learn",
"Learn web development",
),
(
"https://developer.mozilla.org/en-US/docs/Learn/HTML",
"Structuring the web with HTML",
),
(
"https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML",
"Introduction to HTML",
),
(
"https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML/Document_and_website_structure",
"Document and website structure",
),
]
def get_context_data(self, items):
if items > 4:
items = 4
elif items < 0:
items = 0
return {"links": self.LINKS[: items - 1]}
EXPECTED_CSS = """<link href="test.css" media="all" rel="stylesheet">"""
EXPECTED_JS = """<script src="test.js"></script>"""
class RenderBenchmarks(BaseTestCase):
def setUp(self):
registry.clear()
registry.register("test_component", SlottedComponent)
registry.register("inner_component", SimpleComponent)
registry.register("breadcrumb_component", BreadcrumbComponent)
@staticmethod
def timed_loop(func, iterations=1000):
"""Run func iterations times, and return the time in ms per iteration."""
start_time = perf_counter()
for _ in range(iterations):
func()
end_time = perf_counter()
total_elapsed = end_time - start_time # NOQA
return total_elapsed * 1000 / iterations
def test_render_time_for_small_component(self):
template_str: types.django_html = """
{% load component_tags %}
{% component 'test_component' %}
{% slot "header" %}
{% component 'inner_component' variable='foo' %}{% endcomponent %}
{% endslot %}
{% endcomponent %}
"""
template = Template(template_str)
print(f"{self.timed_loop(lambda: template.render(Context({})))} ms per iteration")
def test_middleware_time_with_dependency_for_small_page(self):
template_str: types.django_html = """
{% load component_tags %}
{% component_js_dependencies %}
{% component_css_dependencies %}
{% component 'test_component' %}
{% slot "header" %}
{% component 'inner_component' variable='foo' %}{% endcomponent %}
{% endslot %}
{% endcomponent %}
"""
template = Template(template_str)
# Sanity tests
response_content = create_and_process_template_response(template)
self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content)
self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content)
self.assertIn("style.css", response_content)
self.assertIn("script.js", response_content)
without_middleware = self.timed_loop(
lambda: create_and_process_template_response(template, use_middleware=False)
)
with_middleware = self.timed_loop(lambda: create_and_process_template_response(template, use_middleware=True))
print("Small page middleware test")
self.report_results(with_middleware, without_middleware)
def test_render_time_with_dependency_for_large_page(self):
from django.template.loader import get_template
template = get_template("mdn_complete_page.html")
response_content = create_and_process_template_response(template, {})
self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content)
self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content)
self.assertIn("test.css", response_content)
self.assertIn("test.js", response_content)
without_middleware = self.timed_loop(
lambda: create_and_process_template_response(template, {}, use_middleware=False)
)
with_middleware = self.timed_loop(
lambda: create_and_process_template_response(template, {}, use_middleware=True)
)
print("Large page middleware test")
self.report_results(with_middleware, without_middleware)
@staticmethod
def report_results(with_middleware, without_middleware):
print(f"Middleware active\t\t{with_middleware:.3f} ms per iteration")
print(f"Middleware inactive\t{without_middleware:.3f} ms per iteration")
time_difference = with_middleware - without_middleware
if without_middleware > with_middleware:
print(f"Decrease of {-100 * time_difference / with_middleware:.2f}%")
else:
print(f"Increase of {100 * time_difference / without_middleware:.2f}%")


@@ -0,0 +1,29 @@
from asv_runner.benchmarks.timeraw import TimerawBenchmark, _SeparateProcessTimer
# Fix for https://github.com/airspeed-velocity/asv_runner/pull/44
def _get_timer(self, *param):
"""
Returns a timer that runs the benchmark function in a separate process.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**timer** (`_SeparateProcessTimer`)
: A timer that runs the function in a separate process.
"""
if param:
def func():
# ---------- OUR CHANGES: ADDED RETURN STATEMENT ----------
return self.func(*param)
# ---------- OUR CHANGES END ----------
else:
func = self.func
return _SeparateProcessTimer(func)
TimerawBenchmark._get_timer = _get_timer


@@ -0,0 +1,66 @@
# ------------ FIX FOR #45 ------------
# See https://github.com/airspeed-velocity/asv_runner/issues/45
# This fix is applied in CI in the `benchmark.yml` file.
# This file is intentionally named `monkeypatch_asv_ci.txt` to avoid being
# loaded as a Python file by `asv`.
# -------------------------------------
def timeit(self, number):
"""
Run the function's code `number` times in a separate Python process, and
return the execution time.
#### Parameters
**number** (`int`)
: The number of times to execute the function's code.
#### Returns
**time** (`float`)
: The time it took to execute the function's code `number` times.
#### Notes
The function's code is executed in a separate Python process to avoid
interference from the parent process. The function can return either a
single string of code to be executed, or a tuple of two strings: the
code to be executed and the setup code to be run before timing.
"""
stmt = self.func()
if isinstance(stmt, tuple):
stmt, setup = stmt
else:
setup = ""
stmt = textwrap.dedent(stmt)
setup = textwrap.dedent(setup)
stmt = stmt.replace(r'"""', r"\"\"\"")
setup = setup.replace(r'"""', r"\"\"\"")
# TODO
# -----------ORIGINAL CODE-----------
# code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
# res = subprocess.check_output([sys.executable, "-c", code])
# return float(res.strip())
# -----------NEW CODE-----------
code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
evaler = textwrap.dedent(
"""
import sys
code = sys.stdin.read()
exec(code)
"""
)
proc = subprocess.Popen([sys.executable, "-c", evaler],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(input=code.encode("utf-8"))
if proc.returncode != 0:
raise RuntimeError(f"Subprocess failed: {stderr.decode()}")
return float(stdout.decode("utf-8").strip())
_SeparateProcessTimer.timeit = timeit
# ------------ END FIX #45 ------------


@@ -1,195 +0,0 @@
# NOTE: This file is more of a playground than a proper test
import timeit
from typing import List, Tuple
from django.template.base import DebugLexer, Lexer, Token
from django_components.util.template_parser import parse_template
def django_lexer(template: str) -> List[Token]:
"""Use Django's built-in lexer to tokenize a template."""
lexer = Lexer(template)
return list(lexer.tokenize())
def django_debug_lexer(template: str) -> List[Token]:
"""Use Django's built-in lexer to tokenize a template."""
lexer = DebugLexer(template)
return list(lexer.tokenize())
def run_benchmark(template: str, num_iterations: int = 5000) -> Tuple[float, float]:
"""Run performance comparison between Django and custom lexer."""
# django_time = timeit.timeit(lambda: django_lexer(template), number=num_iterations)
django_debug_time = timeit.timeit(lambda: django_debug_lexer(template), number=num_iterations)
custom_time = timeit.timeit(lambda: parse_template(template), number=num_iterations)
# return django_time, django_debug_time
return django_debug_time, custom_time
def print_benchmark_results(template: str, django_time: float, custom_time: float, num_iterations: int) -> None:
"""Print formatted benchmark results."""
print(f"\nTemplate: {template}")
print(f"Iterations: {num_iterations}")
print(f"Django Lexer: {django_time:.6f} seconds")
print(f"Custom Lexer: {custom_time:.6f} seconds")
print(f"Difference: {abs(django_time - custom_time):.6f} seconds")
print(f"Custom lexer is {(django_time / custom_time):.2f}x {'faster' if custom_time < django_time else 'slower'}")
if __name__ == "__main__":
test_cases = [
# Simple text
"Hello World",
# Simple variable
"Hello {{ name }}",
# Simple block
"{% if condition %}Hello{% endif %}",
# Complex nested template
"""
{% extends "base.html" %}
{% block content %}
<h1>{{ title }}</h1>
{% for item in items %}
<div class="{{ item.class }}">
{{ item.name }}
{% if item.description %}
<p>{{ item.description }}</p>
{% endif %}
</div>
{% endfor %}
{% endblock %}
""",
# Component with nested tags
"""
{% component 'table'
headers=headers
rows=rows
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
%}
""",
# Real world example
"""
<div class="prose flex flex-col gap-8">
{# Info section #}
<div class="border-b border-neutral-300">
<div class="flex justify-between items-start">
<h3 class="mt-0">Project Info</h3>
{% if editable %}
{% component "Button"
href=project_edit_url
attrs:class="not-prose"
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
%}
Edit Project
{% endcomponent %}
{% endif %}
</div>
<table>
{% for key, value in project_info %}
<tr>
<td class="font-bold pr-4">
{{ key }}:
</td>
<td>
{{ value }}
</td>
</tr>
{% endfor %}
</table>
</div>
{# Status Updates section #}
{% component "ProjectStatusUpdates"
project_id=project.pk
status_updates=status_updates
editable=editable
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
/ %}
<div class="xl:grid xl:grid-cols-2 gap-10">
{# Team section #}
<div class="border-b border-neutral-300">
<div class="flex justify-between items-start">
<h3 class="mt-0">Dcode Team</h3>
{% if editable %}
{% component "Button"
href=edit_project_roles_url
attrs:class="not-prose"
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
%}
Edit Team
{% endcomponent %}
{% endif %}
</div>
{% component "ProjectUsers"
project_id=project.pk
roles_with_users=roles_with_users
editable=False
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
/ %}
</div>
{# POCs section #}
<div>
<div class="flex justify-between items-start max-xl:mt-6">
<h3 class="mt-0">Client POCs</h3>
{% if editable %}
{% component "Button"
href=edit_pocs_url
attrs:class="not-prose"
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
%}
Edit POCs
{% endcomponent %}
{% endif %}
</div>
{% if poc_data %}
<table>
<tr>
<th>Name</th>
<th>Job Title</th>
<th>Hubspot Profile</th>
</tr>
{% for data in poc_data %}
<tr>
<td>{{ data.poc.contact.first_name }} {{ data.poc.contact.last_name }}</td>
<td>{{ data.poc.contact.job_title }}</td>
<td>
{% component "Icon"
href=data.hubspot_url
name="arrow-top-right-on-square"
variant="outline"
color="text-gray-400 hover:text-gray-500"
footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
title="{% trans 'Data Table' %}"
/ %}
</td>
</tr>
{% endfor %}
</table>
{% else %}
<p class="text-sm italic">No entries</p>
{% endif %}
</div>
</div>
</div>
""",
]
for template in test_cases:
django_time, custom_time = run_benchmark(template)
print_benchmark_results(template, django_time, custom_time, 200)

benchmarks/utils.py (new file, 99 lines)

@@ -0,0 +1,99 @@
import os
import sys
from importlib.abc import Loader
from importlib.util import spec_from_loader, module_from_spec
from types import ModuleType
from typing import Any, Dict, List, Optional
# NOTE: benchmark_name constraints:
# - MUST BE UNIQUE
# - MUST NOT CONTAIN `-`
# - MUST START WITH `time_`, `mem_`, `peakmem_`
# See https://github.com/airspeed-velocity/asv/pull/1470
def benchmark(
*,
pretty_name: Optional[str] = None,
timeout: Optional[int] = None,
group_name: Optional[str] = None,
params: Optional[Dict[str, List[Any]]] = None,
number: Optional[int] = None,
min_run_count: Optional[int] = None,
include_in_quick_benchmark: bool = False,
**kwargs,
):
def decorator(func):
# For pull requests, we want to run benchmarks only for a subset of tests,
# because the full set of tests takes about 10 minutes to run (5 min per commit).
# This is done by setting DJC_BENCHMARK_QUICK=1 in the environment.
if os.getenv("DJC_BENCHMARK_QUICK") and not include_in_quick_benchmark:
# By setting the benchmark name to something that does NOT start with
# valid prefixes like `time_`, `mem_`, or `peakmem_`, this function will be ignored by asv.
func.benchmark_name = "noop"
return func
# "group_name" is our custom field, which we actually convert to asv's "benchmark_name"
if group_name is not None:
benchmark_name = f"{group_name}.{func.__name__}"
func.benchmark_name = benchmark_name
# Also "params" is custom, so we normalize it to "params" and "param_names"
if params is not None:
func.params, func.param_names = list(params.values()), list(params.keys())
if pretty_name is not None:
func.pretty_name = pretty_name
if timeout is not None:
func.timeout = timeout
if number is not None:
func.number = number
if min_run_count is not None:
func.min_run_count = min_run_count
# Additional, untyped kwargs
for k, v in kwargs.items():
setattr(func, k, v)
return func
return decorator
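# Usage sketch (illustrative only; the benchmark below is hypothetical and
# not part of this file):
#
#     @benchmark(
#         pretty_name="render lg template",
#         group_name="templating",
#         params={"size": ["lg", "sm"]},
#         include_in_quick_benchmark=True,
#     )
#     def time_render(size):
#         ...
#
# This sets benchmark_name to "templating.time_render", whose final segment
# keeps the "time_" prefix that asv uses to detect the benchmark type
# (see the NOTE at the top of this file).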
class VirtualModuleLoader(Loader):
def __init__(self, code_string):
self.code_string = code_string
def exec_module(self, module):
exec(self.code_string, module.__dict__)
def create_virtual_module(name: str, code_string: str, file_path: str) -> ModuleType:
"""
To avoid the headaches of importing the tested code from another directory,
we create a "virtual" module that we can import from anywhere.
E.g.
```py
from benchmarks.utils import create_virtual_module
create_virtual_module("my_module", "print('Hello, world!')", __file__)
# Now you can import my_module from anywhere
import my_module
```
"""
# Create the module specification
spec = spec_from_loader(name, VirtualModuleLoader(code_string))
# Create the module
module = module_from_spec(spec) # type: ignore[arg-type]
module.__file__ = file_path
module.__name__ = name
# Add it to sys.modules
sys.modules[name] = module
# Execute the module
spec.loader.exec_module(module) # type: ignore[union-attr]
return module

161
docs/benchmarks/asv.css Normal file
View file

@ -0,0 +1,161 @@
/* Basic navigation */
.asv-navigation {
padding: 2px;
}
nav ul li.active a {
height: 52px;
}
nav li.active span.navbar-brand {
background-color: #e7e7e7;
height: 52px;
}
nav li.active span.navbar-brand:hover {
background-color: #e7e7e7;
}
.navbar-default .navbar-link {
color: #2458D9;
}
.panel-body {
padding: 0;
}
.panel {
margin-bottom: 4px;
-webkit-box-shadow: none;
box-shadow: none;
border-radius: 0;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.panel-default>.panel-heading,
.panel-heading {
font-size: 12px;
font-weight:bold;
padding: 2px;
text-align: center;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
background-color: #eee;
}
.btn,
.btn-group,
.btn-group-vertical>.btn:first-child,
.btn-group-vertical>.btn:last-child:not(:first-child),
.btn-group-vertical>.btn:last-child {
border: none;
border-radius: 0px;
overflow: hidden;
}
.btn-default:focus, .btn-default:active, .btn-default.active {
border: none;
color: #fff;
background-color: #99bfcd;
}
#range {
font-family: monospace;
text-align: center;
background: #ffffff;
}
.form-control {
border: none;
border-radius: 0px;
font-size: 12px;
padding: 0px;
}
.tooltip-inner {
min-width: 100px;
max-width: 800px;
text-align: left;
white-space: pre-wrap;
font-family: monospace;
}
/* Benchmark tree */
.nav-list {
font-size: 12px;
padding: 0;
padding-left: 15px;
}
.nav-list>li {
overflow-x: hidden;
}
.nav-list>li>a {
padding: 0;
padding-left: 5px;
color: #000;
}
.nav-list>li>a:focus {
color: #fff;
background-color: #99bfcd;
box-shadow: inset 0 3px 5px rgba(0,0,0,.125);
}
.nav-list>li>.nav-header {
white-space: nowrap;
font-weight: 500;
margin-bottom: 2px;
}
.caret-right {
display: inline-block;
width: 0;
height: 0;
margin-left: 2px;
vertical-align: middle;
border-left: 4px solid;
border-bottom: 4px solid transparent;
border-top: 4px solid transparent;
}
/* Summary page */
.benchmark-group > h1 {
text-align: center;
}
.benchmark-container {
width: 300px;
height: 116px;
padding: 4px;
border-radius: 3px;
}
.benchmark-container:hover {
background-color: #eee;
}
.benchmark-plot {
width: 292px;
height: 88px;
}
.benchmark-text {
font-size: 12px;
color: #000;
width: 292px;
overflow: hidden;
}
#extra-buttons {
margin: 1em;
}
#extra-buttons a {
border: solid 1px #ccc;
}

525
docs/benchmarks/asv.js Normal file
View file

@ -0,0 +1,525 @@
'use strict';
$(document).ready(function() {
/* GLOBAL STATE */
/* The index.json content as returned from the server */
var main_timestamp = '';
var main_json = {};
/* Extra pages: {name: show_function} */
var loaded_pages = {};
/* Previous window scroll positions */
var window_scroll_positions = {};
/* Previous window hash location */
var window_last_location = null;
/* Graph data cache */
var graph_cache = {};
var graph_cache_max_size = 5;
var colors = [
'#247AAD',
'#E24A33',
'#988ED5',
'#777777',
'#FBC15E',
'#8EBA42',
'#FFB5B8'
];
var time_units = [
['ps', 'picoseconds', 0.000000000001],
['ns', 'nanoseconds', 0.000000001],
['μs', 'microseconds', 0.000001],
['ms', 'milliseconds', 0.001],
['s', 'seconds', 1],
['m', 'minutes', 60],
['h', 'hours', 60 * 60],
['d', 'days', 60 * 60 * 24],
['w', 'weeks', 60 * 60 * 24 * 7],
['y', 'years', 60 * 60 * 24 * 7 * 52],
['C', 'centuries', 60 * 60 * 24 * 7 * 52 * 100]
];
var mem_units = [
['', 'bytes', 1],
['k', 'kilobytes', 1000],
['M', 'megabytes', 1000000],
['G', 'gigabytes', 1000000000],
['T', 'terabytes', 1000000000000]
];
function pretty_second(x) {
for (var i = 0; i < time_units.length - 1; ++i) {
if (Math.abs(x) < time_units[i+1][2]) {
return (x / time_units[i][2]).toFixed(3) + time_units[i][0];
}
}
return 'inf';
}
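/* Example: pretty_second(0.000123) falls into the microsecond bucket
   and returns "123.000μs". */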
function pretty_byte(x) {
for (var i = 0; i < mem_units.length - 1; ++i) {
if (Math.abs(x) < mem_units[i+1][2]) {
break;
}
}
if (i == 0) {
return x + '';
}
return (x / mem_units[i][2]).toFixed(3) + mem_units[i][0];
}
function pretty_unit(x, unit) {
if (unit == "seconds") {
return pretty_second(x);
}
else if (unit == "bytes") {
return pretty_byte(x);
}
else if (unit && unit != "unit") {
return '' + x.toPrecision(3) + ' ' + unit;
}
else {
return '' + x.toPrecision(3);
}
}
function pad_left(s, c, num) {
s = '' + s;
while (s.length < num) {
s = c + s;
}
return s;
}
function format_date_yyyymmdd(date) {
return (pad_left(date.getFullYear(), '0', 4)
+ '-' + pad_left(date.getMonth() + 1, '0', 2)
+ '-' + pad_left(date.getDate(), '0', 2));
}
function format_date_yyyymmdd_hhmm(date) {
return (format_date_yyyymmdd(date) + ' '
+ pad_left(date.getHours(), '0', 2)
+ ':' + pad_left(date.getMinutes(), '0', 2));
}
/* Convert a flat index to permutation to the corresponding value */
function param_selection_from_flat_idx(params, idx) {
var selection = [];
if (idx < 0) {
idx = 0;
}
for (var k = params.length-1; k >= 0; --k) {
var j = idx % params[k].length;
selection.unshift([j]);
idx = (idx - j) / params[k].length;
}
selection.unshift([null]);
return selection;
}
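/* Worked example: with params = [['a', 'b'], ['x', 'y', 'z']] the six
   flat combinations are ordered with the last parameter varying fastest,
   so idx = 4 decodes (as a mixed-radix number) to [[null], [1], [1]]:
   the leading null stands for the time axis, and the remaining indices
   select the combination ('b', 'y'). */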
/* Convert a benchmark parameter value from their native Python
repr format to a number or a string, ready for presentation */
function convert_benchmark_param_value(value_repr) {
var match = Number(value_repr);
if (!isNaN(match)) {
return match;
}
/* Python str */
match = value_repr.match(/^'(.+)'$/);
if (match) {
return match[1];
}
/* Python unicode */
match = value_repr.match(/^u'(.+)'$/);
if (match) {
return match[1];
}
/* Python class */
match = value_repr.match(/^<class '(.+)'>$/);
if (match) {
return match[1];
}
return value_repr;
}
/* Convert loaded graph data to a format flot understands, by
treating either time or one of the parameters as x-axis,
and selecting only one value of the remaining axes */
function filter_graph_data(raw_series, x_axis, other_indices, params) {
if (params.length == 0) {
/* Simple time series */
return raw_series;
}
/* Compute position of data entry in the results list,
and stride corresponding to plot x-axis parameter */
var stride = 1;
var param_stride = 0;
var param_idx = 0;
for (var k = params.length - 1; k >= 0; --k) {
if (k == x_axis - 1) {
param_stride = stride;
}
else {
param_idx += other_indices[k + 1] * stride;
}
stride *= params[k].length;
}
if (x_axis == 0) {
/* x-axis is time axis */
var series = new Array(raw_series.length);
for (var k = 0; k < raw_series.length; ++k) {
if (raw_series[k][1] === null) {
series[k] = [raw_series[k][0], null];
} else {
series[k] = [raw_series[k][0],
raw_series[k][1][param_idx]];
}
}
return series;
}
else {
/* x-axis is some parameter axis */
var time_idx = null;
if (other_indices[0] === null) {
time_idx = raw_series.length - 1;
}
else {
/* Need to search for the correct time value */
for (var k = 0; k < raw_series.length; ++k) {
if (raw_series[k][0] == other_indices[0]) {
time_idx = k;
break;
}
}
if (time_idx === null) {
/* No data points */
return [];
}
}
var x_values = params[x_axis - 1];
var series = new Array(x_values.length);
for (var k = 0; k < x_values.length; ++k) {
if (raw_series[time_idx][1] === null) {
series[k] = [convert_benchmark_param_value(x_values[k]),
null];
}
else {
series[k] = [convert_benchmark_param_value(x_values[k]),
raw_series[time_idx][1][param_idx]];
}
param_idx += param_stride;
}
return series;
}
}
function filter_graph_data_idx(raw_series, x_axis, flat_idx, params) {
var selection = param_selection_from_flat_idx(params, flat_idx);
var flat_selection = [];
$.each(selection, function(i, v) {
flat_selection.push(v[0]);
});
return filter_graph_data(raw_series, x_axis, flat_selection, params);
}
/* Escape special characters in graph item file names.
The implementation must match asv.util.sanitize_filename */
function sanitize_filename(name) {
var bad_re = /[<>:"\/\\^|?*\x00-\x1f]/g;
var bad_names = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3",
"COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
"LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
"LPT9"];
name = name.replace(bad_re, "_");
if (bad_names.indexOf(name.toUpperCase()) != -1) {
name = name + "_";
}
return name;
}
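/* Example: sanitize_filename('branch-feat/x') gives 'branch-feat_x',
   and a reserved Windows name such as 'CON' becomes 'CON_'. */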
/* Given a specific group of parameters, generate the URL to
use to load that graph.
The implementation must match asv.graph.Graph.get_file_path
*/
function graph_to_path(benchmark_name, state) {
var parts = [];
$.each(state, function(key, value) {
var part;
if (value === null) {
part = key + "-null";
} else if (value) {
part = key + "-" + value;
} else {
part = key;
}
parts.push(sanitize_filename('' + part));
});
parts.sort();
parts.splice(0, 0, "graphs");
parts.push(sanitize_filename(benchmark_name));
/* Escape URI components */
parts = $.map(parts, function (val) { return encodeURIComponent(val); });
return parts.join('/') + ".json";
}
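/* Example (hypothetical names): graph_to_path('time_render',
   {branch: 'master', machine: 'ci'}) yields
   'graphs/branch-master/machine-ci/time_render.json'. */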
/*
Load and cache graph data (on javascript side)
*/
function load_graph_data(url, success, failure) {
var dfd = $.Deferred();
if (graph_cache[url]) {
setTimeout(function() {
dfd.resolve(graph_cache[url]);
}, 1);
}
else {
$.ajax({
url: url + '?timestamp=' + $.asv.main_timestamp,
dataType: "json",
cache: true
}).done(function(data) {
if (Object.keys(graph_cache).length > graph_cache_max_size) {
$.each(Object.keys(graph_cache), function (i, key) {
delete graph_cache[key];
});
}
graph_cache[url] = data;
dfd.resolve(data);
}).fail(function() {
dfd.reject();
});
}
return dfd.promise();
}
/*
Parse hash string, assuming format similar to standard URL
query strings
*/
function parse_hash_string(str) {
var info = {location: [''], params: {}};
if (str && str[0] == '#') {
str = str.slice(1);
}
if (str && str[0] == '/') {
str = str.slice(1);
}
var match = str.match(/^([^?]*?)\?/);
if (match) {
info['location'] = decodeURIComponent(match[1]).replace(/\/+/, '/').split('/');
var rest = str.slice(match[1].length+1);
var parts = rest.split('&');
for (var i = 0; i < parts.length; ++i) {
var part = parts[i].split('=');
if (part.length != 2) {
continue;
}
var key = decodeURIComponent(part[0].replace(/\+/g, " "));
var value = decodeURIComponent(part[1].replace(/\+/g, " "));
if (value == '[none]') {
value = null;
}
if (info['params'][key] === undefined) {
info['params'][key] = [value];
}
else {
info['params'][key].push(value);
}
}
}
else {
info['location'] = decodeURIComponent(str).replace(/\/+/, '/').split('/');
}
return info;
}
/*
Generate a hash string, inverse of parse_hash_string
*/
function format_hash_string(info) {
var parts = info['params'];
var str = '#' + info['location'];
if (parts) {
str = str + '?';
var first = true;
$.each(parts, function (key, values) {
$.each(values, function (idx, value) {
if (!first) {
str = str + '&';
}
if (value === null) {
value = '[none]';
}
str = str + encodeURIComponent(key) + '=' + encodeURIComponent(value);
first = false;
});
});
}
return str;
}
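/* Example round trip (hypothetical benchmark name):
   parse_hash_string('#time_render?branch=master&p-size=lg') gives
   {location: ['time_render'], params: {branch: ['master'], 'p-size': ['lg']}},
   and format_hash_string() on that object rebuilds the same string. */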
/*
Dealing with sub-pages
*/
function show_page(name, params) {
if (loaded_pages[name] !== undefined) {
$("#nav ul li.active").removeClass('active');
$("#nav-li-" + name).addClass('active');
$("#graph-display").hide();
$("#summarygrid-display").hide();
$("#summarylist-display").hide();
$('#regressions-display').hide();
$('.tooltip').remove();
loaded_pages[name](params);
return true;
}
else {
return false;
}
}
function hashchange() {
var info = parse_hash_string(window.location.hash);
/* Keep track of window scroll position; makes the back-button work */
var old_scroll_pos = window_scroll_positions[info.location.join('/')];
window_scroll_positions[window_last_location] = $(window).scrollTop();
window_last_location = info.location.join('/');
/* Redirect to correct handler */
if (show_page(info.location, info.params)) {
/* show_page does the work */
}
else {
/* Display benchmark page */
info.params['benchmark'] = info.location[0];
show_page('graphdisplay', info.params);
}
/* Scroll back to previous position, if any */
if (old_scroll_pos !== undefined) {
$(window).scrollTop(old_scroll_pos);
}
}
function get_commit_hash(revision) {
var commit_hash = main_json.revision_to_hash[revision];
if (commit_hash) {
// Return printable commit hash
commit_hash = commit_hash.slice(0, main_json.hash_length);
}
return commit_hash;
}
function get_revision(commit_hash) {
var rev = null;
$.each(main_json.revision_to_hash, function(revision, full_commit_hash) {
if (full_commit_hash.startsWith(commit_hash)) {
rev = revision;
// break the $.each loop
return false;
}
});
return rev;
}
function init_index() {
/* Fetch the main index.json and then set up the page elements
based on it. */
$.ajax({
url: "index.json" + '?timestamp=' + $.asv.main_timestamp,
dataType: "json",
cache: true
}).done(function (index) {
main_json = index;
$.asv.main_json = index;
/* Page title */
var project_name = $("#project-name")[0];
project_name.textContent = index.project;
project_name.setAttribute("href", index.project_url);
$("#project-name").textContent = index.project;
document.title = "airspeed velocity of an unladen " + index.project;
$(window).on('hashchange', hashchange);
$('#graph-display').hide();
$('#regressions-display').hide();
$('#summarygrid-display').hide();
$('#summarylist-display').hide();
hashchange();
}).fail(function () {
$.asv.ui.network_error();
});
}
function init() {
/* Fetch the info.json */
$.ajax({
url: "info.json",
dataType: "json",
cache: false
}).done(function (info) {
main_timestamp = info['timestamp'];
$.asv.main_timestamp = main_timestamp;
init_index();
}).fail(function () {
$.asv.ui.network_error();
});
}
/*
Set up $.asv
*/
this.register_page = function(name, show_function) {
loaded_pages[name] = show_function;
}
this.parse_hash_string = parse_hash_string;
this.format_hash_string = format_hash_string;
this.filter_graph_data = filter_graph_data;
this.filter_graph_data_idx = filter_graph_data_idx;
this.convert_benchmark_param_value = convert_benchmark_param_value;
this.param_selection_from_flat_idx = param_selection_from_flat_idx;
this.graph_to_path = graph_to_path;
this.load_graph_data = load_graph_data;
this.get_commit_hash = get_commit_hash;
this.get_revision = get_revision;
this.main_timestamp = main_timestamp; /* Updated after info.json loads */
this.main_json = main_json; /* Updated after index.json loads */
this.format_date_yyyymmdd = format_date_yyyymmdd;
this.format_date_yyyymmdd_hhmm = format_date_yyyymmdd_hhmm;
this.pretty_unit = pretty_unit;
this.time_units = time_units;
this.mem_units = mem_units;
this.colors = colors;
$.asv = this;
/*
Launch it
*/
init();
});

231
docs/benchmarks/asv_ui.js Normal file
View file

@ -0,0 +1,231 @@
'use strict';
$(document).ready(function() {
function make_panel(nav, heading) {
var panel = $('<div class="panel panel-default"/>');
nav.append(panel);
var panel_header = $(
'<div class="panel-heading">' + heading + '</div>');
panel.append(panel_header);
var panel_body = $('<div class="panel-body"/>');
panel.append(panel_body);
return panel_body;
}
function make_value_selector_panel(nav, heading, values, setup_callback) {
var panel_body = make_panel(nav, heading);
var vertical = false;
var buttons = $('<div class="btn-group" ' +
'data-toggle="buttons"/>');
panel_body.append(buttons);
$.each(values, function (idx, value) {
var button = $(
'<a class="btn btn-default btn-xs active" role="button"/>');
setup_callback(idx, value, button);
buttons.append(button);
});
return panel_body;
}
function reflow_value_selector_panels(no_timeout) {
$('.panel').each(function (i, panel_obj) {
var panel = $(panel_obj);
panel.find('.btn-group').each(function (i, buttons_obj) {
var buttons = $(buttons_obj);
var width = 0;
if (buttons.hasClass('reflow-done')) {
/* already processed */
return;
}
$.each(buttons.children(), function(idx, value) {
width += value.scrollWidth;
});
var max_width = panel_obj.clientWidth;
if (width >= max_width) {
buttons.addClass("btn-group-vertical");
buttons.css("width", "100%");
buttons.css("max-height", "20ex");
buttons.css("overflow-y", "auto");
}
else {
buttons.addClass("btn-group-justified");
}
/* The widths can be zero if the UI is not fully laid out yet,
so mark the adjustment complete only if this is not the case */
if (width > 0 && max_width > 0) {
buttons.addClass("reflow-done");
}
});
});
if (!no_timeout) {
/* Call again asynchronously, in case the UI was not fully laid out yet */
setTimeout(function() { $.asv.ui.reflow_value_selector_panels(true); }, 0);
}
}
function network_error(ajax, status, error) {
$("#error-message").text(
"Error fetching content. " +
"Perhaps web server has gone down.");
$("#error").modal('show');
}
function hover_graph(element, graph_url, benchmark_basename, parameter_idx, revisions) {
/* Show the summary graph as a popup */
var plot_div = $('<div/>');
plot_div.css('width', '11.8em');
plot_div.css('height', '7em');
plot_div.css('border', '2px solid black');
plot_div.css('background-color', 'white');
function update_plot() {
var markings = [];
if (revisions) {
$.each(revisions, function(i, revs) {
var rev_a = revs[0];
var rev_b = revs[1];
if (rev_a !== null) {
markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_a, to: rev_a }});
markings.push({ color: "rgba(255,0,0,0.1)", xaxis: { from: rev_a, to: rev_b }});
}
markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_b, to: rev_b }});
});
}
$.asv.load_graph_data(
graph_url
).done(function (data) {
var params = $.asv.main_json.benchmarks[benchmark_basename].params;
data = $.asv.filter_graph_data_idx(data, 0, parameter_idx, params);
var options = {
colors: ['#000'],
series: {
lines: {
show: true,
lineWidth: 2
},
shadowSize: 0
},
grid: {
borderWidth: 1,
margin: 0,
labelMargin: 0,
axisMargin: 0,
minBorderMargin: 0,
markings: markings,
},
xaxis: {
ticks: [],
},
yaxis: {
ticks: [],
min: 0
},
legend: {
show: false
}
};
var plot = $.plot(plot_div, [{data: data}], options);
}).fail(function () {
// TODO: Handle failure
});
return plot_div;
}
element.popover({
placement: 'left auto',
trigger: 'hover',
html: true,
delay: 50,
content: $('<div/>').append(plot_div)
});
element.on('show.bs.popover', update_plot);
}
function hover_summary_graph(element, benchmark_basename) {
/* Show the summary graph as a popup */
var plot_div = $('<div/>');
plot_div.css('width', '11.8em');
plot_div.css('height', '7em');
plot_div.css('border', '2px solid black');
plot_div.css('background-color', 'white');
function update_plot() {
var markings = [];
$.asv.load_graph_data(
'graphs/summary/' + benchmark_basename + '.json'
).done(function (data) {
var options = {
colors: $.asv.colors,
series: {
lines: {
show: true,
lineWidth: 2
},
shadowSize: 0
},
grid: {
borderWidth: 1,
margin: 0,
labelMargin: 0,
axisMargin: 0,
minBorderMargin: 0,
markings: markings,
},
xaxis: {
ticks: [],
},
yaxis: {
ticks: [],
min: 0
},
legend: {
show: false
}
};
var plot = $.plot(plot_div, [{data: data}], options);
}).fail(function () {
// TODO: Handle failure
});
return plot_div;
}
element.popover({
placement: 'left auto',
trigger: 'hover',
html: true,
delay: 50,
content: $('<div/>').append(plot_div)
});
element.on('show.bs.popover', update_plot);
}
/*
Set up $.asv.ui
*/
this.network_error = network_error;
this.make_panel = make_panel;
this.make_value_selector_panel = make_value_selector_panel;
this.reflow_value_selector_panels = reflow_value_selector_panels;
this.hover_graph = hover_graph;
this.hover_summary_graph = hover_summary_graph;
$.asv.ui = this;
});

23
docs/benchmarks/error.html Normal file
View file

@ -0,0 +1,23 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>airspeed velocity error</title>
<link rel="shortcut icon" href="swallow.ico"/>
</head>
<body>
<h1>
<img src="swallow.png" width="22" height="22" alt="swallow"></img>
Can not determine continental origin of swallow.
</h1>
<h3>
One or more external (JavaScript) dependencies of airspeed velocity failed to load.
</h3>
<p>
Make sure you have an active internet connection and enable 3rd-party scripts
in your browser the first time you load airspeed velocity.
</p>
</body>
</html>

File diff suppressed because it is too large

38 one-line benchmark graph data files (under docs/benchmarks/graphs/ per asv's layout; the individual file names are not preserved in this view). Each new file is a single JSON line of [revision, value] pairs for revisions 1566 and 1569, where the value is either a scalar or a per-parameter list:

[[1566, [52121600.0, 54030336.0]], [1569, [52006912.0, 54030336.0]]]
[[1566, [52129792.0, 54525952.0]], [1569, [52019200.0, 54542336.0]]]
[[1566, [43671552.0, 43925504.0]], [1569, [43667456.0, 43921408.0]]]
[[1566, [43675648.0, 43929600.0]], [1569, [43671552.0, 43925504.0]]]
[[1566, [0.06844023999997262, 0.25948228500001846]], [1569, [0.0695505670000216, 0.2585547080000197]]]
[[1566, [0.03282115399997565, 0.1461098879999838]], [1569, [0.03280580699998836, 0.14556000400000357]]]
[[1566, [0.0035044859999970868, 0.004806205000022601]], [1569, [0.003512746000012612, 0.004752005000000281]]]
[[1566, [9.84330000051159e-05, 0.000542360999986613]], [1569, [9.899399998403169e-05, 0.0005454930000041713]]]
[[1566, [0.21055065100000547, 0.20605139500003133]], [1569, [0.20886826499997824, 0.20461195600000792]]]
[[1566, 0.19086866300000338], [1569, 0.1891247499999622]]
[[1566, [54018048.0, 53542912.0]], [1569, [53940224.0, 53530624.0]]]
[[1566, [54571008.0, 54530048.0]], [1569, [54566912.0, 54525952.0]]]
[[1566, [43925504.0, 43925504.0]], [1569, [43921408.0, 43687936.0]]]
[[1566, [43929600.0, 43929600.0]], [1569, [43925504.0, 43925504.0]]]
[[1566, [0.2582084590000022, 0.2630794039999955]], [1569, [0.2609166309999864, 0.26201485799998636]]]
[[1566, [0.14612163999998984, 0.15001642000004267]], [1569, [0.14627813999999262, 0.15037803699999586]]]
[[1566, [0.004762013999993542, 0.004786298999988503]], [1569, [0.004790214000024662, 0.004819428000018888]]]
[[1566, [0.0005448759999922004, 0.0005334049999987656]], [1569, [0.0005436999999801628, 0.0005416959999706705]]]
[[1566, [0.2058522649999759, 0.2053180329999691]], [1569, [0.20448447999996233, 0.2037191150000126]]]
[[1566, 53067386.97974115], [1569, 53008970.27562818]]
[[1566, 53314412.08868371], [1569, 53265830.36855053]]
[[1566, 43798343.942005485], [1569, 43794247.92479085]]
[[1566, 43802439.95921688], [1569, 43798343.942005485]]
[[1566, 0.13326301010086242], [1569, 0.13409931596367813]]
[[1566, 0.06924951360815947], [1569, 0.06910291888293614]]
[[1566, 0.0041040563026955666], [1569, 0.0040856561964745545]]
[[1566, 0.00023105458297652734], [1569, 0.00023238014982717938]]
[[1566, 0.20828887477903343], [1569, 0.20672915674372966]]
[[1566, 0.19086866300000338], [1569, 0.1891247499999622]]
[[1566, 53779955.28517828], [1569, 53735033.72493383]]
[[1566, 54550524.155578785], [1569, 54546428.1552901]]
[[1566, 43925504.00000001], [1569, 43804516.45360199]]
[[1566, 43929599.99999999], [1569, 43925504.00000001]]
[[1566, 0.2606325526511949], [1569, 0.26146516789296487]]
[[1566, 0.14805622350084952], [1569, 0.1483139223040422]]
[[1566, 0.004774141058462804], [1569, 0.004804798796807347]]
[[1566, 0.0005391099913516415], [1569, 0.0005426970749629188]]
[[1566, 0.20558497546849924], [1569, 0.20410143874267547]]

161
docs/benchmarks/index.html Normal file
View file

@ -0,0 +1,161 @@
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>airspeed velocity</title>
<script type="text/javascript">
function js_load_failure() {
window.location = "error.html";
}
</script>
<script src="https://code.jquery.com/jquery-3.3.1.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.min.js" integrity="sha512-eO1AKNIv7KSFl5n81oHCKnYLMi8UV4wWD1TcLYKNTssoECDuiGhoRsQkdiZkl8VUjoms2SeJY7zTSw5noGSqbQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.time.min.js" integrity="sha512-lcRowrkiQvFli9HkuJ2Yr58iEwAtzhFNJ1Galsko4SJDhcZfUub8UxGlMQIsMvARiTqx2pm7g6COxJozihOixA==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.selection.min.js" integrity="sha512-3EUG0t3qfbLaGN3FXO86i+57nvxHOXvIb/xMSKRrCuX/HXdn1bkbqwAeLd6U1PDmuEB2cnKhfM+SGLAVQbyjWQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.categories.min.js" integrity="sha512-x4QGSZkQ57pNuICMFFevIhDer5NVB5eJCRmENlCdJukMs8xWFH8OHfzWQVSkl9VQ4+4upPPTkHSAewR6KNMjGA==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.axislabels.js"></script>
<script src="https://cdn.jsdelivr.net/npm/flot-orderbars@1.0.0/js/jquery.flot.orderBars.js" integrity="sha256-OXNbT0b5b/TgglckAfR8VaJ2ezZv0dHoIeRKjYMKEr8=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/stupidtable/1.0.1/stupidtable.min.js" integrity="sha512-GM3Ds3dUrgkpKVXc+4RxKbQDoeTemdlzXxn5d/QCOJT6EFdEufu1UTVBpIFDLd6YjIhSThNe+zpo1mwqzNq4GQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/blueimp-md5/2.19.0/js/md5.min.js" integrity="sha512-8pbzenDolL1l5OPSsoURCx9TEdMFTaeFipASVrMYKhuYtly+k3tcsQYliOEKTmuB1t7yuzAiVo+yd7SJz+ijFQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@3.1.1/dist/js/bootstrap.min.js" integrity="sha256-iY0FoX8s/FEg3c26R6iFw3jAtGbzDwcA5QJ1fiS0A6E=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@3.1.1/dist/css/bootstrap.min.css" integrity="sha256-6VA0SGkrc43SYPvX98q/LhHwm2APqX5us6Vuulsafps=" crossorigin="anonymous">
<script language="javascript" type="text/javascript"
src="asv.js">
</script>
<script language="javascript" type="text/javascript"
src="asv_ui.js">
</script>
<script language="javascript" type="text/javascript"
src="summarygrid.js">
</script>
<script language="javascript" type="text/javascript"
src="summarylist.js">
</script>
<script language="javascript" type="text/javascript"
src="graphdisplay.js">
</script>
<script language="javascript" type="text/javascript"
src="regressions.js">
</script>
<link href="asv.css" rel="stylesheet" type="text/css"/>
<link href="regressions.css" rel="stylesheet" type="text/css"/>
<link href="summarylist.css" rel="stylesheet" type="text/css"/>
<link rel="shortcut icon" href="swallow.ico"/>
<link rel="alternate" type="application/atom+xml" title="Regressions" href="regressions.xml"/>
</head>
<body>
<nav id="nav" class="navbar navbar-left navbar-default navbar-fixed-top" role="navigation">
<ul class="nav navbar-nav navbar-left">
<li>
<p class="navbar-text">
<a href="http://github.com/airspeed-velocity/asv/" class="navbar-link" target="_blank"><img src="swallow.png" width="22" height="22" alt="swallow"></img>airspeed velocity</a>
of an unladen
<a id="project-name" href="#" class="navbar-link" target="_blank">project</a>
</p>
</li>
<li id="nav-li-" class="active"><a href="#/">Benchmark grid</a></li>
<li id="nav-li-summarylist"><a href="#/summarylist">Benchmark list</a></li>
<li id="nav-li-regressions"><a href="#/regressions">Regressions</a></li>
<li id="nav-li-graphdisplay">
<span class="navbar-brand" id="title">
benchmark
</span>
</li>
</ul>
</nav>
<div id="summarygrid-display" style="position: absolute; left: 0; top: 55px; width: 100%; height: 100%">
</div>
<div id="summarylist-display" style="width: 100%; height: 100%">
<div id="summarylist-navigation" class="asv-navigation" style="position: absolute; left: 0; top: 55px; bottom: 0; width: 200px; overflow-y: scroll">
</div>
<div id="summarylist-body" style="position: absolute; left: 200px; top: 55px; bottom: 0px; right: 0px; overflow-y: scroll;">
</div>
</div>
<div id="graph-display" style="width: 100%; height: 100%;">
<div id="graphdisplay-navigation" class="asv-navigation" style="position: absolute; left: 0; top: 55px; bottom: 0; width: 200px; overflow-y: scroll">
<div class="panel panel-default">
<div class="panel-heading">
commits
</div>
<div class="panel-body">
<input id="range" type="text" class="form-control" size="24" readonly/>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading">
plot settings
</div>
<div class="panel-body">
<div class="btn-group-vertical" style="width: 100%" data-toggle="buttons">
<a id="log-scale" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Use a logarithmic scale on the y-axis">
log scale
</a>
<a id="zoom-y-axis" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Zoom y axis to the range of the data, rather than down to zero.">
zoom <i>y</i> axis
</a>
<a id="reference" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Select a reference point">
reference
</a>
<a id="even-spacing" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Space commits evenly, rather than by revision, along the x-axis">
even commit spacing
</a>
<a id="date-scale" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Space commits by commit date along the x-axis">
date scale
</a>
<a id="show-legend" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Show legend in the graph">
legend
</a>
</div>
</div>
</div>
</div>
<div style="position: absolute; left: 220px; top: 60px; bottom: 10px; right: 20px;">
<div id="graph">
<div style="position: absolute; top: 48px; left: 0; right: 0; bottom: 100px;">
<div id="main-graph" style="min-height: 100px; width: 100%; height: 100%"></div>
</div>
<div style="position: absolute; height: 100px; left: 0; right: 0; bottom: 0; padding-top: 24px">
<div id="overview" style="min-height: 100px; width: 100%; height: 100%"></div>
</div>
</div>
</div>
</div>
<div id="regressions-display" style="position: absolute; left: 0; top: 55px; width: 100%; height: 100%">
<div id="regressions-body">
</div>
</div>
<!-- A modal dialog box for displaying error messages -->
<div class="modal fade" id="error" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title alert alert-danger" id="myModalLabel">Error</h4>
</div>
<div class="modal-body" id="error-message">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

4
docs/benchmarks/info.json Normal file
View file

@ -0,0 +1,4 @@
{
"asv-version": "0.6.4",
"timestamp": 1740214032970
}

140
docs/benchmarks/jquery.flot.axislabels.js Normal file
View file

@ -0,0 +1,140 @@
/*
CAxis Labels Plugin for flot. :P
Copyright (c) 2010 Xuan Luo
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
(function ($) {
var options = { };
function init(plot) {
// This is kind of a hack. There are no hooks in Flot between
// the creation and measuring of the ticks (setTicks, measureTickLabels
// in setupGrid() ) and the drawing of the ticks and plot box
// (insertAxisLabels in setupGrid() ).
//
// Therefore, we use a trick where we run the draw routine twice:
// the first time to get the tick measurements, so that we can change
// them, and then have it draw it again.
var secondPass = false;
plot.hooks.draw.push(function (plot, ctx) {
if (!secondPass) {
// MEASURE AND SET OPTIONS
$.each(plot.getAxes(), function(axisName, axis) {
var opts = axis.options // Flot 0.7
|| plot.getOptions()[axisName]; // Flot 0.6
if (!opts || !opts.axisLabel)
return;
var w, h;
if (opts.axisLabelUseCanvas != false)
opts.axisLabelUseCanvas = true;
if (opts.axisLabelUseCanvas) {
// canvas text
if (!opts.axisLabelFontSizePixels)
opts.axisLabelFontSizePixels = 14;
if (!opts.axisLabelFontFamily)
opts.axisLabelFontFamily = 'sans-serif';
// since we currently always display x as horiz.
// and y as vertical, we only care about the height
w = opts.axisLabelFontSizePixels;
h = opts.axisLabelFontSizePixels;
} else {
// HTML text
var elem = $('<div class="axisLabels" style="position:absolute;">' + opts.axisLabel + '</div>');
plot.getPlaceholder().append(elem);
w = elem.outerWidth(true);
h = elem.outerHeight(true);
elem.remove();
}
if (axisName.charAt(0) == 'x')
axis.labelHeight += h;
else
axis.labelWidth += w;
opts.labelHeight = axis.labelHeight;
opts.labelWidth = axis.labelWidth;
});
// re-draw with new label widths and heights
secondPass = true;
plot.setupGrid();
plot.draw();
} else {
// DRAW
$.each(plot.getAxes(), function(axisName, axis) {
var opts = axis.options // Flot 0.7
|| plot.getOptions()[axisName]; // Flot 0.6
if (!opts || !opts.axisLabel)
return;
if (opts.axisLabelUseCanvas) {
// canvas text
var ctx = plot.getCanvas().getContext('2d');
ctx.save();
ctx.font = opts.axisLabelFontSizePixels + 'px ' +
opts.axisLabelFontFamily;
var width = ctx.measureText(opts.axisLabel).width;
var height = opts.axisLabelFontSizePixels;
var x, y;
if (axisName.charAt(0) == 'x') {
x = plot.getPlotOffset().left + plot.width()/2 - width/2;
y = plot.getCanvas().height;
} else {
x = height * 0.72;
y = plot.getPlotOffset().top + plot.height()/2 - width/2;
}
ctx.translate(x, y);
ctx.rotate((axisName.charAt(0) == 'x') ? 0 : -Math.PI/2);
ctx.fillText(opts.axisLabel, 0, 0);
ctx.restore();
} else {
// HTML text
plot.getPlaceholder().find('#' + axisName + 'Label').remove();
var elem = $('<div id="' + axisName + 'Label" " class="axisLabels" style="position:absolute;">' + opts.axisLabel + '</div>');
if (axisName.charAt(0) == 'x') {
elem.css('left', plot.getPlotOffset().left + plot.width()/2 - elem.outerWidth()/2 + 'px');
elem.css('bottom', '0px');
} else {
elem.css('top', plot.getPlotOffset().top + plot.height()/2 - elem.outerHeight()/2 + 'px');
elem.css('left', '0px');
}
plot.getPlaceholder().append(elem);
}
});
secondPass = false;
}
});
}
$.plot.plugins.push({
init: init,
options: options,
name: 'axisLabels',
version: '1.0'
});
})(jQuery);

44
docs/benchmarks/regressions.css Normal file
View file

@ -0,0 +1,44 @@
#regressions-body {
margin-left: 2em;
margin-right: 2em;
margin-top: 1em;
margin-bottom: 2em;
}
#regressions-body table thead th {
cursor: pointer;
white-space: nowrap;
}
#regressions-body table thead th.desc:after {
content: ' \2191';
}
#regressions-body table thead th.asc:after {
content: ' \2193';
}
#regressions-body table.ignored {
padding-top: 1em;
color: #ccc;
background-color: #eee;
}
#regressions-body table.ignored a {
color: #82abda;
}
#regressions-body .feed-div {
float: right;
}
#regressions-body table tbody td.date {
white-space: nowrap;
}
#regressions-body table button {
margin-top: -2px;
padding-top: 2px;
padding-bottom: 0px;
white-space: nowrap;
}

618
docs/benchmarks/regressions.js Normal file
View file

@ -0,0 +1,618 @@
'use strict';
$(document).ready(function() {
/* Cached contents of downloaded regressions.json */
var regression_data = null;
/* Current page title */
var current_title = "All regressions";
/* Whether HTML5 local storage is available */
var local_storage_available = false;
/* Key prefix for ignored regressions. For each ignored regression,
a key "ignore_key_prefix + md5(benchmark_name + date_a + date_b)"
is added to HTML5 local storage.
*/
var ignore_key_prefix = null;
/* Set of ignored regressions, same information as in HTML5 local storage.
Useful if local storage runs out of space. */
var ignored_regressions = {};
/* Whether to force reload on next page update */
var skip_reload = false;
function load_data(params) {
$("#title").text(current_title);
if (typeof(Storage) !== "undefined") {
/* html5 local storage available */
local_storage_available = true;
}
if (regression_data !== null) {
// already displayed
if (!skip_reload) {
var main_div = display_data(regression_data, params);
$('#regressions-body').empty();
$('#regressions-body').append(main_div);
}
skip_reload = false;
}
else {
var message = $('<div>Loading...</div>');
skip_reload = false;
$('#regressions-body').append(message);
$.ajax({
url: 'regressions.json' + '?timestamp=' + $.asv.main_timestamp,
dataType: "json",
cache: true
}).done(function (data) {
regression_data = data;
var main_div = display_data(data, params);
$('#regressions-body').empty();
$('#regressions-body').append(main_div);
});
}
}
function update_url(params, reload) {
var info = $.asv.parse_hash_string(window.location.hash);
$.each(params || {}, function(key, value) {
info.params[key] = value;
});
var new_hash = $.asv.format_hash_string(info);
if (new_hash != window.location.hash) {
if (reload === undefined) {
skip_reload = false;
}
else {
skip_reload = !reload;
}
window.location.hash = new_hash;
}
else {
skip_reload = false;
}
}
function display_data(data, params) {
var main_div = $('<div/>');
var branches = $.asv.main_json.params['branch'];
var all_ignored_keys = {};
ignore_key_prefix = 'asv-r-' + $.asv.main_json.project;
if (branches && branches.length > 1) {
/* Add a branch selector */
var dropdown_menu = $('<ul class="dropdown-menu" role="menu"/>');
var dropdown_div = $('<div class="dropdown">');
dropdown_div.append($('<button class="btn btn-default dropdown-toggle" data-toggle="dropdown">Branches ' +
'<span class="caret"/></button>'));
dropdown_div.append(dropdown_menu);
main_div.append(dropdown_div);
}
var feed_div = $('<div class="feed-div"><a class="btn" href="regressions.xml">Feed (Atom)</a></div>');
main_div.append(feed_div);
var group_div = $('<div>');
var group_button = $('<button class="btn btn-small"/>');
if (params.grouped) {
group_button.text('Ungroup regressions');
group_button.on('click', function(evt) {
update_url({'grouped': []});
});
}
else {
group_button.text('Group regressions');
group_button.on('click', function(evt) {
update_url({'grouped': ["true"]});
});
}
group_div.append(group_button);
main_div.append(group_div);
$.each(branches, function(i, branch) {
var branch_div = $('<div class="regression-div"/>')
var display_table = $('<table class="table table-hover"/>');
var ignored_table = $('<table class="table table-hover ignored"/>');
var ignored_button = $('<button class="btn btn-default">Show ignored regressions...</button>');
var ignored_conf_sample_div = $('<div class="ignored"/>');
if (branches && branches.length > 1) {
var branch_link = $('<a/>')
branch_link.text(branch);
dropdown_menu.append($('<li role="presentation"/>').append(branch_link));
branch_link.on('click', function(evt) {
current_title = "Regressions in " + branch + " branch";
update_url({'branch': [branch]}, false);
$("#title").text(current_title);
$(".regression-div").hide();
$(".ignored").hide();
ignored_button.show();
$("#regression-div-" + i).show();
$("#regression-div-" + i + '-ignored').show();
});
}
else {
branch = null;
}
branch_div.attr('id', 'regression-div-' + i);
branch_div.hide();
main_div.append(branch_div);
if (params.grouped) {
create_grouped_data_table(display_table, ignored_table, ignored_conf_sample_div,
data, params, branch, all_ignored_keys);
}
else {
create_data_table(display_table, ignored_table, ignored_conf_sample_div,
data, params, branch, all_ignored_keys);
}
branch_div.append(display_table);
ignored_table.hide();
ignored_conf_sample_div.hide();
branch_div.append(ignored_table);
branch_div.append(ignored_conf_sample_div);
update_ignore_conf_sample(data, ignored_conf_sample_div, branch);
branch_div.append(ignored_button);
ignored_button.on('click', function(evt) {
ignored_button.hide();
$(".ignored").show();
});
});
var branch_index = 0;
if (branches && branches.length > 1) {
if (params.branch) {
branch_index = branches.indexOf(params.branch[0]);
if (branch_index < 0) {
branch_index = 0;
}
}
current_title = "Regressions in " + branches[branch_index] + " branch";
}
$("#title").text(current_title);
main_div.find("#regression-div-" + branch_index).show();
main_div.show();
if (local_storage_available) {
/* Clear out local storage space */
var keys = Object.keys(localStorage);
$.each(keys, function(i, key) {
if (key.slice(0, ignore_key_prefix.length) == ignore_key_prefix &&
!all_ignored_keys[key]) {
delete localStorage[key];
}
});
}
return main_div;
}
function create_data_table(display_table, ignored_table, ignored_conf_sample_div,
data, params, branch, all_ignored_keys) {
var table_head = $('<thead><tr>' +
'<th data-sort="string">Benchmark</th>' +
'<th data-sort="string">Date</th>' +
'<th data-sort="string">Commit</th>' +
'<th data-sort="factor">Factor</th>' +
'<th data-sort="value">Before</th>' +
'<th data-sort="value">Best after</th>' +
'<th></th>' +
'</tr></thead>');
display_table.append(table_head);
ignored_table.append(table_head.clone());
var table_body = $('<tbody/>');
var ignored_table_body = $('<tbody/>');
var regressions = data['regressions'];
$.each(regressions, function (i, item) {
var benchmark_name = item[0];
var graph_url = item[1];
var param_dict = item[2];
var parameter_idx = item[3];
var last_value = item[4];
var best_value = item[5];
var jumps = item[6]; // [[rev1, rev2, before, after], ...]
if (jumps === null) {
return;
}
if (branch !== null && param_dict['branch'] != branch) {
return;
}
var benchmark_basename = benchmark_name.replace(/\([\s\S]*/, '');
var benchmark = $.asv.main_json.benchmarks[benchmark_basename];
var url_params = {};
$.each(param_dict, function (key, value) {
url_params[key] = [value];
});
if (parameter_idx !== null) {
$.each($.asv.param_selection_from_flat_idx(benchmark.params, parameter_idx).slice(1), function(i, param_values) {
url_params['p-'+benchmark.param_names[i]] = [benchmark.params[i][param_values[0]]];
});
}
$.each(jumps, function(i, revs) {
var row = $('<tr/>');
var commit_a = $.asv.get_commit_hash(revs[0]);
var commit_b = $.asv.get_commit_hash(revs[1]);
var old_value = revs[2];
var new_value = revs[3];
var factor = new_value / old_value;
if (commit_a) {
url_params.commits = [commit_a + '-' + commit_b];
}
else {
url_params.commits = [commit_b];
}
var benchmark_url = $.asv.format_hash_string({
location: [benchmark_basename],
params: url_params
});
new_value = $.asv.pretty_unit(new_value, benchmark.unit);
old_value = $.asv.pretty_unit(old_value, benchmark.unit);
var benchmark_link = $('<a/>').attr('href', benchmark_url).text(benchmark_name);
row.append($('<td/>').append(benchmark_link));
var date_fmt = new Date($.asv.main_json.revision_to_date[revs[1]]);
row.append($('<td class="date"/>').text($.asv.format_date_yyyymmdd_hhmm(date_fmt)));
var commit_td = $('<td/>');
if (commit_a) {
if ($.asv.main_json.show_commit_url.match(/.*\/\/github.com\//)) {
var commit_url = ($.asv.main_json.show_commit_url + '../compare/'
+ commit_a + '...' + commit_b);
commit_td.append(
$('<a/>').attr('href', commit_url).text(commit_a + '..' + commit_b));
}
else {
commit_td.append($('<span/>').text(commit_a + '..' + commit_b));
}
}
else {
var commit_url = $.asv.main_json.show_commit_url + commit_b;
commit_td.append(
$('<a/>').attr('href', commit_url).text(commit_b));
}
row.append(commit_td);
row.append($('<td/>').text(factor.toFixed(2) + 'x'));
row.append($('<td/>').text(old_value));
row.append($('<td/>').text(new_value));
/* html5 local storage has limited size, so store hashes
rather than potentially long strings */
var ignore_key = get_ignore_key(item, revs);
all_ignored_keys[ignore_key] = 1;
var is_ignored = is_key_ignored(ignore_key);
var ignore_button = $('<button class="btn btn-small"/>');
row.attr('id', ignore_key);
ignore_button.on('click', function(evt) {
if (is_key_ignored(ignore_key)) {
set_key_ignore_status(ignore_key, false);
var item = ignored_table_body.find('#' + ignore_key).detach();
ignore_button.text('Ignore');
table_body.append(item);
}
else {
set_key_ignore_status(ignore_key, true);
var item = table_body.find('#' + ignore_key).detach();
ignore_button.text('Unignore');
ignored_table_body.append(item);
}
update_ignore_conf_sample(data, ignored_conf_sample_div, branch);
});
row.append($('<td/>').append(ignore_button));
if (!is_ignored) {
ignore_button.text('Ignore');
table_body.append(row);
}
else {
ignore_button.text('Unignore');
ignored_table_body.append(row);
}
/* Show a graph as a popup */
$.asv.ui.hover_graph(benchmark_link, graph_url, benchmark_basename, parameter_idx, [revs]);
});
});
display_table.append(table_body);
ignored_table.append(ignored_table_body);
setup_sort(params, display_table);
setup_sort(params, ignored_table);
}
function create_grouped_data_table(display_table, ignored_table, ignored_conf_sample_div,
data, params, branch, all_ignored_keys) {
var table_head = $('<thead><tr>' +
'<th data-sort="string">Benchmark</th>' +
'<th data-sort="string">Last date</th>' +
'<th data-sort="string">Commits</th>' +
'<th data-sort="factor">Factor</th>' +
'<th data-sort="value">Best</th>' +
'<th data-sort="value">Current</th>' +
'<th></th>' +
'</tr></thead>');
display_table.append(table_head);
ignored_table.append(table_head.clone());
var table_body = $('<tbody/>');
var regressions = data['regressions'];
$.each(regressions, function (i, item) {
var benchmark_name = item[0];
var graph_url = item[1];
var param_dict = item[2];
var parameter_idx = item[3];
var last_value = item[4];
var best_value = item[5];
var jumps = item[6]; // [[rev1, rev2, before, after], ...]
if (jumps === null) {
return;
}
if (branch !== null && param_dict['branch'] != branch) {
return;
}
var benchmark_basename = benchmark_name.replace(/\(.*/, '');
var benchmark = $.asv.main_json.benchmarks[benchmark_basename];
var url_params = {};
$.each(param_dict, function (key, value) {
url_params[key] = [value];
});
if (parameter_idx !== null) {
$.each($.asv.param_selection_from_flat_idx(benchmark.params, parameter_idx).slice(1), function(i, param_values) {
url_params['p-'+benchmark.param_names[i]] = [benchmark.params[i][param_values[0]]];
});
}
url_params.commits = [];
var commit_td = $('<td/>');
$.each(jumps, function(i, revs) {
var commit_a = $.asv.get_commit_hash(revs[0]);
var commit_b = $.asv.get_commit_hash(revs[1]);
if (commit_a) {
url_params.commits = url_params.commits.concat([commit_a + '-' + commit_b]);
}
else {
url_params.commits = url_params.commits.concat([commit_b]);
}
if (i > 0) {
commit_td.append($('<span>, </span>'));
}
if (commit_a) {
if ($.asv.main_json.show_commit_url.match(/.*\/\/github.com\//)) {
var commit_url = ($.asv.main_json.show_commit_url + '../compare/'
+ commit_a + '...' + commit_b);
commit_td.append(
$('<a/>').attr('href', commit_url).text(commit_a + '..' + commit_b));
}
else {
commit_td.append($('<span/>').text(commit_a + '..' + commit_b));
}
}
else {
var commit_url = $.asv.main_json.show_commit_url + commit_b;
commit_td.append(
$('<a/>').attr('href', commit_url).text(commit_b));
}
});
var row = $('<tr/>');
var benchmark_url = $.asv.format_hash_string({
location: [benchmark_basename],
params: url_params
});
var benchmark_link = $('<a/>').attr('href', benchmark_url).text(benchmark_name);
$.asv.ui.hover_graph(benchmark_link, graph_url, benchmark_basename, parameter_idx, jumps);
row.append($('<td/>').append(benchmark_link));
var date_td = $('<td class="date"/>');
var date_fmt = new Date($.asv.main_json.revision_to_date[jumps[jumps.length-1][1]]);
date_td.text($.asv.format_date_yyyymmdd_hhmm(date_fmt));
row.append(date_td);
row.append(commit_td);
var factor_td = $('<td/>');
row.append(factor_td);
var factor = last_value / best_value;
factor_td.text(factor.toFixed(2) + 'x');
var best_td = $('<td/>');
best_td.text($.asv.pretty_unit(best_value, benchmark.unit));
row.append(best_td);
var last_td = $('<td/>');
last_td.text($.asv.pretty_unit(last_value, benchmark.unit));
row.append(last_td);
table_body.append(row);
});
display_table.append(table_body);
setup_sort(params, display_table);
}
function get_ignore_key(item, revs) {
var benchmark_name = item[0];
var ignore_payload = benchmark_name;
if (revs[0] === null) {
ignore_payload = ignore_payload + ',';
}
else {
ignore_payload = (ignore_payload + ','
+ $.asv.main_json.revision_to_hash[revs[0]]);
}
ignore_payload = (ignore_payload + ','
+ $.asv.main_json.revision_to_hash[revs[1]]);
return ignore_key_prefix + md5(ignore_payload);
}
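/* Example: for benchmark "templating.time_render" (hypothetical) and a
   jump from full commit hash A to B, the stored key is
   "asv-r-" + project + md5("templating.time_render,A,B"). */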
function is_key_ignored(ignore_key) {
if (local_storage_available) {
return (ignore_key in localStorage) || (ignore_key in ignored_regressions);
}
else {
return (ignore_key in ignored_regressions);
}
}
function set_key_ignore_status(ignore_key, is_ignored) {
if (is_ignored) {
if (local_storage_available) {
try {
localStorage[ignore_key] = 1;
} catch (err) {
/* Out of quota -- we're just going to ignore that */
}
}
ignored_regressions[ignore_key] = 1;
}
else {
if (local_storage_available) {
delete localStorage[ignore_key];
}
delete ignored_regressions[ignore_key];
}
}
function update_ignore_conf_sample(data, ignored_conf_sample_div, branch) {
var regressions = data['regressions'];
var entries = {};
var branch_suffix = "";
if (branch) {
branch_suffix = "@" + branch;
}
$.each(regressions, function (i, item) {
var param_dict = item[2];
if (branch !== null && param_dict['branch'] != branch) {
return;
}
$.each(item[6], function (i, revs) {
var ignore_key = get_ignore_key(item, revs);
if (is_key_ignored(ignore_key)) {
var benchmark_name = item[0];
var benchmark_name_re = (benchmark_name + branch_suffix).replace(/[.?*+^$[\]\\(){}|-]/g, "\\\\$&");
var commit = $.asv.get_commit_hash(revs[1]);
var entry = " \"^" + benchmark_name_re + "$\": \"" + commit + "\",\n";
entries[entry] = 1;
}
});
});
entries = Object.keys(entries);
entries.sort();
var text = "// asv.conf.json excerpt for ignoring the above permanently\n\n";
text += " \"regressions_first_commits\": {\n";
$.each(entries, function (i, entry) {
text += entry;
});
text += " }";
var pre = $('<pre/>');
pre.text(text);
ignored_conf_sample_div.empty();
ignored_conf_sample_div.append(pre);
}
function setup_sort(params, table) {
table.stupidtable({
'value': function(a, b) {
function key(s) {
for (var k = 0; k < $.asv.time_units.length; ++k) {
var entry = $.asv.time_units[k];
var m = s.match('^([0-9.]+)'+entry[0]+'$');
if (m) {
return parseFloat(m[1]) * entry[2] * 1e-30;
}
}
return 0;
}
return key(a) - key(b)
},
'factor': function(a, b) {
return parseFloat(a.replace(/x/, '')) - parseFloat(b.replace(/x/, ''));
}
});
table.on('aftertablesort', function (event, data) {
update_url({'sort': [data.column], 'dir': [data.direction]}, false);
/* Update appearance */
table.find('thead th').removeClass('asc');
table.find('thead th').removeClass('desc');
var th_to_sort = table.find("thead th").eq(parseInt(data.column));
if (th_to_sort) {
th_to_sort.addClass(data.direction);
}
});
if (params.sort && params.dir) {
var th_to_sort = table.find("thead th").eq(parseInt(params.sort[0]));
th_to_sort.stupidsort(params.dir[0]);
}
else {
var th_to_sort = table.find("thead th").eq(3);
th_to_sort.stupidsort("desc");
}
}
/*
Setup display hooks
*/
$.asv.register_page('regressions', function(params) {
$('#regressions-display').show()
load_data(params);
});
});

1
docs/benchmarks/regressions.json Normal file
View file

@ -0,0 +1 @@
{"regressions": []}

2
docs/benchmarks/regressions.xml Normal file
View file

@ -0,0 +1,2 @@
<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"><id>tag:django-components.asv,1970-01-01:/cddbdcca8b398afd301fbfc73cc4d51103d4e3059c0e6b938d4c467ad3d1aa25</id><author><name>Airspeed Velocity</name></author><title xml:lang="en">django-components performance regressions</title><updated>2025-02-22T08:47:12Z</updated></feed>

136
docs/benchmarks/summarygrid.js Normal file
View file

@ -0,0 +1,136 @@
'use strict';
$(document).ready(function() {
var summary_loaded = false;
/* Callback a function when an element comes in view */
function callback_in_view(element, func) {
function handler(evt) {
var visible = (
$('#summarygrid-display').css('display') != 'none' &&
(element.offset().top <= $(window).height() + $(window).scrollTop()) &&
(element.offset().top + element.height() >= $(window).scrollTop()));
if (visible) {
func();
$(window).off('scroll', handler);
}
}
$(window).on('scroll', handler);
}
function get_benchmarks_by_groups() {
var main_json = $.asv.main_json;
var groups = {};
$.each(main_json.benchmarks, function(bm_name, bm) {
var i = bm_name.indexOf('.');
var group = bm_name.slice(0, i);
var name = bm_name.slice(i + 1);
if (groups[group] === undefined) {
groups[group] = [];
}
groups[group].push(bm_name);
});
return groups;
}
function benchmark_container(bm) {
var container = $(
'<a class="btn benchmark-container" href="#' + bm.name +
'"/>');
var plot_div = $(
'<div id="summarygrid-' + bm.name + '" class="benchmark-plot"/>');
var display_name = bm.pretty_name || bm.name.slice(bm.name.indexOf('.') + 1);
var name = $('<div class="benchmark-text">' + display_name + '</div>');
name.tooltip({
title: bm.name,
html: true,
placement: 'top',
container: 'body',
animation: false
});
plot_div.tooltip({
title: bm.code,
html: true,
placement: 'bottom',
container: 'body',
animation: false
});
container.append(name);
container.append(plot_div);
callback_in_view(plot_div, function() {
$.asv.load_graph_data(
'graphs/summary/' + bm.name + '.json'
).done(function(data) {
var options = {
colors: $.asv.colors,
series: {
lines: {
show: true,
lineWidth: 2
},
shadowSize: 0
},
grid: {
borderWidth: 1,
margin: 0,
labelMargin: 0,
axisMargin: 0,
minBorderMargin: 0
},
xaxis: {
ticks: [],
},
yaxis: {
ticks: [],
min: 0
},
legend: {
show: false
}
};
var plot = $.plot(
plot_div, [{data: data}], options);
}).fail(function() {
// TODO: Handle failure
});
});
return container;
}
function make_summary() {
var summary_display = $('#summarygrid-display');
var main_json = $.asv.main_json;
var summary_container = $('<div/>');
if (summary_loaded) {
return;
}
$.each(get_benchmarks_by_groups(), function(group, benchmarks) {
var group_container = $('<div class="benchmark-group"/>');
group_container.attr('id', 'group-' + group);
group_container.append($('<h1>' + group + '</h1>'));
summary_display.append(group_container);
$.each(benchmarks, function(i, bm_name) {
var bm = $.asv.main_json.benchmarks[bm_name];
group_container.append(benchmark_container(bm));
});
});
summary_display.append(summary_container);
$(window).trigger('scroll');
summary_loaded = true;
}
$.asv.register_page('', function(params) {
$('#summarygrid-display').show();
$("#title").text("All benchmarks");
$('.tooltip').remove();
make_summary();
});
});

View file

@ -0,0 +1,50 @@
#summarylist-body {
padding-left: 2em;
padding-right: 2em;
padding-top: 1em;
padding-bottom: 2em;
}
#summarylist-body table thead th {
cursor: pointer;
white-space: nowrap;
}
#summarylist-body table thead th.desc:after {
content: ' \2191';
}
#summarylist-body table thead th.asc:after {
content: ' \2193';
}
#summarylist-body table.ignored {
padding-top: 1em;
color: #ccc;
background-color: #eee;
}
#summarylist-body table.ignored a {
color: #82abda;
}
#summarylist-body table tbody td.positive-change {
background-color: #fdd;
}
#summarylist-body table tbody td.negative-change {
background-color: #dfd;
}
#summarylist-body table tbody td.value {
white-space: nowrap;
}
#summarylist-body table tbody td.change a {
color: black;
white-space: nowrap;
}
#summarylist-body table tbody td.change-date {
white-space: nowrap;
}

View file

@ -0,0 +1,451 @@
'use strict';
$(document).ready(function() {
/* The state of the parameters in the sidebar. Dictionary mapping
strings to values determining the "enabled" configurations. */
var state = null;
/* Cache of constructed tables, {data_path: table_dom_id} */
var table_cache = {};
var table_cache_counter = 0;
function setup_display(state_selection) {
var new_state = setup_state(state_selection);
var same_state = (state !== null);
/* Avoid needless UI updates, e.g., on table sort */
if (same_state) {
$.each(state, function (key, value) {
if (value != new_state[key]) {
same_state = false;
}
});
}
if (!same_state) {
state = new_state;
replace_params_ui();
var filename = $.asv.graph_to_path('summary', state);
$("#summarylist-body table").hide();
$("#summarylist-body .message").remove();
if (table_cache[filename] !== undefined) {
$(table_cache[filename]).show();
}
else {
$("#summarylist-body").append($("<p class='message'>Loading...</p>"));
$.asv.load_graph_data(
filename
).done(function (data) {
var table = construct_benchmark_table(data);
var table_name = 'summarylist-table-' + table_cache_counter;
++table_cache_counter;
table.attr('id', table_name);
table_cache[filename] = '#' + table_name;
$("#summarylist-body .message").remove();
$("#summarylist-body").append(table);
table.show();
});
}
}
}
function update_state_url(key, value) {
var info = $.asv.parse_hash_string(window.location.hash);
var new_state = get_valid_state(state, key, value);
$.each($.asv.main_json.params, function(param, values) {
if (values.length > 1) {
info.params[param] = [new_state[param]];
}
else if (info.params[param]) {
delete info.params[param];
}
});
window.location.hash = $.asv.format_hash_string(info);
}
function obj_copy(obj) {
var newobj = {};
$.each(obj, function(key, val) {
newobj[key] = val;
});
return newobj;
}
function obj_diff(obj1, obj2) {
var count = 0;
$.each(obj1, function(key, val) {
if (obj2[key] != val) {
++count;
}
});
return count;
}
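/* For example, obj_diff({a: 1, b: 2}, {a: 1, b: 3}) === 1, i.e. the number
   of keys in the first object whose values differ in the second. */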
function get_valid_state(tmp_state, wanted_key, wanted_value) {
/*
Get an available state with wanted_key having wanted_value,
preferably as a minor modification of tmp_state.
*/
var best_params = null;
var best_diff = 1e99;
var best_hit = false;
tmp_state = obj_copy(tmp_state);
if (wanted_key !== undefined) {
tmp_state[wanted_key] = wanted_value;
}
$.each($.asv.main_json.graph_param_list, function(idx, params) {
var diff = obj_diff(tmp_state, params);
var hit = (wanted_key === undefined || params[wanted_key] == wanted_value);
if ((!best_hit && hit) || (hit == best_hit && diff < best_diff)) {
best_params = params;
best_diff = diff;
best_hit = hit;
}
});
if (best_params === null) {
best_params = $.asv.main_json.graph_param_list[0];
}
return obj_copy(best_params);
}
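/* Illustrative: if the current state is {machine: 'mac', branch: 'master'}
   and the user picks branch 'dev', get_valid_state(state, 'branch', 'dev')
   returns the graph_param_list entry with branch == 'dev' that differs from
   the current state in the fewest keys (parameter names here are made up). */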
function setup_state(state_selection) {
var index = $.asv.main_json;
var state = {};
state.machine = index.params.machine;
$.each(index.params, function(param, values) {
state[param] = values[0];
});
if (state_selection !== null) {
/* Select a specific generic parameter state */
$.each(index.params, function(param, values) {
if (state_selection[param]) {
state[param] = state_selection[param][0];
}
});
}
return get_valid_state(state);
}
function replace_params_ui() {
var index = $.asv.main_json;
var nav = $('#summarylist-navigation');
nav.empty();
/* Machine selection */
$.asv.ui.make_value_selector_panel(nav, 'machine', index.params.machine, function(i, machine, button) {
button.text(machine);
button.on('click', function(evt) {
update_state_url('machine', machine);
});
if (state.machine != machine) {
button.removeClass('active');
}
button.removeAttr('data-toggle');
/* Create tooltips for each machine */
var details = [];
$.each(index.machines[machine], function(key, val) {
details.push(key + ': ' + val);
});
details = details.join('<br/>');
button.tooltip({
title: details,
html: true,
placement: 'right',
container: 'body',
animation: false
});
});
/* Generic parameter selectors */
$.each(index.params, function(param, values) {
if (values.length > 1 && param != 'machine') {
$.asv.ui.make_value_selector_panel(nav, param, values, function(i, value, button) {
var value_display;
if (value === null)
value_display = '[none]';
else if (!value)
value_display = '[default]';
else
value_display = value;
button.text(value_display);
if (state[param] != value) {
button.removeClass('active');
}
button.on('click', function(evt) {
update_state_url(param, value);
});
});
}
});
$(nav).find(".btn-group").removeAttr("data-toggle");
$.asv.ui.reflow_value_selector_panels();
}
function construct_benchmark_table(data) {
var index = $.asv.main_json;
/* Form a new table */
var table = $('<table class="table table-hover"/>');
var table_head = $('<thead><tr>' +
'<th data-sort="string">Benchmark</th>' +
'<th data-sort="float">Value</th>' +
'<th data-sort="float">Recent change</th>' +
'<th data-sort="string">Changed at</th>' +
'</tr></thead>');
table.append(table_head);
var table_body = $('<tbody/>');
$.each(data, function(row_idx, row) {
var tr = $('<tr/>');
var name_td = $('<td/>');
var name = $('<a/>');
var benchmark_url_args = {};
var benchmark_full_url;
var benchmark_base_url;
/* Format benchmark url */
benchmark_url_args.location = [row.name];
benchmark_url_args.params = {};
$.each($.asv.main_json.params, function (key, values) {
if (values.length > 1) {
benchmark_url_args.params[key] = [state[key]];
}
});
benchmark_base_url = $.asv.format_hash_string(benchmark_url_args);
if (row.idx !== null) {
var benchmark = $.asv.main_json.benchmarks[row.name];
$.each($.asv.param_selection_from_flat_idx(benchmark.params, row.idx).slice(1),
function(i, param_values) {
benchmark_url_args.params['p-'+benchmark.param_names[i]]
= [benchmark.params[i][param_values[0]]];
});
}
benchmark_full_url = $.asv.format_hash_string(benchmark_url_args);
/* Benchmark name column */
var bm_link;
if (row.idx === null) {
bm_link = $('<a/>').attr('href', benchmark_base_url).text(row.pretty_name);
name_td.append(bm_link);
}
else {
var basename = row.pretty_name;
var args = null;
var m = row.pretty_name.match(/(.*)\(.*$/);
if (m) {
basename = m[1];
args = row.pretty_name.slice(basename.length);
}
bm_link = $('<a/>').attr('href', benchmark_base_url).text(basename);
name_td.append(bm_link);
if (args) {
var bm_idx_link;
var graph_url;
bm_idx_link = $('<a/>').attr('href', benchmark_full_url).text(' ' + args);
name_td.append(bm_idx_link);
graph_url = $.asv.graph_to_path(row.name, state);
$.asv.ui.hover_graph(bm_idx_link, graph_url, row.name, row.idx, null);
}
}
$.asv.ui.hover_summary_graph(bm_link, row.name);
/* Value column */
var value_td = $('<td class="value"/>');
if (row.last_value !== null) {
var value, err, err_str, sort_value;
var unit = $.asv.main_json.benchmarks[row.name].unit;
value = $.asv.pretty_unit(row.last_value, unit);
if (unit == "seconds") {
sort_value = row.last_value * 1e100;
}
else {
sort_value = row.last_value;
}
var value_span = $('<span/>').text(value);
err = 100*row.last_err/row.last_value;
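/* err == err is false only when err is NaN (e.g. if last_err is missing),
   in which case no error percentage is appended. */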
if (err == err) {
err_str = " \u00b1 " + err.toFixed(0) + '%';
}
else {
err_str = "";
}
value_span.attr('data-toggle', 'tooltip');
value_span.attr('title', value + err_str);
value_td.append(value_span);
value_td.attr('data-sort-value', sort_value);
}
else {
value_td.attr('data-sort-value', -1e99);
}
/* Change percentage column */
var change_td = $('<td class="change"/>');
if (row.prev_value !== null) {
var text, change_str, change = 0, sort_value = 0;
var unit = $.asv.main_json.benchmarks[row.name].unit;
change_str = $.asv.pretty_unit(row.last_value - row.prev_value, unit);
if (!change_str.match(/^-/)) {
change_str = '+' + change_str;
}
if (row.prev_value != 0) {
change = 100 * (row.last_value / row.prev_value - 1);
text = change.toFixed(1) + '% (' + change_str + ')';
if (change > 0) {
text = '+' + text;
}
sort_value = change;
}
else {
text = ' (' + change_str + ')';
}
text = text.replace('-', '\u2212');
var change_commit_a = $.asv.main_json.revision_to_hash[row.change_rev[0]];
var change_commit_b = $.asv.main_json.revision_to_hash[row.change_rev[1]];
var change_q;
if (change_commit_a === undefined) {
change_q = '&commits=' + change_commit_b;
}
else {
change_q = '&commits=' + change_commit_a + '-' + change_commit_b;
}
var change_link = $('<a/>').attr('href', benchmark_full_url + change_q).text(text);
graph_url = $.asv.graph_to_path(row.name, state);
$.asv.ui.hover_graph(change_link, graph_url, row.name, row.idx, [row.change_rev]);
change_td.append(change_link);
if (change > 5) {
change_td.addClass('positive-change');
}
else if (change < -5) {
change_td.addClass('negative-change');
}
change_td.attr('data-sort-value', sort_value);
}
else {
change_td.attr('data-sort-value', 0);
}
/* Change date column */
var changed_at_td = $('<td class="change-date"/>');
if (row.change_rev !== null) {
var date = new Date($.asv.main_json.revision_to_date[row.change_rev[1]]);
var commit_1 = $.asv.get_commit_hash(row.change_rev[0]);
var commit_2 = $.asv.get_commit_hash(row.change_rev[1]);
var commit_a = $('<a/>');
var span = $('<span/>');
if (commit_1) {
var commit_url;
if ($.asv.main_json.show_commit_url.match(/.*\/\/github.com\//)) {
commit_url = ($.asv.main_json.show_commit_url + '../compare/'
+ commit_1 + '...' + commit_2);
}
else {
commit_url = $.asv.main_json.show_commit_url + commit_2;
}
commit_a.attr('href', commit_url);
commit_a.text(commit_1 + '...' + commit_2);
}
else {
commit_a.attr('href', $.asv.main_json.show_commit_url + commit_2);
commit_a.text(commit_2);
}
span.text($.asv.format_date_yyyymmdd(date) + ' ');
span.append(commit_a);
changed_at_td.append(span);
}
tr.append(name_td);
tr.append(value_td);
tr.append(change_td);
tr.append(changed_at_td);
table_body.append(tr);
});
table_body.find('[data-toggle="tooltip"]').tooltip();
/* Finalize */
table.append(table_body);
setup_sort(table);
return table;
}
function setup_sort(table) {
var info = $.asv.parse_hash_string(window.location.hash);
table.stupidtable();
table.on('aftertablesort', function (event, data) {
var info = $.asv.parse_hash_string(window.location.hash);
info.params['sort'] = [data.column];
info.params['dir'] = [data.direction];
window.location.hash = $.asv.format_hash_string(info);
/* Update appearance */
table.find('thead th').removeClass('asc');
table.find('thead th').removeClass('desc');
var th_to_sort = table.find("thead th").eq(parseInt(data.column));
if (th_to_sort) {
th_to_sort.addClass(data.direction);
}
});
if (info.params.sort && info.params.dir) {
var th_to_sort = table.find("thead th").eq(parseInt(info.params.sort[0]));
th_to_sort.stupidsort(info.params.dir[0]);
}
else {
var th_to_sort = table.find("thead th").eq(0);
th_to_sort.stupidsort("asc");
}
}
/*
* Entry point
*/
$.asv.register_page('summarylist', function(params) {
var state_selection = null;
if (Object.keys(params).length > 0) {
state_selection = params;
}
setup_display(state_selection);
$('#summarylist-display').show();
$("#title").text("List of benchmarks");
});
});

BIN
docs/benchmarks/swallow.ico Normal file

Binary file not shown (new file, 4.2 KiB).

BIN
docs/benchmarks/swallow.png Normal file

Binary file not shown (new file, 893 B).

View file

@ -1,6 +1,6 @@
---
title: Code of Conduct
weight: 8
weight: 9
---
--8<-- "CODE_OF_CONDUCT.md"

View file

@ -1,6 +1,6 @@
---
title: License
weight: 9
weight: 10
---
--8<-- "LICENSE"

View file

@ -0,0 +1,8 @@
---
title: Performance
weight: 8
---
We track the performance of `django-components` using [ASV](https://asv.readthedocs.io/en/stable/).
See the [benchmarks dashboard](../../benchmarks).

View file

@ -128,7 +128,7 @@ dependencies = [
"pre-commit",
"black",
"mypy",
]
]
type = "pip-compile"
lock-filename = "requirements-dev.txt"

View file

@ -3,4 +3,5 @@ tox-gh-actions
playwright
requests
types-requests
whitenoise
whitenoise
asv

View file

@ -2,6 +2,7 @@ django
djc-core-html-parser
tox
pytest
syrupy
flake8
flake8-pyproject
isort
@ -12,4 +13,6 @@ playwright
requests
types-requests
whitenoise
pygments-djc
pygments
pygments-djc
asv

View file

@ -19,7 +19,10 @@
# - mkdocstrings-python
# - pymdown-extensions
# - black
# - pygments
# - pygments-djc
# - django>=4.2
# - djc-core-html-parser>=1.0
#
asgiref==3.8.1
@ -46,12 +49,9 @@ click==8.1.8
# via
# black
# mkdocs
# mkdocstrings
colorama==0.4.6
# via
# click
# griffe
# mkdocs
# mkdocs-material
csscompressor==0.9.5
# via mkdocs-minify-plugin
@ -61,6 +61,8 @@ defusedxml==0.7.1
# via cairosvg
django==5.1.6
# via hatch.envs.docs
djc-core-html-parser==1.0.1
# via hatch.envs.docs
ghp-import==2.1.0
# via mkdocs
gitdb==4.0.12
@ -129,7 +131,9 @@ mkdocs-autorefs==1.3.1
mkdocs-gen-files==0.5.0
# via hatch.envs.docs
mkdocs-get-deps==0.2.0
# via mkdocs
# via
# mkdocs
# mkdocstrings
mkdocs-git-authors-plugin==0.9.2
# via hatch.envs.docs
mkdocs-git-revision-date-localized-plugin==1.3.0
@ -166,7 +170,7 @@ pathspec==0.12.1
# via
# black
# mkdocs
pillow==11.1.0
pillow==10.4.0
# via
# cairosvg
# mkdocs-material
@ -174,15 +178,15 @@ platformdirs==4.3.6
# via
# black
# mkdocs-get-deps
# mkdocstrings
pycparser==2.22
# via cffi
pygments==2.19.1
# via
# hatch.envs.docs
# mkdocs-material
# pygments-djc
pygments-djc==1.0.1
# via -r requirements-dev.in
# via hatch.envs.docs
pymdown-extensions==10.14.3
# via
# hatch.envs.docs
@ -220,8 +224,6 @@ tinycss2==1.4.0
# via
# cairosvg
# cssselect2
tzdata==2025.1
# via django
urllib3==2.3.0
# via requests
verspec==0.1.0

View file

@ -29,18 +29,19 @@ def get_template_cache() -> LRUCache:
def get_component_media_cache() -> BaseCache:
if app_settings.CACHE is not None:
return caches[app_settings.CACHE]
# If no cache is set, use a local memory cache.
global component_media_cache
if component_media_cache is None:
if app_settings.CACHE is not None:
component_media_cache = caches[app_settings.CACHE]
else:
component_media_cache = LocMemCache(
"django-components-media",
{
"TIMEOUT": None, # No timeout
"MAX_ENTRIES": None, # No max size
"CULL_FREQUENCY": 3,
},
)
component_media_cache = LocMemCache(
"django-components-media",
{
"TIMEOUT": None, # No timeout
"MAX_ENTRIES": None, # No max size
"CULL_FREQUENCY": 3,
},
)
return component_media_cache

File diff suppressed because it is too large.

View file

@ -0,0 +1,21 @@
# serializer version: 1
# name: test_render
'''
<a
href="https://example.com"
class="py-2 px-4 bg-blue-600 text-white hover:bg-blue-500 focus-visible:outline-blue-600 transition inline-flex w-full text-sm font-semibold sm:mt-0 sm:w-auto focus-visible:outline-2 focus-visible:outline-offset-2 px-3 py-2 justify-center rounded-md shadow-sm no-underline"
>
Click me!
</a>
'''
# ---

File diff suppressed because it is too large.

View file

@ -0,0 +1,21 @@
# serializer version: 1
# name: test_render
'''
<a
href="https://example.com"
class="py-2 px-4 bg-blue-600 text-white hover:bg-blue-500 focus-visible:outline-blue-600 transition inline-flex w-full text-sm font-semibold sm:mt-0 sm:w-auto focus-visible:outline-2 focus-visible:outline-offset-2 px-3 py-2 justify-center rounded-md shadow-sm no-underline"
data-djc-id-a1bc3e="">
Click me!
</a>
'''
# ---

File diff suppressed because it is too large.

View file

@ -0,0 +1,348 @@
# NOTE: This file is used for benchmarking. Before editing this file,
# please read through these:
# - `benchmarks/README`
# - https://github.com/django-components/django-components/pull/999
from pathlib import Path
from typing import Dict, Literal, NamedTuple, Optional, Union
import django
from django.conf import settings
from django.template import Context, Template
from django_components import types
# DO NOT REMOVE - See https://github.com/django-components/django-components/pull/999
# ----------- IMPORTS END ------------ #
# This variable is overridden by the benchmark runner
CONTEXT_MODE: Literal["django", "isolated"] = "isolated"
if not settings.configured:
settings.configure(
BASE_DIR=Path(__file__).resolve().parent,
INSTALLED_APPS=["django_components"],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
"tests/templates/",
"tests/components/", # Required for template relative imports in tests
],
"OPTIONS": {
"builtins": [
"django_components.templatetags.component_tags",
]
},
}
],
COMPONENTS={
"template_cache_size": 128,
"autodiscover": False,
"context_behavior": CONTEXT_MODE,
},
MIDDLEWARE=["django_components.middleware.ComponentDependencyMiddleware"],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SECRET_KEY="secret",
ROOT_URLCONF="django_components.urls",
)
django.setup()
else:
settings.COMPONENTS["context_behavior"] = CONTEXT_MODE
#####################################
#
# IMPLEMENTATION START
#
#####################################
templates_cache: Dict[int, Template] = {}
def lazy_load_template(template: str) -> Template:
template_hash = hash(template)
if template_hash in templates_cache:
return templates_cache[template_hash]
else:
template_instance = Template(template)
templates_cache[template_hash] = template_instance
return template_instance
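# Illustrative usage: equal template strings hash to the same key, so each
# distinct template source is compiled only once and then reused:
#
#   t1 = lazy_load_template("Hello {{ name }}")
#   t2 = lazy_load_template("Hello {{ name }}")
#   assert t1 is t2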
#####################################
# RENDER ENTRYPOINT
#####################################
def gen_render_data():
data = ButtonData(
href="https://example.com",
disabled=False,
variant="primary",
type="button",
attrs={
"class": "py-2 px-4",
},
slot_content="Click me!",
)
return data
def render(data: "ButtonData"):
# Render
result = button(Context(), data)
return result
#####################################
# THEME
#####################################
ThemeColor = Literal["default", "error", "success", "alert", "info"]
ThemeVariant = Literal["primary", "secondary"]
VARIANTS = ["primary", "secondary"]
class ThemeStylingUnit(NamedTuple):
"""
Smallest unit of info, this class defines a specific styling of a specific
component in a specific state.
E.g. styling of a disabled "Error" button.
"""
color: str
"""CSS class(es) specifying color"""
css: str = ""
"""Other CSS classes not specific to color"""
class ThemeStylingVariant(NamedTuple):
"""
Collection of styling combinations that are meaningful as a group.
E.g. all "error" variants - primary, disabled, secondary, ...
"""
primary: ThemeStylingUnit
primary_disabled: ThemeStylingUnit
secondary: ThemeStylingUnit
secondary_disabled: ThemeStylingUnit
class Theme(NamedTuple):
"""Class for defining a styling and color theme for the app."""
default: ThemeStylingVariant
error: ThemeStylingVariant
alert: ThemeStylingVariant
success: ThemeStylingVariant
info: ThemeStylingVariant
_secondary_btn_styling = "ring-1 ring-inset"
theme = Theme(
default=ThemeStylingVariant(
primary=ThemeStylingUnit(
color="bg-blue-600 text-white hover:bg-blue-500 focus-visible:outline-blue-600 transition"
),
primary_disabled=ThemeStylingUnit(color="bg-blue-300 text-blue-50 focus-visible:outline-blue-600 transition"),
secondary=ThemeStylingUnit(
color="bg-white text-gray-800 ring-gray-300 hover:bg-gray-100 focus-visible:outline-gray-600 transition",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-gray-300 ring-gray-300 focus-visible:outline-gray-600 transition",
css=_secondary_btn_styling,
),
),
error=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-red-600 text-white hover:bg-red-500 focus-visible:outline-red-600"),
primary_disabled=ThemeStylingUnit(color="bg-red-300 text-white focus-visible:outline-red-600"),
secondary=ThemeStylingUnit(
color="bg-white text-red-600 ring-red-300 hover:bg-red-100 focus-visible:outline-red-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-red-200 ring-red-100 focus-visible:outline-red-600",
css=_secondary_btn_styling,
),
),
alert=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-amber-500 text-white hover:bg-amber-400 focus-visible:outline-amber-500"),
primary_disabled=ThemeStylingUnit(color="bg-amber-100 text-orange-300 focus-visible:outline-amber-500"),
secondary=ThemeStylingUnit(
color="bg-white text-amber-500 ring-amber-300 hover:bg-amber-100 focus-visible:outline-amber-500",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-orange-200 ring-amber-100 focus-visible:outline-amber-500",
css=_secondary_btn_styling,
),
),
success=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-green-600 text-white hover:bg-green-500 focus-visible:outline-green-600"),
primary_disabled=ThemeStylingUnit(color="bg-green-300 text-white focus-visible:outline-green-600"),
secondary=ThemeStylingUnit(
color="bg-white text-green-600 ring-green-300 hover:bg-green-100 focus-visible:outline-green-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-green-200 ring-green-100 focus-visible:outline-green-600",
css=_secondary_btn_styling,
),
),
info=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-sky-600 text-white hover:bg-sky-500 focus-visible:outline-sky-600"),
primary_disabled=ThemeStylingUnit(color="bg-sky-300 text-white focus-visible:outline-sky-600"),
secondary=ThemeStylingUnit(
color="bg-white text-sky-600 ring-sky-300 hover:bg-sky-100 focus-visible:outline-sky-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-sky-200 ring-sky-100 focus-visible:outline-sky-600",
css=_secondary_btn_styling,
),
),
)
def get_styling_css(
variant: Optional["ThemeVariant"] = None,
color: Optional["ThemeColor"] = None,
disabled: Optional[bool] = None,
):
"""
Dynamically access CSS styling classes for a specific variant and state.
E.g. the following three calls get styling classes for:
1. Secondary error state
2. Secondary alert disabled state
3. Primary default disabled state
```py
get_styling_css('secondary', 'error')
get_styling_css('secondary', 'alert', disabled=True)
get_styling_css(disabled=True)
```
"""
variant = variant or "primary"
color = color or "default"
disabled = disabled if disabled is not None else False
color_variants: ThemeStylingVariant = getattr(theme, color)
if variant not in VARIANTS:
raise ValueError(f'Unknown theme variant "{variant}", must be one of {VARIANTS}')
variant_name = variant if not disabled else f"{variant}_disabled"
styling: ThemeStylingUnit = getattr(color_variants, variant_name)
css = f"{styling.color} {styling.css}".strip()
return css
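# Illustrative: with the theme above, get_styling_css("secondary", "error")
# returns "bg-white text-red-600 ring-red-300 hover:bg-red-100
# focus-visible:outline-red-600 ring-1 ring-inset".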
#####################################
# BUTTON
#####################################
button_template_str: types.django_html = """
{# Based on buttons from https://tailwindui.com/components/application-ui/overlays/modals #}
{% if is_link %}
<a
href="{{ href }}"
{% html_attrs attrs class=btn_class class="no-underline" %}
>
{% else %}
<button
type="{{ type }}"
{% if disabled %} disabled {% endif %}
{% html_attrs attrs class=btn_class %}
>
{% endif %}
{{ slot_content }}
{% if is_link %}
</a>
{% else %}
</button>
{% endif %}
"""
class ButtonData(NamedTuple):
href: Optional[str] = None
link: Optional[bool] = None
disabled: Optional[bool] = False
variant: Union["ThemeVariant", Literal["plain"]] = "primary"
color: Union["ThemeColor", str] = "default"
type: Optional[str] = "button"
attrs: Optional[dict] = None
slot_content: Optional[str] = ""
def button(context: Context, data: ButtonData):
common_css = (
"inline-flex w-full text-sm font-semibold"
" sm:mt-0 sm:w-auto focus-visible:outline-2 focus-visible:outline-offset-2"
)
if data.variant == "plain":
all_css_class = common_css
else:
button_classes = get_styling_css(data.variant, data.color, data.disabled) # type: ignore[arg-type]
all_css_class = f"{button_classes} {common_css} px-3 py-2 justify-center rounded-md shadow-sm"
is_link = not data.disabled and (data.href or data.link)
all_attrs = {**(data.attrs or {})}
if data.disabled:
all_attrs["aria-disabled"] = "true"
with context.push(
{
"href": data.href,
"disabled": data.disabled,
"type": data.type,
"btn_class": all_css_class,
"attrs": all_attrs,
"is_link": is_link,
"slot_content": data.slot_content,
}
):
return lazy_load_template(button_template_str).render(context)
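# Illustrative: with the data from gen_render_data(), `href` is set and
# `disabled` is False, so `is_link` is truthy and the button renders as an
# <a> tag (compare the snapshot asserted by test_render below):
#
#   html = render(gen_render_data())
#   assert html.strip().startswith("<a")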
#####################################
#
# IMPLEMENTATION END
#
#####################################
# DO NOT REMOVE - See https://github.com/django-components/django-components/pull/999
# ----------- TESTS START ------------ #
# The code above is also used when benchmarking.
# The section below is NOT included.
from .testutils import CsrfTokenPatcher, GenIdPatcher # noqa: E402
def test_render(snapshot):
id_patcher = GenIdPatcher()
id_patcher.start()
csrf_token_patcher = CsrfTokenPatcher()
csrf_token_patcher.start()
data = gen_render_data()
rendered = render(data)
assert rendered == snapshot
id_patcher.stop()
csrf_token_patcher.stop()

tests/test_benchmark_djc.py Normal file

File diff suppressed because it is too large.

View file

@ -0,0 +1,351 @@
# NOTE: This file is used for benchmarking. Before editing this file,
# please read through these:
# - `benchmarks/README`
# - https://github.com/django-components/django-components/pull/999
from pathlib import Path
from typing import Dict, Literal, NamedTuple, Optional, Union
import django
from django.conf import settings
from django.template import Context, Template
from django_components import Component, types
# DO NOT REMOVE - See https://github.com/django-components/django-components/pull/999
# ----------- IMPORTS END ------------ #
# This variable is overridden by the benchmark runner
CONTEXT_MODE: Literal["django", "isolated"] = "isolated"
if not settings.configured:
settings.configure(
BASE_DIR=Path(__file__).resolve().parent,
INSTALLED_APPS=["django_components"],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
"tests/templates/",
"tests/components/", # Required for template relative imports in tests
],
"OPTIONS": {
"builtins": [
"django_components.templatetags.component_tags",
]
},
}
],
COMPONENTS={
"template_cache_size": 128,
"autodiscover": False,
"context_behavior": CONTEXT_MODE,
},
MIDDLEWARE=["django_components.middleware.ComponentDependencyMiddleware"],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SECRET_KEY="secret",
ROOT_URLCONF="django_components.urls",
)
django.setup()
else:
settings.COMPONENTS["context_behavior"] = CONTEXT_MODE
#####################################
#
# IMPLEMENTATION START
#
#####################################
templates_cache: Dict[int, Template] = {}
def lazy_load_template(template: str) -> Template:
template_hash = hash(template)
if template_hash in templates_cache:
return templates_cache[template_hash]
else:
template_instance = Template(template)
templates_cache[template_hash] = template_instance
return template_instance
#####################################
# RENDER ENTRYPOINT
#####################################
def gen_render_data():
data = {
"href": "https://example.com",
"disabled": False,
"variant": "primary",
"type": "button",
"attrs": {
"class": "py-2 px-4",
},
}
return data
def render(data: Dict):
# Render
result = Button.render(
context=Context(),
kwargs=data,
slots={
"content": "Click me!",
},
)
return result
#####################################
# THEME
#####################################
ThemeColor = Literal["default", "error", "success", "alert", "info"]
ThemeVariant = Literal["primary", "secondary"]
VARIANTS = ["primary", "secondary"]
class ThemeStylingUnit(NamedTuple):
"""
Smallest unit of info, this class defines a specific styling of a specific
component in a specific state.
E.g. styling of a disabled "Error" button.
"""
color: str
"""CSS class(es) specifying color"""
css: str = ""
"""Other CSS classes not specific to color"""
class ThemeStylingVariant(NamedTuple):
"""
Collection of styling combinations that are meaningful as a group.
E.g. all "error" variants - primary, disabled, secondary, ...
"""
primary: ThemeStylingUnit
primary_disabled: ThemeStylingUnit
secondary: ThemeStylingUnit
secondary_disabled: ThemeStylingUnit
class Theme(NamedTuple):
"""Class for defining a styling and color theme for the app."""
default: ThemeStylingVariant
error: ThemeStylingVariant
alert: ThemeStylingVariant
success: ThemeStylingVariant
info: ThemeStylingVariant
_secondary_btn_styling = "ring-1 ring-inset"
theme = Theme(
default=ThemeStylingVariant(
primary=ThemeStylingUnit(
color="bg-blue-600 text-white hover:bg-blue-500 focus-visible:outline-blue-600 transition"
),
primary_disabled=ThemeStylingUnit(color="bg-blue-300 text-blue-50 focus-visible:outline-blue-600 transition"),
secondary=ThemeStylingUnit(
color="bg-white text-gray-800 ring-gray-300 hover:bg-gray-100 focus-visible:outline-gray-600 transition",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-gray-300 ring-gray-300 focus-visible:outline-gray-600 transition",
css=_secondary_btn_styling,
),
),
error=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-red-600 text-white hover:bg-red-500 focus-visible:outline-red-600"),
primary_disabled=ThemeStylingUnit(color="bg-red-300 text-white focus-visible:outline-red-600"),
secondary=ThemeStylingUnit(
color="bg-white text-red-600 ring-red-300 hover:bg-red-100 focus-visible:outline-red-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-red-200 ring-red-100 focus-visible:outline-red-600",
css=_secondary_btn_styling,
),
),
alert=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-amber-500 text-white hover:bg-amber-400 focus-visible:outline-amber-500"),
primary_disabled=ThemeStylingUnit(color="bg-amber-100 text-orange-300 focus-visible:outline-amber-500"),
secondary=ThemeStylingUnit(
color="bg-white text-amber-500 ring-amber-300 hover:bg-amber-100 focus-visible:outline-amber-500",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-orange-200 ring-amber-100 focus-visible:outline-amber-500",
css=_secondary_btn_styling,
),
),
success=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-green-600 text-white hover:bg-green-500 focus-visible:outline-green-600"),
primary_disabled=ThemeStylingUnit(color="bg-green-300 text-white focus-visible:outline-green-600"),
secondary=ThemeStylingUnit(
color="bg-white text-green-600 ring-green-300 hover:bg-green-100 focus-visible:outline-green-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-green-200 ring-green-100 focus-visible:outline-green-600",
css=_secondary_btn_styling,
),
),
info=ThemeStylingVariant(
primary=ThemeStylingUnit(color="bg-sky-600 text-white hover:bg-sky-500 focus-visible:outline-sky-600"),
primary_disabled=ThemeStylingUnit(color="bg-sky-300 text-white focus-visible:outline-sky-600"),
secondary=ThemeStylingUnit(
color="bg-white text-sky-600 ring-sky-300 hover:bg-sky-100 focus-visible:outline-sky-600",
css=_secondary_btn_styling,
),
secondary_disabled=ThemeStylingUnit(
color="bg-white text-sky-200 ring-sky-100 focus-visible:outline-sky-600",
css=_secondary_btn_styling,
),
),
)
def get_styling_css(
variant: Optional["ThemeVariant"] = None,
color: Optional["ThemeColor"] = None,
disabled: Optional[bool] = None,
):
"""
Dynamically access CSS styling classes for a specific variant and state.
E.g. the following three calls get styling classes for:
1. Secondary error state
2. Secondary alert disabled state
3. Primary default disabled state
```py
get_styling_css('secondary', 'error')
get_styling_css('secondary', 'alert', disabled=True)
get_styling_css(disabled=True)
```
"""
variant = variant or "primary"
color = color or "default"
disabled = disabled if disabled is not None else False
color_variants: ThemeStylingVariant = getattr(theme, color)
if variant not in VARIANTS:
raise ValueError(f'Unknown theme variant "{variant}", must be one of {VARIANTS}')
variant_name = variant if not disabled else f"{variant}_disabled"
styling: ThemeStylingUnit = getattr(color_variants, variant_name)
css = f"{styling.color} {styling.css}".strip()
return css
#####################################
# BUTTON
#####################################
class Button(Component):
def get_context_data(
self,
/,
*,
href: Optional[str] = None,
link: Optional[bool] = None,
disabled: Optional[bool] = False,
variant: Union["ThemeVariant", Literal["plain"]] = "primary",
color: Union["ThemeColor", str] = "default",
type: Optional[str] = "button",
attrs: Optional[dict] = None,
):
common_css = (
"inline-flex w-full text-sm font-semibold"
" sm:mt-0 sm:w-auto focus-visible:outline-2 focus-visible:outline-offset-2"
)
if variant == "plain":
all_css_class = common_css
else:
button_classes = get_styling_css(variant, color, disabled) # type: ignore[arg-type]
all_css_class = f"{button_classes} {common_css} px-3 py-2 justify-center rounded-md shadow-sm"
is_link = not disabled and (href or link)
all_attrs = {**(attrs or {})}
if disabled:
all_attrs["aria-disabled"] = "true"
return {
"href": href,
"disabled": disabled,
"type": type,
"btn_class": all_css_class,
"attrs": all_attrs,
"is_link": is_link,
}
template: types.django_html = """
{# Based on buttons from https://tailwindui.com/components/application-ui/overlays/modals #}
{% if is_link %}
<a
href="{{ href }}"
{% html_attrs attrs class=btn_class class="no-underline" %}
>
{% else %}
<button
type="{{ type }}"
{% if disabled %} disabled {% endif %}
{% html_attrs attrs class=btn_class %}
>
{% endif %}
{% slot "content" default / %}
{% if is_link %}
</a>
{% else %}
</button>
{% endif %}
"""
#####################################
#
# IMPLEMENTATION END
#
#####################################
# DO NOT REMOVE - See https://github.com/django-components/django-components/pull/999
# ----------- TESTS START ------------ #
# The code above is also used when benchmarking.
# The section below is NOT included.
from .testutils import CsrfTokenPatcher, GenIdPatcher # noqa: E402
def test_render(snapshot):
id_patcher = GenIdPatcher()
id_patcher.start()
csrf_token_patcher = CsrfTokenPatcher()
csrf_token_patcher.start()
data = gen_render_data()
rendered = render(data)
assert rendered == snapshot
id_patcher.stop()
csrf_token_patcher.stop()

View file

@ -1115,11 +1115,10 @@ class ComponentRenderTest(BaseTestCase):
response.content.decode(),
)
token_re = re.compile(rb"CSRF token:\s+(?P<token>[0-9a-zA-Z]{64})")
token_re = re.compile(rb"CSRF token:\s+predictabletoken")
token = token_re.findall(response.content)[0]
self.assertTrue(token)
self.assertEqual(len(token), 64)
self.assertEqual(token, b'CSRF token: predictabletoken')
def test_request_context_created_when_no_context(self):
@register("thing")
@ -1136,11 +1135,10 @@ class ComponentRenderTest(BaseTestCase):
self.assertEqual(response.status_code, 200)
token_re = re.compile(rb"CSRF token:\s+(?P<token>[0-9a-zA-Z]{64})")
token_re = re.compile(rb"CSRF token:\s+predictabletoken")
token = token_re.findall(response.content)[0]
self.assertTrue(token)
self.assertEqual(len(token), 64)
self.assertEqual(token, b'CSRF token: predictabletoken')
def test_request_context_created_when_already_a_context_dict(self):
@register("thing")
@ -1158,11 +1156,10 @@ class ComponentRenderTest(BaseTestCase):
self.assertEqual(response.status_code, 200)
token_re = re.compile(rb"CSRF token:\s+(?P<token>[0-9a-zA-Z]{64})")
token_re = re.compile(rb"CSRF token:\s+predictabletoken")
token = token_re.findall(response.content)[0]
self.assertTrue(token)
self.assertEqual(len(token), 64)
self.assertEqual(token, b'CSRF token: predictabletoken')
self.assertInHTML("Existing context: foo", response.content.decode())
def request_context_ignores_context_when_already_a_context(self):

View file

@ -19,14 +19,50 @@ response_stash = None
middleware = ComponentDependencyMiddleware(get_response=lambda _: response_stash)
class GenIdPatcher:
def __init__(self):
self._gen_id_count = 10599485
# Mock the `generate` function used inside `gen_id` so it returns deterministic IDs
def start(self):
# Random number so that the generated IDs are "hex-looking", e.g. a1bc3d
self._gen_id_count = 10599485
def mock_gen_id(*args, **kwargs):
self._gen_id_count += 1
return hex(self._gen_id_count)[2:]
self._gen_id_patch = patch("django_components.util.misc.generate", side_effect=mock_gen_id)
self._gen_id_patch.start()
def stop(self):
self._gen_id_patch.stop()
self._gen_id_count = 10599485
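# Illustrative usage (BaseTestCase below wires this up in setUp/tearDown):
#
#   patcher = GenIdPatcher()
#   patcher.start()  # gen_id() now yields "a1bc3e", "a1bc3f", ... deterministically
#   ...
#   patcher.stop()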
class CsrfTokenPatcher:
def __init__(self):
self._csrf_token = "predictabletoken"
def start(self):
self._csrf_token_patch = patch("django.middleware.csrf.get_token", return_value=self._csrf_token)
self._csrf_token_patch.start()
def stop(self):
self._csrf_token_patch.stop()
class BaseTestCase(SimpleTestCase):
def setUp(self):
super().setUp()
self._start_gen_id_patch()
self.gen_id_patcher = GenIdPatcher()
self.gen_id_patcher.start()
self.csrf_token_patcher = CsrfTokenPatcher()
self.csrf_token_patcher.start()
def tearDown(self):
self._stop_gen_id_patch()
self.gen_id_patcher.stop()
self.csrf_token_patcher.stop()
super().tearDown()
registry.clear()
@ -42,22 +78,6 @@ class BaseTestCase(SimpleTestCase):
from django_components.component import component_node_subclasses_by_name
component_node_subclasses_by_name.clear()
# Mock the `generate` function used inside `gen_id` so it returns deterministic IDs
def _start_gen_id_patch(self):
# Random number so that the generated IDs are "hex-looking", e.g. a1bc3d
self._gen_id_count = 10599485
def mock_gen_id(*args, **kwargs):
self._gen_id_count += 1
return hex(self._gen_id_count)[2:]
self._gen_id_patch = patch("django_components.util.misc.generate", side_effect=mock_gen_id)
self._gen_id_patch.start()
def _stop_gen_id_patch(self):
self._gen_id_patch.stop()
self._gen_id_count = 10599485
request = Mock()
mock_template = Mock()
@ -179,8 +199,8 @@ def parametrize_context_behavior(cases: List[ContextBehParam], settings: Optiona
engine.engine.template_loaders[0].reset()
# Reset gen_id
self._stop_gen_id_patch()
self._start_gen_id_patch()
self.gen_id_patcher.stop()
self.gen_id_patcher.start()
# Reset template cache
from django_components.cache import component_media_cache, template_cache

View file

@ -34,6 +34,7 @@ deps =
djc-core-html-parser
pytest
pytest-xdist
syrupy # snapshot testing
# NOTE: Keep playwright in sync with the version in requirements-ci.txt
# Otherwise we get error:
# playwright._impl._errors.Error: BrowserType.launch: Executable doesn't exist at /home/runner/.cache/ms-playwright/chromium-1140/chrome-linux/chrome
@ -54,15 +55,15 @@ commands = isort --check-only --diff src/django_components
[testenv:coverage]
deps =
pytest-coverage
pytest-cov
syrupy # snapshot testing
# NOTE: Keep playwright in sync with the version in requirements-ci.txt
playwright==1.48.0
requests
types-requests
whitenoise
commands =
coverage run --branch -m pytest
coverage report -m --fail-under=97
pytest --cov=django_components --cov-fail-under=87 --cov-branch
[testenv:mypy]
deps =