diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 1dbcbb1a..b544edb3 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,11 +9,6 @@ on: - '[0-9]+.[0-9]+.[0-9]+' branches: - master - pull_request: - branches: - - main - release: - types: [published] workflow_dispatch: jobs: @@ -27,30 +22,116 @@ jobs: # Only run in original repo (not in forks) if: github.repository == 'django-components/django-components' steps: + + ############################## + # SETUP + ############################## + - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.12" + python-version: "3.13" + cache: 'pip' - name: Install Hatch run: | python -m pip install --upgrade pip wheel - python -m pip install -q hatch pre-commit + python -m pip install -q hatch pre-commit asv hatch --version - - name: Create Virtual Environment - run: hatch env create docs - - name: Configure git run: | + # Get the master branch so we can run benchmarks on it + git remote add upstream https://github.com/${{ github.repository }}.git + git fetch origin master:master + git checkout master + # required for "mike deploy" command below which pushes to gh-pages git config user.name github-actions git config user.email github-actions@github.com + ########################################### + # RECORD BENCHMARK - ONLY ON PUSH TO MASTER + ########################################### + + - name: Run benchmarks for tag + if: github.ref_type == 'tag' && github.event_name == 'push' + run: | + # Get tag name + TAG=${GITHUB_REF#refs/tags/} + echo "TAG: $TAG" + + # TODO: REMOVE ONCE FIXED UPSTREAM + # Fix for https://github.com/airspeed-velocity/asv_runner/issues/45 + # Prepare virtual environment + # Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark. + # The problem is that `asv` passes the code to execute via command line, and when the + # code is too big, it fails with `OSError: [Errno 7] Argument list too long`. + # So we have to tweak it to pass the code via STDIN, which doesn't have this limitation. + # + # 1. First create the virtual environment, so that asv generates the directories where + # the monkeypatch can be applied. + echo "Creating virtual environment..." + asv setup -v || true + echo "Virtual environment created." + # 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files. + # First find all `timeraw.py` files + echo "Applying monkeypatch..." + find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do + # Add a newline and then append the monkeypatch contents + echo "" >> "$file" + cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file" + done + echo "Monkeypatch applied." + # END OF MONKEYPATCH + + # Prepare the profile under which the benchmarks will be saved. + # We assume that the CI machine has a name that is unique and stable. + # See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794 + echo "Preparing benchmarks profile..." + asv machine --yes --machine ci-linux + echo "Benchmarks profile DONE." + + # Run benchmarks for the current tag + # - `^` means that we mean the COMMIT of the tag's branch, not the BRANCH itself. + # Without it, we would run benchmarks for the whole branch history. + # With it, we run benchmarks FROM the tag's commit (incl) TO ... + # - `!` means that we want to select range spanning a single commit. 
+ # Without it, we would run benchmarks for all commits FROM the tag's commit + # TO the start of the branch history. + # With it, we run benchmarks ONLY FOR the tag's commit. + echo "Running benchmarks for tag ${TAG}..." + asv run master^! -v + echo "Benchmarks for tag ${TAG} DONE." + + # Generate benchmarks site + # This should save it in `docs/benchmarks/`, so we can then use it when + # building docs site with `mkdocs`. + echo "Generating benchmarks site..." + asv publish + echo "Benchmarks site DONE." + + # Stage and commit benchmark results + echo "Staging and committing benchmark results..." + git add .asv/results/ + git add docs/benchmarks/ + git commit -m "Add benchmark results for ${TAG}" + echo "Benchmark results committed." + git push origin master + echo "Benchmark results pushed to master." + + ############################## + # BUILD & DEPLOY DOCS + ############################## + + - name: Create Virtual Environment + run: hatch env create docs + # Conditions make sure to select the right step, depending on the job trigger. # Only one of the steps below will run at a time. The others will be skipped. @@ -72,9 +153,3 @@ jobs: run: | hatch run docs:mike deploy --push --update-aliases ${{ github.ref_name }} latest hatch run docs:mike set-default latest --push - - - name: Build & deploy docs for a new release - if: github.event_name == 'release' - run: | - hatch run docs:mike deploy --push --update-aliases ${{ github.ref_name }} latest - hatch run docs:mike set-default latest --push diff --git a/.github/workflows/pr-benchmark-comment.yml b/.github/workflows/pr-benchmark-comment.yml new file mode 100644 index 00000000..07b9de16 --- /dev/null +++ b/.github/workflows/pr-benchmark-comment.yml @@ -0,0 +1,98 @@ +# Run benchmark report on pull requests to master. +# The report is added to the PR as a comment. +# +# NOTE: When making a PR from a fork, the worker doesn't have sufficient +# access to make comments on the target repo's PR. And so, this workflow +# is split to two parts: +# +# 1. Benchmarking and saving results as artifacts +# 2. Downloading the results and commenting on the PR +# +# See https://stackoverflow.com/a/71683208/9788634 + +name: PR benchmark comment + +on: + workflow_run: + # NOTE: The name here MUST match the name of the workflow that generates the data + workflows: [PR benchmarks generate] + types: + - completed + +jobs: + download: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + repository-projects: write + steps: + ########## USE FOR DEBUGGING ########## + # - name: Debug workflow run info + # uses: actions/github-script@v7 + # with: + # script: | + # console.log('Workflow Run ID:', context.payload.workflow_run.id); + # const artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + # owner: context.repo.owner, + # repo: context.repo.repo, + # run_id: context.payload.workflow_run.id + # }); + # console.log('Available artifacts:'); + # console.log(JSON.stringify(artifacts.data, null, 2)); + # console.log(`PRs: ` + JSON.stringify(context.payload.workflow_run.pull_requests)); + ######################################### + + # NOTE: The next two steps (download and unzip) are equivalent to using `actions/download-artifact@v4` + # However, `download-artifact` was not picking up the artifact, while the REST client does. 
+ - name: Download benchmark results + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + // Find the artifact that was generated by the "pr-benchmark-generate" workflow + const allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + // Explicitly search the workflow run that generated the the results + // (AKA the "pr-benchmark-generate" workflow). + run_id: context.payload.workflow_run.id, + }); + const matchArtifact = allArtifacts.data.artifacts.filter((artifact) => { + return artifact.name == "benchmark_results" + })[0]; + + // Download the artifact + const download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + fs.writeFileSync( + `${process.env.GITHUB_WORKSPACE}/benchmark_results.zip`, + Buffer.from(download.data), + ); + + - name: Unzip artifact + run: unzip benchmark_results.zip + + - name: Comment on PR + # See https://github.com/actions/github-script + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const fs = require('fs'); + const results = fs.readFileSync('./benchmark_results.md', 'utf8'); + const body = `## Performance Benchmark Results\n\nComparing PR changes against master branch:\n\n${results}`; + + // See https://octokit.github.io/rest.js/v21/#issues-create-comment + await github.rest.issues.createComment({ + body: body, + // See https://github.com/actions/toolkit/blob/662b9d91f584bf29efbc41b86723e0e376010e41/packages/github/src/context.ts#L66 + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.workflow_run.pull_requests[0].number, + }); diff --git a/.github/workflows/pr-benchmark-generate.yml b/.github/workflows/pr-benchmark-generate.yml new file mode 100644 index 00000000..5e198200 --- /dev/null +++ b/.github/workflows/pr-benchmark-generate.yml @@ -0,0 +1,102 @@ +# Run benchmark report on pull requests to master. +# The report is added to the PR as a comment. +# +# NOTE: When making a PR from a fork, the worker doesn't have sufficient +# access to make comments on the target repo's PR. And so, this workflow +# is split to two parts: +# +# 1. Benchmarking and saving results as artifacts +# 2. Downloading the results and commenting on the PR +# +# See https://stackoverflow.com/a/71683208/9788634 + +name: PR benchmarks generate + +on: + pull_request: + branches: [ master ] + +jobs: + benchmark: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Need full history for ASV + + - name: Fetch base branch + run: | + git remote add upstream https://github.com/${{ github.repository }}.git + git fetch upstream master + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install asv + + - name: Run benchmarks + run: | + # TODO: REMOVE ONCE FIXED UPSTREAM + # Fix for https://github.com/airspeed-velocity/asv_runner/issues/45 + # Prepare virtual environment + # Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark. + # The problem is that `asv` passes the code to execute via command line, and when the + # code is too big, it fails with `OSError: [Errno 7] Argument list too long`. + # So we have to tweak it to pass the code via STDIN, which doesn't have this limitation. 
+ # + # 1. First create the virtual environment, so that asv generates the directories where + # the monkeypatch can be applied. + echo "Creating virtual environment..." + asv setup -v || true + echo "Virtual environment created." + # 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files. + # First find all `timeraw.py` files + echo "Applying monkeypatch..." + find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do + # Add a newline and then append the monkeypatch contents + echo "" >> "$file" + cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file" + done + echo "Monkeypatch applied." + # END OF MONKEYPATCH + + # Prepare the profile under which the benchmarks will be saved. + # We assume that the CI machine has a name that is unique and stable. + # See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794 + echo "Preparing benchmarks profile..." + asv machine --yes + echo "Benchmarks profile DONE." + + # Generate benchmark data + # - `^` means that we mean the COMMIT of the branch, not the BRANCH itself. + # Without it, we would run benchmarks for the whole branch history. + # With it, we run benchmarks FROM the latest commit (incl) TO ... + # - `!` means that we want to select range spanning a single commit. + # Without it, we would run benchmarks for all commits FROM the latest commit + # TO the start of the branch history. + # With it, we run benchmarks ONLY FOR the latest commit. + echo "Running benchmarks for upstream/master..." + DJC_BENCHMARK_QUICK=1 asv run upstream/master^! -v + echo "Benchmarks for upstream/master DONE." + echo "Running benchmarks for HEAD..." + DJC_BENCHMARK_QUICK=1 asv run HEAD^! -v + echo "Benchmarks for HEAD DONE." + + # Compare against master + echo "Comparing benchmarks..." + mkdir -p pr + asv compare upstream/master HEAD --factor 1.1 --split > ./pr/benchmark_results.md + echo "Benchmarks comparison DONE." + + - name: Save benchmark results + uses: actions/upload-artifact@v4 + with: + name: benchmark_results + path: pr/ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f1506e21..ee01f9ec 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,6 +17,16 @@ jobs: os: [ubuntu-20.04, windows-latest] steps: + # Configure git to handle long paths + # See https://stackoverflow.com/questions/22575662/filename-too-long-in-git-for-windows + # + # Long paths that are over the limit are because of the benchmarking data + # created by asv, as these may look like this: + # docs/benchmarks/graphs/arch-x86_64/branch-master/cpu-AMD EPYC 7763 64-Core Processor/django-5.1/djc-core-html-parser/machine-fv-az1693-854/num_cpu-4/os-Linux 6.8.0-1021-azure/python-3.13/ram-16373792/isolated vs django modes.timeraw_render_lg_subsequent.json + - name: Configure git + run: | + git config --global core.longpaths true + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} diff --git a/asv.conf.json b/asv.conf.json new file mode 100644 index 00000000..0ae16e29 --- /dev/null +++ b/asv.conf.json @@ -0,0 +1,210 @@ +{ + // The version of the config file format. 
Do not change, unless + // you know what you are doing + "version": 1, + + // The name of the project being benchmarked + "project": "django-components", + + // The project's homepage + // "project_url": "https://django-components.github.io/django-components/", + "project_url": "/django-components/", // Relative path, since benchmarks are nested under the docs site + + // The URL or local path of the source code repository for the + // project being benchmarked + "repo": ".", + + // The Python project's subdirectory in your repo. If missing or + // the empty string, the project is assumed to be located at the root + // of the repository. + // "repo_subdir": "", + + // Customizable commands for building the project. + // See asv.conf.json documentation. + // To build the package using pyproject.toml (PEP518), uncomment the following lines + // "build_command": [ + // "python -m pip install build", + // "python -m build", + // "python -mpip wheel -w {build_cache_dir} {build_dir}" + // ], + // To build the package using setuptools and a setup.py file, uncomment the following lines + // "build_command": [ + // "python setup.py build", + // "python -mpip wheel -w {build_cache_dir} {build_dir}" + // ], + + // Customizable commands for installing and uninstalling the project. + // See asv.conf.json documentation. + // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"], + // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"], + "install_command": ["in-dir={env_dir} python -mpip install ./project"], + + // List of branches to benchmark. If not provided, defaults to "main" + // (for git) or "default" (for mercurial). + // "branches": ["main"], // for git + // "branches": ["default"], // for mercurial + "branches": [ + "master" + ], + + // The DVCS being used. If not set, it will be automatically + // determined from "repo" by looking at the protocol in the URL + // (if remote), or by looking for special directories, such as + // ".git" (if local). + // "dvcs": "git", + + // The tool to use to create environments. May be "conda", + // "virtualenv", "mamba" (above 3.8) + // or other value depending on the plugins in use. + // If missing or the empty string, the tool will be automatically + // determined by looking for tools on the PATH environment + // variable. + "environment_type": "virtualenv", + + // timeout in seconds for installing any dependencies in environment + // defaults to 10 min + //"install_timeout": 600, + + // the base URL to show a commit for the project. + // "show_commit_url": "http://github.com/owner/project/commit/", + + // The Pythons you'd like to test against. If not provided, defaults + // to the current version of Python used to run `asv`. + "pythons": [ + "3.13" + ], + + // The list of conda channel names to be searched for benchmark + // dependency packages in the specified order + // "conda_channels": ["conda-forge", "defaults"], + + // A conda environment file that is used for environment creation. + // "conda_environment_file": "environment.yml", + + // The matrix of dependencies to test. Each key of the "req" + // requirements dictionary is the name of a package (in PyPI) and + // the values are version numbers. An empty list or empty string + // indicates to just test against the default (latest) + // version. null indicates that the package is to not be + // installed. 
If the package to be tested is only available from + // PyPi, and the 'environment_type' is conda, then you can preface + // the package name by 'pip+', and the package will be installed + // via pip (with all the conda available packages installed first, + // followed by the pip installed packages). + // + // The ``@env`` and ``@env_nobuild`` keys contain the matrix of + // environment variables to pass to build and benchmark commands. + // An environment will be created for every combination of the + // cartesian product of the "@env" variables in this matrix. + // Variables in "@env_nobuild" will be passed to every environment + // during the benchmark phase, but will not trigger creation of + // new environments. A value of ``null`` means that the variable + // will not be set for the current combination. + // + // "matrix": { + // "req": { + // "numpy": ["1.6", "1.7"], + // "six": ["", null], // test with and without six installed + // "pip+emcee": [""] // emcee is only available for install with pip. + // }, + // "env": {"ENV_VAR_1": ["val1", "val2"]}, + // "env_nobuild": {"ENV_VAR_2": ["val3", null]}, + // }, + "matrix": { + "req": { + "django": [ + "5.1" + ], + "djc-core-html-parser": [""] // Empty string means the latest version + } + }, + + // Combinations of libraries/python versions can be excluded/included + // from the set to test. Each entry is a dictionary containing additional + // key-value pairs to include/exclude. + // + // An exclude entry excludes entries where all values match. The + // values are regexps that should match the whole string. + // + // An include entry adds an environment. Only the packages listed + // are installed. The 'python' key is required. The exclude rules + // do not apply to includes. + // + // In addition to package names, the following keys are available: + // + // - python + // Python version, as in the *pythons* variable above. + // - environment_type + // Environment type, as above. + // - sys_platform + // Platform, as in sys.platform. Possible values for the common + // cases: 'linux2', 'win32', 'cygwin', 'darwin'. + // - req + // Required packages + // - env + // Environment variables + // - env_nobuild + // Non-build environment variables + // + // "exclude": [ + // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows + // {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda + // {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1 + // ], + // + // "include": [ + // // additional env for python3.12 + // {"python": "3.12", "req": {"numpy": "1.26"}, "env_nobuild": {"FOO": "123"}}, + // // additional env if run on windows+conda + // {"platform": "win32", "environment_type": "conda", "python": "3.12", "req": {"libpython": ""}}, + // ], + + // The directory (relative to the current directory) that benchmarks are + // stored in. If not provided, defaults to "benchmarks" + "benchmark_dir": "benchmarks", + + // The directory (relative to the current directory) to cache the Python + // environments in. If not provided, defaults to "env" + "env_dir": ".asv/env", + + // The directory (relative to the current directory) that raw benchmark + // results are stored in. If not provided, defaults to "results". + "results_dir": ".asv/results", + + // The directory (relative to the current directory) that the html tree + // should be written to. If not provided, defaults to "html". 
+ // "html_dir": ".asv/html", + "html_dir": "docs/benchmarks", // # TODO + + // The number of characters to retain in the commit hashes. + // "hash_length": 8, + + // `asv` will cache results of the recent builds in each + // environment, making them faster to install next time. This is + // the number of builds to keep, per environment. + // "build_cache_size": 2, + + // The commits after which the regression search in `asv publish` + // should start looking for regressions. Dictionary whose keys are + // regexps matching to benchmark names, and values corresponding to + // the commit (exclusive) after which to start looking for + // regressions. The default is to start from the first commit + // with results. If the commit is `null`, regression detection is + // skipped for the matching benchmark. + // + // "regressions_first_commits": { + // "some_benchmark": "352cdf", // Consider regressions only after this commit + // "another_benchmark": null, // Skip regression detection altogether + // }, + + // The thresholds for relative change in results, after which `asv + // publish` starts reporting regressions. Dictionary of the same + // form as in ``regressions_first_commits``, with values + // indicating the thresholds. If multiple entries match, the + // maximum is taken. If no entry matches, the default is 5%. + // + // "regressions_thresholds": { + // "some_benchmark": 0.01, // Threshold of 1% + // "another_benchmark": 0.5, // Threshold of 50% + // }, +} diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 00000000..4ef8dc8d --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,195 @@ +# Benchmarks + +## Overview + +[`asv`](https://github.com/airspeed-velocity/) (Airspeed Velocity) is used for benchmarking performance. + +`asv` covers the entire benchmarking workflow. We can: + +1. Define benchmark tests similarly to writing pytest tests (supports both timing and memory benchmarks) +2. Run the benchmarks and generate results for individual git commits, tags, or entire branches +3. View results as an HTML report (dashboard with charts) +4. Compare performance between two commits / tags / branches for CI integration + +![asv dashboard](./assets/asv_dashboard.png) + +django-components uses `asv` for these use cases: + +- Benchmarking across releases: + + 1. When a git tag is created and pushed, this triggers a Github Action workflow (see `docs.yml`). + 2. The workflow runs the benchmarks with the latest release, and commits the results to the repository. + Thus, we can see how performance changes across releases. + +- Displaying performance results on the website: + + 1. When a git tag is created and pushed, we also update the documentation website (see `docs.yml`). + 2. Before we publish the docs website, we generate the HTML report for the benchmark results. + 3. The generated report is placed in the `docs/benchmarks/` directory, and is thus + published with the rest of the docs website and available under [`/benchmarks/`](https://django-components.github.io/django-components/benchmarks). + - NOTE: The location where the report is placed is defined in `asv.conf.json`. + +- Compare performance between commits on pull requests: + 1. When a pull request is made, this triggers a Github Action workflow (see `benchmark.yml`). + 2. The workflow compares performance between commits. + 3. The report is added to the PR as a comment made by a bot. 
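+For a quick idea of what a benchmark definition looks like, here is a minimal sketch that uses
+the custom `@benchmark` decorator from `benchmarks/utils.py`. The file name, class name, group
+name, and the benchmarked logic below are made up for illustration; see
+[`benchmarks/benchmark_templating.py`](benchmark_templating.py) for the real definitions.
+
+```py
+# benchmarks/benchmark_example.py (hypothetical file; asv finds any file in `benchmarks/`)
+from benchmarks.utils import benchmark
+
+EXAMPLE_GROUP = "Example"  # hypothetical group name
+
+
+class ExampleTests:
+    @benchmark(
+        pretty_name="sort - example",
+        group_name=EXAMPLE_GROUP,
+        number=1,
+        rounds=5,
+        # Each value in `params` becomes a separate variant of the benchmark
+        params={"size": [1_000, 10_000]},
+    )
+    def timeraw_sort_example(self, size: int):
+        # `timeraw_` benchmarks return the benchmarked code as a string
+        # (optionally together with a setup script); asv runs it in a separate process.
+        setup = f"items = list(range({size}))[::-1]"
+        stmt = "sorted(items)"
+        return stmt, setup
+```
+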
+
+## Interpreting benchmarks
+
+The results CANNOT be taken as ABSOLUTE values, e.g.:
+
+"This example took 200ms to render, so my page will also take 200ms to render."
+
+Each UI may consist of a different number of Django templates, template tags, and components,
+and all of these may influence the rendering time differently.
+
+Instead, the results MUST be understood as RELATIVE values.
+
+- If a commit is 10% slower than the master branch, that's valid.
+- If Django components are 10% slower than vanilla Django templates, that's valid.
+- If "isolated" mode is 10% slower than "django" mode, that's valid.
+
+## Development
+
+Let's say we want to generate results for the last 5 commits.
+
+1. Install `asv`
+
+   ```bash
+   pip install asv
+   ```
+
+2. Run benchmarks and generate results
+
+   ```bash
+   asv run HEAD --steps 5 -e
+   ```
+
+   - `HEAD` means that we run benchmarks against the [current branch](https://stackoverflow.com/a/2304106/9788634).
+   - `--steps 5` means that we run benchmarks for the last 5 commits.
+   - `-e` prints out any errors.
+
+   The results will be stored in `.asv/results/`, as configured in `asv.conf.json`.
+
+3. Generate HTML report
+
+   ```bash
+   asv publish
+   asv preview
+   ```
+
+   - `publish` generates the HTML report and stores it in `docs/benchmarks/`, as configured in `asv.conf.json`.
+   - `preview` starts a local server and opens the report in the browser.
+
+   NOTE: Since the results are stored in `docs/benchmarks/`, you can also view them
+   with `mkdocs serve` by navigating to `http://localhost:9000/django-components/benchmarks/`.
+
+   NOTE 2: Running `publish` will overwrite the existing contents of `docs/benchmarks/`.
+
+## Writing benchmarks
+
+`asv` supports writing different [types of benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#benchmark-types). What's relevant for us:
+
+- [Raw timing benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#raw-timing-benchmarks)
+- [Peak memory benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#peak-memory)
+
+Notes:
+
+- The difference between "raw timing" and "timing" tests is that "raw timing" is run in a separate process.
+  Instead of running the logic within the test function itself, we return a script (string)
+  that will be executed in the separate process.
+
+- The difference between "peak memory" and "memory" tests is that "memory" calculates the memory
+  of the object returned from the test function. On the other hand, "peak memory" detects the
+  peak memory usage during the execution of the test function (including the setup function).
+
+You can write the test file anywhere in the `benchmarks/` directory; `asv` will automatically find it.
+
+Inside the file, write a test function. Depending on the type of the benchmark,
+prefix the test function name with `timeraw_` or `peakmem_`. See [`benchmarks/benchmark_templating.py`](benchmark_templating.py) for examples.
+
+### Ensuring that the benchmarked logic is correct
+
+The approach I (Juro) took with benchmarking the overall template rendering is that
+I've defined the actual logic in `tests/test_benchmark_*.py` files. Those files
+are part of the normal pytest suite, and even contain a section with pytest tests.
+
+This ensures that the benchmarked logic remains functional and error-free.
+
+However, there are some caveats:
+
+1. I wasn't able to import files from `tests/`.
+2. When running benchmarks, we don't want to run the pytest tests.
+
+To work around that, the approach I used for loading the files from the `tests/` directory is to:
+
+1. Get the file's source code as a string.
+2. Cut out unwanted sections (like the pytest tests).
+3. Append the benchmark-specific code to the file (e.g. to actually render the templates).
+4. In the case of "timeraw" benchmarks, we can simply return the remaining code as a string
+   to be run in a separate process.
+5. In the case of "peakmem" benchmarks, we need to access this modified source code as Python objects.
+   So the code is made available as a "virtual" module, which makes it possible to import Python objects like so:
+
+   ```py
+   from my_virtual_module import run_my_benchmark
+   ```
+
+## Using `asv`
+
+### Compare latest commit against master
+
+Note: Before comparing, you must run the benchmarks first to generate the results. The `continuous` command does not generate the results by itself.
+
+```bash
+asv continuous master^! HEAD^! --factor 1.1
+```
+
+- Factor of `1.1` means that the new commit is allowed to be 10% slower/faster than the master commit.
+
+- `^` means that we mean the COMMIT of the branch, not the BRANCH itself.
+
+  Without it, we would run benchmarks for the whole branch history.
+
+  With it, we run benchmarks FROM the latest commit (incl) TO ...
+
+- `!` means that we want to select a range spanning a single commit.
+
+  Without it, we would run benchmarks for all commits FROM the latest commit
+  TO the start of the branch history.
+
+  With it, we run benchmarks ONLY FOR the latest commit.
+
+### More Examples
+
+Notes:
+
+- Use `~1` to select the second-latest commit, `~2` for the third-latest, etc.
+
+Generate benchmarks for the latest commit in the `master` branch:
+
+```bash
+asv run master^!
+```
+
+Generate benchmarks for the second-latest commit in the `master` branch:
+
+```bash
+asv run master~1^!
+```
+
+Generate benchmarks for all commits in the `master` branch:
+
+```bash
+asv run master
+```
+
+Generate benchmarks for all commits in the `master` branch, excluding the latest commit:
+
+```bash
+asv run master~1
+```
+
+Generate benchmarks for the LAST 5 commits in the `master` branch, excluding the latest commit:
+
+```bash
+asv run master~1 --steps 5
+```
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/benchmarks/assets/asv_dashboard.png b/benchmarks/assets/asv_dashboard.png
new file mode 100644
index 00000000..524990b8
Binary files /dev/null and b/benchmarks/assets/asv_dashboard.png differ
diff --git a/benchmarks/benchmark_templating.py b/benchmarks/benchmark_templating.py
new file mode 100644
index 00000000..91a7d4ba
--- /dev/null
+++ b/benchmarks/benchmark_templating.py
@@ -0,0 +1,447 @@
+# Write the benchmarking functions here
+# See "Writing benchmarks" in the asv docs for more information.
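+#
+# For orientation, this module is organized as follows:
+# - Helpers that load the benchmarked code from `tests/test_benchmark_*.py`:
+#   `_get_templating_filepath`, `_get_templating_script`, `_get_templating_module`.
+# - `setup_templating_memory_benchmark` and `prepare_templating_benchmark`, which build the
+#   setup and benchmark code for the `peakmem_` and `timeraw_` benchmarks respectively.
+# - Benchmark classes grouped by comparison: `DjangoComponentsVsDjangoTests`,
+#   `IsolatedVsDjangoContextModesTests`, and `OtherTests`.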
+ +import re +from pathlib import Path +from types import ModuleType +from typing import Literal + +# Fix for for https://github.com/airspeed-velocity/asv_runner/pull/44 +import benchmarks.monkeypatch_asv # noqa: F401 + +from benchmarks.utils import benchmark, create_virtual_module + + +DJC_VS_DJ_GROUP = "Components vs Django" +DJC_ISOLATED_VS_NON_GROUP = "isolated vs django modes" +OTHER_GROUP = "Other" + + +DjcContextMode = Literal["isolated", "django"] +TemplatingRenderer = Literal["django", "django-components", "none"] +TemplatingTestSize = Literal["lg", "sm"] +TemplatingTestType = Literal[ + "first", # Testing performance of the first time the template is rendered + "subsequent", # Testing performance of the subsequent times the template is rendered + "startup", # Testing performance of the startup time (e.g. defining classes and templates) +] + + +def _get_templating_filepath(renderer: TemplatingRenderer, size: TemplatingTestSize) -> Path: + if renderer == "none": + raise ValueError("Cannot get filepath for renderer 'none'") + elif renderer not in ["django", "django-components"]: + raise ValueError(f"Invalid renderer: {renderer}") + + if size not in ("lg", "sm"): + raise ValueError(f"Invalid size: {size}, must be one of ('lg', 'sm')") + + # At this point, we know the renderer is either "django" or "django-components" + root = file_path = Path(__file__).parent.parent + if renderer == "django": + if size == "lg": + file_path = root / "tests" / "test_benchmark_django.py" + else: + file_path = root / "tests" / "test_benchmark_django_small.py" + else: + if size == "lg": + file_path = root / "tests" / "test_benchmark_djc.py" + else: + file_path = root / "tests" / "test_benchmark_djc_small.py" + + return file_path + + +def _get_templating_script( + renderer: TemplatingRenderer, + size: TemplatingTestSize, + context_mode: DjcContextMode, + imports_only: bool, +) -> str: + if renderer == "none": + return "" + elif renderer not in ["django", "django-components"]: + raise ValueError(f"Invalid renderer: {renderer}") + + # At this point, we know the renderer is either "django" or "django-components" + file_path = _get_templating_filepath(renderer, size) + contents = file_path.read_text() + + # The files with benchmarked code also have a section for testing them with pytest. + # We remove that pytest section, so the script is only the benchmark code. + contents = contents.split("# ----------- TESTS START ------------ #")[0] + + if imports_only: + # There is a benchmark test for measuring the time it takes to import the module. + # For that, we exclude from the code everything AFTER this line + contents = contents.split("# ----------- IMPORTS END ------------ #")[0] + else: + # Set the context mode by replacing variable in the script + contents = re.sub(r"CONTEXT_MODE.*?\n", f"CONTEXT_MODE = '{context_mode}'\n", contents, count=1) + + return contents + + +def _get_templating_module( + renderer: TemplatingRenderer, + size: TemplatingTestSize, + context_mode: DjcContextMode, + imports_only: bool, +) -> ModuleType: + if renderer not in ("django", "django-components"): + raise ValueError(f"Invalid renderer: {renderer}") + + file_path = _get_templating_filepath(renderer, size) + script = _get_templating_script(renderer, size, context_mode, imports_only) + + # This makes it possible to import the module in the benchmark function + # as `import test_templating` + module = create_virtual_module("test_templating", script, str(file_path)) + return module + + +# The `timeraw_` tests run in separate processes. 
But when running memory benchmarks, +# the tested logic runs in the same process as the where we run the benchmark functions +# (e.g. `peakmem_render_lg_first()`). Thus, the `peakmem_` functions have access to this file +# when the tested logic runs. +# +# Secondly, `asv` doesn't offer any way to pass data from `setup` to actual test. +# +# And so we define this global, which, when running memory benchmarks, the `setup` function +# populates. And then we trigger the actual render from within the test body. +do_render = lambda: None # noqa: E731 + + +def setup_templating_memory_benchmark( + renderer: TemplatingRenderer, + size: TemplatingTestSize, + test_type: TemplatingTestType, + context_mode: DjcContextMode, + imports_only: bool = False, +): + global do_render + module = _get_templating_module(renderer, size, context_mode, imports_only) + data = module.gen_render_data() + render = module.render + do_render = lambda: render(data) # noqa: E731 + + # Do the first render as part of setup if we're testing the subsequent renders + if test_type == "subsequent": + do_render() + + +# The timing benchmarks run the actual code in a separate process, by using the `timeraw_` prefix. +# As such, we don't actually load the code in this file. Instead, we only prepare a script (raw string) +# that will be run in the new process. +def prepare_templating_benchmark( + renderer: TemplatingRenderer, + size: TemplatingTestSize, + test_type: TemplatingTestType, + context_mode: DjcContextMode, + imports_only: bool = False, +): + global do_render + setup_script = _get_templating_script(renderer, size, context_mode, imports_only) + + # If we're testing the startup time, then the setup is actually the tested code + if test_type == "startup": + return setup_script + else: + # Otherwise include also data generation as part of setup + setup_script += "\n\n" "render_data = gen_render_data()\n" + + # Do the first render as part of setup if we're testing the subsequent renders + if test_type == "subsequent": + setup_script += "render(render_data)\n" + + benchmark_script = "render(render_data)\n" + return benchmark_script, setup_script + + +# - Group: django-components vs django +# - time: djc vs django (startup lg) +# - time: djc vs django (lg - FIRST) +# - time: djc vs django (sm - FIRST) +# - time: djc vs django (lg - SUBSEQUENT) +# - time: djc vs django (sm - SUBSEQUENT) +# - mem: djc vs django (lg - FIRST) +# - mem: djc vs django (sm - FIRST) +# - mem: djc vs django (lg - SUBSEQUENT) +# - mem: djc vs django (sm - SUBSEQUENT) +# +# NOTE: While the name suggests we're comparing Django and Django-components, be aware that +# in our "Django" tests, we still install and import django-components. We also use +# django-components's `{% html_attrs %}` tag in the Django scenario. `{% html_attrs %}` +# was used because the original sample code was from django-components. +# +# As such, these tests should seen not as "Using Django vs Using Components". But instead, +# it should be "What is the relative cost of using Components?". +# +# As an example, the benchmarking for the startup time and memory usage is not comparing +# two independent approaches. Rather, the test is checking if defining Components classes +# is more expensive than vanilla Django templates. +class DjangoComponentsVsDjangoTests: + # Testing startup time (e.g. 
defining classes and templates) + @benchmark( + pretty_name="startup - large", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + ) + def timeraw_startup_lg(self, renderer: TemplatingRenderer): + return prepare_templating_benchmark(renderer, "lg", "startup", "isolated") + + @benchmark( + pretty_name="render - small - first render", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + ) + def timeraw_render_sm_first(self, renderer: TemplatingRenderer): + return prepare_templating_benchmark(renderer, "sm", "first", "isolated") + + @benchmark( + pretty_name="render - small - second render", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + ) + def timeraw_render_sm_subsequent(self, renderer: TemplatingRenderer): + return prepare_templating_benchmark(renderer, "sm", "subsequent", "isolated") + + @benchmark( + pretty_name="render - large - first render", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + include_in_quick_benchmark=True, + ) + def timeraw_render_lg_first(self, renderer: TemplatingRenderer): + return prepare_templating_benchmark(renderer, "lg", "first", "isolated") + + @benchmark( + pretty_name="render - large - second render", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + ) + def timeraw_render_lg_subsequent(self, renderer: TemplatingRenderer): + return prepare_templating_benchmark(renderer, "lg", "subsequent", "isolated") + + @benchmark( + pretty_name="render - small - first render (mem)", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "first", "isolated"), + ) + def peakmem_render_sm_first(self, renderer: TemplatingRenderer): + do_render() + + @benchmark( + pretty_name="render - small - second render (mem)", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "subsequent", "isolated"), + ) + def peakmem_render_sm_subsequent(self, renderer: TemplatingRenderer): + do_render() + + @benchmark( + pretty_name="render - large - first render (mem)", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "first", "isolated"), + ) + def peakmem_render_lg_first(self, renderer: TemplatingRenderer): + do_render() + + @benchmark( + pretty_name="render - large - second render (mem)", + group_name=DJC_VS_DJ_GROUP, + number=1, + rounds=5, + params={ + "renderer": ["django", "django-components"], + }, + setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "subsequent", "isolated"), + ) + def peakmem_render_lg_subsequent(self, renderer: TemplatingRenderer): + do_render() + + +# - Group: Django-components "isolated" vs "django" modes +# - time: Isolated vs django djc (startup lg) +# - time: Isolated vs django djc (lg - FIRST) +# - time: Isolated vs django djc (sm - FIRST) +# - time: Isolated vs django djc (lg - SUBSEQUENT) +# - time: Isolated vs django djc (sm - SUBSEQUENT) +# - mem: Isolated vs django djc (lg - FIRST) +# - 
mem: Isolated vs django djc (sm - FIRST) +# - mem: Isolated vs django djc (lg - SUBSEQUENT) +# - mem: Isolated vs django djc (sm - SUBSEQUENT) +class IsolatedVsDjangoContextModesTests: + # Testing startup time (e.g. defining classes and templates) + @benchmark( + pretty_name="startup - large", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + ) + def timeraw_startup_lg(self, context_mode: DjcContextMode): + return prepare_templating_benchmark("django-components", "lg", "startup", context_mode) + + @benchmark( + pretty_name="render - small - first render", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + ) + def timeraw_render_sm_first(self, context_mode: DjcContextMode): + return prepare_templating_benchmark("django-components", "sm", "first", context_mode) + + @benchmark( + pretty_name="render - small - second render", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + ) + def timeraw_render_sm_subsequent(self, context_mode: DjcContextMode): + return prepare_templating_benchmark("django-components", "sm", "subsequent", context_mode) + + @benchmark( + pretty_name="render - large - first render", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + ) + def timeraw_render_lg_first(self, context_mode: DjcContextMode): + return prepare_templating_benchmark("django-components", "lg", "first", context_mode) + + @benchmark( + pretty_name="render - large - second render", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + ) + def timeraw_render_lg_subsequent(self, context_mode: DjcContextMode): + return prepare_templating_benchmark("django-components", "lg", "subsequent", context_mode) + + @benchmark( + pretty_name="render - small - first render (mem)", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + setup=lambda context_mode: setup_templating_memory_benchmark("django-components", "sm", "first", context_mode), + ) + def peakmem_render_sm_first(self, context_mode: DjcContextMode): + do_render() + + @benchmark( + pretty_name="render - small - second render (mem)", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + setup=lambda context_mode: setup_templating_memory_benchmark( + "django-components", + "sm", + "subsequent", + context_mode, + ), + ) + def peakmem_render_sm_subsequent(self, context_mode: DjcContextMode): + do_render() + + @benchmark( + pretty_name="render - large - first render (mem)", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + setup=lambda context_mode: setup_templating_memory_benchmark( + "django-components", + "lg", + "first", + context_mode, + ), + ) + def peakmem_render_lg_first(self, context_mode: DjcContextMode): + do_render() + + @benchmark( + pretty_name="render - large - second render (mem)", + group_name=DJC_ISOLATED_VS_NON_GROUP, + number=1, + rounds=5, + params={ + "context_mode": ["isolated", "django"], + }, + setup=lambda context_mode: setup_templating_memory_benchmark( + "django-components", + "lg", + "subsequent", + context_mode, + ), + ) + def peakmem_render_lg_subsequent(self, 
context_mode: DjcContextMode): + do_render() + + +class OtherTests: + @benchmark( + pretty_name="import time", + group_name=OTHER_GROUP, + number=1, + rounds=5, + ) + def timeraw_import_time(self): + return prepare_templating_benchmark("django-components", "lg", "startup", "isolated", imports_only=True) diff --git a/benchmarks/component_rendering.py b/benchmarks/component_rendering.py deleted file mode 100644 index 7d430e16..00000000 --- a/benchmarks/component_rendering.py +++ /dev/null @@ -1,174 +0,0 @@ -from time import perf_counter - -from django.template import Context, Template - -from django_components import Component, registry, types -from django_components.dependencies import CSS_DEPENDENCY_PLACEHOLDER, JS_DEPENDENCY_PLACEHOLDER -from tests.django_test_setup import * # NOQA -from tests.testutils import BaseTestCase, create_and_process_template_response - - -class SlottedComponent(Component): - template: types.django_html = """ - {% load component_tags %} - -
{% slot "header" %}Default header{% endslot %}
-
{% slot "main" %}Default main{% endslot %}
- -
- """ - - -class SimpleComponent(Component): - template: types.django_html = """ - Variable: {{ variable }} - """ - - css_file = "style.css" - js_file = "script.js" - - def get_context_data(self, variable, variable2="default"): - return { - "variable": variable, - "variable2": variable2, - } - - -class BreadcrumbComponent(Component): - template: types.django_html = """ - - """ - - css_file = "test.css" - js_file = "test.js" - - LINKS = [ - ( - "https://developer.mozilla.org/en-US/docs/Learn", - "Learn web development", - ), - ( - "https://developer.mozilla.org/en-US/docs/Learn/HTML", - "Structuring the web with HTML", - ), - ( - "https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML", - "Introduction to HTML", - ), - ( - "https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML/Document_and_website_structure", - "Document and website structure", - ), - ] - - def get_context_data(self, items): - if items > 4: - items = 4 - elif items < 0: - items = 0 - return {"links": self.LINKS[: items - 1]} - - -EXPECTED_CSS = """""" -EXPECTED_JS = """""" - - -class RenderBenchmarks(BaseTestCase): - def setUp(self): - registry.clear() - registry.register("test_component", SlottedComponent) - registry.register("inner_component", SimpleComponent) - registry.register("breadcrumb_component", BreadcrumbComponent) - - @staticmethod - def timed_loop(func, iterations=1000): - """Run func iterations times, and return the time in ms per iteration.""" - start_time = perf_counter() - for _ in range(iterations): - func() - end_time = perf_counter() - total_elapsed = end_time - start_time # NOQA - return total_elapsed * 1000 / iterations - - def test_render_time_for_small_component(self): - template_str: types.django_html = """ - {% load component_tags %} - {% component 'test_component' %} - {% slot "header" %} - {% component 'inner_component' variable='foo' %}{% endcomponent %} - {% endslot %} - {% endcomponent %} - """ - template = Template(template_str) - - print(f"{self.timed_loop(lambda: template.render(Context({})))} ms per iteration") - - def test_middleware_time_with_dependency_for_small_page(self): - template_str: types.django_html = """ - {% load component_tags %} - {% component_js_dependencies %} - {% component_css_dependencies %} - {% component 'test_component' %} - {% slot "header" %} - {% component 'inner_component' variable='foo' %}{% endcomponent %} - {% endslot %} - {% endcomponent %} - """ - template = Template(template_str) - # Sanity tests - response_content = create_and_process_template_response(template) - self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content) - self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content) - self.assertIn("style.css", response_content) - self.assertIn("script.js", response_content) - - without_middleware = self.timed_loop( - lambda: create_and_process_template_response(template, use_middleware=False) - ) - with_middleware = self.timed_loop(lambda: create_and_process_template_response(template, use_middleware=True)) - - print("Small page middleware test") - self.report_results(with_middleware, without_middleware) - - def test_render_time_with_dependency_for_large_page(self): - from django.template.loader import get_template - - template = get_template("mdn_complete_page.html") - response_content = create_and_process_template_response(template, {}) - self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content) - self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content) - self.assertIn("test.css", response_content) - 
self.assertIn("test.js", response_content) - - without_middleware = self.timed_loop( - lambda: create_and_process_template_response(template, {}, use_middleware=False) - ) - with_middleware = self.timed_loop( - lambda: create_and_process_template_response(template, {}, use_middleware=True) - ) - - print("Large page middleware test") - self.report_results(with_middleware, without_middleware) - - @staticmethod - def report_results(with_middleware, without_middleware): - print(f"Middleware active\t\t{with_middleware:.3f} ms per iteration") - print(f"Middleware inactive\t{without_middleware:.3f} ms per iteration") - time_difference = with_middleware - without_middleware - if without_middleware > with_middleware: - print(f"Decrease of {-100 * time_difference / with_middleware:.2f}%") - else: - print(f"Increase of {100 * time_difference / without_middleware:.2f}%") diff --git a/benchmarks/monkeypatch_asv.py b/benchmarks/monkeypatch_asv.py new file mode 100644 index 00000000..23003311 --- /dev/null +++ b/benchmarks/monkeypatch_asv.py @@ -0,0 +1,29 @@ +from asv_runner.benchmarks.timeraw import TimerawBenchmark, _SeparateProcessTimer + + +# Fix for https://github.com/airspeed-velocity/asv_runner/pull/44 +def _get_timer(self, *param): + """ + Returns a timer that runs the benchmark function in a separate process. + + #### Parameters + **param** (`tuple`) + : The parameters to pass to the benchmark function. + + #### Returns + **timer** (`_SeparateProcessTimer`) + : A timer that runs the function in a separate process. + """ + if param: + + def func(): + # ---------- OUR CHANGES: ADDED RETURN STATEMENT ---------- + return self.func(*param) + # ---------- OUR CHANGES END ---------- + + else: + func = self.func + return _SeparateProcessTimer(func) + + +TimerawBenchmark._get_timer = _get_timer diff --git a/benchmarks/monkeypatch_asv_ci.txt b/benchmarks/monkeypatch_asv_ci.txt new file mode 100644 index 00000000..30158b6d --- /dev/null +++ b/benchmarks/monkeypatch_asv_ci.txt @@ -0,0 +1,66 @@ +# ------------ FIX FOR #45 ------------ +# See https://github.com/airspeed-velocity/asv_runner/issues/45 +# This fix is applied in CI in the `benchmark.yml` file. +# This file is intentionally named `monkeypatch_asv_ci.txt` to avoid being +# loaded as a python file by `asv`. +# ------------------------------------- + +def timeit(self, number): + """ + Run the function's code `number` times in a separate Python process, and + return the execution time. + + #### Parameters + **number** (`int`) + : The number of times to execute the function's code. + + #### Returns + **time** (`float`) + : The time it took to execute the function's code `number` times. + + #### Notes + The function's code is executed in a separate Python process to avoid + interference from the parent process. The function can return either a + single string of code to be executed, or a tuple of two strings: the + code to be executed and the setup code to be run before timing. 
+ """ + stmt = self.func() + if isinstance(stmt, tuple): + stmt, setup = stmt + else: + setup = "" + stmt = textwrap.dedent(stmt) + setup = textwrap.dedent(setup) + stmt = stmt.replace(r'"""', r"\"\"\"") + setup = setup.replace(r'"""', r"\"\"\"") + + # TODO + # -----------ORIGINAL CODE----------- + # code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number) + + # res = subprocess.check_output([sys.executable, "-c", code]) + # return float(res.strip()) + + # -----------NEW CODE----------- + code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number) + + evaler = textwrap.dedent( + """ + import sys + code = sys.stdin.read() + exec(code) + """ + ) + + proc = subprocess.Popen([sys.executable, "-c", evaler], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = proc.communicate(input=code.encode("utf-8")) + if proc.returncode != 0: + raise RuntimeError(f"Subprocess failed: {stderr.decode()}") + return float(stdout.decode("utf-8").strip()) + +_SeparateProcessTimer.timeit = timeit + +# ------------ END FIX #45 ------------ diff --git a/benchmarks/test_lexer_performance.py b/benchmarks/test_lexer_performance.py deleted file mode 100644 index b95f7d63..00000000 --- a/benchmarks/test_lexer_performance.py +++ /dev/null @@ -1,195 +0,0 @@ -# NOTE: This file is more of a playground than a proper test - -import timeit -from typing import List, Tuple - -from django.template.base import DebugLexer, Lexer, Token - -from django_components.util.template_parser import parse_template - - -def django_lexer(template: str) -> List[Token]: - """Use Django's built-in lexer to tokenize a template.""" - lexer = Lexer(template) - return list(lexer.tokenize()) - - -def django_debug_lexer(template: str) -> List[Token]: - """Use Django's built-in lexer to tokenize a template.""" - lexer = DebugLexer(template) - return list(lexer.tokenize()) - - -def run_benchmark(template: str, num_iterations: int = 5000) -> Tuple[float, float]: - """Run performance comparison between Django and custom lexer.""" - # django_time = timeit.timeit(lambda: django_lexer(template), number=num_iterations) - django_debug_time = timeit.timeit(lambda: django_debug_lexer(template), number=num_iterations) - custom_time = timeit.timeit(lambda: parse_template(template), number=num_iterations) - # return django_time, django_debug_time - return django_debug_time, custom_time - - -def print_benchmark_results(template: str, django_time: float, custom_time: float, num_iterations: int) -> None: - """Print formatted benchmark results.""" - print(f"\nTemplate: {template}") - print(f"Iterations: {num_iterations}") - print(f"Django Lexer: {django_time:.6f} seconds") - print(f"Custom Lexer: {custom_time:.6f} seconds") - print(f"Difference: {abs(django_time - custom_time):.6f} seconds") - print(f"Custom lexer is {(django_time / custom_time):.2f}x {'faster' if custom_time < django_time else 'slower'}") - - -if __name__ == "__main__": - test_cases = [ - # Simple text - "Hello World", - # Simple variable - "Hello {{ name }}", - # Simple block - "{% if condition %}Hello{% endif %}", - # Complex nested template - """ - {% extends "base.html" %} - {% block content %} -

{{ title }}

- {% for item in items %} -
- {{ item.name }} - {% if item.description %} -

{{ item.description }}

- {% endif %} -
- {% endfor %} - {% endblock %} - """, - # Component with nested tags - """ - {% component 'table' - headers=headers - rows=rows - footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}" - title="{% trans 'Data Table' %}" - %} - """, - # Real world example - """ -
- {# Info section #} -
-
-

Project Info

- - {% if editable %} - {% component "Button" - href=project_edit_url - attrs:class="not-prose" - footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}" - title="{% trans 'Data Table' %}" - %} - Edit Project - {% endcomponent %} - {% endif %} -
- - - {% for key, value in project_info %} - - - - - {% endfor %} -
- {{ key }}: - - {{ value }} -
-
- - {# Status Updates section #} - {% component "ProjectStatusUpdates" - project_id=project.pk - status_updates=status_updates - editable=editable - footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}" - title="{% trans 'Data Table' %}" - / %} -
- {# Team section #} -
-
-

Dcode Team

- - {% if editable %} - {% component "Button" - href=edit_project_roles_url - attrs:class="not-prose" - footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}" - title="{% trans 'Data Table' %}" - %} - Edit Team - {% endcomponent %} - {% endif %} -
- - {% component "ProjectUsers" - project_id=project.pk - roles_with_users=roles_with_users - editable=False - footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}" - title="{% trans 'Data Table' %}" - / %} -
- - {# POCs section #} -
-
-

Client POCs

-
-            {% if editable %}
-                {% component "Button"
-                    href=edit_pocs_url
-                    attrs:class="not-prose"
-                    footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
-                    title="{% trans 'Data Table' %}"
-                %}
-                    Edit POCs
-                {% endcomponent %}
-            {% endif %}
-
-            {% if poc_data %}
-                Name | Job Title | Hubspot Profile
-                {% for data in poc_data %}
-                    {{ data.poc.contact.first_name }} {{ data.poc.contact.last_name }}
-                    {{ data.poc.contact.job_title }}
-                    {% component "Icon"
-                        href=data.hubspot_url
-                        name="arrow-top-right-on-square"
-                        variant="outline"
-                        color="text-gray-400 hover:text-gray-500"
-                        footer="{% slot 'footer' %}Total: {{ total }}{% endslot %}"
-                        title="{% trans 'Data Table' %}"
-                    / %}
-                {% endfor %}
-            {% else %}
-

No entries

- {% endif %} -
-
-
- """, - ] - - for template in test_cases: - django_time, custom_time = run_benchmark(template) - print_benchmark_results(template, django_time, custom_time, 200) diff --git a/benchmarks/utils.py b/benchmarks/utils.py new file mode 100644 index 00000000..eb160cb0 --- /dev/null +++ b/benchmarks/utils.py @@ -0,0 +1,99 @@ +import os +import sys +from importlib.abc import Loader +from importlib.util import spec_from_loader, module_from_spec +from types import ModuleType +from typing import Any, Dict, List, Optional + + +# NOTE: benchmark_name constraints: +# - MUST BE UNIQUE +# - MUST NOT CONTAIN `-` +# - MUST START WITH `time_`, `mem_`, `peakmem_` +# See https://github.com/airspeed-velocity/asv/pull/1470 +def benchmark( + *, + pretty_name: Optional[str] = None, + timeout: Optional[int] = None, + group_name: Optional[str] = None, + params: Optional[Dict[str, List[Any]]] = None, + number: Optional[int] = None, + min_run_count: Optional[int] = None, + include_in_quick_benchmark: bool = False, + **kwargs, +): + def decorator(func): + # For pull requests, we want to run benchmarks only for a subset of tests, + # because the full set of tests takes about 10 minutes to run (5 min per commit). + # This is done by setting DJC_BENCHMARK_QUICK=1 in the environment. + if os.getenv("DJC_BENCHMARK_QUICK") and not include_in_quick_benchmark: + # By setting the benchmark name to something that does NOT start with + # valid prefixes like `time_`, `mem_`, or `peakmem_`, this function will be ignored by asv. + func.benchmark_name = "noop" + return func + + # "group_name" is our custom field, which we actually convert to asv's "benchmark_name" + if group_name is not None: + benchmark_name = f"{group_name}.{func.__name__}" + func.benchmark_name = benchmark_name + + # Also "params" is custom, so we normalize it to "params" and "param_names" + if params is not None: + func.params, func.param_names = list(params.values()), list(params.keys()) + + if pretty_name is not None: + func.pretty_name = pretty_name + if timeout is not None: + func.timeout = timeout + if number is not None: + func.number = number + if min_run_count is not None: + func.min_run_count = min_run_count + + # Additional, untyped kwargs + for k, v in kwargs.items(): + setattr(func, k, v) + + return func + + return decorator + + +class VirtualModuleLoader(Loader): + def __init__(self, code_string): + self.code_string = code_string + + def exec_module(self, module): + exec(self.code_string, module.__dict__) + + +def create_virtual_module(name: str, code_string: str, file_path: str) -> ModuleType: + """ + To avoid the headaches of importing the tested code from another diretory, + we create a "virtual" module that we can import from anywhere. + + E.g. 
+ ```py + from benchmarks.utils import create_virtual_module + + create_virtual_module("my_module", "print('Hello, world!')", __file__) + + # Now you can import my_module from anywhere + import my_module + ``` + """ + # Create the module specification + spec = spec_from_loader(name, VirtualModuleLoader(code_string)) + + # Create the module + module = module_from_spec(spec) # type: ignore[arg-type] + module.__file__ = file_path + module.__name__ = name + + # Add it to sys.modules + sys.modules[name] = module + + # Execute the module + spec.loader.exec_module(module) # type: ignore[union-attr] + + return module diff --git a/docs/benchmarks/asv.css b/docs/benchmarks/asv.css new file mode 100644 index 00000000..d7867516 --- /dev/null +++ b/docs/benchmarks/asv.css @@ -0,0 +1,161 @@ +/* Basic navigation */ + +.asv-navigation { + padding: 2px; +} + +nav ul li.active a { + height: 52px; +} + +nav li.active span.navbar-brand { + background-color: #e7e7e7; + height: 52px; +} + +nav li.active span.navbar-brand:hover { + background-color: #e7e7e7; +} + +.navbar-default .navbar-link { + color: #2458D9; +} + +.panel-body { + padding: 0; +} + +.panel { + margin-bottom: 4px; + -webkit-box-shadow: none; + box-shadow: none; + border-radius: 0; + border-top-left-radius: 3px; + border-top-right-radius: 3px; +} + +.panel-default>.panel-heading, +.panel-heading { + font-size: 12px; + font-weight:bold; + padding: 2px; + text-align: center; + border-top-left-radius: 3px; + border-top-right-radius: 3px; + background-color: #eee; +} + +.btn, +.btn-group, +.btn-group-vertical>.btn:first-child, +.btn-group-vertical>.btn:last-child:not(:first-child), +.btn-group-vertical>.btn:last-child { + border: none; + border-radius: 0px; + overflow: hidden; +} + +.btn-default:focus, .btn-default:active, .btn-default.active { + border: none; + color: #fff; + background-color: #99bfcd; +} + +#range { + font-family: monospace; + text-align: center; + background: #ffffff; +} + +.form-control { + border: none; + border-radius: 0px; + font-size: 12px; + padding: 0px; +} + +.tooltip-inner { + min-width: 100px; + max-width: 800px; + text-align: left; + white-space: pre-wrap; + font-family: monospace; +} + +/* Benchmark tree */ + +.nav-list { + font-size: 12px; + padding: 0; + padding-left: 15px; +} + +.nav-list>li { + overflow-x: hidden; +} + +.nav-list>li>a { + padding: 0; + padding-left: 5px; + color: #000; +} + +.nav-list>li>a:focus { + color: #fff; + background-color: #99bfcd; + box-shadow: inset 0 3px 5px rgba(0,0,0,.125); +} + +.nav-list>li>.nav-header { + white-space: nowrap; + font-weight: 500; + margin-bottom: 2px; +} + +.caret-right { + display: inline-block; + width: 0; + height: 0; + margin-left: 2px; + vertical-align: middle; + border-left: 4px solid; + border-bottom: 4px solid transparent; + border-top: 4px solid transparent; +} + +/* Summary page */ + +.benchmark-group > h1 { + text-align: center; +} + +.benchmark-container { + width: 300px; + height: 116px; + padding: 4px; + border-radius: 3px; +} + +.benchmark-container:hover { + background-color: #eee; +} + +.benchmark-plot { + width: 292px; + height: 88px; +} + +.benchmark-text { + font-size: 12px; + color: #000; + width: 292px; + overflow: hidden; +} + +#extra-buttons { + margin: 1em; +} + +#extra-buttons a { + border: solid 1px #ccc; +} diff --git a/docs/benchmarks/asv.js b/docs/benchmarks/asv.js new file mode 100644 index 00000000..ac235639 --- /dev/null +++ b/docs/benchmarks/asv.js @@ -0,0 +1,525 @@ +'use strict'; + +$(document).ready(function() { + /* GLOBAL STATE 
*/ + /* The index.json content as returned from the server */ + var main_timestamp = ''; + var main_json = {}; + /* Extra pages: {name: show_function} */ + var loaded_pages = {}; + /* Previous window scroll positions */ + var window_scroll_positions = {}; + /* Previous window hash location */ + var window_last_location = null; + /* Graph data cache */ + var graph_cache = {}; + var graph_cache_max_size = 5; + + var colors = [ + '#247AAD', + '#E24A33', + '#988ED5', + '#777777', + '#FBC15E', + '#8EBA42', + '#FFB5B8' + ]; + + var time_units = [ + ['ps', 'picoseconds', 0.000000000001], + ['ns', 'nanoseconds', 0.000000001], + ['μs', 'microseconds', 0.000001], + ['ms', 'milliseconds', 0.001], + ['s', 'seconds', 1], + ['m', 'minutes', 60], + ['h', 'hours', 60 * 60], + ['d', 'days', 60 * 60 * 24], + ['w', 'weeks', 60 * 60 * 24 * 7], + ['y', 'years', 60 * 60 * 24 * 7 * 52], + ['C', 'centuries', 60 * 60 * 24 * 7 * 52 * 100] + ]; + + var mem_units = [ + ['', 'bytes', 1], + ['k', 'kilobytes', 1000], + ['M', 'megabytes', 1000000], + ['G', 'gigabytes', 1000000000], + ['T', 'terabytes', 1000000000000] + ]; + + function pretty_second(x) { + for (var i = 0; i < time_units.length - 1; ++i) { + if (Math.abs(x) < time_units[i+1][2]) { + return (x / time_units[i][2]).toFixed(3) + time_units[i][0]; + } + } + + return 'inf'; + } + + function pretty_byte(x) { + for (var i = 0; i < mem_units.length - 1; ++i) { + if (Math.abs(x) < mem_units[i+1][2]) { + break; + } + } + if (i == 0) { + return x + ''; + } + return (x / mem_units[i][2]).toFixed(3) + mem_units[i][0]; + } + + function pretty_unit(x, unit) { + if (unit == "seconds") { + return pretty_second(x); + } + else if (unit == "bytes") { + return pretty_byte(x); + } + else if (unit && unit != "unit") { + return '' + x.toPrecision(3) + ' ' + unit; + } + else { + return '' + x.toPrecision(3); + } + } + + function pad_left(s, c, num) { + s = '' + s; + while (s.length < num) { + s = c + s; + } + return s; + } + + function format_date_yyyymmdd(date) { + return (pad_left(date.getFullYear(), '0', 4) + + '-' + pad_left(date.getMonth() + 1, '0', 2) + + '-' + pad_left(date.getDate(), '0', 2)); + } + + function format_date_yyyymmdd_hhmm(date) { + return (format_date_yyyymmdd(date) + ' ' + + pad_left(date.getHours(), '0', 2) + + ':' + pad_left(date.getMinutes(), '0', 2)); + } + + /* Convert a flat index to permutation to the corresponding value */ + function param_selection_from_flat_idx(params, idx) { + var selection = []; + if (idx < 0) { + idx = 0; + } + for (var k = params.length-1; k >= 0; --k) { + var j = idx % params[k].length; + selection.unshift([j]); + idx = (idx - j) / params[k].length; + } + selection.unshift([null]); + return selection; + } + + /* Convert a benchmark parameter value from their native Python + repr format to a number or a string, ready for presentation */ + function convert_benchmark_param_value(value_repr) { + var match = Number(value_repr); + if (!isNaN(match)) { + return match; + } + + /* Python str */ + match = value_repr.match(/^'(.+)'$/); + if (match) { + return match[1]; + } + + /* Python unicode */ + match = value_repr.match(/^u'(.+)'$/); + if (match) { + return match[1]; + } + + /* Python class */ + match = value_repr.match(/^$/); + if (match) { + return match[1]; + } + + return value_repr; + } + + /* Convert loaded graph data to a format flot understands, by + treating either time or one of the parameters as x-axis, + and selecting only one value of the remaining axes */ + function filter_graph_data(raw_series, x_axis, other_indices, 
params) { + if (params.length == 0) { + /* Simple time series */ + return raw_series; + } + + /* Compute position of data entry in the results list, + and stride corresponding to plot x-axis parameter */ + var stride = 1; + var param_stride = 0; + var param_idx = 0; + for (var k = params.length - 1; k >= 0; --k) { + if (k == x_axis - 1) { + param_stride = stride; + } + else { + param_idx += other_indices[k + 1] * stride; + } + stride *= params[k].length; + } + + if (x_axis == 0) { + /* x-axis is time axis */ + var series = new Array(raw_series.length); + for (var k = 0; k < raw_series.length; ++k) { + if (raw_series[k][1] === null) { + series[k] = [raw_series[k][0], null]; + } else { + series[k] = [raw_series[k][0], + raw_series[k][1][param_idx]]; + } + } + return series; + } + else { + /* x-axis is some parameter axis */ + var time_idx = null; + if (other_indices[0] === null) { + time_idx = raw_series.length - 1; + } + else { + /* Need to search for the correct time value */ + for (var k = 0; k < raw_series.length; ++k) { + if (raw_series[k][0] == other_indices[0]) { + time_idx = k; + break; + } + } + if (time_idx === null) { + /* No data points */ + return []; + } + } + + var x_values = params[x_axis - 1]; + var series = new Array(x_values.length); + for (var k = 0; k < x_values.length; ++k) { + if (raw_series[time_idx][1] === null) { + series[k] = [convert_benchmark_param_value(x_values[k]), + null]; + } + else { + series[k] = [convert_benchmark_param_value(x_values[k]), + raw_series[time_idx][1][param_idx]]; + } + param_idx += param_stride; + } + return series; + } + } + + function filter_graph_data_idx(raw_series, x_axis, flat_idx, params) { + var selection = param_selection_from_flat_idx(params, flat_idx); + var flat_selection = []; + $.each(selection, function(i, v) { + flat_selection.push(v[0]); + }); + return filter_graph_data(raw_series, x_axis, flat_selection, params); + } + + /* Escape special characters in graph item file names. + The implementation must match asv.util.sanitize_filename */ + function sanitize_filename(name) { + var bad_re = /[<>:"\/\\^|?*\x00-\x1f]/g; + var bad_names = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", + "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", + "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", + "LPT9"]; + name = name.replace(bad_re, "_"); + if (bad_names.indexOf(name.toUpperCase()) != -1) { + name = name + "_"; + } + return name; + } + + /* Given a specific group of parameters, generate the URL to + use to load that graph. 
+ The implementation must match asv.graph.Graph.get_file_path + */ + function graph_to_path(benchmark_name, state) { + var parts = []; + $.each(state, function(key, value) { + var part; + if (value === null) { + part = key + "-null"; + } else if (value) { + part = key + "-" + value; + } else { + part = key; + } + parts.push(sanitize_filename('' + part)); + }); + parts.sort(); + parts.splice(0, 0, "graphs"); + parts.push(sanitize_filename(benchmark_name)); + + /* Escape URI components */ + parts = $.map(parts, function (val) { return encodeURIComponent(val); }); + return parts.join('/') + ".json"; + } + + /* + Load and cache graph data (on javascript side) + */ + function load_graph_data(url, success, failure) { + var dfd = $.Deferred(); + if (graph_cache[url]) { + setTimeout(function() { + dfd.resolve(graph_cache[url]); + }, 1); + } + else { + $.ajax({ + url: url + '?timestamp=' + $.asv.main_timestamp, + dataType: "json", + cache: true + }).done(function(data) { + if (Object.keys(graph_cache).length > graph_cache_max_size) { + $.each(Object.keys(graph_cache), function (i, key) { + delete graph_cache[key]; + }); + } + graph_cache[url] = data; + dfd.resolve(data); + }).fail(function() { + dfd.reject(); + }); + } + return dfd.promise(); + } + + /* + Parse hash string, assuming format similar to standard URL + query strings + */ + function parse_hash_string(str) { + var info = {location: [''], params: {}}; + + if (str && str[0] == '#') { + str = str.slice(1); + } + if (str && str[0] == '/') { + str = str.slice(1); + } + + var match = str.match(/^([^?]*?)\?/); + if (match) { + info['location'] = decodeURIComponent(match[1]).replace(/\/+/, '/').split('/'); + var rest = str.slice(match[1].length+1); + var parts = rest.split('&'); + for (var i = 0; i < parts.length; ++i) { + var part = parts[i].split('='); + if (part.length != 2) { + continue; + } + var key = decodeURIComponent(part[0].replace(/\+/g, " ")); + var value = decodeURIComponent(part[1].replace(/\+/g, " ")); + if (value == '[none]') { + value = null; + } + if (info['params'][key] === undefined) { + info['params'][key] = [value]; + } + else { + info['params'][key].push(value); + } + } + } + else { + info['location'] = decodeURIComponent(str).replace(/\/+/, '/').split('/'); + } + return info; + } + + /* + Generate a hash string, inverse of parse_hash_string + */ + function format_hash_string(info) { + var parts = info['params']; + var str = '#' + info['location']; + + if (parts) { + str = str + '?'; + var first = true; + $.each(parts, function (key, values) { + $.each(values, function (idx, value) { + if (!first) { + str = str + '&'; + } + if (value === null) { + value = '[none]'; + } + str = str + encodeURIComponent(key) + '=' + encodeURIComponent(value); + first = false; + }); + }); + } + return str; + } + + /* + Dealing with sub-pages + */ + + function show_page(name, params) { + if (loaded_pages[name] !== undefined) { + $("#nav ul li.active").removeClass('active'); + $("#nav-li-" + name).addClass('active'); + $("#graph-display").hide(); + $("#summarygrid-display").hide(); + $("#summarylist-display").hide(); + $('#regressions-display').hide(); + $('.tooltip').remove(); + loaded_pages[name](params); + return true; + } + else { + return false; + } + } + + function hashchange() { + var info = parse_hash_string(window.location.hash); + + /* Keep track of window scroll position; makes the back-button work */ + var old_scroll_pos = window_scroll_positions[info.location.join('/')]; + window_scroll_positions[window_last_location] = 
$(window).scrollTop(); + window_last_location = info.location.join('/'); + + /* Redirect to correct handler */ + if (show_page(info.location, info.params)) { + /* show_page does the work */ + } + else { + /* Display benchmark page */ + info.params['benchmark'] = info.location[0]; + show_page('graphdisplay', info.params); + } + + /* Scroll back to previous position, if any */ + if (old_scroll_pos !== undefined) { + $(window).scrollTop(old_scroll_pos); + } + } + + function get_commit_hash(revision) { + var commit_hash = main_json.revision_to_hash[revision]; + if (commit_hash) { + // Return printable commit hash + commit_hash = commit_hash.slice(0, main_json.hash_length); + } + return commit_hash; + } + + function get_revision(commit_hash) { + var rev = null; + $.each(main_json.revision_to_hash, function(revision, full_commit_hash) { + if (full_commit_hash.startsWith(commit_hash)) { + rev = revision; + // break the $.each loop + return false; + } + }); + return rev; + } + + function init_index() { + /* Fetch the main index.json and then set up the page elements + based on it. */ + $.ajax({ + url: "index.json" + '?timestamp=' + $.asv.main_timestamp, + dataType: "json", + cache: true + }).done(function (index) { + main_json = index; + $.asv.main_json = index; + + /* Page title */ + var project_name = $("#project-name")[0]; + project_name.textContent = index.project; + project_name.setAttribute("href", index.project_url); + $("#project-name").textContent = index.project; + document.title = "airspeed velocity of an unladen " + index.project; + + $(window).on('hashchange', hashchange); + + $('#graph-display').hide(); + $('#regressions-display').hide(); + $('#summarygrid-display').hide(); + $('#summarylist-display').hide(); + + hashchange(); + }).fail(function () { + $.asv.ui.network_error(); + }); + } + + function init() { + /* Fetch the info.json */ + $.ajax({ + url: "info.json", + dataType: "json", + cache: false + }).done(function (info) { + main_timestamp = info['timestamp']; + $.asv.main_timestamp = main_timestamp; + init_index(); + }).fail(function () { + $.asv.ui.network_error(); + }); + } + + + /* + Set up $.asv + */ + + this.register_page = function(name, show_function) { + loaded_pages[name] = show_function; + } + this.parse_hash_string = parse_hash_string; + this.format_hash_string = format_hash_string; + + this.filter_graph_data = filter_graph_data; + this.filter_graph_data_idx = filter_graph_data_idx; + this.convert_benchmark_param_value = convert_benchmark_param_value; + this.param_selection_from_flat_idx = param_selection_from_flat_idx; + this.graph_to_path = graph_to_path; + this.load_graph_data = load_graph_data; + this.get_commit_hash = get_commit_hash; + this.get_revision = get_revision; + + this.main_timestamp = main_timestamp; /* Updated after info.json loads */ + this.main_json = main_json; /* Updated after index.json loads */ + + this.format_date_yyyymmdd = format_date_yyyymmdd; + this.format_date_yyyymmdd_hhmm = format_date_yyyymmdd_hhmm; + this.pretty_unit = pretty_unit; + this.time_units = time_units; + this.mem_units = mem_units; + + this.colors = colors; + + $.asv = this; + + + /* + Launch it + */ + + init(); +}); diff --git a/docs/benchmarks/asv_ui.js b/docs/benchmarks/asv_ui.js new file mode 100644 index 00000000..af757c70 --- /dev/null +++ b/docs/benchmarks/asv_ui.js @@ -0,0 +1,231 @@ +'use strict'; + +$(document).ready(function() { + function make_panel(nav, heading) { + var panel = $('
'); + nav.append(panel); + var panel_header = $( + '
' + heading + '
'); + panel.append(panel_header); + var panel_body = $('
'); + panel.append(panel_body); + return panel_body; + } + + function make_value_selector_panel(nav, heading, values, setup_callback) { + var panel_body = make_panel(nav, heading); + var vertical = false; + var buttons = $('
'); + + panel_body.append(buttons); + + $.each(values, function (idx, value) { + var button = $( + ''); + setup_callback(idx, value, button); + buttons.append(button); + }); + + return panel_body; + } + + function reflow_value_selector_panels(no_timeout) { + $('.panel').each(function (i, panel_obj) { + var panel = $(panel_obj); + panel.find('.btn-group').each(function (i, buttons_obj) { + var buttons = $(buttons_obj); + var width = 0; + + if (buttons.hasClass('reflow-done')) { + /* already processed */ + return; + } + + $.each(buttons.children(), function(idx, value) { + width += value.scrollWidth; + }); + + var max_width = panel_obj.clientWidth; + + if (width >= max_width) { + buttons.addClass("btn-group-vertical"); + buttons.css("width", "100%"); + buttons.css("max-height", "20ex"); + buttons.css("overflow-y", "auto"); + } + else { + buttons.addClass("btn-group-justified"); + } + + /* The widths can be zero if the UI is not fully layouted yet, + so mark the adjustment complete only if this is not the case */ + if (width > 0 && max_width > 0) { + buttons.addClass("reflow-done"); + } + }); + }); + + if (!no_timeout) { + /* Call again asynchronously, in case the UI was not fully layouted yet */ + setTimeout(function() { $.asv.ui.reflow_value_selector_panels(true); }, 0); + } + } + + function network_error(ajax, status, error) { + $("#error-message").text( + "Error fetching content. " + + "Perhaps web server has gone down."); + $("#error").modal('show'); + } + + function hover_graph(element, graph_url, benchmark_basename, parameter_idx, revisions) { + /* Show the summary graph as a popup */ + var plot_div = $('
'); + plot_div.css('width', '11.8em'); + plot_div.css('height', '7em'); + plot_div.css('border', '2px solid black'); + plot_div.css('background-color', 'white'); + + function update_plot() { + var markings = []; + + if (revisions) { + $.each(revisions, function(i, revs) { + var rev_a = revs[0]; + var rev_b = revs[1]; + + if (rev_a !== null) { + markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_a, to: rev_a }}); + markings.push({ color: "rgba(255,0,0,0.1)", xaxis: { from: rev_a, to: rev_b }}); + } + markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_b, to: rev_b }}); + }); + } + + $.asv.load_graph_data( + graph_url + ).done(function (data) { + var params = $.asv.main_json.benchmarks[benchmark_basename].params; + data = $.asv.filter_graph_data_idx(data, 0, parameter_idx, params); + var options = { + colors: ['#000'], + series: { + lines: { + show: true, + lineWidth: 2 + }, + shadowSize: 0 + }, + grid: { + borderWidth: 1, + margin: 0, + labelMargin: 0, + axisMargin: 0, + minBorderMargin: 0, + markings: markings, + }, + xaxis: { + ticks: [], + }, + yaxis: { + ticks: [], + min: 0 + }, + legend: { + show: false + } + }; + var plot = $.plot(plot_div, [{data: data}], options); + }).fail(function () { + // TODO: Handle failure + }); + + return plot_div; + } + + element.popover({ + placement: 'left auto', + trigger: 'hover', + html: true, + delay: 50, + content: $('
').append(plot_div) + }); + + element.on('show.bs.popover', update_plot); + } + + function hover_summary_graph(element, benchmark_basename) { + /* Show the summary graph as a popup */ + var plot_div = $('
'); + plot_div.css('width', '11.8em'); + plot_div.css('height', '7em'); + plot_div.css('border', '2px solid black'); + plot_div.css('background-color', 'white'); + + function update_plot() { + var markings = []; + + $.asv.load_graph_data( + 'graphs/summary/' + benchmark_basename + '.json' + ).done(function (data) { + var options = { + colors: $.asv.colors, + series: { + lines: { + show: true, + lineWidth: 2 + }, + shadowSize: 0 + }, + grid: { + borderWidth: 1, + margin: 0, + labelMargin: 0, + axisMargin: 0, + minBorderMargin: 0, + markings: markings, + }, + xaxis: { + ticks: [], + }, + yaxis: { + ticks: [], + min: 0 + }, + legend: { + show: false + } + }; + var plot = $.plot(plot_div, [{data: data}], options); + }).fail(function () { + // TODO: Handle failure + }); + + return plot_div; + } + + element.popover({ + placement: 'left auto', + trigger: 'hover', + html: true, + delay: 50, + content: $('
').append(plot_div) + }); + + element.on('show.bs.popover', update_plot); + } + + /* + Set up $.asv.ui + */ + + this.network_error = network_error; + this.make_panel = make_panel; + this.make_value_selector_panel = make_value_selector_panel; + this.reflow_value_selector_panels = reflow_value_selector_panels; + this.hover_graph = hover_graph; + this.hover_summary_graph = hover_summary_graph; + + $.asv.ui = this; +}); diff --git a/docs/benchmarks/error.html b/docs/benchmarks/error.html new file mode 100644 index 00000000..af2a4d54 --- /dev/null +++ b/docs/benchmarks/error.html @@ -0,0 +1,23 @@ + + + + airspeed velocity error + + + + +

+ swallow + Can not determine continental origin of swallow. +

+ +

+ One or more external (JavaScript) dependencies of airspeed velocity failed to load. +

+ +

+ Make sure you have an active internet connection and enable 3rd-party scripts + in your browser the first time you load airspeed velocity. +

+ + diff --git a/docs/benchmarks/graphdisplay.js b/docs/benchmarks/graphdisplay.js new file mode 100644 index 00000000..ba715322 --- /dev/null +++ b/docs/benchmarks/graphdisplay.js @@ -0,0 +1,1427 @@ +'use strict'; + +$(document).ready(function() { + /* The state of the parameters in the sidebar. Dictionary mapping + strings to arrays containing the "enabled" configurations. */ + var state = null; + /* The name of the current benchmark being displayed. */ + var current_benchmark = null; + /* An array of graphs being displayed. */ + var graphs = []; + var orig_graphs = []; + /* An array of commit revisions being displayed */ + var current_revisions = []; + /* True when log scaling is enabled. */ + var log_scale = false; + /* True when zooming in on the y-axis. */ + var zoom_y_axis = false; + /* True when log scaling is enabled. */ + var reference_scale = false; + /* True when selecting a reference point */ + var select_reference = false; + /* The reference value */ + var reference = 1.0; + /* Whether to show the legend */ + var show_legend = true; + /* Is even commit spacing being used? */ + var even_spacing = false; + var even_spacing_revisions = []; + /* Is date scale being used ? */ + var date_scale = false; + var date_to_revision = {}; + /* A little div to handle tooltip placement on the graph */ + var tooltip = null; + /* X-axis coordinate axis in the data set; always 0 for + non-parameterized tests where revision and date are the only potential x-axis */ + var x_coordinate_axis = 0; + var x_coordinate_is_category = false; + /* List of lists of value combinations to plot (apart from x-axis) + in parameterized tests. */ + var benchmark_param_selection = [[null]]; + /* Highlighted revisions */ + var highlighted_revisions = null; + /* Whether benchmark graph display was set up */ + var benchmark_graph_display_ready = false; + + + /* UTILITY FUNCTIONS */ + function arr_remove_from(a, x) { + var out = []; + $.each(a, function(i, val) { + if (x !== val) { + out.push(val); + } + }); + return out; + } + + function obj_copy(obj) { + var newobj = {}; + $.each(obj, function(key, val) { + newobj[key] = val; + }); + return newobj; + } + + function obj_length(obj) { + var i = 0; + for (var x in obj) + ++i; + return i; + } + + function obj_get_first_key(data) { + for (var prop in data) + return prop; + } + + function no_data(ajax, status, error) { + $("#error-message").text( + "No data for this combination of filters. 
"); + $("#error").modal('show'); + } + + function get_x_from_revision(rev) { + if (date_scale) { + return $.asv.main_json.revision_to_date[rev]; + } else { + return rev; + } + } + + function get_commit_hash(x) { + // Return the commit hash in the current graph located at position x + if (date_scale) { + x = date_to_revision[x]; + } + return $.asv.get_commit_hash(x); + } + + + function display_benchmark(bm_name, state_selection, highlight_revisions) { + setup_benchmark_graph_display(); + + $('#graph-display').show(); + $('#summarygrid-display').hide(); + $('#regressions-display').hide(); + $('.tooltip').remove(); + + if (reference_scale) { + reference_scale = false; + $('#reference').removeClass('active'); + reference = 1.0; + } + current_benchmark = bm_name; + highlighted_revisions = highlight_revisions; + $("#title").text(bm_name); + setup_benchmark_params(state_selection); + replace_graphs(); + } + + function setup_benchmark_graph_display() { + if (benchmark_graph_display_ready) { + return; + } + benchmark_graph_display_ready = true; + + /* When the window resizes, redraw the graphs */ + $(window).on('resize', function() { + update_graphs(); + }); + + var nav = $("#graphdisplay-navigation"); + + /* Make the static tooltips look correct */ + $('[data-toggle="tooltip"]').tooltip({container: 'body'}); + + /* Add insertion point for benchmark parameters */ + var state_params_nav = $("
"); + nav.append(state_params_nav); + + /* Add insertion point for benchmark parameters */ + var bench_params_nav = $("
"); + nav.append(bench_params_nav); + + /* Benchmark panel */ + var panel_body = $.asv.ui.make_panel(nav, 'benchmark'); + + var tree = $('