Mirror of https://github.com/django-components/django-components.git (synced 2025-08-28 01:44:05 +00:00)
feat: benchmarking (#999)
* feat: add benchmarking dashboard, CI hook on PR, and store lifetime results
* refactor: change python env to 3.13 in benchmarks
* refactor: add verbosity, use 3.11 for benchmarking
* fix: OSError: [Errno 7] Argument list too long
* refactor: add debug statements
* refactor: remove extraneous -e
* refactor: fix tests and linter errors
* fix: track main package in coverage
* refactor: fix test coverage testing
* refactor: fix repo owner name in benchmark on pushing comment
* refactor: add asv monkeypatch to docs workflow
* refactor: temporarily allow building docs in forks
* refactor: use py 3.13 for benchmarking
* refactor: run only a single benchmark for PRs to speed them up
* refactor: install asv in the docs build workflow
* refactor: use hatch docs env to generate benchmarks in docs CI
* refactor: more trying
* refactor: move tests
* Add benchmark results for 0.137
* Trigger Build
* Add benchmark results for 0.138
* refactor: set constant machine name when benchmarking
* Add benchmark results for 0.139
* refactor: fix issue with paths too long
* Add benchmark results for 0.140
* docs: update comment
* refactor: remove test benchmarking data
* refactor: fix comment
* refactor: allow the benchmark workflow to write to PRs
* refactor: use personal access token to set up the PR benchmark bot
* refactor: split the benchmark PR flow into two to make it work with PRs from forks
* refactor: update deprecated actions/upload-artifact@v3 to v4
* refactor: fix missing directory in benchmarking workflow
* refactor: fix triggering of second workflow
* refactor: fix workflow finally?
* docs: add comments to cut-offs and direct people to benchmarks PR

---------

Co-authored-by: github-actions <github-actions@github.com>
This commit is contained in: parent dcd4203eea, commit f36581ed86
90 changed files with 40817 additions and 443 deletions
benchmarks/utils.py (Normal file, 99 lines added)
@@ -0,0 +1,99 @@
import os
import sys
from importlib.abc import Loader
from importlib.util import spec_from_loader, module_from_spec
from types import ModuleType
from typing import Any, Dict, List, Optional


# NOTE: benchmark_name constraints:
# - MUST BE UNIQUE
# - MUST NOT CONTAIN `-`
# - MUST START WITH `time_`, `mem_`, `peakmem_`
# See https://github.com/airspeed-velocity/asv/pull/1470
def benchmark(
    *,
    pretty_name: Optional[str] = None,
    timeout: Optional[int] = None,
    group_name: Optional[str] = None,
    params: Optional[Dict[str, List[Any]]] = None,
    number: Optional[int] = None,
    min_run_count: Optional[int] = None,
    include_in_quick_benchmark: bool = False,
    **kwargs,
):
    def decorator(func):
        # For pull requests, we want to run benchmarks only for a subset of tests,
        # because the full set of tests takes about 10 minutes to run (5 min per commit).
        # This is done by setting DJC_BENCHMARK_QUICK=1 in the environment.
        if os.getenv("DJC_BENCHMARK_QUICK") and not include_in_quick_benchmark:
            # By setting the benchmark name to something that does NOT start with
            # valid prefixes like `time_`, `mem_`, or `peakmem_`, this function will be ignored by asv.
            func.benchmark_name = "noop"
            return func

        # "group_name" is our custom field, which we actually convert to asv's "benchmark_name"
        if group_name is not None:
            benchmark_name = f"{group_name}.{func.__name__}"
            func.benchmark_name = benchmark_name

        # Also "params" is custom, so we normalize it to "params" and "param_names"
        if params is not None:
            func.params, func.param_names = list(params.values()), list(params.keys())

        if pretty_name is not None:
            func.pretty_name = pretty_name
        if timeout is not None:
            func.timeout = timeout
        if number is not None:
            func.number = number
        if min_run_count is not None:
            func.min_run_count = min_run_count

        # Additional, untyped kwargs
        for k, v in kwargs.items():
            setattr(func, k, v)

        return func

    return decorator


class VirtualModuleLoader(Loader):
    def __init__(self, code_string):
        self.code_string = code_string

    def exec_module(self, module):
        exec(self.code_string, module.__dict__)


def create_virtual_module(name: str, code_string: str, file_path: str) -> ModuleType:
    """
    To avoid the headaches of importing the tested code from another directory,
    we create a "virtual" module that we can import from anywhere.

    E.g.
    ```py
    from benchmarks.utils import create_virtual_module

    create_virtual_module("my_module", "print('Hello, world!')", __file__)

    # Now you can import my_module from anywhere
    import my_module
    ```
    """
    # Create the module specification
    spec = spec_from_loader(name, VirtualModuleLoader(code_string))

    # Create the module
    module = module_from_spec(spec)  # type: ignore[arg-type]
    module.__file__ = file_path
    module.__name__ = name

    # Add it to sys.modules
    sys.modules[name] = module

    # Execute the module
    spec.loader.exec_module(module)  # type: ignore[union-attr]

    return module
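For orientation, below is a minimal sketch of how a benchmark module might use these two helpers together. Everything in it is illustrative: the module name `djc_bench_sample`, its code string, and the `time_render` benchmark with its parameters are made up for this example; only `benchmark` and `create_virtual_module` come from `benchmarks/utils.py` above.

```py
# Hypothetical benchmarks/benchmark_example.py -- illustrative only.
from benchmarks.utils import benchmark, create_virtual_module

# Make some code under test importable without touching sys.path.
create_virtual_module(
    "djc_bench_sample",
    "def render(n):\n    return 'x' * n\n",
    __file__,
)

import djc_bench_sample  # noqa: E402


# The function name starts with `time_`, so asv treats it as a timing benchmark
# (see the NOTE on name constraints at the top of benchmarks/utils.py).
@benchmark(
    pretty_name="render a string of length n",
    group_name="sample",              # becomes benchmark_name "sample.time_render"
    params={"n": [10, 1_000]},        # normalized to asv's params + param_names
    include_in_quick_benchmark=True,  # still runs when DJC_BENCHMARK_QUICK=1
)
def time_render(n):
    djc_bench_sample.render(n)
```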
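And a short sketch of the quick-mode behaviour described in the decorator's comments, assuming it is run from a checkout where `benchmarks/utils.py` is importable; the two `time_*` functions are hypothetical.

```py
import os

# Simulate the PR flow, where the CI workflow sets this before running asv.
os.environ["DJC_BENCHMARK_QUICK"] = "1"

from benchmarks.utils import benchmark  # noqa: E402


@benchmark(group_name="sample")
def time_excluded():
    pass


@benchmark(group_name="sample", include_in_quick_benchmark=True)
def time_included():
    pass


# The excluded benchmark gets a name asv will not pick up;
# the included one keeps its "<group_name>.<function name>" name.
print(time_excluded.benchmark_name)  # noop
print(time_included.benchmark_name)  # sample.time_included
```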