# ------------ FIX FOR #45 ------------
# See https://github.com/airspeed-velocity/asv_runner/issues/45
# This fix is applied in CI in the `benchmark.yml` file.
# This file is intentionally named `monkeypatch_asv_ci.txt` to avoid being
# loaded as a python file by `asv`.
# -------------------------------------

# NOTE: This snippet is meant to be appended to the `asv_runner` module that
# defines `_SeparateProcessTimer`, so that class is assumed to already be in
# scope there. The imports below are what the function body needs; repeating
# them is harmless if the target module already imports them.
import subprocess
import sys
import textwrap

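# For context: `self.func()` below is a benchmark function that returns the
# code to time as a string, or a (code, setup) tuple of strings (see the
# docstring that follows). A hypothetical asv `timeraw_*` benchmark of that
# shape might look like the sketch below; the names are illustrative only and
# not taken from this repository.
#
#     def timeraw_render_template():
#         return (
#             "render(template)",                     # statement that gets timed
#             "from myapp import render, template",   # setup run before timing
#         )
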
def timeit(self, number):
    """
    Run the function's code `number` times in a separate Python process, and
    return the execution time.

    #### Parameters
    **number** (`int`)
    : The number of times to execute the function's code.

    #### Returns
    **time** (`float`)
    : The time it took to execute the function's code `number` times.

    #### Notes
    The function's code is executed in a separate Python process to avoid
    interference from the parent process. The function can return either a
    single string of code to be executed, or a tuple of two strings: the
    code to be executed and the setup code to be run before timing.
    """
    stmt = self.func()
    if isinstance(stmt, tuple):
        stmt, setup = stmt
    else:
        setup = ""
    stmt = textwrap.dedent(stmt)
    setup = textwrap.dedent(setup)
    stmt = stmt.replace(r'"""', r"\"\"\"")
    setup = setup.replace(r'"""', r"\"\"\"")

    # TODO
    # -----------ORIGINAL CODE-----------
    # code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
    # res = subprocess.check_output([sys.executable, "-c", code])
    # return float(res.strip())

    # -----------NEW CODE-----------
    code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)

    # Instead of passing the generated code to the subprocess as a `-c`
    # argument (which can exceed the OS limit on command-line length and fail
    # with `OSError: [Errno 7] Argument list too long`), pipe it to the child
    # process over stdin and `exec()` it there.
    evaler = textwrap.dedent(
        """
        import sys
        code = sys.stdin.read()
        exec(code)
        """
    )

    proc = subprocess.Popen([sys.executable, "-c", evaler],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(input=code.encode("utf-8"))
    if proc.returncode != 0:
        raise RuntimeError(f"Subprocess failed: {stderr.decode()}")
    return float(stdout.decode("utf-8").strip())


_SeparateProcessTimer.timeit = timeit

# ------------ END FIX #45 ------------
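
# ------------ ILLUSTRATIVE: HOW THE PATCH MIGHT BE APPLIED ------------
# A minimal sketch of the kind of CI step `benchmark.yml` could run to apply
# this file: append its contents to the `asv_runner` module that defines
# `_SeparateProcessTimer`. The module path (`asv_runner.benchmarks.timeraw`)
# and the file location used here are assumptions for illustration; the actual
# commands live in `benchmark.yml`. The `__name__` guard keeps this block
# inert if it is ever concatenated into `asv_runner` itself.
if __name__ == "__main__":
    from pathlib import Path

    import asv_runner.benchmarks.timeraw as target  # assumed location of _SeparateProcessTimer

    patch = Path("monkeypatch_asv_ci.txt").read_text()
    with open(target.__file__, "a") as f:
        f.write("\n" + patch)
# -----------------------------------------------------------------------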