Compare commits

..

No commits in common. "master" and "0.21" have entirely different histories.
master ... 0.21

482 changed files with 3298 additions and 112603 deletions

View file

@@ -1,385 +0,0 @@
{
"Components vs Django.peakmem_render_lg_first": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - large - first render (mem)\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n setup=lambda renderer: setup_templating_memory_benchmark(renderer, \"lg\", \"first\", \"isolated\"),\n )\n def peakmem_render_lg_first(self, renderer: TemplatingRenderer):\n do_render()\n\nsetup=lambda renderer: setup_templating_memory_benchmark(renderer, \"lg\", \"first\", \"isolated\"),",
"name": "Components vs Django.peakmem_render_lg_first",
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - large - first render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "301c396f017f45a5b3f71e85df58d15f54153fcfd951af7ef424641d4b31b528"
},
"Components vs Django.peakmem_render_lg_subsequent": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - large - second render (mem)\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n setup=lambda renderer: setup_templating_memory_benchmark(renderer, \"lg\", \"subsequent\", \"isolated\"),\n )\n def peakmem_render_lg_subsequent(self, renderer: TemplatingRenderer):\n do_render()\n\nsetup=lambda renderer: setup_templating_memory_benchmark(renderer, \"lg\", \"subsequent\", \"isolated\"),",
"name": "Components vs Django.peakmem_render_lg_subsequent",
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - large - second render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "9a44e9999ef3ef42ea7e01323727490244febb43d66a87a4d8f88c6b8a133b8b"
},
"Components vs Django.peakmem_render_sm_first": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - small - first render (mem)\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n setup=lambda renderer: setup_templating_memory_benchmark(renderer, \"sm\", \"first\", \"isolated\"),\n )\n def peakmem_render_sm_first(self, renderer: TemplatingRenderer):\n do_render()\n\nsetup=lambda renderer: setup_templating_memory_benchmark(renderer, \"sm\", \"first\", \"isolated\"),",
"name": "Components vs Django.peakmem_render_sm_first",
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - small - first render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "e93b7a5193681c883edf85bdb30b1bc0821263bf51033fdcee215b155085e036"
},
"Components vs Django.peakmem_render_sm_subsequent": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - small - second render (mem)\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n setup=lambda renderer: setup_templating_memory_benchmark(renderer, \"sm\", \"subsequent\", \"isolated\"),\n )\n def peakmem_render_sm_subsequent(self, renderer: TemplatingRenderer):\n do_render()\n\nsetup=lambda renderer: setup_templating_memory_benchmark(renderer, \"sm\", \"subsequent\", \"isolated\"),",
"name": "Components vs Django.peakmem_render_sm_subsequent",
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - small - second render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "b46e0820b18950aa7cc5e61306ff3425b76b4da9dca42d64fae5b1d25c6c9026"
},
"Components vs Django.timeraw_render_lg_first": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - large - first render\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n include_in_quick_benchmark=True,\n )\n def timeraw_render_lg_first(self, renderer: TemplatingRenderer):\n return prepare_templating_benchmark(renderer, \"lg\", \"first\", \"isolated\")",
"min_run_count": 2,
"name": "Components vs Django.timeraw_render_lg_first",
"number": 1,
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - large - first render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "be3bf6236960046a028b6ea007aad28b2337fc2b906b8ce317a09a5d4f1a6193",
"warmup_time": -1
},
"Components vs Django.timeraw_render_lg_subsequent": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - large - second render\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n )\n def timeraw_render_lg_subsequent(self, renderer: TemplatingRenderer):\n return prepare_templating_benchmark(renderer, \"lg\", \"subsequent\", \"isolated\")",
"min_run_count": 2,
"name": "Components vs Django.timeraw_render_lg_subsequent",
"number": 1,
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - large - second render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "b98221c11a0ee6e9de0778d416d31b9dd514a674d9017a2bb9b2fc1cd0f01920",
"warmup_time": -1
},
"Components vs Django.timeraw_render_sm_first": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - small - first render\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n )\n def timeraw_render_sm_first(self, renderer: TemplatingRenderer):\n return prepare_templating_benchmark(renderer, \"sm\", \"first\", \"isolated\")",
"min_run_count": 2,
"name": "Components vs Django.timeraw_render_sm_first",
"number": 1,
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - small - first render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "f1fc17e4a31c71f4d9265f1122da52e7cf57addb4dfa02606e303b33d6431b9b",
"warmup_time": -1
},
"Components vs Django.timeraw_render_sm_subsequent": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"render - small - second render\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n )\n def timeraw_render_sm_subsequent(self, renderer: TemplatingRenderer):\n return prepare_templating_benchmark(renderer, \"sm\", \"subsequent\", \"isolated\")",
"min_run_count": 2,
"name": "Components vs Django.timeraw_render_sm_subsequent",
"number": 1,
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "render - small - second render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "6fce1cd85a9344fee383b40a22f27862120b9488a628420625592dc14e0307d3",
"warmup_time": -1
},
"Components vs Django.timeraw_startup_lg": {
"code": "class DjangoComponentsVsDjangoTests:\n @benchmark(\n pretty_name=\"startup - large\",\n group_name=DJC_VS_DJ_GROUP,\n number=1,\n rounds=5,\n params={\n \"renderer\": [\"django\", \"django-components\"],\n },\n )\n def timeraw_startup_lg(self, renderer: TemplatingRenderer):\n return prepare_templating_benchmark(renderer, \"lg\", \"startup\", \"isolated\")",
"min_run_count": 2,
"name": "Components vs Django.timeraw_startup_lg",
"number": 1,
"param_names": [
"renderer"
],
"params": [
[
"'django'",
"'django-components'"
]
],
"pretty_name": "startup - large",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "53151821c128ad0ecfb0707fff3146e1abd8d0bcfa301aa056b5d3fae3d793e2",
"warmup_time": -1
},
"Other.timeraw_import_time": {
"code": "class OtherTests:\n @benchmark(\n pretty_name=\"import time\",\n group_name=OTHER_GROUP,\n number=1,\n rounds=5,\n )\n def timeraw_import_time(self):\n return prepare_templating_benchmark(\"django-components\", \"lg\", \"startup\", \"isolated\", imports_only=True)",
"min_run_count": 2,
"name": "Other.timeraw_import_time",
"number": 1,
"param_names": [],
"params": [],
"pretty_name": "import time",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "a0a1c1c0db22509410b946d0d4384b52ea4a09b47b6048d7d1cfb89b0c7fe5c3",
"warmup_time": -1
},
"isolated vs django modes.peakmem_render_lg_first": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - large - first render (mem)\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n setup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"lg\",\n \"first\",\n context_mode,\n ),\n )\n def peakmem_render_lg_first(self, context_mode: DjcContextMode):\n do_render()\n\nsetup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"lg\",\n \"first\",\n context_mode,\n),",
"name": "isolated vs django modes.peakmem_render_lg_first",
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - large - first render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "c4bf0016d48d210f08b8db733b57c7dcba1cebbf548c458b93b86ace387067e9"
},
"isolated vs django modes.peakmem_render_lg_subsequent": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - large - second render (mem)\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n setup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"lg\",\n \"subsequent\",\n context_mode,\n ),\n )\n def peakmem_render_lg_subsequent(self, context_mode: DjcContextMode):\n do_render()\n\nsetup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"lg\",\n \"subsequent\",\n context_mode,\n),",
"name": "isolated vs django modes.peakmem_render_lg_subsequent",
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - large - second render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "65bb1b8586487197a79bb6073e4c71642877b845b6eb42d1bd32398299daffbf"
},
"isolated vs django modes.peakmem_render_sm_first": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - small - first render (mem)\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n setup=lambda context_mode: setup_templating_memory_benchmark(\"django-components\", \"sm\", \"first\", context_mode),\n )\n def peakmem_render_sm_first(self, context_mode: DjcContextMode):\n do_render()\n\nsetup=lambda context_mode: setup_templating_memory_benchmark(\"django-components\", \"sm\", \"first\", context_mode),",
"name": "isolated vs django modes.peakmem_render_sm_first",
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - small - first render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "c51b91fc583295776062822225e720b5ed71aef9c9288217c401c54283c62840"
},
"isolated vs django modes.peakmem_render_sm_subsequent": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - small - second render (mem)\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n setup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"sm\",\n \"subsequent\",\n context_mode,\n ),\n )\n def peakmem_render_sm_subsequent(self, context_mode: DjcContextMode):\n do_render()\n\nsetup=lambda context_mode: setup_templating_memory_benchmark(\n \"django-components\",\n \"sm\",\n \"subsequent\",\n context_mode,\n),",
"name": "isolated vs django modes.peakmem_render_sm_subsequent",
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - small - second render (mem)",
"type": "peakmemory",
"unit": "bytes",
"version": "54d747fb8f40179b7ff3d2fc49eb195909ad1c880b5ef7b82f82742b27b67260"
},
"isolated vs django modes.timeraw_render_lg_first": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - large - first render\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n )\n def timeraw_render_lg_first(self, context_mode: DjcContextMode):\n return prepare_templating_benchmark(\"django-components\", \"lg\", \"first\", context_mode)",
"min_run_count": 2,
"name": "isolated vs django modes.timeraw_render_lg_first",
"number": 1,
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - large - first render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "f94af83427c6346f88f8785a3cd2fc42415ac5a9fbbdb7de71d27e22e6a81699",
"warmup_time": -1
},
"isolated vs django modes.timeraw_render_lg_subsequent": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - large - second render\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n )\n def timeraw_render_lg_subsequent(self, context_mode: DjcContextMode):\n return prepare_templating_benchmark(\"django-components\", \"lg\", \"subsequent\", context_mode)",
"min_run_count": 2,
"name": "isolated vs django modes.timeraw_render_lg_subsequent",
"number": 1,
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - large - second render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "9f7c2fde6b33f0451a1794ed903c48d96cd7822f67da502cec36fe8e977c2414",
"warmup_time": -1
},
"isolated vs django modes.timeraw_render_sm_first": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - small - first render\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n )\n def timeraw_render_sm_first(self, context_mode: DjcContextMode):\n return prepare_templating_benchmark(\"django-components\", \"sm\", \"first\", context_mode)",
"min_run_count": 2,
"name": "isolated vs django modes.timeraw_render_sm_first",
"number": 1,
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - small - first render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "d15ca68909d7f1f43ff16863befb6f42681f17461417fc0069eefd6db3569296",
"warmup_time": -1
},
"isolated vs django modes.timeraw_render_sm_subsequent": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"render - small - second render\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n )\n def timeraw_render_sm_subsequent(self, context_mode: DjcContextMode):\n return prepare_templating_benchmark(\"django-components\", \"sm\", \"subsequent\", context_mode)",
"min_run_count": 2,
"name": "isolated vs django modes.timeraw_render_sm_subsequent",
"number": 1,
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "render - small - second render",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "7444bc9516dd087e3f420349345eae991ad6941bbd22fce45265b18034b7cf77",
"warmup_time": -1
},
"isolated vs django modes.timeraw_startup_lg": {
"code": "class IsolatedVsDjangoContextModesTests:\n @benchmark(\n pretty_name=\"startup - large\",\n group_name=DJC_ISOLATED_VS_NON_GROUP,\n number=1,\n rounds=5,\n params={\n \"context_mode\": [\"isolated\", \"django\"],\n },\n )\n def timeraw_startup_lg(self, context_mode: DjcContextMode):\n return prepare_templating_benchmark(\"django-components\", \"lg\", \"startup\", context_mode)",
"min_run_count": 2,
"name": "isolated vs django modes.timeraw_startup_lg",
"number": 1,
"param_names": [
"context_mode"
],
"params": [
[
"'isolated'",
"'django'"
]
],
"pretty_name": "startup - large",
"repeat": 0,
"rounds": 5,
"sample_time": 0.01,
"type": "time",
"unit": "seconds",
"version": "eabe311ebee4a15c5816617be12f00ec30376f7506bd668219e1c50bc897c134",
"warmup_time": -1
},
"version": 2
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@@ -1,4 +0,0 @@
{
"machine": "ci-linux",
"version": 1
}

View file

@@ -1,32 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.0/containers/python-3
{
// Uncomment to run Python 3.13 or other specific version
// "image": "mcr.microsoft.com/devcontainers/python:3.13-bullseye",
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"python.defaultInterpreterPath": "/usr/local/bin/python",
"python.linting.enabled": true
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"ms-python.vscode-python-envs",
"jurooravec.python-inline-source-2"
]
}
}
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": ""
}

2
.github/FUNDING.yml vendored
View file

@@ -1,2 +0,0 @@
github: ["EmilStenstrom"]

View file

@@ -1,17 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "pip" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "weekly"
- package-ecosystem: github-actions
# This actually targets ./.github/workflows/
# See https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#directories-or-directory--
directory: "/"
schedule:
interval: monthly
interval: "daily"

View file

@@ -1,22 +0,0 @@
name: Dependabot auto-merge
on: pull_request
permissions:
contents: write
pull-requests: write
jobs:
dependabot:
runs-on: ubuntu-latest
if: github.actor == 'dependabot[bot]'
steps:
- name: Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@v2
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Enable auto-merge for Dependabot PRs
run: gh pr merge --auto --merge "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GH_TOKEN: ${{secrets.GITHUB_TOKEN}}

19
.github/workflows/contributors.yml vendored Normal file
View file

@@ -0,0 +1,19 @@
name: Add contributors to readme
on:
push:
branches:
- master
workflow_dispatch:
jobs:
contrib-readme-job:
runs-on: ubuntu-latest
name: Add contributors to readme
steps:
- name: Contribute List
uses: akhilmhdh/contributors-readme-action@v2.3.3
with:
is_protected: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@@ -1,195 +0,0 @@
---
name: Docs - build & deploy
on:
push:
tags:
# for versions 0.### (before 1.0.0)
- '0.[0-9]+'
# after 1.0.0
- '[0-9]+.[0-9]+.[0-9]+'
branches:
- master
workflow_dispatch:
jobs:
docs:
# Grant GITHUB_TOKEN the permissions required to make a Pages deployment
permissions:
contents: write # to let mkdocs write the new docs
pages: write # to deploy to Pages
id-token: write # to verify the deployment originates from an appropriate source
runs-on: ubuntu-latest
# Only run in original repo (not in forks)
if: github.repository == 'django-components/django-components'
steps:
##############################
# SETUP
##############################
# Authenticate with git with the Github App that has permission
# to push to master, in order to push benchmark results.
# See https://stackoverflow.com/a/79142962/9788634
- uses: actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.RELEASE_BOT_APP_ID }}
private-key: ${{ secrets.RELEASE_BOT_APP_PRIVATE_KEY }}
- name: Checkout
uses: actions/checkout@v4
with:
token: ${{ steps.app-token.outputs.token }}
fetch-depth: 0
- name: Configure git account
run: |
git config user.name components-release-bot
git config user.email "components-release-bot@users.noreply.github.com"
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.13"
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip wheel
# NOTE: pin virtualenv to <20.31 until asv fixes it.
# See https://github.com/airspeed-velocity/asv/issues/1484
python -m pip install -q hatch pre-commit asv virtualenv==20.30
hatch --version
###########################################
# RECORD BENCHMARK - ONLY ON PUSH TO MASTER
###########################################
- name: Run benchmarks for tag
if: github.ref_type == 'tag' && github.event_name == 'push'
env:
# See https://github.com/github/docs/issues/21930
# And https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Get the master branch so we can run benchmarks on it
git remote add upstream https://github.com/${{ github.repository }}.git
git fetch origin master:master
git checkout master
# Get tag name
TAG=${GITHUB_REF#refs/tags/}
echo "TAG: $TAG"
# TODO: REMOVE ONCE FIXED UPSTREAM
# Fix for https://github.com/airspeed-velocity/asv_runner/issues/45
# Prepare virtual environment
# Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark.
# The problem is that `asv` passes the code to execute via command line, and when the
# code is too big, it fails with `OSError: [Errno 7] Argument list too long`.
# So we have to tweak it to pass the code via STDIN, which doesn't have this limitation.
#
# 1. First create the virtual environment, so that asv generates the directories where
# the monkeypatch can be applied.
echo "Creating virtual environment..."
asv setup -v || true
echo "Virtual environment created."
# 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files.
# First find all `timeraw.py` files
echo "Applying monkeypatch..."
find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do
# Add a newline and then append the monkeypatch contents
echo "" >> "$file"
cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file"
done
echo "Monkeypatch applied."
# END OF MONKEYPATCH
# Prepare the profile under which the benchmarks will be saved.
# We assume that the CI machine has a name that is unique and stable.
# See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794
echo "Preparing benchmarks profile..."
asv machine --yes --machine ci-linux
echo "Benchmarks profile DONE."
# Run benchmarks for the current tag
# - `^` means that we mean the COMMIT of the tag's branch, not the BRANCH itself.
# Without it, we would run benchmarks for the whole branch history.
# With it, we run benchmarks FROM the tag's commit (incl) TO ...
# - `!` means that we want to select range spanning a single commit.
# Without it, we would run benchmarks for all commits FROM the tag's commit
# TO the start of the branch history.
# With it, we run benchmarks ONLY FOR the tag's commit.
echo "Running benchmarks for tag ${TAG}..."
asv run master^! -v
echo "Benchmarks for tag ${TAG} DONE."
# Generate benchmarks site
# This should save it in `docs/benchmarks/`, so we can then use it when
# building docs site with `mkdocs`.
echo "Generating benchmarks site..."
asv publish
echo "Benchmarks site DONE."
# Commit benchmark results
echo "Staging and committing benchmark results..."
git add .asv/results/
git add docs/benchmarks/
git commit -m "Add benchmark results for ${TAG}"
echo "Benchmark results committed."
# Push to the new branch
echo "Pushing benchmark results..."
git push origin master
echo "Benchmark results pushed to master."
###########################################
# BUILD & RELEASE DOCS
###########################################
# Change git authentication to Github Actions, so the rest of the
# workflow will have lower privileges.
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure git
run: |
# required for "mike deploy" command below which pushes to gh-pages
git config user.name github-actions
git config user.email github-actions@github.com
# Conditions make sure to select the right step, depending on the job trigger.
# Only one of the steps below will run at a time. The others will be skipped.
- name: Check docs in pull requests with strict mode
if: github.event_name == 'pull_request'
run: |
# XXX Enable strict mode once docs are clean
echo "Strict check of docs disabled."
# hatch run docs:build --strict
- name: Build & deploy "dev" docs for a new commit to master
if: github.event_name == 'push' && github.ref_type != 'tag'
run: |
# Fetch and checkout gh-pages to ensure we have the latest version
git fetch origin gh-pages
git checkout gh-pages
git pull origin gh-pages
git checkout master
export SHORT_SHA=$(echo "${GITHUB_SHA}" | cut -c1-7)
hatch run docs:mike deploy --push --update-aliases --title "dev (${SHORT_SHA})" dev
- name: Build & deploy docs for a new tag
if: github.ref_type == 'tag' && github.event_name == 'push'
run: |
# Fetch and checkout gh-pages to ensure we have the latest version
git fetch origin gh-pages
git checkout gh-pages
git pull origin gh-pages
git checkout master
hatch run docs:mike deploy --push --update-aliases ${{ github.ref_name }} latest
hatch run docs:mike set-default latest --push

View file

@@ -1,99 +0,0 @@
# Run benchmark report on pull requests to master.
# The report is added to the PR as a comment.
#
# NOTE: When making a PR from a fork, the worker doesn't have sufficient
# access to make comments on the target repo's PR. And so, this workflow
# is split to two parts:
#
# 1. Benchmarking and saving results as artifacts
# 2. Downloading the results and commenting on the PR
#
# See https://stackoverflow.com/a/71683208/9788634
name: PR benchmark comment
on:
workflow_run:
# NOTE: The name here MUST match the name of the workflow that generates the data
workflows: [PR benchmarks generate]
types:
- completed
jobs:
download:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
repository-projects: write
steps:
########## USE FOR DEBUGGING ##########
- name: Debug workflow run info
uses: actions/github-script@v7
with:
script: |
console.log('Workflow Run ID:', context.payload.workflow_run.id);
const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id
});
console.log('Available artifacts:');
console.log(JSON.stringify(artifacts.data, null, 2));
console.log(`PRs: ` + JSON.stringify(context.payload.workflow_run.pull_requests));
#########################################
# NOTE: The next two steps (download and unzip) are equivalent to using `actions/download-artifact@v4`
# However, `download-artifact` was not picking up the artifact, while the REST client does.
- name: Download benchmark results
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
// Find the artifact that was generated by the "pr-benchmark-generate" workflow
const allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
// Explicitly search the workflow run that generated the results
// (AKA the "pr-benchmark-generate" workflow).
run_id: context.payload.workflow_run.id,
});
const matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "benchmark_results"
})[0];
// Download the artifact
const download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
fs.writeFileSync(
`${process.env.GITHUB_WORKSPACE}/benchmark_results.zip`,
Buffer.from(download.data),
);
- name: Unzip artifact
run: unzip benchmark_results.zip
- name: Comment on PR
# See https://github.com/actions/github-script
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const results = fs.readFileSync('./benchmark_results.md', 'utf8');
const issue_number = Number.parseInt(fs.readFileSync('./pr_number.txt', 'utf8'));
const body = `## Performance Benchmark Results\n\nComparing PR changes against master branch:\n\n${results}`;
// See https://octokit.github.io/rest.js/v21/#issues-create-comment
await github.rest.issues.createComment({
body: body,
// See https://github.com/actions/toolkit/blob/662b9d91f584bf29efbc41b86723e0e376010e41/packages/github/src/context.ts#L66
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issue_number,
});

View file

@ -1,112 +0,0 @@
# Run benchmark report on pull requests to master.
# The report is added to the PR as a comment.
#
# NOTE: When making a PR from a fork, the worker doesn't have sufficient
# access to make comments on the target repo's PR. And so, this workflow
# is split to two parts:
#
# 1. Benchmarking and saving results as artifacts
# 2. Downloading the results and commenting on the PR
#
# See https://stackoverflow.com/a/71683208/9788634
name: PR benchmarks generate
on:
pull_request:
branches: [ master ]
jobs:
benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # Need full history for ASV
- name: Fetch base branch
run: |
git remote add upstream https://github.com/${{ github.repository }}.git
git fetch upstream master
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.13'
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
# NOTE: pin virtualenv to <20.31 until asv fixes it.
# See https://github.com/airspeed-velocity/asv/issues/1484
pip install asv virtualenv==20.30
- name: Run benchmarks
run: |
# TODO: REMOVE ONCE FIXED UPSTREAM
# Fix for https://github.com/airspeed-velocity/asv_runner/issues/45
# Prepare virtual environment
# Currently, we have to monkeypatch the `timeit` function in the `timeraw` benchmark.
# The problem is that `asv` passes the code to execute via command line, and when the
# code is too big, it fails with `OSError: [Errno 7] Argument list too long`.
# So we have to tweak it to pass the code via STDIN, which doesn't have this limitation.
#
# 1. First create the virtual environment, so that asv generates the directories where
# the monkeypatch can be applied.
echo "Creating virtual environment..."
asv setup -v || true
echo "Virtual environment created."
# 2. Now let's apply the monkeypatch by appending it to the `timeraw.py` files.
# First find all `timeraw.py` files
echo "Applying monkeypatch..."
find .asv/env -type f -path "*/site-packages/asv_runner/benchmarks/timeraw.py" | while read -r file; do
# Add a newline and then append the monkeypatch contents
echo "" >> "$file"
cat "benchmarks/monkeypatch_asv_ci.txt" >> "$file"
done
echo "Monkeypatch applied."
# END OF MONKEYPATCH
# Prepare the profile under which the benchmarks will be saved.
# We assume that the CI machine has a name that is unique and stable.
# See https://github.com/airspeed-velocity/asv/issues/796#issuecomment-1188431794
echo "Preparing benchmarks profile..."
MACHINE="ci_benchmark_${{ github.event.pull_request.number }}"
asv machine --yes -v --machine ${MACHINE}
echo "Benchmarks profile DONE."
# Generate benchmark data
# - `^` means that we mean the COMMIT of the branch, not the BRANCH itself.
# Without it, we would run benchmarks for the whole branch history.
# With it, we run benchmarks FROM the latest commit (incl) TO ...
# - `!` means that we want to select range spanning a single commit.
# Without it, we would run benchmarks for all commits FROM the latest commit
# TO the start of the branch history.
# With it, we run benchmarks ONLY FOR the latest commit.
echo "Running benchmarks for upstream/master..."
DJC_BENCHMARK_QUICK=1 asv run upstream/master^! -v --machine ${MACHINE}
echo "Benchmarks for upstream/master DONE."
echo "Running benchmarks for HEAD..."
DJC_BENCHMARK_QUICK=1 asv run HEAD^! -v --machine ${MACHINE}
echo "Benchmarks for HEAD DONE."
echo "Creating pr directory..."
mkdir -p pr
# Save the PR number to a file, so that it can be used by the next step.
echo "${{ github.event.pull_request.number }}" > ./pr/pr_number.txt
# Compare against master
# NOTE: The command is run twice, once so we can see the debug output, and once to save the results.
echo "Comparing benchmarks... (debug)"
asv compare upstream/master HEAD --factor 1.1 --split --machine ${MACHINE} --verbose
echo "Comparing benchmarks... (saving results)"
asv compare upstream/master HEAD --factor 1.1 --split --machine ${MACHINE} > ./pr/benchmark_results.md
echo "Benchmarks comparison DONE."
- name: Save benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark_results
path: pr/

View file

@ -12,15 +12,15 @@ on:
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'django-components/django-components'
steps:
- name: Checkout the repo
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Setup python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: '3.13'
python-version: '3.9'
- name: Install pypa/build
run: >-

View file

@ -1,101 +1,28 @@
name: Run tests
on:
push:
branches:
- 'master'
- 'dev'
pull_request:
workflow_dispatch:
on: [push, pull_request, workflow_dispatch]
jobs:
build:
runs-on: ${{ matrix.os }}
run-tests:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
os: [ubuntu-latest, windows-latest]
python-version:
- "3.6"
- "3.7"
- "3.8"
- "3.9"
- "3.10"
steps:
# Configure git to handle long paths
# See https://stackoverflow.com/questions/22575662/filename-too-long-in-git-for-windows
#
# Long paths that are over the limit are because of the benchmarking data
# created by asv, as these may look like this:
# docs/benchmarks/graphs/arch-x86_64/branch-master/cpu-AMD EPYC 7763 64-Core Processor/django-5.1/djc-core-html-parser/machine-fv-az1693-854/num_cpu-4/os-Linux 6.8.0-1021-azure/python-3.13/ram-16373792/isolated vs django modes.timeraw_render_lg_subsequent.json
- name: Configure git
run: |
git config --global core.longpaths true
- uses: actions/checkout@v4
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install -r requirements-ci.txt
# See https://playwright.dev/python/docs/intro#installing-playwright-pytest
playwright install chromium --with-deps
pip install tox tox-gh-actions
- name: Run tests
run: tox
# Verify that docs build
test_docs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.13']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install -r requirements-docs.txt
# Install your package locally
python -m pip install -e .
- name: Build documentation
run: mkdocs build --verbose
# Verify that the sample project works
test_sampleproject:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.13']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "pip"
- name: Install dependencies
run: |
cd sampleproject
python -m pip install --upgrade pip
python -m pip install -r requirements.txt
# Install django-components locally
python -m pip install -e ..
- name: Check Django project
run: |
cd sampleproject
python manage.py check
python manage.py migrate --noinput
# Start the server, make request, and exit with error if it fails
python manage.py runserver & sleep 5
curl http://127.0.0.1:8000/ || exit 1
tox

25
.gitignore vendored
View file

@ -1,6 +1,3 @@
# Project-specific files
sampleproject/staticfiles/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@ -46,7 +43,6 @@ htmlcov/
nosetests.xml
coverage.xml
*,cover
.pytest_cache/
# Translations
*.mo
@ -54,7 +50,6 @@ coverage.xml
# Django stuff:
*.log
*.sqlite3
# Sphinx documentation
docs/_build/
@ -69,22 +64,4 @@ target/
# lock file is not needed for development
# as project supports variety of Django versions
poetry.lock
# PyCharm
.idea/
# Python environment
.venv/
.DS_Store
.python-version
site
.direnv/
.envrc
.mypy_cache/
# JS, NPM Dependency directories
node_modules/
jspm_packages/
# Cursor
.cursorrules
pyproject.toml

View file

@ -1,14 +1,14 @@
repos:
- repo: https://github.com/pycqa/isort
rev: 5.13.2
rev: 5.10.1
hooks:
- id: isort
- repo: https://github.com/psf/black
rev: 24.10.0
rev: 22.6.0
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
rev: 7.1.1
rev: 4.0.1
hooks:
- id: flake8
additional_dependencies: [flake8-pyproject]

File diff suppressed because it is too large Load diff

View file

@ -55,7 +55,7 @@ further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at emil@emilstenstrom.se. All
reported by contacting the project team at em@kth.se. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.

View file

@ -1,5 +0,0 @@
# MANIFEST.in is defined so we can include non-Python (e.g. JS) files
# in the built distribution.
# See https://setuptools.pypa.io/en/latest/userguide/miscellaneous.html
graft src/django_components/static
prune tests

871
README.md
View file

@ -1,565 +1,420 @@
# <img src="https://raw.githubusercontent.com/django-components/django-components/master/logo/logo-black-on-white.svg" alt="django-components" style="max-width: 100%; background: white; color: black;">
# django-components
<a href="https://github.com/EmilStenstrom/django-components/actions?query=workflow%3A%22Run+tests%22"><img align="right" src="https://github.com/EmilStenstrom/django-components/workflows/Run%20tests/badge.svg" alt="Show test status"></a>
<a href="https://pepy.tech/project/django-components"><img align="right" src="https://pepy.tech/badge/django-components" alt="Show download stats"></a>
[![PyPI - Version](https://img.shields.io/pypi/v/django-components)](https://pypi.org/project/django-components/) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/django-components)](https://pypi.org/project/django-components/) [![PyPI - License](https://img.shields.io/pypi/l/django-components)](https://github.com/django-components/django-components/blob/master/LICENSE/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/django-components)](https://pypistats.org/packages/django-components) [![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/django-components/django-components/tests.yml)](https://github.com/django-components/django-components/actions/workflows/tests.yml) [![asv](https://img.shields.io/badge/benchmarked%20by-asv-blue.svg?style=flat)](https://django-components.github.io/django-components/latest/benchmarks/)
A way to create simple reusable template components in Django.
### <table><td>[Read the full documentation](https://django-components.github.io/django-components/latest/)</td></table>
It lets you create "template components", that contains both the template, the Javascript and the CSS needed to generate the front end code you need for a modern app. Components look like this:
`django-components` is a modular and extensible UI framework for Django.
It combines Django's templating system with the modularity seen
in modern frontend frameworks like Vue or React.
With `django-components` you can support Django projects small and large without leaving the Django ecosystem.
## Quickstart
A component in django-components can be as simple as a Django template and Python code to declare the component:
```django
{# components/calendar/calendar.html #}
<div class="calendar">
Today's date is <span>{{ date }}</span>
</div>
```htmldjango
{% component "calendar" date="2015-06-19" %}
```
```py
# components/calendar/calendar.py
from django_components import Component, register
And this is what gets rendered (plus the CSS and Javascript you've specified):
@register("calendar")
class Calendar(Component):
template_file = "calendar.html"
```html
<div class="calendar-component">Today's date is <span>2015-06-19</span></div>
```
Or a combination of Django template, Python, CSS, and Javascript:
Read on to learn about the details!
```django
{# components/calendar/calendar.html #}
<div class="calendar">
Today's date is <span>{{ date }}</span>
</div>
# Release notes
*Version 0.17* renames `Component.context` and `Component.template` to `get_context_data` and `get_template_name`. The old methods still work, but emit a deprecation warning. This change was done to sync naming with Django's class based views, and make using django-components more familiar to Django users. `Component.context` and `Component.template` will be removed when version 1.0 is released.
# Installation
Install the app into your environment:
> ```pip install django_components```
Then add the app into INSTALLED APPS in settings.py
```python
INSTALLED_APPS = [
...,
"django_components",
...
]
```
Modify `TEMPLATES` section of settings.py as follows:
- Remove `'APP_DIRS': True,`
- add `loaders` to `OPTIONS` list and set it to following value:
```python
TEMPLATES = [
{
...,
'OPTIONS': {
'context_processors': [
...
],
'loaders':[(
'django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django_components.template_loader.Loader',
]
)],
},
},
]
```
## Optional
To avoid loading the app in each template using ``` {% load django_components %} ```, you can add the tag as a 'builtin' in settings.py
```python
TEMPLATES = [
{
...,
'OPTIONS': {
'context_processors': [
...
],
'builtins': [
'django_components.templatetags.component_tags',
]
},
},
]
```
# Contributors
<!-- readme: contributors -start -->
<table>
<tr>
<td align="center">
<a href="https://github.com/EmilStenstrom">
<img src="https://avatars.githubusercontent.com/u/224130?v=4" width="100;" alt="EmilStenstrom"/>
<br />
<sub><b>Emil Stenström</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/hanifbirgani">
<img src="https://avatars.githubusercontent.com/u/53351186?v=4" width="100;" alt="hanifbirgani"/>
<br />
<sub><b>Hanif Birgani</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/ryanhiebert">
<img src="https://avatars.githubusercontent.com/u/425099?v=4" width="100;" alt="ryanhiebert"/>
<br />
<sub><b>Ryan Hiebert</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/rbeard0330">
<img src="https://avatars.githubusercontent.com/u/2442690?v=4" width="100;" alt="rbeard0330"/>
<br />
<sub><b>Rbeard0330</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/BradleyKirton">
<img src="https://avatars.githubusercontent.com/u/6583221?v=4" width="100;" alt="BradleyKirton"/>
<br />
<sub><b>Bradley Stuart Kirton</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/David-Guillot">
<img src="https://avatars.githubusercontent.com/u/1136694?v=4" width="100;" alt="David-Guillot"/>
<br />
<sub><b>David Guillot</b></sub>
</a>
</td></tr>
<tr>
<td align="center">
<a href="https://github.com/housUnus">
<img src="https://avatars.githubusercontent.com/u/44384710?v=4" width="100;" alt="housUnus"/>
<br />
<sub><b><AIT ALI EL HOSAYN></b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/danjac">
<img src="https://avatars.githubusercontent.com/u/249779?v=4" width="100;" alt="danjac"/>
<br />
<sub><b>Dan Jacob</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Real-Gecko">
<img src="https://avatars.githubusercontent.com/u/2231969?v=4" width="100;" alt="Real-Gecko"/>
<br />
<sub><b>Real-Gecko</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/simkimsia">
<img src="https://avatars.githubusercontent.com/u/245021?v=4" width="100;" alt="simkimsia"/>
<br />
<sub><b>KimSia Sim</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/telenieko">
<img src="https://avatars.githubusercontent.com/u/10505?v=4" width="100;" alt="telenieko"/>
<br />
<sub><b>Marc Fargas</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/spollard">
<img src="https://avatars.githubusercontent.com/u/1459574?v=4" width="100;" alt="spollard"/>
<br />
<sub><b>Spollard</b></sub>
</a>
</td></tr>
</table>
<!-- readme: contributors -end -->
# Compatibility
Django-components supports all <a href="https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django">officially supported versions</a> of Django and Python.
| Python version | Django version |
|----------------|--------------------------|
| 3.6 | 3.2 |
| 3.7 | 3.2 |
| 3.8 | 3.2, 4.0 |
| 3.9 | 3.2, 4.0 |
| 3.10 | 4.0 |
# Create your first component
A component in django-components is the combination of four things: CSS, Javascript, a Django template, and some Python code to put them all together.
First you need a CSS file. Be sure to prefix all rules with a unique class so they don't clash with other rules.
```css
/* components/calendar/calendar.css */
.calendar {
width: 200px;
background: pink;
}
/* In a file called [your app]/components/calendar/style.css */
.calendar-component { width: 200px; background: pink; }
.calendar-component span { font-weight: bold; }
```
Then you need a javascript file that specifies how you interact with this component. You are free to use any javascript framework you want. A good way to make sure this component doesn't clash with other components is to define all code inside an anonymous function that calls itself. This makes all variables defined only be defined inside this component and not affect other components.
```js
/* components/calendar/calendar.js */
document.querySelector(".calendar").onclick = () => {
alert("Clicked calendar!");
};
/* In a file called [your app]/components/calendar/script.js */
(function(){
$(".calendar-component").click(function(){ alert("Clicked calendar!"); })
})()
```
```py
# components/calendar/calendar.py
from django_components import Component, register
Now you need a Django template for your component. Feel free to define more variables like `date` in this example. When creating an instance of this component we will send in the values for these variables. The template will be rendered with whatever template backend you've specified in your Django settings file.
@register("calendar")
class Calendar(Component):
template_file = "calendar.html"
js_file = "calendar.js"
css_file = "calendar.css"
def get_template_data(self, args, kwargs, slots, context):
return {"date": kwargs["date"]}
```htmldjango
{# In a file called [your app]/components/calendar/calendar.html #}
<div class="calendar-component">Today's date is <span>{{ date }}</span></div>
```
Use the component like this:
Finally, we use django-components to tie this together. Start by creating a file called `components.py` in any of your apps. It will be auto-detected and loaded by the app.
```django
{% component "calendar" date="2024-11-06" %}{% endcomponent %}
Inside this file we create a Component by inheriting from the Component class and specifying the context method. We also register the global component registry so that we easily can render it anywhere in our templates.
```python
from django_components import component
@component.register("calendar")
class Calendar(component.Component):
# Note that Django will look for templates inside `[your app]/components` dir
# To customize which template to use based on context override get_template_name instead
template_name = "calendar/calendar.html"
# This component takes one parameter, a date string to show in the template
def get_context_data(self, date):
return {
"date": date,
}
class Media:
css = '[your app]/components/calendar/calendar.css'
js = '[your app]/components/calendar/calendar.js'
```
And this is what gets rendered:
And voilà! We've created our first component.
# Use the component in a template
First load the `component_tags` tag library, then use the `component_[js/css]_dependencies` and `component` tags to render the component to the page.
```htmldjango
{% load component_tags %}
<!DOCTYPE html>
<html>
<head>
<title>My example calendar</title>
{% component_css_dependencies %}
</head>
<body>
{% component "calendar" date="2015-06-19" %}
{% component_js_dependencies %}
</body>
<html>
```
The output from the above template will be:
```html
<!DOCTYPE html>
<html>
<head>
<title>My example calendar</title>
<link href="style.css" type="text/css" media="all" rel="stylesheet">
</head>
<body>
<div class="calendar-component">Today's date is <span>2015-06-19</span></div>
<script src="script.js"></script>
</body>
<html>
```
This makes it possible to organize your front-end around reusable components. Instead of relying on template tags and keeping your CSS and Javascript in the static directory.
# Using slots in templates
Components support something called slots. They work a lot like Django blocks, but only inside components you define. Let's update our calendar component to support more customization, by updating our calendar.html template:
```htmldjango
<div class="calendar-component">
<div class="header">
{% slot "header" %}Calendar header{% endslot %}
</div>
<div class="body">
{% slot "body" %}Today's date is <span>{{ date }}</span>{% endslot %}
</div>
</div>
```
When using the component, you specify what slots you want to fill and where you want to use the defaults from the template. It looks like this:
```htmldjango
{% component_block "calendar" date="2020-06-06" %}
{% slot "body" %}Can you believe it's already <span>{{ date }}</span>??{% endslot %}
{% endcomponent_block %}
```
Since the header block is unspecified, it's taken from the base template. If you put this in a template, and send in date=2020-06-06, this is what's rendered:
```html
<div class="calendar-component">
Today's date is <span>2024-11-06</span>
<div class="header">
Calendar header
</div>
<div class="body">
Can you believe it's already <span>2020-06-06</span>??
</div>
</div>
```
As you can see, component slots lets you write reusable containers, that you fill out when you use a component. This makes for highly reusable components, that can be used in different circumstances.
If you want to include a slot's default content while adding additional content, you can call `slot.super` to insert the base content, which works similarly to `block.super`.
```htmldjango
{% component_block "calendar" date="2020-06-06" %}
{% slot "body" %}{{ slot.super }}. Have a great day!{% endslot %}
{% endcomponent_block %}
```
Produces:
```html
<div class="calendar-component">
<div class="header">
Calendar header
</div>
<div class="body">
Today's date is <span>2020-06-06</span>. Have a great day!
</div>
</div>
```
Read on to learn about all the exciting details and configuration possibilities!
# Component context and scope
(If you instead prefer to jump right into the code, [check out the example project](https://github.com/django-components/django-components/tree/master/sampleproject))
By default, components can access context variables from the parent template, just like templates that are included with the `{% include %}` tag. Just like with `{% include %}`, if you don't want the component template to have access to the parent context, add `only` to the end of the `{% component %}` (or `{% component_block %}` tag):
## Features
```htmldjango
{% component "calendar" date="2015-06-19" only %}
```
### Modern and modular UI
NOTE: `{% csrf_token %}` tags need access to the top-level context, and they will not function properly if they are rendered in a component that is called with the `only` modifier.
- Create self-contained, reusable UI elements.
- Each component can include its own HTML, CSS, and JS, or additional third-party JS and CSS.
- HTML, CSS, and JS can be defined on the component class, or loaded from files.
Components can also access the outer context in their context methods by accessing the property `outer_context`.
# Available settings
All library settings are handled from a global COMPONENTS variable that is read from settings.py. By default you don't need it set, there are reasonable defaults.
## Configure the module where components are loaded from
Configure the location where components are loaded. To do this, add a COMPONENTS variable to you settings.py with a list of python paths to load. This allows you to build a structure of components that are independent from your apps.
```python
from django_components import Component
@register("calendar")
class Calendar(Component):
template = """
<div class="calendar">
Today's date is
<span>{{ date }}</span>
</div>
"""
css = """
.calendar {
width: 200px;
background: pink;
}
"""
js = """
document.querySelector(".calendar")
.addEventListener("click", () => {
alert("Clicked calendar!");
});
"""
# Additional JS and CSS
class Media:
js = ["https://cdn.jsdelivr.net/npm/htmx.org@2/dist/htmx.min.js"]
css = ["bootstrap/dist/css/bootstrap.min.css"]
# Variables available in the template
def get_template_data(self, args, kwargs, slots, context):
return {
"date": kwargs["date"]
}
```
### Composition with slots
- Render components inside templates with
[`{% component %}`](https://django-components.github.io/django-components/latest/reference/template_tags#component) tag.
- Compose them with [`{% slot %}`](https://django-components.github.io/django-components/latest/reference/template_tags#slot)
and [`{% fill %}`](https://django-components.github.io/django-components/latest/reference/template_tags#fill) tags.
- Vue-like slot system, including [scoped slots](https://django-components.github.io/django-components/latest/concepts/fundamentals/slots/#slot-data).
```django
{% component "Layout"
bookmarks=bookmarks
breadcrumbs=breadcrumbs
%}
{% fill "header" %}
<div class="flex justify-between gap-x-12">
<div class="prose">
<h3>{{ project.name }}</h3>
</div>
<div class="font-semibold text-gray-500">
{{ project.start_date }} - {{ project.end_date }}
</div>
</div>
{% endfill %}
{# Access data passed to `{% slot %}` with `data` #}
{% fill "tabs" data="tabs_data" %}
{% component "TabItem" header="Project Info" %}
{% component "ProjectInfo"
project=project
project_tags=project_tags
attrs:class="py-5"
attrs:width=tabs_data.width
/ %}
{% endcomponent %}
{% endfill %}
{% endcomponent %}
```
### Extended template tags
`django-components` is designed for flexibility, making working with templates a breeze.
It extends Django's template tags syntax with:
<!-- TODO - Document literal lists and dictionaries -->
- Literal lists and dictionaries in the template
- [Self-closing tags](https://django-components.github.io/django-components/latest/concepts/fundamentals/template_tag_syntax#self-closing-tags) `{% mytag / %}`
- [Multi-line template tags](https://django-components.github.io/django-components/latest/concepts/fundamentals/template_tag_syntax#multiline-tags)
- [Spread operator](https://django-components.github.io/django-components/latest/concepts/fundamentals/template_tag_syntax#spread-operator) `...` to dynamically pass args or kwargs into the template tag
- [Template tags inside literal strings](https://django-components.github.io/django-components/latest/concepts/fundamentals/template_tag_syntax#template-tags-inside-literal-strings) like `"{{ first_name }} {{ last_name }}"`
- [Pass dictionaries by their key-value pairs](https://django-components.github.io/django-components/latest/concepts/fundamentals/template_tag_syntax#pass-dictonary-by-its-key-value-pairs) `attr:key=val`
```django
{% component "table"
...default_attrs
title="Friend list for {{ user.name }}"
headers=["Name", "Age", "Email"]
data=[
{
"name": "John"|upper,
"age": 30|add:1,
"email": "john@example.com",
"hobbies": ["reading"],
},
{
"name": "Jane"|upper,
"age": 25|add:1,
"email": "jane@example.com",
"hobbies": ["reading", "coding"],
},
COMPONENTS = {
"libraries": [
"mysite.components.forms",
"mysite.components.buttons",
"mysite.components.cards",
],
attrs:class="py-4 ma-2 border-2 border-gray-300 rounded-md"
/ %}
}
```
You too can define template tags with these features by using
[`@template_tag()`](https://django-components.github.io/django-components/latest/reference/api/#django_components.template_tag)
or [`BaseNode`](https://django-components.github.io/django-components/latest/reference/api/#django_components.BaseNode).
## Disable autodiscovery
Read more on [Custom template tags](https://django-components.github.io/django-components/latest/concepts/advanced/template_tags/).
### Full programmatic access
When you render a component, you can access everything about the component:
- Component input: [args, kwargs, slots and context](https://django-components.github.io/django-components/latest/concepts/fundamentals/render_api/#component-inputs)
- Component's template, CSS and JS
- Django's [context processors](https://django-components.github.io/django-components/latest/concepts/fundamentals/render_api/#request-and-context-processors)
- Unique [render ID](https://django-components.github.io/django-components/latest/concepts/fundamentals/render_api/#component-id)
If you specify all the component locations with the setting above and have a lot of apps, you can (very) slightly speed things up by disabling autodiscovery.
```python
class Table(Component):
js_file = "table.js"
css_file = "table.css"
template = """
<div class="table">
<span>{{ variable }}</span>
</div>
"""
def get_template_data(self, args, kwargs, slots, context):
# Access component's ID
assert self.id == "djc1A2b3c"
# Access component's inputs and slots
assert self.args == [123, "str"]
assert self.kwargs == {"variable": "test", "another": 1}
footer_slot = self.slots["footer"]
some_var = self.context["some_var"]
# Access the request object and Django's context processors, if available
assert self.request.GET == {"query": "something"}
assert self.context_processors_data['user'].username == "admin"
return {
"variable": kwargs["variable"],
}
# Access component's HTML / JS / CSS
Table.template
Table.js
Table.css
# Render the component
rendered = Table.render(
kwargs={"variable": "test", "another": 1},
args=(123, "str"),
slots={"footer": "MY_FOOTER"},
)
COMPONENTS = {
"autodiscovery": False,
}
```
### Granular HTML attributes
## Tune the template cache
Use the [`{% html_attrs %}`](https://django-components.github.io/django-components/latest/concepts/fundamentals/html_attributes/) template tag to render HTML attributes.
It supports:
- Defining attributes as whole dictionaries or keyword arguments
- Merging attributes from multiple sources
- Boolean attributes
- Appending attributes
- Removing attributes
- Defining default attributes
```django
<div
{% html_attrs
attrs
defaults:class="default-class"
class="extra-class"
%}
>
```
[`{% html_attrs %}`](https://django-components.github.io/django-components/latest/concepts/fundamentals/html_attributes/) offers a Vue-like granular control for
[`class`](https://django-components.github.io/django-components/latest/concepts/fundamentals/html_attributes/#merging-class-attributes)
and [`style`](https://django-components.github.io/django-components/latest/concepts/fundamentals/html_attributes/#merging-style-attributes)
HTML attributes,
where you can use a dictionary to manage each class name or style property separately.
```django
{% html_attrs
class="foo bar"
class={
"baz": True,
"foo": False,
}
class="extra"
%}
```
```django
{% html_attrs
style="text-align: center; background-color: blue;"
style={
"background-color": "green",
"color": None,
"width": False,
}
style="position: absolute; height: 12px;"
%}
```
Read more about [HTML attributes](https://django-components.github.io/django-components/latest/concepts/fundamentals/html_attributes/).
### HTML fragment support
`django-components` makes integration with HTMX, AlpineJS or jQuery easy by allowing components to be rendered as [HTML fragments](https://django-components.github.io/django-components/latest/concepts/advanced/html_fragments/):
- Components's JS and CSS files are loaded automatically when the fragment is inserted into the DOM.
- Components can be [exposed as Django Views](https://django-components.github.io/django-components/latest/concepts/fundamentals/component_views_urls/) with `get()`, `post()`, `put()`, `patch()`, `delete()` methods
- Automatically create an endpoint for a component with [`Component.View.public`](https://django-components.github.io/django-components/latest/concepts/fundamentals/component_views_urls/#register-urls-automatically)
```py
# components/calendar/calendar.py
@register("calendar")
class Calendar(Component):
template_file = "calendar.html"
class View:
# Register Component with `urlpatterns`
public = True
# Define handlers
def get(self, request, *args, **kwargs):
page = request.GET.get("page", 1)
return self.component.render_to_response(
request=request,
kwargs={
"page": page,
},
)
def get_template_data(self, args, kwargs, slots, context):
return {
"page": kwargs["page"],
}
# Get auto-generated URL for the component
url = get_component_url(Calendar)
# Or define explicit URL in urls.py
path("calendar/", Calendar.as_view())
```
### Provide / Inject
`django-components` supports the provide / inject pattern, similarly to React's [Context Providers](https://react.dev/learn/passing-data-deeply-with-context) or Vue's [provide / inject](https://vuejs.org/guide/components/provide-inject):
- Use the [`{% provide %}`](https://django-components.github.io/django-components/latest/reference/template_tags/#provide) tag to provide data to the component tree
- Use the [`Component.inject()`](https://django-components.github.io/django-components/latest/reference/api/#django_components.Component.inject) method to inject data into the component
Read more about [Provide / Inject](https://django-components.github.io/django-components/latest/concepts/advanced/provide_inject).
```django
<body>
{% provide "theme" variant="light" %}
{% component "header" / %}
{% endprovide %}
</body>
```
```djc_py
@register("header")
class Header(Component):
template = "..."
def get_template_data(self, args, kwargs, slots, context):
theme = self.inject("theme").variant
return {
"theme": theme,
}
```
### Input validation and static type hints
Avoid needless errors with [type hints and runtime input validation](https://django-components.github.io/django-components/latest/concepts/fundamentals/typing_and_validation/).
To opt-in to input validation, define types for component's args, kwargs, slots, and more:
```py
from typing import NamedTuple, Optional
from django.template import Context
from django_components import Component, Slot, SlotInput
class Button(Component):
class Args(NamedTuple):
size: int
text: str
class Kwargs(NamedTuple):
variable: str
another: int
maybe_var: Optional[int] = None # May be omitted
class Slots(NamedTuple):
another_slot: SlotInput
my_slot: Optional[SlotInput] = None # May be omitted
def get_template_data(self, args: Args, kwargs: Kwargs, slots: Slots, context: Context):
args.size # int
kwargs.variable # str
slots.my_slot # Slot[MySlotData]
```
To have type hints when calling
[`Button.render()`](https://django-components.github.io/django-components/latest/reference/api/#django_components.Component.render) or
[`Button.render_to_response()`](https://django-components.github.io/django-components/latest/reference/api/#django_components.Component.render_to_response),
wrap the inputs in their respective `Args`, `Kwargs`, and `Slots` classes:
```py
Button.render(
# Error: First arg must be `int`, got `float`
args=Button.Args(
size=1.25,
text="abc",
),
# Error: Key "another" is missing
kwargs=Button.Kwargs(
variable="text",
),
)
```
### Extensions
Django-components functionality can be extended with [Extensions](https://django-components.github.io/django-components/latest/concepts/advanced/extensions/).
Extensions allow for powerful customization and integrations. They can:
- Tap into lifecycle events, such as when a component is created, deleted, or registered
- Add new attributes and methods to the components
- Add custom CLI commands
- Add custom URLs
Some of the extensions include:
- [Component caching](https://github.com/django-components/django-components/blob/master/src/django_components/extensions/cache.py)
- [Django View integration](https://github.com/django-components/django-components/blob/master/src/django_components/extensions/view.py)
- [Component defaults](https://github.com/django-components/django-components/blob/master/src/django_components/extensions/defaults.py)
- [Pydantic integration (input validation)](https://github.com/django-components/djc-ext-pydantic)
Some of the planned extensions include:
- AlpineJS integration
- Storybook integration
- Component-level benchmarking with asv
### Caching
- [Components can be cached](https://django-components.github.io/django-components/latest/concepts/advanced/component_caching/) using Django's cache framework.
- Caching rules can be configured on a per-component basis.
- Components are cached based on their input. Or you can write custom caching logic.
```py
from django_components import Component
class MyComponent(Component):
class Cache:
enabled = True
ttl = 60 * 60 * 24 # 1 day
def hash(self, *args, **kwargs):
return hash(f"{json.dumps(args)}:{json.dumps(kwargs)}")
```
### Simple testing
- Write tests for components with [`@djc_test`](https://django-components.github.io/django-components/latest/concepts/advanced/testing/) decorator.
- The decorator manages global state, ensuring that tests don't leak.
- If using `pytest`, the decorator allows you to parametrize Django or Components settings.
- The decorator also serves as a stand-in for Django's [`@override_settings`](https://docs.djangoproject.com/en/5.2/topics/testing/tools/#django.test.override_settings).
Each time a template is rendered it is cached to a global in-memory cache (using Python's lru_cache decorator). This speeds up the next render of the component. As the same component is often used many times on the same page, these savings add up. By default the cache holds 128 component templates in memory, which should be enough for most sites. But if you have a lot of components, or if you are using the `template` method of a component to render lots of dynamic templates, you can increase this number. To remove the cache limit altogether and cache everything, set template_cache_size to `None`.
```python
from django_components.testing import djc_test
from components.my_table import MyTable
@djc_test
def test_my_table():
rendered = MyTable.render(
kwargs={
"title": "My table",
},
)
assert rendered == "<table>My table</table>"
COMPONENTS = {
"template_cache_size": 256,
}
```
### Debugging features
# Install locally and run the tests
- **Visual component inspection**: Highlight components and slots directly in your browser.
- **Detailed tracing logs to supply AI-agents with context**: The logs include component and slot names and IDs, and their position in the tree.
Start by forking the project by clicking the **Fork button** in the upper right corner of the GitHub page. This makes a copy of the repository in your own name. Now you can clone this repository locally and start adding features:
<div style="text-align: center;">
<img src="https://github.com/django-components/django-components/blob/master/docs/images/debug-highlight-slots.png?raw=true" alt="Component debugging visualization showing slot highlighting" width="500" style="margin: auto;">
</div>
```sh
git clone https://github.com/<your GitHub username>/django-components.git
```
### Sharing components
To quickly run the tests install the local dependencies by running:
- Install and use third-party components from PyPI
- Or publish your own "component registry"
- Highly customizable - Choose how the components are called in the template (and more):
```sh
pip install -r requirements-dev.txt
```
```django
{% component "calendar" date="2024-11-06" %}
{% endcomponent %}
Now you can run the tests to make sure everything works as expected:
{% calendar date="2024-11-06" %}
{% endcalendar %}
```
```sh
pytest
```
## Documentation
The library is also tested across many versions of Python and Django. To run tests that way:
[Read the full documentation here](https://django-components.github.io/django-components/latest/).
```sh
pyenv install 3.6.9
pyenv install 3.7.9
pyenv install 3.8.9
pyenv install 3.9.4
pyenv local 3.6.9 3.7.9 3.8.9 3.9.4
tox -p
```
... or jump right into the code, [check out the example project](https://github.com/django-components/django-components/tree/master/sampleproject).
## Performance
Our aim is to be at least as fast as Django templates.
As of `0.130`, `django-components` is ~4x slower than Django templates.
| | Render time|
|----------|----------------------|
| django | 68.9±0.6ms |
| django-components | 259±4ms |
See the [full performance breakdown](https://django-components.github.io/django-components/latest/benchmarks/) for more information.
## Release notes
Read the [Release Notes](https://github.com/django-components/django-components/tree/master/CHANGELOG.md)
to see the latest features and fixes.
## Community examples
One of our goals with `django-components` is to make it easy to share components between projects. If you have a set of components that you think would be useful to others, please open a pull request to add them to the list below.
- [django-htmx-components](https://github.com/iwanalabs/django-htmx-components): A set of components for use with [htmx](https://htmx.org/).
- [djc-heroicons](https://pypi.org/project/djc-heroicons/): A component that renders icons from [Heroicons.com](https://heroicons.com/).
## Contributing and development
Get involved or sponsor this project - [See here](https://django-components.github.io/django-components/dev/overview/contributing/)
Running django-components locally for development - [See here](https://django-components.github.io/django-components/dev/overview/development/)

View file

@ -1,210 +0,0 @@
{
// The version of the config file format. Do not change, unless
// you know what you are doing
"version": 1,
// The name of the project being benchmarked
"project": "django-components",
// The project's homepage
// "project_url": "https://django-components.github.io/django-components/",
"project_url": "/django-components/", // Relative path, since benchmarks are nested under the docs site
// The URL or local path of the source code repository for the
// project being benchmarked
"repo": ".",
// The Python project's subdirectory in your repo. If missing or
// the empty string, the project is assumed to be located at the root
// of the repository.
// "repo_subdir": "",
// Customizable commands for building the project.
// See asv.conf.json documentation.
// To build the package using pyproject.toml (PEP518), uncomment the following lines
// "build_command": [
// "python -m pip install build",
// "python -m build",
// "python -mpip wheel -w {build_cache_dir} {build_dir}"
// ],
// To build the package using setuptools and a setup.py file, uncomment the following lines
// "build_command": [
// "python setup.py build",
// "python -mpip wheel -w {build_cache_dir} {build_dir}"
// ],
// Customizable commands for installing and uninstalling the project.
// See asv.conf.json documentation.
// "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
// "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
"install_command": ["in-dir={env_dir} python -mpip install ./project"],
// List of branches to benchmark. If not provided, defaults to "main"
// (for git) or "default" (for mercurial).
// "branches": ["main"], // for git
// "branches": ["default"], // for mercurial
"branches": [
"master"
],
// The DVCS being used. If not set, it will be automatically
// determined from "repo" by looking at the protocol in the URL
// (if remote), or by looking for special directories, such as
// ".git" (if local).
// "dvcs": "git",
// The tool to use to create environments. May be "conda",
// "virtualenv", "mamba" (above 3.8)
// or other value depending on the plugins in use.
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
"environment_type": "virtualenv",
// timeout in seconds for installing any dependencies in environment
// defaults to 10 min
//"install_timeout": 600,
// the base URL to show a commit for the project.
// "show_commit_url": "http://github.com/owner/project/commit/",
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
"pythons": [
"3.13"
],
// The list of conda channel names to be searched for benchmark
// dependency packages in the specified order
// "conda_channels": ["conda-forge", "defaults"],
// A conda environment file that is used for environment creation.
// "conda_environment_file": "environment.yml",
// The matrix of dependencies to test. Each key of the "req"
// requirements dictionary is the name of a package (in PyPI) and
// the values are version numbers. An empty list or empty string
// indicates to just test against the default (latest)
// version. null indicates that the package is to not be
// installed. If the package to be tested is only available from
// PyPi, and the 'environment_type' is conda, then you can preface
// the package name by 'pip+', and the package will be installed
// via pip (with all the conda available packages installed first,
// followed by the pip installed packages).
//
// The ``@env`` and ``@env_nobuild`` keys contain the matrix of
// environment variables to pass to build and benchmark commands.
// An environment will be created for every combination of the
// cartesian product of the "@env" variables in this matrix.
// Variables in "@env_nobuild" will be passed to every environment
// during the benchmark phase, but will not trigger creation of
// new environments. A value of ``null`` means that the variable
// will not be set for the current combination.
//
// "matrix": {
// "req": {
// "numpy": ["1.6", "1.7"],
// "six": ["", null], // test with and without six installed
// "pip+emcee": [""] // emcee is only available for install with pip.
// },
// "env": {"ENV_VAR_1": ["val1", "val2"]},
// "env_nobuild": {"ENV_VAR_2": ["val3", null]},
// },
"matrix": {
"req": {
"django": [
"5.1"
],
"djc-core-html-parser": [""] // Empty string means the latest version
}
},
// Combinations of libraries/python versions can be excluded/included
// from the set to test. Each entry is a dictionary containing additional
// key-value pairs to include/exclude.
//
// An exclude entry excludes entries where all values match. The
// values are regexps that should match the whole string.
//
// An include entry adds an environment. Only the packages listed
// are installed. The 'python' key is required. The exclude rules
// do not apply to includes.
//
// In addition to package names, the following keys are available:
//
// - python
// Python version, as in the *pythons* variable above.
// - environment_type
// Environment type, as above.
// - sys_platform
// Platform, as in sys.platform. Possible values for the common
// cases: 'linux2', 'win32', 'cygwin', 'darwin'.
// - req
// Required packages
// - env
// Environment variables
// - env_nobuild
// Non-build environment variables
//
// "exclude": [
// {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
// {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda
// {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1
// ],
//
// "include": [
// // additional env for python3.12
// {"python": "3.12", "req": {"numpy": "1.26"}, "env_nobuild": {"FOO": "123"}},
// // additional env if run on windows+conda
// {"platform": "win32", "environment_type": "conda", "python": "3.12", "req": {"libpython": ""}},
// ],
// The directory (relative to the current directory) that benchmarks are
// stored in. If not provided, defaults to "benchmarks"
"benchmark_dir": "benchmarks",
// The directory (relative to the current directory) to cache the Python
// environments in. If not provided, defaults to "env"
"env_dir": ".asv/env",
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".
"results_dir": ".asv/results",
// The directory (relative to the current directory) that the html tree
// should be written to. If not provided, defaults to "html".
// "html_dir": ".asv/html",
"html_dir": "docs/benchmarks", // # TODO
// The number of characters to retain in the commit hashes.
// "hash_length": 8,
// `asv` will cache results of the recent builds in each
// environment, making them faster to install next time. This is
// the number of builds to keep, per environment.
// "build_cache_size": 2,
// The commits after which the regression search in `asv publish`
// should start looking for regressions. Dictionary whose keys are
// regexps matching to benchmark names, and values corresponding to
// the commit (exclusive) after which to start looking for
// regressions. The default is to start from the first commit
// with results. If the commit is `null`, regression detection is
// skipped for the matching benchmark.
//
// "regressions_first_commits": {
// "some_benchmark": "352cdf", // Consider regressions only after this commit
// "another_benchmark": null, // Skip regression detection altogether
// },
// The thresholds for relative change in results, after which `asv
// publish` starts reporting regressions. Dictionary of the same
// form as in ``regressions_first_commits``, with values
// indicating the thresholds. If multiple entries match, the
// maximum is taken. If no entry matches, the default is 5%.
//
// "regressions_thresholds": {
// "some_benchmark": 0.01, // Threshold of 1%
// "another_benchmark": 0.5, // Threshold of 50%
// },
}

View file

@ -1,195 +0,0 @@
# Benchmarks
## Overview
[`asv`](https://github.com/airspeed-velocity/) (Airspeed Velocity) is used for benchmarking performance.
`asv` covers the entire benchmarking workflow. We can:
1. Define benchmark tests similarly to writing pytest tests (supports both timing and memory benchmarks)
2. Run the benchmarks and generate results for individual git commits, tags, or entire branches
3. View results as an HTML report (dashboard with charts)
4. Compare performance between two commits / tags / branches for CI integration
![asv dashboard](./assets/asv_dashboard.png)
django-components uses `asv` for these use cases:
- Benchmarking across releases:
1. When a git tag is created and pushed, this triggers a Github Action workflow (see `docs.yml`).
2. The workflow runs the benchmarks with the latest release, and commits the results to the repository.
Thus, we can see how performance changes across releases.
- Displaying performance results on the website:
1. When a git tag is created and pushed, we also update the documentation website (see `docs.yml`).
2. Before we publish the docs website, we generate the HTML report for the benchmark results.
3. The generated report is placed in the `docs/benchmarks/` directory, and is thus
published with the rest of the docs website and available under [`/benchmarks/`](https://django-components.github.io/django-components/latest/benchmarks).
- NOTE: The location where the report is placed is defined in `asv.conf.json`.
- Compare performance between commits on pull requests:
1. When a pull request is made, this triggers a Github Action workflow (see `benchmark.yml`).
2. The workflow compares performance between commits.
3. The report is added to the PR as a comment made by a bot.
## Interpreting benchmarks
The results CANNOT be taken as ABSOLUTE values e.g.:
"This example took 200ms to render, so my page will also take 200ms to render."
Each UI may consist of different number of Django templates, template tags, and components, and all these may influence the rendering time differently.
Instead, the results MUST be understood as RELATIVE values.
- If a commit is 10% slower than the master branch, that's valid.
- If Django components are 10% slower than vanilla Django templates, that's valid.
- If "isolated" mode is 10% slower than "django" mode, that's valid.
## Development
Let's say we want to generate results for the last 5 commits.
1. Install `asv`
```bash
pip install asv
```
2. Run benchmarks and generate results
```bash
asv run HEAD --steps 5 -e
```
- `HEAD` means that we want to run benchmarks against the [current branch](https://stackoverflow.com/a/2304106/9788634).
- `--steps 5` means that we want to run benchmarks for the last 5 commits.
- `-e` to print out any errors.
The results will be stored in `.asv/results/`, as configured in `asv.conf.json`.
3. Generate HTML report
```bash
asv publish
asv preview
```
- `publish` generates the HTML report and stores it in `docs/benchmarks/`, as configured in `asv.conf.json`.
- `preview` starts a local server and opens the report in the browser.
NOTE: Since the results are stored in `docs/benchmarks/`, you can also view the results
with `mkdocs serve` and navigating to `http://localhost:9000/django-components/benchmarks/`.
NOTE 2: Running `publish` will overwrite the existing contents of `docs/benchmarks/`.
## Writing benchmarks
`asv` supports writing different [types of benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#benchmark-types). What's relevant for us is:
- [Raw timing benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#raw-timing-benchmarks)
- [Peak memory benchmarks](https://asv.readthedocs.io/en/latest/writing_benchmarks.html#peak-memory)
Notes:
- The difference between "raw timing" and "timing" tests is that "raw timing" is run in a separate process.
And instead of running the logic within the test function itself, we return a script (string)
that will be executed in the separate process.
- The difference between "peak memory" and "memory" tests is that "memory" calculates the memory
of the object returned from the test function. On the other hand, "peak memory" detects the
peak memory usage during the execution of the test function (including the setup function).
You can write the test file anywhere in the `benchmarks/` directory, `asv` will automatically find it.
Inside the file, write a test function. Depending on the type of the benchmark,
prefix the test function name with `timeraw_` or `peakmem_`. See [`benchmarks/benchmark_templating.py`](benchmark_templating.py) for examples.
### Ensuring that the benchmarked logic is correct
The approach I (Juro) took with benchmarking the overall template rendering is that
I've defined the actual logic in `tests/test_benchmark_*.py` files. So those files
are part of the normal pytest testing, and even contain a section with pytest tests.
This ensures that the benchmarked logic remains functional and error-free.
However, there's some caveats:
1. I wasn't able to import files from `tests/`.
2. When running benchmarks, we don't want to run the pytest tests.
To work around that, the approach I used for loading the files from the `tests/` directory is to:
1. Get the file's source code as a string.
2. Cut out unwanted sections (like the pytest tests).
3. Append the benchmark-specific code to the file (e.g. to actually render the templates).
4. In case of "timeraw" benchmarks, we can simply return the remaining code as a string
to be run in a separate process.
5. In case of "peakmem" benchmarks, we need to access this modified source code as Python objects.
So the code is made available as a "virtual" module, which makes it possible to import Python objects like so:
```py
from my_virtual_module import run_my_benchmark
```
## Using `asv`
### Compare latest commit against master
Note: Before comparing, you must run the benchmarks first to generate the results. The `continuous` command does not generate the results by itself.
```bash
asv continuous master^! HEAD^! --factor 1.1
```
- Factor of `1.1` means that the new commit is allowed to be 10% slower/faster than the master commit.
- `^` means that we mean the COMMIT of the branch, not the BRANCH itself.
Without it, we would run benchmarks for the whole branch history.
With it, we run benchmarks FROM the latest commit (incl) TO ...
- `!` means that we want to select range spanning a single commit.
Without it, we would run benchmarks for all commits FROM the latest commit
TO the start of the branch history.
With it, we run benchmarks ONLY FOR the latest commit.
### More Examples
Notes:
- Use `~1` to select the second-latest commit, `~2` for the third-latest, etc.
Generate benchmarks for the latest commit in `master` branch.
```bash
asv run master^!
```
Generate benchmarks for second-latest commit in `master` branch.
```bash
asv run master~1^!
```
Generate benchmarks for all commits in `master` branch.
```bash
asv run master
```
Generate benchmarks for all commits in `master` branch, but exclude the latest commit.
```bash
asv run master~1
```
Generate benchmarks for the LAST 5 commits in `master` branch, but exclude the latest commit.
```bash
asv run master~1 --steps 5
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 321 KiB

View file

@ -1,446 +0,0 @@
# Write the benchmarking functions here
# See "Writing benchmarks" in the asv docs for more information.
import re
from pathlib import Path
from types import ModuleType
from typing import Literal
# Fix for https://github.com/airspeed-velocity/asv_runner/pull/44
import benchmarks.monkeypatch_asv # noqa: F401
from benchmarks.utils import benchmark, create_virtual_module
DJC_VS_DJ_GROUP = "Components vs Django"
DJC_ISOLATED_VS_NON_GROUP = "isolated vs django modes"
OTHER_GROUP = "Other"
DjcContextMode = Literal["isolated", "django"]
TemplatingRenderer = Literal["django", "django-components", "none"]
TemplatingTestSize = Literal["lg", "sm"]
TemplatingTestType = Literal[
"first", # Testing performance of the first time the template is rendered
"subsequent", # Testing performance of the subsequent times the template is rendered
"startup", # Testing performance of the startup time (e.g. defining classes and templates)
]
def _get_templating_filepath(renderer: TemplatingRenderer, size: TemplatingTestSize) -> Path:
if renderer == "none":
raise ValueError("Cannot get filepath for renderer 'none'")
elif renderer not in ["django", "django-components"]:
raise ValueError(f"Invalid renderer: {renderer}")
if size not in ("lg", "sm"):
raise ValueError(f"Invalid size: {size}, must be one of ('lg', 'sm')")
# At this point, we know the renderer is either "django" or "django-components"
root = file_path = Path(__file__).parent.parent
if renderer == "django":
if size == "lg":
file_path = root / "tests" / "test_benchmark_django.py"
else:
file_path = root / "tests" / "test_benchmark_django_small.py"
else:
if size == "lg":
file_path = root / "tests" / "test_benchmark_djc.py"
else:
file_path = root / "tests" / "test_benchmark_djc_small.py"
return file_path
def _get_templating_script(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
context_mode: DjcContextMode,
imports_only: bool,
) -> str:
if renderer == "none":
return ""
elif renderer not in ["django", "django-components"]:
raise ValueError(f"Invalid renderer: {renderer}")
# At this point, we know the renderer is either "django" or "django-components"
file_path = _get_templating_filepath(renderer, size)
contents = file_path.read_text()
# The files with benchmarked code also have a section for testing them with pytest.
# We remove that pytest section, so the script is only the benchmark code.
contents = contents.split("# ----------- TESTS START ------------ #")[0]
if imports_only:
# There is a benchmark test for measuring the time it takes to import the module.
# For that, we exclude from the code everything AFTER this line
contents = contents.split("# ----------- IMPORTS END ------------ #")[0]
else:
# Set the context mode by replacing variable in the script
contents = re.sub(r"CONTEXT_MODE.*?\n", f"CONTEXT_MODE = '{context_mode}'\n", contents, count=1)
return contents
def _get_templating_module(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
context_mode: DjcContextMode,
imports_only: bool,
) -> ModuleType:
if renderer not in ("django", "django-components"):
raise ValueError(f"Invalid renderer: {renderer}")
file_path = _get_templating_filepath(renderer, size)
script = _get_templating_script(renderer, size, context_mode, imports_only)
# This makes it possible to import the module in the benchmark function
# as `import test_templating`
module = create_virtual_module("test_templating", script, str(file_path))
return module
# The `timeraw_` tests run in separate processes. But when running memory benchmarks,
# the tested logic runs in the same process as the where we run the benchmark functions
# (e.g. `peakmem_render_lg_first()`). Thus, the `peakmem_` functions have access to this file
# when the tested logic runs.
#
# Secondly, `asv` doesn't offer any way to pass data from `setup` to actual test.
#
# And so we define this global, which, when running memory benchmarks, the `setup` function
# populates. And then we trigger the actual render from within the test body.
do_render = lambda: None # noqa: E731
def setup_templating_memory_benchmark(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
test_type: TemplatingTestType,
context_mode: DjcContextMode,
imports_only: bool = False,
):
global do_render
module = _get_templating_module(renderer, size, context_mode, imports_only)
data = module.gen_render_data()
render = module.render
do_render = lambda: render(data) # noqa: E731
# Do the first render as part of setup if we're testing the subsequent renders
if test_type == "subsequent":
do_render()
# The timing benchmarks run the actual code in a separate process, by using the `timeraw_` prefix.
# As such, we don't actually load the code in this file. Instead, we only prepare a script (raw string)
# that will be run in the new process.
def prepare_templating_benchmark(
renderer: TemplatingRenderer,
size: TemplatingTestSize,
test_type: TemplatingTestType,
context_mode: DjcContextMode,
imports_only: bool = False,
):
setup_script = _get_templating_script(renderer, size, context_mode, imports_only)
# If we're testing the startup time, then the setup is actually the tested code
if test_type == "startup":
return setup_script
else:
# Otherwise include also data generation as part of setup
setup_script += "\n\n" "render_data = gen_render_data()\n"
# Do the first render as part of setup if we're testing the subsequent renders
if test_type == "subsequent":
setup_script += "render(render_data)\n"
benchmark_script = "render(render_data)\n"
return benchmark_script, setup_script
# - Group: django-components vs django
# - time: djc vs django (startup lg)
# - time: djc vs django (lg - FIRST)
# - time: djc vs django (sm - FIRST)
# - time: djc vs django (lg - SUBSEQUENT)
# - time: djc vs django (sm - SUBSEQUENT)
# - mem: djc vs django (lg - FIRST)
# - mem: djc vs django (sm - FIRST)
# - mem: djc vs django (lg - SUBSEQUENT)
# - mem: djc vs django (sm - SUBSEQUENT)
#
# NOTE: While the name suggests we're comparing Django and Django-components, be aware that
# in our "Django" tests, we still install and import django-components. We also use
# django-components's `{% html_attrs %}` tag in the Django scenario. `{% html_attrs %}`
# was used because the original sample code was from django-components.
#
# As such, these tests should seen not as "Using Django vs Using Components". But instead,
# it should be "What is the relative cost of using Components?".
#
# As an example, the benchmarking for the startup time and memory usage is not comparing
# two independent approaches. Rather, the test is checking if defining Components classes
# is more expensive than vanilla Django templates.
class DjangoComponentsVsDjangoTests:
    """Benchmarks comparing django-components against plain Django templating.

    NOTE: the "django" renderer still imports django-components and uses its
    `{% html_attrs %}` tag, so the numbers measure the relative cost of using
    Components rather than Django vs. a Components-free stack.
    """
    # Testing startup time (e.g. defining classes and templates)
    @benchmark(
        pretty_name="startup - large",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
    )
    def timeraw_startup_lg(self, renderer: TemplatingRenderer):
        # timeraw_* benchmarks return the code string(s) to be timed in a subprocess.
        return prepare_templating_benchmark(renderer, "lg", "startup", "isolated")
    # Wall-time render benchmarks: first vs. subsequent renders, small vs. large templates.
    @benchmark(
        pretty_name="render - small - first render",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
    )
    def timeraw_render_sm_first(self, renderer: TemplatingRenderer):
        return prepare_templating_benchmark(renderer, "sm", "first", "isolated")
    @benchmark(
        pretty_name="render - small - second render",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
    )
    def timeraw_render_sm_subsequent(self, renderer: TemplatingRenderer):
        return prepare_templating_benchmark(renderer, "sm", "subsequent", "isolated")
    @benchmark(
        pretty_name="render - large - first render",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
        # This is the only benchmark kept when DJC_BENCHMARK_QUICK (PR mode) is set.
        include_in_quick_benchmark=True,
    )
    def timeraw_render_lg_first(self, renderer: TemplatingRenderer):
        return prepare_templating_benchmark(renderer, "lg", "first", "isolated")
    @benchmark(
        pretty_name="render - large - second render",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
    )
    def timeraw_render_lg_subsequent(self, renderer: TemplatingRenderer):
        return prepare_templating_benchmark(renderer, "lg", "subsequent", "isolated")
    # Peak-memory benchmarks: `setup` primes the environment in-process, then
    # the measured body only calls do_render().
    @benchmark(
        pretty_name="render - small - first render (mem)",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
        setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "first", "isolated"),
    )
    def peakmem_render_sm_first(self, renderer: TemplatingRenderer):
        do_render()
    @benchmark(
        pretty_name="render - small - second render (mem)",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
        setup=lambda renderer: setup_templating_memory_benchmark(renderer, "sm", "subsequent", "isolated"),
    )
    def peakmem_render_sm_subsequent(self, renderer: TemplatingRenderer):
        do_render()
    @benchmark(
        pretty_name="render - large - first render (mem)",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
        setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "first", "isolated"),
    )
    def peakmem_render_lg_first(self, renderer: TemplatingRenderer):
        do_render()
    @benchmark(
        pretty_name="render - large - second render (mem)",
        group_name=DJC_VS_DJ_GROUP,
        number=1,
        rounds=5,
        params={
            "renderer": ["django", "django-components"],
        },
        setup=lambda renderer: setup_templating_memory_benchmark(renderer, "lg", "subsequent", "isolated"),
    )
    def peakmem_render_lg_subsequent(self, renderer: TemplatingRenderer):
        do_render()
# - Group: Django-components "isolated" vs "django" modes
# - time: Isolated vs django djc (startup lg)
# - time: Isolated vs django djc (lg - FIRST)
# - time: Isolated vs django djc (sm - FIRST)
# - time: Isolated vs django djc (lg - SUBSEQUENT)
# - time: Isolated vs django djc (sm - SUBSEQUENT)
# - mem: Isolated vs django djc (lg - FIRST)
# - mem: Isolated vs django djc (sm - FIRST)
# - mem: Isolated vs django djc (lg - SUBSEQUENT)
# - mem: Isolated vs django djc (sm - SUBSEQUENT)
class IsolatedVsDjangoContextModesTests:
    """Benchmarks comparing django-components' "isolated" vs "django" context modes.

    Same structure as the djc-vs-django group: startup time, first/subsequent
    render time, and peak memory, for small and large templates.
    """
    # Testing startup time (e.g. defining classes and templates)
    @benchmark(
        pretty_name="startup - large",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
    )
    def timeraw_startup_lg(self, context_mode: DjcContextMode):
        # timeraw_* benchmarks return the code string(s) to be timed in a subprocess.
        return prepare_templating_benchmark("django-components", "lg", "startup", context_mode)
    @benchmark(
        pretty_name="render - small - first render",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
    )
    def timeraw_render_sm_first(self, context_mode: DjcContextMode):
        return prepare_templating_benchmark("django-components", "sm", "first", context_mode)
    @benchmark(
        pretty_name="render - small - second render",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
    )
    def timeraw_render_sm_subsequent(self, context_mode: DjcContextMode):
        return prepare_templating_benchmark("django-components", "sm", "subsequent", context_mode)
    @benchmark(
        pretty_name="render - large - first render",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
    )
    def timeraw_render_lg_first(self, context_mode: DjcContextMode):
        return prepare_templating_benchmark("django-components", "lg", "first", context_mode)
    @benchmark(
        pretty_name="render - large - second render",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
    )
    def timeraw_render_lg_subsequent(self, context_mode: DjcContextMode):
        return prepare_templating_benchmark("django-components", "lg", "subsequent", context_mode)
    # Peak-memory benchmarks: `setup` primes the environment in-process, then
    # the measured body only calls do_render().
    @benchmark(
        pretty_name="render - small - first render (mem)",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
        setup=lambda context_mode: setup_templating_memory_benchmark("django-components", "sm", "first", context_mode),
    )
    def peakmem_render_sm_first(self, context_mode: DjcContextMode):
        do_render()
    @benchmark(
        pretty_name="render - small - second render (mem)",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
        setup=lambda context_mode: setup_templating_memory_benchmark(
            "django-components",
            "sm",
            "subsequent",
            context_mode,
        ),
    )
    def peakmem_render_sm_subsequent(self, context_mode: DjcContextMode):
        do_render()
    @benchmark(
        pretty_name="render - large - first render (mem)",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
        setup=lambda context_mode: setup_templating_memory_benchmark(
            "django-components",
            "lg",
            "first",
            context_mode,
        ),
    )
    def peakmem_render_lg_first(self, context_mode: DjcContextMode):
        do_render()
    @benchmark(
        pretty_name="render - large - second render (mem)",
        group_name=DJC_ISOLATED_VS_NON_GROUP,
        number=1,
        rounds=5,
        params={
            "context_mode": ["isolated", "django"],
        },
        setup=lambda context_mode: setup_templating_memory_benchmark(
            "django-components",
            "lg",
            "subsequent",
            context_mode,
        ),
    )
    def peakmem_render_lg_subsequent(self, context_mode: DjcContextMode):
        do_render()
class OtherTests:
    """Benchmarks that don't fit the comparison groups above."""
    @benchmark(
        pretty_name="import time",
        group_name=OTHER_GROUP,
        number=1,
        rounds=5,
    )
    def timeraw_import_time(self):
        # imports_only=True: measure only the cost of importing django-components,
        # without defining or rendering any templates.
        return prepare_templating_benchmark("django-components", "lg", "startup", "isolated", imports_only=True)

View file

@ -0,0 +1,170 @@
from time import perf_counter
from django.template import Context, Template
from django.test import override_settings
from django_components import component
from django_components.middleware import (
CSS_DEPENDENCY_PLACEHOLDER,
JS_DEPENDENCY_PLACEHOLDER,
)
from tests.django_test_setup import * # NOQA
from tests.testutils import Django30CompatibleSimpleTestCase as SimpleTestCase
from tests.testutils import create_and_process_template_response
class SlottedComponent(component.Component):
    # Outer component used in the render tests; its template declares slots.
    template_name = "slotted_template.html"
class SimpleComponent(component.Component):
    """Minimal component with one required and one defaulted template variable."""
    template_name = "simple_template.html"
    def get_context_data(self, variable, variable2="default"):
        return {
            "variable": variable,
            "variable2": variable2,
        }
    class Media:
        css = {"all": ["style.css"]}
        js = ["script.js"]
class BreadcrumbComponent(component.Component):
    """Breadcrumb component rendering up to four hard-coded MDN links."""
    template_name = "mdn_component_template.html"
    # Static (url, title) pairs used as breadcrumb entries.
    LINKS = [
        (
            "https://developer.mozilla.org/en-US/docs/Learn",
            "Learn web development",
        ),
        (
            "https://developer.mozilla.org/en-US/docs/Learn/HTML",
            "Structuring the web with HTML",
        ),
        (
            "https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML",
            "Introduction to HTML",
        ),
        (
            "https://developer.mozilla.org/en-US/docs/Learn/HTML/Introduction_to_HTML/Document_and_website_structure",
            "Document and website structure",
        ),
    ]
    def get_context_data(self, items):
        # Clamp the requested item count into [0, 4].
        if items > 4:
            items = 4
        elif items < 0:
            items = 0
        # NOTE(review): the `items - 1` slice looks off-by-one — e.g. items=0
        # yields LINKS[:-1] (three links) rather than none, and items=4 yields
        # only three. Confirm whether this is the intended benchmark load.
        return {"links": self.LINKS[: items - 1]}
    class Media:
        css = {"all": ["test.css"]}
        js = ["test.js"]
EXPECTED_CSS = """<link href="test.css" media="all" rel="stylesheet">"""
EXPECTED_JS = """<script src="test.js"></script>"""
@override_settings(COMPONENTS={"RENDER_DEPENDENCIES": True})
class RenderBenchmarks(SimpleTestCase):
    """Timing comparisons for component rendering and the dependency middleware.

    Results are printed to stdout rather than asserted; the assert* calls only
    sanity-check that dependency injection actually happened.
    """
    def setUp(self):
        # Re-register the components under test into a clean global registry.
        component.registry.clear()
        component.registry.register("test_component", SlottedComponent)
        component.registry.register("inner_component", SimpleComponent)
        component.registry.register(
            "breadcrumb_component", BreadcrumbComponent
        )
    @staticmethod
    def timed_loop(func, iterations=1000):
        """Run func iterations times, and return the time in ms per iteration."""
        start_time = perf_counter()
        for _ in range(iterations):
            func()
        end_time = perf_counter()
        total_elapsed = end_time - start_time  # NOQA
        return total_elapsed * 1000 / iterations
    def test_render_time_for_small_component(self):
        # Times a component with one filled slot containing a nested component.
        template = Template(
            "{% load component_tags %}{% component_block 'test_component' %}"
            "{% slot \"header\" %}{% component 'inner_component' variable='foo' %}{% endslot %}"
            "{% endcomponent_block %}",
            name="root",
        )
        print(
            f"{self.timed_loop(lambda: template.render(Context({})))} ms per iteration"
        )
    def test_middleware_time_with_dependency_for_small_page(self):
        template = Template(
            "{% load component_tags %}{% component_dependencies %}"
            "{% component_block 'test_component' %}{% slot \"header\" %}"
            "{% component 'inner_component' variable='foo' %}{% endslot %}{% endcomponent_block %}",
            name="root",
        )
        # Sanity tests: placeholders must be replaced by the real CSS/JS tags.
        response_content = create_and_process_template_response(template)
        self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content)
        self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content)
        self.assertIn("style.css", response_content)
        self.assertIn("script.js", response_content)
        without_middleware = self.timed_loop(
            lambda: create_and_process_template_response(
                template, use_middleware=False
            )
        )
        with_middleware = self.timed_loop(
            lambda: create_and_process_template_response(
                template, use_middleware=True
            )
        )
        print("Small page middleware test")
        self.report_results(with_middleware, without_middleware)
    def test_render_time_with_dependency_for_large_page(self):
        from django.template.loader import get_template
        template = get_template("mdn_complete_page.html")
        # Sanity tests: placeholders must be replaced by the real CSS/JS tags.
        response_content = create_and_process_template_response(template, {})
        self.assertNotIn(CSS_DEPENDENCY_PLACEHOLDER, response_content)
        self.assertNotIn(JS_DEPENDENCY_PLACEHOLDER, response_content)
        self.assertIn("test.css", response_content)
        self.assertIn("test.js", response_content)
        without_middleware = self.timed_loop(
            lambda: create_and_process_template_response(
                template, {}, use_middleware=False
            )
        )
        with_middleware = self.timed_loop(
            lambda: create_and_process_template_response(
                template, {}, use_middleware=True
            )
        )
        print("Large page middleware test")
        self.report_results(with_middleware, without_middleware)
    @staticmethod
    def report_results(with_middleware, without_middleware):
        """Print per-iteration timings and the relative middleware overhead."""
        print(f"Middleware active\t\t{with_middleware:.3f} ms per iteration")
        print(
            f"Middleware inactive\t{without_middleware:.3f} ms per iteration"
        )
        time_difference = with_middleware - without_middleware
        if without_middleware > with_middleware:
            print(
                f"Decrease of {-100 * time_difference / with_middleware:.2f}%"
            )
        else:
            print(
                f"Increase of {100 * time_difference / without_middleware:.2f}%"
            )

View file

@ -1,29 +0,0 @@
from asv_runner.benchmarks.timeraw import TimerawBenchmark, _SeparateProcessTimer
# Fix for https://github.com/airspeed-velocity/asv_runner/pull/44
def _get_timer(self, *param):
    """
    Build a timer that executes the benchmark function in a separate process.

    Backport of https://github.com/airspeed-velocity/asv_runner/pull/44:
    unlike upstream, the parametrized wrapper RETURNS the benchmark's result,
    so `timeraw_` benchmarks can hand back the code string to be timed.

    #### Parameters
    **param** (`tuple`)
    : The parameters to pass to the benchmark function.

    #### Returns
    **timer** (`_SeparateProcessTimer`)
    : A timer that runs the function in a separate process.
    """
    if not param:
        return _SeparateProcessTimer(self.func)

    def parametrized():
        # The `return` here is the whole point of this monkeypatch.
        return self.func(*param)

    return _SeparateProcessTimer(parametrized)


# Install the patched implementation over the upstream one.
TimerawBenchmark._get_timer = _get_timer

View file

@ -1,66 +0,0 @@
# ------------ FIX FOR #45 ------------
# See https://github.com/airspeed-velocity/asv_runner/issues/45
# This fix is applied in CI in the `benchmark.yml` file.
# This file is intentionally named `monkeypatch_asv_ci.txt` to avoid being
# loaded as a python file by `asv`.
# -------------------------------------
def timeit(self, number):
    """
    Run the function's code `number` times in a separate Python process, and
    return the execution time.
    #### Parameters
    **number** (`int`)
    : The number of times to execute the function's code.
    #### Returns
    **time** (`float`)
    : The time it took to execute the function's code `number` times.
    #### Notes
    The function's code is executed in a separate Python process to avoid
    interference from the parent process. The function can return either a
    single string of code to be executed, or a tuple of two strings: the
    code to be executed and the setup code to be run before timing.
    NOTE: this body is exec'd into asv_runner's module as a monkeypatch (see
    the header comments); `textwrap`, `subprocess` and `sys` are expected to
    resolve in that module's namespace.
    """
    stmt = self.func()
    if isinstance(stmt, tuple):
        stmt, setup = stmt
    else:
        setup = ""
    stmt = textwrap.dedent(stmt)
    setup = textwrap.dedent(setup)
    # Triple quotes are escaped because the code gets embedded into a
    # triple-quoted region of `subprocess_tmpl`.
    stmt = stmt.replace(r'"""', r"\"\"\"")
    setup = setup.replace(r'"""', r"\"\"\"")
    # -----------ORIGINAL CODE-----------
    # code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
    # res = subprocess.check_output([sys.executable, "-c", code])
    # return float(res.strip())
    # -----------NEW CODE-----------
    # Feed the generated code via stdin instead of argv, so large benchmark
    # bodies don't hit OS limits on command-line length (issue #45).
    code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
    evaler = textwrap.dedent(
        """
        import sys
        code = sys.stdin.read()
        exec(code)
        """
    )
    proc = subprocess.Popen([sys.executable, "-c", evaler],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(input=code.encode("utf-8"))
    if proc.returncode != 0:
        raise RuntimeError(f"Subprocess failed: {stderr.decode()}")
    # The timed subprocess prints a single float (seconds) on stdout.
    return float(stdout.decode("utf-8").strip())
_SeparateProcessTimer.timeit = timeit
# ------------ END FIX #45 ------------

View file

@ -1,99 +0,0 @@
import os
import sys
from importlib.abc import Loader
from importlib.util import spec_from_loader, module_from_spec
from types import ModuleType
from typing import Any, Dict, List, Optional
# NOTE: benchmark_name constraints:
# - MUST BE UNIQUE
# - MUST NOT CONTAIN `-`
# - MUST START WITH `time_`, `mem_`, `peakmem_`
# See https://github.com/airspeed-velocity/asv/pull/1470
def benchmark(
    *,
    pretty_name: Optional[str] = None,
    timeout: Optional[int] = None,
    group_name: Optional[str] = None,
    params: Optional[Dict[str, List[Any]]] = None,
    number: Optional[int] = None,
    min_run_count: Optional[int] = None,
    include_in_quick_benchmark: bool = False,
    **kwargs,
):
    """Decorator attaching asv benchmark metadata to a function as attributes.

    Custom fields are normalized to what asv expects: `group_name` is folded
    into asv's `benchmark_name`, and `params` (a dict of param-name -> list of
    values) is split into asv's `params` / `param_names` pair. Any extra
    keyword arguments are attached to the function verbatim.

    When the DJC_BENCHMARK_QUICK env var is set (pull-request builds),
    benchmarks not flagged with `include_in_quick_benchmark` are renamed to
    "noop" so asv ignores them (asv only picks up names starting with
    `time_`, `mem_`, or `peakmem_`).
    """
    def decorator(func):
        # Quick mode: hide this benchmark from asv via an unrecognized name.
        if os.getenv("DJC_BENCHMARK_QUICK") and not include_in_quick_benchmark:
            func.benchmark_name = "noop"
            return func
        # Our custom "group_name" becomes part of asv's "benchmark_name".
        if group_name is not None:
            func.benchmark_name = f"{group_name}.{func.__name__}"
        # Our custom "params" dict maps onto asv's params/param_names pair.
        if params is not None:
            func.params = list(params.values())
            func.param_names = list(params.keys())
        # Optional scalar attributes, attached only when explicitly given.
        for attr_name, attr_value in (
            ("pretty_name", pretty_name),
            ("timeout", timeout),
            ("number", number),
            ("min_run_count", min_run_count),
        ):
            if attr_value is not None:
                setattr(func, attr_name, attr_value)
        # Additional, untyped kwargs
        for key, value in kwargs.items():
            setattr(func, key, value)
        return func
    return decorator
class VirtualModuleLoader(Loader):
    """importlib Loader that executes a source string in a module's namespace."""

    def __init__(self, code_string):
        # Raw Python source; compiled and run when the module is executed.
        self.code_string = code_string

    def exec_module(self, module):
        # Run the stored source with the target module's __dict__ as globals.
        exec(self.code_string, vars(module))
def create_virtual_module(name: str, code_string: str, file_path: str) -> ModuleType:
    """
    Build and register an importable module directly from a source string.

    This sidesteps the headaches of importing tested code from another
    directory:

    ```py
    from benchmarks.utils import create_virtual_module
    create_virtual_module("my_module", "print('Hello, world!')", __file__)
    # Now you can import my_module from anywhere
    import my_module
    ```
    """
    # Spec backed by a loader that will exec `code_string`.
    module_spec = spec_from_loader(name, VirtualModuleLoader(code_string))
    # Materialize the (still empty) module object from the spec.
    virtual_module = module_from_spec(module_spec)  # type: ignore[arg-type]
    virtual_module.__file__ = file_path
    virtual_module.__name__ = name
    # Register before executing, so the code can import itself if needed.
    sys.modules[name] = virtual_module
    # Run the source inside the module's namespace.
    module_spec.loader.exec_module(virtual_module)  # type: ignore[union-attr]
    return virtual_module

View file

@ -0,0 +1,17 @@
from importlib import import_module
import django
from django.utils.module_loading import autodiscover_modules
if django.VERSION < (3, 2):
default_app_config = "django_components.apps.ComponentsConfig"
def autodiscover():
    """Discover and import component modules across the Django project."""
    # look for "components" module/pkg in each app
    from . import app_settings
    if app_settings.AUTODISCOVER:
        autodiscover_modules("components")
    # Eagerly import any component libraries listed explicitly in settings.
    for path in app_settings.LIBRARIES:
        import_module(path)

View file

@ -0,0 +1,25 @@
import sys
from django.conf import settings
class AppSettings:
    """Typed accessor for the COMPONENTS dict in Django settings."""
    def __init__(self):
        # Reference to settings.COMPONENTS (or a fresh dict if absent),
        # captured once at instantiation time.
        self.settings = getattr(settings, "COMPONENTS", {})
    @property
    def AUTODISCOVER(self):
        # NOTE: setdefault also writes the default back into the dict on
        # first read, so later reads see a consistent value.
        return self.settings.setdefault("autodiscover", True)
    @property
    def LIBRARIES(self):
        return self.settings.setdefault("libraries", [])
    @property
    def TEMPLATE_CACHE_SIZE(self):
        return self.settings.setdefault("template_cache_size", 128)
app_settings = AppSettings()
app_settings.__name__ = __name__  # masquerade as the module being replaced
# Module-swap trick: importing this module now yields the AppSettings
# instance, so settings are read as plain attributes (e.g. app_settings.AUTODISCOVER).
sys.modules[__name__] = app_settings

View file

@ -0,0 +1,8 @@
from django.apps import AppConfig
class ComponentsConfig(AppConfig):
    """AppConfig that triggers component autodiscovery once Django is ready."""
    name = "django_components"
    def ready(self):
        # self.module is the django_components package; run its autodiscover().
        self.module.autodiscover()

View file

@ -0,0 +1,179 @@
import warnings
from functools import lru_cache
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.forms.widgets import MediaDefiningClass
from django.template.base import Node, TokenType
from django.template.loader import get_template
from django.utils.safestring import mark_safe
# Allow "component.AlreadyRegistered" instead of having to import these everywhere
from django_components.component_registry import ( # noqa
AlreadyRegistered,
ComponentRegistry,
NotRegistered,
)
TEMPLATE_CACHE_SIZE = getattr(settings, "COMPONENTS", {}).get(
"TEMPLATE_CACHE_SIZE", 128
)
ACTIVE_SLOT_CONTEXT_KEY = "_DJANGO_COMPONENTS_ACTIVE_SLOTS"
class SimplifiedInterfaceMediaDefiningClass(MediaDefiningClass):
    """Metaclass that normalizes shorthand ``Media`` declarations.

    Rewrites the component's inner ``Media`` class into the shape Django's
    media machinery expects:

    - ``css = "style.css"``          -> ``css = {"all": ["style.css"]}``
    - ``css = ["style.css"]``        -> ``css = {"all": ["style.css"]}``
    - ``css = {"all": "style.css"}`` -> ``css = {"all": ["style.css"]}``
    - ``js = "script.js"``           -> ``js = ["script.js"]``
    """

    def __new__(mcs, name, bases, attrs):
        if "Media" in attrs:
            media = attrs["Media"]
            # Fix: guard attribute access — a Media class that declares only
            # `js` (or only `css`) previously raised AttributeError here.
            if hasattr(media, "css"):
                # Allow: class Media: css = "style.css"
                if isinstance(media.css, str):
                    media.css = [media.css]
                # Allow: class Media: css = ["style.css"]
                if isinstance(media.css, list):
                    media.css = {"all": media.css}
                # Allow: class Media: css = {"all": "style.css"}
                if isinstance(media.css, dict):
                    for media_type, path_list in media.css.items():
                        if isinstance(path_list, str):
                            media.css[media_type] = [path_list]
            # Allow: class Media: js = "script.js"
            if hasattr(media, "js") and isinstance(media.js, str):
                media.js = [media.js]
        return super().__new__(mcs, name, bases, attrs)
class Component(metaclass=SimplifiedInterfaceMediaDefiningClass):
    """Base class for components. Subclasses set ``template_name`` and may
    override ``get_context_data()``; ``slots`` are filled by the template tags."""
    # Template file to render; must be set by subclasses (see get_template_name).
    template_name = None
    def __init__(self, component_name):
        # Name under which this component was registered / invoked.
        self._component_name = component_name
        self.instance_template = None
        # slot name -> overriding nodelist, populated by ComponentNode.render.
        self.slots = {}
    def get_context_data(self):
        """Hook for subclasses: values merged into the template context."""
        return {}
    def get_template_name(self, context=None):
        """Return ``template_name``, raising if the subclass never set it."""
        if not self.template_name:
            raise ImproperlyConfigured(
                f"Template name is not set for Component {self.__class__.__name__}"
            )
        return self.template_name
    def render_dependencies(self):
        """Helper function to access media.render()"""
        return self.media.render()
    def render_css_dependencies(self):
        """Render only CSS dependencies available in the media class."""
        return mark_safe("\n".join(self.media.render_css()))
    def render_js_dependencies(self):
        """Render only JS dependencies available in the media class."""
        return mark_safe("\n".join(self.media.render_js()))
    @staticmethod
    def slots_in_template(template):
        # Map slot name -> nodelist for every top-level {% slot %} node.
        return {
            node.name: node.nodelist
            for node in template.template.nodelist
            if Component.is_slot_node(node)
        }
    @staticmethod
    def is_slot_node(node):
        # A node is a slot if it is a block token whose tag name is "slot".
        return (
            isinstance(node, Node)
            and node.token.token_type == TokenType.BLOCK
            and node.token.split_contents()[0] == "slot"
        )
    # NOTE(review): lru_cache on an instance method keys the cache on
    # (self, template_name) and keeps component instances alive for the
    # cache's lifetime — confirm this retention is acceptable.
    @lru_cache(maxsize=TEMPLATE_CACHE_SIZE)
    def get_processed_template(self, template_name):
        """Retrieve the requested template and check for unused slots."""
        component_template = get_template(template_name).template
        # Traverse template nodes and descendants
        visited_nodes = set()
        nodes_to_visit = list(component_template.nodelist)
        slots_seen = set()
        while nodes_to_visit:
            current_node = nodes_to_visit.pop()
            if current_node in visited_nodes:
                continue
            visited_nodes.add(current_node)
            for nodelist_name in current_node.child_nodelists:
                nodes_to_visit.extend(getattr(current_node, nodelist_name, []))
            if self.is_slot_node(current_node):
                slots_seen.add(current_node.name)
        # Check and warn for unknown slots
        if settings.DEBUG:
            filled_slot_names = set(self.slots.keys())
            unused_slots = filled_slot_names - slots_seen
            if unused_slots:
                warnings.warn(
                    "Component {} was provided with slots that were not used in a template: {}".format(
                        self._component_name, unused_slots
                    )
                )
        return component_template
    def render(self, context):
        # Deprecation shims for the pre-`template_name` API.
        if hasattr(self, "context"):
            warnings.warn(
                f"{self.__class__.__name__}: `context` method is deprecated, use `get_context` instead",
                DeprecationWarning,
            )
        if hasattr(self, "template"):
            warnings.warn(
                f"{self.__class__.__name__}: `template` method is deprecated, \
set `template_name` or override `get_template_name` instead",
                DeprecationWarning,
            )
            template_name = self.template(context)
        else:
            template_name = self.get_template_name(context)
        instance_template = self.get_processed_template(template_name)
        # Expose the filled slots to SlotNodes rendered inside this template.
        with context.update({ACTIVE_SLOT_CONTEXT_KEY: self.slots}):
            return instance_template.render(context)
    class Media:
        # Defaults so the media machinery always finds css/js attributes.
        css = {}
        js = []
# The single, global component registry shared by the template tags.
registry = ComponentRegistry()


def register(name):
    """Class decorator that registers a component class under ``name``.

    Usage:

        @register("my_component")
        class MyComponent(component.Component):
            ...
    """

    def decorator(component_class):
        registry.register(name=name, component=component_class)
        return component_class

    return decorator

View file

@ -0,0 +1,36 @@
class AlreadyRegistered(Exception):
    """Raised when registering a component name that is already taken."""
    pass


class NotRegistered(Exception):
    """Raised when looking up or removing an unknown component name."""
    pass


class ComponentRegistry(object):
    """Mutable mapping of component names to component classes."""

    def __init__(self):
        # component name -> component_class mapping
        self._registry = {}

    def register(self, name=None, component=None):
        """Add ``component`` under ``name``; reject duplicate names."""
        if name in self._registry:
            raise AlreadyRegistered('The component "%s" is already registered' % name)
        self._registry[name] = component

    def unregister(self, name):
        """Remove ``name``; raises NotRegistered (via get) if unknown."""
        self.get(name)
        del self._registry[name]

    def get(self, name):
        """Return the class registered under ``name`` or raise NotRegistered."""
        if name not in self._registry:
            raise NotRegistered('The component "%s" is not registered' % name)
        return self._registry[name]

    def all(self):
        """Return the underlying name -> class mapping."""
        return self._registry

    def clear(self):
        """Drop every registration."""
        self._registry = {}

View file

@ -0,0 +1,91 @@
import re
from django.conf import settings
from django.forms import Media
from django.http import StreamingHttpResponse
RENDERED_COMPONENTS_CONTEXT_KEY = "_COMPONENT_DEPENDENCIES"
CSS_DEPENDENCY_PLACEHOLDER = '<link name="CSS_PLACEHOLDER">'
JS_DEPENDENCY_PLACEHOLDER = '<script name="JS_PLACEHOLDER">'
SCRIPT_TAG_REGEX = re.compile("<script")
COMPONENT_COMMENT_REGEX = re.compile(rb"<!-- _RENDERED (?P<name>\w+?) -->")
PLACEHOLDER_REGEX = re.compile(
rb"<!-- _RENDERED (?P<name>\w+?) -->"
rb'|<link name="CSS_PLACEHOLDER">'
rb'|<script name="JS_PLACEHOLDER">'
)
class ComponentDependencyMiddleware:
    """Middleware that inserts CSS/JS dependencies for all rendered components at points marked with template tags."""
    dependency_regex = COMPONENT_COMMENT_REGEX
    def __init__(self, get_response):
        self.get_response = get_response
    def __call__(self, request):
        response = self.get_response(request)
        # Only post-process non-streaming HTML responses, and only when the
        # RENDER_DEPENDENCIES flag is enabled in settings.COMPONENTS.
        # NOTE(review): assumes a Content-Type header is always present on the
        # response — confirm for responses produced without one.
        if (
            getattr(settings, "COMPONENTS", {}).get(
                "RENDER_DEPENDENCIES", False
            )
            and not isinstance(response, StreamingHttpResponse)
            and response["Content-Type"].startswith("text/html")
        ):
            response.content = process_response_content(response.content)
        return response
def process_response_content(content):
    """Inject CSS/JS dependency tags for every component rendered in ``content``.

    ``content`` is the response body as bytes. Rendered components announce
    themselves via ``<!-- _RENDERED name -->`` comments; their combined media
    replaces the first CSS/JS placeholder, and all markers plus any extra
    placeholders are stripped.
    """
    from django_components.component import registry

    # Distinct component names announced in the rendered HTML (as bytes).
    seen_names = {
        match.group("name")
        for match in COMPONENT_COMMENT_REGEX.finditer(content)
    }
    components = [
        registry.get(name.decode("utf-8"))("") for name in seen_names
    ]

    combined_media = join_media(components)
    js_bytes = b"".join(
        tag.encode("utf-8") for tag in combined_media.render_js()
    )
    css_bytes = b"".join(
        tag.encode("utf-8") for tag in combined_media.render_css()
    )

    replacer = DependencyReplacer(css_bytes, js_bytes)
    return PLACEHOLDER_REGEX.sub(replacer, content)
def add_module_attribute_to_scripts(scripts):
    """Rewrite every opening ``<script`` tag to declare ``type="module"``."""
    return SCRIPT_TAG_REGEX.sub('<script type="module"', scripts)
class DependencyReplacer:
    """Replacer for use in re.sub that replaces the first placeholder CSS and JS
    tags it encounters and removes any subsequent ones."""
    # Placeholder byte strings compared against each regex hit.
    CSS_PLACEHOLDER = bytes(CSS_DEPENDENCY_PLACEHOLDER, encoding="utf-8")
    JS_PLACEHOLDER = bytes(JS_DEPENDENCY_PLACEHOLDER, encoding="utf-8")
    def __init__(self, css_string, js_string):
        # Pending replacement payloads; each is blanked after its first use.
        self.js_string = js_string
        self.css_string = css_string
    def __call__(self, match):
        # match[0] is the full matched byte string: a CSS/JS placeholder or a
        # `<!-- _RENDERED ... -->` marker (PLACEHOLDER_REGEX alternation).
        if match[0] == self.CSS_PLACEHOLDER:
            # Emit the CSS once; later CSS placeholders become empty.
            replacement, self.css_string = self.css_string, b""
        elif match[0] == self.JS_PLACEHOLDER:
            # Emit the JS once; later JS placeholders become empty.
            replacement, self.js_string = self.js_string, b""
        else:
            # Rendered-component markers are simply stripped.
            replacement = b""
        return replacement
def join_media(components):
    """Return combined media object for an iterable of components."""
    combined = Media()
    for component in components:
        combined = combined + component.media
    return combined

View file

@ -0,0 +1,26 @@
"""
Template loader that loads templates from each Django app's "components" directory.
"""
from pathlib import Path
from django.conf import settings
from django.template.loaders.filesystem import Loader as FilesystemLoader
from django.template.utils import get_app_template_dirs
class Loader(FilesystemLoader):
    """Filesystem template loader that also searches "components" directories."""
    def get_dirs(self):
        component_dir = "components"
        # "components" directory inside every installed Django app.
        directories = list(get_app_template_dirs(component_dir))
        # Also look next to (and one level above) the settings module.
        # NOTE(review): this assumes SETTINGS_MODULE maps onto a path relative
        # to the current working directory — confirm for this deployment.
        settings_path = Path(*settings.SETTINGS_MODULE.split("."))
        if (path := (settings_path / ".." / component_dir).resolve()).is_dir():
            directories.append(path)
        if (
            path := (settings_path / ".." / ".." / component_dir).resolve()
        ).is_dir():
            directories.append(path)
        return directories

View file

@ -0,0 +1,388 @@
from collections import defaultdict
from django import template
from django.conf import settings
from django.template.base import Node, NodeList, TemplateSyntaxError, TokenType
from django.template.library import parse_bits
from django.utils.safestring import mark_safe
from django_components.component import ACTIVE_SLOT_CONTEXT_KEY, registry
from django_components.middleware import (
CSS_DEPENDENCY_PLACEHOLDER,
JS_DEPENDENCY_PLACEHOLDER,
)
register = template.Library()
RENDERED_COMMENT_TEMPLATE = "<!-- _RENDERED {name} -->"
def get_components_from_registry(registry):
    """Return one instance of every distinct component class in ``registry``.

    A class registered under several names is instantiated only once; each
    instance is constructed with the class name as its component name.
    """
    distinct_classes = set(registry.all().values())
    return [cls(cls.__name__) for cls in distinct_classes]
def get_components_from_preload_str(preload_str):
    """Return component instances for each name in a comma-separated string.

    Empty segments (e.g. trailing commas or blank entries) are skipped; each
    remaining name is resolved through the global registry.
    """
    components = []
    for raw_name in preload_str.split(","):
        name = raw_name.strip()
        if not name:
            continue
        components.append(registry.get(name)(name))
    return components
@register.simple_tag(name="component_dependencies")
def component_dependencies_tag(preload=""):
    """Marks location where CSS link and JS script tags should be rendered."""
    if is_dependency_middleware_active():
        # Middleware mode: emit preload markers plus both placeholders; the
        # middleware swaps them for real tags after rendering.
        placeholders = [
            RENDERED_COMMENT_TEMPLATE.format(name=component._component_name)
            for component in get_components_from_preload_str(preload)
        ]
        return mark_safe(
            "\n".join(placeholders)
            + CSS_DEPENDENCY_PLACEHOLDER
            + JS_DEPENDENCY_PLACEHOLDER
        )
    # No middleware: render every registered component's media inline.
    rendered = [
        component.render_dependencies()
        for component in get_components_from_registry(registry)
    ]
    return mark_safe("\n".join(rendered))
@register.simple_tag(name="component_css_dependencies")
def component_css_dependencies_tag(preload=""):
    """Marks location where CSS link tags should be rendered."""
    if is_dependency_middleware_active():
        # Middleware mode: emit preload markers plus the CSS placeholder.
        placeholders = [
            RENDERED_COMMENT_TEMPLATE.format(name=component._component_name)
            for component in get_components_from_preload_str(preload)
        ]
        return mark_safe("\n".join(placeholders) + CSS_DEPENDENCY_PLACEHOLDER)
    # No middleware: render every registered component's CSS inline.
    rendered = [
        component.render_css_dependencies()
        for component in get_components_from_registry(registry)
    ]
    return mark_safe("\n".join(rendered))
@register.simple_tag(name="component_js_dependencies")
def component_js_dependencies_tag(preload=""):
    """Marks location where JS script tags should be rendered."""
    if is_dependency_middleware_active():
        # Middleware mode: emit preload markers plus the JS placeholder.
        placeholders = [
            RENDERED_COMMENT_TEMPLATE.format(name=component._component_name)
            for component in get_components_from_preload_str(preload)
        ]
        return mark_safe("\n".join(placeholders) + JS_DEPENDENCY_PLACEHOLDER)
    # No middleware: render every registered component's JS inline.
    rendered = [
        component.render_js_dependencies()
        for component in get_components_from_registry(registry)
    ]
    return mark_safe("\n".join(rendered))
@register.tag(name="component")
def do_component(parser, token):
    """Parse ``{% component "name" arg kwarg=val [only] %}`` into a ComponentNode."""
    bits = token.split_contents()
    # A trailing "only" keyword requests rendering in an isolated context.
    bits, isolated_context = check_for_isolated_context_keyword(bits)
    component_name, context_args, context_kwargs = parse_component_with_args(
        parser, bits, "component"
    )
    return ComponentNode(
        component_name,
        context_args,
        context_kwargs,
        isolated_context=isolated_context,
    )
class SlotNode(Node):
    """Node for ``{% slot %}``: renders the overriding nodelist supplied by the
    parent component, or its own default content when no override exists."""

    def __init__(self, name, nodelist):
        self.name, self.nodelist = name, nodelist
        self.parent_component = None
        self.context = None

    def __repr__(self):
        return "<Slot Node: %s. Contents: %r>" % (self.name, self.nodelist)

    def render(self, context):
        # Thread safety: storing the context as a property of the cloned SlotNode without using
        # the render_context facility should be thread-safe, since each cloned_node
        # is only used for a single render.
        cloned_node = SlotNode(self.name, self.nodelist)
        cloned_node.parent_component = self.parent_component
        cloned_node.context = context
        # Expose the clone as `slot` so templates can call {{ slot.super }}.
        with context.update({"slot": cloned_node}):
            return self.get_nodelist(context).render(context)

    def get_nodelist(self, context):
        """Return the overriding nodelist for this slot, or the default one."""
        if ACTIVE_SLOT_CONTEXT_KEY not in context:
            # Fix: the original message concatenated "...will not" + "work..."
            # without a space, producing "This will notwork properly."
            raise TemplateSyntaxError(
                f"Attempted to render SlotNode {self.name} outside of a parent Component or "
                "without access to context provided by its parent Component. This will not "
                "work properly."
            )
        overriding_nodelist = context[ACTIVE_SLOT_CONTEXT_KEY].get(
            self.name, None
        )
        return (
            overriding_nodelist
            if overriding_nodelist is not None
            else self.nodelist
        )

    def super(self):
        """Render default slot content."""
        return mark_safe(self.nodelist.render(self.context))
@register.tag("slot")
def do_slot(parser, token):
    """Parse a ``{% slot "name" %}...{% endslot %}`` block into a SlotNode."""
    bits = token.split_contents()
    # Exactly one argument (the slot name) is allowed after the tag name.
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
    slot_name = bits[1].strip('"')
    contents = parser.parse(parse_until=["endslot"])
    parser.delete_first_token()
    return SlotNode(slot_name, contents)
class ComponentNode(Node):
    """Node that instantiates a registered component and renders its template."""

    class InvalidSlot:
        # Stand-in for `slot` outside a {% slot %} block: any super() call errors.
        def super(self):
            raise TemplateSyntaxError(
                "slot.super may only be called within a {% slot %}/{% endslot %} block."
            )

    def __init__(
        self,
        component_name,
        context_args,
        context_kwargs,
        slots=None,
        isolated_context=False,
    ):
        self.context_args = context_args or []
        self.context_kwargs = context_kwargs or {}
        self.component_name, self.isolated_context = (
            component_name,
            isolated_context,
        )
        # SlotNodes filling this component's slots (set by component_block parsing).
        self.slots = slots

    def __repr__(self):
        # Fix: __init__ never sets self.nodelist, so the original repr raised
        # AttributeError; fall back to None when the attribute is absent.
        return "<Component Node: %s. Contents: %r>" % (
            self.component_name,
            getattr(self, "nodelist", None),
        )

    def render(self, context):
        # The component name may itself be a template variable or quoted literal.
        component_name = template.Variable(self.component_name).resolve(
            context
        )
        component_class = registry.get(component_name)
        component = component_class(component_name)
        # Group slot notes by name and concatenate their nodelists
        component.slots = defaultdict(NodeList)
        for slot in self.slots or []:
            component.slots[slot.name].extend(slot.nodelist)
        component.outer_context = context.flatten()
        # Resolve FilterExpressions and Variables that were passed as args to the component, then call component's
        # context method to get values to insert into the context
        resolved_context_args = [
            safe_resolve(arg, context) for arg in self.context_args
        ]
        resolved_context_kwargs = {
            key: safe_resolve(kwarg, context)
            for key, kwarg in self.context_kwargs.items()
        }
        component_context = component.get_context_data(
            *resolved_context_args, **resolved_context_kwargs
        )
        # Create a fresh context if requested
        if self.isolated_context:
            context = context.new()
        with context.update(component_context):
            rendered_component = component.render(context)
        if is_dependency_middleware_active():
            # Prepend the marker the middleware uses to discover rendered components.
            return (
                RENDERED_COMMENT_TEMPLATE.format(
                    name=component._component_name
                )
                + rendered_component
            )
        else:
            return rendered_component
@register.tag(name="component_block")
def do_component_block(parser, token):
    """
    To give the component access to the template context:
        {% component_block "name" positional_arg keyword_arg=value ... %}
    To render the component in an isolated context:
        {% component_block "name" positional_arg keyword_arg=value ... only %}
    Positional and keyword arguments can be literals or template variables.
    The component name must be a single- or double-quoted string and must
    be either the first positional argument or, if there are no positional
    arguments, passed as 'name'.
    """
    bits = token.split_contents()
    # Strip a trailing 'only' keyword, which requests an isolated context.
    bits, isolated_context = check_for_isolated_context_keyword(bits)
    component_name, context_args, context_kwargs = parse_component_with_args(
        parser, bits, "component_block"
    )
    # NOTE: slot_tokens() advances the parser up to and including the
    # matching endcomponent_block tag, and do_slot() consumes each slot's
    # body, so this list comprehension must be evaluated eagerly and in order.
    return ComponentNode(
        component_name,
        context_args,
        context_kwargs,
        slots=[
            do_slot(parser, slot_token) for slot_token in slot_tokens(parser)
        ],
        isolated_context=isolated_context,
    )
def slot_tokens(parser):
    """Yield each 'slot' token appearing before the next 'endcomponent_block' token.
    Raises TemplateSyntaxError if there are other content tokens or if there is no endcomponent_block token."""
    while True:
        try:
            token = parser.next_token()
        except IndexError:
            raise TemplateSyntaxError("Unclosed component_block tag")
        if token.token_type == TokenType.BLOCK:
            tag_name = token.split_contents()[0]
            if tag_name == "endcomponent_block":
                # End of the component block: stop the generator.
                return
            if tag_name == "slot":
                yield token
                continue
        # Anything that is not blank text or a template comment is illegal
        # between slots inside a component block.
        is_blank_text = (
            token.token_type == TokenType.TEXT and not token.contents.strip()
        )
        if not is_blank_text and token.token_type != TokenType.COMMENT:
            raise TemplateSyntaxError(
                f"Content tokens in component blocks must be inside of slot tags: {token}"
            )
def check_for_isolated_context_keyword(bits):
    """Return True and strip the last word if token ends with 'only' keyword."""
    ends_with_only = bits[-1] == "only"
    return (bits[:-1], True) if ends_with_only else (bits, False)
def parse_component_with_args(parser, bits, tag_name):
    """Parse a component tag's token bits.

    Returns a ``(component_name, context_args, context_kwargs)`` triple.
    The component name is taken from the first positional argument or,
    failing that, from the 'name' keyword argument.

    Raises TemplateSyntaxError if no component name was supplied.
    """
    tag_args, tag_kwargs = parse_bits(
        parser=parser,
        bits=bits,
        params=["tag_name", "name"],
        takes_context=False,
        name=tag_name,
        varargs=True,
        varkw=[],
        defaults=None,
        kwonly=[],
        kwonly_defaults=None,
    )
    assert (
        tag_name == tag_args[0].token
    ), "Internal error: Expected tag_name to be {}, but it was {}".format(
        tag_name, tag_args[0].token
    )
    if (
        len(tag_args) > 1
    ):  # At least one positional arg, so take the first as the component name
        component_name = tag_args[1].token
        context_args = tag_args[2:]
        context_kwargs = tag_kwargs
    else:  # No positional args, so look for component name as keyword arg
        try:
            component_name = tag_kwargs.pop("name").token
        except KeyError:
            # BUG FIX: dict.pop raises KeyError, not IndexError, so the
            # original handler never caught a missing 'name' argument.
            raise TemplateSyntaxError(
                "Call the '%s' tag with a component name as the first parameter"
                % tag_name
            )
        context_args = []
        context_kwargs = tag_kwargs
    return component_name, context_args, context_kwargs
def safe_resolve(context_item, context):
    """Resolve FilterExpressions and Variables in context if possible. Return other items unchanged."""
    if hasattr(context_item, "resolve"):
        return context_item.resolve(context)
    return context_item
def is_wrapped_in_quotes(s):
    """Return True if ``s`` starts and ends with the same quote character.

    The explicit length check fixes a bug where a lone quote character
    (e.g. the one-character string '"') was reported as wrapped.
    """
    return len(s) >= 2 and s.startswith(('"', "'")) and s[0] == s[-1]
def is_dependency_middleware_active():
    """Return whether the COMPONENTS setting enables dependency rendering."""
    components_settings = getattr(settings, "COMPONENTS", {})
    return components_settings.get("RENDER_DEPENDENCIES", False)

View file

@ -1,10 +0,0 @@
# For navigation content inspiration, see Pydantic https://docs.pydantic.dev/latest
#
# `.nav.yml` is provided by https://lukasgeiter.github.io/mkdocs-awesome-nav
nav:
- overview
- Getting Started: getting_started
- concepts
- guides
- API Documentation: reference
- Release Notes: release_notes.md

View file

@ -1,6 +0,0 @@
---
title: Welcome to Django Components
weight: 1
---
<!-- NOTE: This README.md page is required, because it generates the top-level `index.html` -->
--8<-- "docs/overview/welcome.md:4"

View file

@ -1,161 +0,0 @@
/* Basic navigation */
.asv-navigation {
padding: 2px;
}
nav ul li.active a {
height: 52px;
}
nav li.active span.navbar-brand {
background-color: #e7e7e7;
height: 52px;
}
nav li.active span.navbar-brand:hover {
background-color: #e7e7e7;
}
.navbar-default .navbar-link {
color: #2458D9;
}
.panel-body {
padding: 0;
}
.panel {
margin-bottom: 4px;
-webkit-box-shadow: none;
box-shadow: none;
border-radius: 0;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.panel-default>.panel-heading,
.panel-heading {
font-size: 12px;
font-weight:bold;
padding: 2px;
text-align: center;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
background-color: #eee;
}
.btn,
.btn-group,
.btn-group-vertical>.btn:first-child,
.btn-group-vertical>.btn:last-child:not(:first-child),
.btn-group-vertical>.btn:last-child {
border: none;
border-radius: 0px;
overflow: hidden;
}
.btn-default:focus, .btn-default:active, .btn-default.active {
border: none;
color: #fff;
background-color: #99bfcd;
}
#range {
font-family: monospace;
text-align: center;
background: #ffffff;
}
.form-control {
border: none;
border-radius: 0px;
font-size: 12px;
padding: 0px;
}
.tooltip-inner {
min-width: 100px;
max-width: 800px;
text-align: left;
white-space: pre-wrap;
font-family: monospace;
}
/* Benchmark tree */
.nav-list {
font-size: 12px;
padding: 0;
padding-left: 15px;
}
.nav-list>li {
overflow-x: hidden;
}
.nav-list>li>a {
padding: 0;
padding-left: 5px;
color: #000;
}
.nav-list>li>a:focus {
color: #fff;
background-color: #99bfcd;
box-shadow: inset 0 3px 5px rgba(0,0,0,.125);
}
.nav-list>li>.nav-header {
white-space: nowrap;
font-weight: 500;
margin-bottom: 2px;
}
.caret-right {
display: inline-block;
width: 0;
height: 0;
margin-left: 2px;
vertical-align: middle;
border-left: 4px solid;
border-bottom: 4px solid transparent;
border-top: 4px solid transparent;
}
/* Summary page */
.benchmark-group > h1 {
text-align: center;
}
.benchmark-container {
width: 300px;
height: 116px;
padding: 4px;
border-radius: 3px;
}
.benchmark-container:hover {
background-color: #eee;
}
.benchmark-plot {
width: 292px;
height: 88px;
}
.benchmark-text {
font-size: 12px;
color: #000;
width: 292px;
overflow: hidden;
}
#extra-buttons {
margin: 1em;
}
#extra-buttons a {
border: solid 1px #ccc;
}

View file

@ -1,525 +0,0 @@
'use strict';
$(document).ready(function() {
/* GLOBAL STATE */
/* The index.json content as returned from the server */
var main_timestamp = '';
var main_json = {};
/* Extra pages: {name: show_function} */
var loaded_pages = {};
/* Previous window scroll positions */
var window_scroll_positions = {};
/* Previous window hash location */
var window_last_location = null;
/* Graph data cache */
var graph_cache = {};
var graph_cache_max_size = 5;
var colors = [
'#247AAD',
'#E24A33',
'#988ED5',
'#777777',
'#FBC15E',
'#8EBA42',
'#FFB5B8'
];
var time_units = [
['ps', 'picoseconds', 0.000000000001],
['ns', 'nanoseconds', 0.000000001],
['μs', 'microseconds', 0.000001],
['ms', 'milliseconds', 0.001],
['s', 'seconds', 1],
['m', 'minutes', 60],
['h', 'hours', 60 * 60],
['d', 'days', 60 * 60 * 24],
['w', 'weeks', 60 * 60 * 24 * 7],
['y', 'years', 60 * 60 * 24 * 7 * 52],
['C', 'centuries', 60 * 60 * 24 * 7 * 52 * 100]
];
var mem_units = [
['', 'bytes', 1],
['k', 'kilobytes', 1000],
['M', 'megabytes', 1000000],
['G', 'gigabytes', 1000000000],
['T', 'terabytes', 1000000000000]
];
function pretty_second(x) {
for (var i = 0; i < time_units.length - 1; ++i) {
if (Math.abs(x) < time_units[i+1][2]) {
return (x / time_units[i][2]).toFixed(3) + time_units[i][0];
}
}
return 'inf';
}
function pretty_byte(x) {
for (var i = 0; i < mem_units.length - 1; ++i) {
if (Math.abs(x) < mem_units[i+1][2]) {
break;
}
}
if (i == 0) {
return x + '';
}
return (x / mem_units[i][2]).toFixed(3) + mem_units[i][0];
}
function pretty_unit(x, unit) {
if (unit == "seconds") {
return pretty_second(x);
}
else if (unit == "bytes") {
return pretty_byte(x);
}
else if (unit && unit != "unit") {
return '' + x.toPrecision(3) + ' ' + unit;
}
else {
return '' + x.toPrecision(3);
}
}
function pad_left(s, c, num) {
s = '' + s;
while (s.length < num) {
s = c + s;
}
return s;
}
function format_date_yyyymmdd(date) {
return (pad_left(date.getFullYear(), '0', 4)
+ '-' + pad_left(date.getMonth() + 1, '0', 2)
+ '-' + pad_left(date.getDate(), '0', 2));
}
function format_date_yyyymmdd_hhmm(date) {
return (format_date_yyyymmdd(date) + ' '
+ pad_left(date.getHours(), '0', 2)
+ ':' + pad_left(date.getMinutes(), '0', 2));
}
/* Convert a flat index to permutation to the corresponding value */
function param_selection_from_flat_idx(params, idx) {
var selection = [];
if (idx < 0) {
idx = 0;
}
for (var k = params.length-1; k >= 0; --k) {
var j = idx % params[k].length;
selection.unshift([j]);
idx = (idx - j) / params[k].length;
}
selection.unshift([null]);
return selection;
}
/* Convert a benchmark parameter value from their native Python
repr format to a number or a string, ready for presentation */
function convert_benchmark_param_value(value_repr) {
var match = Number(value_repr);
if (!isNaN(match)) {
return match;
}
/* Python str */
match = value_repr.match(/^'(.+)'$/);
if (match) {
return match[1];
}
/* Python unicode */
match = value_repr.match(/^u'(.+)'$/);
if (match) {
return match[1];
}
/* Python class */
match = value_repr.match(/^<class '(.+)'>$/);
if (match) {
return match[1];
}
return value_repr;
}
/* Convert loaded graph data to a format flot understands, by
treating either time or one of the parameters as x-axis,
and selecting only one value of the remaining axes */
function filter_graph_data(raw_series, x_axis, other_indices, params) {
if (params.length == 0) {
/* Simple time series */
return raw_series;
}
/* Compute position of data entry in the results list,
and stride corresponding to plot x-axis parameter */
var stride = 1;
var param_stride = 0;
var param_idx = 0;
for (var k = params.length - 1; k >= 0; --k) {
if (k == x_axis - 1) {
param_stride = stride;
}
else {
param_idx += other_indices[k + 1] * stride;
}
stride *= params[k].length;
}
if (x_axis == 0) {
/* x-axis is time axis */
var series = new Array(raw_series.length);
for (var k = 0; k < raw_series.length; ++k) {
if (raw_series[k][1] === null) {
series[k] = [raw_series[k][0], null];
} else {
series[k] = [raw_series[k][0],
raw_series[k][1][param_idx]];
}
}
return series;
}
else {
/* x-axis is some parameter axis */
var time_idx = null;
if (other_indices[0] === null) {
time_idx = raw_series.length - 1;
}
else {
/* Need to search for the correct time value */
for (var k = 0; k < raw_series.length; ++k) {
if (raw_series[k][0] == other_indices[0]) {
time_idx = k;
break;
}
}
if (time_idx === null) {
/* No data points */
return [];
}
}
var x_values = params[x_axis - 1];
var series = new Array(x_values.length);
for (var k = 0; k < x_values.length; ++k) {
if (raw_series[time_idx][1] === null) {
series[k] = [convert_benchmark_param_value(x_values[k]),
null];
}
else {
series[k] = [convert_benchmark_param_value(x_values[k]),
raw_series[time_idx][1][param_idx]];
}
param_idx += param_stride;
}
return series;
}
}
function filter_graph_data_idx(raw_series, x_axis, flat_idx, params) {
var selection = param_selection_from_flat_idx(params, flat_idx);
var flat_selection = [];
$.each(selection, function(i, v) {
flat_selection.push(v[0]);
});
return filter_graph_data(raw_series, x_axis, flat_selection, params);
}
/* Escape special characters in graph item file names.
The implementation must match asv.util.sanitize_filename */
function sanitize_filename(name) {
var bad_re = /[<>:"\/\\^|?*\x00-\x1f]/g;
var bad_names = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3",
"COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
"LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
"LPT9"];
name = name.replace(bad_re, "_");
if (bad_names.indexOf(name.toUpperCase()) != -1) {
name = name + "_";
}
return name;
}
/* Given a specific group of parameters, generate the URL to
use to load that graph.
The implementation must match asv.graph.Graph.get_file_path
*/
function graph_to_path(benchmark_name, state) {
var parts = [];
$.each(state, function(key, value) {
var part;
if (value === null) {
part = key + "-null";
} else if (value) {
part = key + "-" + value;
} else {
part = key;
}
parts.push(sanitize_filename('' + part));
});
parts.sort();
parts.splice(0, 0, "graphs");
parts.push(sanitize_filename(benchmark_name));
/* Escape URI components */
parts = $.map(parts, function (val) { return encodeURIComponent(val); });
return parts.join('/') + ".json";
}
/*
Load and cache graph data (on javascript side)
*/
function load_graph_data(url, success, failure) {
var dfd = $.Deferred();
if (graph_cache[url]) {
setTimeout(function() {
dfd.resolve(graph_cache[url]);
}, 1);
}
else {
$.ajax({
url: url + '?timestamp=' + $.asv.main_timestamp,
dataType: "json",
cache: true
}).done(function(data) {
if (Object.keys(graph_cache).length > graph_cache_max_size) {
$.each(Object.keys(graph_cache), function (i, key) {
delete graph_cache[key];
});
}
graph_cache[url] = data;
dfd.resolve(data);
}).fail(function() {
dfd.reject();
});
}
return dfd.promise();
}
/*
Parse hash string, assuming format similar to standard URL
query strings
*/
function parse_hash_string(str) {
var info = {location: [''], params: {}};
if (str && str[0] == '#') {
str = str.slice(1);
}
if (str && str[0] == '/') {
str = str.slice(1);
}
var match = str.match(/^([^?]*?)\?/);
if (match) {
info['location'] = decodeURIComponent(match[1]).replace(/\/+/, '/').split('/');
var rest = str.slice(match[1].length+1);
var parts = rest.split('&');
for (var i = 0; i < parts.length; ++i) {
var part = parts[i].split('=');
if (part.length != 2) {
continue;
}
var key = decodeURIComponent(part[0].replace(/\+/g, " "));
var value = decodeURIComponent(part[1].replace(/\+/g, " "));
if (value == '[none]') {
value = null;
}
if (info['params'][key] === undefined) {
info['params'][key] = [value];
}
else {
info['params'][key].push(value);
}
}
}
else {
info['location'] = decodeURIComponent(str).replace(/\/+/, '/').split('/');
}
return info;
}
/*
Generate a hash string, inverse of parse_hash_string
*/
function format_hash_string(info) {
var parts = info['params'];
var str = '#' + info['location'];
if (parts) {
str = str + '?';
var first = true;
$.each(parts, function (key, values) {
$.each(values, function (idx, value) {
if (!first) {
str = str + '&';
}
if (value === null) {
value = '[none]';
}
str = str + encodeURIComponent(key) + '=' + encodeURIComponent(value);
first = false;
});
});
}
return str;
}
/*
Dealing with sub-pages
*/
function show_page(name, params) {
if (loaded_pages[name] !== undefined) {
$("#nav ul li.active").removeClass('active');
$("#nav-li-" + name).addClass('active');
$("#graph-display").hide();
$("#summarygrid-display").hide();
$("#summarylist-display").hide();
$('#regressions-display').hide();
$('.tooltip').remove();
loaded_pages[name](params);
return true;
}
else {
return false;
}
}
function hashchange() {
var info = parse_hash_string(window.location.hash);
/* Keep track of window scroll position; makes the back-button work */
var old_scroll_pos = window_scroll_positions[info.location.join('/')];
window_scroll_positions[window_last_location] = $(window).scrollTop();
window_last_location = info.location.join('/');
/* Redirect to correct handler */
if (show_page(info.location, info.params)) {
/* show_page does the work */
}
else {
/* Display benchmark page */
info.params['benchmark'] = info.location[0];
show_page('graphdisplay', info.params);
}
/* Scroll back to previous position, if any */
if (old_scroll_pos !== undefined) {
$(window).scrollTop(old_scroll_pos);
}
}
function get_commit_hash(revision) {
var commit_hash = main_json.revision_to_hash[revision];
if (commit_hash) {
// Return printable commit hash
commit_hash = commit_hash.slice(0, main_json.hash_length);
}
return commit_hash;
}
function get_revision(commit_hash) {
var rev = null;
$.each(main_json.revision_to_hash, function(revision, full_commit_hash) {
if (full_commit_hash.startsWith(commit_hash)) {
rev = revision;
// break the $.each loop
return false;
}
});
return rev;
}
function init_index() {
/* Fetch the main index.json and then set up the page elements
based on it. */
$.ajax({
url: "index.json" + '?timestamp=' + $.asv.main_timestamp,
dataType: "json",
cache: true
}).done(function (index) {
main_json = index;
$.asv.main_json = index;
/* Page title */
var project_name = $("#project-name")[0];
project_name.textContent = index.project;
project_name.setAttribute("href", index.project_url);
$("#project-name").textContent = index.project;
document.title = "airspeed velocity of an unladen " + index.project;
$(window).on('hashchange', hashchange);
$('#graph-display').hide();
$('#regressions-display').hide();
$('#summarygrid-display').hide();
$('#summarylist-display').hide();
hashchange();
}).fail(function () {
$.asv.ui.network_error();
});
}
function init() {
/* Fetch the info.json */
$.ajax({
url: "info.json",
dataType: "json",
cache: false
}).done(function (info) {
main_timestamp = info['timestamp'];
$.asv.main_timestamp = main_timestamp;
init_index();
}).fail(function () {
$.asv.ui.network_error();
});
}
/*
Set up $.asv
*/
this.register_page = function(name, show_function) {
loaded_pages[name] = show_function;
}
this.parse_hash_string = parse_hash_string;
this.format_hash_string = format_hash_string;
this.filter_graph_data = filter_graph_data;
this.filter_graph_data_idx = filter_graph_data_idx;
this.convert_benchmark_param_value = convert_benchmark_param_value;
this.param_selection_from_flat_idx = param_selection_from_flat_idx;
this.graph_to_path = graph_to_path;
this.load_graph_data = load_graph_data;
this.get_commit_hash = get_commit_hash;
this.get_revision = get_revision;
this.main_timestamp = main_timestamp; /* Updated after info.json loads */
this.main_json = main_json; /* Updated after index.json loads */
this.format_date_yyyymmdd = format_date_yyyymmdd;
this.format_date_yyyymmdd_hhmm = format_date_yyyymmdd_hhmm;
this.pretty_unit = pretty_unit;
this.time_units = time_units;
this.mem_units = mem_units;
this.colors = colors;
$.asv = this;
/*
Launch it
*/
init();
});

View file

@ -1,231 +0,0 @@
'use strict';
$(document).ready(function() {
function make_panel(nav, heading) {
var panel = $('<div class="panel panel-default"/>');
nav.append(panel);
var panel_header = $(
'<div class="panel-heading">' + heading + '</div>');
panel.append(panel_header);
var panel_body = $('<div class="panel-body"/>');
panel.append(panel_body);
return panel_body;
}
function make_value_selector_panel(nav, heading, values, setup_callback) {
var panel_body = make_panel(nav, heading);
var vertical = false;
var buttons = $('<div class="btn-group" ' +
'data-toggle="buttons"/>');
panel_body.append(buttons);
$.each(values, function (idx, value) {
var button = $(
'<a class="btn btn-default btn-xs active" role="button"/>');
setup_callback(idx, value, button);
buttons.append(button);
});
return panel_body;
}
function reflow_value_selector_panels(no_timeout) {
$('.panel').each(function (i, panel_obj) {
var panel = $(panel_obj);
panel.find('.btn-group').each(function (i, buttons_obj) {
var buttons = $(buttons_obj);
var width = 0;
if (buttons.hasClass('reflow-done')) {
/* already processed */
return;
}
$.each(buttons.children(), function(idx, value) {
width += value.scrollWidth;
});
var max_width = panel_obj.clientWidth;
if (width >= max_width) {
buttons.addClass("btn-group-vertical");
buttons.css("width", "100%");
buttons.css("max-height", "20ex");
buttons.css("overflow-y", "auto");
}
else {
buttons.addClass("btn-group-justified");
}
/* The widths can be zero if the UI is not fully layouted yet,
so mark the adjustment complete only if this is not the case */
if (width > 0 && max_width > 0) {
buttons.addClass("reflow-done");
}
});
});
if (!no_timeout) {
/* Call again asynchronously, in case the UI was not fully layouted yet */
setTimeout(function() { $.asv.ui.reflow_value_selector_panels(true); }, 0);
}
}
function network_error(ajax, status, error) {
$("#error-message").text(
"Error fetching content. " +
"Perhaps web server has gone down.");
$("#error").modal('show');
}
function hover_graph(element, graph_url, benchmark_basename, parameter_idx, revisions) {
/* Show the summary graph as a popup */
var plot_div = $('<div/>');
plot_div.css('width', '11.8em');
plot_div.css('height', '7em');
plot_div.css('border', '2px solid black');
plot_div.css('background-color', 'white');
function update_plot() {
var markings = [];
if (revisions) {
$.each(revisions, function(i, revs) {
var rev_a = revs[0];
var rev_b = revs[1];
if (rev_a !== null) {
markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_a, to: rev_a }});
markings.push({ color: "rgba(255,0,0,0.1)", xaxis: { from: rev_a, to: rev_b }});
}
markings.push({ color: '#d00', lineWidth: 2, xaxis: { from: rev_b, to: rev_b }});
});
}
$.asv.load_graph_data(
graph_url
).done(function (data) {
var params = $.asv.main_json.benchmarks[benchmark_basename].params;
data = $.asv.filter_graph_data_idx(data, 0, parameter_idx, params);
var options = {
colors: ['#000'],
series: {
lines: {
show: true,
lineWidth: 2
},
shadowSize: 0
},
grid: {
borderWidth: 1,
margin: 0,
labelMargin: 0,
axisMargin: 0,
minBorderMargin: 0,
markings: markings,
},
xaxis: {
ticks: [],
},
yaxis: {
ticks: [],
min: 0
},
legend: {
show: false
}
};
var plot = $.plot(plot_div, [{data: data}], options);
}).fail(function () {
// TODO: Handle failure
});
return plot_div;
}
element.popover({
placement: 'left auto',
trigger: 'hover',
html: true,
delay: 50,
content: $('<div/>').append(plot_div)
});
element.on('show.bs.popover', update_plot);
}
function hover_summary_graph(element, benchmark_basename) {
/* Show the summary graph as a popup */
var plot_div = $('<div/>');
plot_div.css('width', '11.8em');
plot_div.css('height', '7em');
plot_div.css('border', '2px solid black');
plot_div.css('background-color', 'white');
function update_plot() {
var markings = [];
$.asv.load_graph_data(
'graphs/summary/' + benchmark_basename + '.json'
).done(function (data) {
var options = {
colors: $.asv.colors,
series: {
lines: {
show: true,
lineWidth: 2
},
shadowSize: 0
},
grid: {
borderWidth: 1,
margin: 0,
labelMargin: 0,
axisMargin: 0,
minBorderMargin: 0,
markings: markings,
},
xaxis: {
ticks: [],
},
yaxis: {
ticks: [],
min: 0
},
legend: {
show: false
}
};
var plot = $.plot(plot_div, [{data: data}], options);
}).fail(function () {
// TODO: Handle failure
});
return plot_div;
}
element.popover({
placement: 'left auto',
trigger: 'hover',
html: true,
delay: 50,
content: $('<div/>').append(plot_div)
});
element.on('show.bs.popover', update_plot);
}
/*
Set up $.asv.ui
*/
this.network_error = network_error;
this.make_panel = make_panel;
this.make_value_selector_panel = make_value_selector_panel;
this.reflow_value_selector_panels = reflow_value_selector_panels;
this.hover_graph = hover_graph;
this.hover_summary_graph = hover_summary_graph;
$.asv.ui = this;
});

View file

@ -1,23 +0,0 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>airspeed velocity error</title>
<link rel="shortcut icon" href="swallow.ico"/>
</head>
<body>
<!-- Static fallback page shown when the external JavaScript dependencies
     of airspeed velocity fail to load. -->
<h1>
<!-- FIX: dropped the invalid </img> end tag; img is a void element -->
<img src="swallow.png" width="22" height="22" alt="swallow">
Can not determine continental origin of swallow.
</h1>
<h3>
One or more external (JavaScript) dependencies of airspeed velocity failed to load.
</h3>
<p>
Make sure you have an active internet connection and enable 3rd-party scripts
in your browser the first time you load airspeed velocity.
</p>
</body>
</html>

File diff suppressed because it is too large Load diff

View file

@ -1 +0,0 @@
[[1662, [52920320.0, 54566912.0]], [1672, [52350976.0, 54599680.0]], [1687, [52109312.0, 54779904.0]], [1691, [52899840.0, 54730752.0]], [1709, [52936704.0, 55009280.0]], [1726, [52379648.0, 54992896.0]], [1766, [53084160.0, 55382016.0]], [1770, [53047296.0, 55373824.0]], [1776, [52490240.0, 55361536.0]], [1801, [53153792.0, 55410688.0]], [1937, [52957184.0, 55177216.0]], [1960, [52932608.0, 55693312.0]], [1996, [53096448.0, 55484416.0]], [2029, [52715520.0, 56090624.0]]]

View file

@ -1 +0,0 @@
[[1662, [53800960.0, 54734848.0]], [1672, [52289536.0, 55099392.0]], [1687, [52142080.0, 55255040.0]], [1691, [53796864.0, 55238656.0]], [1709, [53768192.0, 55455744.0]], [1726, [51998720.0, 55451648.0]], [1766, [53739520.0, 55812096.0]], [1770, [53948416.0, 55824384.0]], [1776, [52097024.0, 55791616.0]], [1801, [53919744.0, 55799808.0]], [1937, [52822016.0, 56242176.0]], [1960, [53063680.0, 56180736.0]], [1996, [53018624.0, 56389632.0]], [2029, [52736000.0, 56791040.0]]]

View file

@ -1 +0,0 @@
[[1662, [44191744.0, 44191744.0]], [1672, [44056576.0, 44048384.0]], [1687, [44191744.0, 44310528.0]], [1691, [44183552.0, 44175360.0]], [1709, [44191744.0, 44314624.0]], [1726, [44195840.0, 44314624.0]], [1766, [44322816.0, 44314624.0]], [1770, [44326912.0, 44322816.0]], [1776, [44183552.0, 44306432.0]], [1801, [44195840.0, 44453888.0]], [1937, [44756992.0, 44744704.0]], [1960, [44716032.0, 44834816.0]], [1996, [44716032.0, 44969984.0]], [2029, [44871680.0, 44912640.0]]]

View file

@ -1 +0,0 @@
[[1662, [44195840.0, 44187648.0]], [1672, [44060672.0, 43917312.0]], [1687, [44105728.0, 44310528.0]], [1691, [44187648.0, 44183552.0]], [1709, [44191744.0, 44437504.0]], [1726, [44322816.0, 44314624.0]], [1766, [44322816.0, 44310528.0]], [1770, [44101632.0, 44310528.0]], [1776, [44314624.0, 44437504.0]], [1801, [44191744.0, 44453888.0]], [1937, [44527616.0, 44744704.0]], [1960, [44716032.0, 44838912.0]], [1996, [44724224.0, 44969984.0]], [2029, [44617728.0, 44986368.0]]]

View file

@ -1 +0,0 @@
[[1662, [0.06960565700001098, 0.25608221199996706]], [1672, [0.07114163800000028, 0.26389872900000455]], [1687, [0.06910802600003763, 0.25746033199999374]], [1691, [0.07048037500001669, 0.2598985070000026]], [1709, [0.07402671400001282, 0.26584690599997884]], [1726, [0.07297276199997782, 0.2569234329999972]], [1766, [0.07308550800001967, 0.26274096600002395]], [1770, [0.0749189080000292, 0.26436952000000247]], [1776, [0.07303507899999317, 0.2628890319999755]], [1801, [0.07360306399999672, 0.2678246009999725]], [1937, [0.07941284200001064, 0.26779402600004687]], [1960, [0.08026317200000221, 0.26819844099998136]], [1996, [0.0814841690000776, 0.28364495499999975]], [2029, [0.08105427499998541, 0.29477426600001877]]]

View file

@ -1 +0,0 @@
[[1662, [0.03327357099999517, 0.1421111020000012]], [1672, [0.033918617999972867, 0.14395761299999776]], [1687, [0.03317536700001256, 0.14245594600001255]], [1691, [0.034316510999985894, 0.1444248799999741]], [1709, [0.03742426899998463, 0.14901454800002512]], [1726, [0.03658580800001232, 0.1459621130000528]], [1766, [0.03723830100000214, 0.15196534300002895]], [1770, [0.03752758399997447, 0.15356457899997622]], [1776, [0.03678920999999491, 0.14955294699998944]], [1801, [0.037022983000014165, 0.15138703899998518]], [1937, [0.043317416999911984, 0.15457556900003055]], [1960, [0.04349111400000538, 0.15453611999998884]], [1996, [0.04362213900003553, 0.16551773399999092]], [2029, [0.043648402000002307, 0.17461173199995983]]]

View file

@ -1 +0,0 @@
[[1662, [0.0035443229999998493, 0.00467639600003622]], [1672, [0.0036137869999777195, 0.004807943000002979]], [1687, [0.0035223549999727766, 0.004706463999980315]], [1691, [0.00364059099999281, 0.004926952999994683]], [1709, [0.003602947999979733, 0.004853936999950292]], [1726, [0.0035008030000085455, 0.004695608999981005]], [1766, [0.003566315000000486, 0.004791812000007667]], [1770, [0.0036766670000361046, 0.004929383999979109]], [1776, [0.0035613420000117912, 0.004760385999986738]], [1801, [0.003639607999986083, 0.004848561000017071]], [1937, [0.0036632869999948525, 0.00493345400002454]], [1960, [0.0036145729999930154, 0.004811176000004025]], [1996, [0.00375721499995052, 0.0049729269999261305]], [2029, [0.0037106409999978496, 0.004899473999955717]]]

View file

@ -1 +0,0 @@
[[1662, [0.00010400499999718704, 0.0005328339999977061]], [1672, [0.00010086800000408402, 0.0005549249999887707]], [1687, [9.818199998790078e-05, 0.0005511469999817109]], [1691, [0.0001005780000014056, 0.0005555879999974422]], [1709, [0.00012266099997759738, 0.0005711430000019391]], [1726, [0.00011641800000461444, 0.0005489540000098714]], [1766, [0.00011609900002440554, 0.0005779780000239043]], [1770, [0.0001176700000087294, 0.0005864990000077341]], [1776, [0.00011622699999236374, 0.0005842630000074678]], [1801, [0.00011665800002447213, 0.000582710000003317]], [1937, [0.00012153600005149201, 0.0005999570000199128]], [1960, [0.00012332000000014887, 0.0005915369999911491]], [1996, [0.00012686900004155177, 0.0006182140000419167]], [2029, [0.00012706900002967814, 0.0006100459999629493]]]

View file

@ -1 +0,0 @@
[[1662, [0.21775109000003567, 0.21398552899995593]], [1672, [0.22476057199997967, 0.22048105400000395]], [1687, [0.21809406599999193, 0.2131839880000257]], [1691, [0.22356123500000535, 0.22167734499998915]], [1709, [0.22133603999998286, 0.21805855799999563]], [1726, [0.2166100470000174, 0.21420494400001644]], [1766, [0.22339861599999722, 0.22020213500002228]], [1770, [0.22985272800002576, 0.22544496099999378]], [1776, [0.22073260000001937, 0.2182690520000392]], [1801, [0.224061646999985, 0.2246476189999953]], [1937, [0.22743783699991127, 0.226070988999993]], [1960, [0.2252378419999843, 0.2247263650000093]], [1996, [0.23076480500003527, 0.23163660399995933]], [2029, [0.22799248500001568, 0.22723498599998493]]]

View file

@ -1 +0,0 @@
[[1662, 0.19832900800003017], [1672, 0.20217585500000723], [1687, 0.19726691500000015], [1691, 0.20350580199999513], [1709, 0.19950735400001918], [1726, 0.19625152499997967], [1766, 0.20073733000003813], [1770, 0.20376683500001036], [1776, 0.19919827600000417], [1801, 0.2053688209999791], [1937, 0.2063091950000171], [1960, 0.20468290799999522], [1996, 0.21042045099989082], [2029, 0.2056691309999792]]

View file

@ -1 +0,0 @@
[[1662, [54439936.0, 53968896.0]], [1672, [54616064.0, 54140928.0]], [1687, [54767616.0, 54296576.0]], [1691, [54743040.0, 54087680.0]], [1709, [55001088.0, 54312960.0]], [1726, [54992896.0, 54345728.0]], [1766, [55373824.0, 54894592.0]], [1770, [55246848.0, 54898688.0]], [1776, [55357440.0, 54874112.0]], [1801, [55382016.0, 54882304.0]], [1937, [55222272.0, 54722560.0]], [1960, [55263232.0, 54693888.0]], [1996, [55476224.0, 54968320.0]], [2029, [56090624.0, 55582720.0]]]

View file

@ -1 +0,0 @@
[[1662, [54968320.0, 54792192.0]], [1672, [54849536.0, 54841344.0]], [1687, [55271424.0, 55304192.0]], [1691, [54984704.0, 54964224.0]], [1709, [55439360.0, 55369728.0]], [1726, [55455744.0, 55177216.0]], [1766, [55545856.0, 55631872.0]], [1770, [55812096.0, 55611392.0]], [1776, [55640064.0, 55631872.0]], [1801, [55812096.0, 55902208.0]], [1937, [56008704.0, 56143872.0]], [1960, [55783424.0, 56160256.0]], [1996, [56352768.0, 56516608.0]], [2029, [56786944.0, 56778752.0]]]

View file

@ -1 +0,0 @@
[[1662, [44187648.0, 44183552.0]], [1672, [44048384.0, 44048384.0]], [1687, [44314624.0, 44310528.0]], [1691, [44179456.0, 44175360.0]], [1709, [44314624.0, 44310528.0]], [1726, [44314624.0, 44314624.0]], [1766, [44318720.0, 44314624.0]], [1770, [44322816.0, 44314624.0]], [1776, [44306432.0, 44240896.0]], [1801, [44453888.0, 44453888.0]], [1937, [44744704.0, 44744704.0]], [1960, [44838912.0, 44838912.0]], [1996, [44969984.0, 44969984.0]], [2029, [44843008.0, 44851200.0]]]

View file

@ -1 +0,0 @@
[[1662, [44187648.0, 44187648.0]], [1672, [44052480.0, 44052480.0]], [1687, [44314624.0, 44310528.0]], [1691, [44179456.0, 44179456.0]], [1709, [44310528.0, 44314624.0]], [1726, [44314624.0, 44314624.0]], [1766, [44310528.0, 44314624.0]], [1770, [44314624.0, 44318720.0]], [1776, [44437504.0, 44437504.0]], [1801, [44449792.0, 44449792.0]], [1937, [44744704.0, 44744704.0]], [1960, [44965888.0, 44834816.0]], [1996, [44974080.0, 44974080.0]], [2029, [44982272.0, 44986368.0]]]

View file

@ -1 +0,0 @@
[[1662, [0.2574955810000006, 0.2591010970000127]], [1672, [0.2600247560000071, 0.26185358800000813]], [1687, [0.2567828300000201, 0.2602957870000182]], [1691, [0.259077934000004, 0.2619792840000059]], [1709, [0.2646600410000133, 0.2676605120000204]], [1726, [0.2570519909999689, 0.2606809000000112]], [1766, [0.262679922000018, 0.2686107789999994]], [1770, [0.265977821000007, 0.26914772099999595]], [1776, [0.2626667089999728, 0.2663110299999971]], [1801, [0.2658582709999848, 0.2712929850000023]], [1937, [0.2675778039999841, 0.2724974679999832]], [1960, [0.26819597400000816, 0.2740507329999957]], [1996, [0.2794132599999557, 0.28440619299999526]], [2029, [0.2920349000000044, 0.2976166970000236]]]

View file

@ -1 +0,0 @@
[[1662, [0.14273938200000202, 0.1464969190000147]], [1672, [0.14515931700000806, 0.14909453600000688]], [1687, [0.1423055980000072, 0.14642362500001127]], [1691, [0.1436571560000175, 0.14915657599999577]], [1709, [0.14860135300000366, 0.15305296299999327]], [1726, [0.14520097999997006, 0.14991973799999414]], [1766, [0.15071133700001837, 0.15540660900001058]], [1770, [0.15150350199996865, 0.1558047899999906]], [1776, [0.14876902899999322, 0.15549233400000162]], [1801, [0.15248822700002052, 0.15465820200000735]], [1937, [0.15459265900005903, 0.15926110200007315]], [1960, [0.15396625699997912, 0.16023626799997714]], [1996, [0.16650312799993117, 0.17177308600003016]], [2029, [0.17414895399997476, 0.178393189000019]]]

View file

@ -1 +0,0 @@
[[1662, [0.004720848000005162, 0.004705489000002672]], [1672, [0.004856270999994194, 0.00490694800001279]], [1687, [0.00473016699999107, 0.004734037999980956]], [1691, [0.004871503999993365, 0.0048899079999955575]], [1709, [0.0048215560000244295, 0.004858458999990489]], [1726, [0.004671787999996013, 0.004672599999992144]], [1766, [0.00478528000002143, 0.0047485900000197034]], [1770, [0.004901490999998259, 0.004895917999988342]], [1776, [0.00480728600001612, 0.00472804499997892]], [1801, [0.004847185000016907, 0.004857667999999649]], [1937, [0.004923484000073586, 0.004925836999973399]], [1960, [0.004825538000005736, 0.0047952310000027865]], [1996, [0.005049280000093859, 0.004947880000145233]], [2029, [0.004897051999989799, 0.004863266000029398]]]

View file

@ -1 +0,0 @@
[[1662, [0.0005377129999999397, 0.0005395769999836375]], [1672, [0.000547750000009728, 0.0005677989999810507]], [1687, [0.0005471899999918151, 0.0005447550000212686]], [1691, [0.0005559489999882317, 0.0005480739999939033]], [1709, [0.0005736080000247057, 0.0005720849999875099]], [1726, [0.000542692999999872, 0.0005430530000012368]], [1766, [0.0005853119999983392, 0.000582014999963576]], [1770, [0.0005929909999622396, 0.000583071999983531]], [1776, [0.0005810670000130358, 0.000576186999978745]], [1801, [0.0005717709999828458, 0.0005785939999896073]], [1937, [0.0005969709999362749, 0.0005864510000037626]], [1960, [0.0005953940000154034, 0.0005933700000468889]], [1996, [0.0006160310000495883, 0.0006166809999967882]], [2029, [0.0006159270000125616, 0.0006080119999865019]]]

View file

@ -1 +0,0 @@
[[1662, [0.21402431699999624, 0.21364062999998623]], [1672, [0.2221746719999942, 0.2222580240000127]], [1687, [0.2142312400000037, 0.21397752699999728]], [1691, [0.22129613300000983, 0.21942976399998315]], [1709, [0.2199001029999863, 0.22046102699999892]], [1726, [0.2147675530000015, 0.21506381099999317]], [1766, [0.22056839900000114, 0.21916191200000412]], [1770, [0.22394285699999728, 0.22330144500000415]], [1776, [0.21867883100003382, 0.21859779499999377]], [1801, [0.22378945699995256, 0.22211803700002974]], [1937, [0.22545313400001987, 0.22602228000005198]], [1960, [0.22564571399999522, 0.22598634599995648]], [1996, [0.2295973340000046, 0.23030742100002044]], [2029, [0.22777395400004252, 0.2292747939999913]]]

View file

@ -1 +0,0 @@
[[1662, 53737309.613078326], [1672, 53463506.59363525], [1687, 53427924.42970294], [1691, 53807508.99158667], [1709, 53963042.655257314], [1726, 53670369.245800875], [1766, 54220916.6140389], [1770, 54198077.75539557], [1776, 53906774.26269022], [1801, 54270509.344660625], [1937, 54055804.31664803], [1960, 54295416.494559616], [1996, 54277301.04707094], [2029, 54376892.25474807]]

View file

@ -1 +0,0 @@
[[1662, 54265895.0709751], [1672, 53676080.7209516], [1687, 53675997.57883592], [1691, 54512993.537089705], [1709, 54605449.27839023], [1726, 53697436.790693834], [1766, 54766004.5031032], [1770, 54878384.55144014], [1776, 53912680.86221259], [1801, 54851721.60114168], [1937, 54505276.07990639], [1960, 54599968.83944605], [1996, 54678155.56971878], [2029, 54725974.50425164]]

View file

@ -1 +0,0 @@
[[1662, 44191743.99999999], [1672, 44052479.80957694], [1687, 44251096.14326895], [1691, 44179455.81012423], [1709, 44253141.3491094], [1726, 44255192.14695785], [1766, 44318719.81072088], [1770, 44324863.95268679], [1776, 44244949.34121254], [1801, 44324676.21343578], [1937, 44750847.578234404], [1960, 44775384.609963], [1996, 44842828.229087956], [2029, 44892155.328466915]]

View file

@ -1 +0,0 @@
[[1662, 44191743.81017703], [1672, 43988933.59873213], [1687, 44208009.40445502], [1691, 44185599.95253766], [1709, 44314453.63272547], [1726, 44318719.81072088], [1766, 44316671.57410231], [1770, 44205956.60747199], [1776, 44376021.4672124], [1801, 44322622.19567646], [1937, 44636028.0238471], [1960, 44777429.84849827], [1996, 44846935.655543014], [2029, 44801668.84315699]]

View file

@ -1 +0,0 @@
[[1662, 0.13350944016163727], [1672, 0.1370189324406613], [1687, 0.13338881256624893], [1691, 0.13534306127506], [1709, 0.14028461383291016], [1726, 0.1369248426273554], [1766, 0.13857329097819557], [1770, 0.14073477092350728], [1776, 0.1385645020210802], [1801, 0.14040196312080028], [1937, 0.14582964264952603], [1960, 0.14671897491501892], [1996, 0.15202819951982394], [2029, 0.15457268328939747]]

View file

@ -1 +0,0 @@
[[1662, 0.0687644082522681], [1672, 0.06987734456556612], [1687, 0.06874611472573841], [1691, 0.07039998567606925], [1709, 0.07467771106069107], [1726, 0.07307627413528986], [1766, 0.0752258677863117], [1770, 0.07591381717343901], [1776, 0.0741750279629251], [1801, 0.07486521068773488], [1937, 0.08182795598310513], [1960, 0.08198138820511656], [1996, 0.08497198126158123], [2029, 0.08730133488241124]]

View file

@ -1 +0,0 @@
[[1662, 0.004071198582731586], [1672, 0.004168318834979474], [1687, 0.004071589002161507], [1691, 0.004235212007582172], [1709, 0.004181923314217816], [1726, 0.004054429932062044], [1766, 0.004133897799028137], [1770, 0.004257194320585938], [1776, 0.004117446125697445], [1801, 0.004200816754404154], [1937, 0.004251194879485355], [1960, 0.0041701734817425696], [1996, 0.004322540447211732], [2029, 0.004263823296369016]]

View file

@ -1 +0,0 @@
[[1662, 0.00023540900613243872], [1672, 0.0002365886195511814], [1687, 0.0002326213978668684], [1691, 0.0002363893607261623], [1709, 0.0002646827752432008], [1726, 0.00025280056719810247], [1766, 0.00025904182642747317], [1770, 0.0002627038966898471], [1776, 0.00026058997620285855], [1801, 0.000260725493948419], [1937, 0.0002700303204925571], [1960, 0.00027008950893915996], [1996, 0.0002800574798090668], [2029, 0.000278420428825539]]

View file

@ -1 +0,0 @@
[[1662, 0.21586009863792485], [1672, 0.22261052942796597], [1687, 0.21562505130206716], [1691, 0.2226172972159168], [1709, 0.21969118716012626], [1726, 0.21540413874268913], [1766, 0.2217946171557135], [1770, 0.22763817627917332], [1776, 0.21949736979633283], [1801, 0.22435444169386096], [1937, 0.22675338309844276], [1960, 0.22498195815021013], [1996, 0.23120029358312028], [2029, 0.22761342037999505]]

View file

@ -1 +0,0 @@
[[1662, 0.19832900800003017], [1672, 0.20217585500000723], [1687, 0.19726691500000015], [1691, 0.20350580199999513], [1709, 0.19950735400001918], [1726, 0.19625152499997967], [1766, 0.20073733000003813], [1770, 0.20376683500001036], [1776, 0.19919827600000417], [1801, 0.2053688209999791], [1937, 0.2063091950000171], [1960, 0.20468290799999522], [1996, 0.21042045099989082], [2029, 0.2056691309999792]]

View file

@ -1 +0,0 @@
[[1662, 54203904.32644733], [1672, 54377977.05567385], [1687, 54531587.401090905], [1691, 54414373.37457081], [1709, 54655941.05401974], [1726, 54668354.35558938], [1766, 55133687.30603648], [1770, 55072492.873806104], [1776, 55115246.19008138], [1801, 55131593.83007953], [1937, 54971848.18483294], [1960, 54977822.99733244], [1996, 55221688.06930552], [2029, 55836094.494666085]]

View file

@ -1 +0,0 @@
[[1662, 54880185.34368702], [1672, 54845439.84705003], [1687, 55287805.57238104], [1691, 54974463.04630629], [1709, 55404533.06087942], [1726, 55316304.695168346], [1766, 55588847.36277981], [1770, 55711653.6193069], [1776, 55635967.849223286], [1801, 55857133.82825839], [1937, 56076247.273349956], [1960, 55971522.87008585], [1996, 56434628.542863145], [2029, 56782847.85226863]]

View file

@ -1 +0,0 @@
[[1662, 44185599.95253766], [1672, 44048383.99999999], [1687, 44312575.95267366], [1691, 44177407.95252886], [1709, 44312575.95267366], [1726, 44314624.0], [1766, 44316671.95267803], [1770, 44318719.81072088], [1776, 44273651.87380721], [1801, 44453888.0], [1937, 44744704.0], [1960, 44838912.00000001], [1996, 44969983.99999999], [2029, 44847103.812950954]]

View file

@ -1 +0,0 @@
[[1662, 44187648.0], [1672, 44052479.99999999], [1687, 44312575.95267366], [1691, 44179455.99999999], [1709, 44312575.95267366], [1726, 44314624.0], [1766, 44312575.95267366], [1770, 44316671.95267803], [1776, 44437504.0], [1801, 44449792.0], [1937, 44744704.0], [1960, 44900304.17220587], [1996, 44974080.0], [2029, 44984319.953380376]]

View file

@ -1 +0,0 @@
[[1662, 0.2582970915627115], [1672, 0.2609375697890752], [1687, 0.2585333418012986], [1691, 0.2605245701455466], [1709, 0.26615604836262874], [1726, 0.25886008645727265], [1766, 0.2656287982807661], [1770, 0.2675580766089799], [1776, 0.2644825926606367], [1801, 0.26856188100049755], [1937, 0.2700264321932048], [1960, 0.2711075492537049], [1996, 0.28189867248766043], [2029, 0.29481258851469266]]

View file

@ -1 +0,0 @@
[[1662, 0.144605946222714], [1672, 0.14711376894836906], [1687, 0.14434992731884352], [1691, 0.14638104217028877], [1709, 0.1508107336447194], [1726, 0.14754149544768042], [1766, 0.15304096778650703], [1770, 0.1536390943522132], [1776, 0.15209353551720362], [1801, 0.15356938175949056], [1937, 0.15690951925702573], [1960, 0.15706997937098616], [1996, 0.16911758076913888], [2029, 0.17625829701058932]]

View file

@ -1 +0,0 @@
[[1662, 0.004713162243622524], [1672, 0.004881543738505435], [1687, 0.004732102104161917], [1691, 0.00488069732533968], [1709, 0.004839972328668506], [1726, 0.004672193982353972], [1766, 0.004766899700580667], [1770, 0.004898703707479391], [1776, 0.004767500868992566], [1801, 0.004852423669122516], [1937, 0.004924660359490744], [1960, 0.004810360631940079], [1996, 0.004998322871483767], [2029, 0.004880129761791827]]

View file

@ -1 +0,0 @@
[[1662, 0.0005386441936864901], [1672, 0.0005576844109755481], [1687, 0.0005459711425132094], [1691, 0.0005519974567116778], [1709, 0.0005728459938648165], [1726, 0.0005428729701593198], [1766, 0.0005836611719634209], [1770, 0.0005880105852110292], [1776, 0.0005786218553806627], [1801, 0.0005751723828193878], [1937, 0.0005916876201898046], [1960, 0.000594381138510516], [1996, 0.0006163559143381376], [2029, 0.0006119567036345985]]

View file

@ -1 +0,0 @@
[[1662, 0.21383238744211777], [1672, 0.22221634409189991], [1687, 0.21410434591886193], [1691, 0.2203609725843055], [1709, 0.22018038637622225], [1726, 0.2149156309515977], [1766, 0.2198640308272821], [1770, 0.22362192103085216], [1776, 0.21863830924562072], [1801, 0.22295218072522197], [1937, 0.22573752762853083], [1960, 0.22581596577171012], [1996, 0.22995210340856065], [2029, 0.2285231418957897]]

View file

@ -1,161 +0,0 @@
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>airspeed velocity</title>
<script type="text/javascript">
function js_load_failure() {
window.location = "error.html";
}
</script>
<script src="https://code.jquery.com/jquery-3.3.1.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.min.js" integrity="sha512-eO1AKNIv7KSFl5n81oHCKnYLMi8UV4wWD1TcLYKNTssoECDuiGhoRsQkdiZkl8VUjoms2SeJY7zTSw5noGSqbQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.time.min.js" integrity="sha512-lcRowrkiQvFli9HkuJ2Yr58iEwAtzhFNJ1Galsko4SJDhcZfUub8UxGlMQIsMvARiTqx2pm7g6COxJozihOixA==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.selection.min.js" integrity="sha512-3EUG0t3qfbLaGN3FXO86i+57nvxHOXvIb/xMSKRrCuX/HXdn1bkbqwAeLd6U1PDmuEB2cnKhfM+SGLAVQbyjWQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/flot/0.8.3/jquery.flot.categories.min.js" integrity="sha512-x4QGSZkQ57pNuICMFFevIhDer5NVB5eJCRmENlCdJukMs8xWFH8OHfzWQVSkl9VQ4+4upPPTkHSAewR6KNMjGA==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.axislabels.js"></script>
<script src="https://cdn.jsdelivr.net/npm/flot-orderbars@1.0.0/js/jquery.flot.orderBars.js" integrity="sha256-OXNbT0b5b/TgglckAfR8VaJ2ezZv0dHoIeRKjYMKEr8=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/stupidtable/1.0.1/stupidtable.min.js" integrity="sha512-GM3Ds3dUrgkpKVXc+4RxKbQDoeTemdlzXxn5d/QCOJT6EFdEufu1UTVBpIFDLd6YjIhSThNe+zpo1mwqzNq4GQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/blueimp-md5/2.19.0/js/md5.min.js" integrity="sha512-8pbzenDolL1l5OPSsoURCx9TEdMFTaeFipASVrMYKhuYtly+k3tcsQYliOEKTmuB1t7yuzAiVo+yd7SJz+ijFQ==" crossorigin="anonymous" referrerpolicy="no-referrer" onerror="js_load_failure()"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@3.1.1/dist/js/bootstrap.min.js" integrity="sha256-iY0FoX8s/FEg3c26R6iFw3jAtGbzDwcA5QJ1fiS0A6E=" crossorigin="anonymous" onerror="js_load_failure()"></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@3.1.1/dist/css/bootstrap.min.css" integrity="sha256-6VA0SGkrc43SYPvX98q/LhHwm2APqX5us6Vuulsafps=" crossorigin="anonymous">
<script language="javascript" type="text/javascript"
src="asv.js">
</script>
<script language="javascript" type="text/javascript"
src="asv_ui.js">
</script>
<script language="javascript" type="text/javascript"
src="summarygrid.js">
</script>
<script language="javascript" type="text/javascript"
src="summarylist.js">
</script>
<script language="javascript" type="text/javascript"
src="graphdisplay.js">
</script>
<script language="javascript" type="text/javascript"
src="regressions.js">
</script>
<link href="asv.css" rel="stylesheet" type="text/css"/>
<link href="regressions.css" rel="stylesheet" type="text/css"/>
<link href="summarylist.css" rel="stylesheet" type="text/css"/>
<link rel="shortcut icon" href="swallow.ico"/>
<link rel="alternate" type="application/atom+xml" title="Regressions" href="regressions.xml"/>
</head>
<body>
<nav id="nav" class="navbar navbar-left navbar-default navbar-fixed-top" role="navigation">
<ul class="nav navbar-nav navbar-left">
<li>
<p class="navbar-text">
<a href="http://github.com/airspeed-velocity/asv/" class="navbar-link" target="_blank"><img src="swallow.png" width="22" height="22" alt="swallow"></img>airspeed velocity</a>
of an unladen
<a id="project-name" href="#" class="navbar-link" target="_blank">project</a>
</p>
</li>
<li id="nav-li-" class="active"><a href="#/">Benchmark grid</a></li>
<li id="nav-li-summarylist"><a href="#/summarylist">Benchmark list</a></li>
<li id="nav-li-regressions"><a href="#/regressions">Regressions</a></li>
<li id="nav-li-graphdisplay">
<span class="navbar-brand" id="title">
benchmark
</span>
</li>
</ul>
</nav>
<div id="summarygrid-display" style="position: absolute; left: 0; top: 55px; width: 100%; height: 100%">
</div>
<div id="summarylist-display" style="width: 100%; height: 100%">
<div id="summarylist-navigation" class="asv-navigation" style="position: absolute; left: 0; top: 55px; bottom: 0; width: 200px; overflow-y: scroll">
</div>
<div id="summarylist-body" style="position: absolute; left: 200px; top: 55px; bottom: 0px; right: 0px; overflow-y: scroll;">
</div>
</div>
<div id="graph-display" style="width: 100%; height: 100%;">
<div id="graphdisplay-navigation" class="asv-navigation" style="position: absolute; left: 0; top: 55px; bottom: 0; width: 200px; overflow-y: scroll">
<div class="panel panel-default">
<div class="panel-heading">
commits
</div>
<div class="panel-body">
<input id="range" type="text" class="form-control" size="24" readonly/>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading">
plot settings
</div>
<div class="panel-body">
<div class="btn-group-vertical" style="width: 100%" data-toggle="buttons">
<a id="log-scale" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Use a logarithmic scale on the y-axis">
log scale
</a>
<a id="zoom-y-axis" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Zoom y axis to the range of the data, rather than down to zero.">
zoom <i>y</i> axis
</a>
<a id="reference" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Select a reference point">
reference
</a>
<a id="even-spacing" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Space commits evenly, rather than by revision, along the x-axis">
even commit spacing
</a>
<a id="date-scale" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Space commits by commit date along the x-axis">
date scale
</a>
<a id="show-legend" class="btn btn-default btn-xs" role="button"
data-toggle="tooltip" data-placement="right"
title="Show legend in the graph">
legend
</a>
</div>
</div>
</div>
</div>
<div style="position: absolute; left: 220px; top: 60px; bottom: 10px; right: 20px;">
<div id="graph">
<div style="position: absolute; top: 48px; left: 0; right: 0; bottom: 100px;">
<div id="main-graph" style="min-height: 100px; width: 100%; height: 100%"></div>
</div>
<div style="position: absolute; height: 100px; left: 0; right: 0; bottom: 0; padding-top: 24px">
<div id="overview" style="min-height: 100px; width: 100%; height: 100%"></div>
</div>
</div>
</div>
</div>
<div id="regressions-display" style="position: absolute; left: 0; top: 55px; width: 100%; height: 100%">
<div id="regressions-body">
</div>
</div>
<!-- A modal dialog box for displaying error messages -->
<div class="modal fade" id="error" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title alert alert-danger" id="myModalLabel">Error</h4>
</div>
<div class="modal-body" id="error-message">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

View file

@ -1,4 +0,0 @@
{
"asv-version": "0.6.4",
"timestamp": 1753049912703
}

View file

@ -1,140 +0,0 @@
/*
CAxis Labels Plugin for flot. :P
Copyright (c) 2010 Xuan Luo
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
(function ($) {
var options = { };
function init(plot) {
// This is kind of a hack. There are no hooks in Flot between
// the creation and measuring of the ticks (setTicks, measureTickLabels
// in setupGrid() ) and the drawing of the ticks and plot box
// (insertAxisLabels in setupGrid() ).
//
// Therefore, we use a trick where we run the draw routine twice:
// the first time to get the tick measurements, so that we can change
// them, and then have it draw it again.
var secondPass = false;
plot.hooks.draw.push(function (plot, ctx) {
if (!secondPass) {
// MEASURE AND SET OPTIONS
$.each(plot.getAxes(), function(axisName, axis) {
var opts = axis.options // Flot 0.7
|| plot.getOptions()[axisName]; // Flot 0.6
if (!opts || !opts.axisLabel)
return;
var w, h;
if (opts.axisLabelUseCanvas != false)
opts.axisLabelUseCanvas = true;
if (opts.axisLabelUseCanvas) {
// canvas text
if (!opts.axisLabelFontSizePixels)
opts.axisLabelFontSizePixels = 14;
if (!opts.axisLabelFontFamily)
opts.axisLabelFontFamily = 'sans-serif';
// since we currently always display x as horiz.
// and y as vertical, we only care about the height
w = opts.axisLabelFontSizePixels;
h = opts.axisLabelFontSizePixels;
} else {
// HTML text
var elem = $('<div class="axisLabels" style="position:absolute;">' + opts.axisLabel + '</div>');
plot.getPlaceholder().append(elem);
w = elem.outerWidth(true);
h = elem.outerHeight(true);
elem.remove();
}
if (axisName.charAt(0) == 'x')
axis.labelHeight += h;
else
axis.labelWidth += w;
opts.labelHeight = axis.labelHeight;
opts.labelWidth = axis.labelWidth;
});
// re-draw with new label widths and heights
secondPass = true;
plot.setupGrid();
plot.draw();
} else {
// DRAW
$.each(plot.getAxes(), function(axisName, axis) {
var opts = axis.options // Flot 0.7
|| plot.getOptions()[axisName]; // Flot 0.6
if (!opts || !opts.axisLabel)
return;
if (opts.axisLabelUseCanvas) {
// canvas text
var ctx = plot.getCanvas().getContext('2d');
ctx.save();
ctx.font = opts.axisLabelFontSizePixels + 'px ' +
opts.axisLabelFontFamily;
var width = ctx.measureText(opts.axisLabel).width;
var height = opts.axisLabelFontSizePixels;
var x, y;
if (axisName.charAt(0) == 'x') {
x = plot.getPlotOffset().left + plot.width()/2 - width/2;
y = plot.getCanvas().height;
} else {
x = height * 0.72;
y = plot.getPlotOffset().top + plot.height()/2 - width/2;
}
ctx.translate(x, y);
ctx.rotate((axisName.charAt(0) == 'x') ? 0 : -Math.PI/2);
ctx.fillText(opts.axisLabel, 0, 0);
ctx.restore();
} else {
// HTML text
plot.getPlaceholder().find('#' + axisName + 'Label').remove();
var elem = $('<div id="' + axisName + 'Label" " class="axisLabels" style="position:absolute;">' + opts.axisLabel + '</div>');
if (axisName.charAt(0) == 'x') {
elem.css('left', plot.getPlotOffset().left + plot.width()/2 - elem.outerWidth()/2 + 'px');
elem.css('bottom', '0px');
} else {
elem.css('top', plot.getPlotOffset().top + plot.height()/2 - elem.outerHeight()/2 + 'px');
elem.css('left', '0px');
}
plot.getPlaceholder().append(elem);
}
});
secondPass = false;
}
});
}
$.plot.plugins.push({
init: init,
options: options,
name: 'axisLabels',
version: '1.0'
});
})(jQuery);

View file

@ -1,44 +0,0 @@
#regressions-body {
margin-left: 2em;
margin-right: 2em;
margin-top: 1em;
margin-bottom: 2em;
}
#regressions-body table thead th {
cursor: pointer;
white-space: nowrap;
}
#regressions-body table thead th.desc:after {
content: ' \2191';
}
#regressions-body table thead th.asc:after {
content: ' \2193';
}
#regressions-body table.ignored {
padding-top: 1em;
color: #ccc;
background-color: #eee;
}
#regressions-body table.ignored a {
color: #82abda;
}
#regressions-body .feed-div {
float: right;
}
#regressions-body table tbody td.date {
white-space: nowrap;
}
#regressions-body table button {
margin-top: -2px;
padding-top: 2px;
padding-bottom: 0px;
white-space: nowrap;
}

Some files were not shown because too many files have changed in this diff Show more