add benchmarks suite

Author: Giovanni Barillari
Date:   2022-04-18 15:14:09 +02:00
Parent: 3f030e9d78
Commit: e0d0ee14e9

6 changed files with 352 additions and 0 deletions

.github/workflows/bechmarks.yml (vendored, new file, 42 additions)

@@ -0,0 +1,42 @@
name: benchmarks

on: workflow_dispatch

env:
  MATURIN_VERSION: 0.12.12

jobs:
  benchmarks:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: 3.9
    - uses: messense/maturin-action@v1
      with:
        maturin-version: v${{ env.MATURIN_VERSION }}
        command: build
        args: --release --strip --no-sdist --interpreter python3.9
        target: x64
        manylinux: auto
        container: off
    - name: Deps
      run: |
        pip install target/wheels/granian-0.1.0-cp39-cp39-manylinux_2_28_x86_64.whl
        pip install uvicorn==0.17.6
        pip install httptools
    - name: wrk
      run: |
        git clone https://github.com/wg/wrk.git .wrk
        cd .wrk && make && sudo mv wrk /usr/local/bin
    - name: Benchmarks
      run: |
        cd benchmarks && python benchmarks.py
    - name: Upload results
      uses: actions/upload-artifact@v2
      with:
        name: results
        path: benchmarks/results/*

benchmarks/app/asgi.py (new file, 82 additions)

@@ -0,0 +1,82 @@
import sys

PLAINTEXT_RESPONSE = {
    'type': 'http.response.start',
    'status': 200,
    'headers': [
        [b'content-type', b'text/plain; charset=utf-8'],
    ]
}

BODY_BYTES_SHORT = b"Test"
BODY_BYTES_LONG = b"Test" * 20_000
BODY_STR_SHORT = "Test"
BODY_STR_LONG = "Test" * 20_000


async def b_short(scope, receive, send):
    await send(PLAINTEXT_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': BODY_BYTES_SHORT,
        'more_body': False
    })


async def b_long(scope, receive, send):
    await send(PLAINTEXT_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': BODY_BYTES_LONG,
        'more_body': False
    })


async def s_short(scope, receive, send):
    await send(PLAINTEXT_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': BODY_STR_SHORT.encode("utf8"),
        'more_body': False
    })


async def s_long(scope, receive, send):
    await send(PLAINTEXT_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': BODY_STR_LONG.encode("utf8"),
        'more_body': False
    })


async def handle_404(scope, receive, send):
    content = b'Not found'
    await send(PLAINTEXT_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': content,
        'more_body': False
    })


routes = {
    '/b': b_short,
    '/bb': b_long,
    '/s': s_short,
    '/ss': s_long
}


def app(scope, receive, send):
    handler = routes.get(scope['path'], handle_404)
    return handler(scope, receive, send)


def granian(wrk, thr):
    from granian import Granian
    Granian("asgi:app", workers=int(wrk), threads=int(thr), interface="asgi").serve()


if __name__ == "__main__":
    granian(sys.argv[1], sys.argv[2])
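
For a quick local sanity check of the ASGI handlers above without wrk, the app callable can be driven directly with stub receive/send callables. A minimal sketch, not part of this commit, assuming it is run from benchmarks/app/ so that asgi.py is importable:

# smoke_asgi.py — hypothetical helper, not part of this commit.
# Drives the ASGI app above with a stub send() to check routing and payloads.
import asyncio

from asgi import app  # benchmarks/app/asgi.py


async def main():
    sent = []

    async def receive():
        return {'type': 'http.request', 'body': b'', 'more_body': False}

    async def send(message):
        sent.append(message)

    scope = {'type': 'http', 'path': '/b', 'method': 'GET'}
    await app(scope, receive, send)
    # Expect the shared response-start dict followed by the short bytes body.
    print(sent[0]['status'], sent[1]['body'])


if __name__ == "__main__":
    asyncio.run(main())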

benchmarks/app/rsgi.py (new file, 62 additions)

@@ -0,0 +1,62 @@
import sys

from granian.rsgi import Response

HEADERS = {'content-type': 'text/plain; charset=utf-8'}

BODY_BYTES_SHORT = b"Test"
BODY_BYTES_LONG = b"Test" * 20_000
BODY_STR_SHORT = "Test"
BODY_STR_LONG = "Test" * 20_000


async def b_short(scope, receive):
    return Response(
        1, 200, HEADERS, BODY_BYTES_SHORT, None, None
    )


async def b_long(scope, receive):
    return Response(
        1, 200, HEADERS, BODY_BYTES_LONG, None, None
    )


async def s_short(scope, receive):
    return Response(
        2, 200, HEADERS, None, BODY_STR_SHORT, None
    )


async def s_long(scope, receive):
    return Response(
        2, 200, HEADERS, None, BODY_STR_LONG, None
    )


async def handle_404(scope, receive):
    return Response(
        2, 200, HEADERS, None, "not found", None
    )


routes = {
    '/b': b_short,
    '/bb': b_long,
    '/s': s_short,
    '/ss': s_long
}


def app(scope, receive):
    handler = routes.get(scope.path, handle_404)
    return handler(scope, receive)


def granian(wrk, thr):
    from granian import Granian
    Granian("rsgi:app", workers=int(wrk), threads=int(thr), interface="rsgi").serve()


if __name__ == "__main__":
    granian(sys.argv[1], sys.argv[2])
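
The positional arguments to Response are not documented in this diff; judging only from the calls above, the first argument appears to select the payload slot (1 for the bytes body, 2 for the str body), followed by status, headers, the bytes payload, the str payload, and a final unused slot. A small sketch that makes that reading explicit — hypothetical helpers, not part of this commit, and the real granian.rsgi API may differ:

# rsgi_helpers.py — hypothetical, for illustration only; argument meanings
# are inferred from the Response(...) calls in benchmarks/app/rsgi.py.
from granian.rsgi import Response

PLAIN = {'content-type': 'text/plain; charset=utf-8'}


def bytes_response(body, status=200, headers=PLAIN):
    # first positional argument 1 appears to select the bytes payload
    return Response(1, status, headers, body, None, None)


def str_response(body, status=200, headers=PLAIN):
    # first positional argument 2 appears to select the str payload
    return Response(2, status, headers, None, body, None)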

benchmarks/benchmarks.py (new file, 133 additions)

@@ -0,0 +1,133 @@
import datetime
import json
import math
import multiprocessing
import os
import signal
import subprocess
import time
from contextlib import contextmanager

CPU = multiprocessing.cpu_count()
CONCURRENCIES = [CPU * 2 ** i for i in range(3, 7)]


@contextmanager
def app(name, procs=None, threads=None):
    procs = procs or CPU
    # threads = threads or CPU // 2
    threads = threads or CPU * 2
    proc = {
        "uvicorn_h11": (
            "uvicorn --interface asgi3 "
            "--no-access-log --log-level warning "
            f"--http h11 --workers {procs} app.asgi:app"
        ),
        "uvicorn_httptools": (
            "uvicorn --interface asgi3 "
            "--no-access-log --log-level warning "
            f"--http httptools --workers {procs} app.asgi:app"
        ),
        "asgi": f"python app/asgi.py {procs} {threads}",
        "rsgi": f"python app/rsgi.py {procs} {threads}"
    }
    proc = subprocess.Popen(proc[name], shell=True, preexec_fn=os.setsid)
    time.sleep(2)
    yield proc
    os.killpg(os.getpgid(proc.pid), signal.SIGKILL)


def wrk(duration, concurrency, endpoint):
    threads = max(4, CPU // 2)
    proc = subprocess.run(
        f"wrk -d{duration}s -H \"Connection: keep-alive\" -t{threads} -c{concurrency} "
        f"-s wrk.lua http://localhost:8000/{endpoint}",
        shell=True,
        check=True,
        capture_output=True
    )
    # wrk.lua prints a single comma-separated line on stderr; the indices below
    # follow the field order of the `out` table in benchmarks/wrk.lua.
    data = proc.stderr.decode("utf8").split(",")
    return {
        "requests": {"total": data[1], "rps": data[2]},
        "latency": {"avg": data[11], "max": data[10], "stdev": data[12]}
    }


def benchmark(endpoint):
    results = {}
    # primer
    wrk(5, 8, endpoint)
    time.sleep(5)
    # warmup
    wrk(10, max(CONCURRENCIES), endpoint)
    time.sleep(5)
    # bench
    for concurrency in CONCURRENCIES:
        res = wrk(15, concurrency, endpoint)
        results[concurrency] = res
        time.sleep(3)
    time.sleep(5)
    return results


def procs_threads():
    results = {}
    for procs in [2 ** i for i in range(0, math.ceil(math.log2(CPU)) + 1)]:
        for threads in [2 ** i for i in range(0, math.ceil(math.log(CPU)) + 3)]:
            with app("rsgi", procs, threads):
                results[f"{procs} procs - {threads} threads"] = benchmark("b")
    return results


def rsgi_body_type():
    results = {}
    with app("rsgi"):
        results["bytes small"] = benchmark("b")
        results["str small"] = benchmark("s")
        results["bytes big"] = benchmark("bb")
        results["str big"] = benchmark("ss")
    return results


def rsgi_vs_asgi():
    results = {}
    with app("rsgi"):
        results["RSGI bytes"] = benchmark("b")
        results["RSGI str"] = benchmark("s")
    with app("asgi"):
        results["ASGI bytes"] = benchmark("b")
        results["ASGI str"] = benchmark("s")
    return results


def uvicorn():
    results = {}
    with app("asgi"):
        results["Granian ASGI"] = benchmark("b")
    with app("rsgi"):
        results["Granian RSGI"] = benchmark("b")
    with app("uvicorn_h11"):
        results["Uvicorn H11"] = benchmark("b")
    with app("uvicorn_httptools"):
        results["Uvicorn http-tools"] = benchmark("b")
    return results


def run():
    now = datetime.datetime.utcnow()
    results = {}
    # results["procs_threads"] = procs_threads()
    results["rsgi_body"] = rsgi_body_type()
    results["rsgi_asgi"] = rsgi_vs_asgi()
    results["uvicorn"] = uvicorn()
    with open("results/data.json", "w") as f:
        f.write(json.dumps({
            "cpu": CPU,
            "run_at": now.isoformat(),
            "results": results
        }))


if __name__ == "__main__":
    run()
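
The runner writes everything into a single results/data.json, keyed by suite name, then run label, then concurrency. A minimal sketch of a reader that prints the req/s figures — a hypothetical helper, not part of this commit:

# read_results.py — hypothetical helper to summarise results/data.json.
import json

with open("results/data.json") as f:
    data = json.load(f)

print(f"CPUs: {data['cpu']}  run at: {data['run_at']}")
for suite, runs in data["results"].items():
    for label, by_concurrency in runs.items():
        for concurrency, res in by_concurrency.items():
            print(f"{suite} / {label} @ {concurrency} conns: {res['requests']['rps']} req/s")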

benchmarks/results/.gitignore (vendored, new file, 2 additions)

@@ -0,0 +1,2 @@
*
!.gitignore

benchmarks/wrk.lua (new file, 31 additions)

@@ -0,0 +1,31 @@
done = function(summary, latency, requests)
    out = {
        summary.duration,
        summary.requests,
        summary.requests/(summary.duration/1000000),
        summary.bytes,
        summary.errors.connect,
        summary.errors.read,
        summary.errors.write,
        summary.errors.status,
        summary.errors.timeout,
        latency.min,
        latency.max,
        latency.mean,
        latency.stdev,
        latency:percentile(50),
        latency:percentile(75),
        latency:percentile(90),
        latency:percentile(99),
        latency:percentile(99.999)
    }

    for key, value in pairs(out) do
        if key > 1 then
            io.stderr:write(",")
        end
        io.stderr:write(string.format("%d", value))
    end
end
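
The done() hook prints one comma-separated line to stderr, which is what the wrk() helper in benchmarks.py splits; the field order is fixed by the out table above (wrk reports durations and latencies in microseconds). For reference, the resulting 0-based indices as consumed on the Python side:

# Field order of the comma-separated stderr line emitted by wrk.lua,
# as 0-based indices into data = stderr.decode().split(",") in benchmarks.py.
WRK_FIELDS = {
    0: "duration_us",
    1: "requests",          # used as results["requests"]["total"]
    2: "requests_per_sec",  # used as results["requests"]["rps"]
    3: "bytes",
    4: "errors_connect",
    5: "errors_read",
    6: "errors_write",
    7: "errors_status",
    8: "errors_timeout",
    9: "latency_min_us",
    10: "latency_max_us",   # used as results["latency"]["max"]
    11: "latency_mean_us",  # used as results["latency"]["avg"]
    12: "latency_stdev_us", # used as results["latency"]["stdev"]
    13: "latency_p50_us",
    14: "latency_p75_us",
    15: "latency_p90_us",
    16: "latency_p99_us",
    17: "latency_p99_999_us",
}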