26
loading...
This website collects cookies to deliver better user experience
FastAPI is asyncio-friendly. Tiangolo, the author, claims that the performance is on par with Go and Node webservers. We're going to see a glimpse of the reason (spoilers: concurrency).

server.py
file:# server.py
import time
from fastapi import FastAPI
app = FastAPI()
@app.get("/wait")
def wait():
    """Blocking endpoint: sleep for one second, then report how long we slept."""
    duration = 1.0
    time.sleep(duration)  # blocks the worker thread for the whole second
    return {"duration": duration}
uvicorn server:app --reload
http://127.0.0.1:8000/wait
something like: { "duration": 1.0 }
client.py
file:# client.py
import functools
import time
import requests
def timed(N, url, fn):
    """Decorator factory: wrap *fn* and print its throughput after each call.

    N is the number of requests the wrapped call performs; the printout
    shows requests per second, the target url and the function's name.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        result = fn(*args, **kwargs)
        elapsed = time.time() - t0
        print(f"{N / elapsed:.2f} reqs / sec | {N} reqs | {url} | {fn.__name__}")
        return result

    return wrapper
def get(url):
    """GET *url* and return the decoded JSON body.

    Uses Response.raise_for_status() instead of ``assert``: asserts are
    stripped under ``python -O``, so the original status check could
    silently vanish. raise_for_status raises requests.HTTPError on 4xx/5xx.
    """
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.json()
def sync_get_all(url, n):
    """Fetch *url* sequentially *n* times; return the list of JSON bodies."""
    return [get(url) for _ in range(n)]
def run_bench(n, funcs, urls):
    """Benchmark every (url, func) pair: run n requests each and print throughput."""
    for target in urls:
        for bench_fn in funcs:
            # timed(...) wraps bench_fn so the call prints its own timing line
            timed(n, target, bench_fn)(target, n)
if __name__ == "__main__":
    # Benchmark the sequential client against the local FastAPI server.
    bench_urls = ["http://127.0.0.1:8000/wait"]
    bench_funcs = [sync_get_all]
    run_bench(10, bench_funcs, bench_urls)
python client.py
0.99 reqs / sec | 10 reqs | http://127.0.0.1:8000/wait | sync_get_all
# client.py
...
from concurrent.futures import ThreadPoolExecutor as Pool
...
def thread_pool(url, n, limit=None):
    """Fetch *url* n times concurrently with a thread pool; return the JSON bodies.

    limit caps the number of worker threads; by default one thread per
    request is used.
    """
    workers = limit or n
    with Pool(max_workers=workers) as pool:
        # Materialize the results inside the `with`: pool.map returns a lazy
        # iterator, and handing that back means callers get a single-pass,
        # un-len()-able object — inconsistent with the list sync_get_all returns.
        return list(pool.map(get, [url] * n))
if __name__ == "__main__":
    # Compare sequential vs thread-pooled clients on the blocking endpoint.
    targets = ["http://127.0.0.1:8000/wait"]
    run_bench(10, [sync_get_all, thread_pool], targets)
0.99 reqs / sec | 10 reqs | http://127.0.0.1:8000/wait | sync_get_all
9.56 reqs / sec | 10 reqs | http://127.0.0.1:8000/wait | thread_pool
async
nor await
...# server.py
...
@app.get("/wait")
def wait():
    """Blocking endpoint, now sleeping only 50 ms per request."""
    duration = 0.05
    time.sleep(duration)  # still blocks the worker thread
    return {"duration": duration}
# client.py
...
if __name__ == "__main__":
urls = ["http://127.0.0.1:8000/wait"]
run_bench(100, [sync_get_all, thread_pool], urls)
15.91 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | sync_get_all
196.06 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | thread_pool
asyncio
keywords.# server.py
import asyncio
...
@app.get("/asyncwait")
async def asyncwait():
    """Non-blocking endpoint: awaiting asyncio.sleep frees the event loop."""
    duration = 0.05
    await asyncio.sleep(duration)
    return {"duration": duration}
# client.py
if __name__ == "__main__":
    # Benchmark both the blocking and the async endpoint.
    urls = ["http://127.0.0.1:8000/wait", "http://127.0.0.1:8000/asyncwait"]
    # 100 requests, not 10: every result line reported for this run says
    # "100 reqs", so the original 10 was a transcription error.
    run_bench(100, [sync_get_all, thread_pool], urls)
15.66 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | sync_get_all
195.41 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | thread_pool
15.52 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | sync_get_all
208.06 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
Ultra fast asyncio event loop.
async
s and await
s. I know you secretly enjoy these.

pip install aiohttp
, then:# client.py
import asyncio
...
import aiohttp
...
async def aget(session, url):
    """Asynchronously GET *url* through *session*; return the decoded JSON body."""
    async with session.get(url) as response:
        assert response.status == 200
        return await response.json()
async def gather_limit(n_workers, *tasks):
    """Like asyncio.gather, but with at most *n_workers* tasks in flight at once.

    Returns the task results in the order the tasks were passed in.
    """
    semaphore = asyncio.Semaphore(n_workers)

    async def bounded(coro):
        # Each task must hold a semaphore slot while it runs.
        async with semaphore:
            return await coro

    return await asyncio.gather(*map(bounded, tasks))
async def aget_all(url, n, n_workers=None):
    """Issue n GETs against *url* over one shared session.

    n_workers caps how many requests are in flight at once (default: all n).
    """
    concurrency = n_workers or n
    async with aiohttp.ClientSession() as session:
        return await gather_limit(concurrency, *(aget(session, url) for _ in range(n)))
def async_main(url, n):
    """Synchronous entry point: run aget_all to completion on a fresh event loop."""
    return asyncio.run(aget_all(url, n))
# client.py
if __name__ == "__main__":
    # Pit all three clients against both endpoints, then scale up to 1000
    # requests for the two concurrent clients.
    endpoints = ["http://127.0.0.1:8000/wait", "http://127.0.0.1:8000/asyncwait"]
    clients = [sync_get_all, thread_pool, async_main]
    run_bench(100, clients, endpoints)
    run_bench(1000, [thread_pool, async_main], endpoints)
15.84 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | sync_get_all
191.74 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | thread_pool
187.36 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | async_main
15.69 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | sync_get_all
217.35 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
666.23 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | async_main
234.24 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | thread_pool
222.16 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | async_main
316.08 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
1031.05 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | async_main
# server.py
...
def fibo(n):
    """Naive exponential-time Fibonacci (fibo(0) == fibo(1) == 1).

    Deliberately unmemoized: it serves as CPU-bound load for the benchmark.
    """
    if n < 2:
        return 1
    return fibo(n - 1) + fibo(n - 2)
@app.get("/fib/{n}")
def fib(n: int):
    """CPU-bound endpoint: compute the n-th Fibonacci number naively."""
    return {"fib": fibo(n)}
curl -I http://127.0.0.1:8000/fib/42
in one and python client.py
in the other, we see the following results:

8.75 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | sync_get_all
54.94 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | thread_pool
60.64 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | async_main
9.52 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | sync_get_all
53.02 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
46.81 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | async_main
72.87 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | thread_pool
122.97 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | async_main
72.36 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
51.73 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | async_main
asyncwait
route x async_main
client). # server.py
...
async def afibo(n):
    """Async naive Fibonacci: every recursive call goes through an await.

    Same result as fibo(n), but each await adds event-loop overhead — the
    work is still CPU-bound, so the loop gains nothing.
    """
    if n < 2:
        return 1
    return (await afibo(n - 1)) + (await afibo(n - 2))
@app.get("/asyncfib/{n}")
async def asyncfib(n: int):
    """Async CPU-bound endpoint: awaiting afibo does not free the CPU."""
    result = await afibo(n)
    return {"fib": result}
# server.py
...
from fastapi import FastAPI, Request
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    """Middleware: stamp each response with an X-Process-Time header (seconds)."""
    start_time = time.time()
    response = await call_next(request)
    response.headers["X-Process-Time"] = str(time.time() - start_time)
    return response
curl -D - http://127.0.0.1:8000/fib/30
HTTP/1.1 200 OK
server: uvicorn
content-length: 15
content-type: application/json
x-process-time: 0.17467308044433594
{"fib":1346269}
curl -D - http://127.0.0.1:8000/asyncfib/30
HTTP/1.1 200 OK
server: uvicorn
content-length: 15
content-type: application/json
x-process-time: 0.46001315116882324
{"fib":1346269}
n
worker processes, and each worker is managed by Uvicorn (with the asynchronous uvloop). Which means:pip install gunicorn
gunicorn server:app -w 2 -k uvicorn.workers.UvicornWorker --reload
19.02 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | sync_get_all
216.84 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | thread_pool
223.52 reqs / sec | 100 reqs | http://127.0.0.1:8000/wait | async_main
18.80 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | sync_get_all
400.12 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
208.68 reqs / sec | 100 reqs | http://127.0.0.1:8000/asyncwait | async_main
241.06 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | thread_pool
311.40 reqs / sec | 1000 reqs | http://127.0.0.1:8000/wait | async_main
433.80 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | thread_pool
1275.48 reqs / sec | 1000 reqs | http://127.0.0.1:8000/asyncwait | async_main