Concurrency with asyncio
Python's async/await enables concurrent I/O without threads. Perfect for network requests, file I/O, and any work where you're waiting — not computing. One thread handles multiple tasks by switching when waiting.
# import asyncio — built-in, no install needed
import asyncio
# async function = coroutine
async def greet(name, delay):
    """Print a greeting, suspend for *delay* seconds without blocking, then say goodbye.

    While this coroutine awaits, the event loop is free to run other tasks.
    """
    print(f"Hello, {name}!")
    await asyncio.sleep(delay)  # Non-blocking sleep (yields control)
    print(f"Goodbye, {name}!")
# Run a coroutine
asyncio.run(greet("Alice", 1))
# Key concepts:
# async def — defines a coroutine function
# await — suspends current coroutine, yields control back to event loop
# asyncio.run() — runs a coroutine and returns when done
Concurrent Execution
# The power of async: run multiple I/O tasks concurrently
import asyncio
import time
async def fetch_data(url, delay):
    """Simulate fetching *url* by sleeping *delay* seconds; return a result string."""
    print(f"Starting: {url}")
    await asyncio.sleep(delay)  # Simulate network request
    print(f"Done: {url}")
    return f"Data from {url}"
# Sequential — total time = sum of all delays
async def sequential():
    """Fetch three URLs one after another — total time is the sum of the delays."""
    start = time.time()
    await fetch_data("url1", 2)
    await fetch_data("url2", 1)
    await fetch_data("url3", 3)
    print(f"Sequential: {time.time()-start:.1f}s")  # ~6 seconds
# Concurrent with gather — total time = max delay
async def concurrent():
    """Fetch three URLs at once via asyncio.gather — total time is the max delay."""
    start = time.time()
    # gather schedules all three coroutines concurrently and preserves order.
    results = await asyncio.gather(
        fetch_data("url1", 2),
        fetch_data("url2", 1),
        fetch_data("url3", 3),
    )
    print(f"Concurrent: {time.time()-start:.1f}s")  # ~3 seconds
    print(results)  # ["Data from url1", "Data from url2", "Data from url3"]
asyncio.run(concurrent())
Real Async HTTP with aiohttp
# pip install aiohttp
import asyncio
import aiohttp
async def fetch_json(session, url):
    """GET *url* using the shared aiohttp *session* and return the decoded JSON body."""
    async with session.get(url) as response:
        return await response.json()
async def main():
    """Fetch several GitHub user profiles concurrently and print their repo counts."""
    urls = [
        "https://api.github.com/users/python",
        "https://api.github.com/users/torvalds",
        "https://api.github.com/users/gvanrossum",
    ]
    # One ClientSession is reused for every request (connection pooling);
    # the async-with block closes it cleanly when all requests finish.
    async with aiohttp.ClientSession() as session:
        tasks = [fetch_json(session, url) for url in urls]
        results = await asyncio.gather(*tasks)
        for user in results:
            print(f"{user['login']}: {user['public_repos']} repos")
asyncio.run(main())
Tasks and Error Handling
# asyncio.create_task — schedule for background execution
async def background_job():
    """Pretend to do 5 seconds of background work, then report completion."""
    await asyncio.sleep(5)
    print("Background done!")
async def main():
    """Run background_job as a concurrent task while doing other work, then await it."""
    # Create task — starts running concurrently
    task = asyncio.create_task(background_job())
    # Do other work while background_job runs
    print("Doing other work...")
    await asyncio.sleep(1)
    # Wait for task to complete
    await task
# Error handling in gather
async def might_fail(x):
    """Return x * 10 after a short pause; raise ValueError when x == 2."""
    if x == 2:
        raise ValueError("x can't be 2")
    await asyncio.sleep(0.1)
    return x * 10
async def main():
    """Gather successes and failures together, then report each one."""
    # gather with return_exceptions=True — collects errors as results
    # instead of cancelling the whole batch on the first failure.
    results = await asyncio.gather(
        might_fail(1),
        might_fail(2),
        might_fail(3),
        return_exceptions=True
    )
    for result in results:
        if isinstance(result, Exception):
            print(f"Error: {result}")
        else:
            print(f"Result: {result}")
# Timeouts
async def slow_operation():
    """Stand in for a call that takes far longer than we are willing to wait."""
    await asyncio.sleep(10)
async def main():
try:
async with asyncio.timeout(3): # Python 3.11+
await slow_operation()
except TimeoutError:
print("Operation timed out")Key Takeaways
- async/await for I/O-bound concurrency: not CPU-bound work
- await yields control: event loop can run other tasks while waiting
- asyncio.gather(): run multiple coroutines concurrently
- asyncio.create_task(): fire-and-forget background coroutines
- Use aiohttp for async HTTP: requests is synchronous, not usable with asyncio
Practice Exercises
- Write an async function that fetches 10 URLs concurrently and measures total time vs sequential.
- Write an async producer/consumer using asyncio.Queue.
- Write an async web scraper that downloads multiple pages concurrently with a semaphore to limit concurrent connections to 5.