Ships AsyncConnection, AsyncCursor, and AsyncConnectionPool that
expose async/await versions of the sync API for use with FastAPI,
aiohttp, etc.
Strategy: thread-pool wrapping (aiopg pattern), not native async.
Each blocking I/O call is offloaded to a worker thread via
asyncio.to_thread. The event loop never blocks; queries run in
parallel up to the pool's max_size. Cost: ~250 lines, no changes
to the sync codebase. Native async (Phase 17) would require a
~2000-line transport abstraction refactor — deferred until a real
workload needs it.
For typical FastAPI/aiohttp workloads (request → one query → return),
this is functionally equivalent to native async. Each await yields
the loop while a worker thread does the I/O. Only differs for
hundreds-of-concurrent-connections workloads.
API mirrors the sync API one-to-one:
    import asyncio
    from informix_db import aio

    async def main():
        pool = await aio.create_pool(host=..., min_size=1, max_size=10)
        async with pool.connection() as conn:
            cur = await conn.cursor()
            await cur.execute("SELECT id FROM users WHERE name = ?", (name,))
            row = await cur.fetchone()
        await pool.close()
The async pool preserves the sync pool's eviction policy: connection
errors evict, application errors retain.
Tests: 9 integration tests in test_aio.py covering open/close,
async-with, simple/parameterized SELECT, async-for cursor iteration,
pool acquire/release, 20-query concurrent gather (verifies parallelism
through max_size=5 pool), pool async context manager, commit/rollback.
Total: 69 unit + 163 integration = 232 tests.
Pyproject changes:
* Added pytest-asyncio>=1.3.0 as dev dep
* asyncio_mode = "auto" so async tests don't need decorators
Architectural completion: with Phase 16, every backlog item is
done. The Phase 0 ambition — first pure-Python Informix driver,
no native deps — is now genuinely complete.
270 lines · 8.5 KiB · Python
"""Phase 16 integration tests — async API (``informix_db.aio``).
|
|
|
|
The async API is a thin wrapper around the sync codebase, offloading
|
|
each blocking I/O call to ``asyncio.to_thread``. These tests verify
|
|
the public surface and the core async-context-manager / async-iter
|
|
semantics.
|
|
"""
|
|
|
|
from __future__ import annotations

import asyncio
import contextlib
import time

import pytest

import informix_db
from informix_db import aio
from tests.conftest import ConnParams

# Every test in this module requires a live Informix server.
pytestmark = pytest.mark.integration
|
|
|
|
|
|
# -------- Connection lifecycle --------
|
|
|
|
|
|
async def test_async_connect_and_close(conn_params: ConnParams) -> None:
    """A connection opens in the not-closed state and closes on demand."""
    connect_kwargs = dict(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
    )
    connection = await aio.connect(**connect_kwargs)
    assert not connection.closed
    await connection.close()
    assert connection.closed
|
|
|
|
|
|
async def test_async_connection_as_context_manager(
    conn_params: ConnParams,
) -> None:
    """Entering ``async with`` yields an open connection; exiting closes it."""
    connection = await aio.connect(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
    )
    async with connection as conn:
        assert not conn.closed
    assert conn.closed
|
|
|
|
|
|
# -------- Cursor execute / fetch --------
|
|
|
|
|
|
async def test_async_simple_select(conn_params: ConnParams) -> None:
    """Run one unparameterized SELECT end-to-end through the async API."""
    options = dict(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
    )
    async with await aio.connect(**options) as conn:
        cursor = await conn.cursor()
        await cursor.execute("SELECT FIRST 3 tabname FROM systables")
        fetched = await cursor.fetchall()
        assert len(fetched) == 3
        # Each row carries a single string column (the table name).
        for record in fetched:
            assert isinstance(record[0], str)
        await cursor.close()
|
|
|
|
|
|
async def test_async_select_with_parameters(
    conn_params: ConnParams,
) -> None:
    """Bind a qmark parameter through the async execute path."""
    settings = dict(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
    )
    async with await aio.connect(**settings) as conn:
        cursor = await conn.cursor()
        statement = "SELECT tabname FROM systables WHERE tabid = ?"
        # tabid=1 is always systables itself.
        await cursor.execute(statement, (1,))
        first = await cursor.fetchone()
        assert first == ("systables",)
|
|
|
|
|
|
async def test_async_cursor_iteration(conn_params: ConnParams) -> None:
    """The cursor supports the async-iterator protocol (``async for``)."""
    config = dict(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
    )
    async with await aio.connect(**config) as conn:
        cursor = await conn.cursor()
        await cursor.execute("SELECT FIRST 5 tabname FROM systables")
        collected = [row async for row in cursor]
        assert len(collected) == 5
|
|
|
|
|
|
# -------- Pool --------
|
|
|
|
|
|
async def test_async_pool_basic(conn_params: ConnParams) -> None:
    """Acquire a pooled connection, query through it, and release it back."""
    pool = await aio.create_pool(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
        min_size=1,
        max_size=3,
    )
    try:
        # min_size=1 means one idle connection exists up front.
        assert pool.idle_count == 1
        async with pool.connection() as conn:
            cursor = await conn.cursor()
            await cursor.execute("SELECT FIRST 1 tabname FROM systables")
            first = await cursor.fetchone()
            assert first == ("systables",)
        # Releasing the connection returns it to the idle set.
        assert pool.idle_count == 1
    finally:
        await pool.close()
|
|
|
|
|
|
async def test_async_pool_concurrent_queries(
    conn_params: ConnParams,
) -> None:
    """Twenty gathered queries share a ``max_size=5`` pool without deadlock.

    No server-side SLEEP is available, so parallelism is not timed
    precisely. Instead we verify three things: the gather completes,
    the pool never grows past its ceiling, and the total wall-clock time
    stays far below what a pathologically serialized run would take.
    """
    pool = await aio.create_pool(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
        min_size=0,
        max_size=5,
    )
    try:

        async def run_one(index: int) -> int:
            """Fetch one tabid (1..5) through a pooled connection."""
            async with pool.connection() as conn:
                cursor = await conn.cursor()
                await cursor.execute(
                    "SELECT tabid FROM systables WHERE tabid = ?",
                    (index % 5 + 1,),
                )
                (tabid,) = await cursor.fetchone()
                return tabid

        started = time.monotonic()
        results = await asyncio.gather(*(run_one(i) for i in range(20)))
        wall_clock = time.monotonic() - started

        assert len(results) == 20
        # The pool must never exceed its configured ceiling.
        assert pool.size <= 5
        # Generous bound: 20 trivial queries finish well inside 5 s even
        # on a slow CI box when they overlap; a broken (fully serial)
        # path would take far longer.
        assert wall_clock < 5.0
    finally:
        await pool.close()
|
|
|
|
|
|
async def test_async_pool_context_manager(conn_params: ConnParams) -> None:
    """Leaving ``async with pool`` closes the pool; ``acquire`` then raises."""
    pool = await aio.create_pool(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
        min_size=1,
        max_size=2,
    )
    async with pool:
        async with pool.connection() as conn:
            cursor = await conn.cursor()
            await cursor.execute("SELECT 1 FROM systables WHERE tabid = 1")
            assert await cursor.fetchone() == (1,)
    # __aexit__ closed the pool, so further acquisition must fail.
    with pytest.raises(informix_db.PoolClosedError):
        await pool.acquire()
|
|
|
|
|
|
# -------- Transactions --------
|
|
|
|
|
|
async def test_async_commit_rollback(
    logged_db_params: ConnParams,
) -> None:
    """A committed INSERT persists; a rolled-back one disappears (async path)."""
    params = dict(
        host=logged_db_params.host,
        port=logged_db_params.port,
        user=logged_db_params.user,
        password=logged_db_params.password,
        database=logged_db_params.database,
        server=logged_db_params.server,
    )
    async with await aio.connect(**params) as conn:
        cur = await conn.cursor()

        # DDL runs on a separate autocommit connection so the CREATE
        # takes effect regardless of this test's transaction state.
        async with await aio.connect(**params, autocommit=True) as setup_conn:
            setup_cur = await setup_conn.cursor()
            with contextlib.suppress(Exception):
                await setup_cur.execute("DROP TABLE p16_aio")
            await setup_cur.execute(
                "CREATE TABLE p16_aio (id INT, label VARCHAR(20))"
            )

        try:
            # INSERT then rollback — the row must be invisible afterwards.
            await cur.execute(
                "INSERT INTO p16_aio VALUES (?, ?)", (1, "alpha")
            )
            await conn.rollback()

            await cur.execute("SELECT COUNT(*) FROM p16_aio")
            (count,) = await cur.fetchone()
            assert int(count) == 0

            # INSERT then commit — the row must persist.
            await cur.execute(
                "INSERT INTO p16_aio VALUES (?, ?)", (2, "beta")
            )
            await conn.commit()

            await cur.execute("SELECT id, label FROM p16_aio")
            assert await cur.fetchall() == [(2, "beta")]
        finally:
            # Best-effort teardown on another autocommit connection.
            async with await aio.connect(
                **params, autocommit=True
            ) as cleanup_conn:
                cleanup_cur = await cleanup_conn.cursor()
                with contextlib.suppress(Exception):
                    await cleanup_cur.execute("DROP TABLE p16_aio")
|