Thread-safe connection pool with min/max sizing, lazy growth,
idle recycling, and per-acquire health-check.
API:
pool = informix_db.create_pool(host=..., min_size=1, max_size=10)
with pool.connection() as conn:
...
pool.close()
Design choices:
* Lazy growth from min_size — pre-opens min_size on construction,
grows to max_size on demand. Cheap startup with burst capacity.
* Health-check on acquire, not release. Sends a trivial SELECT 1
round-trip before yielding. Dead idle connections (server-side
timeout, network drop) are silently replaced. The cost is ~1ms
per acquire, paid in exchange for "users never see a stale-
connection error". Check-on-release is wrong because idle time
is when connections actually die.
* Eviction on OperationalError/InterfaceError only. The "with
pool.connection()" context manager retains the connection on
application-level errors (ValueError, IntegrityError, etc.).
Avoids the "every constraint violation evicts a healthy connection"
pitfall.
* Releases the pool lock during connect() — the slow handshake
(50-100ms) doesn't serialize other threads' acquires.
Tests: 15 integration tests in test_pool.py covering:
* API & lifecycle (pre-open, lazy growth, context-manager, LIFO)
* Exhaustion (timeout when full, per-acquire override, unblock-on-release)
* Eviction (explicit broken, auto on OperationalError, retain on
application errors)
* Health-check (dead idle silently replaced)
* Shutdown (close drains, idempotent, context-manager)
* Multi-thread safety (8 workers × 3 queries each, no leaks)
Total: 69 unit + 154 integration = 223 tests.
With Phase 14 (TLS) and Phase 15 (pool), the project covers the
three things a typical Python web/API workload needs from a
database driver: PEP 249 surface, TLS transport, connection pool.
Only async (informix_db.aio) remains in the backlog.
290 lines · 8.6 KiB · Python
"""Phase 15 integration tests — connection pool.

Covers acquire/release, lazy/eager growth, timeout on exhaustion,
broken-connection eviction, health-check on acquire, multi-thread
safety, and clean shutdown.
"""

from __future__ import annotations

import threading
import time

import pytest

import informix_db
from tests.conftest import ConnParams

# Every test here requires a live Informix server.
pytestmark = pytest.mark.integration
|
|
|
|
|
|
def _make_pool(
    params: ConnParams, *, min_size: int = 0, max_size: int = 4, **kw
) -> informix_db.ConnectionPool:
    """Create a pool against the shared integration-test server.

    ``min_size``/``max_size`` default to a small pool; any extra keyword
    arguments (e.g. ``acquire_timeout``) are forwarded unchanged to
    ``informix_db.create_pool``.
    """
    return informix_db.create_pool(
        host=params.host,
        port=params.port,
        user=params.user,
        password=params.password,
        database=params.database,
        server=params.server,
        min_size=min_size,
        max_size=max_size,
        **kw,
    )
|
|
|
|
|
|
# -------- API + lifecycle --------
|
|
|
|
|
|
def test_pool_starts_with_min_size_connections(
    conn_params: ConnParams,
) -> None:
    """Construction eagerly opens exactly ``min_size`` connections."""
    with _make_pool(conn_params, min_size=2, max_size=4) as pool:
        # All pre-opened connections start out idle.
        assert pool.size == 2
        assert pool.idle_count == 2
|
|
|
|
|
|
def test_pool_grows_lazily_to_max_size(conn_params: ConnParams) -> None:
    """Starts empty and opens one connection per demand, up to ``max_size``."""
    with _make_pool(conn_params, min_size=0, max_size=3) as pool:
        assert pool.size == 0
        first = pool.acquire()
        assert pool.size == 1
        second = pool.acquire()
        third = pool.acquire()
        assert pool.size == 3
        for conn in (first, second, third):
            pool.release(conn)
        # Everything handed back is idle again.
        assert pool.idle_count == 3
|
|
|
|
|
|
def test_pool_context_manager_releases(conn_params: ConnParams) -> None:
    """``with pool.connection()`` checks out a connection and auto-returns it."""
    with _make_pool(conn_params, max_size=2) as pool:
        with pool.connection() as conn:
            assert pool.idle_count == 0
            cursor = conn.cursor()
            cursor.execute("SELECT 1 FROM systables WHERE tabid = 1")
            assert cursor.fetchone() == (1,)
        # Exiting the inner block put the connection back into the pool.
        assert pool.idle_count == 1
|
|
|
|
|
|
def test_pool_reuses_connections(conn_params: ConnParams) -> None:
    """Back-to-back checkouts hand out the SAME underlying object (LIFO)."""
    with _make_pool(conn_params, max_size=2) as pool:
        with pool.connection() as first:
            first_id = id(first)
        with pool.connection() as second:
            # Same Connection object reused, not a freshly opened one.
            assert id(second) == first_id
|
|
|
|
|
|
# -------- Exhaustion + timeout --------
|
|
|
|
|
|
def test_pool_acquire_times_out_when_full(conn_params: ConnParams) -> None:
    """Beyond max_size, acquire blocks then raises PoolTimeoutError."""
    with _make_pool(conn_params, max_size=1, acquire_timeout=0.3) as pool:
        held = pool.acquire()
        started = time.monotonic()
        with pytest.raises(informix_db.PoolTimeoutError, match="max_size=1"):
            pool.acquire()
        waited = time.monotonic() - started
        # Blocked roughly for the configured timeout — no early bail,
        # no unbounded wait.
        assert 0.25 < waited < 1.0
        pool.release(held)
|
|
|
|
|
|
def test_pool_acquire_timeout_override(conn_params: ConnParams) -> None:
    """Per-acquire ``timeout`` overrides the pool default."""
    with _make_pool(conn_params, max_size=1, acquire_timeout=10.0) as pool:
        held = pool.acquire()
        started = time.monotonic()
        with pytest.raises(informix_db.PoolTimeoutError):
            pool.acquire(timeout=0.2)
        # Failed fast: the 10-second pool default was not used.
        assert time.monotonic() - started < 1.0
        pool.release(held)
|
|
|
|
|
|
def test_pool_release_unblocks_waiter(conn_params: ConnParams) -> None:
    """A blocked acquire returns as soon as another thread releases."""
    with _make_pool(conn_params, max_size=1, acquire_timeout=2.0) as pool:
        held = pool.acquire()

        # Background thread that will block on acquire (pool is full).
        acquired: list[informix_db.Connection] = []

        def waiter() -> None:
            acquired.append(pool.acquire())

        thread = threading.Thread(target=waiter, daemon=True)
        thread.start()
        time.sleep(0.1)  # give the waiter time to reach the blocking acquire
        assert not acquired  # still parked

        # Handing the connection back must wake the waiter.
        pool.release(held)
        thread.join(timeout=1.0)
        assert len(acquired) == 1
        pool.release(acquired[0])
|
|
|
|
|
|
# -------- Broken connection eviction --------
|
|
|
|
|
|
def test_broken_connection_evicted(conn_params: ConnParams) -> None:
    """``release(broken=True)`` closes the connection and frees its slot."""
    with _make_pool(conn_params, max_size=2) as pool:
        conn = pool.acquire()
        assert pool.size == 1
        pool.release(conn, broken=True)
        # Slot is free again and nothing sits idle — the connection is gone.
        assert pool.size == 0
        assert pool.idle_count == 0
|
|
|
|
|
|
def test_with_block_on_operational_error_evicts(
    conn_params: ConnParams,
) -> None:
    """An OperationalError escaping ``with pool.connection()`` evicts."""
    with _make_pool(conn_params, max_size=2) as pool:
        with pytest.raises(informix_db.OperationalError):
            with pool.connection():
                raise informix_db.OperationalError("simulated failure")
        # The suspect connection was dropped and its slot freed.
        assert pool.size == 0
|
|
|
|
|
|
def test_with_block_on_other_error_returns_to_pool(
    conn_params: ConnParams,
) -> None:
    """Application-level exceptions DON'T evict — the connection is kept."""
    with _make_pool(conn_params, max_size=2) as pool:
        with pytest.raises(ValueError):
            with pool.connection():
                raise ValueError("application bug, not connection")
        # The healthy connection went back into the idle set.
        assert pool.idle_count == 1
|
|
|
|
|
|
# -------- Health check on acquire --------
|
|
|
|
|
|
def test_dead_connection_silently_replaced(
    conn_params: ConnParams,
) -> None:
    """An idle connection that died is dropped and a fresh one minted."""
    pool = _make_pool(conn_params, max_size=2)
    try:
        c1 = pool.acquire()
        pool.release(c1)
        # Forcibly break the idle connection from the outside.
        c1.close()
        # The acquire-time health check should drop c1 and open a new one.
        c2 = pool.acquire()
        # Fix: the old `c2 is not c1 or not c1.closed` was misleading —
        # c1.closed is always True here (we just closed it), so the second
        # arm of the disjunction could never hold.  Assert the actual
        # contract: a fresh Connection object is handed out.
        assert c2 is not c1
        # Verify the replacement is actually usable.
        cur = c2.cursor()
        cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
        assert cur.fetchone() == (1,)
        pool.release(c2)
    finally:
        pool.close()
|
|
|
|
|
|
# -------- Shutdown --------
|
|
|
|
|
|
def test_pool_close_drains_idle(conn_params: ConnParams) -> None:
    """``close()`` closes all idle connections and rejects new acquires."""
    pool = _make_pool(conn_params, min_size=2)
    try:
        # Fix: guard the pre-close assertion — previously a failure here
        # skipped close() and leaked the two pre-opened connections into
        # subsequent tests.
        assert pool.idle_count == 2
    finally:
        pool.close()
    assert pool.size == 0
    with pytest.raises(informix_db.PoolClosedError):
        pool.acquire()
|
|
|
|
|
|
def test_pool_close_idempotent(conn_params: ConnParams) -> None:
    """Calling ``close()`` repeatedly is harmless."""
    pool = _make_pool(conn_params, max_size=1)
    pool.close()
    pool.close()  # second call must be a no-op, not an error
|
|
|
|
|
|
def test_pool_as_context_manager(conn_params: ConnParams) -> None:
    """``with pool: ...`` closes the pool on exit."""
    with _make_pool(conn_params, min_size=1) as pool:
        with pool.connection() as conn:
            cur = conn.cursor()
            cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
            assert cur.fetchone() == (1,)
    # Exiting the outer block closed the pool for good.
    with pytest.raises(informix_db.PoolClosedError):
        pool.acquire()
|
|
|
|
|
|
# -------- Multi-threaded safety --------
|
|
|
|
|
|
def test_pool_thread_safe_concurrent_acquires(
    conn_params: ConnParams,
) -> None:
    """Multiple threads sharing a pool don't deadlock or double-use."""
    pool = _make_pool(conn_params, max_size=4, acquire_timeout=5.0)
    try:
        collected: list[int] = []
        guard = threading.Lock()

        def run_queries() -> None:
            # Each worker checks a connection out and back three times.
            for _ in range(3):
                with pool.connection() as conn:
                    cur = conn.cursor()
                    cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
                    (value,) = cur.fetchone()
                    with guard:
                        collected.append(value)

        workers = [threading.Thread(target=run_queries) for _ in range(8)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join(timeout=10.0)
            assert not worker.is_alive()
        # 8 workers x 3 queries each = 24 results, every one equal to 1.
        assert len(collected) == 24
        assert all(value == 1 for value in collected)
        # No leak: the pool never exceeded max_size connections.
        assert pool.size <= 4
    finally:
        pool.close()
|