Adds tests/benchmarks/ with pytest-benchmark coverage of the hot codec paths and end-to-end SELECT/INSERT/pool/async round-trips. Establishes a committed baseline.json so PRs can be regression-checked at review via --benchmark-compare. * test_codec_perf.py (16): decode/encode_param/parse_tuple_payload micro-benchmarks - run without container, suitable for pre-merge CI. * test_select_perf.py (4): SELECT round-trips - 1-row latency floor, 10-row, 1k-row full fetch, parameterized. * test_insert_perf.py (3): single-row INSERT, executemany 100 / 1000. * test_pool_perf.py (3): cold connect, pool acquire/release, pool acquire + query + release. * test_async_perf.py (2): async round-trip overhead, 10x concurrent. * baseline.json: committed snapshot, 28 measurements. * benchmark pytest marker, gated off by default. * Makefile: bench / bench-codec / bench-save targets; test-integration excludes benchmarks for speed. Headline numbers (dev container loopback): * decode(int): 181 ns * parse_tuple 5 cols: 2.87 µs/row * SELECT 1 round-trip: 177 µs * Pool acquire+query+release: 295 µs * Cold connect: 11.2 ms (72x slower than pool) UTF-8 decode carries no measurable cost vs iso-8859-1 - confirms Phase 20 didn't regress anything. Total: 69 unit + 211 integration + 28 benchmark = 308 tests.
84 lines
2.3 KiB
Python
"""Connection-pool benchmarks — measure the cost of pool acquire/release
|
|
vs. fresh connect.
|
|
|
|
The win on the pool side is *avoiding the login handshake*. Cold connect
|
|
to Informix is ~5-15ms (server-side auth + protocol negotiation). Pool
|
|
acquire is ~50-200µs (validation only). The benchmark makes that delta
|
|
visible.
|
|
"""
|
|
|
|
from __future__ import annotations

import pytest

import informix_db
from informix_db.pool import ConnectionPool, create_pool
from tests.conftest import ConnParams

# Every test in this file needs a live server (integration) and is gated
# behind the benchmark marker so plain `pytest` runs stay fast.
pytestmark = [pytest.mark.benchmark, pytest.mark.integration]
|
@pytest.fixture(scope="module")
def pool(conn_params: ConnParams):
    """Yield a warm, module-scoped pool shared by every benchmark here.

    Module scope keeps the min_size connections alive across the whole
    file, so individual benchmarks measure steady-state acquire cost
    rather than pool warm-up.
    """
    warm_pool = create_pool(
        host=conn_params.host,
        port=conn_params.port,
        user=conn_params.user,
        password=conn_params.password,
        database=conn_params.database,
        server=conn_params.server,
        autocommit=True,
        min_size=2,
        max_size=10,
    )
    try:
        yield warm_pool
    finally:
        # Always tear the pool down, even if a benchmark errored out.
        warm_pool.close()
|
|
|
|
|
|
def test_cold_connect_disconnect(benchmark, conn_params: ConnParams) -> None:
    """Full login handshake + close per call — the worst case."""

    def connect_once() -> None:
        connection = informix_db.connect(
            host=conn_params.host,
            port=conn_params.port,
            user=conn_params.user,
            password=conn_params.password,
            database=conn_params.database,
            server=conn_params.server,
            autocommit=True,
        )
        connection.close()

    # Cold-connect is slow (~10ms); cap at 5 rounds, no per-round iteration
    benchmark.pedantic(connect_once, rounds=5, iterations=1)
|
|
|
|
|
|
def test_pool_acquire_release(benchmark, pool: ConnectionPool) -> None:
    """Pool acquire+release — the steady-state cost of a pooled query."""

    def cycle() -> None:
        # Acquire and immediately release; no query, so this isolates
        # the pool's own bookkeeping + validation overhead.
        with pool.connection():
            pass

    benchmark(cycle)
|
|
|
|
|
|
def test_pool_acquire_query_release(
    benchmark, pool: ConnectionPool
) -> None:
    """Realistic per-query cost: acquire, run a tiny query, release.

    The cursor is closed in a ``finally`` block so a failing
    ``execute``/``fetchone`` cannot leak an open cursor on a connection
    that is about to be returned to the pool.
    """

    def run() -> object:
        with pool.connection() as conn:
            cur = conn.cursor()
            try:
                cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
                return cur.fetchone()
            finally:
                cur.close()

    benchmark(run)
|