Adds tests/benchmarks/ with pytest-benchmark coverage of the hot codec paths and end-to-end SELECT/INSERT/pool/async round-trips. Establishes a committed baseline.json so PRs can be regression-checked at review via --benchmark-compare.

* test_codec_perf.py (16): decode / encode_param / parse_tuple_payload micro-benchmarks - run without a container, suitable for pre-merge CI.
* test_select_perf.py (4): SELECT round-trips - 1-row latency floor, 10-row, 1k-row full fetch, parameterized.
* test_insert_perf.py (3): single-row INSERT, executemany 100 / 1000.
* test_pool_perf.py (3): cold connect, pool acquire/release, pool acquire + query + release.
* test_async_perf.py (2): async round-trip overhead, 10x concurrent.
* baseline.json: committed snapshot, 28 measurements.
* benchmark pytest marker, gated off by default (see the conftest sketch below).
* Makefile: bench / bench-codec / bench-save targets; test-integration excludes benchmarks for speed.

Headline numbers (dev container loopback):

* decode(int): 181 ns
* parse_tuple, 5 cols: 2.87 µs/row
* SELECT 1 round-trip: 177 µs
* Pool acquire + query + release: 295 µs
* Cold connect: 11.2 ms (72x slower than pool)

UTF-8 decode carries no measurable cost vs. iso-8859-1, which confirms Phase 20 didn't regress anything. Total: 69 unit + 211 integration + 28 benchmark = 308 tests.
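For reviewers who want to see how the marker gating and the shared connection hang together, here is a minimal sketch of what the benchmark conftest wiring could look like. It is illustrative only: the `bench_conn` fixture name and the `benchmark` / `integration` markers come from the tests in this PR, but the module path (`tests/benchmarks/conftest.py`), the `INFORMIX_BENCH_DSN` environment variable, the `informix_db.connect()` signature, and the skip-based gating are assumptions; the actual suite may gate via addopts or the Makefile instead.

```python
# Hypothetical tests/benchmarks/conftest.py -- a sketch, not the committed file.
import os

import pytest

import informix_db


def pytest_configure(config):
    # Register the custom markers so -m selection works without warnings.
    config.addinivalue_line("markers", "benchmark: performance benchmarks")
    config.addinivalue_line("markers", "integration: requires a live Informix server")


def pytest_collection_modifyitems(config, items):
    # Gate benchmarks off by default: unless the run explicitly selects the
    # marker (pytest -m benchmark), everything marked `benchmark` is skipped.
    if "benchmark" in (config.getoption("markexpr") or ""):
        return
    skip_bench = pytest.mark.skip(reason="benchmarks run only with -m benchmark")
    for item in items:
        if "benchmark" in item.keywords:
            item.add_marker(skip_bench)


@pytest.fixture(scope="session")
def bench_conn():
    # One connection shared across the whole benchmark session, so per-test
    # numbers measure statement cost rather than connect cost (cold connect
    # is benchmarked separately in test_pool_perf.py).
    conn = informix_db.connect(os.environ["INFORMIX_BENCH_DSN"])  # assumed DSN env var
    yield conn
    conn.close()
```

With wiring along these lines, the Makefile's bench target can be as simple as `pytest -m benchmark --benchmark-compare=baseline.json`, which is how the committed baseline.json gets used for regression checks at review.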
tests/benchmarks/test_insert_perf.py (107 lines, 2.8 KiB, Python)
"""End-to-end INSERT benchmarks — single-row, executemany, and the gap.
|
|
|
|
The single-row vs. executemany delta is the ``executemany`` win — we
|
|
PREPARE+RELEASE once and BIND+EXECUTE per row, vs PREPARE+RELEASE per
|
|
row. On any decent network this is 10-50x.
|
|
"""
from __future__ import annotations

import contextlib

import pytest

import informix_db

pytestmark = [pytest.mark.benchmark, pytest.mark.integration]


def _setup_temp_table(conn: informix_db.Connection, name: str) -> None:
    cur = conn.cursor()
    with contextlib.suppress(informix_db.Error):
        cur.execute(f"DROP TABLE {name}")
    cur.execute(
        f"CREATE TABLE {name} (id INT, name VARCHAR(64), value FLOAT)"
    )


def _drop_temp_table(conn: informix_db.Connection, name: str) -> None:
    cur = conn.cursor()
    with contextlib.suppress(informix_db.Error):
        cur.execute(f"DROP TABLE {name}")


def test_insert_single_row(benchmark, bench_conn: informix_db.Connection) -> None:
    """Single INSERT per call: full PREPARE+BIND+EXECUTE+RELEASE cycle."""
    table = "p21_ins_single"
    _setup_temp_table(bench_conn, table)
    counter = [0]

    def run() -> None:
        counter[0] += 1
        cur = bench_conn.cursor()
        cur.execute(
            f"INSERT INTO {table} VALUES (?, ?, ?)",
            (counter[0], f"name_{counter[0]}", float(counter[0])),
        )
        cur.close()

    try:
        benchmark(run)
    finally:
        _drop_temp_table(bench_conn, table)


def test_executemany_100_rows(
    benchmark, bench_conn: informix_db.Connection
) -> None:
    """100 INSERTs via executemany: one PREPARE, 100 BIND+EXECUTEs, one RELEASE."""
    table = "p21_ins_emany_100"
    _setup_temp_table(bench_conn, table)
    counter = [0]

    def run() -> None:
        counter[0] += 1
        base = counter[0] * 100
        rows = [
            (base + i, f"row_{base + i}", float(base + i)) for i in range(100)
        ]
        cur = bench_conn.cursor()
        cur.executemany(
            f"INSERT INTO {table} VALUES (?, ?, ?)",
            rows,
        )
        cur.close()

    try:
        benchmark(run)
    finally:
        _drop_temp_table(bench_conn, table)


def test_executemany_1000_rows(
    benchmark, bench_conn: informix_db.Connection
) -> None:
    """1000 INSERTs via executemany: sustained-batch throughput."""
    table = "p21_ins_emany_1000"
    _setup_temp_table(bench_conn, table)
    counter = [0]

    def run() -> None:
        counter[0] += 1
        base = counter[0] * 1000
        rows = [
            (base + i, f"row_{base + i}", float(base + i)) for i in range(1000)
        ]
        cur = bench_conn.cursor()
        cur.executemany(
            f"INSERT INTO {table} VALUES (?, ?, ?)",
            rows,
        )
        cur.close()

    try:
        # Pedantic mode pins the workload to 3 rounds of 1 iteration each,
        # keeping the 1000-row batch from being re-run dozens of times by
        # the auto-calibrating benchmark loop.
        benchmark.pedantic(run, rounds=3, iterations=1)
    finally:
        _drop_temp_table(bench_conn, table)