Adds tests/benchmarks/ with pytest-benchmark coverage of the hot codec paths and end-to-end SELECT/INSERT/pool/async round-trips. Establishes a committed baseline.json so PRs can be regression-checked at review via --benchmark-compare. * test_codec_perf.py (16): decode/encode_param/parse_tuple_payload micro-benchmarks - run without container, suitable for pre-merge CI. * test_select_perf.py (4): SELECT round-trips - 1-row latency floor, 10-row, 1k-row full fetch, parameterized. * test_insert_perf.py (3): single-row INSERT, executemany 100 / 1000. * test_pool_perf.py (3): cold connect, pool acquire/release, pool acquire + query + release. * test_async_perf.py (2): async round-trip overhead, 10x concurrent. * baseline.json: committed snapshot, 28 measurements. * benchmark pytest marker, gated off by default. * Makefile: bench / bench-codec / bench-save targets; test-integration excludes benchmarks for speed. Headline numbers (dev container loopback): * decode(int): 181 ns * parse_tuple 5 cols: 2.87 µs/row * SELECT 1 round-trip: 177 µs * Pool acquire+query+release: 295 µs * Cold connect: 11.2 ms (72x slower than pool) UTF-8 decode carries no measurable cost vs iso-8859-1 - confirms Phase 20 didn't regress anything. Total: 69 unit + 211 integration + 28 benchmark = 308 tests.
82 lines
2.2 KiB
Python
82 lines
2.2 KiB
Python
"""End-to-end SELECT benchmarks.
|
|
|
|
Measure the full PREPARE → EXECUTE → FETCH → CLOSE → RELEASE round-trip
|
|
for representative query shapes. The codec micro-benchmarks set the
|
|
*ceiling* (best-case CPU); these tell you how much of that ceiling
|
|
the wire protocol + server response time eats.
|
|
|
|
Layered comparison:
|
|
- ``select_one_row`` — protocol-overhead floor (single tiny round-trip)
|
|
- ``select_systables_first`` — small server-side query (~10 rows)
|
|
- ``select_bench_table_all`` — full 1k-row table fetch (sustained throughput)
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import pytest
|
|
|
|
import informix_db
|
|
|
|
pytestmark = [pytest.mark.benchmark, pytest.mark.integration]
|
|
|
|
|
|
def test_select_one_row(benchmark, bench_conn: informix_db.Connection) -> None:
    """Single-row round-trip — protocol-overhead floor.

    Benchmarks one tiny PREPARE → EXECUTE → FETCH → CLOSE cycle on the
    shared ``bench_conn``; the elapsed time is dominated by wire-protocol
    overhead rather than server work.
    """

    def run() -> object:
        cur = bench_conn.cursor()
        # Close the cursor even if execute/fetchone raises: a leaked
        # cursor on the shared bench_conn would poison every subsequent
        # benchmark iteration. try/finally is effectively free on the
        # no-exception path, so the measurement is unaffected.
        try:
            cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
            return cur.fetchone()
        finally:
            cur.close()

    benchmark(run)
|
|
|
|
|
|
def test_select_systables_first_10(benchmark, bench_conn: informix_db.Connection) -> None:
    """Small server-side query — describes 4 columns, returns ~10 rows.

    Benchmarks a short multi-row SELECT against ``systables`` so the
    result includes column-describe plus a small fetch, sitting between
    the 1-row floor and the 1k-row sustained-fetch benchmarks.
    """

    def run() -> list:
        cur = bench_conn.cursor()
        # Ensure the cursor is released even when execute/fetchall
        # raises, so a failing iteration cannot leak server-side
        # resources on the shared connection.
        try:
            cur.execute(
                "SELECT FIRST 10 tabname, owner, tabid, ncols FROM systables"
            )
            return cur.fetchall()
        finally:
            cur.close()

    benchmark(run)
|
|
|
|
|
|
def test_select_bench_table_all(
    benchmark, bench_conn: informix_db.Connection, bench_table: str
) -> None:
    """1000-row sustained fetch — covers the typical reporting query.

    Benchmarks a full-table ``SELECT *`` over the fixture-provided
    ``bench_table`` (presumably ~1k rows per the fixture name — confirm
    against conftest), measuring sustained row-decode throughput.
    """

    def run() -> list:
        cur = bench_conn.cursor()
        # try/finally guarantees the cursor is closed on the shared
        # connection even if the query or fetch raises mid-benchmark.
        try:
            cur.execute(f"SELECT * FROM {bench_table}")
            return cur.fetchall()
        finally:
            cur.close()

    benchmark(run)
|
|
|
|
|
|
def test_select_with_param(
    benchmark, bench_conn: informix_db.Connection, bench_table: str
) -> None:
    """Parameterized SELECT — exercises the BIND path.

    Same shape as the full-table fetch but with a ``?`` placeholder and
    a bound parameter, so the delta against the unparameterized
    benchmarks isolates the cost of parameter binding.
    """

    def run() -> list:
        cur = bench_conn.cursor()
        # Close the cursor even on failure so an aborted iteration does
        # not leave a dangling statement on the shared bench_conn.
        try:
            cur.execute(
                f"SELECT id, name FROM {bench_table} WHERE counter > ?",
                (5000,),
            )
            return cur.fetchall()
        finally:
            cur.close()

    benchmark(run)
|