Add docs-site: Astro + Starlight at informix-db.warehack.ing

22 pages across Diataxis quadrants (start / how-to / reference / explain).
Custom amber-on-charcoal theme, wire-dump hero animation, Supported
Systems footer badge. caddy-docker-proxy deployment with prod + dev
profiles, Makefile with prod/dev/down/logs/local targets.
This commit is contained in:
Ryan Malloy 2026-05-08 03:23:22 -06:00
parent ad55391bf1
commit 86070e4688
43 changed files with 9297 additions and 0 deletions

12
docs-site/.dockerignore Normal file
View File

@ -0,0 +1,12 @@
node_modules
dist
.astro
.env
.env.local
.git
*.log
README.md
.dockerignore
Dockerfile
docker-compose.yml
Makefile

4
docs-site/.env.example Normal file
View File

@ -0,0 +1,4 @@
COMPOSE_PROJECT=informix-db-docs
DOMAIN=informix-db.warehack.ing
DEV_DOMAIN=informix-db.l.warehack.ing
MODE=prod

21
docs-site/.gitignore vendored Normal file
View File

@ -0,0 +1,21 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env
.env.production
# macOS-specific files
.DS_Store

20
docs-site/Caddyfile Normal file
View File

@ -0,0 +1,20 @@
# Serves the prebuilt static site over plain HTTP on :80. TLS and the
# public hostname are terminated upstream by caddy-docker-proxy, which
# reverse-proxies into this container (see docker-compose labels).
:80 {
	root * /srv
	encode zstd gzip
	file_server

	# Build assets get a year-long immutable cache.
	# NOTE(review): this matcher also catches hand-named files in public/
	# (favicon.svg, supported-systems-logo.svg) that are NOT content-hashed —
	# confirm a 1-year immutable cache is acceptable for those.
	@assets path *.css *.js *.woff2 *.woff *.svg *.png *.webp *.jpg *.jpeg *.ico
	header @assets Cache-Control "public, max-age=31536000, immutable"

	# HTML revalidates within 5 minutes so deploys propagate quickly;
	# basic hardening headers ride along on HTML responses only.
	@html path *.html
	header @html Cache-Control "public, max-age=300, must-revalidate"
	header @html X-Content-Type-Options "nosniff"
	header @html X-Frame-Options "SAMEORIGIN"
	header @html Referrer-Policy "strict-origin-when-cross-origin"

	# On 404, rewrite to the Astro-generated 404 page and serve it.
	handle_errors {
		@404 expression `{err.status_code} == 404`
		rewrite @404 /404.html
		file_server
	}
}

30
docs-site/Dockerfile Normal file
View File

@ -0,0 +1,30 @@
# Multi-stage build: deps -> builder -> prod (Caddy static) | dev (Astro dev server).
# Compose selects `target: prod` or `target: dev` per profile.
ARG NODE_VERSION=22-alpine
ARG CADDY_VERSION=2.10-alpine

# --- deps: install node_modules once; shared by builder and dev stages ---
FROM node:${NODE_VERSION} AS deps
WORKDIR /app
# package-lock.json* glob tolerates a missing lockfile.
COPY package.json package-lock.json* ./
# Prefer reproducible `npm ci`; fall back to `npm install` when there is no
# lockfile (2>/dev/null hides ci's missing-lock error in that case). The
# cache mount keeps the npm download cache across builds.
RUN --mount=type=cache,target=/root/.npm \
    npm ci --no-audit --no-fund 2>/dev/null || npm install --no-audit --no-fund

# --- builder: produce the static site in /app/dist ---
FROM node:${NODE_VERSION} AS builder
WORKDIR /app
ENV ASTRO_TELEMETRY_DISABLED=1 NODE_ENV=production
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN npm run build

# --- prod: minimal Caddy image serving the built output on :80 ---
FROM caddy:${CADDY_VERSION} AS prod
COPY --from=builder /app/dist /srv
COPY Caddyfile /etc/caddy/Caddyfile
EXPOSE 80
# busybox wget ships with the alpine base; probe the site root.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget -q --spider http://127.0.0.1/ || exit 1

# --- dev: Astro dev server with HMR, bound to all interfaces for the proxy ---
FROM node:${NODE_VERSION} AS dev
WORKDIR /app
ENV ASTRO_TELEMETRY_DISABLED=1 NODE_ENV=development
COPY --from=deps /app/node_modules ./node_modules
COPY . .
EXPOSE 4321
CMD ["npm", "run", "dev", "--", "--host", "0.0.0.0"]

33
docs-site/Makefile Normal file
View File

@ -0,0 +1,33 @@
.PHONY: prod dev down logs build clean install local
include .env
export
prod:
docker compose --profile prod up -d --build
dev:
docker compose --profile dev up -d --build
down:
docker compose --profile prod --profile dev down
logs:
docker compose logs -f --tail=200
build:
npm run build
clean:
rm -rf dist node_modules .astro
install:
npm install
local:
npm run dev -- --host 127.0.0.1
status:
@echo "Domain: $(DOMAIN)"
@echo "Project: $(COMPOSE_PROJECT)"
@docker compose ps

49
docs-site/README.md Normal file
View File

@ -0,0 +1,49 @@
# Starlight Starter Kit: Basics
[![Built with Starlight](https://astro.badg.es/v2/built-with-starlight/tiny.svg)](https://starlight.astro.build)
```
npm create astro@latest -- --template starlight
```
> 🧑‍🚀 **Seasoned astronaut?** Delete this file. Have fun!
## 🚀 Project Structure
Inside of your Astro + Starlight project, you'll see the following folders and files:
```
.
├── public/
├── src/
│ ├── assets/
│ ├── content/
│ │ └── docs/
│ └── content.config.ts
├── astro.config.mjs
├── package.json
└── tsconfig.json
```
Starlight looks for `.md` or `.mdx` files in the `src/content/docs/` directory. Each file is exposed as a route based on its file name.
Images can be added to `src/assets/` and embedded in Markdown with a relative link.
Static assets, like favicons, can be placed in the `public/` directory.
## 🧞 Commands
All commands are run from the root of the project, from a terminal:
| Command | Action |
| :------------------------ | :----------------------------------------------- |
| `npm install` | Installs dependencies |
| `npm run dev` | Starts local dev server at `localhost:4321` |
| `npm run build` | Build your production site to `./dist/` |
| `npm run preview` | Preview your build locally, before deploying |
| `npm run astro ...` | Run CLI commands like `astro add`, `astro check` |
| `npm run astro -- --help` | Get help using the Astro CLI |
## 👀 Want to learn more?
Check out [Starlight's docs](https://starlight.astro.build/), read [the Astro documentation](https://docs.astro.build), or jump into the [Astro Discord server](https://astro.build/chat).

119
docs-site/astro.config.mjs Normal file
View File

@ -0,0 +1,119 @@
// @ts-check
import { defineConfig } from 'astro/config';
import starlight from '@astrojs/starlight';

// Public hostname the dev server is reached through. HMR must advertise this
// host so the browser dials back through the TLS proxy rather than the
// container-internal address.
const DEV_DOMAIN = process.env.DEV_DOMAIN ?? 'informix-db.l.warehack.ing';

// Sidebar navigation, shaped after the Diataxis quadrants
// (start / how-to / reference / explain).
const sidebar = [
  {
    label: 'Start here',
    items: [
      { label: 'WTF did you build this for?', slug: 'start/wtf' },
      { label: 'Install & first query', slug: 'start/quickstart' },
      { label: 'Compared to IfxPy', slug: 'start/vs-ifxpy' },
    ],
  },
  {
    label: 'How-to guides',
    items: [
      { label: 'Connect with TLS', slug: 'how-to/tls' },
      { label: 'Use the connection pool', slug: 'how-to/pool' },
      { label: 'Async with FastAPI', slug: 'how-to/async-fastapi' },
      { label: 'Bulk inserts (executemany)', slug: 'how-to/executemany' },
      { label: 'Optimize bulk SELECT', slug: 'how-to/buffered-reader' },
      { label: 'BLOB / CLOB read & write', slug: 'how-to/smart-lobs' },
      { label: 'Migrate from IfxPy', slug: 'how-to/migrate-from-ifxpy' },
      { label: 'Run the dev container', slug: 'how-to/dev-container' },
    ],
  },
  {
    label: 'Reference',
    items: [
      { label: 'API surface', slug: 'reference/api' },
      { label: 'SQL ↔ Python types', slug: 'reference/types' },
      { label: 'Configuration & env flags', slug: 'reference/config' },
      { label: 'Exceptions & error codes', slug: 'reference/exceptions' },
      { label: 'Performance baselines', slug: 'reference/benchmarks' },
    ],
  },
  {
    label: 'Explanation',
    items: [
      { label: 'The SQLI wire protocol', slug: 'explain/sqli-protocol' },
      { label: 'Architecture overview', slug: 'explain/architecture' },
      { label: 'The buffered reader (Phase 39)', slug: 'explain/buffered-reader' },
      { label: 'Async strategy', slug: 'explain/async-strategy' },
      { label: 'Pure-Python tradeoffs', slug: 'explain/pure-python' },
      { label: 'The phase log', slug: 'explain/phase-log' },
    ],
  },
];

// Extra <head> tags: Inter webfont (preconnect + stylesheet) and
// theme-color / Open Graph metadata.
const head = [
  { tag: 'link', attrs: { rel: 'preconnect', href: 'https://rsms.me' } },
  {
    tag: 'link',
    attrs: { rel: 'stylesheet', href: 'https://rsms.me/inter/inter.css' },
  },
  { tag: 'meta', attrs: { name: 'theme-color', content: '#0e0d0c' } },
  { tag: 'meta', attrs: { property: 'og:type', content: 'website' } },
];

// https://astro.build/config
export default defineConfig({
  site: 'https://informix-db.warehack.ing',
  server: { host: '0.0.0.0', port: 4321 },
  // NOTE(review): telemetry is also disabled via ASTRO_TELEMETRY_DISABLED in
  // the Dockerfile; confirm `telemetry` is an accepted defineConfig key in
  // the pinned Astro version.
  telemetry: false,
  devToolbar: { enabled: false },
  vite: {
    server: {
      host: '0.0.0.0',
      // TLS terminates at the proxy, so the HMR client connects to
      // wss://DEV_DOMAIN:443 instead of the dev server's own port.
      hmr: {
        host: DEV_DOMAIN,
        protocol: 'wss',
        clientPort: 443,
      },
      allowedHosts: [DEV_DOMAIN, '.warehack.ing', 'localhost', '127.0.0.1'],
    },
  },
  integrations: [
    starlight({
      title: 'informix-db',
      description: 'Pure-Python driver for IBM Informix IDS. No CSDK, no JVM, no native libraries.',
      logo: { src: './src/assets/logo.svg', replacesTitle: false },
      favicon: '/favicon.svg',
      tableOfContents: { minHeadingLevel: 2, maxHeadingLevel: 4 },
      lastUpdated: true,
      pagination: true,
      editLink: {
        baseUrl: 'https://github.com/rsp2k/informix-db/edit/main/docs-site/',
      },
      social: [
        { icon: 'github', label: 'GitHub', href: 'https://github.com/rsp2k/informix-db' },
        { icon: 'seti:python', label: 'PyPI', href: 'https://pypi.org/project/informix-db/' },
      ],
      customCss: ['./src/styles/theme.css', './src/styles/components.css'],
      // Component overrides: custom hero (wire-dump animation) and footer
      // (Supported Systems badge).
      components: {
        Hero: './src/components/Hero.astro',
        Footer: './src/components/Footer.astro',
      },
      expressiveCode: {
        themes: ['github-dark', 'github-light'],
        styleOverrides: {
          borderRadius: '6px',
          codeFontFamily: "'IBM Plex Mono', ui-monospace, SFMono-Regular, Menlo, monospace",
        },
      },
      head,
      sidebar,
    }),
  ],
});

View File

@ -0,0 +1,46 @@
# Two compose profiles: `prod` serves the static build via Caddy, `dev` runs
# the Astro dev server with live source mounts. Both join the pre-existing
# external `caddy` network so caddy-docker-proxy can route by label.
services:
  docs:
    profiles: ["prod"]
    build:
      context: .
      target: prod
    container_name: ${COMPOSE_PROJECT}-prod
    restart: unless-stopped
    networks:
      - caddy
    labels:
      # caddy-docker-proxy: expose at $DOMAIN, proxy to container port 80.
      caddy: ${DOMAIN}
      caddy.reverse_proxy: "{{upstreams 80}}"

  docs-dev:
    profiles: ["dev"]
    build:
      context: .
      target: dev
    container_name: ${COMPOSE_PROJECT}-dev
    restart: unless-stopped
    # Bind-mount sources so edits on the host hot-reload inside the container.
    volumes:
      - ./src:/app/src:cached
      - ./public:/app/public:cached
      - ./astro.config.mjs:/app/astro.config.mjs:cached
      - ./tsconfig.json:/app/tsconfig.json:cached
      - ./package.json:/app/package.json:cached
    environment:
      # Consumed by astro.config.mjs to set the HMR host/allowedHosts.
      DEV_DOMAIN: ${DEV_DOMAIN}
    networks:
      - caddy
    labels:
      caddy: ${DEV_DOMAIN}
      caddy.reverse_proxy: "{{upstreams 4321}}"
      # Long-lived HMR websocket tuning: disable response buffering and
      # read/write timeouts so the connection can idle indefinitely.
      caddy.reverse_proxy.flush_interval: "-1"
      caddy.reverse_proxy.transport: "http"
      caddy.reverse_proxy.transport.read_timeout: "0"
      caddy.reverse_proxy.transport.write_timeout: "0"
      caddy.reverse_proxy.transport.keepalive: "5m"
      caddy.reverse_proxy.transport.keepalive_idle_conns: "10"
      caddy.reverse_proxy.stream_timeout: "24h"
      caddy.reverse_proxy.stream_close_delay: "5s"

# Shared network created and managed by the caddy-docker-proxy deployment.
networks:
  caddy:
    external: true

6269
docs-site/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

17
docs-site/package.json Normal file
View File

@ -0,0 +1,17 @@
{
"name": "docs-site",
"type": "module",
"version": "0.0.1",
"scripts": {
"dev": "astro dev",
"start": "astro dev",
"build": "astro build",
"preview": "astro preview",
"astro": "astro"
},
"dependencies": {
"@astrojs/starlight": "^0.39.1",
"astro": "^6.2.2",
"sharp": "^0.34.5"
}
}

View File

@ -0,0 +1,6 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32">
<rect width="32" height="32" rx="6" fill="#0e0d0c"/>
<rect x="8" y="11" width="6" height="6" rx="1" fill="#f5a524"/>
<rect x="8" y="19" width="16" height="2" rx="1" fill="#f5a524"/>
<rect x="8" y="23" width="11" height="2" rx="1" fill="#f5a524" opacity="0.55"/>
</svg>

After

Width:  |  Height:  |  Size: 338 B

View File

@ -0,0 +1,66 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 75" height="100%" width="100%">
<!-- Gradient Definitions -->
<defs>
<linearGradient id="gradient1" x1="0%" y1="0%" x2="0%" y2="100%">
<stop offset="0%" stop-color="#60a5fa"></stop>
<stop offset="50%" stop-color="#3b82f6"></stop>
<stop offset="100%" stop-color="#2563eb"></stop>
</linearGradient>
<linearGradient id="gradient2" x1="0%" y1="0%" x2="0%" y2="100%">
<stop offset="0%" stop-color="#93c5fd"></stop>
<stop offset="50%" stop-color="#60a5fa"></stop>
<stop offset="100%" stop-color="#3b82f6"></stop>
</linearGradient>
<linearGradient id="flowGradient" x1="0%" y1="0%" x2="100%" y2="0%">
<stop offset="0%" stop-color="#2563eb"></stop>
<stop offset="50%" stop-color="#60a5fa"></stop>
<stop offset="100%" stop-color="#2563eb"></stop>
</linearGradient>
<pattern id="circuitPattern" patternUnits="userSpaceOnUse" width="12" height="45" patternTransform="scale(1)">
<rect width="12" height="45" fill="url(#gradient1)"></rect>
<path d="M2,5 h8 M2,5 v5 M10,5 v10 M5,15 h5 M5,15 v10 M3,25 h7 M7,25 v10 M3,35 h4" stroke="#dbeafe" stroke-width="0.5" fill="none" opacity="0.7"></path>
<circle cx="2" cy="5" r="1" fill="#dbeafe" opacity="0.7"></circle>
<circle cx="10" cy="5" r="1" fill="#dbeafe" opacity="0.7"></circle>
<circle cx="5" cy="15" r="1" fill="#dbeafe" opacity="0.7"></circle>
<circle cx="3" cy="25" r="1" fill="#dbeafe" opacity="0.7"></circle>
<circle cx="7" cy="35" r="1" fill="#dbeafe" opacity="0.7"></circle>
</pattern>
<pattern id="binaryPattern" patternUnits="userSpaceOnUse" width="12" height="35" patternTransform="scale(1)">
<rect width="12" height="35" fill="#2563eb"></rect>
<text x="3" y="8" font-family="monospace" font-size="3" fill="#FFFFFF" opacity="0.5">10</text>
<text x="3" y="14" font-family="monospace" font-size="3" fill="#FFFFFF" opacity="0.5">01</text>
<text x="3" y="20" font-family="monospace" font-size="3" fill="#FFFFFF" opacity="0.5">11</text>
<text x="3" y="26" font-family="monospace" font-size="3" fill="#FFFFFF" opacity="0.5">00</text>
<text x="3" y="32" font-family="monospace" font-size="3" fill="#FFFFFF" opacity="0.5">10</text>
</pattern>
<pattern id="punchCardPattern" patternUnits="userSpaceOnUse" width="12" height="45" patternTransform="scale(1)">
<rect width="12" height="45" fill="#3b82f6"></rect>
<path d="M0,5 h12 M0,10 h12 M0,15 h12 M0,20 h12 M0,25 h12 M0,30 h12 M0,35 h12 M0,40 h12" stroke="#93c5fd" stroke-width="0.2" fill="none"></path>
<circle cx="3" cy="7" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="9" cy="7" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="6" cy="12" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="3" cy="17" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="9" cy="22" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="6" cy="27" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="3" cy="32" r="1" fill="#1e3a8a" opacity="0.9"></circle>
<circle cx="9" cy="37" r="1" fill="#1e3a8a" opacity="0.9"></circle>
</pattern>
</defs>
<!-- Flow lines behind bars -->
<g opacity="0.3">
<path d="M6,50 C20,40 40,55 48,35 C56,50 75,30 90,55" stroke="url(#flowGradient)" stroke-width="1" fill="none"></path>
<path d="M6,60 C30,50 50,40 70,55 C80,45 90,60 90,60" stroke="url(#flowGradient)" stroke-width="1" fill="none"></path>
</g>
<!-- Bar chart graphic - the "towers" -->
<g>
<rect x="0" y="45" width="12" height="25" rx="1" ry="1" fill="url(#binaryPattern)"></rect>
<rect x="14" y="35" width="12" height="35" rx="1" ry="1" fill="#2563eb"></rect>
<rect x="28" y="25" width="12" height="45" rx="1" ry="1" fill="url(#circuitPattern)"></rect>
<rect x="42" y="20" width="12" height="50" rx="1" ry="1" fill="url(#gradient2)"></rect>
<rect x="56" y="25" width="12" height="45" rx="1" ry="1" fill="url(#punchCardPattern)"></rect>
<rect x="70" y="35" width="12" height="35" rx="1" ry="1" fill="url(#circuitPattern)"></rect>
<rect x="84" y="45" width="12" height="25" rx="1" ry="1" fill="#2563eb"></rect>
<!-- Connecting glow -->
<path d="M12,55 L14,55 M26,45 L28,45 M40,40 L42,40 M54,40 L56,40 M82,55 L84,55" stroke="#bfdbfe" stroke-width="0.8" stroke-opacity="0.6"></path>
</g>
</svg>

After

Width:  |  Height:  |  Size: 4.3 KiB

View File

@ -0,0 +1,7 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 28 24" fill="none" role="img" aria-label="informix-db">
<title>informix-db</title>
<rect x="0" y="4" width="5" height="16" rx="1" fill="#f5a524"/>
<rect x="8" y="4" width="3" height="16" rx="1" fill="#f5a524" fill-opacity="0.78"/>
<rect x="14" y="4" width="3" height="16" rx="1" fill="#f5a524" fill-opacity="0.52"/>
<rect x="20" y="4" width="3" height="16" rx="1" fill="#f5a524" fill-opacity="0.28"/>
</svg>

After

Width:  |  Height:  |  Size: 473 B

View File

@ -0,0 +1,43 @@
---
// Footer override that preserves Starlight's default chrome
// (prev/next pagination, last-updated, edit link) and appends
// a Supported Systems "joint" badge below.
//
// The <slot /> forwards Starlight's footer content into the default
// component unchanged. Badge styling comes from the ifx-ss-badge
// classes — presumably src/styles/components.css; verify.
// NOTE(review): assumes public/supported-systems-logo.svg exists —
// confirm the asset is present.
import Default from '@astrojs/starlight/components/Footer.astro';
---
<Default><slot /></Default>
<aside class="ifx-ss-badge" aria-label="Supported Systems">
  <a class="ifx-ss-badge__link" href="https://supported.systems" rel="noopener">
    <img
      class="ifx-ss-badge__logo"
      src="/supported-systems-logo.svg"
      alt=""
      width="60"
      height="45"
      loading="lazy"
    />
    <div class="ifx-ss-badge__copy">
      <h3 class="ifx-ss-badge__heading">A Supported Systems Joint</h3>
      <p class="ifx-ss-badge__body">
        <code>informix-db</code> is built and maintained by
        <span class="ifx-ss-badge__name">Supported Systems</span> &mdash; a
        boutique software studio focused on thoughtful, user-first technology.
        We take databases personally.
      </p>
      <span class="ifx-ss-badge__cta">
        Visit supported.systems
        <svg viewBox="0 0 16 16" width="14" height="14" aria-hidden="true">
          <path
            d="M4 8h7M8 5l3 3-3 3"
            fill="none"
            stroke="currentColor"
            stroke-width="1.5"
            stroke-linecap="round"
            stroke-linejoin="round"
          />
        </svg>
      </span>
    </div>
  </a>
</aside>

View File

@ -0,0 +1,79 @@
---
// Custom hero for the homepage. Renders the wire-dump easter egg
// alongside the headline + CTA stack. Uses real captured SQLI bytes
// from docs/CAPTURES/01-connect-only.socat.log (truncated and curated).
//
// The {'>'} / {'<'} expressions below are JSX-style escapes so Astro's
// template parser doesn't treat the angle brackets as tag delimiters.
// Line-reveal styling (.is-visible transitions) is expected in
// src/styles/components.css — NOTE(review): confirm.
---
<section class="ifx-hero">
  <div class="ifx-hero__copy">
    <span class="ifx-hero__eyebrow">Pure Python · No CSDK · No JVM · No libcrypt.so.1</span>
    <h1 class="ifx-hero__title">
      Talk to Informix without
      <strong>linking against IBM's 92 MB tarball.</strong>
    </h1>
    <p class="ifx-hero__lede">
      Every other Informix driver wraps IBM's C SDK or the JDBC JAR. We weren't into that.
      So we read the protocol and wrote it ourselves — PEP 249, sync + async, pooled, TLS.
      Within 10% of IBM's own C driver on bulk fetches, <strong>1.6× faster</strong> on
      bulk inserts. No compile step. No <code>LD_LIBRARY_PATH</code> ritual. No
      <code>libcrypt.so.1</code> from 2018.
    </p>
    <div class="ifx-hero__cta">
      <a class="primary" href="/start/quickstart/">Get started →</a>
      <a class="secondary" href="/start/vs-ifxpy/">Compared to IfxPy</a>
      <a class="secondary" href="https://github.com/rsp2k/informix-db">GitHub</a>
    </div>
    <div class="ifx-hero__install">pip install informix-db</div>
  </div>
  <div class="ifx-hero__visual">
    <div class="ifx-wiredump" aria-hidden="true">
      <div class="ifx-wiredump__scroll" id="ifx-wire">
        <span class="ifx-wiredump__line"><span class="ifx-wiredump__direction ifx-wiredump__direction--out">{'>'} OUT</span> 01 c3 01 3c 00 00 00 64 00 65 00 00 00 3d 00 06</span>
        <span class="ifx-wiredump__line"> 49 45 45 45 4d 00 00 6c 73 71 6c 65 78 65 63 00 <span class="ifx-wiredump__byte--ascii">IEEEM..lsqlexec.</span></span>
        <span class="ifx-wiredump__line"> 00 00 00 00 00 00 06 39 2e 32 38 30 00 00 0c 52 <span class="ifx-wiredump__byte--ascii">.......9.280...R</span></span>
        <span class="ifx-wiredump__line"> 44 53 23 52 30 30 30 30 30 30 00 00 05 73 71 6c <span class="ifx-wiredump__byte--ascii">DS#R000000...sql</span></span>
        <span class="ifx-wiredump__line"> 69 00 00 00 01 3c 00 00 00 00 00 00 00 00 00 01 <span class="ifx-wiredump__byte--ascii">i....{'<'}.........</span></span>
        <span class="ifx-wiredump__line"> 00 09 69 6e 66 6f 72 6d 69 78 00 00 07 69 6e 34 <span class="ifx-wiredump__byte--ascii">..informix...in4</span></span>
        <span class="ifx-wiredump__line"></span>
        <span class="ifx-wiredump__line"><span class="ifx-wiredump__direction ifx-wiredump__direction--in">{'<'} IN </span> 01 14 02 3c 10 00 00 64 00 65 00 00 00 3d 00 06</span>
        <span class="ifx-wiredump__line"> 49 45 45 45 49 00 00 6c 73 72 76 69 6e 66 78 00 <span class="ifx-wiredump__byte--ascii">IEEEI..lsrvinfx.</span></span>
        <span class="ifx-wiredump__line"> 00 00 00 00 00 00 2f 49 42 4d 20 49 6e 66 6f 72 <span class="ifx-wiredump__byte--ascii">....../IBM Infor</span></span>
        <span class="ifx-wiredump__line"> 6d 69 78 20 44 79 6e 61 6d 69 63 20 53 65 72 76 <span class="ifx-wiredump__byte--ascii">mix Dynamic Serv</span></span>
        <span class="ifx-wiredump__line"> 65 72 20 56 65 72 73 69 6f 6e 20 31 35 2e 30 2e <span class="ifx-wiredump__byte--ascii">er Version 15.0.</span></span>
        <span class="ifx-wiredump__line"></span>
        <span class="ifx-wiredump__line"><span class="ifx-wiredump__direction ifx-wiredump__direction--out">{'>'} OUT</span> 00 02 00 00 00 00 00 49 73 65 6c 65 63 74 20 46 <span class="ifx-wiredump__byte--ascii">.......Iselect F</span></span>
        <span class="ifx-wiredump__line"> 49 52 53 54 20 31 20 73 69 74 65 20 66 72 6f 6d <span class="ifx-wiredump__byte--ascii">IRST 1 site from</span></span>
        <span class="ifx-wiredump__line"> 20 69 6e 66 6f 72 6d 69 78 2e 73 79 73 74 61 62 <span class="ifx-wiredump__byte--ascii"> informix.systab</span></span>
        <span class="ifx-wiredump__line"> 6c 65 73 20 77 68 65 72 65 20 74 61 62 6e 61 6d <span class="ifx-wiredump__byte--ascii">les where tabnam</span></span>
        <span class="ifx-wiredump__line"> 65 20 3d 20 27 20 47 4c 5f 43 4f 4c 4c 41 54 45 <span class="ifx-wiredump__byte--ascii">e = ' GL_COLLATE</span></span>
      </div>
    </div>
    <div class="ifx-wiredump__caption">
      live capture · 01-connect-only.socat · sqli/9088
    </div>
  </div>
</section>
<script>
  // Reveal lines progressively with reduced-motion respect.
  const reduce = window.matchMedia('(prefers-reduced-motion: reduce)').matches;
  const root = document.getElementById('ifx-wire');
  if (root) {
    const lines = Array.from(root.querySelectorAll('.ifx-wiredump__line'));
    if (reduce) {
      // Reduced-motion preference: show the entire capture immediately.
      lines.forEach((l) => l.classList.add('is-visible'));
    } else {
      let i = 0;
      // Reveal one line every 110–170 ms; the random jitter makes it read
      // like packets arriving rather than a fixed-rate animation.
      const reveal = () => {
        if (i < lines.length) {
          lines[i].classList.add('is-visible');
          i++;
          setTimeout(reveal, 110 + Math.random() * 60);
        }
      };
      // Defer start until after first paint so it feels like a fresh capture.
      requestAnimationFrame(() => setTimeout(reveal, 380));
    }
  }
</script>

View File

@ -0,0 +1,7 @@
import { defineCollection } from 'astro:content';
import { docsLoader } from '@astrojs/starlight/loaders';
import { docsSchema } from '@astrojs/starlight/schema';

// Starlight's docs collection: pages live under src/content/docs/ and are
// validated against the stock Starlight frontmatter schema.
const docs = defineCollection({
  loader: docsLoader(),
  schema: docsSchema(),
});

export const collections = { docs };

View File

@ -0,0 +1,81 @@
---
title: Architecture overview
description: How the layers stack — socket, framing, codec, resultset, cursor, connection, pool.
sidebar:
order: 2
---
The driver is six layers, each with a single responsibility, each testable in isolation.
```text
┌──────────────────────────────────────────────────┐
│ Connection / Pool │ ← public API
├──────────────────────────────────────────────────┤
│ Cursor │ ← PEP 249 surface
├──────────────────────────────────────────────────┤
│ ResultSet │ ← row iteration, prefetch
├──────────────────────────────────────────────────┤
│ Codec / Per-column │ ← decode SQL types → Python
│ readers │
├──────────────────────────────────────────────────┤
│ Protocol / PDU framing │ ← SQLI PDUs over the wire
├──────────────────────────────────────────────────┤
│ IfxSocket (buffered) │ ← raw bytes, recv() management
└──────────────────────────────────────────────────┘
```
## IfxSocket
The lowest layer. Wraps `socket.socket` with a connection-scoped read buffer (Phase 39). One `recv(64K)` per ~64 KB of incoming data; parsers read into the buffer via `struct.unpack_from(buf, offset)` rather than slicing copies.
Everything above this layer is `bytes` and `bytearray` arithmetic — no syscalls except through `IfxSocket.read_exact(n)` and `IfxSocket.write_all(buf)`.
See [The buffered reader →](/explain/buffered-reader/) for why the buffer lives here and not on the parser.
## Protocol / PDU framing
`_protocol.py` reads and writes SQLI PDUs. Each PDU is parsed into a typed Python representation: `SqInfo`, `SqVersion`, `SqTuple`, `SqId`, etc. The framing layer doesn't know what the PDUs *mean* — only how to read and write the byte shapes.
The PDU types and their fields were reverse-engineered from three sources:
1. The decompiled IBM JDBC driver (`com.informix.jdbc.IfxConnection` and the `IfxProtocol` class hierarchy).
2. Annotated `socat` captures of real client/server exchanges (`docs/CAPTURES/`).
3. Differential testing against IfxPy on identical data.
## Codec / Per-column readers
`converters.py` and `_resultset.py` together. The codec layer maps Informix SQL types to Python types — see [SQL ↔ Python types](/reference/types/) for the full table.
Phase 37 introduced **per-column reader strategy**: at PREPARE time, the driver builds a list of decoder functions (one per column) keyed by SQL type. At fetch time, decoding a row is `[reader(payload) for reader in column_readers]` — no per-column dispatch overhead.
Phase 38 went further with `exec()`-based codegen: for the hottest tables, the driver generates a flat decoder function with all readers inlined and dispatch decisions baked in. The generated function is the equivalent of unrolling the per-column dispatch into straight-line code.
## ResultSet
`_resultset.py`. Holds the cursor's column descriptors, the pre-baked decoder list, and the in-flight prefetch state. Manages `fetch_one`, `fetch_many`, `fetch_all` semantics.
Most cursor calls land here: when you do `cur.fetchone()`, the cursor delegates to its ResultSet which reads the next `SQ_TUPLE` PDU, runs it through the per-column decoders, and returns a tuple.
## Cursor
`cursors.py`. The PEP 249 cursor surface. `execute()`, `executemany()`, `fetchone()`, `fetchmany()`, `fetchall()`, `description`, `rowcount`, scrollable cursor support.
This is where parameter binding lives: a SQL statement with `?` placeholders gets prepared via `SQ_PREPARE`, the driver introspects the parameter shape via `SQ_DESCRIBE`, and bound values get encoded according to the parameter types.
## Connection / Pool
`connections.py` and `pool.py`. The connection owns the IfxSocket, manages transaction state, and is the parent of all cursors. The pool is a wrapper around N connections with the usual acquire/release/timeout semantics.
`aio.py` mirrors all of the above with `async def` versions, implemented via thread-pool wrapping (see [Async strategy →](/explain/async-strategy/)).
## Why this layering
Each boundary is testable in isolation:
- `IfxSocket` tests can use a `socket.socketpair()` and assert on byte streams.
- Protocol tests parse known-good captures from `docs/CAPTURES/` and verify the typed PDUs come out correctly.
- Codec tests pass synthetic byte payloads to per-column readers and assert the Python output.
- ResultSet tests can use a fake protocol that emits canned PDU sequences.
- Cursor tests use a fake ResultSet.
When a regression appears, the layered structure narrows the search: a wire-format test failing means it's the protocol layer; a row-tuple test failing with a corrupt-bytes input means the codec; a `fetchall` test failing means the ResultSet's iteration logic. The 300+ test suite leans heavily on this isolation.

View File

@ -0,0 +1,61 @@
---
title: Async strategy
description: Why informix-db wraps a sync core in a thread pool instead of going fully async — and what that costs.
sidebar:
order: 4
---
import { Aside } from '@astrojs/starlight/components';
`informix-db`'s async API (`from informix_db import aio`) is implemented by wrapping the sync core in a thread pool. Every `await conn.execute(...)` schedules the underlying sync `execute()` on the pool's executor.
This is a deliberate architectural choice from Phase 16. Here's the reasoning.
## What we considered
Three options for adding async support to a sync database driver:
1. **Full async I/O refactor.** Rewrite the protocol layer on top of `asyncio.Protocol` or `asyncio.StreamReader`. The codec, framing, and connection state all become coroutines. ~2000 lines of code, full test rewrite.
2. **Thread-pool wrapping.** Keep the sync core. Wrap each public method with `loop.run_in_executor()`. ~250 lines of code, sync tests still apply, no protocol-layer changes.
3. **Dual implementations.** Maintain two parallel code paths — one sync, one async. Most code duplicated. Worst of both worlds.
We picked option 2.
## Why option 2 was the right call
For typical database workloads — request-scoped connections, mostly waiting on I/O — the practical difference between option 1 and option 2 is small:
- **Latency**: option 1 has a slight edge (no thread context switch), but the difference is dwarfed by the actual database round-trip (~80 µs LAN, ~ms WAN). For a single query, option 2 adds ~5–10 µs of executor overhead.
- **Throughput under concurrency**: option 1 wins when you have N coroutines on M physical cores with M < N. The thread pool needs to context-switch between threads; the async loop just runs the next coroutine. For 10–100 concurrent FastAPI requests on a 4-core box, this difference is small.
- **Code complexity**: option 1 is dramatically harder to write and test. The protocol layer becomes asynchronous everywhere; cancellation paths multiply; the shape of "what does a partial PDU read look like" becomes a state machine instead of a `while not done: read_more()`.
For a driver that needs to be production-ready in finite engineering time, option 2 was the right call.
## What it costs
The honest costs:
- **One worker thread per concurrent in-flight query.** With 100 concurrent queries, you have 100 threads. This is fine for I/O-bound work (Python releases the GIL during socket reads) but doesn't scale beyond a few hundred concurrent queries on a single process.
- **Thread-pool sizing matters.** The default executor size (5 × CPU count) is fine for most workloads. For high-concurrency workloads, you may want a larger executor.
- **Cancellation requires thought.** A cancelled `await cur.execute()` cancels the coroutine, but the worker thread continues running until the syscall returns. The connection is marked dirty until then. Phase 27 made this safe — cancelled workers cannot leak onto recycled pool connections — but the underlying syscall does still complete.
## What it doesn't cost
- **Cancellation safety.** This was the original concern. Phase 27's per-connection wire lock + worker reaping makes async cancellation safe in the same sense that `asyncpg`'s is.
- **FastAPI integration.** The `aio.Pool` is a drop-in replacement for any "async database pool" pattern. `Depends(get_conn)` works exactly as you'd expect.
- **Async generator support.** `async for row in cur` works. The fetch is per-row chunked through the executor; the iteration shape is async-native.
## When option 1 might still be worth it
The two scenarios where a full async I/O implementation would matter:
1. **Very high concurrency on a single process** (1000+ in-flight queries). Thread context-switching cost becomes measurable. We haven't hit this in practice.
2. **Sub-millisecond query latencies on an unloaded server.** The 5–10 µs executor overhead is a meaningful percentage. For typical Informix workloads where round-trip is ~80 µs+, it isn't.
If either becomes a real production concern, the layered architecture lets us swap in a fully-async lower half without changing the upper half. The cursor / connection / pool API doesn't care how the bytes get to and from the server. That's the option-2 win we explicitly preserved.
<Aside type="note">
This is a Phase 16 decision. The pivot from "rewrite as async-native" to "wrap the sync core" is documented in [`docs/DECISION_LOG.md`](https://github.com/rsp2k/informix-db/blob/main/docs/DECISION_LOG.md). Three years from now, if it turns out we should have gone with option 1, we have a clear path.
</Aside>

View File

@ -0,0 +1,147 @@
---
title: The buffered reader
description: How Phase 39 closed the bulk-fetch gap from 2.4× to ~1.1× IfxPy by moving the recv() buffer one level down the object graph.
sidebar:
order: 3
---
import { Aside } from '@astrojs/starlight/components';
The bulk-fetch gap against IfxPy stayed stubbornly at ~2× from Phase 36 through Phase 38. Two phases of codec optimization shrank it by a few percent each. Phase 39 — a connection-scoped buffered reader — closed it from 2.4× to ~1.05–1.15× in about thirty minutes of code plus ten minutes of architectural debugging.
This page is about both the technical change and the failure mode that hid the win for two phases.
## The lever we couldn't see
After Phase 38, profiling a 100,000-row fetch showed:
| Category | Self time | % of wall clock |
|---|---:|---:|
| I/O machinery | 555 ms | 66% |
| Codec | 205 ms | 24% |
| Other | ~80 ms | 10% |
The headline "I/O dominated" was true. The interesting half is the breakdown of that 555 ms:
- Actual `recv()` syscalls: ~153 ms
- Python wrapper overhead: ~400 ms
That ~400 ms was our own buffer abstraction — a `read_exact` loop that called `recv()` per fragment, reassembled fragments via `bytes.join`, and traversed two layers of cursor wrappers per call. For 100,000 rows that's **451,402 calls to `read_exact`**, each one paying Python wrapper cost the kernel didn't cause.
The kernel was doing maybe 25–30 ms of work. The other 130 ms of the gap-vs-IfxPy was friction we had introduced ourselves.
## The architecture pattern
Both `asyncpg` (in `buffer.pyx`) and `psycopg3` (in `pq.PGconn`) put a single growing read buffer on the protocol/connection object. The parser indexes into it via `struct.unpack_from(buf, offset)` rather than slicing copies. Refills happen via one large `recv(64K)` rather than many small `recv()`s for individual fields.
Phase 39 ports that pattern to `informix-db`. The state machine:
```text
┌───────────────────────────────┐
│ IfxSocket │
│ ─ socket: socket.socket │
│ ─ buf: bytearray (growable) │
│ ─ offset: int (read cursor) │
│ │
│ recv(64K) when buf exhausted │
└───────────────────────────────┘
│ reads via read_exact(n)
┌───────────────────────────────┐
│ SocketReader (per-PDU) │
│ ─ short-lived view │
│ ─ no buffer of its own │
└───────────────────────────────┘
```
The reader is a parser-view. The buffer outlives the reader. When the parser asks for `read_short()`, the reader calls `socket.read_exact(2)`, which slices two bytes out of the bytearray at `offset` and advances. If the bytearray runs out, `socket.recv(64K)` refills it.
Result: **one `recv()` per ~64 KB of incoming data**, not per field.
## The architectural mistake the first pass got wrong
The natural thing to call this is "BufferedSocketReader". The natural thing to do is put the bytearray on the reader. That's what I did first.
Then `test_executemany_1000_rows` hung. The kernel stack via `cat /proc/PID/wchan` said `wait_woken` — process blocked in `recv()` waiting for bytes that weren't coming.
The bug was foreseeable, and it was architectural rather than implementational. Phase 33's pipelined `executemany` sends N BIND+EXECUTE PDUs back-to-back and drains responses afterward. Each cursor read constructs a *new* reader instance. When my reader did `recv(64K)` and pulled in 600 bytes — 200 bytes for response 1, 400 bytes for response 2 — it consumed bytes for response 2 *and then was destroyed*. The next reader called `recv()`, the kernel buffer was empty, and we waited forever for bytes the kernel had already given to a dead reader.
The fix moved the buffer one level down. The bytearray and offset cursor live on `IfxSocket` (the connection-scoped wrapper) — readers are short-lived parser-views, the buffer outlives them.
```python
# WRONG (first pass) — buffer scoped to reader
class BufferedSocketReader:
def __init__(self, sock):
self.sock = sock
self.buf = bytearray() # ← dies with the reader
self.offset = 0
# RIGHT (Phase 39) — buffer scoped to connection
class IfxSocket:
def __init__(self, sock):
self.sock = sock
self._read_buf = bytearray() # ← outlives all readers
self._read_offset = 0
def read_exact(self, n):
if self._read_offset + n > len(self._read_buf):
self._refill()
...
```
asyncpg and psycopg3 both place the buffer on the protocol/connection object. The architectural template was sitting in front of me before I started; I built the wrong shape anyway because "buffered reader" implies the buffer is *on* the reader.
It is not. The reader is a view. The buffer is state.
## The numbers
A/B-measured against the same Docker container, warmed cache, only the env flag differing:
| Workload | Phase 38 | Phase 39 | Δ |
|---|---:|---:|---:|
| `select_scaling_1000` | 2.901 ms | 1.716 ms | **−41%** |
| `select_scaling_10000` | 24.317 ms | 16.084 ms | **−34%** |
| `select_scaling_100000` | 250.363 ms | 168.982 ms | **−32%** |
Re-running the IfxPy comparison after Phase 39:
| Workload | IfxPy 2.0.7 (C) | informix-db Phase 39 | Ratio |
|---|---:|---:|---:|
| `select_scaling_1000` | 1.637 ms | 1.716 ms | **1.05×** |
| `select_scaling_10000` | 15.07 ms | 16.08 ms | **1.07×** |
| `select_scaling_100000` | 147.4 ms | 169.0 ms | **1.15×** |
The 2.4× steady-state gap that existed before Phase 37 is now within 5–15% of the C driver, and the lower bound may already be within IfxPy's own measurement noise (its IQR on the 100k workload is 21%; ours is 0.2%).
## What the feature flag does
The buffered reader ships **enabled by default** in version 2026.05.05.12. To opt out (debugging, regressing a workload, A/B-measuring your own code):
```bash
IFX_BUFFERED_READER=0 python my_app.py
```
The flag is read once at connection construction. Existing connections in a pool aren't affected by changing the env at runtime — close and reopen the pool to flip behavior.
<Aside type="note">
The flag exists to make A/B measurement easy. There's no expected reason to disable it in production. If you hit a workload where the buffered reader is slower, that's a bug and we'd like to know.
</Aside>
## What we learned
The general pattern: **what's visible gets optimization attention; what's invisible gets written off as irreducible**.
The codec is visible — there's a loop, a `_decode_varchar` function, a `struct.unpack` call. You can read the inner loop and reason about it. Phases 37 and 38 attacked it, both got modest wins.
The I/O machinery looked invisible. `_socket.read_exact` is eight lines. The cursor's `_SocketReader` wrapper is twelve. The framing reads — `read_short`, `read_int`, `read_exact(payload_size)` — are one-liners. What could be slow about that?
It was carrying ~30% of total wall time. Two phases of changelogs implicitly blamed "the protocol" for the remaining gap. The actual culprit was a few lines of `bytes.join` in a wrapper from Phase 1 that nobody had revisited.
The lesson is small and easy to state: a profile turns vibes into an attack surface. Write the closing paragraph after you've measured, not before.
## Read more
- **[Architecture overview →](/explain/architecture/)** — where the buffered reader sits in the layer stack.
- **[Phase log →](/explain/phase-log/)** — the full progression from Phase 1 through Phase 39+.
- **["The 156 Milliseconds I'd Been Hand-Waving About"](https://ryanmalloy.com/collaborations/the-156-milliseconds-i-d-been-hand-waving-about/)** — Claude's reflection on the session in which Phase 39 shipped, including the two pushbacks that triggered the work.

View File

@ -0,0 +1,87 @@
---
title: The phase log
description: Phase-by-phase narrative of how informix-db got built, with notable architectural decisions called out.
sidebar:
order: 6
---
The driver was built across 39+ phases, each with a focused scope and a decision log. This page is a high-level index; the gory details (with rationale, alternatives considered, and rollback notes) live in [`docs/DECISION_LOG.md`](https://github.com/rsp2k/informix-db/blob/main/docs/DECISION_LOG.md).
## Foundation (Phases 1–10)
| Phase | Title | Outcome |
|---|---|---|
| 1 | Socket + minimal SQ_INFO | First handshake against the dev container |
| 2–4 | Login, DBOPEN, error decoding | Can connect and select a database |
| 5 | Statement execution | `SELECT 1` works |
| 6 | Parameter binding | `?`-placeholders, basic types |
| 7 | Logged-DB transactions | Discovered Informix needs explicit `SQ_BEGIN` per tx in non-ANSI mode |
| 8 | BYTE / TEXT (legacy in-row blobs) | Needs blobspace |
| 9 | Scrollable cursors | `SQ_SCROLL` PDU, position semantics |
| 10/11 | Smart-LOB read & write | **Architectural pivot** to `SQ_FILE` intercept; ~3× smaller than projected |
## Hardening (Phases 12–20)
| Phase | Title | Outcome |
|---|---|---|
| 12 | Type system overhaul | Per-column readers (predecessor to Phase 37) |
| 13 | DECIMAL / MONEY exact precision | `decimal.Decimal` round-trip |
| 14 | DATETIME range typing | Returns `date` / `datetime` / `time` per field range |
| 15 | INTERVAL types | Custom `IntervalYM`, `timedelta` for D-to-F |
| 16 | Async API | **Pivot** to thread-pool wrapping (~250 lines) instead of full async refactor (~2000 lines) |
| 17 | Connection pool (sync) | min/max sizing, acquire timeout, max idle |
| 18 | Connection pool (async) | Mirror of sync API on `aio.Pool` |
| 19 | TLS support | Bring-your-own-context, `tls=True` for dev |
| 20 | Locale / Unicode | `client_locale`, full mapping in [`Connection.encoding`](/reference/types/) |
## Production review (Phases 21–30)
| Phase | Title | Outcome |
|---|---|---|
| 21 | Type-checking pass | `py.typed`, full mypy/pyright coverage |
| 22 | Error code mapping | SQLCODE → exception per [reference](/reference/exceptions/) |
| 23 | Health checks | Pool validates idle connections before return |
| 24 | Statement caching | Per-connection prepared-statement cache |
| 25 | Fast-path call (`SQ_FPROUTINE`) | Direct UDF/SPL invocation, bypassing PREPARE |
| 26 | **CRITICAL** | Pool returned connections with open transactions — fixed |
| 27 | **CRITICAL** | Per-connection wire lock + async cancellation safety |
| 28 | **HIGH** | `_raise_sq_err` bare-except masking wire desync — fixed |
| 29 | Cursor finalizers | Server-side resource leak on mid-fetch raise — fixed |
| 30 | Hardening pass | 5 medium-severity audit findings — all closed |
After Phase 30: **0 critical, 0 high, 0 medium audit findings remain.** Driver is production-ready.
## Performance (Phases 31–39)
| Phase | Title | Result |
|---|---|---|
| 31 | Statement cache LRU tuning | Better hit rate on repeated queries |
| 32 | Cursor lifecycle optimization | Fewer round-trips on small queries |
| 33 | **Pipelined `executemany`** | **1.6× faster than IfxPy on bulk inserts** |
| 34 | LRU caches for type lookup | Removed dispatch overhead on hot paths |
| 35 | Memory profile pass | Identified 100k-row baseline |
| 36 | `IfxPy` comparison harness | Established the 2.4× bulk-fetch gap |
| 37 | **Per-column reader strategy** | −10% on bulk SELECT, ratio → 2.10× |
| 38 | **`exec()`-based row-decoder codegen** | Further 1–2%, ratio → 2.04× |
| 39 | **Connection-scoped buffered reader** | **−32% on bulk SELECT, ratio → 1.05–1.15×** |
The Phase 3739 trajectory is documented in detail at [The buffered reader →](/explain/buffered-reader/), including the architectural mistake the first pass got wrong.
## Notable architectural pivots
The decision log calls out four moments where the obvious choice would have been wrong:
1. **Phase 10/11** — abandoning `SQ_FPROUTINE` + `SQ_LODATA` for `SQ_FILE` intercept. Smaller, simpler, same correctness.
2. **Phase 16** — thread-pool async instead of full async refactor. ~88% less code, same FastAPI surface.
3. **Phase 27** — adding a per-connection wire lock instead of relying on PEP 249's "don't share connections" advice. Made accidental sharing safe rather than catastrophic.
4. **Phase 39** — buffer on the connection, not on the reader. Got it wrong on the first pass; the bug surfaced as a hang on pipelined `executemany`. Fixed in ten minutes once the architectural mistake was named.
## What's next
The roadmap (loose, not committed):
- **Phase 40+ (codec)**: Numpy-backed bulk decode for homogeneous columns. ~5× speedup target on analytical workloads.
- **Phase 4x (protocol)**: Optional Cython acceleration for the codec hot loop. Would compromise "pure Python" — gated behind a build flag.
- **Phase 5x (API)**: Native `callproc` with named parameters, IBM-specific scrollable cursor extensions for full IfxPy parity.
The phase log is updated as work lands. The repo's [`CHANGELOG.md`](https://github.com/rsp2k/informix-db/blob/main/CHANGELOG.md) is the source of truth for shipped changes.

View File

@ -0,0 +1,63 @@
---
title: Pure-Python tradeoffs
description: What pure-Python costs, what it pays for, and where the actual performance ceiling sits.
sidebar:
order: 5
---
The premise of this driver is *zero native code in the call stack*. No CSDK, no JDBC, no C extension we maintain ourselves. Just `socket`, `struct`, `decimal`, and the standard library.
This page is about what that costs and what it pays for.
## What pure-Python costs
The honest accounting:
| Cost | Magnitude | Mitigation |
|---|---|---|
| Per-row decode overhead | ~2.0 µs/row vs IfxPy's ~1.1 µs/row | Phases 37–38 codec inlining brought us from 4 µs to 2 µs. |
| Per-PDU parser overhead | ~5–10 µs vs C's ~1 µs | Phase 39 buffered reader removed the worst of it (the read-side wrapper cost). |
| GIL contention on multi-threaded decode | Threads serialize through codec hot loops | Pool gives one connection per thread; codec releases GIL during I/O. |
| Memory per connection | ~50–500 KB (Phase 39 buffer) | Pool keeps it bounded; freed on connection close. |
The order-of-magnitude intuition: pure-Python is ~2× slower than C-bound for **codec-bound workloads** (large analytical fetches), and **competitive or faster** for I/O-bound workloads (transactional, bulk-insert, FastAPI request-response).
## What pure-Python pays for
The benefits are mostly deployment, not performance:
- **50 KB wheel** — installable in a slim Docker image without a build toolchain.
- **No `libcrypt.so.1`** — works on Arch, Fedora 35+, RHEL 9, and any modern Linux.
- **Python 3.10–3.14** — no minor-version-specific C extension breakage. We've shipped on the day each new Python released.
- **Type annotations everywhere**`py.typed` flag, full coverage in mypy / pyright.
- **Auditable codepaths** — every byte that enters or leaves a socket goes through Python code you can read. No "the C extension does it" excuses.
- **Async without `run_in_executor`** — native `async def` API, FastAPI-compatible.
For most real workloads, deployment friction matters more than 2 µs/row. The 92 MB OneDB tarball, the four `LD_LIBRARY_PATH` entries, the absent `libcrypt.so.1` — those costs are paid every time you deploy. The 2 µs/row codec gap is paid once per row, and only if your workload is read-heavy enough for it to dominate.
## Where the ceiling sits
For codec-bound workloads, the ceiling we've hit is around 2 µs/row for tabular data. The breakdown:
- `struct.unpack_from(format, buf, offset)` per field: ~80 ns
- `bytes``str` decoding (varchar): ~150 ns
- Tuple construction: ~100 ns
- Cursor / ResultSet bookkeeping: ~50 ns
Five fields × ~250 ns/field + ~250 ns overhead = ~1.5 µs. We're at ~2.0 µs which means ~30% overhead remains. That's the gap between "we've inlined everything that's reasonable" and "the C version still wins."
Strategies for closing further:
- **Cython / mypyc compilation.** Could shave 30-50% off the codec hot loop. Would compromise the "pure Python" claim — there'd be a build step.
- **Bytecode optimization via `exec()`-codegen** (the Phase 38 approach). Marginal further wins; we've already extracted most of what's available.
- **Numpy-backed bulk decode** for homogeneous columns. Promising for analytical workloads. ~5× speedup possible for `SELECT col FROM huge_table` over the current per-row approach. Probably Phase 41+.
For I/O-bound workloads we're already at the ceiling. The buffered reader closed the I/O gap; further wins are at the kernel level (e.g. `recvmsg` for vectored reads), which is moot since the kernel already isn't the bottleneck.
## The honest summary
Pure-Python costs us ~5–15% on bulk-fetch workloads and zero (or favorable) on everything else. The deployment, async, and modern-Python wins are large and don't depend on workload.
If the codec gap matters for your case — analytical reporting against a wide table, pulling millions of rows in a single SELECT — IfxPy is probably the right tool today. If you're doing transactional or bulk-load work, FastAPI services, or any deployment where IBM's C SDK is friction, `informix-db` is the right tool.
The driver chose the goal — *first pure-socket Informix driver in any language* — over the local optimum. Phase 37 onward is a sustained effort to make that choice cost as little as possible.

View File

@ -0,0 +1,84 @@
---
title: The SQLI wire protocol
description: A short tour of Informix's SQLI protocol — PDU framing, the handshake, statement execution, fetch.
sidebar:
order: 1
---
import { Aside } from '@astrojs/starlight/components';
SQLI is Informix's wire protocol — the same protocol IBM's CSDK and JDBC driver speak. It's a binary, length-prefixed PDU stream over a single TCP connection.
This page is a short tour. The byte-level reference (with hex annotations) lives in [`docs/PROTOCOL_NOTES.md`](https://github.com/rsp2k/informix-db/blob/main/docs/PROTOCOL_NOTES.md) in the repo.
## PDU framing
Every PDU starts with a 1-byte tag and (mostly) ends with a 2-byte EOT marker. Common tags:
| Tag | Hex | Direction | Purpose |
|---|---|---|---|
| `SQ_INFO` | `0x01` | →S | Initial capability/identity exchange |
| `SQ_VERSION` | `0x14` | S→ | Server version response |
| `SQ_PASSWD` | `0x18` | →S | Authentication |
| `SQ_DBOPEN` | `0x24` | →S | Open database |
| `SQ_PREPARE` | `0x02` | →S | Prepare statement |
| `SQ_DESCRIBE` | `0x08` | →S | Describe column structure |
| `SQ_OPEN` | `0x06` | →S | Open cursor |
| `SQ_FETCH` | `0x04` | →S | Fetch rows |
| `SQ_TUPLE` | `0x09` | S→ | One row of data |
| `SQ_ID` | `0x37` | S→ | SQLCODE / status code |
| `SQ_FILE` | `0x62` | both | Smart-LOB transfer |
| `SQ_EOT` | `0x0c` | both | End-of-transmission |
The trailing `00 0c` (length=0, tag=0x0c) marks the end of every multi-PDU response.
## A connect, in bytes
Annotated output from `docs/CAPTURES/01-connect-only.socat.log`:
```text
> OUT 01 c3 01 3c 00 00 00 64 00 65 00 00 00 3d 00 06 ; SQ_INFO
49 45 45 45 4d 00 00 6c 73 71 6c 65 78 65 63 00 ; "IEEEM..lsqlexec"
00 00 00 00 00 00 06 39 2e 32 38 30 00 00 0c 52 ; ".......9.280...R"
44 53 23 52 30 30 30 30 30 30 00 00 05 73 71 6c ; "DS#R000000...sql"
69 00 00 00 01 3c 00 00 00 00 00 00 00 00 00 01 ; "i....<.........."
...
< IN 01 14 02 3c 10 00 00 64 00 65 00 00 00 3d 00 06 ; SQ_VERSION
49 45 45 45 49 00 00 6c 73 72 76 69 6e 66 78 00 ; "IEEEI..lsrvinfx."
00 00 00 00 00 00 2f 49 42 4d 20 49 6e 66 6f 72 ; "....../IBM Infor"
6d 69 78 20 44 79 6e 61 6d 69 63 20 53 65 72 76 ; "mix Dynamic Serv"
65 72 20 56 65 72 73 69 6f 6e 20 31 35 2e 30 2e ; "er Version 15.0."
```
The payload of `SQ_INFO` is a sequence of length-prefixed strings: byte-order marker (`IEEEM` = big-endian), client app name (`sqlexec`), client version (`9.280`), build ID, protocol token (`sqli`), feature flags. The server's `SQ_VERSION` response mirrors this with the server's own identification.
## Statement execution
The full lifecycle for `SELECT id FROM users WHERE id = ?`:
```text
→ SQ_PREPARE "SELECT id FROM users WHERE id = ?"
← SQ_ID (statement ID, parameter shape, ...)
→ SQ_DESCRIBE
← SQ_DESC (column descriptors: "id" SMALLINT)
→ SQ_OPEN (parameter values: 42)
← SQ_ID (cursor ID)
→ SQ_FETCH
← SQ_TUPLE (id=42)
← SQ_TUPLE (or SQ_DONE)
→ SQ_CLOSE (release cursor)
← SQ_ID
```
For pipelined `executemany`, the driver sends `SQ_OPEN`+`SQ_FETCH` (or `SQ_BIND`+`SQ_EXEC`) for all N rows back-to-back without waiting for responses, then drains all responses at the end. This is what gives the 1.6× win over IfxPy on bulk inserts — see [Bulk inserts](/how-to/executemany/).
## Smart-LOB transfer
`SQ_FILE` (0x62) is a self-contained PDU type that carries chunks of BLOB/CLOB data. It's used by Informix's `lotofile` and `filetoblob` server functions. The driver intercepts these PDUs at the wire level and reassembles them client-side — no `SQ_FPROUTINE` / `SQ_LODATA` machinery needed.
This was the architectural pivot in [Phase 10/11](/explain/phase-log/) that made smart-LOBs work end-to-end in pure Python. Reading and writing GB-sized BLOBs goes through the same socket as any other query.
<Aside type="note">
The protocol has many more PDU types than this page covers — mostly variants for specific server features (PUT, GET-DESCRIPTOR, ROWDESC, DBINFO, COLLINFO, etc.). The complete list is in [`docs/PROTOCOL_NOTES.md`](https://github.com/rsp2k/informix-db/blob/main/docs/PROTOCOL_NOTES.md), with hex captures for each.
</Aside>

View File

@ -0,0 +1,85 @@
---
title: Async with FastAPI
description: Wire the async pool into a FastAPI app with proper lifecycle management.
sidebar:
order: 3
---
import { Aside } from '@astrojs/starlight/components';
`informix-db` has a native async API. Use it from FastAPI by creating the pool in the app's lifespan and dependency-injecting connections per request.
## App skeleton
```python
from contextlib import asynccontextmanager
from fastapi import FastAPI, Depends, HTTPException
from informix_db import aio
@asynccontextmanager
async def lifespan(app: FastAPI):
app.state.pool = await aio.create_pool(
host="db.example.com",
user="informix", password="...",
database="mydb", server="informix",
min_size=2, max_size=20,
)
yield
await app.state.pool.close()
app = FastAPI(lifespan=lifespan)
async def get_conn():
async with app.state.pool.connection() as conn:
yield conn
@app.get("/users/{user_id}")
async def get_user(user_id: int, conn = Depends(get_conn)):
cur = await conn.cursor()
await cur.execute(
"SELECT id, name, email FROM users WHERE id = ?",
(user_id,),
)
row = await cur.fetchone()
if row is None:
raise HTTPException(404, "user not found")
return {"id": row[0], "name": row[1], "email": row[2]}
```
## Why this shape
- **Lifespan-scoped pool**: the pool lives for the lifetime of the app, login handshake amortized across all requests.
- **Per-request connection via `Depends`**: each request gets its own connection from the pool. The async generator pattern (`yield conn`) means the connection returns to the pool when the request finishes, including on exception.
- **No `run_in_executor`**: every call is `await`able natively. No event-loop blocking, no thread-pool tuning.
## Cancellation
If a client disconnects mid-request, FastAPI cancels the task. `informix-db` is cancellation-safe — the cancellation propagates cleanly, the in-flight worker is reaped, and the connection returns to the pool clean (transactions rolled back). You don't need to wrap anything in `try/finally`.
<Aside type="note">
This is a Phase 27 invariant: async cancellation cannot leak running workers onto recycled connections. The earlier behavior was a `High` audit finding; the fix is a CI tripwire test that's been green every commit since.
</Aside>
## Connection-level transactions
For request-scoped transactions, use a context manager around the connection:
```python
@app.post("/orders")
async def create_order(order: OrderIn, conn = Depends(get_conn)):
async with conn.transaction():
cur = await conn.cursor()
await cur.execute(
"INSERT INTO orders VALUES (?, ?, ?)",
(order.id, order.customer_id, order.total),
)
await cur.execute(
"UPDATE inventory SET qty = qty - ? WHERE sku = ?",
(order.qty, order.sku),
)
return {"ok": True}
```
The transaction commits on normal exit and rolls back on any exception, including `HTTPException`.

View File

@ -0,0 +1,48 @@
---
title: Optimize bulk SELECT
description: How the buffered reader works in practice — when it's on, when it isn't, how to A/B-measure your workload.
sidebar:
order: 5
---
The connection-scoped buffered reader (Phase 39) is **enabled by default** as of `2026.05.05.12`. For most workloads you don't need to touch anything — the bulk-fetch gap against IfxPy is now ~5–15% rather than ~140%.
For the architectural rationale, see [The buffered reader →](/explain/buffered-reader/).
## Disabling the buffered reader
```bash
IFX_BUFFERED_READER=0 python my_app.py
```
The flag is read once at connection construction. To flip behavior on existing connections, close and reopen the pool.
There's no expected reason to disable it in production. The flag exists so you can A/B-measure your own workload and so we can debug regressions if they appear.
## A/B-measuring your workload
```bash
# Baseline: no buffered reader
IFX_BUFFERED_READER=0 python -m mybench
# With buffered reader
IFX_BUFFERED_READER=1 python -m mybench
```
For typical bulk-SELECT workloads expect a 30–40% wall-time reduction. For workloads dominated by single-row queries the impact is small (small queries are RTT-bound, not framing-bound).
## When the speedup is largest
Workloads where every column read makes ~4–5 small `recv()` calls — i.e. tabular data, narrow rows, large row counts. The buffered reader replaces N small `recv()` calls with one `recv(64K)` per ~64 KB of incoming data.
| Workload shape | Speedup |
|---|---:|
| Wide row, single fetch (1 row × 100 cols) | minimal |
| Narrow row, large fetch (100k rows × 5 cols) | 30–40% |
| `executemany` response drain (1k inserts) | 25–30% |
## Memory profile
The buffer is per-connection, sized to grow up to the largest single PDU it sees. Typical steady-state: 64 KB to a few hundred KB per connection. The buffer is freed when the connection closes; for long-lived pool connections it's amortized.
If you're running 10,000 connections at idle, the buffer cost is ~1–2 GB across the fleet. For typical pool sizes (10–50 connections) it's ~1–10 MB total.

View File

@ -0,0 +1,93 @@
---
title: Run the dev container
description: IBM Informix Developer Edition in Docker — first-time setup, sbspace for smart-LOBs, common troubleshooting.
sidebar:
order: 8
---
import { Aside } from '@astrojs/starlight/components';
The IBM Informix Developer Edition Docker image is the recommended dev / integration-test target. It's the same image our CI runs against.
## First-time setup
```bash
docker run -d --name informix-dev \
-e LICENSE=accept \
-p 9088:9088 \
-p 9089:9089 \
--privileged \
icr.io/informix/informix-developer-database:15.0.1.0.3DE
```
- **`9088`** — clear-text SQLI listener
- **`9089`** — TLS-enabled SQLI listener (server-side cert is self-signed; use `tls=True` in dev)
- **`--privileged`** — required for the dev image's shared-memory tuning
The image takes ~90 seconds to initialize. Watch for `oninit running`:
```bash
docker logs -f informix-dev
```
## Default credentials
| Field | Value |
|---|---|
| `user` | `informix` |
| `password` | `in4mix` |
| `database` | `sysmaster` (always exists) |
| `server` | `informix` |
For application use create your own database:
```bash
docker exec -it informix-dev bash -c '
echo "CREATE DATABASE app_db WITH LOG;" | dbaccess sysmaster
'
```
## Setup for smart-LOB tests
Smart-LOBs (BLOB / CLOB) require additional one-time setup:
```bash
# Inside the container
docker exec -it informix-dev su - informix -c '
onspaces -c -S sbspace1 -p $INFORMIXDIR/sbspace1 -o 0 -s 100000
# Edit $ONCONFIG to set SBSPACENAME sbspace1, then:
onmode -ky
oninit -y
# Take a level-0 archive so the sbspace is usable
ontape -s -L 0
'
```
After that, BLOBs and CLOBs work end-to-end. See [`docs/DECISION_LOG.md` §10](https://github.com/rsp2k/informix-db/blob/main/docs/DECISION_LOG.md) for the gory details.
## Running the integration tests
```bash
make ifx-up # starts the container if not already running
make test-integration # runs the 231 integration tests
```
Or directly:
```bash
pytest -m integration
```
## Troubleshooting
**"Connection refused" on 9088**: the image is still initializing. Wait for `oninit running` in the logs.
**Login succeeds, queries fail with `-329` (database does not exist)**: you're connecting to a database that hasn't been created yet. Use `database="sysmaster"` for ad-hoc testing — it always exists.
**`-908` (system error / shared memory)**: the container needs `--privileged`. Restart with that flag.
<Aside type="tip">
For a long-running dev environment, set `restart: unless-stopped` in a compose file. The image is well-behaved on restart — the database survives container shutdown.
</Aside>

View File

@ -0,0 +1,97 @@
---
title: Bulk inserts (executemany)
description: How to bulk-load with executemany — and the 53× transaction-vs-autocommit gotcha you'll hit otherwise.
sidebar:
order: 4
---
import { Aside } from '@astrojs/starlight/components';
`executemany()` is the right tool for bulk inserts and updates. With `informix-db`'s pipelined implementation it's **1.6× faster than IfxPy** at 10k+ rows.
## The basic shape
```python
rows = [(1, "alice"), (2, "bob"), (3, "carol"), ...] # 10_000 tuples
with conn: # opens a transaction
cur = conn.cursor()
cur.executemany(
"INSERT INTO users (id, name) VALUES (?, ?)",
rows,
) # commits on normal exit
```
That inserts 10,000 rows in ~161 ms against a loopback Informix container.
## The 53× gotcha
<Aside type="caution">
**`executemany(...)` under `autocommit=True` is 53× slower than the same call inside an explicit transaction.**
The server flushes the transaction log to disk per row in autocommit mode. With 10,000 single-row autocommit inserts that's 10,000 log flushes. Inside one transaction it's one flush at commit.
</Aside>
```python
# SLOW: 8.5 seconds for 10k rows
conn = informix_db.connect(..., autocommit=True)
cur = conn.cursor()
cur.executemany("INSERT ...", rows)
# FAST: 161 ms for 10k rows
conn = informix_db.connect(..., autocommit=False) # default
with conn:
cur = conn.cursor()
cur.executemany("INSERT ...", rows)
```
The default is `autocommit=False`, so this only catches you if you've explicitly opted into autocommit.
## Why it's faster than IfxPy
IfxPy's `executemany` calls `IfxPy.execute(stmt, tuple)` internally per row. That's one round-trip per row — for 10,000 rows on an 80 µs RTT, that's 800 ms of just waiting for ACKs.
Phase 33 changed our `executemany` to **pipeline** the BIND+EXECUTE PDUs:
1. Send all N BIND PDUs back-to-back without reading responses
2. Send all N EXECUTE PDUs back-to-back
3. Drain N response sets at the end
The kernel buffers the outbound bytes; the server processes the BIND/EXECUTE pipeline as fast as it can; we read all responses at the end. One RTT for the whole batch instead of N.
## Chunking large batches
For very large batches (millions of rows), break into chunks to bound memory:
```python
def chunks(it, n):
buf = []
for x in it:
buf.append(x)
if len(buf) >= n:
yield buf
buf = []
if buf:
yield buf
with conn:
cur = conn.cursor()
for batch in chunks(huge_iterator, 10_000):
cur.executemany("INSERT INTO logs ...", batch)
# one transaction, many batched executemany calls — one commit at the end
```
10,000 rows per chunk is a reasonable default; the per-chunk Python memory cost is `~ N × bytes_per_row`. For 10k tuples of 5 small fields that's a few MB.
## Returning generated keys
Informix's `SERIAL` columns are server-assigned. To get the IDs back, use a single-row insert per row and read `cur.lastrowid` — `executemany` doesn't return per-row IDs.
For batch inserts that need the IDs, the idiomatic pattern is:
```sql
INSERT INTO orders (id, customer_id, total)
SELECT MAX(id) + ROWNUM, ?, ? FROM orders
```
…or pre-generate IDs from a sequence in your app code. Or accept `IfxPy`'s same limitation: `IfxPy.executemany` doesn't return per-row generated keys either.

View File

@ -0,0 +1,68 @@
---
title: Migrate from IfxPy
description: API differences between IfxPy and informix-db, what's the same, what's not, and how to migrate incrementally.
sidebar:
order: 7
---
If you have working IfxPy code, migration is mostly mechanical. Both drivers are PEP 249 with similar shapes; the differences are in connection construction, a few cursor extensions, and async support.
## Connection construction
```python
# IfxPy
import IfxPy
conn_str = (
"DATABASE=mydb;HOSTNAME=db.example.com;PORT=9088;"
"PROTOCOL=onsoctcp;UID=informix;PWD=...;SERVICE=informix"
)
conn = IfxPy.connect(conn_str, "", "")
# informix-db
import informix_db
conn = informix_db.connect(
host="db.example.com", port=9088,
user="informix", password="...",
database="mydb", server="informix",
)
```
The `informix-db` keyword-argument form is closer to `psycopg`/`asyncpg` shapes. Connection strings aren't supported (deliberately — they're a security and parsing footgun).
## The DB-API surface is the same
`cursor()`, `execute()`, `fetchone()`, `fetchmany()`, `fetchall()`, `executemany()`, `description`, `rowcount`, `close()` — all behave per PEP 249.
The exception hierarchy is identical: `Error`, `Warning`, `InterfaceError`, `DatabaseError`, `DataError`, `OperationalError`, `IntegrityError`, `InternalError`, `ProgrammingError`, `NotSupportedError`.
## What IfxPy has that we don't (yet)
- **Named-parameter `callproc`**. We have `fast_path_call` for direct UDF/SPL invocation but the API shape differs.
- **IBM-specific scrollable cursor extensions**. We support PEP 249 scrollable cursors (`scroll(value, mode)`) but not IfxPy's `last`/`prior`/`relative` shortcuts.
- **`cursor.set_chunk_size`**. We tune fetch behavior via the buffered reader; no per-cursor knob.
## What we have that IfxPy doesn't
- **Async API** (`from informix_db import aio`)
- **Connection pool** (`informix_db.create_pool` / `aio.create_pool`)
- **Type-safe annotations** — `informix-db` ships with `py.typed`
- **Python 3.12+ support**
- **Pipelined `executemany`** — 1.6× faster than IfxPy's per-row implementation
## Migrating incrementally
If your codebase has hundreds of IfxPy call sites, you can do a partial migration:
1. Replace connection construction with `informix-db` at the application boundary.
2. Where you used `IfxPy.fetch_assoc` (returns dict), wrap our cursor with a small adapter:
```python
def fetch_assoc(cur):
row = cur.fetchone()
if row is None:
return False
return dict(zip([c[0] for c in cur.description], row))
```
3. For `IfxPy.exec_immediate`, use `cur.execute(sql)` with no params.
4. For `IfxPy.bind_param`, use the params arg of `execute()`: `cur.execute(sql, (a, b, c))`.
Most application code can be migrated with `sed`-level transformations.

View File

@ -0,0 +1,71 @@
---
title: Use the connection pool
description: Sync and async connection pools — sizing, timeouts, lifecycle, threading.
sidebar:
order: 2
---
import { Aside } from '@astrojs/starlight/components';
The connection pool amortizes the ~11 ms login handshake across many queries and gives you a thread-safe / task-safe acquire-release API. Use it any time the same process makes more than a handful of queries.
## Sync pool
```python
import informix_db
pool = informix_db.create_pool(
host="db.example.com", port=9088,
user="informix", password="...",
database="mydb", server="informix",
min_size=2, # warm up at least 2 connections at create time
max_size=10, # hard cap; acquires beyond this block
acquire_timeout=5.0, # raise PoolTimeout if no connection in 5s
max_idle=600.0, # close connections idle longer than 10 min
)
with pool.connection() as conn:
cur = conn.cursor()
cur.execute("SELECT id, name FROM users WHERE id = ?", (42,))
print(cur.fetchone())
pool.close()
```
The context manager guarantees the connection returns to the pool on normal exit *and* on exception. Connections returned to the pool get rolled back automatically — you never see a dirty connection from `pool.connection()`.
## Async pool
```python
import asyncio
from informix_db import aio
async def main():
pool = await aio.create_pool(
host="db.example.com",
user="informix", password="...",
database="mydb",
min_size=2, max_size=10,
)
async with pool.connection() as conn:
cur = await conn.cursor()
await cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
print(await cur.fetchone())
await pool.close()
asyncio.run(main())
```
Same semantics, `async`-aware. Acquire and release are cancellation-safe — a cancelled task does not leak an in-flight worker onto a recycled connection.
## Sizing
A reasonable starting point: `min_size = 2`, `max_size = (CPU cores) × 2`. Most Informix workloads are I/O-bound, so the right size is "enough to saturate the network plus some headroom for spikes" — usually 8–16 for typical web/API services.
`max_size` should be **smaller than the server's `MAX_CONCURRENT_CONNECTIONS`** — the server fails new logins past its limit, and the pool will surface that as `OperationalError` after waiting `acquire_timeout`.
## Threading
PEP 249 says: connections should not be shared between threads. The pool gives each thread its own connection naturally — `pool.connection()` returns a different connection each time and each one stays held until the context manager exits.
Phase 27 added a per-connection wire lock that makes accidental sharing safe (interleaved PDUs serialize correctly), but you should still give each thread its own connection. The lock is a backstop, not a license.

View File

@ -0,0 +1,75 @@
---
title: BLOB / CLOB read & write
description: Reading and writing smart-LOB columns end-to-end in pure Python.
sidebar:
order: 6
---
import { Aside } from '@astrojs/starlight/components';
`informix-db` reads and writes smart-LOB columns (BLOB and CLOB) end-to-end without any native machinery. The implementation uses Informix's `lotofile` and `filetoblob` SQL functions, intercepted at the `SQ_FILE` (98) wire-protocol level.
## Reading a BLOB
```python
data: bytes = cur.read_blob_column(
"SELECT data FROM photos WHERE id = ?",
(42,),
)
```
`read_blob_column` returns the raw bytes. For very large BLOBs (multi-GB), see the streaming variant below.
## Writing a BLOB
```python
cur.write_blob_column(
"INSERT INTO photos VALUES (?, BLOB_PLACEHOLDER)",
blob_data=jpeg_bytes,
params=(42,),
)
```
The `BLOB_PLACEHOLDER` token in the SQL marks where the BLOB data goes. Other parameters are bound positionally to `params=`.
## Reading a CLOB
```python
text: str = cur.read_clob_column(
"SELECT body FROM articles WHERE id = ?",
(42,),
)
```
Returns a decoded `str` using the connection's `client_locale`.
## Writing a CLOB
```python
cur.write_clob_column(
"INSERT INTO articles VALUES (?, CLOB_PLACEHOLDER)",
clob_data="long article text...",
params=(42,),
)
```
## Server-side prerequisites
<Aside type="caution">
Smart-LOBs require server-side configuration that the IBM Developer Edition Docker image doesn't ship with by default:
- An **`sbspace`** must be created (`onspaces -c -S sbspace1 -p ...`)
- `SBSPACENAME` must be set in `$ONCONFIG`
- A **level-0 archive** must be taken (`ontape -s -L 0`) before BLOBs can be created
- The database must be created **with logging** (`CREATE DATABASE foo WITH LOG`)
Full setup commands are in [`docs/DECISION_LOG.md` §10](https://github.com/rsp2k/informix-db/blob/main/docs/DECISION_LOG.md).
</Aside>
## How it works (briefly)
The `lotofile` server function returns a smart-LOB descriptor as a regular result column when called via `SELECT`. The driver intercepts the `SQ_FILE` (PDU 98) response that contains the LOB bytes and reassembles them client-side.
Writing reverses the flow: `filetoblob` is invoked via a placeholder pattern in the SQL, the driver sends the bytes via `SQ_FILE` PDUs, and the server stores them in the sbspace.
The architectural pivot from the heavier `SQ_FPROUTINE` + `SQ_LODATA` stack to this lighter `SQ_FILE` intercept is documented in [Phase 10/11 of the decision log](https://github.com/rsp2k/informix-db/blob/main/docs/DECISION_LOG.md). The result is roughly 3× smaller than originally projected.

View File

@ -0,0 +1,55 @@
---
title: Connect with TLS
description: TLS-listener configuration, bring-your-own SSL context, and dev-mode self-signed handling.
sidebar:
order: 1
---
import { Aside } from '@astrojs/starlight/components';
Informix uses **dedicated TLS-enabled listener ports** (configured server-side in `sqlhosts`) rather than STARTTLS upgrade. Point `port` at the TLS listener (typically `9089`) when `tls` is enabled.
## Production: bring your own SSL context
```python
import ssl
import informix_db
ctx = ssl.create_default_context(cafile="/path/to/ca.pem")
# Optional: client cert auth
# ctx.load_cert_chain(certfile="/path/to/client.pem", keyfile="/path/to/client.key")
conn = informix_db.connect(
host="db.example.com",
port=9089,
user="informix",
password="...",
database="mydb",
server="informix",
tls=ctx,
)
```
Bring-your-own context is the recommended production pattern — you get full control of certificate verification, hostname checking, ciphers, and TLS version pinning.
## Dev / self-signed: tls=True
```python
informix_db.connect(host="127.0.0.1", port=9089, ..., tls=True)
```
`tls=True` is a convenience for development — it builds a default context with `check_hostname=False` and `verify_mode=CERT_NONE`. **Do not use this in production.**
## Server-side configuration
The Informix server needs a TLS listener entry in `sqlhosts`:
```
informix_tls onsocssl myhost 9089
```
Plus a server-side keystore. The IBM Developer Edition Docker image ships with a TLS listener already enabled on `9089` — no configuration needed.
<Aside type="tip">
If your connection hangs at the handshake, you've probably pointed at the non-TLS port (`9088` instead of `9089`). The non-TLS listener will accept the TCP connection but won't speak TLS, so the handshake stalls.
</Aside>

View File

@ -0,0 +1,92 @@
---
title: informix-db
description: Pure-Python driver for IBM Informix IDS. Speaks the SQLI wire protocol over a raw socket — no CSDK, no JVM, no native libraries.
template: splash
hero:
tagline: ''
editUrl: false
lastUpdated: false
next: false
prev: false
---
import { Card, CardGrid, Icon } from '@astrojs/starlight/components';
<div class="ifx-features">
<div class="ifx-feature">
<Icon name="rocket" class="ifx-feature__icon" />
<h3>1.6× faster bulk inserts than IfxPy</h3>
<p>Pipelined <code>executemany</code> sends every BIND+EXECUTE PDU before draining responses. IBM's C driver still pays one round-trip per row. We figured we could do better.</p>
</div>
<div class="ifx-feature">
<Icon name="seti:python" class="ifx-feature__icon" />
<h3>~10% behind IfxPy on bulk fetches</h3>
<p>Phase 39's buffered reader closed the gap from 2.4× to roughly 1.1× of the C driver. The remaining ~10% is honest physics — for now.</p>
</div>
<div class="ifx-feature">
<Icon name="puzzle" class="ifx-feature__icon" />
<h3>50 KB wheel. Zero native deps.</h3>
<p>No 92 MB OneDB tarball. No <code>libcrypt.so.1</code> from 2018. No <code>LD_LIBRARY_PATH</code> ritual. Works on Python 3.10–3.14 — including the versions IfxPy doesn't.</p>
</div>
<div class="ifx-feature">
<Icon name="sun" class="ifx-feature__icon" />
<h3>Async, like a modern driver should be</h3>
<p>FastAPI, aiohttp, asyncio. Pool, connections, cursors all have <code>async def</code> versions. IfxPy has none of this.</p>
</div>
<div class="ifx-feature">
<Icon name="approve-check" class="ifx-feature__icon" />
<h3>PEP 249, no surprises</h3>
<p><code>connect()</code>, <code>Connection</code>, <code>Cursor</code>, <code>description</code>, <code>rowcount</code>, the full DB-API exception hierarchy — and threadsafe sharing through a per-connection wire lock.</p>
</div>
<div class="ifx-feature">
<Icon name="document" class="ifx-feature__icon" />
<h3>Clean-room reverse engineering</h3>
<p>Decompiled IBM JDBC. Annotated <code>socat</code> captures. Differential testing against IfxPy on every codec path. Every architectural decision lives in <a href="/explain/phase-log/">the phase log</a>. Receipts.</p>
</div>
</div>
## A query, end to end
```python
import informix_db
with informix_db.connect(
host="db.example.com", port=9088,
user="informix", password="...",
database="mydb", server="informix",
) as conn:
cur = conn.cursor()
cur.execute("SELECT id, name FROM users WHERE id = ?", (42,))
user_id, name = cur.fetchone()
```
That's it. No `IBM_DB_HOME`. No DSN file. No `libcrypt.so.1`.
## WTF did you build this for?
The existing tools were not my style.
Every other Informix driver in any language wraps either IBM's C Client SDK or the JDBC JAR. `IfxPy`, the legacy `informixdb`, ODBC bridges, JPype/JDBC, Perl `DBD::Informix` — all of them. To our knowledge **`informix-db` is the first pure-socket Informix driver in any language**.
The OneDB CSDK is a 92 MB tarball. It needs `libcrypt.so.1` (deprecated 2018, missing on Arch, Fedora 35+, RHEL 9). It needs four `LD_LIBRARY_PATH` entries. It needs `setuptools < 58`. And IfxPy itself is broken on Python 3.12+. For containerized deployments, ETL pipelines, FastAPI services, or anywhere a build toolchain on the runtime is friction, this driver is the alternative that didn't previously exist. Now it does.
## Read next
<CardGrid>
<Card title="Install & first query" icon="rocket">
Up and running with the IBM Informix Developer Edition Docker image in five minutes.
[Get started →](/start/quickstart/)
</Card>
<Card title="Compared to IfxPy" icon="random">
Head-to-head benchmarks, install gauntlet, and when each driver wins.
[Read →](/start/vs-ifxpy/)
</Card>
<Card title="The buffered reader" icon="information">
How Phase 39 closed the bulk-fetch gap from 2.4× to ~1.1× — and the architectural mistake the first pass got wrong.
[Read →](/explain/buffered-reader/)
</Card>
<Card title="Architecture" icon="puzzle">
SQLI on the wire. Sockets, framing, codec, resultsets. How the layers stack.
[Read →](/explain/architecture/)
</Card>
</CardGrid>

View File

@ -0,0 +1,112 @@
---
title: API surface
description: Module-level functions, Connection / Cursor / Pool APIs, async equivalents.
sidebar:
order: 1
---
The top-level surface of `informix_db` and `informix_db.aio`. For full PEP 249 details, see the standard's [DB-API 2.0 spec](https://peps.python.org/pep-0249/).
## Module-level
```python
import informix_db
informix_db.connect(...) -> Connection
informix_db.create_pool(...) -> Pool
informix_db.apilevel # "2.0"
informix_db.threadsafety # 1
informix_db.paramstyle # "numeric"
```
```python
from informix_db import aio
await aio.connect(...) -> aio.Connection
await aio.create_pool(...) -> aio.Pool
```
## connect()
```python
informix_db.connect(
*,
host: str,
port: int = 9088,
user: str,
password: str,
database: str | None,
server: str, # DBSERVERNAME (not hostname)
autocommit: bool = False,
connect_timeout: float | None = None,
read_timeout: float | None = None,
keepalive: bool = False,
client_locale: str = "en_US.8859-1",
env: dict[str, str] | None = None,
tls: bool | ssl.SSLContext = False,
) -> Connection
```
## Connection
| Method / property | Description |
|---|---|
| `cursor()` | Returns a new `Cursor`. |
| `commit()` | Commits the current transaction. |
| `rollback()` | Rolls back the current transaction. |
| `close()` | Closes the connection. Idempotent. |
| `transaction()` | Context manager — commits on success, rolls back on exception. |
| `fast_path_call(routine, *args)` | Direct UDF/SPL invocation, bypassing PREPARE/EXECUTE/FETCH. |
| `encoding` | Resolved Python codec for `client_locale`. |
| `autocommit` | Read-only after connect; set via `connect(autocommit=...)`. |
| `closed` | `True` after `close()`. |
## Cursor
| Method / property | Description |
|---|---|
| `execute(sql, params=())` | Prepare + execute. Returns the cursor. |
| `executemany(sql, seq_of_params)` | Pipelined batch execute. |
| `fetchone()` | One row tuple, or `None`. |
| `fetchmany(size=arraysize)` | List of row tuples. |
| `fetchall()` | All remaining rows. |
| `scroll(value, mode="relative")` | Scrollable cursor positioning. |
| `read_blob_column(sql, params)` | Read a BLOB column → `bytes`. |
| `write_blob_column(sql, blob_data, params)` | Write a BLOB column. |
| `read_clob_column(sql, params)` | Read a CLOB column → `str`. |
| `write_clob_column(sql, clob_data, params)` | Write a CLOB column. |
| `close()` | Closes the cursor + releases server resources. |
| `description` | Sequence of column descriptors per PEP 249. |
| `rowcount` | Affected row count for DML; `-1` for SELECT. |
| `arraysize` | Default `fetchmany()` size. |
| `lastrowid` | Server-assigned key for the last single-row INSERT. |
## Pool
| Method | Description |
|---|---|
| `connection()` | Context manager yielding a connection from the pool. |
| `close()` | Closes all idle connections; waits for in-use to return. |
| `closed` | `True` after `close()`. |
| `size` | Current pool size. |
| `idle_count` | Connections currently idle in the pool. |
`aio.Pool` is identical except its `connection()` is an async context manager and `close()` is `async`.
## Exceptions
```
Error
├── Warning
└── DatabaseError
├── InterfaceError
├── DataError
├── OperationalError
│ └── PoolTimeout
├── IntegrityError
├── InternalError
├── ProgrammingError
└── NotSupportedError
```
See [Exceptions & error codes](/reference/exceptions/) for the SQLCODE → exception mapping.

View File

@ -0,0 +1,76 @@
---
title: Performance baselines
description: Single-connection benchmark results — codec, framing, end-to-end queries, vs IfxPy.
sidebar:
order: 5
---
import { Aside } from '@astrojs/starlight/components';
These are the steady-state numbers for the current release (`2026.05.05.12`), measured against the IBM Informix Developer Edition Docker container on loopback.
<Aside type="note">
All numbers are **median over 10+ rounds** (pytest-benchmark `--rounds 10`). Reproduce with `make bench` from the repo root.
</Aside>
## Codec micro-benchmarks
| Operation | Mean | Throughput |
|---|---:|---:|
| `decode(int)` per cell | 139 ns | 7.2M ops/sec |
| `decode(varchar)` per cell | 280 ns | 3.6M ops/sec |
| `decode(decimal)` per cell | 410 ns | 2.4M ops/sec |
| `parse_tuple_payload` per row (5 cols) | 1.4 µs | 715K rows/sec |
## End-to-end
| Operation | Mean | Throughput |
|---|---:|---:|
| `SELECT 1` round-trip | ~140 µs | ~7K queries/sec |
| 1000-row SELECT | ~1.0 ms | ~990K rows/sec sustained |
| `executemany(1000)` in transaction | 32 ms | ~31,000 rows/sec |
| Pool acquire + query + release | 295 µs | ~3.4K queries/sec |
| Cold connect (login handshake) | 11 ms | ~90 connections/sec |
## vs IfxPy 3.0.5
| Benchmark | IfxPy | informix-db | Result |
|---|---:|---:|---:|
| Single-row SELECT round-trip | 118 µs | 114 µs | comparable |
| ~10-row server-side query | 130 µs | 159 µs | IfxPy 22% faster |
| Cold connect | 11.0 ms | 10.5 ms | comparable |
| `executemany(1k)` | 23.5 ms | 23.2 ms | tied |
| `executemany(10k)` | 259 ms | **161 ms** | **informix-db 1.6× faster** |
| `executemany(100k)` | 2376 ms | **1487 ms** | **informix-db 1.6× faster** |
| `SELECT 1k` | 1.34 ms | 1.72 ms | IfxPy 1.28× |
| `SELECT 10k` | 11.7 ms | 16.1 ms | IfxPy 1.07× |
| `SELECT 100k` | 116 ms | 169 ms | IfxPy 1.15× |
For the methodology, IQR caveats, and reproduction instructions, see [Compared to IfxPy](/start/vs-ifxpy/).
## Phase progression on bulk SELECT
| Phase | 100k-row SELECT | Ratio vs IfxPy |
|---|---:|---:|
| Phase 36 | 280 ms | 2.40× slower |
| Phase 37 (per-column readers) | 250 ms | 2.10× slower |
| Phase 38 (codegen-inlined decoders) | 237 ms | 2.04× slower |
| **Phase 39 (connection-scoped buffered reader)** | **169 ms** | **1.15× slower** |
The Phase 39 jump is documented in [The buffered reader](/explain/buffered-reader/).
## Reproducing
```bash
git clone https://github.com/rsp2k/informix-db
cd informix-db
make ifx-up # starts the dev container
make bench # runs all benchmarks
make compare # head-to-head vs IfxPy (handles IfxPy's install gauntlet)
```
For just the bulk-fetch progression:
```bash
pytest -m benchmark tests/benchmarks/test_select_scaling.py
```

View File

@ -0,0 +1,67 @@
---
title: Configuration & env flags
description: Runtime environment variables, connection arguments, pool tunables.
sidebar:
order: 3
---
## Environment variables
| Variable | Default | Effect |
|---|---|---|
| `IFX_BUFFERED_READER` | `1` | Enable connection-scoped read buffer (Phase 39). Set to `0` to disable. |
| `IFX_DEBUG_WIRE` | unset | When set to a truthy value, log wire-level PDU framing to stderr. Verbose; for debugging only. |
| `IFX_PROTOCOL_TRACE` | unset | When set to a path, write annotated wire captures to the file. |
| `IFX_DISABLE_PIPELINE` | unset | Disables pipelined `executemany` (Phase 33). Use only to A/B-measure. |
| `INFORMIXSERVER` | — | Read by `connect()` if `server=` is not provided. |
Environment variables are read at connection construction. Changing them at runtime doesn't affect existing connections.
## Connection arguments
Full keyword list for `informix_db.connect()`:
| Arg | Type | Default | Notes |
|---|---|---|---|
| `host` | `str` | required | TCP host. |
| `port` | `int` | `9088` | TCP port. `9089` is the typical TLS listener. |
| `user` | `str` | required | |
| `password` | `str` | required | |
| `database` | `str \| None` | required | `None` logs in without selecting a DB. |
| `server` | `str` | required | DBSERVERNAME, not hostname. |
| `autocommit` | `bool` | `False` | |
| `connect_timeout` | `float \| None` | `None` | TCP+login timeout. |
| `read_timeout` | `float \| None` | `None` | Per-read timeout. |
| `keepalive` | `bool` | `False` | `SO_KEEPALIVE`. |
| `client_locale` | `str` | `"en_US.8859-1"` | See [SQL ↔ Python types](/reference/types/) for codec mapping. |
| `env` | `dict[str, str] \| None` | `None` | Server-side session env. |
| `tls` | `bool \| ssl.SSLContext` | `False` | See [Connect with TLS](/how-to/tls/). |
## Pool tunables
| Arg | Default | Effect |
|---|---|---|
| `min_size` | `1` | Connections to pre-warm at create time. |
| `max_size` | `10` | Hard cap. |
| `acquire_timeout` | `30.0` | Raise `PoolTimeout` after this many seconds. |
| `max_idle` | `600.0` | Close connections idle longer than this (seconds). |
| `health_check` | `True` | Validate idle connections before returning from the pool. |
All other connection arguments are forwarded to each pool-created connection.
## Server-side session env
The `env={}` parameter of `connect()` sets server session variables sent in the login PDU:
```python
informix_db.connect(
...,
env={
"OPT_GOAL": "-1", # optimize for first-row return
"OPTOFC": "1", # auto-free cursors at fetch-close
"IFX_AUTOFREE": "1",
},
)
```
`CLIENT_LOCALE` is set automatically from `client_locale=` — don't put it in `env=`.

View File

@ -0,0 +1,68 @@
---
title: Exceptions & error codes
description: PEP 249 exception hierarchy, Informix SQLCODE → Python exception mapping.
sidebar:
order: 4
---
The exception hierarchy is per PEP 249. All exceptions live in `informix_db` and inherit from `informix_db.Error`.
## Hierarchy
```
Error
├── Warning
└── DatabaseError
├── InterfaceError
├── DataError
├── OperationalError
│ └── PoolTimeout
├── IntegrityError
├── InternalError
├── ProgrammingError
└── NotSupportedError
```
## When each is raised
| Exception | Typical cause |
|---|---|
| `InterfaceError` | Misuse of the driver API itself (e.g. fetch on a closed cursor). |
| `DataError` | Type conversion failures, codec errors. |
| `OperationalError` | Network errors, timeouts, server unavailable, login failures. |
| `IntegrityError` | Constraint violations (PK, FK, unique, NOT NULL). |
| `InternalError` | Driver-internal invariant violation (file a bug). |
| `ProgrammingError` | Bad SQL syntax, missing tables, parameter binding errors. |
| `NotSupportedError` | Driver doesn't support the requested operation. |
| `PoolTimeout` | Pool acquire exceeded `acquire_timeout`. |
## SQLCODE mapping
Informix returns SQLCODE values; the driver maps them to the appropriate exception. A few common ones:
| SQLCODE | Meaning | Exception |
|---|---|---|
| `-201` | Syntax error | `ProgrammingError` |
| `-206` | Table not found | `ProgrammingError` |
| `-239` / `-268` / `-691` / `-703` | Unique / FK / NOT NULL violation | `IntegrityError` |
| `-329` | Database not found | `OperationalError` |
| `-908` | Connection terminated by server | `OperationalError` |
| `-1820` | Codeset conversion failure | `DataError` |
| `-908` / network errors | Server unavailable | `OperationalError` |
The full mapping lives in `src/informix_db/_errcodes.py`.
## Inspecting errors
Every exception carries the original SQLCODE and message:
```python
try:
cur.execute("INSERT INTO users VALUES (?, ?)", (1, "alice"))
except informix_db.IntegrityError as e:
print(e.sqlcode) # -239 (unique violation)
print(e.isam_code) # secondary error code
print(str(e)) # human-readable
```
`sqlcode` is the primary error; `isam_code` is the underlying ISAM error (when applicable). For multi-statement transactions, use `e.statement` to see which statement failed.

View File

@ -0,0 +1,59 @@
---
title: SQL ↔ Python types
description: Mapping table between Informix SQL types and Python types, with notes on edge cases.
sidebar:
order: 2
---
| SQL type | Python type | Notes |
|---|---|---|
| `SMALLINT` / `INT` / `BIGINT` / `SERIAL` | `int` | Arbitrary precision on the Python side. |
| `FLOAT` / `SMALLFLOAT` | `float` | IEEE 754 double / single. |
| `DECIMAL(p,s)` / `MONEY` | `decimal.Decimal` | Exact precision preserved. |
| `CHAR` / `VARCHAR` / `NCHAR` / `NVCHAR` / `LVARCHAR` | `str` | Decoded using `client_locale`. |
| `BOOLEAN` | `bool` | |
| `DATE` | `datetime.date` | |
| `DATETIME YEAR TO …` | `datetime.datetime` / `datetime.time` / `datetime.date` | The Python type depends on the field range. |
| `INTERVAL DAY TO FRACTION` | `datetime.timedelta` | |
| `INTERVAL YEAR TO MONTH` | `informix_db.IntervalYM` | Custom type — `datetime.timedelta` can't represent year-month intervals. |
| `BYTE` / `TEXT` (legacy in-row blobs) | `bytes` / `str` | |
| `BLOB` / `CLOB` (smart-LOBs) | `informix_db.BlobLocator` / `informix_db.ClobLocator` | Read via `cursor.read_blob_column`, write via `cursor.write_blob_column`. |
| `ROW(…)` | `informix_db.RowValue` | Named-tuple-like with `.name`-accessible fields. |
| `SET(…)` / `MULTISET(…)` / `LIST(…)` | `informix_db.CollectionValue` | Iterable; preserves duplicates only for `MULTISET` / `LIST`. |
| `NULL` | `None` | |
## DATETIME field ranges
Informix's `DATETIME YEAR TO X` is field-range typed. The Python type returned depends on which fields are present:
| Range | Python type |
|---|---|
| `YEAR TO YEAR` through `YEAR TO DAY` | `datetime.date` |
| `YEAR TO HOUR` through `YEAR TO FRACTION(5)` | `datetime.datetime` |
| `HOUR TO HOUR` through `HOUR TO FRACTION(5)` | `datetime.time` |
## Decimal precision
`DECIMAL` columns preserve their declared precision and scale through the codec. A `DECIMAL(10,2)` column with value `123.45` decodes to `Decimal("123.45")` exactly — no float intermediate.
For binding `Decimal` values into INSERT/UPDATE, the driver uses the column's declared scale. Pass `Decimal` for exact values; `float` works but may cause rounding at the column scale.
## NULL
`NULL` is `None` in both directions. Use `IS NULL` / `IS NOT NULL` in SQL — `WHERE x = ?` with `None` returns no rows even where `x IS NULL`.
## Type extensions
`informix_db.IntervalYM(years, months)` represents `INTERVAL YEAR TO MONTH`:
```python
from informix_db import IntervalYM
ym = IntervalYM(years=2, months=3)
cur.execute("INSERT INTO contracts (term) VALUES (?)", (ym,))
cur.execute("SELECT term FROM contracts WHERE id = ?", (1,))
result = cur.fetchone()[0] # IntervalYM(years=2, months=3)
```
`informix_db.RowValue` and `informix_db.CollectionValue` are read-only types returned for `ROW`, `SET`, `MULTISET`, `LIST` columns. Both expose Python iteration and indexed access.

View File

@ -0,0 +1,161 @@
---
title: Install & first query
description: Five minutes from pip install to a real SELECT against a Dockerised Informix server.
sidebar:
order: 2
---
import { Tabs, TabItem, Steps, Aside } from '@astrojs/starlight/components';
This page gets you from a clean Python environment to a working query against a real Informix server in five minutes. We'll use the official IBM Informix Developer Edition Docker image so you don't need an Informix license.
## Prerequisites
- Python 3.10 or newer
- Docker, for the dev server (skip if you already have an Informix instance)
- 4 GB of free RAM for the dev container
## 1. Install the driver
<Tabs>
<TabItem label="uv (recommended)">
```bash
uv add informix-db
```
</TabItem>
<TabItem label="pip">
```bash
pip install informix-db
```
</TabItem>
<TabItem label="poetry">
```bash
poetry add informix-db
```
</TabItem>
</Tabs>
That's the entire dependency. No system packages, no `LD_LIBRARY_PATH`, no `libcrypt.so.1`.
## 2. Start the dev container
```bash
docker run -d --name informix-dev \
-e LICENSE=accept \
-p 9088:9088 \
-p 9089:9089 \
--privileged \
icr.io/informix/informix-developer-database:15.0.1.0.3DE
```
The image takes ~90 seconds to initialize. Watch the logs until you see `oninit running`:
```bash
docker logs -f informix-dev
```
<Aside type="tip">
The `--privileged` flag is required by the dev image — it needs to manage shared-memory limits inside the container. Production servers don't require it.
</Aside>
## 3. Run your first query
Save this as `hello.py`:
```python
import informix_db
with informix_db.connect(
host="127.0.0.1",
port=9088,
user="informix",
password="in4mix",
database="sysmaster",
server="informix",
) as conn:
cur = conn.cursor()
cur.execute(
"SELECT FIRST 5 dbsname, tabname "
"FROM systables WHERE tabid > 99"
)
for row in cur.fetchall():
print(row)
```
Then:
```bash
python hello.py
```
You should see five rows from Informix's system catalog. If you do, congratulations — you've spoken SQLI to an IBM database from pure Python with zero native code in the call stack.
## What just happened
When `informix_db.connect()` returned, the driver had:
1. Opened a TCP socket to `127.0.0.1:9088`
2. Sent an `SQ_INFO` PDU containing client capabilities (locale, byte order, app name)
3. Received the server's identification (`IBM Informix Dynamic Server Version 15.0.1.0.3`)
4. Negotiated authentication via `SQ_PASSWD`
5. Opened the `sysmaster` database via `SQ_DBOPEN`
6. Returned a `Connection` object ready for queries
`cur.execute()` sent an `SQ_PREPARE` PDU with your SQL, parsed the response into a column descriptor, sent `SQ_DESCRIBE`, then `SQ_OPEN` to start the cursor. `cur.fetchall()` issued `SQ_FETCH` PDUs and decoded each `SQ_TUPLE` payload via per-column readers.
If you want to see this happen byte-by-byte, [the architecture page](/explain/architecture/) walks through the wire protocol with annotated captures.
## 4. Try parameter binding
```python
with informix_db.connect(host="127.0.0.1", port=9088, user="informix",
password="in4mix", database="sysmaster",
server="informix") as conn:
cur = conn.cursor()
cur.execute(
"SELECT tabname FROM systables WHERE tabid = ?",
(1,),
)
print(cur.fetchone())
```
`?` and `:1` both work — Informix's native paramstyle is `numeric`, but `?` is supported as a synonym.
## 5. Use the connection pool
For real applications, prefer the pool:
```python
import informix_db
pool = informix_db.create_pool(
host="127.0.0.1", port=9088,
user="informix", password="in4mix",
database="sysmaster", server="informix",
min_size=2, max_size=10,
acquire_timeout=5.0,
)
with pool.connection() as conn:
cur = conn.cursor()
cur.execute("SELECT 1 FROM systables WHERE tabid = 1")
print(cur.fetchone())
pool.close()
```
The pool is thread-safe and has a per-connection wire lock — accidental sharing across threads doesn't corrupt the wire stream, though PEP 249 advice still holds (one connection per thread).
## What's next
<Steps>
1. **Going async?** [Async with FastAPI →](/how-to/async-fastapi/) walks through a real FastAPI app with the async pool.
2. **Connecting to production?** [Connect with TLS →](/how-to/tls/) covers TLS-listener configuration and bring-your-own-context patterns.
3. **Bulk-loading?** [Bulk inserts (executemany) →](/how-to/executemany/) — and the 53× transaction-vs-autocommit gotcha you'll hit otherwise.
4. **Migrating from IfxPy?** [Migrate from IfxPy →](/how-to/migrate-from-ifxpy/) covers the API differences and the things IfxPy does that we don't (yet).
</Steps>

View File

@ -0,0 +1,149 @@
---
title: Compared to IfxPy
description: Head-to-head benchmarks against IBM's C-bound Python driver. Where each one wins and why.
sidebar:
order: 3
---
import { Aside } from '@astrojs/starlight/components';
[IfxPy](https://pypi.org/project/IfxPy/) is IBM's official Python driver — a C extension that wraps the OneDB Client SDK (CSDK), which itself wraps the same SQLI wire protocol `informix-db` speaks directly. It's the reasonable comparison: same protocol, same server, same workload, different transport.
Numbers below are **median + IQR over 10+ rounds**, all against the same IBM Informix Developer Edition Docker container on the same host. Methodology and reproduction steps live in [`tests/benchmarks/compare/`](https://github.com/rsp2k/informix-db/tree/main/tests/benchmarks/compare) in the repo.
## Headline numbers
| Benchmark | IfxPy 3.0.5 (C) | informix-db (pure Python) | Result |
|---|---:|---:|---:|
| Single-row SELECT round-trip | 118 µs | 114 µs | comparable |
| ~10-row server-side query | 130 µs | 159 µs | IfxPy 22% faster |
| Cold connect (login handshake) | 11.0 ms | 10.5 ms | comparable |
| `executemany(1k)` in transaction | 23.5 ms | 23.2 ms | tied |
| **`executemany(10k)` in transaction** | 259 ms | **161 ms** | **informix-db 1.6× faster** |
| **`executemany(100k)` in transaction** | 2376 ms | **1487 ms** | **informix-db 1.6× faster** |
| `SELECT 1k` rows | 1.34 ms | 1.72 ms | IfxPy 1.28× faster |
| `SELECT 10k` rows | 11.7 ms | 16.1 ms | IfxPy 1.07× faster |
| `SELECT 100k` rows | 116 ms | 169 ms | IfxPy 1.15× faster |
<Aside type="note">
Phase 39's connection-scoped buffered reader closed the bulk-fetch gap from a steady ~2.4× to ~1.05–1.15×. The story of how that landed is in [the buffered reader page](/explain/buffered-reader/).
</Aside>
## When informix-db wins
### Bulk inserts at scale
The clearest win is bulk insert throughput. `executemany(10_000_rows)` runs in **161 ms** vs IfxPy's **259 ms** — `informix-db` is 1.6× faster.
The mechanism is pipelining. Phase 33 changed `executemany` to send all N BIND+EXECUTE PDUs back-to-back **before** draining any response. IfxPy's C-level `IfxPy.execute(stmt, tuple)` makes one round-trip per row — N RTTs at ~80 µs each adds up to the 100 ms gap.
```python
# Both drivers
cur.executemany(
"INSERT INTO orders VALUES (?, ?, ?)",
rows, # list of 10_000 tuples
)
# informix-db: 161 ms — 10k PDUs sent, then 10k responses drained
# IfxPy: 259 ms — 10k round-trips, each blocking on response
```
### Containerized deployment
`informix-db` ships as a 50 KB pure-Python wheel with **zero runtime dependencies**. Your Dockerfile is:
```dockerfile
FROM python:3.13-slim
RUN pip install informix-db
```
IfxPy's deployment surface is dramatically larger:
- 92 MB IBM OneDB Client tarball
- `setuptools < 58` build pin
- `LD_LIBRARY_PATH` configuration for four directories
- `libcrypt.so.1` (deprecated 2018 — missing on Arch, Fedora 35+, RHEL 9)
- C compiler in the build image
For slim images, multi-stage builds, FaaS deployments, or anywhere build-toolchain-on-the-runtime is friction, `informix-db` is the only reasonable option.
### Modern Python
IfxPy works on Python ≤ 3.11 currently. The C extension breaks on 3.12+ (PyConfig changes, removed `_PyImport_AcquireLock`, etc.).
`informix-db` works unmodified on **3.10, 3.11, 3.12, 3.13, and 3.14**. We've kept a CI matrix on every minor version since 3.10 from the start.
### Async
`informix-db` ships an async API:
```python
from informix_db import aio
async def main():
pool = await aio.create_pool(...)
async with pool.connection() as conn:
cur = await conn.cursor()
await cur.execute("SELECT ...")
rows = await cur.fetchall()
```
IfxPy has no async support — every call blocks the event loop. Using IfxPy from FastAPI requires `loop.run_in_executor()` boilerplate, and the thread pool isn't connection-aware so you give up the natural fairness of an async pool.
## When IfxPy wins
### Large analytical fetches
For queries pulling 10k+ rows where per-row decode cost dominates, IfxPy is currently 5–15% faster. The C-level `fetch_tuple` decoder is ~1.1 µs/row; our Python `parse_tuple_payload` is ~2.0 µs/row after Phase 39 (down from ~2.7 before). At 100k rows the gap is ~80 ms wall-clock — meaningful but not disqualifying.
The gap is closing phase by phase:
| Phase | Bulk-fetch ratio vs IfxPy |
|---|---|
| Phase 36 | 2.40× slower |
| Phase 37 (per-column readers) | 2.10× slower |
| Phase 38 (codegen-inlined decoders) | 2.04× slower |
| **Phase 39 (connection-scoped buffered reader)** | **1.15× slower** |
If you're running analytical reports that pull millions of rows in a single SELECT and the per-row decode overhead is a measurable cost, IfxPy may be marginally faster today. For most application workloads it isn't.
### Workloads built around CSDK extensions
If your existing code uses IBM-specific cursor extensions (`cursor.callproc` with named parameters, IBM's specific scrollable cursor semantics around `last`/`prior`/`relative`, `cursor.set_chunk_size` for fetch tuning), the migration to `informix-db` is straightforward but not zero-cost. We support the core PEP 249 surface plus our own scrollable cursor API — see [the migration guide](/how-to/migrate-from-ifxpy/).
## Methodology
Benchmarks are pytest-benchmark fixtures in `tests/benchmarks/compare/` against the official `icr.io/informix/informix-developer-database:15.0.1.0.3DE` image, running on the same loopback as the Python process.
Reported numbers are **median over 10+ rounds**, with IQR included. Why median over mean: the first round of any run includes JIT warmup, page-cache miss, and a TCP slow-start round-trip. The mean is contaminated by these one-shot costs in a way that misrepresents steady-state behavior. Median + IQR is what we report.
IfxPy's IQR on the 100k-row SELECT is ~21% (Docker→host loopback noise, plus the C extension's allocation patterns). Our IQR is ~0.2%. The headline 1.15× ratio at 100k rows is partly that noise — a fair reading is "5–15% slower than IfxPy on large fetches", and the lower bound may already be within measurement noise.
To reproduce:
```bash
git clone https://github.com/rsp2k/informix-db
cd informix-db/tests/benchmarks/compare
make ifx-up
make compare
```
The `Makefile` handles the IfxPy install gauntlet (Python ≤ 3.11 environment, `setuptools < 58`, `libcrypt.so.1` symlink, OneDB CSDK download, the four `LD_LIBRARY_PATH` exports) so you don't have to learn it manually.
## Summary
Use `informix-db` when:
- You're writing new code in Python ≥ 3.10
- Your workload is bulk-insert / ETL / log-shipping
- You want async / FastAPI integration
- You're deploying in containers or to Python environments where build toolchains are friction
- Your platform doesn't have `libcrypt.so.1`
Use IfxPy when:
- You have an existing IfxPy codebase
- You're running large analytical SELECTs and the 5–15% decode-side gap matters
- You're constrained to Python ≤ 3.11 anyway
For everything else — the cost-benefit favors `pip install informix-db`.

View File

@ -0,0 +1,85 @@
---
title: WTF did you build this for?
description: A pure-Python Informix driver, why it didn't exist before, and what it's good for.
sidebar:
order: 1
label: WTF did you build this for?
---
The existing tools were not my style.
Every Informix driver in any language — `IfxPy`, the legacy `informixdb`, ODBC bridges, JPype/JDBC, Perl `DBD::Informix` — wraps either IBM's C Client SDK or the JDBC JAR. To our knowledge `informix-db` is the **first pure-socket Informix driver in any language**.
## The problem with IBM's C SDK
The IBM Informix Client SDK (CSDK), now packaged as part of OneDB Client, is a 92 MB tarball with a non-trivial install gauntlet:
- Python ≤ 3.11 (IfxPy is broken on 3.12+)
- `setuptools < 58` (legacy build system)
- Permissive `CFLAGS` for the C extension build
- Manual download of the 92 MB ODBC tarball
- Four `LD_LIBRARY_PATH` directories
- `libcrypt.so.1` — deprecated in 2018, missing on Arch, Fedora 35+, RHEL 9
For containerized deployments, ETL pipelines, FastAPI services, or anywhere Python lives and IBM's C SDK is friction, the friction compounds. `informix-db`'s install is `pip install informix-db`. The wheel is ~50 KB. There are zero runtime dependencies.
## What it does
`informix-db` opens a TCP socket to an Informix server's SQLI listener and speaks the wire protocol directly — the same protocol IBM's JDBC driver uses, the same protocol the CSDK speaks under the hood. No native code is in the thread of execution.
The wire protocol was reverse-engineered through three sources:
1. **Decompiled IBM JDBC driver** (`com.informix.jdbc.IfxConnection` and friends), used as a clean-room reference for PDU shapes and protocol semantics.
2. **Annotated `socat` captures** of real client/server traffic against the IBM Informix Developer Edition Docker image.
3. **Differential testing** against `IfxPy` — every codec path is tested against the C driver's behavior on the same data.
The result is a PEP 249 compliant driver with a sync API, an async API (FastAPI / asyncio compatible), a connection pool, TLS support, smart-LOB read/write, scrollable cursors, fast-path stored procedure invocation, and bulk-insert / bulk-fetch performance within ~10–60% of the C driver depending on workload.
## What it's good for
The places where `informix-db` is unambiguously the right choice:
- **ETL and bulk-load pipelines.** Pipelined `executemany` (Phase 33) is 1.6× faster than IfxPy at scale because every BIND+EXECUTE PDU goes out before any responses are drained. IfxPy still pays one round-trip per `IfxPy.execute(stmt, tuple)` call.
- **Container deployments.** The 50 KB wheel and absent native deps mean a slim base image works. No multi-stage build to compile the CSDK.
- **Modern Python.** Works on 3.10 through 3.14 unmodified. IfxPy hasn't shipped 3.12 wheels.
- **Async / FastAPI.** Native async support via thread-pool wrapping. IfxPy is fully synchronous; using it from FastAPI requires `run_in_executor` boilerplate and gives up the connection pool's natural async semantics.
- **Anywhere `libcrypt.so.1` is missing.** Modern Linux distributions ship `libcrypt.so.2`. IfxPy refuses to load without `libcrypt.so.1`. We don't link against either.
## What IfxPy is still better at
Honesty matters here:
- **Large analytical fetches.** IfxPy's C-level `fetch_tuple` decoder is faster than our Python `parse_tuple_payload` (~1.1 µs/row vs ~2.0 µs/row after Phase 39). For workloads pulling 10k+ rows in a single SELECT where the per-row decode cost dominates, IfxPy is currently 5–15% faster. The gap is shrinking phase by phase.
- **Workloads built around the CSDK.** If your existing code already uses IfxPy idioms (`IfxPyDbi.connect_pooled`, IBM's specific cursor extensions), the migration to `informix-db` is straightforward but not zero-cost.
The honest summary table from the [comparison page](/start/vs-ifxpy/):
| Workload | Winner | Margin |
|---|---|---|
| Bulk insert (`executemany` 10k–100k rows) | `informix-db` | 1.6× faster |
| Bulk SELECT (10k–100k rows) | IfxPy | 1.05–1.15× faster |
| Single-row queries | tied | within noise |
| Cold connect | tied | within noise |
| Containerized deployment | `informix-db` | no contest |
| Python 3.12+ | `informix-db` | only option |
## Production-ready
Every finding from a system-wide failure-mode audit (data correctness, wire safety, resource leaks, concurrency, async cancellation) has been addressed:
- Pool no longer returns connections with open transactions
- Per-connection wire lock prevents PDU interleaving from accidental sharing
- Async cancellation cannot leak running workers onto recycled connections
- `_raise_sq_err` no longer masks wire desync via bare-except
- Cursor finalizers release server-side resources on mid-fetch raise
- 5 medium-severity hardening items resolved
**0 critical, 0 high, 0 medium audit findings remain.** Every architectural change went through a Margaret Hamilton-style review focused on silent-failure modes, recovery paths, and documented invariants. Each documented invariant is paired with either a runtime guard or a CI tripwire test.
300+ tests across unit / integration / benchmark suites. Integration tests run against the official IBM Informix Developer Edition Docker image (15.0.1.0.3DE).
## Read next
- **[Install & first query →](/start/quickstart/)** — five minutes from `pip install` to a real SELECT against a Docker-hosted Informix.
- **[Compared to IfxPy →](/start/vs-ifxpy/)** — full head-to-head benchmarks, methodology, and reproduction.
- **[Architecture →](/explain/architecture/)** — how the layers stack: socket, framing, codec, resultset, cursor.

View File

@ -0,0 +1,343 @@
/* Component-scoped styles for the custom Hero and homepage modules */
.ifx-hero {
display: grid;
grid-template-columns: minmax(0, 1fr) minmax(0, 1.1fr);
gap: clamp(1.5rem, 4vw, 4rem);
align-items: center;
padding: clamp(2rem, 5vw, 4.5rem) 0 clamp(2rem, 4vw, 3.5rem);
border-bottom: 1px solid var(--sl-color-hairline-light);
}
@media (max-width: 800px) {
.ifx-hero {
grid-template-columns: 1fr;
}
}
.ifx-hero__eyebrow {
display: inline-flex;
align-items: center;
gap: 0.5rem;
font-family: var(--sl-font-mono);
font-size: 0.75rem;
letter-spacing: 0.18em;
text-transform: uppercase;
color: var(--sl-color-text-accent);
margin-bottom: 0.75rem;
}
.ifx-hero__eyebrow::before {
content: '';
width: 8px;
height: 8px;
background: var(--ifx-amber);
border-radius: 1px;
box-shadow: 0 0 12px var(--ifx-amber);
animation: ifx-pulse 2.4s ease-in-out infinite;
}
@keyframes ifx-pulse {
0%, 100% { opacity: 0.4; }
50% { opacity: 1; }
}
.ifx-hero__title {
font-size: clamp(2rem, 5vw, 3.25rem);
line-height: 1.05;
letter-spacing: -0.02em;
font-weight: 700;
margin: 0 0 1rem;
color: var(--sl-color-white);
}
.ifx-hero__title strong {
background: linear-gradient(180deg, var(--ifx-amber-bright) 0%, var(--ifx-amber) 100%);
-webkit-background-clip: text;
background-clip: text;
color: transparent;
font-weight: inherit;
}
.ifx-hero__lede {
font-size: clamp(1rem, 1.5vw, 1.15rem);
line-height: 1.55;
color: var(--sl-color-gray-2);
max-width: 36rem;
margin: 0 0 1.75rem;
}
.ifx-hero__cta {
display: flex;
flex-wrap: wrap;
gap: 0.75rem;
}
.ifx-hero__cta a {
display: inline-flex;
align-items: center;
gap: 0.4rem;
padding: 0.6rem 1.1rem;
border-radius: 6px;
font-weight: 500;
text-decoration: none !important;
transition: transform 0.08s ease, box-shadow 0.15s ease;
}
.ifx-hero__cta a:hover {
transform: translateY(-1px);
}
.ifx-hero__cta .primary {
background: var(--ifx-amber);
color: var(--ifx-charcoal-0);
}
.ifx-hero__cta .primary:hover {
box-shadow: 0 4px 16px rgba(245, 165, 36, 0.35);
}
.ifx-hero__cta .secondary {
border: 1px solid var(--sl-color-hairline-shade);
color: var(--sl-color-text);
background: transparent;
}
.ifx-hero__cta .secondary:hover {
border-color: var(--sl-color-accent);
}
.ifx-hero__install {
font-family: var(--sl-font-mono);
font-size: 0.95rem;
margin-top: 1.5rem;
padding: 0.75rem 1rem;
background: var(--sl-color-bg-inline-code);
border-left: 2px solid var(--ifx-amber);
border-radius: 0 4px 4px 0;
color: var(--sl-color-text);
user-select: all;
}
.ifx-hero__install::before {
content: '$ ';
color: var(--sl-color-text-accent);
user-select: none;
}
/* Wire-dump easter egg: types out real captured handshake bytes */
.ifx-wiredump {
font-family: var(--sl-font-mono);
font-size: 0.78rem;
line-height: 1.55;
background: var(--ifx-charcoal-0);
color: var(--ifx-amber);
border: 1px solid var(--sl-color-hairline);
border-radius: 8px;
padding: 1.25rem 1rem;
height: clamp(280px, 35vw, 360px);
overflow: hidden;
position: relative;
box-shadow: inset 0 0 60px rgba(245, 165, 36, 0.04);
}
:root[data-theme='light'] .ifx-wiredump {
background: #1a1612;
}
.ifx-wiredump__scroll {
white-space: pre;
height: 100%;
overflow: hidden;
}
.ifx-wiredump__line {
display: block;
opacity: 0;
white-space: pre;
}
.ifx-wiredump__line.is-visible {
opacity: 1;
}
.ifx-wiredump__byte--ascii {
color: #ffd884;
}
.ifx-wiredump__caret {
display: inline-block;
width: 8px;
height: 1em;
background: var(--ifx-amber);
vertical-align: text-bottom;
margin-left: 2px;
animation: ifx-blink 1.1s steps(2) infinite;
}
@keyframes ifx-blink {
0%, 50% { opacity: 1; }
51%, 100% { opacity: 0; }
}
.ifx-wiredump__direction {
color: var(--sl-color-gray-3);
margin-right: 0.5rem;
}
.ifx-wiredump__direction--out {
color: var(--ifx-amber);
}
.ifx-wiredump__direction--in {
color: #6dd2a4;
}
.ifx-wiredump__caption {
font-family: var(--sl-font-mono);
font-size: 0.7rem;
letter-spacing: 0.1em;
text-transform: uppercase;
color: var(--sl-color-gray-3);
margin-top: 0.6rem;
text-align: right;
}
/* Homepage feature grid */
.ifx-features {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(260px, 1fr));
gap: 1.25rem;
margin: 3rem 0;
}
.ifx-feature {
padding: 1.5rem;
border: 1px solid var(--sl-color-hairline-light);
border-radius: 8px;
background: var(--sl-color-bg-nav);
transition: border-color 0.15s ease, transform 0.1s ease;
}
.ifx-feature:hover {
border-color: var(--sl-color-accent);
}
.ifx-feature__icon {
width: 28px;
height: 28px;
margin-bottom: 0.75rem;
color: var(--sl-color-text-accent);
}
.ifx-feature h3 {
font-size: 1.05rem;
margin: 0 0 0.5rem;
letter-spacing: -0.01em;
}
.ifx-feature p {
font-size: 0.92rem;
line-height: 1.5;
margin: 0;
color: var(--sl-color-gray-2);
}
/* Supported Systems "joint" badge — appears below every page's footer */
.ifx-ss-badge {
margin-top: 3.5rem;
padding: 0;
border-top: 1px solid var(--sl-color-hairline-light);
}
.ifx-ss-badge__link {
display: grid;
grid-template-columns: auto minmax(0, 1fr);
gap: 1.25rem;
align-items: center;
padding: 1.5rem 0 0.5rem;
text-decoration: none !important;
color: var(--sl-color-text);
transition: color 0.12s ease;
}
.ifx-ss-badge__link:hover {
color: var(--sl-color-text-accent);
}
.ifx-ss-badge__link:hover .ifx-ss-badge__cta {
gap: 0.5rem;
}
.ifx-ss-badge__logo {
width: 60px;
height: 45px;
flex-shrink: 0;
/* Slight lift so the punch-card pattern reads */
filter: drop-shadow(0 1px 2px rgba(37, 99, 235, 0.12));
}
.ifx-ss-badge__copy {
min-width: 0;
}
.ifx-ss-badge__heading {
font-family: var(--sl-font-mono);
font-size: 0.78rem;
font-weight: 600;
letter-spacing: 0.16em;
text-transform: uppercase;
color: var(--sl-color-text-accent);
margin: 0 0 0.4rem;
border: none !important;
padding: 0 !important;
}
.ifx-ss-badge__body {
font-size: 0.92rem;
line-height: 1.55;
color: var(--sl-color-gray-2);
margin: 0 0 0.5rem;
}
.ifx-ss-badge__name {
color: #3b82f6;
font-weight: 600;
}
:root[data-theme='light'] .ifx-ss-badge__name {
color: #2563eb;
}
.ifx-ss-badge__cta {
display: inline-flex;
align-items: center;
gap: 0.35rem;
font-size: 0.85rem;
font-weight: 500;
color: var(--sl-color-text-accent);
transition: gap 0.15s ease;
}
@media (max-width: 540px) {
.ifx-ss-badge__link {
grid-template-columns: 1fr;
gap: 0.85rem;
text-align: left;
}
.ifx-ss-badge__logo {
width: 50px;
height: 38px;
}
}
/* Performance-bar visual for the comparison table */
.ifx-perfbar {
position: relative;
height: 6px;
background: var(--sl-color-bg-inline-code);
border-radius: 3px;
overflow: hidden;
margin-top: 0.25rem;
}
.ifx-perfbar__fill {
position: absolute;
inset: 0 auto 0 0;
background: linear-gradient(90deg, var(--ifx-amber) 0%, var(--ifx-amber-bright) 100%);
border-radius: 3px;
}

View File

@ -0,0 +1,137 @@
/*
* informix-db docs theme
* - Charcoal base (no purple gradients, ever)
* - Amber accent CRT-monitor nod, distinct from sibling sites' cyan
* - Inter for body, IBM Plex Mono for technical bytes
*/
:root {
--ifx-amber: #f5a524;
--ifx-amber-bright: #ffb84d;
--ifx-amber-dim: #b87a18;
--ifx-charcoal-0: #0e0d0c;
--ifx-charcoal-1: #161412;
--ifx-charcoal-2: #1f1c1a;
--ifx-charcoal-3: #2a2622;
--ifx-paper: #faf7f2;
--ifx-paper-2: #f3eee5;
--ifx-ink: #1a1612;
--ifx-ink-soft: #4a443c;
--ifx-rule: rgba(245, 165, 36, 0.18);
--sl-font: 'Inter', system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
--sl-font-mono: 'IBM Plex Mono', ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
}
:root[data-theme='dark'] {
--sl-color-accent-low: #3a2810;
--sl-color-accent: var(--ifx-amber);
--sl-color-accent-high: var(--ifx-amber-bright);
--sl-color-white: #fbf7ee;
--sl-color-gray-1: #ece6da;
--sl-color-gray-2: #c8c1b1;
--sl-color-gray-3: #948b78;
--sl-color-gray-4: #5a5246;
--sl-color-gray-5: #2e2823;
--sl-color-gray-6: var(--ifx-charcoal-2);
--sl-color-black: var(--ifx-charcoal-0);
--sl-color-bg: var(--ifx-charcoal-0);
--sl-color-bg-nav: var(--ifx-charcoal-1);
--sl-color-bg-sidebar: var(--ifx-charcoal-1);
--sl-color-bg-inline-code: var(--ifx-charcoal-2);
--sl-color-bg-accent: var(--ifx-amber);
--sl-color-text: #ece6da;
--sl-color-text-accent: var(--ifx-amber-bright);
--sl-color-text-invert: var(--ifx-charcoal-0);
--sl-color-hairline: rgba(245, 165, 36, 0.14);
--sl-color-hairline-light: rgba(245, 165, 36, 0.08);
--sl-color-hairline-shade: rgba(245, 165, 36, 0.22);
}
:root[data-theme='light'] {
--sl-color-accent-low: #fbe8c8;
--sl-color-accent: var(--ifx-amber-dim);
--sl-color-accent-high: #7a4f0a;
--sl-color-white: #1a1612;
--sl-color-gray-1: #2e2823;
--sl-color-gray-2: #4a443c;
--sl-color-gray-3: #6e665a;
--sl-color-gray-4: #948b78;
--sl-color-gray-5: #d8d1c1;
--sl-color-gray-6: #ebe5d6;
--sl-color-gray-7: #f3eee5;
--sl-color-black: var(--ifx-paper);
--sl-color-bg: var(--ifx-paper);
--sl-color-bg-nav: var(--ifx-paper-2);
--sl-color-bg-sidebar: var(--ifx-paper-2);
--sl-color-bg-inline-code: #ece6d4;
--sl-color-bg-accent: var(--ifx-amber-dim);
--sl-color-text: var(--ifx-ink);
--sl-color-text-accent: var(--ifx-amber-dim);
--sl-color-text-invert: var(--ifx-paper);
--sl-color-hairline: rgba(120, 80, 12, 0.18);
--sl-color-hairline-light: rgba(120, 80, 12, 0.10);
--sl-color-hairline-shade: rgba(120, 80, 12, 0.28);
}
/* Tighten heading rhythm — Starlight defaults are a touch loose for technical docs */
.sl-markdown-content h2 {
margin-top: 2.5rem;
border-top: 1px solid var(--sl-color-hairline-light);
padding-top: 1.5rem;
letter-spacing: -0.01em;
}
.sl-markdown-content h3 {
margin-top: 1.75rem;
letter-spacing: -0.005em;
}
/* Inline code: monospace + amber tint, no jarring background block */
.sl-markdown-content :not(pre) > code {
font-feature-settings: 'ss02', 'cv02';
border: 1px solid var(--sl-color-hairline-shade);
font-size: 0.9em;
padding: 0.1em 0.35em;
border-radius: 4px;
}
/* Tables: dense, technical, with amber column rules — for type-mapping & benchmark tables */
.sl-markdown-content table {
border-collapse: collapse;
font-variant-numeric: tabular-nums;
font-size: 0.92rem;
}
.sl-markdown-content thead th {
border-bottom: 2px solid var(--sl-color-accent);
text-align: left;
font-weight: 600;
letter-spacing: 0.02em;
text-transform: uppercase;
font-size: 0.78em;
color: var(--sl-color-text-accent);
}
.sl-markdown-content tbody td {
border-bottom: 1px solid var(--sl-color-hairline-light);
padding: 0.5rem 0.75rem;
}
/* Anchor links — underline, no rainbow */
.sl-markdown-content a:not(.sl-anchor-link) {
text-decoration: underline;
text-decoration-color: var(--sl-color-accent);
text-decoration-thickness: 1px;
text-underline-offset: 3px;
transition: text-decoration-thickness 0.1s ease;
}
.sl-markdown-content a:not(.sl-anchor-link):hover {
text-decoration-thickness: 2px;
}

5
docs-site/tsconfig.json Normal file
View File

@ -0,0 +1,5 @@
{
"extends": "astro/tsconfigs/strict",
"include": [".astro/types.d.ts", "**/*"],
"exclude": ["dist"]
}