"""Component value optimizer for iterative circuit tuning. Adjusts circuit parameters toward target specifications by running real LTspice simulations in a loop. Supports single-component binary search and multi-component coordinate descent. """ import logging import math import tempfile from dataclasses import dataclass, field from pathlib import Path import numpy as np from .raw_parser import RawFile from .runner import run_netlist from .waveform_math import ( compute_bandwidth, compute_peak_to_peak, compute_rms, compute_settling_time, ) logger = logging.getLogger(__name__) # --------------------------------------------------------------------------- # Standard component value series # --------------------------------------------------------------------------- # E12: 12 values per decade (10% tolerance) _E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2] # E24: 24 values per decade (5% tolerance) _E24 = [ 1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1, ] # E96: 96 values per decade (1% tolerance) _E96 = [ 1.00, 1.02, 1.05, 1.07, 1.10, 1.13, 1.15, 1.18, 1.21, 1.24, 1.27, 1.30, 1.33, 1.37, 1.40, 1.43, 1.47, 1.50, 1.54, 1.58, 1.62, 1.65, 1.69, 1.74, 1.78, 1.82, 1.87, 1.91, 1.96, 2.00, 2.05, 2.10, 2.15, 2.21, 2.26, 2.32, 2.37, 2.43, 2.49, 2.55, 2.61, 2.67, 2.74, 2.80, 2.87, 2.94, 3.01, 3.09, 3.16, 3.24, 3.32, 3.40, 3.48, 3.57, 3.65, 3.74, 3.83, 3.92, 4.02, 4.12, 4.22, 4.32, 4.42, 4.53, 4.64, 4.75, 4.87, 4.99, 5.11, 5.23, 5.36, 5.49, 5.62, 5.76, 5.90, 6.04, 6.19, 6.34, 6.49, 6.65, 6.81, 6.98, 7.15, 7.32, 7.50, 7.68, 7.87, 8.06, 8.25, 8.45, 8.66, 8.87, 9.09, 9.31, 9.53, 9.76, ] _SERIES = { "E12": _E12, "E24": _E24, "E96": _E96, } # Engineering prefixes: exponent -> suffix _ENG_PREFIXES = { -15: "f", -12: "p", -9: "n", -6: "u", -3: "m", 0: "", 3: "k", 6: "M", 9: "G", 12: "T", } # --------------------------------------------------------------------------- # Data classes # --------------------------------------------------------------------------- @dataclass class OptimizationTarget: """A single performance target to optimize toward.""" signal_name: str # e.g. "V(out)" metric: str # "bandwidth_hz", "rms", "peak_to_peak", "settling_time", # "gain_db", "phase_margin_deg" target_value: float weight: float = 1.0 @dataclass class ComponentRange: """Allowed range for a component value during optimization.""" component_name: str # e.g. "R1" min_value: float max_value: float preferred_series: str | None = None # "E12", "E24", "E96" @dataclass class OptimizationResult: """Outcome of an optimization run.""" best_values: dict[str, float] best_cost: float iterations: int history: list[dict] = field(default_factory=list) targets_met: dict[str, bool] = field(default_factory=dict) final_metrics: dict[str, float] = field(default_factory=dict) # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- def snap_to_preferred(value: float, series: str = "E12") -> float: """Snap a value to the nearest standard component value. Works across decades -- e.g. 4800 snaps to 4.7k (E12). Args: value: Raw component value in base units (ohms, farads, etc.) series: Preferred series name ("E12", "E24", "E96") Returns: Nearest standard value from the requested series. """ if value <= 0: return _SERIES.get(series, _E12)[0] base_values = _SERIES.get(series, _E12) decade = math.floor(math.log10(value)) mantissa = value / (10**decade) best = base_values[0] best_ratio = abs(math.log10(mantissa / best)) for bv in base_values[1:]: ratio = abs(math.log10(mantissa / bv)) if ratio < best_ratio: best = bv best_ratio = ratio # Also check the decade above (mantissa might round up past 9.x) for bv in base_values[:2]: candidate = bv * 10 ratio = abs(math.log10(mantissa / candidate)) if ratio < best_ratio: best = candidate best_ratio = ratio return best * (10**decade) def format_engineering(value: float) -> str: """Format a float in engineering notation with SI suffix. Examples: 10000 -> "10k" 0.0001 -> "100u" 4700 -> "4.7k" 0.0000033 -> "3.3u" 1.5 -> "1.5" """ if value == 0: return "0" sign = "-" if value < 0 else "" value = abs(value) exp = math.floor(math.log10(value)) # Round to nearest multiple of 3 (downward) eng_exp = (exp // 3) * 3 # Clamp to known prefixes eng_exp = max(-15, min(12, eng_exp)) mantissa = value / (10**eng_exp) suffix = _ENG_PREFIXES.get(eng_exp, f"e{eng_exp}") # Format mantissa: strip trailing zeros but keep at least one digit if mantissa == int(mantissa) and mantissa < 1000: formatted = f"{sign}{int(mantissa)}{suffix}" else: formatted = f"{sign}{mantissa:.3g}{suffix}" return formatted # --------------------------------------------------------------------------- # Metric extraction # --------------------------------------------------------------------------- def _extract_metric( raw_data: RawFile, target: OptimizationTarget, ) -> float | None: """Compute a single metric from simulation results. Returns the measured value, or None if the signal/data is unavailable. """ signal = raw_data.get_variable(target.signal_name) if signal is None: return None metric = target.metric if metric == "rms": return compute_rms(signal) if metric == "peak_to_peak": result = compute_peak_to_peak(signal) return result["peak_to_peak"] if metric == "settling_time": time = raw_data.get_time() if time is None: return None if np.iscomplexobj(time): time = time.real if np.iscomplexobj(signal): signal = np.abs(signal) result = compute_settling_time(time, signal) return result["settling_time"] if metric == "bandwidth_hz": freq = raw_data.get_frequency() if freq is None: return None # Convert complex magnitude to dB mag_db = np.where( np.abs(signal) > 0, 20.0 * np.log10(np.abs(signal)), -200.0, ) result = compute_bandwidth(np.real(freq), np.real(mag_db)) return result["bandwidth_hz"] if metric == "gain_db": # Peak gain in dB (for AC analysis signals) mag = np.abs(signal) peak = float(np.max(mag)) if peak > 0: return 20.0 * math.log10(peak) return -200.0 if metric == "phase_margin_deg": freq = raw_data.get_frequency() if freq is None: return None # Phase margin: phase at the frequency where |gain| crosses 0 dB mag = np.abs(signal) mag_db = np.where(mag > 0, 20.0 * np.log10(mag), -200.0) phase_deg = np.degrees(np.angle(signal)) # Find 0 dB crossing (gain crossover) for i in range(len(mag_db) - 1): if mag_db[i] >= 0 and mag_db[i + 1] < 0: # Interpolate phase at 0 dB crossing dm = mag_db[i + 1] - mag_db[i] if abs(dm) < 1e-30: phase_at_xover = float(phase_deg[i]) else: frac = (0.0 - mag_db[i]) / dm phase_at_xover = float(phase_deg[i] + frac * (phase_deg[i + 1] - phase_deg[i])) # Phase margin = 180 + phase (since we want distance from -180) return 180.0 + phase_at_xover # No 0 dB crossing found -- gain never reaches unity return None return None def _compute_cost( raw_data: RawFile, targets: list[OptimizationTarget], ) -> tuple[float, dict[str, float]]: """Evaluate weighted cost across all targets. Cost for each target: weight * |measured - target| / |target| Uses |target| normalization so different units are comparable. Returns: (total_cost, metrics_dict) where metrics_dict maps "signal_name.metric" -> measured_value """ total_cost = 0.0 metrics: dict[str, float] = {} for t in targets: measured = _extract_metric(raw_data, t) key = f"{t.signal_name}.{t.metric}" if measured is None: # Signal not found -- heavy penalty total_cost += t.weight * 1e6 metrics[key] = float("nan") continue metrics[key] = measured if abs(t.target_value) > 1e-30: cost = t.weight * abs(measured - t.target_value) / abs(t.target_value) else: # Target is ~0: use absolute error cost = t.weight * abs(measured) total_cost += cost return total_cost, metrics # --------------------------------------------------------------------------- # Core optimizer # --------------------------------------------------------------------------- async def _run_with_values( netlist_template: str, values: dict[str, float], work_dir: Path, ) -> RawFile | None: """Substitute values into template, write .cir, simulate, return parsed data.""" text = netlist_template for name, val in values.items(): text = text.replace(f"{{{name}}}", format_engineering(val)) cir_path = work_dir / "opt_iter.cir" cir_path.write_text(text) result = await run_netlist(cir_path, work_dir=work_dir) if not result.success or result.raw_data is None: logger.warning("Simulation failed: %s", result.error) return None return result.raw_data async def _binary_search_single( netlist_template: str, target: OptimizationTarget, comp: ComponentRange, max_iterations: int, work_dir: Path, ) -> OptimizationResult: """Optimize a single component via binary search. Assumes the metric is monotonic (or at least locally monotonic) with respect to the component value. Evaluates the midpoint, then narrows the half that moves the metric toward the target. """ lo = comp.min_value hi = comp.max_value history: list[dict] = [] best_values = {comp.component_name: (lo + hi) / 2} best_cost = float("inf") best_metrics: dict[str, float] = {} for iteration in range(max_iterations): mid = (lo + hi) / 2 values = {comp.component_name: mid} raw_data = await _run_with_values(netlist_template, values, work_dir) if raw_data is None: history.append( { "iteration": iteration, "values": {comp.component_name: mid}, "cost": float("inf"), "error": "simulation_failed", } ) # Narrow toward the other half on failure hi = mid continue cost, metrics = _compute_cost(raw_data, [target]) measured = metrics.get(f"{target.signal_name}.{target.metric}") history.append( { "iteration": iteration, "values": {comp.component_name: mid}, "cost": cost, "metrics": metrics, } ) if cost < best_cost: best_cost = cost best_values = {comp.component_name: mid} best_metrics = metrics # Decide which half to keep if measured is not None and not math.isnan(measured): if measured < target.target_value: # Need larger metric -- which direction depends on monotonicity. # Test: does increasing component value increase or decrease metric? # We probe a point slightly above mid to determine direction. probe = mid * 1.1 if probe > hi: probe = mid * 0.9 probe_is_lower = True else: probe_is_lower = False probe_data = await _run_with_values( netlist_template, {comp.component_name: probe}, work_dir ) if probe_data is not None: probe_measured = _extract_metric(probe_data, target) if probe_measured is not None: if probe_is_lower: # We probed lower. If metric went up, metric increases # with decreasing value => go lower. if probe_measured > measured: hi = mid else: lo = mid else: # We probed higher. If metric went up, go higher. if probe_measured > measured: lo = mid else: hi = mid else: hi = mid else: hi = mid elif measured > target.target_value: # Need smaller metric -- mirror logic probe = mid * 1.1 if probe > hi: probe = mid * 0.9 probe_is_lower = True else: probe_is_lower = False probe_data = await _run_with_values( netlist_template, {comp.component_name: probe}, work_dir ) if probe_data is not None: probe_measured = _extract_metric(probe_data, target) if probe_measured is not None: if probe_is_lower: if probe_measured < measured: hi = mid else: lo = mid else: if probe_measured < measured: lo = mid else: hi = mid else: lo = mid else: lo = mid else: # Exact match break # Converged if search range is tight enough if hi - lo < lo * 0.001: break # Snap to preferred series if requested final_val = best_values[comp.component_name] if comp.preferred_series: final_val = snap_to_preferred(final_val, comp.preferred_series) best_values[comp.component_name] = final_val target_key = f"{target.signal_name}.{target.metric}" met = best_metrics.get(target_key) tolerance = abs(target.target_value * 0.05) if abs(target.target_value) > 0 else 0.05 targets_met = { target_key: met is not None and abs(met - target.target_value) <= tolerance, } return OptimizationResult( best_values=best_values, best_cost=best_cost, iterations=len(history), history=history, targets_met=targets_met, final_metrics=best_metrics, ) async def _coordinate_descent( netlist_template: str, targets: list[OptimizationTarget], component_ranges: list[ComponentRange], max_iterations: int, work_dir: Path, ) -> OptimizationResult: """Optimize multiple components via coordinate descent. Cycles through components, running a bounded golden-section search on each one while holding the others fixed. Repeats until the overall cost stops improving or we exhaust the iteration budget. """ # Start at geometric midpoint of each range current_values: dict[str, float] = {} for cr in component_ranges: current_values[cr.component_name] = math.sqrt(cr.min_value * cr.max_value) history: list[dict] = [] best_cost = float("inf") best_values = dict(current_values) best_metrics: dict[str, float] = {} iteration = 0 n_comps = len(component_ranges) # Golden ratio for search phi = (1 + math.sqrt(5)) / 2 resphi = 2 - phi # ~0.382 while iteration < max_iterations: improved_this_cycle = False for cr in component_ranges: if iteration >= max_iterations: break lo = cr.min_value hi = cr.max_value # Golden-section search for this component # Allocate a few iterations per component per cycle iters_per_comp = max(2, (max_iterations - iteration) // n_comps) a = lo b = hi x1 = a + resphi * (b - a) x2 = b - resphi * (b - a) # Evaluate x1 trial_1 = dict(current_values) trial_1[cr.component_name] = x1 raw_1 = await _run_with_values(netlist_template, trial_1, work_dir) cost_1 = float("inf") metrics_1: dict[str, float] = {} if raw_1 is not None: cost_1, metrics_1 = _compute_cost(raw_1, targets) iteration += 1 # Evaluate x2 trial_2 = dict(current_values) trial_2[cr.component_name] = x2 raw_2 = await _run_with_values(netlist_template, trial_2, work_dir) cost_2 = float("inf") metrics_2: dict[str, float] = {} if raw_2 is not None: cost_2, metrics_2 = _compute_cost(raw_2, targets) iteration += 1 for _ in range(iters_per_comp - 2): if iteration >= max_iterations: break if cost_1 < cost_2: b = x2 x2 = x1 cost_2 = cost_1 metrics_2 = metrics_1 x1 = a + resphi * (b - a) trial = dict(current_values) trial[cr.component_name] = x1 raw = await _run_with_values(netlist_template, trial, work_dir) if raw is not None: cost_1, metrics_1 = _compute_cost(raw, targets) else: cost_1 = float("inf") metrics_1 = {} else: a = x1 x1 = x2 cost_1 = cost_2 metrics_1 = metrics_2 x2 = b - resphi * (b - a) trial = dict(current_values) trial[cr.component_name] = x2 raw = await _run_with_values(netlist_template, trial, work_dir) if raw is not None: cost_2, metrics_2 = _compute_cost(raw, targets) else: cost_2 = float("inf") metrics_2 = {} iteration += 1 # Pick the better of the final two if cost_1 <= cost_2: chosen_val = x1 chosen_cost = cost_1 chosen_metrics = metrics_1 else: chosen_val = x2 chosen_cost = cost_2 chosen_metrics = metrics_2 # Snap to preferred series if cr.preferred_series: chosen_val = snap_to_preferred(chosen_val, cr.preferred_series) current_values[cr.component_name] = chosen_val history.append( { "iteration": iteration, "optimized_component": cr.component_name, "values": dict(current_values), "cost": chosen_cost, "metrics": chosen_metrics, } ) if chosen_cost < best_cost: best_cost = chosen_cost best_values = dict(current_values) best_metrics = chosen_metrics improved_this_cycle = True if not improved_this_cycle: break # Determine which targets are met (within 5%) targets_met: dict[str, bool] = {} for t in targets: key = f"{t.signal_name}.{t.metric}" measured = best_metrics.get(key) if measured is None or math.isnan(measured): targets_met[key] = False else: tolerance = abs(t.target_value * 0.05) if abs(t.target_value) > 0 else 0.05 targets_met[key] = abs(measured - t.target_value) <= tolerance return OptimizationResult( best_values=best_values, best_cost=best_cost, iterations=iteration, history=history, targets_met=targets_met, final_metrics=best_metrics, ) async def optimize_component_values( netlist_template: str, targets: list[OptimizationTarget], component_ranges: list[ComponentRange], max_iterations: int = 20, method: str = "binary_search", ) -> OptimizationResult: """Iteratively adjust component values toward target specifications. Creates temporary .cir files, runs real LTspice simulations via Wine, and evaluates a cost function against the targets after each run. Args: netlist_template: Netlist text with ``{ComponentName}`` placeholders (e.g. ``{R1}``, ``{C1}``) that get substituted each iteration. targets: Performance targets to optimize toward. component_ranges: Allowed ranges for each tunable component. max_iterations: Maximum simulation iterations (default 20). method: ``"binary_search"`` for single-component optimization, ``"coordinate_descent"`` for multi-component. When ``"binary_search"`` is requested with multiple components, coordinate descent is used automatically. Returns: OptimizationResult with best values found, cost history, and whether each target was met. """ work_dir = Path(tempfile.mkdtemp(prefix="ltspice_opt_")) if len(component_ranges) == 1 and len(targets) == 1 and method == "binary_search": return await _binary_search_single( netlist_template, targets[0], component_ranges[0], max_iterations, work_dir, ) return await _coordinate_descent( netlist_template, targets, component_ranges, max_iterations, work_dir, )