Implement precision timing-recovery functions:
- _refine_symbol_boundary(): scans at 1/32-symbol resolution to find the exact chirp boundary by maximizing dechirped SNR
- _find_sfd_boundary(): FFT-based correlation with a downchirp template to find the exact data start position

Bug fixes:
- Fix _is_downchirp() false positives by comparing both correlations
- Fix _estimate_cfo() to return values in the [0, N) range

The improved sync_from_samples() now produces bins identical to the reference lora_decode_gpu decoder.
199 lines · 7.9 KiB · Python
#!/usr/bin/env python3
|
|
"""Debug timing comparison between existing decoder and our FrameSync."""
|
|
|
|
import sys
|
|
from pathlib import Path
|
|
import numpy as np
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent / "python"))
|
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "gnuradio"))
|
|
|
|
from rylr998 import Channelizer, FrameSync
|
|
from rylr998.frame_sync import FrameSyncState
|
|
from lora_decode_gpu import GPULoraDecoder, channelize as lora_channelize
|
|
|
|
# LoRa PHY parameters for this capture: SF9, 125 kHz bandwidth,
# critically sampled (sample rate == BW).
SF = 9
BW = 125e3
N = 512   # 2**SF FFT bins / samples per symbol at fs == BW
sps = N   # samples-per-symbol alias used throughout the script

# Load the raw wideband IQ capture and channelize it down to baseband at BW.
capture_path = Path(__file__).parent.parent.parent / "gnuradio" / "logs" / "capture_multi.raw"
iq_raw = np.fromfile(capture_path, dtype=np.complex64)
iq_ch = lora_channelize(iq_raw, 2e6, 915e6, 915e6, BW)
|
|
|
|
# Create the reference (existing) decoder and locate the first preamble it
# finds; that position anchors every comparison below.
dec = GPULoraDecoder(sf=SF, bw=BW, sample_rate=BW, use_gpu=False)
symbols, snrs = dec.batch_detect_symbols(iq_ch)
preambles = dec.find_preambles(symbols, snrs)
if not preambles:
    # Fail loudly instead of an opaque IndexError on preambles[0].
    sys.exit("No preambles found by the existing decoder; nothing to compare.")
start_idx, length, _ = preambles[0]
print(f"Existing decoder: preamble at symbol {start_idx}, len={length}")
print(f"Preamble bin from existing: {int(symbols[start_idx])}")
print("Bins around preamble:")
# Clamp the window: a negative index would silently wrap around in numpy,
# and an index past the end of `symbols` would raise IndexError.
for i in range(max(0, start_idx - 2), min(len(symbols), start_idx + length + 6)):
    print(f" sym {i}: bin={int(symbols[i])}, snr={snrs[i]:.1f}dB")
|
|
|
|
# Carve out a window around the detected preamble — five symbols of lead-in
# plus (preamble + 30 symbols) of tail — and run our FrameSync over it.
region_start = max(0, (start_idx - 5) * sps)
region_end = (start_idx + length + 30) * sps
iq_region = iq_ch[region_start:region_end]
print(f"\nExtracting region starting at sample {region_start}")

sync = FrameSync(sf=SF, sample_rate=BW, bw=BW)
|
|
|
|
print(f"\nTracing FrameSync state machine:")
n_symbols = len(iq_region) // sps
preamble_sym = None  # region-relative symbol index where SEARCH -> PREAMBLE fired
for i in range(min(n_symbols, 30)):
    # One symbol-length slice per iteration; state is captured before and
    # after process_symbol() so transitions can be reported.
    sym_samples = iq_region[i*sps:(i+1)*sps]
    prev_state = sync._state.name

    # Get peak bin directly
    peak_bin, peak_mag = sync._dechirp_and_peak(sym_samples)

    # Debug: get both correlations.  Multiplying by one chirp template
    # cancels the quadratic phase of the opposite chirp, collapsing it to a
    # single FFT bin; peak/mean of the spectrum is the correlation quality.
    # (Naming assumes sync._upchirp dechirps a downchirp and vice versa —
    # TODO confirm against frame_sync's template convention.)
    seg = sym_samples[:sync.sps]
    dc_dechirped = seg * sync._upchirp
    dc_spectrum = np.abs(np.fft.fft(dc_dechirped, n=sync.N))
    dc_corr = np.max(dc_spectrum) / np.mean(dc_spectrum)

    uc_dechirped = seg * sync._downchirp
    uc_spectrum = np.abs(np.fft.fft(uc_dechirped, n=sync.N))
    uc_corr = np.max(uc_spectrum) / np.mean(uc_spectrum)

    is_dc, dc_mag = sync._is_downchirp(sym_samples)

    # Advance the state machine (mutates sync's internal state).
    sync.process_symbol(sym_samples)
    new_state = sync._state.name

    state_changed = prev_state != new_state
    # Only report transitions or symbols processed outside SEARCH, to keep
    # the trace readable.
    interesting = state_changed or new_state != "SEARCH"

    if interesting:
        if prev_state == "SEARCH" and new_state == "PREAMBLE":
            # Record where our detector first locked onto the preamble.
            preamble_sym = i
        print(f" sym {i}: bin={peak_bin:3d} uc={uc_corr:.1f}x dc={dc_corr:.1f}x isDC={is_dc} {prev_state:10} -> {new_state}")
        if new_state == "PREAMBLE":
            print(f" preamble_count={sync._preamble_count}, cfo_est={sync._cfo_estimate:.1f}")
|
|
|
|
# Summarize where our FrameSync entered PREAMBLE relative to the reference.
print(f"\nFound preamble starting at region symbol {preamble_sym}")
# BUG FIX: compare against None explicitly -- a truthiness test
# (`if preamble_sym`) would wrongly treat a preamble detected at region
# symbol 0 as "not found".
global_sym = region_start // sps + preamble_sym if preamble_sym is not None else None
print(f"That corresponds to global symbol {global_sym}")
print(f"Existing says preamble at global symbol {start_idx}")
|
|
|
|
# --- Direct SFD-correlation debugging ---------------------------------
print("\n" + "=" * 70)
print("Debug SFD correlation")

# Sanity check: our downchirp template should match the reference decoder's.
from lora_decode_gpu import generate_chirp

our_dc = sync._downchirp
existing_dc = generate_chirp(SF, BW, BW, up=False)

# Normalized inner product: vdot conjugates its first argument, so
# |vdot(our, existing)| / N == 1.0 when the templates agree up to a
# constant phase.
corr_check = np.abs(np.vdot(our_dc, existing_dc)) / N
print(f"Downchirp correlation with existing: {corr_check:.4f}")
print(f"Our _downchirp first 5 phases: {np.angle(our_dc[:5])}")
print(f"Existing downchirp first 5 phases: {np.angle(existing_dc[:5])}")
|
|
|
|
# Replay what sync_from_samples() does internally, step by step.
sync.reset()

# Drive the state machine symbol-by-symbol until it reaches DATA.
sps = N  # re-assert the samples-per-symbol alias (unchanged from above)
for sym_idx in range(30):
    sync.process_symbol(iq_region[sym_idx * sps:(sym_idx + 1) * sps])
    if sync._state.name == "DATA":
        break
|
|
|
|
# Refine the coarse preamble boundary (symbol 6, from the trace above) at
# sub-symbol resolution across the 13 preamble upchirps.
coarse_start = 6 * sps
refined_start, true_bin = sync._refine_symbol_boundary(iq_region, coarse_start, 13)
print(f"Refined start: sample {refined_start} (offset {refined_start - coarse_start} from coarse)")
print(f"True bin: {true_bin}")

# SFD search window: start one symbol past the 13 upchirps, search 4 symbols.
sfd_search_start = refined_start + 14 * sps
sfd_search_len = 4 * sps
print(f"SFD search: samples {sfd_search_start} to {sfd_search_start + sfd_search_len}")
|
|
|
# Cross-correlate the search window with the downchirp template via FFT
# (circular correlation): corr = |IFFT(FFT(segment) * conj(FFT(template)))|.
segment = iq_region[sfd_search_start:sfd_search_start + sfd_search_len]
padded_template = np.zeros(len(segment), dtype=np.complex64)
padded_template[:sps] = sync._downchirp

seg_spec = np.fft.fft(segment)
tpl_spec = np.fft.fft(padded_template)
corr = np.abs(np.fft.ifft(seg_spec * np.conj(tpl_spec)))

peak_offset = int(np.argmax(corr))
sfd_start = sfd_search_start + peak_offset
data_start = sfd_start + int(2.25 * sps)  # SFD spans 2.25 downchirp symbols

print(f"Correlation peak offset: {peak_offset} samples = {peak_offset/sps:.2f} symbols")
print(f"SFD start: sample {sfd_start}")
print(f"Data start: sample {data_start}")
print(f"Data start symbol in region: {data_start / sps:.2f}")
|
|
|
|
# Run the reference decoder's boundary refinement at the same position and
# compare against ours (converted to global sample coordinates).
print("\nExisting decoder refinement comparison:")
existing_refined_start, existing_true_bin = dec._refine_symbol_boundary(
    iq_ch, start_idx * sps, length
)
print(f" Existing: refined_start={existing_refined_start}, true_bin={existing_true_bin}")
print(f" Ours: refined_start={region_start + refined_start}, true_bin={true_bin}")
print(f" Bin difference: {true_bin - existing_true_bin}")
|
|
|
|
# Run the reference decoder's SFD search over the same window, expressed in
# global (whole-capture) sample coordinates.
global_sfd_search_start = region_start + sfd_search_start
global_sfd_search_len = sfd_search_len
print(f"\nExisting decoder SFD search on same global position:")
existing_data_start = dec._find_sfd_boundary(iq_ch, global_sfd_search_start, global_sfd_search_len)
# BUG FIX: test against None explicitly -- a valid boundary at sample 0 is
# falsy and the truthiness test would misreport it as "returned None".
if existing_data_start is not None:
    print(f" Existing data_start: global sample {existing_data_start}")
    print(f" Existing data_start symbol (from region start): {(existing_data_start - region_start) / sps:.2f}")
else:
    print(" Existing _find_sfd_boundary returned None")
|
|
|
|
# --- End-to-end check: run sync_from_samples on the same region -------
print("\n" + "=" * 70)
print("Testing sync_from_samples on same region")

sync.reset()
result = sync.sync_from_samples(iq_region, max_data_symbols=25)

# Report the sync result fields in one pass.
summary = [
    ("found", result.found),
    ("preamble_count", result.preamble_count),
    ("cfo_bin", result.cfo_bin),
    ("data_symbols", len(result.data_symbols)),
    ("First 10 data bins", result.data_symbols[:10]),
]
for label, value in summary:
    print(f" {label}: {value}")
|
|
|
|
# Demodulate the same frame with the reference decoder and compare data
# bins one-by-one against ours.
preamble_bin = int(symbols[start_idx])
existing_data_bins, cfo_bin = dec.demodulate_payload(
    iq_ch, start_idx * sps, length, preamble_snr=36.4, preamble_bin=preamble_bin
)
print(f"\nExisting decoder:")
print(f" cfo_bin: {cfo_bin}")
print(f" First 10 data bins: {[int(x) for x in existing_data_bins[:10]]}")

print(f"\nBin comparison (existing vs ours):")
n_compare = min(10, len(result.data_symbols), len(existing_data_bins))
for i in range(n_compare):
    existing = int(existing_data_bins[i])
    ours = result.data_symbols[i]
    # Signed circular difference, folded into (-N/2, N/2].
    diff = (ours - existing) % N
    if diff > N // 2:
        diff -= N
    match = "✓" if abs(diff) <= 1 else f"off by {diff}"
    print(f" [{i}] existing={existing:3d} ours={ours:3d} {match}")
|
|
|
|
# Try correcting our bins with a constant offset
print(f"\nTrying constant offset correction of +25:")
offset = 25
corrected_bins = [(b + offset) % N for b in result.data_symbols]
print(f" Corrected first 10: {corrected_bins[:10]}")


def _circular_bin_diff(a, b, n=N):
    """Return the signed circular difference a-b folded into (-n/2, n/2]."""
    d = (a - b) % n
    return d - n if d > n // 2 else d


# BUG FIX: compare modulo N (the same circular metric used by the
# bin-comparison loop above) -- a plain abs() misses wrap-around pairs
# such as bin 0 vs bin N-1, undercounting matches.
match_count = sum(
    1
    for i in range(min(10, len(corrected_bins), len(existing_data_bins)))
    if abs(_circular_bin_diff(corrected_bins[i], int(existing_data_bins[i]))) <= 1
)
print(f" Matches: {match_count}/10")
|
|
|
|
# Alternative correction: shift our bins by the disagreement between the
# reference decoder's CFO estimate and ours (modulo N).
cfo_diff = (int(cfo_bin) - int(result.cfo_bin)) % N  # existing CFO - our CFO
print(f"\nCFO difference: {cfo_diff}")
cfo_corrected = [(bin_val + cfo_diff) % N for bin_val in result.data_symbols]
print(f" CFO-corrected first 10: {cfo_corrected[:10]}")
|