- Self-contained HTML dashboard with MS Office 365 design
- pytest plugin captures inputs, outputs, and errors per test
- Unified orchestrator runs pytest + torture tests together
- Test files persisted in reports/test_files/ with relative links
- GitHub Actions workflow with PR comments and job summaries
- Makefile with convenient commands (test, view-dashboard, etc.)
- Works offline with embedded JSON data (no CORS issues)
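Once a run completes, the merged report in `reports/test_results.json` can be consumed by other tooling. A minimal sketch (field names taken from the generator script below; the path assumes you run from the repo root):

```python
import json
from pathlib import Path

# Load the merged report written by the orchestrator.
results = json.loads(Path("reports/test_results.json").read_text())

summary = results["summary"]
print(f"{summary['passed']}/{summary['total']} passed ({summary['pass_rate']:.1f}%)")

# Each test record carries captured inputs/outputs and any error text.
for test in results["tests"]:
    if test["outcome"] == "failed":
        print(f"FAILED {test['nodeid']}: {test['error']}")
```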
#!/usr/bin/env python
"""
Run both pytest and torture tests, then generate a unified test dashboard.

This script orchestrates:
1. Running pytest with the dashboard plugin
2. Running torture tests with result capture
3. Merging results into a single JSON file
4. Opening the dashboard in the browser
"""

import asyncio
import json
import os
import re
import subprocess
import sys
import time
from datetime import datetime
from pathlib import Path

# Add src to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

def run_pytest_tests(output_path: Path) -> dict:
    """Run pytest tests with the dashboard plugin."""
    print("\n" + "=" * 70)
    print("🧪 Running pytest test suite...")
    print("=" * 70)

    # The dashboard plugin is loaded explicitly via pytest's -p option,
    # so it works without being installed as an entry point.
    cmd = [
        sys.executable,
        "-m",
        "pytest",
        "-p",
        "tests.pytest_dashboard_plugin",
        f"--dashboard-output={output_path}",
        "-v",
    ]

    result = subprocess.run(cmd, cwd=Path(__file__).parent)

    # Load results
    if output_path.exists():
        with open(output_path) as f:
            return json.load(f)
    else:
        return {
            "metadata": {
                "start_time": datetime.now().isoformat(),
                "end_time": datetime.now().isoformat(),
                "duration": 0,
                "exit_status": result.returncode,
            },
            "summary": {"total": 0, "passed": 0, "failed": 0, "skipped": 0, "pass_rate": 0},
            "categories": {},
            "tests": [],
        }

async def run_torture_tests(test_files_dir: Path | None = None) -> dict:
    """Run torture tests and capture results.

    Args:
        test_files_dir: Directory to store test files. If provided, files persist
            for inclusion in the dashboard. If None, uses a temp directory.
    """
    print("\n" + "=" * 70)
    print("🔥 Running torture tests...")
    print("=" * 70)

    from torture_test import (
        create_test_xlsx,
        create_test_docx,
        EXCEL_TEST_FILES,
        ExcelMixin,
        WordMixin,
    )

    excel_mixin = ExcelMixin()
    word_mixin = WordMixin()

    results = []
    start_time = time.time()

    # Use persistent directory if provided, otherwise a temp directory
    if test_files_dir:
        test_files_dir.mkdir(parents=True, exist_ok=True)
        test_xlsx = create_test_xlsx(str(test_files_dir / "test_data.xlsx"))
        test_docx = create_test_docx(str(test_files_dir / "test_document.docx"))
        # Use relative paths for the dashboard
        test_xlsx_path = "test_files/test_data.xlsx"
        test_docx_path = "test_files/test_document.docx"
    else:
        import tempfile
        # Note: mkdtemp() leaves the directory behind; it is not removed on exit.
        tmpdir = tempfile.mkdtemp()
        test_xlsx = create_test_xlsx(os.path.join(tmpdir, "test_data.xlsx"))
        test_docx = create_test_docx(os.path.join(tmpdir, "test_document.docx"))
        test_xlsx_path = test_xlsx
        test_docx_path = test_docx

    # Test 1: Excel Data Analysis
    test_start = time.time()
    try:
        result = await excel_mixin.analyze_excel_data(test_xlsx)
        summary = result.get("summary", {})
        sheets_count = summary.get("sheets_analyzed", 1)
        results.append({
            "name": "Excel Data Analysis",
            "nodeid": "torture_test.py::test_excel_data_analysis",
            "category": "Excel",
            "outcome": "passed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_data_analysis",
            "inputs": {"file": test_xlsx_path},
            "outputs": {"sheets_analyzed": sheets_count},
            "error": None,
            "traceback": None,
        })
    except Exception as e:
        results.append({
            "name": "Excel Data Analysis",
            "nodeid": "torture_test.py::test_excel_data_analysis",
            "category": "Excel",
            "outcome": "failed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_data_analysis",
            "inputs": {"file": test_xlsx_path},
            "outputs": None,
            "error": str(e),
            "traceback": f"{type(e).__name__}: {e}",
        })

    # Test 2: Excel Formula Extraction
    test_start = time.time()
    try:
        result = await excel_mixin.extract_excel_formulas(test_xlsx)
        summary = result.get("summary", {})
        formula_count = summary.get("total_formulas", 0)
        results.append({
            "name": "Excel Formula Extraction",
            "nodeid": "torture_test.py::test_excel_formula_extraction",
            "category": "Excel",
            "outcome": "passed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_formula_extraction",
            "inputs": {"file": test_xlsx_path},
            "outputs": {"total_formulas": formula_count},
            "error": None,
            "traceback": None,
        })
    except Exception as e:
        results.append({
            "name": "Excel Formula Extraction",
            "nodeid": "torture_test.py::test_excel_formula_extraction",
            "category": "Excel",
            "outcome": "failed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_formula_extraction",
            "inputs": {"file": test_xlsx_path},
            "outputs": None,
            "error": str(e),
            "traceback": f"{type(e).__name__}: {e}",
        })

    # Test 3: Excel Chart Generation
    test_start = time.time()
    try:
        result = await excel_mixin.create_excel_chart_data(
            test_xlsx,
            x_column="Category",
            y_columns=["Value"],
            chart_type="bar",
        )
        chart_libs = len(result.get("chart_configuration", {}))
        results.append({
            "name": "Excel Chart Data Generation",
            "nodeid": "torture_test.py::test_excel_chart_generation",
            "category": "Excel",
            "outcome": "passed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_chart_generation",
            "inputs": {"file": test_xlsx_path, "x_column": "Category", "y_columns": ["Value"]},
            "outputs": {"chart_libraries": chart_libs},
            "error": None,
            "traceback": None,
        })
    except Exception as e:
        results.append({
            "name": "Excel Chart Data Generation",
            "nodeid": "torture_test.py::test_excel_chart_generation",
            "category": "Excel",
            "outcome": "failed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_excel_chart_generation",
            "inputs": {"file": test_xlsx_path, "x_column": "Category", "y_columns": ["Value"]},
            "outputs": None,
            "error": str(e),
            "traceback": f"{type(e).__name__}: {e}",
        })

    # Test 4: Word Structure Analysis
    test_start = time.time()
    try:
        result = await word_mixin.analyze_word_structure(test_docx)
        heading_count = result["structure"].get("total_headings", 0)
        results.append({
            "name": "Word Structure Analysis",
            "nodeid": "torture_test.py::test_word_structure_analysis",
            "category": "Word",
            "outcome": "passed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_word_structure_analysis",
            "inputs": {"file": test_docx_path},
            "outputs": {"total_headings": heading_count},
            "error": None,
            "traceback": None,
        })
    except Exception as e:
        results.append({
            "name": "Word Structure Analysis",
            "nodeid": "torture_test.py::test_word_structure_analysis",
            "category": "Word",
            "outcome": "failed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_word_structure_analysis",
            "inputs": {"file": test_docx_path},
            "outputs": None,
            "error": str(e),
            "traceback": f"{type(e).__name__}: {e}",
        })

    # Test 5: Word Table Extraction
    test_start = time.time()
    try:
        result = await word_mixin.extract_word_tables(test_docx)
        table_count = result.get("total_tables", 0)
        results.append({
            "name": "Word Table Extraction",
            "nodeid": "torture_test.py::test_word_table_extraction",
            "category": "Word",
            "outcome": "passed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_word_table_extraction",
            "inputs": {"file": test_docx_path},
            "outputs": {"total_tables": table_count},
            "error": None,
            "traceback": None,
        })
    except Exception as e:
        results.append({
            "name": "Word Table Extraction",
            "nodeid": "torture_test.py::test_word_table_extraction",
            "category": "Word",
            "outcome": "failed",
            "duration": time.time() - test_start,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_word_table_extraction",
            "inputs": {"file": test_docx_path},
            "outputs": None,
            "error": str(e),
            "traceback": f"{type(e).__name__}: {e}",
        })

    # Test 6: Real Excel file (if available)
    real_excel = EXCEL_TEST_FILES[0]
    if os.path.exists(real_excel):
        test_start = time.time()
        try:
            result = await excel_mixin.analyze_excel_data(real_excel)
            sheets = len(result.get("sheets", []))
            results.append({
                "name": "Real Excel File Analysis (FORScan)",
                "nodeid": "torture_test.py::test_real_excel_analysis",
                "category": "Excel",
                "outcome": "passed",
                "duration": time.time() - test_start,
                "timestamp": datetime.now().isoformat(),
                "module": "torture_test",
                "class": None,
                "function": "test_real_excel_analysis",
                "inputs": {"file": real_excel},
                "outputs": {"sheets": sheets},
                "error": None,
                "traceback": None,
            })
        except Exception as e:
            results.append({
                "name": "Real Excel File Analysis (FORScan)",
                "nodeid": "torture_test.py::test_real_excel_analysis",
                "category": "Excel",
                "outcome": "failed",
                "duration": time.time() - test_start,
                "timestamp": datetime.now().isoformat(),
                "module": "torture_test",
                "class": None,
                "function": "test_real_excel_analysis",
                "inputs": {"file": real_excel},
                "outputs": None,
                "error": str(e),
                "traceback": f"{type(e).__name__}: {e}",
            })
    else:
        results.append({
            "name": "Real Excel File Analysis (FORScan)",
            "nodeid": "torture_test.py::test_real_excel_analysis",
            "category": "Excel",
            "outcome": "skipped",
            "duration": 0,
            "timestamp": datetime.now().isoformat(),
            "module": "torture_test",
            "class": None,
            "function": "test_real_excel_analysis",
            "inputs": {"file": real_excel},
            "outputs": None,
            "error": f"File not found: {real_excel}",
            "traceback": None,
        })

    # Calculate summary
    total_duration = time.time() - start_time
    passed = sum(1 for r in results if r["outcome"] == "passed")
    failed = sum(1 for r in results if r["outcome"] == "failed")
    skipped = sum(1 for r in results if r["outcome"] == "skipped")
    total = len(results)

    return {
        "metadata": {
            "start_time": datetime.fromtimestamp(start_time).isoformat(),
            "end_time": datetime.now().isoformat(),
            "duration": total_duration,
            "exit_status": 0 if failed == 0 else 1,
            "pytest_version": "torture_test",
        },
        "summary": {
            "total": total,
            "passed": passed,
            "failed": failed,
            "skipped": skipped,
            "pass_rate": (passed / total * 100) if total > 0 else 0,
        },
        "categories": {
            "Excel": {
                "total": sum(1 for r in results if r["category"] == "Excel"),
                "passed": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "passed"),
                "failed": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "failed"),
                "skipped": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "skipped"),
            },
            "Word": {
                "total": sum(1 for r in results if r["category"] == "Word"),
                "passed": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "passed"),
                "failed": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "failed"),
                "skipped": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "skipped"),
            },
        },
        "tests": results,
    }

def merge_results(pytest_results: dict, torture_results: dict) -> dict:
    """Merge pytest and torture test results."""
    # Merge tests
    all_tests = pytest_results.get("tests", []) + torture_results.get("tests", [])

    # Recalculate summary
    total = len(all_tests)
    passed = sum(1 for t in all_tests if t["outcome"] == "passed")
    failed = sum(1 for t in all_tests if t["outcome"] == "failed")
    skipped = sum(1 for t in all_tests if t["outcome"] == "skipped")

    # Merge categories
    all_categories = {}
    for cat_dict in [pytest_results.get("categories", {}), torture_results.get("categories", {})]:
        for cat, stats in cat_dict.items():
            if cat not in all_categories:
                all_categories[cat] = {"total": 0, "passed": 0, "failed": 0, "skipped": 0}
            for key in ["total", "passed", "failed", "skipped"]:
                all_categories[cat][key] += stats.get(key, 0)

    # Combine durations
    total_duration = (
        pytest_results.get("metadata", {}).get("duration", 0)
        + torture_results.get("metadata", {}).get("duration", 0)
    )

    return {
        "metadata": {
            "start_time": pytest_results.get("metadata", {}).get("start_time", datetime.now().isoformat()),
            "end_time": datetime.now().isoformat(),
            "duration": total_duration,
            "exit_status": 0 if failed == 0 else 1,
            "pytest_version": pytest_results.get("metadata", {}).get("pytest_version", "unknown"),
            "test_types": ["pytest", "torture_test"],
        },
        "summary": {
            "total": total,
            "passed": passed,
            "failed": failed,
            "skipped": skipped,
            "pass_rate": (passed / total * 100) if total > 0 else 0,
        },
        "categories": all_categories,
        "tests": all_tests,
    }

def main():
    """Main execution function."""
    reports_dir = Path(__file__).parent / "reports"
    reports_dir.mkdir(exist_ok=True)

    test_files_dir = reports_dir / "test_files"

    pytest_output = reports_dir / "pytest_results.json"
    final_output = reports_dir / "test_results.json"

    # Run pytest tests
    pytest_results = run_pytest_tests(pytest_output)

    # Run torture tests with persistent test files
    torture_results = asyncio.run(run_torture_tests(test_files_dir))

    # Merge results
    merged_results = merge_results(pytest_results, torture_results)

    # Write final results
    with open(final_output, "w") as f:
        json.dump(merged_results, f, indent=2)

    # Embed JSON data into HTML for offline viewing (file:// URLs)
    dashboard_html = reports_dir / "test_dashboard.html"
    if dashboard_html.exists():
        html_content = dashboard_html.read_text()
        # Remove any existing embedded data
        html_content = re.sub(
            r'<script type="application/json" id="test-results-data">.*?</script>\n?',
            '',
            html_content,
            flags=re.DOTALL,
        )
        # Embed fresh data before </body>
        embed_script = f'<script type="application/json" id="test-results-data">{json.dumps(merged_results)}</script>\n'
        html_content = html_content.replace('</body>', f'{embed_script}</body>')
        dashboard_html.write_text(html_content)
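        # The dashboard page can then read the report without any HTTP fetch,
        # e.g. (assumed consumer-side JavaScript):
        #   JSON.parse(document.getElementById("test-results-data").textContent)
        # which is what lets it work from file:// URLs with no CORS issues.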

    print("\n" + "=" * 70)
    print("📊 TEST DASHBOARD SUMMARY")
    print("=" * 70)
    print(f"\n✅ Passed: {merged_results['summary']['passed']}")
    print(f"❌ Failed: {merged_results['summary']['failed']}")
    print(f"⏭️ Skipped: {merged_results['summary']['skipped']}")
    print(f"\n📈 Pass Rate: {merged_results['summary']['pass_rate']:.1f}%")
    print(f"⏱️ Duration: {merged_results['metadata']['duration']:.2f}s")
    print(f"\n📄 Results saved to: {final_output}")
    print(f"🌐 Dashboard: {reports_dir / 'test_dashboard.html'}")
    print("=" * 70)

    # Try to open dashboard in browser
    try:
        import webbrowser
        dashboard_path = reports_dir / "test_dashboard.html"
        webbrowser.open(f"file://{dashboard_path.absolute()}")
        print("\n🌐 Opening dashboard in browser...")
    except Exception as e:
        print(f"\n⚠️ Could not open browser automatically: {e}")
        print(f"   Open manually: file://{(reports_dir / 'test_dashboard.html').absolute()}")

    # Return exit code
    return merged_results["metadata"]["exit_status"]


if __name__ == "__main__":
    sys.exit(main())
|