mcp-office-tools/tests/pytest_dashboard_plugin.py
Ryan Malloy c935cec7b6 Add MS Office-themed test dashboard with interactive reporting
- Self-contained HTML dashboard with MS Office 365 design
- pytest plugin captures inputs, outputs, and errors per test
- Unified orchestrator runs pytest + torture tests together
- Test files persisted in reports/test_files/ with relative links
- GitHub Actions workflow with PR comments and job summaries
- Makefile with convenient commands (test, view-dashboard, etc.)
- Works offline with embedded JSON data (no CORS issues)
2026-01-11 00:28:12 -07:00

"""Pytest plugin to capture test results for the dashboard.
This plugin captures detailed test execution data including inputs, outputs,
timing, and status for display in the HTML test dashboard.
"""

import json
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any

import pytest


class DashboardReporter:
    """Reporter that captures test execution data for the dashboard."""

    def __init__(self, output_path: str):
        self.output_path = Path(output_path)
        self.test_results: List[Dict[str, Any]] = []
        # Pending per-test records keyed by nodeid, created in pytest_runtest_makereport
        # and completed in pytest_runtest_logreport (TestReport carries no item reference).
        self._pending: Dict[str, Dict[str, Any]] = {}
        self.start_time = time.time()
        self.session_metadata = {
            "start_time": datetime.now().isoformat(),
            "pytest_version": pytest.__version__,
        }

    def pytest_runtest_protocol(self, item, nextitem):
        """Capture test execution at the protocol level."""
        # Record the start time on the item for later use
        item._dashboard_start = time.time()
        return None

    def pytest_runtest_makereport(self, item, call):
        """Capture test results and extract information."""
        if call.when == "call":  # Only capture the main test call, not setup/teardown
            test_data = {
                "name": item.name,
                "nodeid": item.nodeid,
                "category": self._categorize_test(item),
                "outcome": None,  # Will be set in pytest_runtest_logreport
                "duration": call.duration,
                "timestamp": datetime.now().isoformat(),
                "module": item.module.__name__ if item.module else "unknown",
                "class": item.cls.__name__ if item.cls else None,
                "function": item.function.__name__ if hasattr(item, "function") else item.name,
                "inputs": self._extract_inputs(item),
                "outputs": None,
                "error": None,
                "traceback": None,
            }
            # Store for later processing in pytest_runtest_logreport, keyed by nodeid
            # because the TestReport passed there does not reference the item.
            self._pending[item.nodeid] = test_data

    def pytest_runtest_logreport(self, report):
        """Process test reports to extract outputs and status."""
        if report.when != "call":
            return
        test_data = self._pending.pop(report.nodeid, None)
        if test_data is None:
            return

        # Set outcome
        test_data["outcome"] = report.outcome

        # Extract captured output
        test_data["outputs"] = {
            "stdout": getattr(report, "capstdout", ""),
            "stderr": getattr(report, "capstderr", ""),
        }

        # Extract error information
        if report.failed:
            test_data["error"] = str(report.longrepr) if report.longrepr else "Unknown error"
            test_data["traceback"] = getattr(report, "longreprtext", None) or str(report.longrepr)

        self.test_results.append(test_data)

    def pytest_sessionfinish(self, session, exitstatus):
        """Write results to JSON file at end of test session."""
        end_time = time.time()

        # Calculate summary statistics
        total_tests = len(self.test_results)
        passed = sum(1 for t in self.test_results if t["outcome"] == "passed")
        failed = sum(1 for t in self.test_results if t["outcome"] == "failed")
        skipped = sum(1 for t in self.test_results if t["outcome"] == "skipped")

        # Group by category
        categories = {}
        for test in self.test_results:
            cat = test["category"]
            if cat not in categories:
                categories[cat] = {"total": 0, "passed": 0, "failed": 0, "skipped": 0}
            categories[cat]["total"] += 1
            if test["outcome"] == "passed":
                categories[cat]["passed"] += 1
            elif test["outcome"] == "failed":
                categories[cat]["failed"] += 1
            elif test["outcome"] == "skipped":
                categories[cat]["skipped"] += 1

        # Build final output
        output_data = {
            "metadata": {
                **self.session_metadata,
                "end_time": datetime.now().isoformat(),
                "duration": end_time - self.start_time,
                "exit_status": exitstatus,
            },
            "summary": {
                "total": total_tests,
                "passed": passed,
                "failed": failed,
                "skipped": skipped,
                "pass_rate": (passed / total_tests * 100) if total_tests > 0 else 0,
            },
            "categories": categories,
            "tests": self.test_results,
        }

        # Ensure output directory exists
        self.output_path.parent.mkdir(parents=True, exist_ok=True)

        # Write JSON; fall back to str() for values that are not JSON-serializable
        # (e.g. fixture objects captured in "inputs")
        with open(self.output_path, "w") as f:
            json.dump(output_data, f, indent=2, default=str)

        print(f"\nDashboard test results written to: {self.output_path}")

    def _categorize_test(self, item) -> str:
        """Categorize test based on its name/path."""
        nodeid = item.nodeid.lower()
        if "word" in nodeid:
            return "Word"
        elif "excel" in nodeid:
            return "Excel"
        elif "powerpoint" in nodeid or "pptx" in nodeid:
            return "PowerPoint"
        elif "universal" in nodeid:
            return "Universal"
        elif "server" in nodeid:
            return "Server"
        else:
            return "Other"

    def _extract_inputs(self, item) -> Dict[str, Any]:
        """Extract test inputs from fixtures and parameters."""
        inputs = {}

        # Get fixture values
        if hasattr(item, "funcargs"):
            for name, value in item.funcargs.items():
                # Skip complex objects, only store simple values
                if isinstance(value, (str, int, float, bool, type(None))):
                    inputs[name] = value
                elif isinstance(value, (list, tuple)) and len(value) < 10:
                    inputs[name] = list(value)
                elif isinstance(value, dict) and len(value) < 10:
                    inputs[name] = value
                else:
                    inputs[name] = f"<{type(value).__name__}>"

        # Get parametrize values if present
        if hasattr(item, "callspec"):
            inputs["params"] = item.callspec.params

        return inputs


def pytest_configure(config):
    """Register the dashboard reporter plugin."""
    output_path = config.getoption("--dashboard-output", default="reports/test_results.json")
    reporter = DashboardReporter(output_path)
    config.pluginmanager.register(reporter, "dashboard_reporter")


def pytest_addoption(parser):
    """Add command line option for dashboard output path."""
    parser.addoption(
        "--dashboard-output",
        action="store",
        default="reports/test_results.json",
        help="Path to output JSON file for dashboard (default: reports/test_results.json)",
    )
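

# Reading the generated JSON afterwards -- a minimal sketch, assuming the default
# --dashboard-output path; the dashboard HTML embeds this data, but it can also be
# inspected directly:
#
#   import json
#   from pathlib import Path
#   data = json.loads(Path("reports/test_results.json").read_text())
#   print(data["summary"]["pass_rate"], data["categories"])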