"""Modern HTML reporting system with video processing theme."""
import json
import time
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict
import base64
from .quality import TestQualityMetrics
from .config import TestingConfig


@dataclass
class TestResult:
    """Individual test result data."""

    name: str
    status: str  # one of: "passed", "failed", "skipped", "error"
    duration: float
    category: str
    error_message: Optional[str] = None
    artifacts: Optional[List[str]] = None  # normalized to [] in __post_init__
    quality_metrics: Optional[TestQualityMetrics] = None

    def __post_init__(self):
        # Dataclasses cannot use a mutable default directly, so the
        # empty artifact list is filled in after construction.
        if self.artifacts is None:
            self.artifacts = []
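
# Illustrative construction (all values made up for the example):
#
#   TestResult(
#       name="test_transcode_h264",
#       status="failed",
#       duration=1.234,
#       category="encoding",
#       error_message="ffmpeg exited with code 1",
#       artifacts=["reports/artifacts/test_transcode_h264.log"],
#   )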


class HTMLReporter:
    """Modern HTML reporter with video processing theme."""

    def __init__(self, config: TestingConfig):
        self.config = config
        self.test_results: List[TestResult] = []
        self.start_time = time.time()
        self.summary_stats = {
            "total": 0,
            "passed": 0,
            "failed": 0,
            "skipped": 0,
            "errors": 0,
        }

    def add_test_result(self, result: TestResult):
        """Add a test result to the report."""
        self.test_results.append(result)
        self.summary_stats["total"] += 1
        # The "error" status maps to the pluralized "errors" counter;
        # indexing by the raw status would raise KeyError.
        key = "errors" if result.status == "error" else result.status
        self.summary_stats[key] += 1

    def generate_report(self) -> str:
        """Generate the complete HTML report."""
        duration = time.time() - self.start_time
        timestamp = datetime.now()
        return self._generate_html_template(duration, timestamp)

    def save_report(self, output_path: Optional[Path] = None) -> Path:
        """Save the HTML report to file."""
        if output_path is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_path = self.config.reports_dir / f"test_report_{timestamp}.html"
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(self.generate_report())
        return output_path

    def _generate_html_template(self, duration: float, timestamp: datetime) -> str:
        """Generate the complete HTML template."""
        return f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <title>Video Processor Test Report</title>
    <style>{self._generate_css()}</style>
    <script>{self._generate_javascript()}</script>
</head>
<body>
    {self._generate_header(duration, timestamp)}
    {self._generate_navigation()}
    {self._generate_summary_section()}
    {self._generate_quality_overview()}
    {self._generate_test_results_section()}
    {self._generate_charts_section()}
    {self._generate_footer()}
</body>
</html>"""

    def _generate_css(self) -> str:
        """Generate CSS styles with video processing theme."""
        return ""

    def _generate_javascript(self) -> str:
        """Generate JavaScript for interactive features."""
        return ""

    def _generate_header(self, duration: float, timestamp: datetime) -> str:
        """Generate the header section."""
        return f"""
    <header>
        <h1>🎬 Video Processor Test Report</h1>
        <p>Generated {timestamp:%Y-%m-%d %H:%M:%S} in {duration:.2f}s</p>
    </header>
    """

    def _generate_navigation(self) -> str:
        """Generate the navigation section."""
        return ""

    def _generate_summary_section(self) -> str:
        """Generate the summary section."""
        return f"""
    <section id="summary">
        <div class="stat-card"><span class="stat-value">{self.summary_stats['total']}</span><span class="stat-label">Total Tests</span></div>
        <div class="stat-card passed"><span class="stat-value">{self.summary_stats['passed']}</span><span class="stat-label">Passed</span></div>
        <div class="stat-card failed"><span class="stat-value">{self.summary_stats['failed']}</span><span class="stat-label">Failed</span></div>
        <div class="stat-card skipped"><span class="stat-value">{self.summary_stats['skipped']}</span><span class="stat-label">Skipped</span></div>
    </section>
    """

    def _generate_quality_overview(self) -> str:
        """Generate the quality metrics overview."""
        avg_quality = self._calculate_average_quality()
        return f"""
    <section id="quality">
        <div class="quality-card">
            <h3>Overall Score</h3>
            <span class="score">{avg_quality['overall']:.1f}/10</span>
            <span class="grade">Grade: {self._get_grade(avg_quality['overall'])}</span>
        </div>
        <div class="quality-card">
            <h3>Functional Quality</h3>
            <span class="score">{avg_quality['functional']:.1f}/10</span>
            <span class="grade">Grade: {self._get_grade(avg_quality['functional'])}</span>
        </div>
        <div class="quality-card">
            <h3>Performance Quality</h3>
            <span class="score">{avg_quality['performance']:.1f}/10</span>
            <span class="grade">Grade: {self._get_grade(avg_quality['performance'])}</span>
        </div>
        <div class="quality-card">
            <h3>Reliability Score</h3>
            <span class="score">{avg_quality['reliability']:.1f}/10</span>
            <span class="grade">Grade: {self._get_grade(avg_quality['reliability'])}</span>
        </div>
    </section>
    """

    def _generate_test_results_section(self) -> str:
        """Generate the test results table."""
        filter_buttons = ""
        table_rows = ""
        for result in self.test_results:
            error_html = ""
            if result.error_message:
                error_html = f'<div class="error-message">{result.error_message}</div>'
            quality_score = "N/A"
            if result.quality_metrics:
                quality_score = f"{result.quality_metrics.overall_score:.1f}/10"
            table_rows += f"""
                <tr class="{result.status}">
                    <td>{result.name}{error_html}</td>
                    <td>{result.status.upper()}</td>
                    <td>{result.category}</td>
                    <td>{result.duration:.3f}s</td>
                    <td>{quality_score}</td>
                </tr>"""
        return f"""
    <section id="results">
        {filter_buttons}
        <table>
            <thead>
                <tr>
                    <th>Test Name</th>
                    <th>Status</th>
                    <th>Category</th>
                    <th>Duration</th>
                    <th>Quality Score</th>
                </tr>
            </thead>
            <tbody>{table_rows}
            </tbody>
        </table>
    </section>
    """

    def _generate_charts_section(self) -> str:
        """Generate the charts/analytics section."""
        return ""

    def _generate_footer(self) -> str:
        """Generate the footer section."""
        return f"""
    <footer>
        Success rate: {self._calculate_success_rate():.1f}%
    </footer>
    """

    def _calculate_success_rate(self) -> float:
        """Calculate the overall success rate as a percentage."""
        total = self.summary_stats["total"]
        if total == 0:
            return 0.0
        return (self.summary_stats["passed"] / total) * 100

    def _calculate_average_quality(self) -> Dict[str, float]:
        """Calculate average quality metrics across tests that reported them."""
        quality_tests = [r for r in self.test_results if r.quality_metrics]
        if not quality_tests:
            # No per-test metrics were collected; fall back to a flat default.
            return {
                "overall": 8.0,
                "functional": 8.0,
                "performance": 8.0,
                "reliability": 8.0,
            }
        count = len(quality_tests)
        return {
            "overall": sum(r.quality_metrics.overall_score for r in quality_tests) / count,
            "functional": sum(r.quality_metrics.functional_score for r in quality_tests) / count,
            "performance": sum(r.quality_metrics.performance_score for r in quality_tests) / count,
            "reliability": sum(r.quality_metrics.reliability_score for r in quality_tests) / count,
        }

    def _get_grade(self, score: float) -> str:
        """Convert a 0-10 score to a letter grade."""
        thresholds = [
            (9.0, "A+"), (8.5, "A"), (8.0, "A-"),
            (7.5, "B+"), (7.0, "B"), (6.5, "B-"),
            (6.0, "C+"), (5.5, "C"), (5.0, "C-"),
            (4.0, "D"),
        ]
        for cutoff, grade in thresholds:
            if score >= cutoff:
                return grade
        return "F"


class JSONReporter:
    """JSON reporter for CI/CD integration."""

    def __init__(self, config: TestingConfig):
        self.config = config
        self.test_results: List[TestResult] = []
        self.start_time = time.time()

    def add_test_result(self, result: TestResult):
        """Add a test result."""
        self.test_results.append(result)

    def generate_report(self) -> Dict[str, Any]:
        """Generate JSON report."""
        duration = time.time() - self.start_time
        summary = {
            "total": len(self.test_results),
            "passed": len([r for r in self.test_results if r.status == "passed"]),
            "failed": len([r for r in self.test_results if r.status == "failed"]),
            "skipped": len([r for r in self.test_results if r.status == "skipped"]),
            "errors": len([r for r in self.test_results if r.status == "error"]),
        }
        return {
            "timestamp": datetime.now().isoformat(),
            "duration": duration,
            "summary": summary,
            "success_rate": (summary["passed"] / summary["total"] * 100) if summary["total"] > 0 else 0,
            "results": [asdict(result) for result in self.test_results],
            "config": {
                "project_name": self.config.project_name,
                "version": self.config.version,
                "parallel_workers": self.config.parallel_workers,
            },
        }
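
    # Shape of the payload above (values illustrative, not real run output):
    #
    # {
    #     "timestamp": "2024-01-01T12:00:00",
    #     "duration": 12.5,
    #     "summary": {"total": 3, "passed": 2, "failed": 1, "skipped": 0, "errors": 0},
    #     "success_rate": 66.7,
    #     "results": [{"name": "...", "status": "passed", ...}],
    #     "config": {"project_name": "...", "version": "...", "parallel_workers": 4}
    # }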

    def save_report(self, output_path: Optional[Path] = None) -> Path:
        """Save JSON report to file."""
        if output_path is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_path = self.config.reports_dir / f"test_report_{timestamp}.json"
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(self.generate_report(), f, indent=2, default=str)
        return output_path


class ConsoleReporter:
    """Terminal-friendly console reporter."""

    def __init__(self, config: TestingConfig):
        self.config = config
        self.test_results: List[TestResult] = []

    def add_test_result(self, result: TestResult):
        """Add a test result."""
        self.test_results.append(result)

    def print_summary(self):
        """Print a summary to the console."""
        total = len(self.test_results)
        passed = len([r for r in self.test_results if r.status == "passed"])
        failed = len([r for r in self.test_results if r.status == "failed"])
        skipped = len([r for r in self.test_results if r.status == "skipped"])
        print("\n" + "=" * 80)
        print("🎬 VIDEO PROCESSOR TEST SUMMARY")
        print("=" * 80)
        print(f"Total Tests: {total}")
        print(f"✅ Passed: {passed}")
        print(f"❌ Failed: {failed}")
        print(f"⏭️ Skipped: {skipped}")
        print(f"Success Rate: {(passed / total * 100) if total > 0 else 0:.1f}%")
        print("=" * 80)
        if failed > 0:
            print("\nFailed Tests:")
            for result in self.test_results:
                if result.status == "failed":
                    print(f"  ❌ {result.name}")
                    if result.error_message:
                        # Truncate long messages; append an ellipsis only when trimmed.
                        message = result.error_message
                        if len(message) > 100:
                            message = message[:100] + "..."
                        print(f"     Error: {message}")
        print()
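

if __name__ == "__main__":
    # Smoke-test sketch, not part of the library API: wires one fabricated
    # result through all three reporters. Assumes TestingConfig can be
    # constructed with no arguments; substitute the real constructor from
    # .config if it cannot.
    demo_config = TestingConfig()
    demo_result = TestResult(
        name="test_encode_profiles",
        status="passed",
        duration=0.412,
        category="encoding",
    )
    for demo_reporter in (HTMLReporter(demo_config), JSONReporter(demo_config)):
        demo_reporter.add_test_result(demo_result)
        print(f"Report written to {demo_reporter.save_report()}")
    console = ConsoleReporter(demo_config)
    console.add_test_result(demo_result)
    console.print_summary()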