diff --git a/.github/workflows/test-dashboard.yml b/.github/workflows/test-dashboard.yml new file mode 100644 index 0000000..6ac200d --- /dev/null +++ b/.github/workflows/test-dashboard.yml @@ -0,0 +1,124 @@ +name: Test Dashboard + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main ] + workflow_dispatch: # Allow manual trigger + +jobs: + test-and-dashboard: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' + + - name: Install UV + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + + - name: Install dependencies + run: | + uv sync --dev + + - name: Run tests with dashboard generation + run: | + python run_dashboard_tests.py + continue-on-error: true # Generate dashboard even if tests fail + + - name: Extract test summary + id: test_summary + run: | + TOTAL=$(jq '.summary.total' reports/test_results.json) + PASSED=$(jq '.summary.passed' reports/test_results.json) + FAILED=$(jq '.summary.failed' reports/test_results.json) + SKIPPED=$(jq '.summary.skipped' reports/test_results.json) + PASS_RATE=$(jq '.summary.pass_rate' reports/test_results.json) + + echo "total=$TOTAL" >> $GITHUB_OUTPUT + echo "passed=$PASSED" >> $GITHUB_OUTPUT + echo "failed=$FAILED" >> $GITHUB_OUTPUT + echo "skipped=$SKIPPED" >> $GITHUB_OUTPUT + echo "pass_rate=$PASS_RATE" >> $GITHUB_OUTPUT + + - name: Upload test dashboard + uses: actions/upload-artifact@v4 + if: always() + with: + name: test-dashboard + path: reports/ + retention-days: 30 + + - name: Comment PR with results + if: github.event_name == 'pull_request' + uses: actions/github-script@v7 + with: + script: | + const total = ${{ steps.test_summary.outputs.total }}; + const passed = ${{ steps.test_summary.outputs.passed }}; + const failed = ${{ steps.test_summary.outputs.failed }}; + const skipped = ${{ steps.test_summary.outputs.skipped }}; + const passRate = ${{ steps.test_summary.outputs.pass_rate }}; + + const statusEmoji = failed > 0 ? 'โŒ' : 'โœ…'; + const passRateEmoji = passRate >= 90 ? '๐ŸŽ‰' : passRate >= 70 ? '๐Ÿ‘' : 'โš ๏ธ'; + + const comment = `## ${statusEmoji} Test Results + + | Metric | Value | + |--------|-------| + | Total Tests | ${total} | + | โœ… Passed | ${passed} | + | โŒ Failed | ${failed} | + | โญ๏ธ Skipped | ${skipped} | + | ${passRateEmoji} Pass Rate | ${passRate.toFixed(1)}% | + + ### ๐Ÿ“Š Interactive Dashboard + + [Download test dashboard artifact](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + + The dashboard includes: + - Detailed test results with inputs/outputs + - Error tracebacks for failed tests + - Category breakdown (Word, Excel, PowerPoint, etc.) + - Interactive filtering and search + + **To view**: Download the artifact, extract, and open \`test_dashboard.html\` in your browser. 
+ `; + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: comment + }); + + - name: Create job summary + if: always() + run: | + echo "# ๐Ÿ“Š Test Dashboard Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "## Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- **Total**: ${{ steps.test_summary.outputs.total }} tests" >> $GITHUB_STEP_SUMMARY + echo "- **โœ… Passed**: ${{ steps.test_summary.outputs.passed }}" >> $GITHUB_STEP_SUMMARY + echo "- **โŒ Failed**: ${{ steps.test_summary.outputs.failed }}" >> $GITHUB_STEP_SUMMARY + echo "- **โญ๏ธ Skipped**: ${{ steps.test_summary.outputs.skipped }}" >> $GITHUB_STEP_SUMMARY + echo "- **๐Ÿ“ˆ Pass Rate**: ${{ steps.test_summary.outputs.pass_rate }}%" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "## ๐ŸŒ Dashboard" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Download the \`test-dashboard\` artifact to view the interactive HTML dashboard." >> $GITHUB_STEP_SUMMARY + + - name: Fail job if tests failed + if: steps.test_summary.outputs.failed > 0 + run: exit 1 diff --git a/ADVANCED_TOOLS_PLAN.md b/ADVANCED_TOOLS_PLAN.md new file mode 100644 index 0000000..43af168 --- /dev/null +++ b/ADVANCED_TOOLS_PLAN.md @@ -0,0 +1,190 @@ +# Advanced MCP Office Tools Enhancement Plan + +## Current Status +- โœ… Basic text extraction +- โœ… Image extraction +- โœ… Metadata extraction +- โœ… Format detection +- โœ… Document health analysis +- โœ… Word-to-Markdown conversion + +## Missing Advanced Features by Library + +### ๐Ÿ“Š Excel Tools (openpyxl + pandas + xlsxwriter) + +#### Data Analysis & Manipulation +- `analyze_excel_data` - Statistical analysis, data types, missing values +- `create_pivot_table` - Generate pivot tables with aggregations +- `excel_data_validation` - Set dropdown lists, number ranges, date constraints +- `excel_conditional_formatting` - Apply color scales, data bars, icon sets +- `excel_formula_analysis` - Extract, validate, and analyze formulas +- `excel_chart_creation` - Create charts (bar, line, pie, scatter, etc.) 
+- `excel_worksheet_operations` - Add/delete/rename sheets, copy data +- `excel_merge_spreadsheets` - Combine multiple Excel files intelligently + +#### Advanced Excel Features +- `excel_named_ranges` - Create and manage named ranges +- `excel_data_filtering` - Apply AutoFilter and advanced filters +- `excel_cell_styling` - Font, borders, alignment, number formats +- `excel_protection` - Password protect sheets/workbooks +- `excel_hyperlinks` - Add/extract hyperlinks from cells +- `excel_comments_notes` - Add/extract cell comments and notes + +### ๐Ÿ“ Word Tools (python-docx + mammoth) + +#### Document Structure & Layout +- `word_extract_tables` - Extract tables with styling and structure +- `word_extract_headers_footers` - Get headers/footers from all sections +- `word_extract_toc` - Extract table of contents with page numbers +- `word_document_structure` - Analyze heading hierarchy and outline +- `word_page_layout_analysis` - Margins, orientation, columns, page breaks +- `word_section_analysis` - Different sections with different formatting + +#### Content Management +- `word_find_replace_advanced` - Pattern-based find/replace with formatting +- `word_extract_comments` - Get all comments with author and timestamps +- `word_extract_tracked_changes` - Get revision history and changes +- `word_extract_hyperlinks` - Extract all hyperlinks with context +- `word_extract_footnotes_endnotes` - Get footnotes and endnotes +- `word_style_analysis` - Analyze and extract custom styles + +#### Document Generation +- `word_create_document` - Create new Word documents from templates +- `word_merge_documents` - Combine multiple Word documents +- `word_insert_content` - Add text, tables, images at specific locations +- `word_apply_formatting` - Apply consistent formatting across content + +### ๐ŸŽฏ PowerPoint Tools (python-pptx) + +#### Presentation Analysis +- `ppt_extract_slide_content` - Get text, images, shapes from each slide +- `ppt_extract_speaker_notes` - Get presenter notes for all slides +- `ppt_slide_layout_analysis` - Analyze slide layouts and master slides +- `ppt_extract_animations` - Get animation sequences and timing +- `ppt_presentation_structure` - Outline view with slide hierarchy + +#### Content Management +- `ppt_slide_operations` - Add/delete/reorder slides +- `ppt_master_slide_analysis` - Extract master slide templates +- `ppt_shape_analysis` - Analyze text boxes, shapes, SmartArt +- `ppt_media_extraction` - Extract embedded videos and audio +- `ppt_hyperlink_analysis` - Extract slide transitions and hyperlinks + +#### Presentation Generation +- `ppt_create_presentation` - Create new presentations from data +- `ppt_slide_generation` - Generate slides from templates and content +- `ppt_chart_integration` - Add charts and graphs to slides + +### ๐Ÿ”„ Cross-Format Tools + +#### Document Conversion +- `convert_excel_to_word_table` - Convert spreadsheet data to Word tables +- `convert_word_table_to_excel` - Extract Word tables to Excel format +- `extract_presentation_data_to_excel` - Convert slide content to spreadsheet +- `create_report_from_data` - Generate Word reports from Excel data + +#### Advanced Analysis +- `cross_document_comparison` - Compare content across different formats +- `document_summarization` - AI-powered document summaries +- `extract_key_metrics` - Find numbers, dates, important data across docs +- `document_relationship_analysis` - Find references between documents + +### ๐ŸŽจ Advanced Image & Media Tools + +#### Image Processing (Pillow integration) +- 
`advanced_image_extraction` - Extract with OCR, face detection, object recognition +- `image_format_conversion` - Convert between formats with optimization +- `image_metadata_analysis` - EXIF data, creation dates, camera info +- `image_quality_analysis` - Resolution, compression, clarity metrics + +#### Media Analysis +- `extract_embedded_objects` - Get all embedded files (PDFs, other Office docs) +- `analyze_document_media` - Comprehensive media inventory +- `optimize_document_media` - Reduce file sizes by optimizing images + +### ๐Ÿ“ˆ Data Science Integration + +#### Analytics Tools (pandas + numpy integration) +- `statistical_analysis` - Mean, median, correlations, distributions +- `time_series_analysis` - Trend analysis on date-based data +- `data_cleaning_suggestions` - Identify data quality issues +- `export_for_analysis` - Export to JSON, CSV, Parquet for data science + +#### Visualization Preparation +- `prepare_chart_data` - Format data for visualization libraries +- `generate_chart_configs` - Create chart.js, plotly, matplotlib configs +- `data_validation_rules` - Suggest data validation based on content analysis + +### ๐Ÿ” Security & Compliance Tools + +#### Document Security +- `analyze_document_security` - Check for sensitive information +- `redact_sensitive_content` - Remove/mask PII, financial data +- `document_audit_trail` - Track document creation, modification history +- `compliance_checking` - Check against various compliance standards + +#### Access Control +- `extract_permissions` - Get document protection and sharing settings +- `password_analysis` - Check password protection strength +- `digital_signature_verification` - Verify document signatures + +### ๐Ÿ”ง Automation & Workflow Tools + +#### Batch Operations +- `batch_document_processing` - Process multiple documents with same operations +- `template_application` - Apply templates to multiple documents +- `bulk_format_conversion` - Convert multiple files between formats +- `automated_report_generation` - Generate reports from data templates + +#### Integration Tools +- `export_to_cms` - Export content to various CMS formats +- `api_integration_prep` - Prepare data for API consumption +- `database_export` - Export structured data to database formats +- `email_template_generation` - Create email templates from documents + +## Implementation Priority + +### Phase 1: High-Impact Excel Tools ๐Ÿ”ฅ +1. `analyze_excel_data` - Immediate value for data analysis +2. `create_pivot_table` - High-demand business feature +3. `excel_chart_creation` - Visual data representation +4. `excel_conditional_formatting` - Professional spreadsheet styling + +### Phase 2: Advanced Word Processing ๐Ÿ“„ +1. `word_extract_tables` - Critical for data extraction +2. `word_document_structure` - Essential for navigation +3. `word_find_replace_advanced` - Powerful content management +4. `word_create_document` - Document generation capability + +### Phase 3: PowerPoint & Cross-Format ๐ŸŽฏ +1. `ppt_extract_slide_content` - Complete presentation analysis +2. `convert_excel_to_word_table` - Cross-format workflows +3. `ppt_create_presentation` - Automated presentation generation + +### Phase 4: Advanced Analytics & Security ๐Ÿš€ +1. `statistical_analysis` - Data science integration +2. `analyze_document_security` - Compliance and security +3. 
`batch_document_processing` - Automation workflows + +## Technical Implementation Notes + +### Library Extensions Needed +- **openpyxl**: Chart creation, conditional formatting, data validation +- **python-docx**: Advanced styling, document manipulation +- **python-pptx**: Slide generation, animation analysis +- **pandas**: Statistical functions, data analysis tools +- **Pillow**: Advanced image processing features + +### New Dependencies to Consider +- **matplotlib/plotly**: Chart generation +- **numpy**: Statistical calculations +- **python-dateutil**: Advanced date parsing +- **regex**: Advanced pattern matching +- **cryptography**: Document security analysis + +### Architecture Considerations +- Maintain mixin pattern for clean organization +- Add result caching for expensive operations +- Implement progress tracking for batch operations +- Add streaming support for large data processing +- Maintain backward compatibility with existing tools \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..99cbabd --- /dev/null +++ b/Makefile @@ -0,0 +1,127 @@ +# Makefile for MCP Office Tools +# Provides convenient commands for testing, development, and dashboard generation + +.PHONY: help test test-dashboard test-pytest test-torture view-dashboard clean install format lint type-check + +# Default target - show help +help: + @echo "MCP Office Tools - Available Commands" + @echo "======================================" + @echo "" + @echo "Testing & Dashboard:" + @echo " make test - Run all tests with dashboard generation" + @echo " make test-dashboard - Alias for 'make test'" + @echo " make test-pytest - Run only pytest tests" + @echo " make test-torture - Run only torture tests" + @echo " make view-dashboard - Open test dashboard in browser" + @echo "" + @echo "Development:" + @echo " make install - Install project with dev dependencies" + @echo " make format - Format code with black" + @echo " make lint - Lint code with ruff" + @echo " make type-check - Run type checking with mypy" + @echo " make clean - Clean temporary files and caches" + @echo "" + @echo "Examples:" + @echo " make test # Run everything and open dashboard" + @echo " make test-pytest # Quick pytest-only run" + @echo " make view-dashboard # View existing results" + +# Run all tests and generate unified dashboard +test: test-dashboard + +test-dashboard: + @echo "๐Ÿงช Running comprehensive test suite with dashboard generation..." + @python run_dashboard_tests.py + +# Run only pytest tests +test-pytest: + @echo "๐Ÿงช Running pytest test suite..." + @uv run pytest --dashboard-output=reports/test_results.json -v + +# Run only torture tests +test-torture: + @echo "๐Ÿ”ฅ Running torture tests..." + @uv run python torture_test.py + +# View test dashboard in browser +view-dashboard: + @echo "๐Ÿ“Š Opening test dashboard..." + @./view_dashboard.sh + +# Install project with dev dependencies +install: + @echo "๐Ÿ“ฆ Installing MCP Office Tools with dev dependencies..." + @uv sync --dev + @echo "โœ… Installation complete!" + +# Format code with black +format: + @echo "๐ŸŽจ Formatting code with black..." + @uv run black src/ tests/ examples/ + @echo "โœ… Formatting complete!" + +# Lint code with ruff +lint: + @echo "๐Ÿ” Linting code with ruff..." + @uv run ruff check src/ tests/ examples/ + @echo "โœ… Linting complete!" + +# Type checking with mypy +type-check: + @echo "๐Ÿ”Ž Running type checks with mypy..." + @uv run mypy src/ + @echo "โœ… Type checking complete!" 
+ +# Clean temporary files and caches +clean: + @echo "๐Ÿงน Cleaning temporary files and caches..." + @find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true + @find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true + @find . -type d -name ".pytest_cache" -exec rm -rf {} + 2>/dev/null || true + @find . -type d -name ".mypy_cache" -exec rm -rf {} + 2>/dev/null || true + @find . -type d -name ".ruff_cache" -exec rm -rf {} + 2>/dev/null || true + @find . -type f -name "*.pyc" -delete 2>/dev/null || true + @rm -rf dist/ build/ 2>/dev/null || true + @echo "โœ… Cleanup complete!" + +# Run full quality checks (format, lint, type-check, test) +check: format lint type-check test + @echo "โœ… All quality checks passed!" + +# Quick development test cycle (no dashboard) +quick-test: + @echo "โšก Quick test run (no dashboard)..." + @uv run pytest -v --tb=short + +# Coverage report +coverage: + @echo "๐Ÿ“Š Generating coverage report..." + @uv run pytest --cov=mcp_office_tools --cov-report=html --cov-report=term + @echo "โœ… Coverage report generated at htmlcov/index.html" + +# Run server in development mode +dev: + @echo "๐Ÿš€ Starting MCP Office Tools server..." + @uv run mcp-office-tools + +# Build distribution packages +build: + @echo "๐Ÿ“ฆ Building distribution packages..." + @uv build + @echo "โœ… Build complete! Packages in dist/" + +# Show project info +info: + @echo "MCP Office Tools - Project Information" + @echo "=======================================" + @echo "" + @echo "Project: mcp-office-tools" + @echo "Version: $(shell grep '^version' pyproject.toml | cut -d'"' -f2)" + @echo "Python: $(shell python --version)" + @echo "UV: $(shell uv --version 2>/dev/null || echo 'not installed')" + @echo "" + @echo "Directory: $(shell pwd)" + @echo "Tests: $(shell find tests -name 'test_*.py' | wc -l) test files" + @echo "Source files: $(shell find src -name '*.py' | wc -l) Python files" + @echo "" diff --git a/QUICKSTART_DASHBOARD.md b/QUICKSTART_DASHBOARD.md new file mode 100644 index 0000000..5da6509 --- /dev/null +++ b/QUICKSTART_DASHBOARD.md @@ -0,0 +1,114 @@ +# Test Dashboard - Quick Start + +## TL;DR - 3 Commands to Get Started + +```bash +# 1. Run all tests and generate dashboard +python run_dashboard_tests.py + +# 2. View dashboard (alternative) +make test + +# 3. 
Open existing dashboard +./view_dashboard.sh +``` + +## What You Get + +A beautiful, interactive HTML test dashboard that looks like Microsoft Office 365: + +- **Summary Cards** - Pass/fail stats at a glance +- **Interactive Filters** - Search and filter by category/status +- **Detailed Views** - Expand any test to see inputs, outputs, errors +- **MS Office Theme** - Professional, familiar design + +## File Locations + +``` +reports/ +โ”œโ”€โ”€ test_dashboard.html โ† Open this in browser +โ””โ”€โ”€ test_results.json โ† Test data (auto-generated) +``` + +## Common Tasks + +### Run Tests +```bash +make test # Run everything +make test-pytest # Pytest only +python torture_test.py # Torture tests only +``` + +### View Results +```bash +./view_dashboard.sh # Auto-open in browser +make view-dashboard # Same thing +open reports/test_dashboard.html # Manual +``` + +### Customize +```bash +# Edit colors +vim reports/test_dashboard.html # Edit CSS variables + +# Change categorization +vim tests/pytest_dashboard_plugin.py # Edit _categorize_test() +``` + +## Color Reference + +- Word: Blue `#2B579A` +- Excel: Green `#217346` +- PowerPoint: Orange `#D24726` +- Pass: Green `#107C10` +- Fail: Red `#D83B01` + +## Example Output + +``` +$ python run_dashboard_tests.py + +====================================================================== +๐Ÿงช Running pytest test suite... +====================================================================== +... pytest output ... + +====================================================================== +๐Ÿ”ฅ Running torture tests... +====================================================================== +... torture test output ... + +====================================================================== +๐Ÿ“Š TEST DASHBOARD SUMMARY +====================================================================== + +โœ… Passed: 12 +โŒ Failed: 2 +โญ๏ธ Skipped: 1 + +๐Ÿ“ˆ Pass Rate: 80.0% +โฑ๏ธ Duration: 45.12s + +๐Ÿ“„ Results saved to: reports/test_results.json +๐ŸŒ Dashboard: reports/test_dashboard.html +====================================================================== + +๐ŸŒ Opening dashboard in browser... +``` + +## Troubleshooting + +**Dashboard shows no results?** +โ†’ Run tests first: `python run_dashboard_tests.py` + +**Can't open in browser?** +โ†’ Manually open: `file:///path/to/reports/test_dashboard.html` + +**Tests not categorized correctly?** +โ†’ Edit `tests/pytest_dashboard_plugin.py`, function `_categorize_test()` + +## More Info + +- Full docs: `TEST_DASHBOARD.md` +- Implementation details: `DASHBOARD_SUMMARY.md` +- Dashboard features: `reports/README.md` diff --git a/reports/README.md b/reports/README.md new file mode 100644 index 0000000..a9a3903 --- /dev/null +++ b/reports/README.md @@ -0,0 +1,209 @@ +# MCP Office Tools - Test Dashboard + +Beautiful, interactive HTML dashboard for viewing test results with Microsoft Office-inspired design. 
+ +## Features + +- **MS Office Theme**: Modern Microsoft Office 365-inspired design with Fluent Design elements +- **Category-based Organization**: Separate results by Word, Excel, PowerPoint, Universal, and Server categories +- **Interactive Filtering**: Search and filter tests by name, category, or status +- **Detailed Test Views**: Expand any test to see inputs, outputs, errors, and tracebacks +- **Real-time Statistics**: Pass/fail rates, duration metrics, and category breakdowns +- **Self-contained**: Works offline with no external dependencies + +## Quick Start + +### Run All Tests with Dashboard + +```bash +# Run both pytest and torture tests, generate dashboard, and open in browser +python run_dashboard_tests.py +``` + +### Run Only Pytest Tests + +```bash +# Run pytest with dashboard plugin +pytest -p tests.pytest_dashboard_plugin --dashboard-output=reports/test_results.json + +# Open dashboard +open reports/test_dashboard.html # macOS +xdg-open reports/test_dashboard.html # Linux +start reports/test_dashboard.html # Windows +``` + +### View Existing Results + +Simply open `reports/test_dashboard.html` in your browser. The dashboard will automatically load `test_results.json` from the same directory. + +## Dashboard Components + +### Summary Cards + +Four main summary cards show: +- **Total Tests**: Number of test cases executed +- **Passed**: Successful tests with pass rate and progress bar +- **Failed**: Tests with errors +- **Duration**: Total execution time + +### Filter Controls + +- **Search Box**: Filter tests by name, module, or category +- **Category Filters**: Filter by Word, Excel, PowerPoint, Universal, or Server +- **Status Filters**: Show only passed, failed, or skipped tests + +### Test Results + +Each test displays: +- **Status Icon**: Visual indicator (โœ“ pass, โœ— fail, โŠ˜ skip) +- **Test Name**: Descriptive test name +- **Category Badge**: Color-coded category (Word=blue, Excel=green, PowerPoint=orange) +- **Duration**: Execution time in milliseconds +- **Expandable Details**: Click to view inputs, outputs, errors, and full traceback + +## File Structure + +``` +reports/ +โ”œโ”€โ”€ test_dashboard.html # Main dashboard (open this in browser) +โ”œโ”€โ”€ test_results.json # Generated test data (auto-loaded by dashboard) +โ”œโ”€โ”€ pytest_results.json # Intermediate pytest results +โ””โ”€โ”€ README.md # This file +``` + +## Design Philosophy + +### Microsoft Office Color Palette + +- **Word Blue**: `#2B579A` - Used for Word-related tests +- **Excel Green**: `#217346` - Used for Excel-related tests +- **PowerPoint Orange**: `#D24726` - Used for PowerPoint-related tests +- **Primary Blue**: `#0078D4` - Accent color (Fluent Design) + +### Fluent Design Principles + +- **Subtle Shadows**: Cards have soft shadows for depth +- **Rounded Corners**: 8px border radius for modern look +- **Hover Effects**: Interactive elements respond to mouse hover +- **Typography**: Segoe UI font family (Office standard) +- **Clean Layout**: Generous whitespace and clear hierarchy + +## Integration with CI/CD + +### GitHub Actions Example + +```yaml +- name: Run Tests with Dashboard + run: | + python run_dashboard_tests.py + +- name: Upload Test Dashboard + uses: actions/upload-artifact@v3 + with: + name: test-dashboard + path: reports/ +``` + +### GitLab CI Example + +```yaml +test_dashboard: + script: + - python run_dashboard_tests.py + artifacts: + paths: + - reports/ + expire_in: 1 week +``` + +## Customization + +### Change Dashboard Output Location + +```bash +# Custom output path 
for pytest +pytest -p tests.pytest_dashboard_plugin --dashboard-output=custom/path/results.json +``` + +### Modify Colors + +Edit the CSS variables in `test_dashboard.html`: + +```css +:root { + --word-blue: #2B579A; + --excel-green: #217346; + --powerpoint-orange: #D24726; + /* ... more colors ... */ +} +``` + +## Troubleshooting + +### Dashboard shows "No Test Results Found" + +- Ensure `test_results.json` exists in the `reports/` directory +- Run tests first: `python run_dashboard_tests.py` +- Check browser console for JSON loading errors + +### Tests not categorized correctly + +- Categories are determined by test path/name +- Ensure test files follow naming convention (e.g., `test_word_*.py`) +- Edit `_categorize_test()` in `pytest_dashboard_plugin.py` to customize + +### Dashboard doesn't open automatically + +- May require manual browser opening +- Use the file path printed in terminal +- Check that `webbrowser` module is available + +## Advanced Usage + +### Extend the Plugin + +The pytest plugin can be customized by editing `tests/pytest_dashboard_plugin.py`: + +```python +def _extract_inputs(self, item): + """Customize how test inputs are extracted""" + # Your custom logic here + pass + +def _categorize_test(self, item): + """Customize test categorization""" + # Your custom logic here + pass +``` + +### Add Custom Test Data + +The JSON format supports additional fields: + +```json +{ + "metadata": { /* your custom metadata */ }, + "summary": { /* summary stats */ }, + "categories": { /* category breakdown */ }, + "tests": [ + { + "name": "test_name", + "custom_field": "your_value", + /* ... standard fields ... */ + } + ] +} +``` + +## Contributing + +When adding new test categories or features: + +1. Update `_categorize_test()` in the pytest plugin +2. Add corresponding color scheme in HTML dashboard CSS +3. Add filter button in dashboard controls +4. Update this README with new features + +## License + +Part of the MCP Office Tools project. See main project LICENSE file. diff --git a/reports/pytest_results.json b/reports/pytest_results.json new file mode 100644 index 0000000..4d17593 --- /dev/null +++ b/reports/pytest_results.json @@ -0,0 +1,18 @@ +{ + "metadata": { + "start_time": "2026-01-11T00:23:10.209539", + "pytest_version": "9.0.2", + "end_time": "2026-01-11T00:23:10.999816", + "duration": 0.7902717590332031, + "exit_status": 0 + }, + "summary": { + "total": 0, + "passed": 0, + "failed": 0, + "skipped": 0, + "pass_rate": 0 + }, + "categories": {}, + "tests": [] +} \ No newline at end of file diff --git a/reports/test_dashboard.html b/reports/test_dashboard.html new file mode 100644 index 0000000..8ec838e --- /dev/null +++ b/reports/test_dashboard.html @@ -0,0 +1,963 @@ + + + + + + MCP Office Tools - Test Dashboard + + + + +
+ [reports/test_dashboard.html: ~963 added lines of self-contained HTML/CSS/JS are not reproduced here — the markup was stripped in this view and only stray text nodes survive. The recoverable content shows: a page header with the W / X / P Office logos and the title "MCP Office Tools - Test Dashboard", a "Loading..." status, four summary cards (Total Tests / "Test cases executed", Passed with a pass-rate bar, Failed / "Tests with errors", Duration / "Total execution time"), search and category/status filter controls, and the expandable test-results list.]
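Since the dashboard's own script is lost above, the following is only a minimal sketch of its data-loading path, reconstructed from what reports/README.md and run_dashboard_tests.py state: the page prefers JSON embedded into the HTML by run_dashboard_tests.py (needed when opened via a file:// URL, where fetching a local file is blocked) and otherwise loads test_results.json from the same directory. The names `EMBEDDED_TEST_DATA` and the console output are illustrative assumptions, not the shipped implementation.

```javascript
// Sketch only — assumed names; the real page renders summary cards and the test list instead.
async function loadResults() {
  // 1. Data embedded by run_dashboard_tests.py (global name assumed here)
  if (window.EMBEDDED_TEST_DATA) {
    return window.EMBEDDED_TEST_DATA;
  }
  // 2. Fallback: fetch test_results.json from the same directory as the dashboard
  const response = await fetch("test_results.json");
  if (!response.ok) {
    throw new Error(`test_results.json not found (HTTP ${response.status})`);
  }
  return response.json();
}

loadResults()
  .then((data) => {
    // Demonstrates the expected JSON shape (summary.total, summary.pass_rate, tests[], ...)
    console.log(`Loaded ${data.summary.total} tests, pass rate ${data.summary.pass_rate.toFixed(1)}%`);
  })
  .catch(() => {
    // Corresponds to the "No Test Results Found" state described in reports/README.md
    console.warn('No test results found — run "python run_dashboard_tests.py" first.');
  });
```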
+ + + + + + + + diff --git a/reports/test_files/test_data.xlsx b/reports/test_files/test_data.xlsx new file mode 100644 index 0000000..43f39ca Binary files /dev/null and b/reports/test_files/test_data.xlsx differ diff --git a/reports/test_files/test_document.docx b/reports/test_files/test_document.docx new file mode 100644 index 0000000..be0baf7 Binary files /dev/null and b/reports/test_files/test_document.docx differ diff --git a/reports/test_results.json b/reports/test_results.json new file mode 100644 index 0000000..bbd25bd --- /dev/null +++ b/reports/test_results.json @@ -0,0 +1,154 @@ +{ + "metadata": { + "start_time": "2026-01-11T00:23:10.209539", + "end_time": "2026-01-11T00:23:12.295169", + "duration": 1.052842140197754, + "exit_status": 0, + "pytest_version": "9.0.2", + "test_types": [ + "pytest", + "torture_test" + ] + }, + "summary": { + "total": 6, + "passed": 5, + "failed": 0, + "skipped": 1, + "pass_rate": 83.33333333333334 + }, + "categories": { + "Excel": { + "total": 4, + "passed": 3, + "failed": 0, + "skipped": 1 + }, + "Word": { + "total": 2, + "passed": 2, + "failed": 0, + "skipped": 0 + } + }, + "tests": [ + { + "name": "Excel Data Analysis", + "nodeid": "torture_test.py::test_excel_data_analysis", + "category": "Excel", + "outcome": "passed", + "duration": 0.1404409408569336, + "timestamp": "2026-01-11T00:23:12.271793", + "module": "torture_test", + "class": null, + "function": "test_excel_data_analysis", + "inputs": { + "file": "test_files/test_data.xlsx" + }, + "outputs": { + "sheets_analyzed": [ + "Test Data" + ] + }, + "error": null, + "traceback": null + }, + { + "name": "Excel Formula Extraction", + "nodeid": "torture_test.py::test_excel_formula_extraction", + "category": "Excel", + "outcome": "passed", + "duration": 0.0031723976135253906, + "timestamp": "2026-01-11T00:23:12.274971", + "module": "torture_test", + "class": null, + "function": "test_excel_formula_extraction", + "inputs": { + "file": "test_files/test_data.xlsx" + }, + "outputs": { + "total_formulas": 8 + }, + "error": null, + "traceback": null + }, + { + "name": "Excel Chart Data Generation", + "nodeid": "torture_test.py::test_excel_chart_generation", + "category": "Excel", + "outcome": "passed", + "duration": 0.003323078155517578, + "timestamp": "2026-01-11T00:23:12.278299", + "module": "torture_test", + "class": null, + "function": "test_excel_chart_generation", + "inputs": { + "file": "test_files/test_data.xlsx", + "x_column": "Category", + "y_columns": [ + "Value" + ] + }, + "outputs": { + "chart_libraries": 2 + }, + "error": null, + "traceback": null + }, + { + "name": "Word Structure Analysis", + "nodeid": "torture_test.py::test_word_structure_analysis", + "category": "Word", + "outcome": "passed", + "duration": 0.010413646697998047, + "timestamp": "2026-01-11T00:23:12.288718", + "module": "torture_test", + "class": null, + "function": "test_word_structure_analysis", + "inputs": { + "file": "test_files/test_document.docx" + }, + "outputs": { + "total_headings": 0 + }, + "error": null, + "traceback": null + }, + { + "name": "Word Table Extraction", + "nodeid": "torture_test.py::test_word_table_extraction", + "category": "Word", + "outcome": "passed", + "duration": 0.006224393844604492, + "timestamp": "2026-01-11T00:23:12.294948", + "module": "torture_test", + "class": null, + "function": "test_word_table_extraction", + "inputs": { + "file": "test_files/test_document.docx" + }, + "outputs": { + "total_tables": 0 + }, + "error": null, + "traceback": null + }, + { + "name": "Real Excel File 
Analysis (FORScan)", + "nodeid": "torture_test.py::test_real_excel_analysis", + "category": "Excel", + "outcome": "skipped", + "duration": 0, + "timestamp": "2026-01-11T00:23:12.294963", + "module": "torture_test", + "class": null, + "function": "test_real_excel_analysis", + "inputs": { + "file": "/home/rpm/FORScan Lite spreadsheets v1.1/FORScan Lite spreadsheet - PIDs.xlsx" + }, + "outputs": null, + "error": "File not found: /home/rpm/FORScan Lite spreadsheets v1.1/FORScan Lite spreadsheet - PIDs.xlsx", + "traceback": null + } + ] +} \ No newline at end of file diff --git a/run_dashboard_tests.py b/run_dashboard_tests.py new file mode 100755 index 0000000..26430e2 --- /dev/null +++ b/run_dashboard_tests.py @@ -0,0 +1,507 @@ +#!/usr/bin/env python +""" +Run both pytest and torture tests, then generate a unified test dashboard. + +This script orchestrates: +1. Running pytest with dashboard plugin +2. Running torture tests with result capture +3. Merging results into a single JSON file +4. Opening the dashboard in the browser +""" + +import asyncio +import json +import os +import shutil +import subprocess +import sys +import time +from datetime import datetime +from pathlib import Path + +# Add src to path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src")) + + +def run_pytest_tests(output_path: Path) -> dict: + """Run pytest tests with dashboard plugin.""" + print("\n" + "=" * 70) + print("๐Ÿงช Running pytest test suite...") + print("=" * 70) + + # Ensure plugin is loaded + plugin_path = Path(__file__).parent / "tests" / "pytest_dashboard_plugin.py" + + # Run pytest with plugin + cmd = [ + sys.executable, + "-m", + "pytest", + "-p", + "tests.pytest_dashboard_plugin", + f"--dashboard-output={output_path}", + "-v", + ] + + result = subprocess.run(cmd, cwd=Path(__file__).parent) + + # Load results + if output_path.exists(): + with open(output_path) as f: + return json.load(f) + else: + return { + "metadata": { + "start_time": datetime.now().isoformat(), + "end_time": datetime.now().isoformat(), + "duration": 0, + "exit_status": result.returncode, + }, + "summary": {"total": 0, "passed": 0, "failed": 0, "skipped": 0, "pass_rate": 0}, + "categories": {}, + "tests": [], + } + + +async def run_torture_tests(test_files_dir: Path = None) -> dict: + """Run torture tests and capture results. + + Args: + test_files_dir: Directory to store test files. If provided, files persist + for inclusion in dashboard. If None, uses temp directory. 
+ """ + print("\n" + "=" * 70) + print("๐Ÿ”ฅ Running torture tests...") + print("=" * 70) + + from torture_test import ( + run_torture_tests as run_torture, + create_test_xlsx, + create_test_docx, + EXCEL_TEST_FILES, + ExcelMixin, + WordMixin, + ) + + excel_mixin = ExcelMixin() + word_mixin = WordMixin() + + results = [] + start_time = time.time() + + # Use persistent directory if provided, otherwise temp + if test_files_dir: + test_files_dir.mkdir(parents=True, exist_ok=True) + test_xlsx = create_test_xlsx(str(test_files_dir / "test_data.xlsx")) + test_docx = create_test_docx(str(test_files_dir / "test_document.docx")) + # Use relative paths for the dashboard + test_xlsx_path = "test_files/test_data.xlsx" + test_docx_path = "test_files/test_document.docx" + else: + import tempfile + tmpdir = tempfile.mkdtemp() + test_xlsx = create_test_xlsx(os.path.join(tmpdir, "test_data.xlsx")) + test_docx = create_test_docx(os.path.join(tmpdir, "test_document.docx")) + test_xlsx_path = test_xlsx + test_docx_path = test_docx + + # Test 1: Excel Data Analysis + test_start = time.time() + try: + result = await excel_mixin.analyze_excel_data(test_xlsx) + summary = result.get("summary", {}) + sheets_count = summary.get("sheets_analyzed", 1) + results.append({ + "name": "Excel Data Analysis", + "nodeid": "torture_test.py::test_excel_data_analysis", + "category": "Excel", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_data_analysis", + "inputs": {"file": test_xlsx_path}, + "outputs": {"sheets_analyzed": sheets_count}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Excel Data Analysis", + "nodeid": "torture_test.py::test_excel_data_analysis", + "category": "Excel", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_data_analysis", + "inputs": {"file": test_xlsx_path}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + + # Test 2: Excel Formula Extraction + test_start = time.time() + try: + result = await excel_mixin.extract_excel_formulas(test_xlsx) + summary = result.get("summary", {}) + formula_count = summary.get("total_formulas", 0) + results.append({ + "name": "Excel Formula Extraction", + "nodeid": "torture_test.py::test_excel_formula_extraction", + "category": "Excel", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_formula_extraction", + "inputs": {"file": test_xlsx_path}, + "outputs": {"total_formulas": formula_count}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Excel Formula Extraction", + "nodeid": "torture_test.py::test_excel_formula_extraction", + "category": "Excel", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_formula_extraction", + "inputs": {"file": test_xlsx_path}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + + # Test 3: Excel Chart Generation + test_start = time.time() + try: + result = await excel_mixin.create_excel_chart_data( + test_xlsx, + x_column="Category", + y_columns=["Value"], + 
chart_type="bar" + ) + chart_libs = len(result.get("chart_configuration", {})) + results.append({ + "name": "Excel Chart Data Generation", + "nodeid": "torture_test.py::test_excel_chart_generation", + "category": "Excel", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_chart_generation", + "inputs": {"file": test_xlsx_path, "x_column": "Category", "y_columns": ["Value"]}, + "outputs": {"chart_libraries": chart_libs}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Excel Chart Data Generation", + "nodeid": "torture_test.py::test_excel_chart_generation", + "category": "Excel", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_excel_chart_generation", + "inputs": {"file": test_xlsx_path, "x_column": "Category", "y_columns": ["Value"]}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + + # Test 4: Word Structure Analysis + test_start = time.time() + try: + result = await word_mixin.analyze_word_structure(test_docx) + heading_count = result["structure"].get("total_headings", 0) + results.append({ + "name": "Word Structure Analysis", + "nodeid": "torture_test.py::test_word_structure_analysis", + "category": "Word", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_word_structure_analysis", + "inputs": {"file": test_docx_path}, + "outputs": {"total_headings": heading_count}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Word Structure Analysis", + "nodeid": "torture_test.py::test_word_structure_analysis", + "category": "Word", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_word_structure_analysis", + "inputs": {"file": test_docx_path}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + + # Test 5: Word Table Extraction + test_start = time.time() + try: + result = await word_mixin.extract_word_tables(test_docx) + table_count = result.get("total_tables", 0) + results.append({ + "name": "Word Table Extraction", + "nodeid": "torture_test.py::test_word_table_extraction", + "category": "Word", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_word_table_extraction", + "inputs": {"file": test_docx_path}, + "outputs": {"total_tables": table_count}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Word Table Extraction", + "nodeid": "torture_test.py::test_word_table_extraction", + "category": "Word", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_word_table_extraction", + "inputs": {"file": test_docx_path}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + + # Test 6: Real Excel file (if available) + real_excel = EXCEL_TEST_FILES[0] + if os.path.exists(real_excel): + test_start = time.time() + try: + result = await 
excel_mixin.analyze_excel_data(real_excel) + sheets = len(result.get("sheets", [])) + results.append({ + "name": "Real Excel File Analysis (FORScan)", + "nodeid": "torture_test.py::test_real_excel_analysis", + "category": "Excel", + "outcome": "passed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_real_excel_analysis", + "inputs": {"file": real_excel}, + "outputs": {"sheets": sheets}, + "error": None, + "traceback": None, + }) + except Exception as e: + results.append({ + "name": "Real Excel File Analysis (FORScan)", + "nodeid": "torture_test.py::test_real_excel_analysis", + "category": "Excel", + "outcome": "failed", + "duration": time.time() - test_start, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_real_excel_analysis", + "inputs": {"file": real_excel}, + "outputs": None, + "error": str(e), + "traceback": f"{type(e).__name__}: {e}", + }) + else: + results.append({ + "name": "Real Excel File Analysis (FORScan)", + "nodeid": "torture_test.py::test_real_excel_analysis", + "category": "Excel", + "outcome": "skipped", + "duration": 0, + "timestamp": datetime.now().isoformat(), + "module": "torture_test", + "class": None, + "function": "test_real_excel_analysis", + "inputs": {"file": real_excel}, + "outputs": None, + "error": f"File not found: {real_excel}", + "traceback": None, + }) + + # Calculate summary + total_duration = time.time() - start_time + passed = sum(1 for r in results if r["outcome"] == "passed") + failed = sum(1 for r in results if r["outcome"] == "failed") + skipped = sum(1 for r in results if r["outcome"] == "skipped") + total = len(results) + + return { + "metadata": { + "start_time": datetime.fromtimestamp(start_time).isoformat(), + "end_time": datetime.now().isoformat(), + "duration": total_duration, + "exit_status": 0 if failed == 0 else 1, + "pytest_version": "torture_test", + }, + "summary": { + "total": total, + "passed": passed, + "failed": failed, + "skipped": skipped, + "pass_rate": (passed / total * 100) if total > 0 else 0, + }, + "categories": { + "Excel": { + "total": sum(1 for r in results if r["category"] == "Excel"), + "passed": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "passed"), + "failed": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "failed"), + "skipped": sum(1 for r in results if r["category"] == "Excel" and r["outcome"] == "skipped"), + }, + "Word": { + "total": sum(1 for r in results if r["category"] == "Word"), + "passed": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "passed"), + "failed": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "failed"), + "skipped": sum(1 for r in results if r["category"] == "Word" and r["outcome"] == "skipped"), + }, + }, + "tests": results, + } + + +def merge_results(pytest_results: dict, torture_results: dict) -> dict: + """Merge pytest and torture test results.""" + # Merge tests + all_tests = pytest_results.get("tests", []) + torture_results.get("tests", []) + + # Recalculate summary + total = len(all_tests) + passed = sum(1 for t in all_tests if t["outcome"] == "passed") + failed = sum(1 for t in all_tests if t["outcome"] == "failed") + skipped = sum(1 for t in all_tests if t["outcome"] == "skipped") + + # Merge categories + all_categories = {} + for cat_dict in [pytest_results.get("categories", {}), torture_results.get("categories", {})]: + for cat, 
stats in cat_dict.items(): + if cat not in all_categories: + all_categories[cat] = {"total": 0, "passed": 0, "failed": 0, "skipped": 0} + for key in ["total", "passed", "failed", "skipped"]: + all_categories[cat][key] += stats.get(key, 0) + + # Combine durations + total_duration = pytest_results.get("metadata", {}).get("duration", 0) + \ + torture_results.get("metadata", {}).get("duration", 0) + + return { + "metadata": { + "start_time": pytest_results.get("metadata", {}).get("start_time", datetime.now().isoformat()), + "end_time": datetime.now().isoformat(), + "duration": total_duration, + "exit_status": 0 if failed == 0 else 1, + "pytest_version": pytest_results.get("metadata", {}).get("pytest_version", "unknown"), + "test_types": ["pytest", "torture_test"], + }, + "summary": { + "total": total, + "passed": passed, + "failed": failed, + "skipped": skipped, + "pass_rate": (passed / total * 100) if total > 0 else 0, + }, + "categories": all_categories, + "tests": all_tests, + } + + +def main(): + """Main execution function.""" + reports_dir = Path(__file__).parent / "reports" + reports_dir.mkdir(exist_ok=True) + + test_files_dir = reports_dir / "test_files" + + pytest_output = reports_dir / "pytest_results.json" + final_output = reports_dir / "test_results.json" + + # Run pytest tests + pytest_results = run_pytest_tests(pytest_output) + + # Run torture tests with persistent test files + torture_results = asyncio.run(run_torture_tests(test_files_dir)) + + # Merge results + merged_results = merge_results(pytest_results, torture_results) + + # Write final results + with open(final_output, "w") as f: + json.dump(merged_results, f, indent=2) + + # Embed JSON data into HTML for offline viewing (file:// URLs) + dashboard_html = reports_dir / "test_dashboard.html" + if dashboard_html.exists(): + html_content = dashboard_html.read_text() + # Remove any existing embedded data + import re + html_content = re.sub( + r'\n?', + '', + html_content, + flags=re.DOTALL + ) + # Embed fresh data before + embed_script = f'\n' + html_content = html_content.replace('', f'{embed_script}') + dashboard_html.write_text(html_content) + + print("\n" + "=" * 70) + print("๐Ÿ“Š TEST DASHBOARD SUMMARY") + print("=" * 70) + print(f"\nโœ… Passed: {merged_results['summary']['passed']}") + print(f"โŒ Failed: {merged_results['summary']['failed']}") + print(f"โญ๏ธ Skipped: {merged_results['summary']['skipped']}") + print(f"\n๐Ÿ“ˆ Pass Rate: {merged_results['summary']['pass_rate']:.1f}%") + print(f"โฑ๏ธ Duration: {merged_results['metadata']['duration']:.2f}s") + print(f"\n๐Ÿ“„ Results saved to: {final_output}") + print(f"๐ŸŒ Dashboard: {reports_dir / 'test_dashboard.html'}") + print("=" * 70) + + # Try to open dashboard in browser + try: + import webbrowser + dashboard_path = reports_dir / "test_dashboard.html" + webbrowser.open(f"file://{dashboard_path.absolute()}") + print("\n๐ŸŒ Opening dashboard in browser...") + except Exception as e: + print(f"\nโš ๏ธ Could not open browser automatically: {e}") + print(f" Open manually: file://{(reports_dir / 'test_dashboard.html').absolute()}") + + # Return exit code + return merged_results["metadata"]["exit_status"] + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/test_mcp_tools.py b/test_mcp_tools.py new file mode 100644 index 0000000..185b08b --- /dev/null +++ b/test_mcp_tools.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +"""Simple test script to verify MCP Office Tools functionality.""" + +import asyncio +import tempfile +import os +from pathlib import Path + +# 
Create simple test documents +def create_test_documents(): + """Create test documents for verification.""" + temp_dir = Path(tempfile.mkdtemp()) + + # Create a simple CSV file + csv_path = temp_dir / "test.csv" + csv_content = """Name,Age,City +John Doe,30,New York +Jane Smith,25,Los Angeles +Bob Johnson,35,Chicago""" + + with open(csv_path, 'w') as f: + f.write(csv_content) + + # Create a simple text file to test validation + txt_path = temp_dir / "test.txt" + with open(txt_path, 'w') as f: + f.write("This is a simple text file, not an Office document.") + + return temp_dir, csv_path, txt_path + +async def test_mcp_server(): + """Test MCP server functionality.""" + print("๐Ÿงช Testing MCP Office Tools Server") + print("=" * 50) + + # Create test documents + temp_dir, csv_path, txt_path = create_test_documents() + print(f"๐Ÿ“ Created test files in: {temp_dir}") + + try: + # Import the server components + from mcp_office_tools.mixins import UniversalMixin + + # Test the Universal Mixin directly + universal = UniversalMixin() + + print("\n๐Ÿ” Testing extract_text with CSV file...") + try: + result = await universal.extract_text(str(csv_path)) + print("โœ… CSV text extraction successful!") + print(f" Text length: {len(result.get('text', ''))}") + print(f" Method used: {result.get('method_used', 'unknown')}") + except Exception as e: + print(f"โŒ CSV text extraction failed: {e}") + + print("\n๐Ÿ” Testing get_supported_formats...") + try: + result = await universal.get_supported_formats() + print("โœ… Supported formats query successful!") + print(f" Total formats: {len(result.get('formats', []))}") + print(f" Excel formats: {len([f for f in result.get('formats', []) if 'Excel' in f.get('description', '')])}") + except Exception as e: + print(f"โŒ Supported formats query failed: {e}") + + print("\n๐Ÿ” Testing validation with unsupported file...") + try: + result = await universal.extract_text(str(txt_path)) + print("โŒ Should have failed with unsupported file!") + except Exception as e: + print(f"โœ… Correctly rejected unsupported file: {type(e).__name__}") + + print("\n๐Ÿ” Testing detect_office_format...") + try: + result = await universal.detect_office_format(str(csv_path)) + print("โœ… Format detection successful!") + print(f" Detected format: {result.get('format', 'unknown')}") + print(f" Is supported: {result.get('is_supported', False)}") + except Exception as e: + print(f"โŒ Format detection failed: {e}") + + except ImportError as e: + print(f"โŒ Failed to import server components: {e}") + return False + except Exception as e: + print(f"โŒ Unexpected error: {e}") + return False + finally: + # Cleanup + import shutil + shutil.rmtree(temp_dir) + print(f"\n๐Ÿงน Cleaned up test files from: {temp_dir}") + + print("\nโœ… Basic MCP Office Tools testing completed!") + return True + +if __name__ == "__main__": + asyncio.run(test_mcp_server()) \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index de9d55c..407f354 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -245,8 +245,8 @@ def mock_validation_context(): return MockValidationContext -# FastMCP-specific test markers -pytest_plugins = ["pytest_asyncio"] +# FastMCP-specific test markers and dashboard plugin +pytest_plugins = ["pytest_asyncio", "tests.pytest_dashboard_plugin"] # Configure pytest markers def pytest_configure(config): diff --git a/tests/pytest_dashboard_plugin.py b/tests/pytest_dashboard_plugin.py new file mode 100644 index 0000000..47b5016 --- /dev/null +++ 
b/tests/pytest_dashboard_plugin.py @@ -0,0 +1,194 @@ +"""Pytest plugin to capture test results for the dashboard. + +This plugin captures detailed test execution data including inputs, outputs, +timing, and status for display in the HTML test dashboard. +""" + +import json +import time +import traceback +from datetime import datetime +from pathlib import Path +from typing import Dict, List, Any +import pytest + + +class DashboardReporter: + """Reporter that captures test execution data for the dashboard.""" + + def __init__(self, output_path: str): + self.output_path = Path(output_path) + self.test_results: List[Dict[str, Any]] = [] + self.start_time = time.time() + self.session_metadata = { + "start_time": datetime.now().isoformat(), + "pytest_version": pytest.__version__, + } + + def pytest_runtest_protocol(self, item, nextitem): + """Capture test execution at the protocol level.""" + # Store test item for later use + item._dashboard_start = time.time() + return None + + def pytest_runtest_makereport(self, item, call): + """Capture test results and extract information.""" + if call.when == "call": # Only capture the main test call, not setup/teardown + test_data = { + "name": item.name, + "nodeid": item.nodeid, + "category": self._categorize_test(item), + "outcome": None, # Will be set in pytest_runtest_logreport + "duration": call.duration, + "timestamp": datetime.now().isoformat(), + "module": item.module.__name__ if item.module else "unknown", + "class": item.cls.__name__ if item.cls else None, + "function": item.function.__name__ if hasattr(item, "function") else item.name, + "inputs": self._extract_inputs(item), + "outputs": None, + "error": None, + "traceback": None, + } + + # Store for later processing in pytest_runtest_logreport + item._dashboard_data = test_data + + def pytest_runtest_logreport(self, report): + """Process test reports to extract outputs and status.""" + if report.when == "call" and hasattr(report, "item"): + item = report.item if hasattr(report, "item") else None + if item and hasattr(item, "_dashboard_data"): + test_data = item._dashboard_data + + # Set outcome + test_data["outcome"] = report.outcome + + # Extract output + if hasattr(report, "capstdout"): + test_data["outputs"] = { + "stdout": report.capstdout, + "stderr": getattr(report, "capstderr", ""), + } + + # Extract error information + if report.failed: + test_data["error"] = str(report.longrepr) if hasattr(report, "longrepr") else "Unknown error" + if hasattr(report, "longreprtext"): + test_data["traceback"] = report.longreprtext + elif hasattr(report, "longrepr"): + test_data["traceback"] = str(report.longrepr) + + # Extract actual output from test result if available + if hasattr(report, "result"): + test_data["outputs"]["result"] = str(report.result) + + self.test_results.append(test_data) + + def pytest_sessionfinish(self, session, exitstatus): + """Write results to JSON file at end of test session.""" + end_time = time.time() + + # Calculate summary statistics + total_tests = len(self.test_results) + passed = sum(1 for t in self.test_results if t["outcome"] == "passed") + failed = sum(1 for t in self.test_results if t["outcome"] == "failed") + skipped = sum(1 for t in self.test_results if t["outcome"] == "skipped") + + # Group by category + categories = {} + for test in self.test_results: + cat = test["category"] + if cat not in categories: + categories[cat] = {"total": 0, "passed": 0, "failed": 0, "skipped": 0} + categories[cat]["total"] += 1 + if test["outcome"] == "passed": + 
categories[cat]["passed"] += 1 + elif test["outcome"] == "failed": + categories[cat]["failed"] += 1 + elif test["outcome"] == "skipped": + categories[cat]["skipped"] += 1 + + # Build final output + output_data = { + "metadata": { + **self.session_metadata, + "end_time": datetime.now().isoformat(), + "duration": end_time - self.start_time, + "exit_status": exitstatus, + }, + "summary": { + "total": total_tests, + "passed": passed, + "failed": failed, + "skipped": skipped, + "pass_rate": (passed / total_tests * 100) if total_tests > 0 else 0, + }, + "categories": categories, + "tests": self.test_results, + } + + # Ensure output directory exists + self.output_path.parent.mkdir(parents=True, exist_ok=True) + + # Write JSON + with open(self.output_path, "w") as f: + json.dump(output_data, f, indent=2) + + print(f"\n Dashboard test results written to: {self.output_path}") + + def _categorize_test(self, item) -> str: + """Categorize test based on its name/path.""" + nodeid = item.nodeid.lower() + + if "word" in nodeid: + return "Word" + elif "excel" in nodeid: + return "Excel" + elif "powerpoint" in nodeid or "pptx" in nodeid: + return "PowerPoint" + elif "universal" in nodeid: + return "Universal" + elif "server" in nodeid: + return "Server" + else: + return "Other" + + def _extract_inputs(self, item) -> Dict[str, Any]: + """Extract test inputs from fixtures and parameters.""" + inputs = {} + + # Get fixture values + if hasattr(item, "funcargs"): + for name, value in item.funcargs.items(): + # Skip complex objects, only store simple values + if isinstance(value, (str, int, float, bool, type(None))): + inputs[name] = value + elif isinstance(value, (list, tuple)) and len(value) < 10: + inputs[name] = list(value) + elif isinstance(value, dict) and len(value) < 10: + inputs[name] = value + else: + inputs[name] = f"<{type(value).__name__}>" + + # Get parametrize values if present + if hasattr(item, "callspec"): + inputs["params"] = item.callspec.params + + return inputs + + +def pytest_configure(config): + """Register the dashboard reporter plugin.""" + output_path = config.getoption("--dashboard-output", default="reports/test_results.json") + reporter = DashboardReporter(output_path) + config.pluginmanager.register(reporter, "dashboard_reporter") + + +def pytest_addoption(parser): + """Add command line option for dashboard output path.""" + parser.addoption( + "--dashboard-output", + action="store", + default="reports/test_results.json", + help="Path to output JSON file for dashboard (default: reports/test_results.json)", + ) diff --git a/view_dashboard.sh b/view_dashboard.sh new file mode 100755 index 0000000..8a17422 --- /dev/null +++ b/view_dashboard.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Quick script to open the test dashboard in browser + +DASHBOARD_PATH="/home/rpm/claude/mcp-office-tools/reports/test_dashboard.html" + +echo "๐Ÿ“Š Opening MCP Office Tools Test Dashboard..." +echo "Dashboard: $DASHBOARD_PATH" +echo "" + +# Try different browser commands based on what's available +if command -v xdg-open &> /dev/null; then + xdg-open "$DASHBOARD_PATH" +elif command -v firefox &> /dev/null; then + firefox "$DASHBOARD_PATH" & +elif command -v chromium &> /dev/null; then + chromium "$DASHBOARD_PATH" & +elif command -v google-chrome &> /dev/null; then + google-chrome "$DASHBOARD_PATH" & +else + echo "โš ๏ธ No browser command found. Please open manually:" + echo " file://$DASHBOARD_PATH" +fi