#!/usr/bin/env python3
"""
Test script for the Universal MCP Tool Orchestrator.
Tests configuration loading, provider initialization, and basic functionality.
"""

import asyncio
import json
import logging
import os
import sys
from pathlib import Path

from dotenv import load_dotenv

# Load environment variables first so provider API keys are visible before
# the configuration modules are imported.
load_dotenv()

# Add src directory to path so we can import our modules.
# NOTE(review): the `src.`-prefixed imports below rely on the script's own
# directory being on sys.path (added automatically when run as a script);
# this insert additionally makes the bare `llm_fusion_mcp` package importable.
sys.path.insert(0, str(Path(__file__).parent / "src"))

from src.llm_fusion_mcp.config import load_config, validate_api_keys
from src.llm_fusion_mcp.error_handling import ErrorHandler, ErrorType

# Configure logging for testing.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)


async def test_configuration():
    """Test configuration loading and API-key validation.

    Returns:
        Tuple ``(config, valid_providers)``: the loaded configuration (or
        ``None`` on failure) and the list of provider names whose API keys
        validated (empty on failure).
    """
    print("=" * 60)
    print("TESTING CONFIGURATION SYSTEM")
    print("=" * 60)

    try:
        # Load configuration
        config = load_config()
        # Fixed: was an f-string with no placeholders.
        print("āœ… Configuration loaded successfully")
        print(f" - Providers configured: {len(config.providers)}")
        print(f" - MCP servers configured: {len(config.mcp_servers)}")
        print(f" - Default provider: {config.default_provider}")

        # Validate API keys
        api_key_status = validate_api_keys(config)
        print("\nšŸ“‹ API Key Validation:")
        for provider, is_valid in api_key_status.items():
            status = "āœ… Valid" if is_valid else "āŒ Invalid/Missing"
            print(f" - {provider}: {status}")

        valid_providers = [p for p, valid in api_key_status.items() if valid]
        print(f"\nšŸ”‘ Usable providers: {len(valid_providers)} / {len(api_key_status)}")

        return config, valid_providers

    except Exception as e:
        # Broad catch is deliberate: this is a diagnostic script and any
        # failure should be reported rather than abort the remaining tests.
        print(f"āŒ Configuration test failed: {e}")
        return None, []


async def test_error_handler():
    """Test the error-handling system (circuit breaker and statistics).

    Returns:
        The exercised ``ErrorHandler`` instance, or ``None`` on failure.
    """
    print("\n" + "=" * 60)
    print("TESTING ERROR HANDLING SYSTEM")
    print("=" * 60)

    try:
        error_handler = ErrorHandler()

        # Set fallback order
        error_handler.set_provider_fallback_order(['gemini', 'openai', 'anthropic', 'grok'])

        # Test circuit breaker functionality
        print("šŸ”§ Testing circuit breaker...")

        # All providers should be available initially
        available = error_handler.get_available_providers(['gemini', 'openai', 'anthropic'])
        print(f" - Available providers: {available}")

        # Simulate some errors for 'test_provider'
        from src.llm_fusion_mcp.error_handling import ErrorInfo
        for i in range(6):  # Exceed failure threshold
            error_info = ErrorInfo(
                error_type=ErrorType.PROVIDER_API_ERROR,
                provider='test_provider',
                message=f'Simulated error {i+1}',
            )
            error_handler.record_error(error_info)

        # Check if circuit breaker opened
        is_available = error_handler.is_provider_available('test_provider')
        print(f" - Test provider available after errors: {is_available}")

        # Get error statistics
        stats = error_handler.get_error_statistics()
        print(f" - Error statistics: {json.dumps(stats, indent=4)}")

        print("āœ… Error handling system working correctly")
        return error_handler

    except Exception as e:
        print(f"āŒ Error handler test failed: {e}")
        return None


async def test_provider_imports():
    """Test that all required packages can be imported.

    Returns:
        ``True`` if every package imported successfully.
    """
    print("\n" + "=" * 60)
    print("TESTING PROVIDER IMPORTS")
    print("=" * 60)

    packages = {
        'openai': 'OpenAI Python SDK',
        'anthropic': 'Anthropic Python SDK',
        'httpx': 'HTTP client for async requests',
        'fastapi': 'FastAPI web framework',
        'uvicorn': 'ASGI server',
        'pydantic': 'Data validation',
        'yaml': 'YAML configuration parsing',
    }

    success_count = 0
    for package, description in packages.items():
        try:
            # __import__ handles every entry uniformly; the original
            # special-cased 'yaml' with a literal import, which was redundant.
            __import__(package)
            print(f"āœ… {package}: {description}")
            success_count += 1
        except ImportError as e:
            print(f"āŒ {package}: Missing - {e}")

    print(f"\nšŸ“¦ Package availability: {success_count}/{len(packages)} packages available")
    return success_count == len(packages)


async def test_basic_functionality():
    """Test basic orchestrator functionality.

    This is a minimal test since the full MCP protocol isn't implemented yet:
    it only verifies that each configured provider exposes the expected fields.

    Returns:
        ``True`` on success, ``False`` if configuration access raised.
    """
    print("\n" + "=" * 60)
    print("TESTING BASIC ORCHESTRATOR FUNCTIONALITY")
    print("=" * 60)

    try:
        config = load_config()

        # Test that we can initialize provider configurations
        for provider_name, provider_config in config.providers.items():
            print(f"šŸ”§ Testing {provider_name} configuration...")

            if provider_config.interface == "openai":
                # Fixed: the two interface lines were placeholder-less f-strings.
                print(" - Interface: OpenAI-compatible")
                print(f" - Base URL: {provider_config.base_url}")
                print(f" - Models: {len(provider_config.models)} available")
            else:
                print(" - Interface: Native")
                print(f" - Models: {len(provider_config.models)} available")

            print(f" - Default model: {provider_config.default_model}")

        print("āœ… Basic orchestrator functionality test passed")
        return True

    except Exception as e:
        print(f"āŒ Basic functionality test failed: {e}")
        return False


async def test_mcp_configuration():
    """Test MCP server configuration (stdio and http transports).

    Returns:
        ``True`` on success, ``False`` if configuration access raised.
    """
    print("\n" + "=" * 60)
    print("TESTING MCP SERVER CONFIGURATION")
    print("=" * 60)

    try:
        config = load_config()

        for server_name, server_config in config.mcp_servers.items():
            print(f"šŸ”§ Testing {server_name} MCP server configuration...")
            print(f" - Type: {server_config.type}")
            print(f" - Namespace: {server_config.namespace}")
            print(f" - Auto-start: {server_config.auto_start}")

            if server_config.type == "stdio":
                print(f" - Command: {' '.join(server_config.command)}")
                if server_config.args:
                    print(f" - Args: {' '.join(server_config.args)}")
            elif server_config.type == "http":
                print(f" - URL: {server_config.url}")

        print("āœ… MCP server configuration test passed")
        return True

    except Exception as e:
        print(f"āŒ MCP configuration test failed: {e}")
        return False


async def main():
    """Run all tests and print a summary.

    Returns:
        ``True`` if every test passed (used as the process exit status).
    """
    print("šŸš€ Starting Universal MCP Tool Orchestrator Tests")
    print("=" * 60)

    # Track test results
    tests_passed = 0
    total_tests = 0

    # Test 1: Package imports
    total_tests += 1
    if await test_provider_imports():
        tests_passed += 1

    # Test 2: Configuration system
    total_tests += 1
    config, valid_providers = await test_configuration()
    if config is not None:
        tests_passed += 1

    # Test 3: Error handling
    total_tests += 1
    error_handler = await test_error_handler()
    if error_handler is not None:
        tests_passed += 1

    # Test 4: Basic functionality
    total_tests += 1
    if await test_basic_functionality():
        tests_passed += 1

    # Test 5: MCP configuration
    total_tests += 1
    if await test_mcp_configuration():
        tests_passed += 1

    # Final results
    print("\n" + "=" * 60)
    print("TEST RESULTS SUMMARY")
    print("=" * 60)
    print(f"šŸ“Š Tests passed: {tests_passed}/{total_tests}")

    if tests_passed == total_tests:
        print("šŸŽ‰ All tests passed! The orchestrator is ready for Phase 2.")
    else:
        print("āš ļø Some tests failed. Please check the output above for details.")

    # Provide guidance on next steps
    if config is None:
        print("\nšŸ’” Next steps:")
        print(" 1. Copy .env.example to .env and add your API keys")
        print(" 2. Ensure config/orchestrator.yaml exists")

    if not valid_providers:
        print("\nšŸ’” API Key setup:")
        print(" 1. Get API keys from provider websites")
        print(" 2. Add them to your .env file")
        print(" 3. At least one provider API key is required")

    print("\nšŸ”— Configuration files:")
    print(" - Main config: config/orchestrator.yaml")
    print(" - Environment: .env (copy from .env.example)")
    print(" - Source code: src/llm_fusion_mcp/")

    return tests_passed == total_tests


if __name__ == "__main__":
    try:
        success = asyncio.run(main())
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        print("\n\nāš ļø Tests interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n\nāŒ Unexpected error during testing: {e}")
        sys.exit(1)