forked from MCP/llm-fusion-mcp
llm-fusion-mcp/test_providers.py
Ryan Malloy · c335ba0e1e · 2025-09-05 · Initial commit: LLM Fusion MCP Server
- Unified access to 4 major LLM providers (Gemini, OpenAI, Anthropic, Grok)
- Real-time streaming support across all providers
- Multimodal capabilities (text, images, audio)
- Intelligent document processing with smart chunking
- Production-ready with health monitoring and error handling
- Full OpenAI ecosystem integration (Assistants, DALL-E, Whisper)
- Vector embeddings and semantic similarity
- Session-based API key management
- Built with FastMCP and modern Python tooling

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

#!/usr/bin/env python3
"""Test multi-provider LLM support."""
import os
import sys

sys.path.insert(0, 'src')

# The server module must be imported after the src/ path tweak above.
from llm_fusion_mcp.server import llm_set_provider, llm_get_provider, llm_list_providers, llm_generate
from dotenv import load_dotenv

load_dotenv()
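
# Added sanity check (sketch): the tests below read ANTHROPIC_API_KEY and
# GOOGLE_API_KEY directly. Key names for the other providers (OPENAI_API_KEY,
# XAI_API_KEY) are an assumption based on common conventions, not this file.
_KEY_VARS = ["ANTHROPIC_API_KEY", "GOOGLE_API_KEY", "OPENAI_API_KEY", "XAI_API_KEY"]
if not any(os.getenv(v) for v in _KEY_VARS):
    print("Warning: no provider API keys found in the environment; "
          "provider-specific tests will be skipped.")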

def test_provider_management():
    """Test provider switching and info."""
    print("Testing provider management...")
    print("=" * 50)

    # List all providers
    providers = llm_list_providers()
    print("Available providers:")
    for provider, info in providers["providers"].items():
        print(f"  {provider}: {info['default_model']} (API key: {'✓' if info['api_key_configured'] else '✗'})")

    # Get current provider
    current = llm_get_provider()
    print(f"\nCurrent provider: {current['current_provider']}")

    # Test switching providers
    if os.getenv("ANTHROPIC_API_KEY"):
        print("\nSwitching to Anthropic...")
        result = llm_set_provider("anthropic")
        if result["success"]:
            print(f"✓ Switched to {result['provider']}")
            print(f"  Default model: {result['default_model']}")
            print(f"  Available models: {len(result['available_models'])} models")
            # Show Claude 4 models
            claude_4_models = [m for m in result['available_models'] if 'claude-4' in m]
            print(f"  Claude 4 models: {claude_4_models}")
        else:
            print(f"✗ Failed: {result['error']}")

def test_llm_generate():
    """Test the new llm_generate function."""
    print("\nTesting llm_generate function...")
    print("=" * 50)

    prompt = "Write a haiku about coding"

    # Test with current provider (streaming)
    print("Testing streaming with current provider...")
    try:
        for chunk in llm_generate(prompt, stream=True):
            if chunk.get("success") and chunk.get("type") == "content":
                print(chunk.get("chunk", ""), end="", flush=True)
            elif chunk.get("finished"):
                print(f"\n✓ Generated with {chunk.get('provider')} / {chunk.get('model')}")
                break
    except Exception as e:
        print(f"✗ Error: {e}")

    # Test provider override
    if os.getenv("GOOGLE_API_KEY"):
        print("\nTesting provider override (Gemini)...")
        try:
            result = llm_generate(prompt, provider="gemini", stream=False)
            if result.get("success"):
                print(f"✓ Generated with {result['provider']} / {result['model']}")
                print(f"Text: {result['text'][:100]}...")
            else:
                print(f"✗ Error: {result.get('error')}")
        except Exception as e:
            print(f"✗ Error: {e}")

if __name__ == "__main__":
    test_provider_management()
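    test_invalid_provider()  # negative-path sketch defined above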
    test_llm_generate()
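    test_streaming_override()  # combined-options sketch defined above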
print("\n" + "=" * 50)
print("Provider tests completed!")