#!/usr/bin/env python3
"""Test multi-provider LLM support directly."""

import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
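# API keys are read from the environment; python-dotenv also picks up a
# local .env file, so either works. Providers without a configured key
# are reported and skipped rather than failing the run.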
# Provider configurations
PROVIDER_CONFIG = {
    "gemini": {
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
        "api_key_env": "GOOGLE_API_KEY",
        "default_model": "gemini-1.5-flash",
        "models": ["gemini-1.5-flash", "gemini-2.0-flash", "gemini-2.5-pro", "gemini-2.5-flash"],
    },
    "openai": {
        "base_url": "https://api.openai.com/v1/",
        "api_key_env": "OPENAI_API_KEY",
        "default_model": "gpt-4o-mini",
        "models": ["gpt-4o", "gpt-4o-mini", "o1-preview", "o1-mini"],
    },
    "anthropic": {
        "base_url": "https://api.anthropic.com/v1/",
        "api_key_env": "ANTHROPIC_API_KEY",
        "default_model": "claude-3-5-sonnet-20241022",
        "models": [
            "claude-4-opus", "claude-4-sonnet",
            "claude-3-5-sonnet-20241022", "claude-3-5-haiku-20241022",
            "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307",
            "claude-3-5-sonnet", "claude-3-5-haiku",
        ],
    },
}
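# Note: Gemini and Anthropic are reached through their OpenAI-compatible
# endpoints (the base_url values above), which is why a single OpenAI
# client class can drive all three providers.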
def get_client(provider: str) -> OpenAI:
    """Get OpenAI client for the specified provider."""
    config = PROVIDER_CONFIG[provider]
    api_key = os.getenv(config["api_key_env"])

    if not api_key:
        raise ValueError(f"API key not found for {provider}. Please set {config['api_key_env']}")

    return OpenAI(api_key=api_key, base_url=config["base_url"])
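# Illustrative usage sketch (assumes GOOGLE_API_KEY is set in the
# environment; any configured provider works the same way):
#
#   client = get_client("gemini")
#   resp = client.chat.completions.create(
#       model=PROVIDER_CONFIG["gemini"]["default_model"],
#       messages=[{"role": "user", "content": "Hello"}],
#   )
#   print(resp.choices[0].message.content)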
def test_provider_info():
    """Test provider information display."""
    print("Multi-Provider LLM Support Test")
    print("=" * 70)

    for provider, config in PROVIDER_CONFIG.items():
        api_key_set = bool(os.getenv(config["api_key_env"]))
        print(f"{provider.upper()}: {'✓' if api_key_set else '✗'} API key configured")
        print(f"  Default: {config['default_model']}")
        print(f"  Models: {len(config['models'])} available")

        if provider == "anthropic":
            claude_4_models = [m for m in config["models"] if "claude-4" in m]
            print(f"  Claude 4: {claude_4_models}")
        print()
def test_provider_generation(provider: str):
    """Test text generation with a specific provider."""
    print(f"Testing {provider.upper()} generation...")
    print("-" * 40)

    try:
        config = PROVIDER_CONFIG[provider]
        if not os.getenv(config["api_key_env"]):
            print(f"⚠️ Skipping {provider} - no API key configured")
            return

        client = get_client(provider)
        model = config["default_model"]

        # Test streaming
        stream = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Say hello and name yourself in one sentence"}],
            stream=True,
        )

        full_text = ""
        for chunk in stream:
            # Some providers send chunks with an empty `choices` list
            # (e.g. usage-only chunks), so guard before indexing.
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_text += content
                print(content, end="", flush=True)

        print(f"\n✓ {provider} working with {model}")
        print(f"Response length: {len(full_text)} chars")

    except Exception as e:
        print(f"✗ {provider} failed: {e}")

    print()
if __name__ == "__main__":
    test_provider_info()

    # Test each provider
    for provider in ["gemini", "anthropic", "openai"]:
        test_provider_generation(provider)

    print("=" * 70)
    print("Multi-provider test completed!")