# Forked from MCP/llm-fusion-mcp.
#
# Upstream feature summary:
# - Unified access to 4 major LLM providers (Gemini, OpenAI, Anthropic, Grok)
# - Real-time streaming support across all providers
# - Multimodal capabilities (text, images, audio)
# - Intelligent document processing with smart chunking
# - Production-ready with health monitoring and error handling
# - Full OpenAI ecosystem integration (Assistants, DALL-E, Whisper)
# - Vector embeddings and semantic similarity
# - Session-based API key management
# - Built with FastMCP and modern Python tooling
"""OpenAI-specific tools for the Multi-LLM MCP Server - Simple Working Version"""
|
|
|
|
import os
|
|
from typing import Dict, Any, Optional
|
|
from openai import OpenAI
|
|
|
|
def get_openai_client() -> OpenAI:
    """Build an OpenAI client authenticated via the environment.

    Reads the key from the ``OPENAI_API_KEY`` environment variable.

    Returns:
        OpenAI: a client configured with the discovered API key.

    Raises:
        ValueError: if ``OPENAI_API_KEY`` is not set.
    """
    key = os.getenv("OPENAI_API_KEY")
    if key:
        return OpenAI(api_key=key)
    # Fail fast with an actionable message rather than letting the SDK
    # error out later with a less specific authentication failure.
    raise ValueError("No OpenAI API key found. Set OPENAI_API_KEY environment variable.")
|
|
|
|
def register_simple_openai_tools(mcp):
    """Attach the simplified OpenAI tools to a FastMCP server.

    Args:
        mcp: a FastMCP server instance; its ``tool()`` decorator is used to
            register each tool callable below.
    """

    @mcp.tool()
    def openai_test_connection() -> Dict[str, Any]:
        """Test OpenAI API connection and list available models.

        This is a simple test tool to verify the OpenAI integration is working.
        Returns information about available models and API connectivity.
        """
        try:
            catalog = get_openai_client().models.list()
            preview = [entry.id for entry in catalog.data[:10]]  # first 10 models only
            return {
                "status": "connected",
                "models_sample": preview,
                "total_models": len(catalog.data),
                "success": True,
            }
        except Exception as exc:
            # Tools report failures as structured payloads instead of raising,
            # so MCP clients always receive a well-formed response.
            return {"status": "error", "error": str(exc), "success": False}

    @mcp.tool()
    def openai_generate_simple(prompt: str, model: str = "gpt-4o-mini") -> Dict[str, Any]:
        """Generate text using OpenAI API with simple interface.

        Args:
            prompt: The text prompt to generate from
            model: OpenAI model to use (default: gpt-4o-mini)

        Returns:
            Dict with generated text and metadata
        """
        try:
            completion = get_openai_client().chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=1000,
            )
            stats = completion.usage
            return {
                "text": completion.choices[0].message.content,
                "model": model,
                "usage": {
                    "prompt_tokens": stats.prompt_tokens,
                    "completion_tokens": stats.completion_tokens,
                    "total_tokens": stats.total_tokens,
                },
                "success": True,
            }
        except Exception as exc:
            # Same structured-error convention as openai_test_connection.
            return {"error": str(exc), "success": False}

    print("✅ Simple OpenAI tools registered successfully!")