llm-fusion-mcp/src/llm_fusion_mcp/openai_server.py

"""OpenAI-specific tools as a separate FastMCP server for composition."""
import os
from typing import Dict, Any, Optional, List
from openai import OpenAI
from fastmcp import FastMCP
# Create separate OpenAI server
openai_mcp = FastMCP(name="OpenAIServer")
def get_openai_client() -> OpenAI:
"""Get configured OpenAI client with API key from environment."""
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("No OpenAI API key found. Set OPENAI_API_KEY environment variable.")
return OpenAI(api_key=api_key)
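
# Usage note (illustrative, not from the original file): the helper reads
# OPENAI_API_KEY from the process environment, so the key is exported before
# the server starts. The module path below is an assumption based on the
# repository layout:
#
#   OPENAI_API_KEY=sk-... python -m llm_fusion_mcp.openai_server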


# =============================================================================
# OPENAI BASIC TOOLS
# =============================================================================


@openai_mcp.tool()
def openai_test_connection() -> Dict[str, Any]:
    """Test OpenAI API connection and list available models.

    This tool verifies the OpenAI integration is working correctly.
    Returns information about available models and API connectivity.
    """
    try:
        client = get_openai_client()
        models = client.models.list()
        model_names = [model.id for model in models.data[:10]]  # First 10 models
        return {
            "status": "connected",
            "models_sample": model_names,
            "total_models": len(models.data),
            "success": True,
        }
    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "success": False,
        }


@openai_mcp.tool()
def openai_generate_simple(prompt: str, model: str = "gpt-4o-mini") -> Dict[str, Any]:
    """Generate text using the OpenAI API with a simple interface.

    Args:
        prompt: The text prompt to generate from
        model: OpenAI model to use (default: gpt-4o-mini)

    Returns:
        Dict with generated text and metadata
    """
    try:
        client = get_openai_client()
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000,
        )
        return {
            "text": response.choices[0].message.content,
            "model": model,
            "usage": {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens,
            },
            "success": True,
        }
    except Exception as e:
        return {
            "error": str(e),
            "success": False,
        }
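
# Local smoke test (sketch, version-dependent): on FastMCP 2.x the decorator
# returns a Tool object whose original callable is exposed as `.fn`; on the
# reference MCP SDK the decorated name remains a plain function. Either way
# the tool body can be exercised without an MCP client:
#
#   result = openai_generate_simple.fn("Say hello in one word.")  # FastMCP 2.x
#   print(result["text"] if result["success"] else result["error"])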


# =============================================================================
# OPENAI ASSISTANTS API
# =============================================================================


@openai_mcp.tool()
def openai_create_assistant(
    name: str,
    instructions: str,
    model: str = "gpt-4o",
    tools: Optional[List[Dict[str, Any]]] = None,
    description: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a new OpenAI Assistant with persistent behavior and capabilities.

    Args:
        name: Name for the assistant
        instructions: System instructions defining behavior
        model: OpenAI model to use (gpt-4o, gpt-4-turbo, etc.)
        tools: List of tools, e.g. [{"type": "code_interpreter"}, {"type": "file_search"}]
        description: Optional description

    Returns:
        Dict with assistant details
    """
    try:
        client = get_openai_client()
        assistant_data: Dict[str, Any] = {
            "name": name,
            "instructions": instructions,
            "model": model,
        }
        if description:
            assistant_data["description"] = description
        if tools:
            assistant_data["tools"] = tools
        assistant = client.beta.assistants.create(**assistant_data)
        return {
            "id": assistant.id,
            "name": assistant.name,
            "instructions": assistant.instructions,
            "model": assistant.model,
            "tools": assistant.tools,
            "success": True,
        }
    except Exception as e:
        return {
            "error": str(e),
            "success": False,
        }
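
# Illustrative call (argument values invented for the example): `tools` uses
# the OpenAI Assistants tool schema quoted in the docstring above:
#
#   openai_create_assistant(
#       name="Data Helper",
#       instructions="You analyze uploaded CSV files and summarize trends.",
#       tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
#   )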


@openai_mcp.tool()
def openai_list_assistants(limit: int = 20) -> Dict[str, Any]:
    """List OpenAI Assistants in your account (up to `limit`).

    Args:
        limit: Maximum number of assistants to return

    Returns:
        Dict with list of assistants
    """
    try:
        client = get_openai_client()
        assistants = client.beta.assistants.list(limit=limit)
        assistant_list = []
        for assistant in assistants.data:
            assistant_list.append({
                "id": assistant.id,
                "name": assistant.name,
                "instructions": assistant.instructions,
                "model": assistant.model,
                "created_at": assistant.created_at,
            })
        return {
            "assistants": assistant_list,
            "count": len(assistant_list),
            "success": True,
        }
    except Exception as e:
        return {
            "error": str(e),
            "success": False,
        }
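
# Note (based on the official openai SDK's auto-pagination): `assistants.list()`
# returns a single page, so `assistants.data` above covers at most `limit`
# entries. Iterating the page object walks all pages transparently:
#
#   for assistant in client.beta.assistants.list(limit=20):
#       ...  # visits every assistant, not just the first page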


# =============================================================================
# OPENAI IMAGES API (DALL-E)
# =============================================================================


@openai_mcp.tool()
def openai_generate_image(
    prompt: str,
    model: str = "dall-e-3",
    size: str = "1024x1024",
    quality: str = "standard",
    n: int = 1,
) -> Dict[str, Any]:
    """Generate images using OpenAI DALL-E.

    Args:
        prompt: Image description prompt
        model: dall-e-3 or dall-e-2
        size: Image size (1024x1024, 1024x1792, or 1792x1024 for dall-e-3)
        quality: standard or hd (dall-e-3 only)
        n: Number of images (dall-e-3 supports only 1; dall-e-2 supports 1-10)

    Returns:
        Dict with image URLs and metadata
    """
    try:
        client = get_openai_client()
        kwargs: Dict[str, Any] = {
            "model": model,
            "prompt": prompt,
            "size": size,
            "n": n,
        }
        if model == "dall-e-3":
            # dall-e-2 does not accept the quality parameter
            kwargs["quality"] = quality
        response = client.images.generate(**kwargs)
        images = []
        for image in response.data:
            images.append({
                "url": image.url,
                # dall-e-3 may rewrite the prompt; dall-e-2 responses lack this field
                "revised_prompt": getattr(image, "revised_prompt", None),
            })
        return {
            "images": images,
            "model": model,
            "size": size,
            "prompt": prompt,
            "success": True,
        }
    except Exception as e:
        return {
            "error": str(e),
            "success": False,
        }
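
# The module docstring frames this as a server built for composition. A minimal
# sketch of both entry points, assuming FastMCP 2.x (`mount()` exists there;
# its exact signature has varied across releases):
#
#   main_mcp = FastMCP(name="MainServer")
#   main_mcp.mount(openai_mcp, prefix="openai")  # compose into a parent server

if __name__ == "__main__":
    # Or run this module standalone over the default stdio transport.
    openai_mcp.run()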