llm-fusion-mcp/run_server.sh
Initial commit: LLM Fusion MCP Server
- Unified access to 4 major LLM providers (Gemini, OpenAI, Anthropic, Grok)
- Real-time streaming support across all providers
- Multimodal capabilities (text, images, audio)
- Intelligent document processing with smart chunking
- Production-ready with health monitoring and error handling
- Full OpenAI ecosystem integration (Assistants, DALL-E, Whisper)
- Vector embeddings and semantic similarity
- Session-based API key management
- Built with FastMCP and modern Python tooling

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

#!/bin/bash
# LLM Fusion MCP Server Launcher
# For use with Claude Desktop and other MCP clients
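#
# Example Claude Desktop entry pointing at this script (illustrative only;
# the install path below is a placeholder, adjust it for your machine):
#
#   {
#     "mcpServers": {
#       "llm-fusion-mcp": {
#         "command": "/path/to/llm-fusion-mcp/run_server.sh"
#       }
#     }
#   }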

set -e

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVER_NAME="llm-fusion-mcp"
PYTHON_MODULE="llm_fusion_mcp.server"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # reset (no color)

# Logging helpers
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1" >&2
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1" >&2
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}
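# Note: every helper above writes to stderr (>&2). Our reading is that this
# keeps stdout clean for the MCP protocol traffic between server and client,
# which matters when an MCP client launches this script over stdio.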

# Check if running in project directory
if [ ! -f "$SCRIPT_DIR/pyproject.toml" ]; then
    log_error "pyproject.toml not found. Please run this script from the project root directory."
    exit 1
fi

# Check if uv is available
if ! command -v uv &> /dev/null; then
    log_error "uv is not installed. Please install uv first:"
    log_error "  curl -LsSf https://astral.sh/uv/install.sh | sh"
    exit 1
fi

# Check if dependencies are installed
if [ ! -f "$SCRIPT_DIR/uv.lock" ] || [ ! -d "$SCRIPT_DIR/.venv" ]; then
    log_info "Installing dependencies..."
    cd "$SCRIPT_DIR"
    uv sync --all-extras
    log_success "Dependencies installed"
fi

# Validate API keys
log_info "Checking API key configuration..."

# Load environment variables if .env exists
if [ -f "$SCRIPT_DIR/.env" ]; then
    set -a
    source "$SCRIPT_DIR/.env"
    set +a
    log_info "Loaded environment from .env file"
elif [ -f "$SCRIPT_DIR/.env.production" ]; then
    log_warning "No .env file found, but .env.production exists"
    log_warning "Copy .env.production to .env and configure your API keys"
else
    log_warning "No environment file found. API keys must be set as environment variables"
fi
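# Example .env contents (illustrative placeholder values, not real keys):
#   GOOGLE_API_KEY=your-google-key
#   OPENAI_API_KEY=your-openai-key
#   ANTHROPIC_API_KEY=your-anthropic-key
#   XAI_API_KEY=your-xai-key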

# Check for at least one API key
if [ -z "$GOOGLE_API_KEY" ] && [ -z "$OPENAI_API_KEY" ] && [ -z "$ANTHROPIC_API_KEY" ] && [ -z "$XAI_API_KEY" ]; then
    log_warning "No API keys configured. The server will start but providers may not work."
    log_warning "Set at least one of: GOOGLE_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY, XAI_API_KEY"
fi
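# Keys can also be supplied inline for a one-off run (placeholder value):
#   GOOGLE_API_KEY=your-google-key ./run_server.sh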

# Start the server
log_info "Starting LLM Fusion MCP Server..."
log_info "Server: $SERVER_NAME"
log_info "Module: $PYTHON_MODULE"
log_info "Working Directory: $SCRIPT_DIR"

cd "$SCRIPT_DIR"

# Use uv to run the server; exec replaces this shell so signals from the
# MCP client reach the Python process directly
exec uv run python -m "$PYTHON_MODULE"