# LLM Fusion MCP - Production Docker Image

FROM python:3.12-slim

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_CACHE_DIR=/tmp/uv-cache

# Install system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*
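# (curl is needed just below to fetch the uv installer; git is presumably
# there so uv can resolve any VCS-pinned dependencies)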

# Install uv
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
# Current uv installers place the binary in ~/.local/bin; older releases
# used ~/.cargo/bin, so both are added to PATH
ENV PATH="/root/.local/bin:/root/.cargo/bin:$PATH"

# Create app directory
WORKDIR /app

# Copy dependency files
COPY pyproject.toml uv.lock ./
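# (copying the lockfiles before the source keeps the dependency layer cached
# across code-only changes)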

# Install dependencies
RUN uv sync --frozen --no-dev
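# Note: `uv sync` creates the project environment at /app/.venv; run_server.sh
# is assumed to launch the server through it (e.g. via `uv run`)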

# Copy application code
COPY src/ ./src/
COPY run_server.sh ./
COPY .env.example ./

# Make run script executable
RUN chmod +x run_server.sh

# Create non-root user for security
RUN useradd -m -u 1000 llmfusion && chown -R llmfusion:llmfusion /app
USER llmfusion

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import sys; sys.exit(0)"
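# (placeholder probe: it exits 0 whenever the interpreter starts, so it only
# proves the container is alive; swap in a real server check once one exists)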

# Expose port (if running HTTP server in future)
EXPOSE 8000

# Run the server
CMD ["./run_server.sh"]
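
# Example usage (image name and env file are illustrative; the server is
# assumed to speak MCP over stdio, hence -i):
#   docker build -t llm-fusion-mcp .
#   docker run --rm -i --env-file .env llm-fusion-mcp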