# LLM Fusion MCP - Environment Configuration Example
# Copy this file to .env and add your API keys

# =============================================================================
# LLM PROVIDER API KEYS (Add at least one)
# =============================================================================

# Google Gemini (Recommended - Primary Provider)
# Get your key from: https://aistudio.google.com/app/apikey
GOOGLE_API_KEY=your_google_api_key_here

# OpenAI (Optional - GPT models, DALL-E, Whisper)
# Get your key from: https://platform.openai.com/api-keys
OPENAI_API_KEY=your_openai_api_key_here

# Anthropic (Optional - Claude models)
# Get your key from: https://console.anthropic.com/
ANTHROPIC_API_KEY=your_anthropic_api_key_here

# xAI Grok (Optional - Grok models)
# Get your key from: https://console.x.ai/
XAI_API_KEY=your_xai_api_key_here

# =============================================================================
# SERVER CONFIGURATION (Optional)
# =============================================================================

# Server mode (development, production)
SERVER_MODE=development

# Logging level (DEBUG, INFO, WARNING, ERROR)
LOG_LEVEL=INFO

# Maximum file size for analysis (in MB)
MAX_FILE_SIZE_MB=50

# Request timeout (in seconds)
REQUEST_TIMEOUT=300

# =============================================================================
# PERFORMANCE SETTINGS (Optional)
# =============================================================================

# Model cache timeout (in minutes)
MODEL_CACHE_TIMEOUT=5

# Maximum concurrent requests
MAX_CONCURRENT_REQUESTS=10

# Rate limiting (requests per minute per provider)
RATE_LIMIT_PER_MINUTE=60