🚀 Features:
- FastMCP 2.8.1+ integration with modern Python 3.11+ features
- Kuzu graph database for intelligent memory relationships
- Multi-provider embedding support (OpenAI, Ollama, Sentence Transformers)
- Automatic relationship detection via semantic similarity
- Graph traversal for connected memory discovery
- 8 MCP tools for comprehensive memory operations

🦙 Self-Hosted Focus:
- Ollama provider for complete privacy and control
- Zero external dependencies for sacred-trust applications
- Production-ready with comprehensive testing
- Interactive setup script with provider selection

📦 Complete Package:
- memory_mcp_server.py (1,010 lines) - Main FastMCP server
- Comprehensive test suite and examples
- Detailed documentation including Ollama setup guide
- MCP client configuration examples
- Interactive setup script

🎯 Perfect for LLM memory systems requiring:
- Privacy-first architecture
- Intelligent relationship modeling
- Graph-based memory exploration
- Self-hosted deployment capabilities
#!/bin/bash
# Ultimate Memory MCP Server - Ollama Edition Setup Script
# Self-hosted embeddings with complete privacy and control

set -e

echo "🦙 Setting up Ultimate Memory MCP Server - Ollama Edition..."

# Check Python version
python_version=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2)
required_version="3.11"

if [ "$(printf '%s\n' "$required_version" "$python_version" | sort -V | head -n1)" != "$required_version" ]; then
    echo "❌ Python 3.11+ is required. You have Python $python_version"
    echo "Please upgrade Python and try again."
    exit 1
fi

echo "✅ Python $python_version detected"

# Install dependencies
echo "📦 Installing dependencies..."
pip install -r requirements.txt
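
# Note: the pip install above targets whatever Python environment is active.
# If you prefer isolation, one option (not required by this script) is:
#   python3 -m venv .venv && source .venv/bin/activate
#   pip install -r requirements.txt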

# Check if Ollama is installed
echo "🔍 Checking for Ollama installation..."
if command -v ollama &> /dev/null; then
    echo "✅ Ollama is installed"
    ollama_version=$(ollama --version 2>&1 | head -n1)
    echo "   Version: $ollama_version"
else
    echo "❌ Ollama not found"
    echo ""
    echo "📥 Please install Ollama:"
    echo "   Linux/macOS: curl -fsSL https://ollama.ai/install.sh | sh"
    echo "   Or download from: https://ollama.ai/"
    echo ""
    read -p "Continue setup without Ollama? (y/N): " continue_setup
    if [[ ! $continue_setup =~ ^[Yy]$ ]]; then
        echo "Please install Ollama and run setup again."
        exit 1
    fi
fi

# Check if .env exists
if [ ! -f .env ]; then
    echo "⚙️ Creating environment configuration..."
    cp .env.example .env
    echo "✅ Created .env file with default settings"
else
    echo "✅ Environment file already exists"
fi
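
# Note: the copy above assumes .env.example ships with the repository; if it
# is missing, create .env by hand using the variables shown in the
# configuration summary below.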

# Test Ollama connection if available
if command -v ollama &> /dev/null; then
    echo ""
    echo "🧪 Testing Ollama setup..."

    # Check if Ollama server is running
    if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
        echo "✅ Ollama server is running"

        # Check for required model
        model_name="nomic-embed-text"
        if ollama list | grep -q "$model_name"; then
            echo "✅ Embedding model '$model_name' is available"
        else
            echo "❌ Embedding model '$model_name' not found"
            echo ""
            read -p "Download the embedding model now? (Y/n): " download_model
            if [[ ! $download_model =~ ^[Nn]$ ]]; then
                echo "📥 Downloading $model_name..."
                if ollama pull "$model_name"; then
                    echo "✅ Model downloaded successfully"
                else
                    echo "❌ Failed to download model"
                fi
            fi
        fi
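
        # Optional sanity check (a sketch, not run by this script): once the
        # model is pulled, Ollama's /api/embeddings endpoint returns a vector
        # for a test prompt:
        #   curl -s http://localhost:11434/api/embeddings \
        #     -d '{"model": "nomic-embed-text", "prompt": "hello"}'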

        # Optional: Check for summary model
        summary_model="llama3.2:1b"
        if ollama list | grep -q "$summary_model"; then
            echo "✅ Summary model '$summary_model' is available"
        else
            echo "ℹ️ Summary model '$summary_model' not found (optional)"
            read -p "Download the summary model? (y/N): " download_summary
            if [[ $download_summary =~ ^[Yy]$ ]]; then
                echo "📥 Downloading $summary_model..."
                ollama pull "$summary_model"
            fi
        fi
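
        # llama3.2:1b is a small (1B-parameter) general model; presumably the
        # server uses it only for optional memory summaries, so skipping it
        # does not affect embedding generation.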

    else
        echo "❌ Ollama server is not running"
        echo ""
        echo "🚀 To start Ollama server:"
        echo "   ollama serve"
        echo ""
        echo "   Then in another terminal:"
        echo "   ollama pull nomic-embed-text"
        echo ""
    fi
fi

# Create database directory
mkdir -p memory_graph_db
echo "✅ Created database directory"

# Show current configuration
echo ""
echo "📋 Configuration Summary:"
if [ -f .env ]; then
    base_url=$(grep "OLLAMA_BASE_URL=" .env | cut -d= -f2)
    model=$(grep "OLLAMA_EMBEDDING_MODEL=" .env | cut -d= -f2)
    db_path=$(grep "KUZU_DB_PATH=" .env | cut -d= -f2)

    echo "   Database: $db_path"
    echo "   Ollama URL: $base_url"
    echo "   Embedding Model: $model"
fi
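
# For reference, a minimal .env sketch (values are assumptions; treat
# .env.example as canonical):
#   OLLAMA_BASE_URL=http://localhost:11434
#   OLLAMA_EMBEDDING_MODEL=nomic-embed-text
#   KUZU_DB_PATH=./memory_graph_db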

# Test the setup
echo ""
echo "🧪 Running tests..."

# Test Ollama connection first
echo "Testing Ollama connection..."
if python test_server.py --connection-only; then
    echo ""
    echo "Testing memory server functionality..."
    python test_server.py
else
    echo ""
    echo "❌ Ollama connection test failed."
    echo "Please check your Ollama setup and try again."
    echo ""
    echo "🔧 Troubleshooting:"
    echo "1. Start Ollama: ollama serve"
    echo "2. Install model: ollama pull nomic-embed-text"
    echo "3. Check status: curl http://localhost:11434/api/tags"
    echo "4. Run: python test_server.py --help-setup"
    exit 1
fi

echo ""
echo "🎉 Setup complete!"
echo ""
echo "🚀 Next steps:"
echo "1. Keep Ollama running: ollama serve (in background)"
echo "2. Start the memory server: python memory_mcp_server.py"
echo "3. Configure your MCP client (see mcp_config_example.json)"
echo ""
echo "💡 Ollama Tips:"
echo "   - Server uses ~1.5GB RAM for nomic-embed-text"
echo "   - First embedding generation may be slower (model loading)"
echo "   - All processing happens locally (complete privacy)"
echo "   - No API costs or rate limits"
echo ""
echo "📚 For detailed docs: cat README.md"
echo "🔧 For troubleshooting: python test_server.py --help-setup"
echo ""
echo "🦙 Enjoy your self-hosted memory system!"