#!/bin/bash
# Ultimate Memory MCP Server - Ollama Edition Setup Script
# Self-hosted embeddings with complete privacy and control
#
# Usage: ./setup.sh   (run from the project root, next to requirements.txt / .env.example)
# Requires: python3 >= 3.11, pip, curl; Ollama is checked for and optionally installed.

set -e

echo "🦙 Setting up Ultimate Memory MCP Server - Ollama Edition..."

# --- Check Python version -----------------------------------------------------
# sort -V puts the smaller version first; if the smallest of the pair is not the
# required version, the detected version is older than 3.11.
python_version=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2)
required_version="3.11"

if [ "$(printf '%s\n' "$required_version" "$python_version" | sort -V | head -n1)" != "$required_version" ]; then
    echo "❌ Python 3.11+ is required. You have Python $python_version"
    echo "Please upgrade Python and try again."
    exit 1
fi

echo "✅ Python $python_version detected"

# --- Install dependencies -----------------------------------------------------
echo "📦 Installing dependencies..."
pip install -r requirements.txt

# --- Check if Ollama is installed ---------------------------------------------
echo "🔍 Checking for Ollama installation..."
if command -v ollama &> /dev/null; then
    echo "✅ Ollama is installed"
    ollama_version=$(ollama --version 2>&1 | head -n1)
    echo "   Version: $ollama_version"
else
    echo "❌ Ollama not found"
    echo ""
    echo "📥 Please install Ollama:"
    echo "   Linux/macOS: curl -fsSL https://ollama.ai/install.sh | sh"
    echo "   Or download from: https://ollama.ai/"
    echo ""
    # -r: don't let read interpret backslashes in the user's answer (SC2162)
    read -r -p "Continue setup without Ollama? (y/N): " continue_setup
    if [[ ! $continue_setup =~ ^[Yy]$ ]]; then
        echo "Please install Ollama and run setup again."
        exit 1
    fi
fi

# --- Create .env if missing ---------------------------------------------------
if [ ! -f .env ]; then
    echo "⚙️ Creating environment configuration..."
    cp .env.example .env
    echo "✅ Created .env file with default settings"
else
    echo "✅ Environment file already exists"
fi

# --- Test Ollama connection if available --------------------------------------
if command -v ollama &> /dev/null; then
    echo ""
    echo "🧪 Testing Ollama setup..."

    # Check if the Ollama server is running (its API answers on 11434 by default)
    if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
        echo "✅ Ollama server is running"

        # Check for the required embedding model
        model_name="nomic-embed-text"
        if ollama list | grep -q "$model_name"; then
            echo "✅ Embedding model '$model_name' is available"
        else
            echo "❌ Embedding model '$model_name' not found"
            echo ""
            read -r -p "Download the embedding model now? (Y/n): " download_model
            # Default is yes: anything that isn't an explicit N/n proceeds.
            if [[ ! $download_model =~ ^[Nn]$ ]]; then
                echo "📥 Downloading $model_name..."
                if ollama pull "$model_name"; then
                    echo "✅ Model downloaded successfully"
                else
                    echo "❌ Failed to download model"
                fi
            fi
        fi

        # Optional: check for the summary model
        summary_model="llama3.2:1b"
        if ollama list | grep -q "$summary_model"; then
            echo "✅ Summary model '$summary_model' is available"
        else
            echo "ℹ️ Summary model '$summary_model' not found (optional)"
            read -r -p "Download the summary model? (y/N): " download_summary
            # Default is no: only an explicit Y/y proceeds.
            if [[ $download_summary =~ ^[Yy]$ ]]; then
                echo "📥 Downloading $summary_model..."
                ollama pull "$summary_model"
            fi
        fi
    else
        echo "❌ Ollama server is not running"
        echo ""
        echo "🚀 To start Ollama server:"
        echo "   ollama serve"
        echo ""
        echo "   Then in another terminal:"
        echo "   ollama pull nomic-embed-text"
        echo ""
    fi
fi

# --- Create database directory ------------------------------------------------
mkdir -p memory_graph_db
echo "✅ Created database directory"

# --- Show current configuration -----------------------------------------------
echo ""
echo "📋 Configuration Summary:"
if [ -f .env ]; then
    # -f2- (not -f2) keeps the full value even when it contains '=' (e.g. URLs
    # with query strings).
    base_url=$(grep "OLLAMA_BASE_URL=" .env | cut -d= -f2-)
    model=$(grep "OLLAMA_EMBEDDING_MODEL=" .env | cut -d= -f2-)
    db_path=$(grep "KUZU_DB_PATH=" .env | cut -d= -f2-)
    echo "   Database: $db_path"
    echo "   Ollama URL: $base_url"
    echo "   Embedding Model: $model"
fi

# --- Test the setup -----------------------------------------------------------
echo ""
echo "🧪 Running tests..."

# Test Ollama connection first; only run the full suite if it succeeds.
echo "Testing Ollama connection..."
if python test_server.py --connection-only; then
    echo ""
    echo "Testing memory server functionality..."
    python test_server.py
else
    echo ""
    echo "❌ Ollama connection test failed."
    echo "Please check your Ollama setup and try again."
    echo ""
    echo "🔧 Troubleshooting:"
    echo "1. Start Ollama: ollama serve"
    echo "2. Install model: ollama pull nomic-embed-text"
    echo "3. Check status: curl http://localhost:11434/api/tags"
    echo "4. Run: python test_server.py --help-setup"
    exit 1
fi

echo ""
echo "🎉 Setup complete!"
echo ""
echo "🚀 Next steps:"
echo "1. Keep Ollama running: ollama serve (in background)"
echo "2. Start the memory server: python memory_mcp_server.py"
echo "3. Configure your MCP client (see mcp_config_example.json)"
echo ""
echo "💡 Ollama Tips:"
echo "   - Server uses ~1.5GB RAM for nomic-embed-text"
echo "   - First embedding generation may be slower (model loading)"
echo "   - All processing happens locally (complete privacy)"
echo "   - No API costs or rate limits"
echo ""
echo "📚 For detailed docs: cat README.md"
echo "🔧 For troubleshooting: python test_server.py --help-setup"
echo ""
echo "🦙 Enjoy your self-hosted memory system!"