#!/usr/bin/env python3
"""
Test script for the Ultimate Memory MCP Server - Ollama Edition.
Run this to verify the server is working correctly with Ollama.

Usage:
    python test_server.py                    # full end-to-end test
    python test_server.py --connection-only  # test only the Ollama connection
    python test_server.py --provider-only    # test only the OllamaProvider class
    python test_server.py --help-setup       # show Ollama setup help
"""

import asyncio
import os
import sys
from pathlib import Path

import requests

# Add the project root to the Python path so memory_mcp_server imports cleanly
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from memory_mcp_server import MemoryMCPServer, MemoryType, OllamaProvider


async def test_ollama_connection():
    """Test Ollama server connection and model availability."""
    print("🦙 Testing Ollama connection...")

    base_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434')
    model = os.getenv('OLLAMA_EMBEDDING_MODEL', 'nomic-embed-text')

    print(f"📡 Server: {base_url}")
    print(f"🎯 Model: {model}")

    try:
        # Test server connection
        print("🔌 Checking server connection...")
        response = requests.get(f"{base_url}/api/tags", timeout=10)

        if response.status_code == 200:
            print("✅ Ollama server is running")

            # Check available models
            data = response.json()
            models = [m['name'] for m in data.get('models', [])]
            print(f"📦 Available models: {models}")

            # Ollama usually reports tags such as "nomic-embed-text:latest",
            # so match on the base name as well as the exact string.
            if any(m == model or m.split(':')[0] == model for m in models):
                print(f"✅ Embedding model '{model}' is available")
            else:
                print(f"❌ Embedding model '{model}' not found")
                print(f"💡 To install it, run: ollama pull {model}")
                return False

            # Test embedding generation
            print("🧪 Testing embedding generation...")
            embed_response = requests.post(
                f"{base_url}/api/embeddings",
                json={"model": model, "prompt": "test embedding"},
                timeout=30
            )

            if embed_response.status_code == 200:
                embedding = embed_response.json()["embedding"]
                print(f"✅ Successfully generated embedding ({len(embedding)} dimensions)")
                print(f"   First few values: {embedding[:5]}")
                return True
            else:
                print(f"❌ Embedding test failed: {embed_response.status_code}")
                print(f"   Response: {embed_response.text}")
                return False
        else:
            print(f"❌ Ollama server not responding: {response.status_code}")
            return False

    except requests.exceptions.ConnectionError:
        print(f"❌ Cannot connect to Ollama server at {base_url}")
        print("💡 Make sure Ollama is running: ollama serve")
        return False
    except Exception as e:
        print(f"❌ Ollama test failed: {e}")
        return False


async def test_ollama_provider():
    """Test the OllamaProvider class directly."""
    print("\n🔧 Testing OllamaProvider class...")

    base_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434')
    model = os.getenv('OLLAMA_EMBEDDING_MODEL', 'nomic-embed-text')

    try:
        provider = OllamaProvider(base_url, model)

        # Test connection check
        connected, message = provider.check_connection()
        print(f"📊 Connection check: {'✅' if connected else '❌'} {message}")

        if not connected:
            return False

        # Test embedding generation
        print("🔢 Testing embedding generation...")
        embedding = await provider.generate_embedding(
            "This is a test sentence for embedding generation"
        )
        print(f"✅ Generated embedding with {len(embedding)} dimensions")
        print(f"   First few values: {embedding[:5]}")

        # Test summary generation
        print("📝 Testing summary generation...")
        long_text = (
            "This is a longer piece of text that should be summarized. "
            "It contains multiple sentences and ideas that need to be condensed "
            "into a shorter, more manageable summary for storage and retrieval. "
            "The summary should capture the key points while being concise."
        )
        summary = await provider.generate_summary(long_text)
        print(f"✅ Generated summary: {summary}")

        return True

    except Exception as e:
        print(f"❌ Provider test failed: {e}")
        return False
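
# Reference helper: the similarity_score values reported by the semantic-search
# test below are assumed to be cosine similarities. This is an illustrative,
# dependency-free sketch for eyeballing scores between raw embeddings, not part
# of the server API.
def cosine_similarity(vec_a, vec_b):
    """Cosine similarity of two equal-length vectors, in [-1.0, 1.0]."""
    dot = sum(a * b for a, b in zip(vec_a, vec_b))
    norm_a = sum(a * a for a in vec_a) ** 0.5
    norm_b = sum(b * b for b in vec_b) ** 0.5
    return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0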

async def test_memory_server():
    """Test the full memory server functionality."""
    print("\n🧠 Testing Ultimate Memory MCP Server with Ollama...")

    # Configuration
    test_db_path = "./test_memory_db"
    base_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434')
    model = os.getenv('OLLAMA_EMBEDDING_MODEL', 'nomic-embed-text')

    try:
        provider = OllamaProvider(base_url, model)

        # Check connection first
        connected, message = provider.check_connection()
        if not connected:
            print(f"❌ Ollama not available: {message}")
            print("\nPlease ensure:")
            print("1. Ollama is running: ollama serve")
            print(f"2. Model is installed: ollama pull {model}")
            print(f"3. Server is accessible at: {base_url}")
            return
    except Exception as e:
        print(f"❌ Failed to create Ollama provider: {e}")
        return

    # Initialize server
    server = MemoryMCPServer(test_db_path, provider)

    try:
        print("📊 Initializing database...")
        await server.initialize_db()
        print("✅ Database initialized successfully")

        print("\n💾 Testing memory storage...")

        # Test storing different types of memories
        episodic_id = await server.store_memory(
            content="User clicked the save button at 2:30 PM during the demo",
            memory_type=MemoryType.EPISODIC,
            tags=["user-action", "demo", "save"],
            conversation_id="test_conversation"
        )
        print(f"✅ Stored episodic memory: {episodic_id}")

        semantic_id = await server.store_memory(
            content="User prefers dark mode interfaces for better eye comfort",
            memory_type=MemoryType.SEMANTIC,
            tags=["preference", "ui", "accessibility"]
        )
        print(f"✅ Stored semantic memory: {semantic_id}")

        procedural_id = await server.store_memory(
            content="To enable dark mode: Settings → Appearance → Theme → Dark",
            memory_type=MemoryType.PROCEDURAL,
            tags=["instructions", "ui", "settings"]
        )
        print(f"✅ Stored procedural memory: {procedural_id}")

        print("\n🔍 Testing semantic search...")
        search_results = await server.search_memories_semantic(
            query="user interface preferences",
            max_results=5,
            similarity_threshold=0.3
        )
        print(f"✅ Found {len(search_results)} memories matching 'user interface preferences'")
        for i, result in enumerate(search_results, 1):
            print(f"   {i}. Score: {result.similarity_score:.3f} - {result.content[:60]}...")

        print("\n🔗 Testing relationship creation...")
        relationship_id = await server.create_relationship(
            source_memory_id=semantic_id,
            target_memory_id=procedural_id,
            relationship_type="enables",
            strength=0.9,
            context="when user wants to implement their preference"
        )
        print(f"✅ Created relationship: {relationship_id}")

        print("\n🕸️ Testing connected memories...")
        connected = await server.find_connected_memories(
            memory_id=semantic_id,
            max_depth=2,
            min_strength=0.5
        )
        print(f"✅ Found {len(connected)} connected memories")
        for conn in connected:
            print(f"   Depth {conn['depth']}: {conn['content'][:60]}...")

        print("\n📝 Testing memory retrieval...")
        retrieved_memory = await server.get_memory_by_id(episodic_id)
        if retrieved_memory:
            print(f"✅ Retrieved memory: {retrieved_memory.content[:60]}...")
            print(f"   Type: {retrieved_memory.memory_type.value}")
            print(f"   Access count: {retrieved_memory.access_count}")

        print("\n💬 Testing conversation memories...")
        conv_memories = await server.get_conversation_memories("test_conversation")
        print(f"✅ Found {len(conv_memories)} memories in conversation")

        print("\n📊 Testing keyword search...")
        keyword_results = await server.search_memories_by_keywords(
            query="dark mode",
            max_results=5
        )
        print(f"✅ Found {len(keyword_results)} memories matching 'dark mode'")

        print("\n🎉 All tests passed successfully!")
        print(f"\nMemory server is ready for use with Ollama ({model}).")

    except Exception as e:
        print(f"❌ Test failed: {e}")
        import traceback
        traceback.print_exc()
    finally:
        server.close_db()

        # Clean up the test database
        import shutil
        if Path(test_db_path).exists():
            shutil.rmtree(test_db_path)
            print(f"🧹 Cleaned up test database: {test_db_path}")
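
# All tests read their Ollama target from the environment. Example invocation
# against a non-default host (hypothetical address; the variable names match
# the os.getenv() calls above):
#
#   OLLAMA_BASE_URL=http://192.168.1.50:11434 \
#   OLLAMA_EMBEDDING_MODEL=nomic-embed-text \
#   python test_server.py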

def print_ollama_help():
    """Print help for setting up Ollama."""
    print("\n📚 Ollama Setup Help")
    print("=" * 50)

    base_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434')
    model = os.getenv('OLLAMA_EMBEDDING_MODEL', 'nomic-embed-text')

    print("🦙 Ollama Setup Steps:")
    print("1. Install Ollama: https://ollama.ai/")
    print("2. Start the server: ollama serve")
    print(f"3. Pull the embedding model: ollama pull {model}")
    print("4. Optional: Pull a chat model for summaries: ollama pull llama3.2:1b")
    print()
    print("Current configuration:")
    print(f"   Server URL: {base_url}")
    print(f"   Embedding Model: {model}")
    print()
    print("Test commands:")
    print(f"   curl {base_url}/api/tags")
    print("   ollama list")
    print("   python test_server.py --connection-only")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Test Ultimate Memory MCP Server - Ollama Edition"
    )
    parser.add_argument("--connection-only", action="store_true",
                        help="Test only the Ollama connection")
    parser.add_argument("--provider-only", action="store_true",
                        help="Test only the OllamaProvider class")
    parser.add_argument("--help-setup", action="store_true",
                        help="Show Ollama setup help")

    args = parser.parse_args()

    if args.help_setup:
        print_ollama_help()
    elif args.connection_only:
        asyncio.run(test_ollama_connection())
    elif args.provider_only:
        asyncio.run(test_ollama_provider())
    else:
        asyncio.run(test_memory_server())
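
# Note: asyncio.run() returns the coroutine's result, and test_ollama_connection()
# returns True/False, so a CI-friendly variant (an assumption, not an existing
# flag) could propagate failures as an exit status:
#
#   ok = asyncio.run(test_ollama_connection())
#   sys.exit(0 if ok else 1)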