🚀 Features: - FastMCP 2.8.1+ integration with modern Python 3.11+ features - Kuzu graph database for intelligent memory relationships - Multi-provider embedding support (OpenAI, Ollama, Sentence Transformers) - Automatic relationship detection via semantic similarity - Graph traversal for connected memory discovery - 8 MCP tools for comprehensive memory operations 🦙 Self-Hosted Focus: - Ollama provider for complete privacy and control - Zero external dependencies for sacred trust applications - Production-ready with comprehensive testing - Interactive setup script with provider selection 📦 Complete Package: - memory_mcp_server.py (1,010 lines) - Main FastMCP server - Comprehensive test suite and examples - Detailed documentation including Ollama setup guide - MCP client configuration examples - Interactive setup script 🎯 Perfect for LLM memory systems requiring: - Privacy-first architecture - Intelligent relationship modeling - Graph-based memory exploration - Self-hosted deployment capabilities
284 lines
9.9 KiB
Python
#!/usr/bin/env python3
|
||
"""
|
||
Example usage of the Ultimate Memory MCP Server - Ollama Edition
|
||
This demonstrates common patterns and use cases for self-hosted memory.
|
||
"""
|
||
|
||
import asyncio
|
||
import json
|
||
|
||
# Example tool calls (these would be called through your MCP client)
|
||
|
||
def _print_store_examples() -> None:
    """Print Example 1: storing episodic, semantic, and procedural memories."""
    print("\n1️⃣ Storing Memories (Ollama-Powered)")

    examples = [
        {
            "tool": "store_memory",
            "args": {
                "content": "User mentioned they work best in the early morning hours",
                "memory_type": "episodic",
                "tags": ["schedule", "preference", "productivity"],
                "conversation_id": "productivity_chat"
            },
            "note": "Stored with nomic-embed-text embedding"
        },
        {
            "tool": "store_memory",
            "args": {
                "content": "Dark mode reduces eye strain during extended coding sessions",
                "memory_type": "semantic",
                "tags": ["health", "coding", "ui", "ergonomics"]
            },
            "note": "Semantic facts work great with Ollama embeddings"
        },
        {
            "tool": "store_memory",
            "args": {
                "content": "To enable focus mode: Cmd+Shift+D on Mac, Ctrl+Shift+D on Windows",
                "memory_type": "procedural",
                "tags": ["shortcuts", "focus", "productivity", "cross-platform"]
            },
            "note": "Step-by-step instructions with clear embedding"
        }
    ]

    for example in examples:
        print(f"📝 {example['tool']}:")
        print(f" Content: {example['args']['content']}")
        print(f" Type: {example['args']['memory_type']}")
        # .get() because not every example carries tags
        print(f" Tags: {example['args'].get('tags', [])}")
        print(f" 💡 {example['note']}")
        print()


def _print_search_examples() -> None:
    """Print Example 2: semantic and keyword search calls."""
    print("2️⃣ Searching Memories (Semantic + Keyword)")

    search_examples = [
        {
            "tool": "search_memories",
            "args": {
                "query": "productivity habits and work optimization",
                "search_type": "semantic",
                "max_results": 5
            },
            "note": "Semantic search excels at understanding intent"
        },
        {
            "tool": "search_memories",
            "args": {
                "query": "keyboard shortcuts",
                "search_type": "keyword"
            },
            "note": "Keyword search for exact phrases"
        },
        {
            "tool": "search_memories",
            "args": {
                "query": "user interface and visual comfort",
                "search_type": "semantic",
                "include_relationships": True
            },
            "note": "Includes related memories via graph connections"
        }
    ]

    for example in search_examples:
        print(f"🔍 {example['tool']}:")
        print(f" Query: '{example['args']['query']}'")
        print(f" Type: {example['args']['search_type']}")
        print(f" 💡 {example['note']}")
        print()


def _print_relationship_examples() -> None:
    """Print Example 3: creating typed, weighted memory relationships."""
    print("3️⃣ Creating Memory Relationships")

    relationship_examples = [
        {
            "tool": "create_relationship",
            "args": {
                "source_memory_id": "morning_preference_uuid",
                "target_memory_id": "productivity_boost_uuid",
                "relationship_type": "causes",
                "strength": 0.85,
                "context": "when following natural circadian rhythms"
            },
            "note": "Causal relationships help with reasoning"
        },
        {
            "tool": "create_relationship",
            "args": {
                "source_memory_id": "eye_strain_concern_uuid",
                "target_memory_id": "dark_mode_solution_uuid",
                "relationship_type": "enables",
                "strength": 0.9,
                "bidirectional": False
            },
            "note": "Solution relationships for problem-solving"
        },
        {
            "tool": "create_relationship",
            "args": {
                "source_memory_id": "focus_shortcut_uuid",
                "target_memory_id": "productivity_tools_uuid",
                "relationship_type": "part_of",
                "strength": 0.75,
                "context": "productivity toolkit"
            },
            "note": "Hierarchical relationships for organization"
        }
    ]

    for example in relationship_examples:
        print(f"🔗 {example['tool']}:")
        print(f" Type: {example['args']['relationship_type']}")
        print(f" Strength: {example['args']['strength']}")
        # context is optional on relationships, so fall back to 'N/A'
        print(f" Context: {example['args'].get('context', 'N/A')}")
        print(f" 💡 {example['note']}")
        print()


def _print_analysis_examples() -> None:
    """Print Example 4: graph analysis tools and Ollama status check."""
    print("4️⃣ Graph Analysis & Ollama Monitoring")

    analysis_examples = [
        {
            "tool": "find_connected_memories",
            "args": {
                "memory_id": "productivity_uuid",
                "max_depth": 3,
                "min_strength": 0.5
            },
            "note": "Discover chains of related memories"
        },
        {
            "tool": "analyze_memory_patterns",
            "args": {},
            "note": "Overall graph statistics and health"
        },
        {
            "tool": "check_ollama_status",
            "args": {},
            "note": "Verify Ollama server and model status"
        }
    ]

    for example in analysis_examples:
        print(f"📊 {example['tool']}:")
        if example['args']:
            for key, value in example['args'].items():
                print(f" {key}: {value}")
        else:
            # Some tools (e.g. status checks) take no arguments at all
            print(" No parameters required")
        print(f" 💡 {example['note']}")
        print()


def _print_use_cases() -> None:
    """Print Example 5: scenarios where a self-hosted Ollama backend shines."""
    print("5️⃣ Ollama-Specific Use Cases")

    ollama_use_cases = [
        {
            "scenario": "Privacy-First Personal Assistant",
            "description": "Complete data privacy with local processing",
            "memories": [
                "User prefers encrypted communication",
                "Works with sensitive financial data",
                "Values privacy over convenience"
            ],
            "benefits": ["No data sharing", "Offline capable", "User controlled"]
        },
        {
            "scenario": "Enterprise Knowledge Base",
            "description": "Corporate memory without cloud dependencies",
            "memories": [
                "Company coding standards for Python projects",
                "Internal API documentation and examples",
                "Team decision history and rationale"
            ],
            "benefits": ["IP protection", "No subscription costs", "Full control"]
        },
        {
            "scenario": "Research Assistant",
            "description": "Academic/research memory with complete transparency",
            "memories": [
                "Research methodology preferences",
                "Citation formats and academic standards",
                "Experiment results and observations"
            ],
            "benefits": ["Reproducible", "Auditable", "No vendor lock-in"]
        },
        {
            "scenario": "Development Environment Memory",
            "description": "Code assistant with local-first approach",
            "memories": [
                "Project-specific coding patterns",
                "Bug solutions and workarounds",
                "Performance optimization techniques"
            ],
            "benefits": ["Code privacy", "Instant response", "Custom models"]
        }
    ]

    for use_case in ollama_use_cases:
        print(f"🎯 {use_case['scenario']}")
        print(f" {use_case['description']}")
        # plain string: no placeholders, so no f-prefix needed
        print(" Sample memories:")
        for memory in use_case['memories']:
            print(f" • {memory}")
        print(f" Ollama benefits: {', '.join(use_case['benefits'])}")
        print()


def _print_performance_tips() -> None:
    """Print Example 6: practical tips for running Ollama efficiently."""
    print("6️⃣ Ollama Performance Tips")

    performance_tips = [
        {
            "tip": "Model Selection",
            "description": "Choose the right model for your use case",
            "examples": [
                "nomic-embed-text: Best balance of quality and speed",
                "all-minilm: Fastest, lowest memory usage",
                "mxbai-embed-large: Highest quality, more resources"
            ]
        },
        {
            "tip": "Memory Management",
            "description": "Optimize for your hardware",
            "examples": [
                "Keep Ollama server running to avoid reload overhead",
                "Monitor RAM usage during peak operations",
                "Use SSD storage for faster model loading"
            ]
        },
        {
            "tip": "Batch Operations",
            "description": "Group operations for efficiency",
            "examples": [
                "Store multiple memories in sequence",
                "Batch relationship creation",
                "Use semantic search for multiple queries"
            ]
        }
    ]

    for tip in performance_tips:
        print(f"⚡ {tip['tip']}")
        print(f" {tip['description']}")
        for example in tip['examples']:
            print(f" • {example}")
        print()


async def example_workflow() -> None:
    """Example workflow showing memory operations with Ollama.

    Walks through six illustrative sections (storing, searching,
    relationships, graph analysis, use cases, performance tips) and
    prints each example tool call. Nothing is executed against a real
    MCP server — this is documentation-by-example only.
    """
    print("🦙 Ultimate Memory MCP Server - Ollama Edition Examples")
    print("=" * 60)

    # Each section is a self-contained printer; order mirrors the
    # numbered headings in the output.
    _print_store_examples()
    _print_search_examples()
    _print_relationship_examples()
    _print_analysis_examples()
    _print_use_cases()
    _print_performance_tips()

    print("📚 For complete setup instructions: cat OLLAMA_SETUP.md")
    print("🔧 To test your setup: python test_server.py")
    print("🚀 To start the server: python memory_mcp_server.py")
    print("")
    print("🦙 Enjoy your self-hosted, privacy-first memory system!")
||
if __name__ == "__main__":
    # Run the example walkthrough when this file is executed directly
    # (the coroutine only prints; it performs no network or server calls).
    asyncio.run(example_workflow())