diff --git a/RESUME.md b/RESUME.md
new file mode 100644
index 0000000..2ea11c3
--- /dev/null
+++ b/RESUME.md
@@ -0,0 +1,185 @@
+# 🔧 RESUME: Phase 1 MCP Integration Implementation
+
+## 🎯 **Current Status: Phase 1 Tools Need MCP Registration**
+
+### **✅ What We Discovered:**
+- **Phase 1 tools ARE fully implemented** (20+ tools across 5 modules)
+- **All Phase 1 classes inherit from MCPMixin** and use @mcp_tool decorators
+- **Issue Found**: `MCPMixin` falls back to a dummy `object` class instead of the real FastMCP `MCPMixin`
+- **Root Cause**: the silent import fallback in `base.py`, combined with FastMCP missing from the environment
+
+### **🔍 Phase 1 Modules & Tool Counts:**
+- **📂 Git Integration** (4 tools): git_status, git_diff, git_grep, git_commit_prepare
+- **🔧 Diff/Patch Operations** (3 tools): generate_diff, apply_patch, create_patch_file
+- **📁 File Operations** (7 tools): bulk_rename, enhanced_list_directory, file_backup, etc.
+- **🧠 Intelligent Completion** (3 tools): explain_tool, recommend_tools, suggest_workflow
+- **📊 Sneller Analytics** (3 tools): sneller_query, sneller_optimize, sneller_setup
+
+---
+
+## 🐛 **Critical Issue Identified:**
+
+### **Problem in `/enhanced_mcp/base.py`:**
+```python
+try:
+    from fastmcp import Context, FastMCP
+    from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_prompt, mcp_resource, mcp_tool
+except ImportError:
+    # Fallback for when FastMCP is not available
+    Context = None
+    FastMCP = None
+    MCPMixin = object  # ← THIS IS THE PROBLEM!
+    mcp_tool = lambda **kwargs: lambda func: func
+    mcp_resource = lambda **kwargs: lambda func: func
+    mcp_prompt = lambda **kwargs: lambda func: func
+```
+
+**Impact**: Phase 1 classes inherit from `object` instead of the real `MCPMixin`, so the `register_all()` method doesn't exist.
+
+---
+
+## 🔧 **Required Fixes:**
+
+### **1. Fix FastMCP Import Issue**
+```python
+# In base.py - ensure proper MCPMixin import
+try:
+    from fastmcp import Context, FastMCP
+    from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_prompt, mcp_resource, mcp_tool
+except ImportError as e:
+    print(f"FastMCP import failed: {e}")
+    # Add proper error handling instead of silent fallback
+```
+
+### **2. Server Registration Pattern**
+Based on your example, the server should work like this:
+```python
+# In mcp_server.py
+def create_server(name="enhanced-mcp-tools"):
+    app = FastMCP(name)
+
+    # Create instances
+    git = GitIntegration()
+    diff_patch = DiffPatchOperations()
+    file_ops = EnhancedFileOperations()
+    completion = IntelligentCompletion()
+    sneller = SnellerAnalytics()
+
+    # Register with prefixes (this should work once MCPMixin is fixed)
+    git.register_all(app, prefix="git")
+    diff_patch.register_all(app, prefix="diff_patch")
+    file_ops.register_all(app, prefix="file_ops")
+    completion.register_all(app, prefix="completion")
+    sneller.register_all(app, prefix="sneller")
+
+    return app
+```
+
+### **3. Verify Phase 1 Class Structure**
+All Phase 1 classes should look like this (they already do):
+```python
+class GitIntegration(MCPMixin):
+    """Git integration tools"""
+
+    @mcp_tool(name="git_status", description="Get comprehensive git repository status")
+    async def git_status(self, repository_path: str, include_untracked: Optional[bool] = True, ctx: Context = None):
+        # Implementation...
+```
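+
+### **4. Verify the Fix**
+Quick sanity check - a minimal sketch (not yet in the repo), assuming the `FASTMCP_AVAILABLE` flag and the `register_all` verification that this changeset adds to `enhanced_mcp/base.py`:
+```python
+# Run from the repo root after rebuilding/reinstalling the package.
+from enhanced_mcp.base import FASTMCP_AVAILABLE, MCPMixin
+
+assert FASTMCP_AVAILABLE, "FastMCP still missing - install with: pip install fastmcp"
+assert MCPMixin is not object, "MCPMixin is still the dummy fallback class"
+assert hasattr(MCPMixin, "register_all"), "This FastMCP version lacks register_all"
+print("✅ base.py imports the real MCPMixin - register_all() is available")
+```
+If all three assertions pass, the `register_all(app, prefix=...)` calls in `create_server()` should expose the prefixed Phase 1 tools listed in the testing plan below.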
+
+---
+
+## 🧪 **Testing Plan:**
+
+### **Phase 1 Tools to Test:**
+1. **Git Integration:**
+   - `enhanced-mcp-tools:git_git_status`
+   - `enhanced-mcp-tools:git_git_diff`
+   - `enhanced-mcp-tools:git_git_grep`
+   - `enhanced-mcp-tools:git_git_commit_prepare`
+
+2. **File Operations:**
+   - `enhanced-mcp-tools:file_ops_enhanced_list_directory`
+   - `enhanced-mcp-tools:file_ops_file_backup`
+   - `enhanced-mcp-tools:file_ops_bulk_rename`
+
+3. **Diff/Patch:**
+   - `enhanced-mcp-tools:diff_patch_generate_diff`
+   - `enhanced-mcp-tools:diff_patch_apply_patch`
+
+4. **Intelligent Completion:**
+   - `enhanced-mcp-tools:completion_explain_tool`
+   - `enhanced-mcp-tools:completion_recommend_tools`
+
+5. **Sneller Analytics:**
+   - `enhanced-mcp-tools:sneller_sneller_query`
+   - `enhanced-mcp-tools:sneller_sneller_setup`
+
+---
+
+## 📚 **Documentation Updates Needed:**
+
+### **1. Update README.md**
+Add Phase 1 tools to the main tool list:
+```markdown
+## 🛠️ Available Tools
+
+### 📂 **Phase 1: Essential Workflow Tools** (20+ tools)
+- **Git Integration**: Advanced git operations, search, and analysis
+- **File Operations**: Enhanced file management and backup
+- **Diff/Patch**: Comprehensive diff and patch operations
+- **Intelligent Completion**: Smart workflow suggestions
+- **Sneller Analytics**: High-performance analytics integration
+```
+
+### **2. Create Phase 1 Tool Reference**
+Create `/docs/PHASE1_TOOLS.md` with:
+- Complete tool listing with descriptions
+- Usage examples for each tool
+- LLM-friendly parameter guidance
+- Safety annotations and warnings
+
+### **3. Update LLM_TOOL_GUIDE.md**
+Add Phase 1 tools with proper annotations:
+```markdown
+## Git Integration Tools
+
+### git_status
+**Purpose**: Get comprehensive repository status
+**LLM Usage**: `git_status(repository_path=".")`
+**Safety**: 🟢 SAFE - Read-only operation
+```
+
+---
+
+## 🎯 **Implementation Priority:**
+
+1. **🔧 Fix MCPMixin import** in base.py (CRITICAL)
+2. **🧪 Test one Phase 1 tool** (git_status) to verify the fix works
+3. **🚀 Test all Phase 1 modules** systematically
+4. **📚 Document working Phase 1 tools**
+5. **🎉 Celebrate complete Enhanced MCP Tools** (Phases 1+2+3 all working!)
+
+---
+
+## 🎉 **Expected Outcome:**
+
+Once fixed, Enhanced MCP Tools will have **35+ professional-grade tools** across all three phases:
+
+- ✅ **Phase 1**: Essential workflow (20+ tools) - *Ready to enable*
+- ✅ **Phase 2**: Code quality pipeline (5 tools) - *Working*
+- ✅ **Phase 3**: Enhanced UX & Environment (5 tools) - *Working perfectly*
+
+**Total**: Full-featured development toolkit with git integration, file operations, code quality, environment management, and intelligent LLM guidance!
+
+---
+
+## 🚨 **Quick Fix Command:**
+```bash
+# After fixing base.py imports
+cd /home/rpm/claude/enhanced-mcp-tools
+# Rebuild package
+# Restart Claude
+# Test: enhanced-mcp-tools:git_git_status repository_path="."
+```
+
+**Status**: Ready to unlock 20+ additional Phase 1 tools with a simple import fix!
๐Ÿš€ \ No newline at end of file diff --git a/enhanced_mcp/base.py b/enhanced_mcp/base.py index 0ea83f9..745f3d3 100644 --- a/enhanced_mcp/base.py +++ b/enhanced_mcp/base.py @@ -35,17 +35,37 @@ try: except ImportError: requests = None +# FastMCP imports - these are REQUIRED for MCP functionality try: + from mcp.types import ToolAnnotations from fastmcp import Context, FastMCP from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_prompt, mcp_resource, mcp_tool -except ImportError: - # Fallback for when FastMCP is not available + + # Verify that MCPMixin has the required register_all method + if not hasattr(MCPMixin, 'register_all'): + raise ImportError("MCPMixin is missing register_all method - FastMCP version may be incompatible") + + FASTMCP_AVAILABLE = True + +except ImportError as e: + # FastMCP is REQUIRED - no silent fallbacks that break functionality + import sys + print(f"๐Ÿšจ CRITICAL: FastMCP import failed: {e}") + print("๐Ÿ“‹ Enhanced MCP Tools requires FastMCP to function.") + print("๐Ÿ”ง Please install with: pip install fastmcp") + print(" Or check your FastMCP installation and version compatibility.") + + # Still define the imports to prevent NameError, but mark as unavailable Context = None FastMCP = None - MCPMixin = object + MCPMixin = object # This will cause clear errors instead of silent failures mcp_tool = lambda **kwargs: lambda func: func mcp_resource = lambda **kwargs: lambda func: func mcp_prompt = lambda **kwargs: lambda func: func + ToolAnnotations = None + FASTMCP_AVAILABLE = False + + # Don't exit here - let individual modules handle the error appropriately # Common utility functions that multiple modules will use @@ -53,7 +73,38 @@ class MCPBase: """Base class with common functionality for all MCP tool classes""" def __init__(self): - pass + # Check if FastMCP is properly available when instantiating + if not FASTMCP_AVAILABLE: + raise RuntimeError( + "๐Ÿšจ Enhanced MCP Tools requires FastMCP but it's not available.\n" + "Please install with: pip install fastmcp" + ) + + def verify_mcp_ready(self) -> bool: + """Verify that this instance is ready for MCP registration""" + if not FASTMCP_AVAILABLE: + return False + if not hasattr(self, 'register_all'): + return False + return True + + def safe_register_all(self, app: 'FastMCP', prefix: str = None) -> bool: + """Safely register all tools with better error handling""" + if not self.verify_mcp_ready(): + print(f"โŒ Cannot register {self.__class__.__name__}: FastMCP not available or class not properly configured") + return False + + try: + if prefix: + self.register_all(app, prefix=prefix) + print(f"โœ… Registered {self.__class__.__name__} tools with prefix '{prefix}'") + else: + self.register_all(app) + print(f"โœ… Registered {self.__class__.__name__} tools") + return True + except Exception as e: + print(f"โŒ Failed to register {self.__class__.__name__}: {e}") + return False async def log_info(self, message: str, ctx: Optional[Context] = None): """Helper to log info messages""" @@ -155,6 +206,8 @@ __all__ = [ "mcp_prompt", "FastMCP", "Context", + "ToolAnnotations", + "FASTMCP_AVAILABLE", # Base class "MCPBase", ] diff --git a/enhanced_mcp/workflow_tools.py b/enhanced_mcp/workflow_tools.py index 1fca46f..6a1585a 100644 --- a/enhanced_mcp/workflow_tools.py +++ b/enhanced_mcp/workflow_tools.py @@ -33,8 +33,12 @@ class AdvancedSearchAnalysis(MCPMixin): """Batch search and replace across files with safety mechanisms""" try: if not dry_run and ctx: - await ctx.error("๐Ÿšจ DESTRUCTIVE OPERATION BLOCKED: Use dry_run=True 
first to preview changes!") - return {"error": "SAFETY: Must use dry_run=True to preview changes before execution"} + await ctx.error( + "๐Ÿšจ DESTRUCTIVE OPERATION BLOCKED: Use dry_run=True first to preview changes!" + ) + return { + "error": "SAFETY: Must use dry_run=True to preview changes before execution" + } directory_path = Path(directory) if not directory_path.exists(): @@ -43,10 +47,10 @@ class AdvancedSearchAnalysis(MCPMixin): # Determine file pattern for matching if file_pattern is None: file_pattern = "*" - + # Find matching files matching_files = [] - if '*' in file_pattern or '?' in file_pattern: + if "*" in file_pattern or "?" in file_pattern: # Use glob pattern for pattern_match in directory_path.rglob(file_pattern): if pattern_match.is_file(): @@ -66,46 +70,55 @@ class AdvancedSearchAnalysis(MCPMixin): # Skip binary files and very large files if file_path.stat().st_size > 10 * 1024 * 1024: # 10MB limit continue - + # Read file content - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() - + # Find matches import re + matches = list(re.finditer(search_pattern, content)) - + if matches: # Perform replacement new_content = re.sub(search_pattern, replacement, content) - + # Create backup if requested and not dry run backup_path = None if backup and not dry_run: - backup_path = file_path.with_suffix(f"{file_path.suffix}.bak.{int(time.time())}") + backup_path = file_path.with_suffix( + f"{file_path.suffix}.bak.{int(time.time())}" + ) shutil.copy2(file_path, backup_path) backup_paths.append(str(backup_path)) - + # Write new content if not dry run if not dry_run: - with open(file_path, 'w', encoding='utf-8') as f: + with open(file_path, "w", encoding="utf-8") as f: f.write(new_content) - + # Record change information change_info = { "file": str(file_path.relative_to(directory_path)), "matches": len(matches), "backup_created": backup_path is not None, "backup_path": str(backup_path) if backup_path else None, - "preview": { - "first_match": { - "line": content[:matches[0].start()].count('\n') + 1, - "old": matches[0].group(), - "new": re.sub(search_pattern, replacement, matches[0].group()) + "preview": ( + { + "first_match": { + "line": content[: matches[0].start()].count("\n") + 1, + "old": matches[0].group(), + "new": re.sub( + search_pattern, replacement, matches[0].group() + ), + } } - } if matches else None + if matches + else None + ), } - + changes.append(change_info) total_matches += len(matches) @@ -125,17 +138,21 @@ class AdvancedSearchAnalysis(MCPMixin): "files_scanned": len(matching_files), "files_with_matches": len(changes), "total_matches": total_matches, - "backups_created": len(backup_paths) + "backups_created": len(backup_paths), }, "changes": changes, - "backup_paths": backup_paths + "backup_paths": backup_paths, } if ctx: if dry_run: - await ctx.info(f"DRY RUN: Found {total_matches} matches in {len(changes)} files. Review before setting dry_run=False") + await ctx.info( + f"DRY RUN: Found {total_matches} matches in {len(changes)} files. 
Review before setting dry_run=False" + ) else: - await ctx.info(f"Replaced {total_matches} matches in {len(changes)} files with {len(backup_paths)} backups created") + await ctx.info( + f"Replaced {total_matches} matches in {len(changes)} files with {len(backup_paths)} backups created" + ) return result @@ -162,8 +179,14 @@ class AdvancedSearchAnalysis(MCPMixin): if ctx: await ctx.info(f"Analyzing codebase: {directory}") - exclude_patterns = exclude_patterns or ["*.pyc", "__pycache__", ".git", ".venv", "node_modules"] - + exclude_patterns = exclude_patterns or [ + "*.pyc", + "__pycache__", + ".git", + ".venv", + "node_modules", + ] + def should_exclude(path: Path) -> bool: for pattern in exclude_patterns: if fnmatch.fnmatch(path.name, pattern) or fnmatch.fnmatch(str(path), pattern): @@ -175,7 +198,7 @@ class AdvancedSearchAnalysis(MCPMixin): "timestamp": datetime.now().isoformat(), "metrics": {}, "files_analyzed": [], - "summary": {} + "summary": {}, } # Collect files @@ -190,33 +213,42 @@ class AdvancedSearchAnalysis(MCPMixin): if "loc" in include_metrics: total_lines = 0 file_types = {} - + for file_path in files: try: if file_path.suffix: ext = file_path.suffix.lower() - if ext in ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.go', '.rs', '.rb']: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + if ext in [ + ".py", + ".js", + ".ts", + ".java", + ".cpp", + ".c", + ".go", + ".rs", + ".rb", + ]: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: lines = len(f.readlines()) total_lines += lines - + if ext not in file_types: file_types[ext] = {"files": 0, "lines": 0} file_types[ext]["files"] += 1 file_types[ext]["lines"] += lines - - stats["files_analyzed"].append({ - "path": str(file_path.relative_to(dir_path)), - "extension": ext, - "lines": lines - }) + + stats["files_analyzed"].append( + { + "path": str(file_path.relative_to(dir_path)), + "extension": ext, + "lines": lines, + } + ) except Exception: continue - stats["metrics"]["loc"] = { - "total_lines": total_lines, - "file_types": file_types - } + stats["metrics"]["loc"] = {"total_lines": total_lines, "file_types": file_types} # Complexity metrics (enhanced implementation) if "complexity" in include_metrics: @@ -226,39 +258,48 @@ class AdvancedSearchAnalysis(MCPMixin): "average_function_length": 0, "largest_files": [], "cyclomatic_complexity": {"files": [], "average": 0}, - "file_complexity_distribution": {"simple": 0, "moderate": 0, "complex": 0, "very_complex": 0} + "file_complexity_distribution": { + "simple": 0, + "moderate": 0, + "complex": 0, + "very_complex": 0, + }, } - + function_lengths = [] all_complexity_scores = [] - + for file_path in files: - if file_path.suffix.lower() in ['.py', '.js', '.ts', '.java', '.cpp', '.c']: + if file_path.suffix.lower() in [".py", ".js", ".ts", ".java", ".cpp", ".c"]: try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() - lines = content.count('\n') + 1 - + lines = content.count("\n") + 1 + # Basic complexity analysis - file_complexity = self._analyze_file_complexity(content, file_path.suffix.lower()) - + file_complexity = self._analyze_file_complexity( + content, file_path.suffix.lower() + ) + complexity_data["total_functions"] += file_complexity["functions"] complexity_data["total_classes"] += file_complexity["classes"] function_lengths.extend(file_complexity["function_lengths"]) - + # File size categorization if lines > 500: - 
complexity_data["largest_files"].append({ - "file": str(file_path.relative_to(dir_path)), - "lines": lines, - "functions": file_complexity["functions"], - "classes": file_complexity["classes"] - }) - + complexity_data["largest_files"].append( + { + "file": str(file_path.relative_to(dir_path)), + "lines": lines, + "functions": file_complexity["functions"], + "classes": file_complexity["classes"], + } + ) + # Categorize file complexity complexity_score = file_complexity["complexity_score"] all_complexity_scores.append(complexity_score) - + if complexity_score < 10: complexity_data["file_complexity_distribution"]["simple"] += 1 elif complexity_score < 20: @@ -266,40 +307,46 @@ class AdvancedSearchAnalysis(MCPMixin): elif complexity_score < 50: complexity_data["file_complexity_distribution"]["complex"] += 1 else: - complexity_data["file_complexity_distribution"]["very_complex"] += 1 - - complexity_data["cyclomatic_complexity"]["files"].append({ - "file": str(file_path.relative_to(dir_path)), - "score": complexity_score - }) - + complexity_data["file_complexity_distribution"][ + "very_complex" + ] += 1 + + complexity_data["cyclomatic_complexity"]["files"].append( + { + "file": str(file_path.relative_to(dir_path)), + "score": complexity_score, + } + ) + except Exception: continue - + # Calculate averages if function_lengths: - complexity_data["average_function_length"] = sum(function_lengths) / len(function_lengths) - + complexity_data["average_function_length"] = sum(function_lengths) / len( + function_lengths + ) + if all_complexity_scores: - complexity_data["cyclomatic_complexity"]["average"] = sum(all_complexity_scores) / len(all_complexity_scores) - + complexity_data["cyclomatic_complexity"]["average"] = sum( + all_complexity_scores + ) / len(all_complexity_scores) + # Sort largest files and keep top 10 complexity_data["largest_files"] = sorted( - complexity_data["largest_files"], - key=lambda x: x["lines"], - reverse=True + complexity_data["largest_files"], key=lambda x: x["lines"], reverse=True )[:10] - + # Sort by complexity score and keep top 10 complexity_data["cyclomatic_complexity"]["files"] = sorted( complexity_data["cyclomatic_complexity"]["files"], key=lambda x: x["score"], - reverse=True + reverse=True, )[:10] - + stats["metrics"]["complexity"] = complexity_data - # Dependencies metrics (enhanced implementation) + # Dependencies metrics (enhanced implementation) if "dependencies" in include_metrics: deps = { "package_files": [], @@ -307,33 +354,42 @@ class AdvancedSearchAnalysis(MCPMixin): "dependency_details": {}, "vulnerabilities_detected": False, "outdated_deps": [], - "recommendations": [] + "recommendations": [], } - + # Find and analyze dependency files for file_path in files: file_name = file_path.name.lower() - - if file_name in ["requirements.txt", "package.json", "cargo.toml", "go.mod", "pyproject.toml", "pipfile", "composer.json", "gemfile"]: + + if file_name in [ + "requirements.txt", + "package.json", + "cargo.toml", + "go.mod", + "pyproject.toml", + "pipfile", + "composer.json", + "gemfile", + ]: deps["package_files"].append(str(file_path.relative_to(dir_path))) - + # Analyze specific dependency files try: dep_analysis = self._analyze_dependency_file(file_path) deps["dependency_details"][file_name] = dep_analysis - + if "count" in dep_analysis: deps["dependency_counts"][file_name] = dep_analysis["count"] - + except Exception as e: deps["dependency_details"][file_name] = {"error": str(e)} - + # Import analysis for Python files import_counts = {"total": 0, "stdlib": 
0, "third_party": 0, "local": 0} unique_imports = set() - + for file_path in files: - if file_path.suffix.lower() == '.py': + if file_path.suffix.lower() == ".py": try: imports = self._extract_python_imports(file_path) import_counts["total"] += len(imports["all"]) @@ -343,22 +399,28 @@ class AdvancedSearchAnalysis(MCPMixin): unique_imports.update(imports["all"]) except Exception: continue - + deps["import_analysis"] = { "counts": import_counts, "unique_imports": len(unique_imports), - "most_imported": list(unique_imports)[:20] # Top 20 + "most_imported": list(unique_imports)[:20], # Top 20 } - + # Generate recommendations if len(deps["package_files"]) == 0: - deps["recommendations"].append("No dependency files found - consider adding requirements.txt or package.json") + deps["recommendations"].append( + "No dependency files found - consider adding requirements.txt or package.json" + ) elif len(deps["package_files"]) > 2: - deps["recommendations"].append("Multiple dependency files detected - ensure consistency") - + deps["recommendations"].append( + "Multiple dependency files detected - ensure consistency" + ) + if import_counts["third_party"] > 50: - deps["recommendations"].append("High number of third-party dependencies - consider dependency review") - + deps["recommendations"].append( + "High number of third-party dependencies - consider dependency review" + ) + stats["metrics"]["dependencies"] = deps if ctx: @@ -373,204 +435,243 @@ class AdvancedSearchAnalysis(MCPMixin): def _analyze_file_complexity(self, content: str, extension: str) -> Dict[str, Any]: """Analyze complexity metrics for a single file""" - complexity = { - "functions": 0, - "classes": 0, - "function_lengths": [], - "complexity_score": 0 - } - - lines = content.split('\n') + complexity = {"functions": 0, "classes": 0, "function_lengths": [], "complexity_score": 0} + + lines = content.split("\n") current_function_lines = 0 - - if extension == '.py': + + if extension == ".py": # Python complexity analysis for i, line in enumerate(lines): stripped = line.strip() - + # Count functions and classes - if stripped.startswith('def '): + if stripped.startswith("def "): complexity["functions"] += 1 if current_function_lines > 0: complexity["function_lengths"].append(current_function_lines) current_function_lines = 1 - elif stripped.startswith('class '): + elif stripped.startswith("class "): complexity["classes"] += 1 elif current_function_lines > 0: current_function_lines += 1 - + # Complexity indicators - if any(keyword in stripped for keyword in ['if ', 'elif ', 'for ', 'while ', 'try:', 'except:', 'with ']): + if any( + keyword in stripped + for keyword in ["if ", "elif ", "for ", "while ", "try:", "except:", "with "] + ): complexity["complexity_score"] += 1 - if any(keyword in stripped for keyword in ['and ', 'or ', '&&', '||']): + if any(keyword in stripped for keyword in ["and ", "or ", "&&", "||"]): complexity["complexity_score"] += 0.5 - - elif extension in ['.js', '.ts']: + + elif extension in [".js", ".ts"]: # JavaScript/TypeScript complexity analysis for line in lines: stripped = line.strip() - + # Count functions - if 'function ' in stripped or '=>' in stripped: + if "function " in stripped or "=>" in stripped: complexity["functions"] += 1 - if 'class ' in stripped: + if "class " in stripped: complexity["classes"] += 1 - + # Complexity indicators - if any(keyword in stripped for keyword in ['if ', 'else', 'for ', 'while ', 'switch', 'case', 'try', 'catch']): + if any( + keyword in stripped + for keyword in [ + "if ", + 
"else", + "for ", + "while ", + "switch", + "case", + "try", + "catch", + ] + ): complexity["complexity_score"] += 1 - if any(keyword in stripped for keyword in ['&&', '||', '?', ':']): + if any(keyword in stripped for keyword in ["&&", "||", "?", ":"]): complexity["complexity_score"] += 0.5 - + # Add final function length if we were tracking one if current_function_lines > 0: complexity["function_lengths"].append(current_function_lines) - + return complexity def _analyze_dependency_file(self, file_path: Path) -> Dict[str, Any]: """Analyze a specific dependency file""" analysis = {"count": 0, "dependencies": [], "type": "unknown"} - + try: if file_path.name.lower() == "package.json": analysis["type"] = "npm" - with open(file_path, 'r') as f: + with open(file_path, "r") as f: data = json.load(f) deps = {} if "dependencies" in data: deps.update(data["dependencies"]) if "devDependencies" in data: deps.update(data["devDependencies"]) - + analysis["count"] = len(deps) analysis["dependencies"] = list(deps.keys())[:20] # Top 20 - + elif file_path.name.lower() in ["requirements.txt", "requirements-dev.txt"]: analysis["type"] = "pip" - with open(file_path, 'r') as f: - lines = [line.strip() for line in f if line.strip() and not line.startswith('#')] + with open(file_path, "r") as f: + lines = [ + line.strip() for line in f if line.strip() and not line.startswith("#") + ] analysis["count"] = len(lines) - analysis["dependencies"] = [line.split('==')[0].split('>=')[0].split('<=')[0] for line in lines[:20]] - + analysis["dependencies"] = [ + line.split("==")[0].split(">=")[0].split("<=")[0] for line in lines[:20] + ] + elif file_path.name.lower() == "pyproject.toml": analysis["type"] = "python-project" # Basic TOML parsing without external dependencies - with open(file_path, 'r') as f: + with open(file_path, "r") as f: content = f.read() # Simple dependency extraction deps = [] - if '[project.dependencies]' in content or 'dependencies = [' in content: - lines = content.split('\n') + if "[project.dependencies]" in content or "dependencies = [" in content: + lines = content.split("\n") in_deps = False for line in lines: - if 'dependencies' in line and '[' in line: + if "dependencies" in line and "[" in line: in_deps = True continue - if in_deps and ']' in line: + if in_deps and "]" in line: break if in_deps and '"' in line: - dep = line.strip().strip(',').strip('"') + dep = line.strip().strip(",").strip('"') if dep: - deps.append(dep.split('>=')[0].split('==')[0]) - + deps.append(dep.split(">=")[0].split("==")[0]) + analysis["count"] = len(deps) analysis["dependencies"] = deps[:20] - + elif file_path.name.lower() == "cargo.toml": analysis["type"] = "cargo" - with open(file_path, 'r') as f: + with open(file_path, "r") as f: content = f.read() # Simple Cargo.toml parsing - lines = content.split('\n') + lines = content.split("\n") deps = [] in_deps = False for line in lines: - if '[dependencies]' in line: + if "[dependencies]" in line: in_deps = True continue - if in_deps and line.startswith('['): + if in_deps and line.startswith("["): break - if in_deps and '=' in line: - dep_name = line.split('=')[0].strip() + if in_deps and "=" in line: + dep_name = line.split("=")[0].strip() if dep_name: deps.append(dep_name) - + analysis["count"] = len(deps) analysis["dependencies"] = deps[:20] - + except Exception as e: analysis["error"] = str(e) - + return analysis def _extract_python_imports(self, file_path: Path) -> Dict[str, List[str]]: """Extract import statements from Python file""" imports = {"all": [], 
"stdlib": [], "third_party": [], "local": []} - + # Standard library modules (partial list) stdlib_modules = { - 'os', 'sys', 'json', 're', 'time', 'datetime', 'collections', 'itertools', - 'functools', 'typing', 'pathlib', 'subprocess', 'threading', 'multiprocessing', - 'urllib', 'http', 'email', 'html', 'xml', 'csv', 'sqlite3', 'logging', - 'unittest', 'argparse', 'configparser', 'tempfile', 'shutil', 'glob' + "os", + "sys", + "json", + "re", + "time", + "datetime", + "collections", + "itertools", + "functools", + "typing", + "pathlib", + "subprocess", + "threading", + "multiprocessing", + "urllib", + "http", + "email", + "html", + "xml", + "csv", + "sqlite3", + "logging", + "unittest", + "argparse", + "configparser", + "tempfile", + "shutil", + "glob", } - + try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() - + # Use AST for more accurate parsing try: tree = ast.parse(content) for node in ast.walk(tree): if isinstance(node, ast.Import): for alias in node.names: - module_name = alias.name.split('.')[0] + module_name = alias.name.split(".")[0] imports["all"].append(module_name) - + if module_name in stdlib_modules: imports["stdlib"].append(module_name) - elif module_name.startswith('.') or '.' in alias.name: + elif module_name.startswith(".") or "." in alias.name: imports["local"].append(module_name) else: imports["third_party"].append(module_name) - + elif isinstance(node, ast.ImportFrom): if node.module: - module_name = node.module.split('.')[0] + module_name = node.module.split(".")[0] imports["all"].append(module_name) - + if module_name in stdlib_modules: imports["stdlib"].append(module_name) elif node.level > 0: # Relative import imports["local"].append(module_name) else: imports["third_party"].append(module_name) - + except SyntaxError: # Fallback to simple regex parsing import re - import_pattern = r'^(?:from\s+(\S+)\s+import|import\s+(\S+))' - for line in content.split('\n'): + + import_pattern = r"^(?:from\s+(\S+)\s+import|import\s+(\S+))" + for line in content.split("\n"): match = re.match(import_pattern, line.strip()) if match: module = match.group(1) or match.group(2) if module: - module_name = module.split('.')[0] + module_name = module.split(".")[0] imports["all"].append(module_name) if module_name in stdlib_modules: imports["stdlib"].append(module_name) else: imports["third_party"].append(module_name) - + except Exception: pass - + # Remove duplicates while preserving order for key in imports: imports[key] = list(dict.fromkeys(imports[key])) - + return imports @mcp_tool(name="find_duplicates", description="๐ŸŸก SAFE: Detect duplicate code or files") @@ -592,12 +693,23 @@ class AdvancedSearchAnalysis(MCPMixin): # Default file types to analyze if file_types is None: - file_types = ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.cs', '.rb', '.php', '.go'] + file_types = [ + ".py", + ".js", + ".ts", + ".java", + ".cpp", + ".c", + ".cs", + ".rb", + ".php", + ".go", + ] # Collect files files = [] exclude_patterns = ["*.pyc", "__pycache__", ".git", ".venv", "node_modules", "*.min.js"] - + def should_exclude(path: Path) -> bool: for pattern in exclude_patterns: if fnmatch.fnmatch(path.name, pattern) or fnmatch.fnmatch(str(path), pattern): @@ -605,9 +717,11 @@ class AdvancedSearchAnalysis(MCPMixin): return False for file_path in dir_path.rglob("*"): - if (file_path.is_file() and - not should_exclude(file_path) and - file_path.suffix.lower() in file_types): + if ( + 
file_path.is_file() + and not should_exclude(file_path) + and file_path.suffix.lower() in file_types + ): files.append(file_path) results = { @@ -622,8 +736,8 @@ class AdvancedSearchAnalysis(MCPMixin): "identical_file_groups": 0, "similar_file_pairs": 0, "duplicate_function_groups": 0, - "potential_savings_kb": 0 - } + "potential_savings_kb": 0, + }, } if len(files) == 0: @@ -635,12 +749,16 @@ class AdvancedSearchAnalysis(MCPMixin): results["summary"]["identical_file_groups"] = len(identical_groups) # Find similar files (by content similarity) - similar_pairs = await self._find_similar_files(files, dir_path, similarity_threshold, ctx) + similar_pairs = await self._find_similar_files( + files, dir_path, similarity_threshold, ctx + ) results["similar_files"] = similar_pairs results["summary"]["similar_file_pairs"] = len(similar_pairs) # Find duplicate functions/methods - duplicate_functions = await self._find_duplicate_functions(files, dir_path, similarity_threshold) + duplicate_functions = await self._find_duplicate_functions( + files, dir_path, similarity_threshold + ) results["duplicate_functions"] = duplicate_functions results["summary"]["duplicate_function_groups"] = len(duplicate_functions) @@ -650,17 +768,21 @@ class AdvancedSearchAnalysis(MCPMixin): if len(group["files"]) > 1: file_size = group["size_bytes"] total_savings += file_size * (len(group["files"]) - 1) - + results["summary"]["potential_savings_kb"] = round(total_savings / 1024, 2) # Generate recommendations results["recommendations"] = self._generate_duplicate_recommendations(results) if ctx: - total_duplicates = (results["summary"]["identical_file_groups"] + - results["summary"]["similar_file_pairs"] + - results["summary"]["duplicate_function_groups"]) - await ctx.info(f"Duplicate analysis complete: {total_duplicates} duplicate groups found") + total_duplicates = ( + results["summary"]["identical_file_groups"] + + results["summary"]["similar_file_pairs"] + + results["summary"]["duplicate_function_groups"] + ) + await ctx.info( + f"Duplicate analysis complete: {total_duplicates} duplicate groups found" + ) return results @@ -670,30 +792,31 @@ class AdvancedSearchAnalysis(MCPMixin): await self.log_critical(error_msg, exception=e, ctx=ctx) return {"error": error_msg} - async def _find_identical_files(self, files: List[Path], base_path: Path) -> List[Dict[str, Any]]: + async def _find_identical_files( + self, files: List[Path], base_path: Path + ) -> List[Dict[str, Any]]: """Find files with identical content using hash comparison""" import hashlib - + file_hashes = {} - + for file_path in files: try: # Skip very large files (>10MB) if file_path.stat().st_size > 10 * 1024 * 1024: continue - - with open(file_path, 'rb') as f: + + with open(file_path, "rb") as f: content = f.read() file_hash = hashlib.md5(content).hexdigest() - + if file_hash not in file_hashes: file_hashes[file_hash] = [] - - file_hashes[file_hash].append({ - "path": str(file_path.relative_to(base_path)), - "size_bytes": len(content) - }) - + + file_hashes[file_hash].append( + {"path": str(file_path.relative_to(base_path)), "size_bytes": len(content)} + ) + except Exception: continue @@ -701,110 +824,123 @@ class AdvancedSearchAnalysis(MCPMixin): identical_groups = [] for file_hash, file_list in file_hashes.items(): if len(file_list) > 1: - identical_groups.append({ - "hash": file_hash, - "files": file_list, - "count": len(file_list), - "size_bytes": file_list[0]["size_bytes"] - }) + identical_groups.append( + { + "hash": file_hash, + "files": file_list, + 
"count": len(file_list), + "size_bytes": file_list[0]["size_bytes"], + } + ) return sorted(identical_groups, key=lambda x: x["count"], reverse=True) - async def _find_similar_files(self, files: List[Path], base_path: Path, threshold: float, ctx: Context) -> List[Dict[str, Any]]: + async def _find_similar_files( + self, files: List[Path], base_path: Path, threshold: float, ctx: Context + ) -> List[Dict[str, Any]]: """Find files with similar content using text comparison""" similar_pairs = [] - + # Process files in batches to avoid memory issues batch_size = 50 - + for i in range(0, len(files), batch_size): - batch_files = files[i:i + batch_size] - + batch_files = files[i : i + batch_size] + # Load file contents for this batch file_contents = {} for file_path in batch_files: try: if file_path.stat().st_size > 1024 * 1024: # Skip files > 1MB continue - - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() # Normalize content for comparison normalized = self._normalize_code_content(content) if len(normalized) > 100: # Skip very small files file_contents[file_path] = normalized - + except Exception: continue # Compare files in this batch with all previous files batch_paths = list(file_contents.keys()) - + for j in range(len(batch_paths)): for k in range(j + 1, len(batch_paths)): file1, file2 = batch_paths[j], batch_paths[k] - + similarity = self._calculate_text_similarity( - file_contents[file1], - file_contents[file2] + file_contents[file1], file_contents[file2] ) - + if similarity >= threshold: - similar_pairs.append({ - "file1": str(file1.relative_to(base_path)), - "file2": str(file2.relative_to(base_path)), - "similarity_percent": round(similarity, 1), - "file1_size": file1.stat().st_size, - "file2_size": file2.stat().st_size - }) + similar_pairs.append( + { + "file1": str(file1.relative_to(base_path)), + "file2": str(file2.relative_to(base_path)), + "similarity_percent": round(similarity, 1), + "file1_size": file1.stat().st_size, + "file2_size": file2.stat().st_size, + } + ) - return sorted(similar_pairs, key=lambda x: x["similarity_percent"], reverse=True)[:20] # Top 20 + return sorted(similar_pairs, key=lambda x: x["similarity_percent"], reverse=True)[ + :20 + ] # Top 20 - async def _find_duplicate_functions(self, files: List[Path], base_path: Path, threshold: float) -> List[Dict[str, Any]]: + async def _find_duplicate_functions( + self, files: List[Path], base_path: Path, threshold: float + ) -> List[Dict[str, Any]]: """Find duplicate functions/methods across files""" function_groups = {} - + for file_path in files: - if file_path.suffix.lower() not in ['.py', '.js', '.ts', '.java']: + if file_path.suffix.lower() not in [".py", ".js", ".ts", ".java"]: continue - + try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() - + functions = self._extract_functions(content, file_path.suffix.lower()) - + for func in functions: # Create a normalized signature for comparison normalized = self._normalize_function_content(func["content"]) - + if len(normalized) < 50: # Skip very small functions continue - + # Group similar functions found_group = False for signature, group in function_groups.items(): if self._calculate_text_similarity(normalized, signature) >= threshold: - group["functions"].append({ - "file": str(file_path.relative_to(base_path)), - "name": func["name"], - "line_start": 
func["line_start"], - "line_end": func["line_end"] - }) + group["functions"].append( + { + "file": str(file_path.relative_to(base_path)), + "name": func["name"], + "line_start": func["line_start"], + "line_end": func["line_end"], + } + ) found_group = True break - + if not found_group: function_groups[normalized] = { "signature": normalized[:100] + "...", - "functions": [{ - "file": str(file_path.relative_to(base_path)), - "name": func["name"], - "line_start": func["line_start"], - "line_end": func["line_end"] - }] + "functions": [ + { + "file": str(file_path.relative_to(base_path)), + "name": func["name"], + "line_start": func["line_start"], + "line_end": func["line_end"], + } + ], } - + except Exception: continue @@ -812,160 +948,191 @@ class AdvancedSearchAnalysis(MCPMixin): duplicate_groups = [] for signature, group in function_groups.items(): if len(group["functions"]) > 1: - duplicate_groups.append({ - "signature_preview": group["signature"], - "functions": group["functions"], - "count": len(group["functions"]) - }) + duplicate_groups.append( + { + "signature_preview": group["signature"], + "functions": group["functions"], + "count": len(group["functions"]), + } + ) return sorted(duplicate_groups, key=lambda x: x["count"], reverse=True)[:10] # Top 10 def _normalize_code_content(self, content: str) -> str: """Normalize code content for comparison""" - lines = content.split('\n') + lines = content.split("\n") normalized_lines = [] - + for line in lines: # Remove leading/trailing whitespace stripped = line.strip() - + # Skip empty lines and comments - if not stripped or stripped.startswith('#') or stripped.startswith('//'): + if not stripped or stripped.startswith("#") or stripped.startswith("//"): continue - + # Basic normalization (could be enhanced) - stripped = re.sub(r'\s+', ' ', stripped) # Normalize whitespace + stripped = re.sub(r"\s+", " ", stripped) # Normalize whitespace normalized_lines.append(stripped) - - return '\n'.join(normalized_lines) + + return "\n".join(normalized_lines) def _normalize_function_content(self, content: str) -> str: """Normalize function content for comparison""" # Remove function signature line and normalize body - lines = content.split('\n')[1:] # Skip first line (signature) - return self._normalize_code_content('\n'.join(lines)) + lines = content.split("\n")[1:] # Skip first line (signature) + return self._normalize_code_content("\n".join(lines)) def _calculate_text_similarity(self, text1: str, text2: str) -> float: """Calculate similarity between two text strings""" if not text1 or not text2: return 0.0 - + # Simple character-based similarity shorter = min(len(text1), len(text2)) longer = max(len(text1), len(text2)) - + if longer == 0: return 100.0 - + # Count matching characters in order matches = 0 for i in range(shorter): if text1[i] == text2[i]: matches += 1 - + # Calculate similarity as percentage return (matches / longer) * 100 def _extract_functions(self, content: str, extension: str) -> List[Dict[str, Any]]: """Extract function definitions from code""" functions = [] - lines = content.split('\n') - - if extension == '.py': + lines = content.split("\n") + + if extension == ".py": current_function = None indent_level = 0 - + for i, line in enumerate(lines): stripped = line.strip() - if stripped.startswith('def ') and ':' in stripped: + if stripped.startswith("def ") and ":" in stripped: # Save previous function if current_function: current_function["line_end"] = i - 1 - current_function["content"] = 
'\n'.join(lines[current_function["line_start"]:i]) + current_function["content"] = "\n".join( + lines[current_function["line_start"] : i] + ) functions.append(current_function) - + # Start new function - func_name = stripped.split('(')[0].replace('def ', '').strip() + func_name = stripped.split("(")[0].replace("def ", "").strip() current_function = { "name": func_name, "line_start": i, "line_end": i, - "content": "" + "content": "", } indent_level = len(line) - len(line.lstrip()) - - elif current_function and line and len(line) - len(line.lstrip()) <= indent_level and stripped: + + elif ( + current_function + and line + and len(line) - len(line.lstrip()) <= indent_level + and stripped + ): # Function ended current_function["line_end"] = i - 1 - current_function["content"] = '\n'.join(lines[current_function["line_start"]:i]) + current_function["content"] = "\n".join( + lines[current_function["line_start"] : i] + ) functions.append(current_function) current_function = None - + # Add last function if current_function: current_function["line_end"] = len(lines) - 1 - current_function["content"] = '\n'.join(lines[current_function["line_start"]:]) + current_function["content"] = "\n".join(lines[current_function["line_start"] :]) functions.append(current_function) - - elif extension in ['.js', '.ts']: + + elif extension in [".js", ".ts"]: # Basic JavaScript/TypeScript function extraction for i, line in enumerate(lines): stripped = line.strip() - if ('function ' in stripped or '=>' in stripped) and '{' in stripped: + if ("function " in stripped or "=>" in stripped) and "{" in stripped: # Extract function name (simplified) - if 'function ' in stripped: - func_name = stripped.split('function ')[1].split('(')[0].strip() + if "function " in stripped: + func_name = stripped.split("function ")[1].split("(")[0].strip() else: func_name = f"arrow_function_line_{i}" - + # Find function end (simplified - just look for next function or end) end_line = i + 10 # Limit search for j in range(i + 1, min(len(lines), i + 50)): - if ('function ' in lines[j] or lines[j].strip().startswith('}')): + if "function " in lines[j] or lines[j].strip().startswith("}"): end_line = j break - - functions.append({ - "name": func_name, - "line_start": i, - "line_end": end_line, - "content": '\n'.join(lines[i:end_line + 1]) - }) - + + functions.append( + { + "name": func_name, + "line_start": i, + "line_end": end_line, + "content": "\n".join(lines[i : end_line + 1]), + } + ) + return functions def _generate_duplicate_recommendations(self, results: Dict[str, Any]) -> List[str]: """Generate actionable recommendations for duplicate cleanup""" recommendations = [] summary = results["summary"] - - if (summary["identical_file_groups"] == 0 and - summary["similar_file_pairs"] == 0 and - summary["duplicate_function_groups"] == 0): - recommendations.append("โœ… No significant duplicates found! Codebase is well-organized.") + + if ( + summary["identical_file_groups"] == 0 + and summary["similar_file_pairs"] == 0 + and summary["duplicate_function_groups"] == 0 + ): + recommendations.append( + "โœ… No significant duplicates found! Codebase is well-organized." 
+ ) return recommendations - + if summary["identical_file_groups"] > 0: - recommendations.append(f"๐Ÿ”ด Found {summary['identical_file_groups']} groups of identical files - consider removing duplicates") + recommendations.append( + f"๐Ÿ”ด Found {summary['identical_file_groups']} groups of identical files - consider removing duplicates" + ) if summary["potential_savings_kb"] > 0: - recommendations.append(f"๐Ÿ’พ Potential space savings: {summary['potential_savings_kb']} KB") - + recommendations.append( + f"๐Ÿ’พ Potential space savings: {summary['potential_savings_kb']} KB" + ) + if summary["similar_file_pairs"] > 0: - recommendations.append(f"โš ๏ธ Found {summary['similar_file_pairs']} pairs of similar files - review for consolidation opportunities") - + recommendations.append( + f"โš ๏ธ Found {summary['similar_file_pairs']} pairs of similar files - review for consolidation opportunities" + ) + if summary["duplicate_function_groups"] > 0: - recommendations.append(f"๐Ÿ”ง Found {summary['duplicate_function_groups']} groups of duplicate functions - consider refactoring into shared utilities") - + recommendations.append( + f"๐Ÿ”ง Found {summary['duplicate_function_groups']} groups of duplicate functions - consider refactoring into shared utilities" + ) + # Specific actions if summary["identical_file_groups"] > 0: - recommendations.append("๐Ÿ’ก Action: Remove or symlink identical files to reduce redundancy") - + recommendations.append( + "๐Ÿ’ก Action: Remove or symlink identical files to reduce redundancy" + ) + if summary["duplicate_function_groups"] > 0: - recommendations.append("๐Ÿ’ก Action: Extract duplicate functions into a shared module or utility class") - + recommendations.append( + "๐Ÿ’ก Action: Extract duplicate functions into a shared module or utility class" + ) + if summary["similar_file_pairs"] > 0: - recommendations.append("๐Ÿ’ก Action: Review similar files for opportunities to merge or create templates") - + recommendations.append( + "๐Ÿ’ก Action: Review similar files for opportunities to merge or create templates" + ) + return recommendations @@ -973,7 +1140,8 @@ class DevelopmentWorkflow(MCPMixin): """Development workflow automation tools""" @mcp_tool( - name="run_tests", description="๐ŸŸก SAFE: Execute test suites with intelligent framework detection" + name="run_tests", + description="๐ŸŸก SAFE: Execute test suites with intelligent framework detection", ) async def run_tests( self, @@ -1007,17 +1175,22 @@ class DevelopmentWorkflow(MCPMixin): # Build command based on framework cmd = [] env_vars = os.environ.copy() - + if detected_framework == "pytest": cmd = ["python", "-m", "pytest"] if coverage: - cmd.extend(["--cov", str(test_path_obj.parent if test_path_obj.is_file() else test_path_obj)]) + cmd.extend( + [ + "--cov", + str(test_path_obj.parent if test_path_obj.is_file() else test_path_obj), + ] + ) cmd.extend(["--cov-report", "term-missing"]) if pattern: cmd.extend(["-k", pattern]) cmd.append(str(test_path_obj)) cmd.extend(["-v", "--tb=short"]) - + elif detected_framework == "jest": cmd = ["npx", "jest"] if coverage: @@ -1026,7 +1199,7 @@ class DevelopmentWorkflow(MCPMixin): cmd.extend(["--testNamePattern", pattern]) cmd.append(str(test_path_obj)) cmd.extend(["--verbose"]) - + elif detected_framework == "mocha": cmd = ["npx", "mocha"] if pattern: @@ -1034,22 +1207,22 @@ class DevelopmentWorkflow(MCPMixin): cmd.append(str(test_path_obj)) cmd.append("--reporter") cmd.append("json") - + else: return {"error": f"Unsupported test framework: {detected_framework}"} # Run the 
tests start_time = time.time() - + result = subprocess.run( cmd, cwd=test_path_obj.parent if test_path_obj.is_file() else test_path_obj, capture_output=True, text=True, env=env_vars, - timeout=300 # 5 minute timeout + timeout=300, # 5 minute timeout ) - + end_time = time.time() duration = round(end_time - start_time, 2) @@ -1070,31 +1243,39 @@ class DevelopmentWorkflow(MCPMixin): stdout = result.stdout failed_pattern = r"(\d+) failed" passed_pattern = r"(\d+) passed" - + failed_match = re.search(failed_pattern, stdout) passed_match = re.search(passed_pattern, stdout) - - test_results.update({ - "tests_passed": int(passed_match.group(1)) if passed_match else 0, - "tests_failed": int(failed_match.group(1)) if failed_match else 0, - "coverage_info": self._extract_coverage_info(stdout) if coverage else None - }) - + + test_results.update( + { + "tests_passed": int(passed_match.group(1)) if passed_match else 0, + "tests_failed": int(failed_match.group(1)) if failed_match else 0, + "coverage_info": self._extract_coverage_info(stdout) if coverage else None, + } + ) + elif detected_framework in ["jest", "mocha"]: # Basic parsing for JavaScript frameworks - test_results.update({ - "tests_passed": stdout.count("โœ“") if "โœ“" in stdout else 0, - "tests_failed": stdout.count("โœ—") if "โœ—" in stdout else 0, - }) + test_results.update( + { + "tests_passed": stdout.count("โœ“") if "โœ“" in stdout else 0, + "tests_failed": stdout.count("โœ—") if "โœ—" in stdout else 0, + } + ) # Summary total_tests = test_results.get("tests_passed", 0) + test_results.get("tests_failed", 0) test_results["total_tests"] = total_tests - test_results["pass_rate"] = round((test_results.get("tests_passed", 0) / max(total_tests, 1)) * 100, 1) + test_results["pass_rate"] = round( + (test_results.get("tests_passed", 0) / max(total_tests, 1)) * 100, 1 + ) if ctx: status_emoji = "โœ…" if test_results["success"] else "โŒ" - await ctx.info(f"{status_emoji} Tests completed: {test_results['tests_passed']}/{total_tests} passed ({duration}s)") + await ctx.info( + f"{status_emoji} Tests completed: {test_results['tests_passed']}/{total_tests} passed ({duration}s)" + ) return test_results @@ -1103,13 +1284,13 @@ class DevelopmentWorkflow(MCPMixin): if ctx: await ctx.error(error_msg) return {"error": error_msg} - + except FileNotFoundError: error_msg = f"Test framework '{detected_framework}' not found in PATH" if ctx: await ctx.error(error_msg) return {"error": error_msg, "suggestion": f"Install {detected_framework} or check PATH"} - + except Exception as e: error_msg = f"Test execution failed: {str(e)}" if ctx: @@ -1120,16 +1301,13 @@ class DevelopmentWorkflow(MCPMixin): """Extract coverage information from pytest output""" try: # Look for coverage summary line - lines = stdout.split('\n') + lines = stdout.split("\n") for line in lines: if "TOTAL" in line and "%" in line: parts = line.split() for part in parts: - if part.endswith('%'): - return { - "total_coverage": part, - "raw_line": line.strip() - } + if part.endswith("%"): + return {"total_coverage": part, "raw_line": line.strip()} return None except Exception: return None @@ -1162,7 +1340,7 @@ class DevelopmentWorkflow(MCPMixin): # Group files by type for appropriate linter selection file_groups = self._group_files_by_type(valid_files) - + # Auto-detect linters if not specified if linters is None: linters = self._detect_available_linters(file_groups) @@ -1173,12 +1351,7 @@ class DevelopmentWorkflow(MCPMixin): "linters_used": linters, "fix_mode": fix, "lint_results": {}, - "summary": 
{ - "total_issues": 0, - "errors": 0, - "warnings": 0, - "fixed_issues": 0 - } + "summary": {"total_issues": 0, "errors": 0, "warnings": 0, "fixed_issues": 0}, } # Run linters for each file type @@ -1190,33 +1363,34 @@ class DevelopmentWorkflow(MCPMixin): if not type_linters: results["lint_results"][file_type] = { "status": "skipped", - "reason": f"No suitable linters available for {file_type} files" + "reason": f"No suitable linters available for {file_type} files", } continue # Run each applicable linter for linter in type_linters: linter_key = f"{file_type}_{linter}" - + try: linter_result = await self._run_linter(linter, files, fix, ctx) results["lint_results"][linter_key] = linter_result - + # Update summary stats if "issues" in linter_result: issues = linter_result["issues"] results["summary"]["total_issues"] += len(issues) - results["summary"]["errors"] += len([i for i in issues if i.get("severity") == "error"]) - results["summary"]["warnings"] += len([i for i in issues if i.get("severity") == "warning"]) - + results["summary"]["errors"] += len( + [i for i in issues if i.get("severity") == "error"] + ) + results["summary"]["warnings"] += len( + [i for i in issues if i.get("severity") == "warning"] + ) + if "fixed_count" in linter_result: results["summary"]["fixed_issues"] += linter_result["fixed_count"] except Exception as e: - results["lint_results"][linter_key] = { - "status": "failed", - "error": str(e) - } + results["lint_results"][linter_key] = {"status": "failed", "error": str(e)} # Generate recommendations results["recommendations"] = self._generate_lint_recommendations(results) @@ -1225,11 +1399,15 @@ class DevelopmentWorkflow(MCPMixin): total_issues = results["summary"]["total_issues"] fixed_issues = results["summary"]["fixed_issues"] status_emoji = "โœ…" if total_issues == 0 else "โš ๏ธ" if total_issues < 10 else "๐Ÿšจ" - + if fix and fixed_issues > 0: - await ctx.info(f"{status_emoji} Linting complete: {total_issues} issues found, {fixed_issues} auto-fixed") + await ctx.info( + f"{status_emoji} Linting complete: {total_issues} issues found, {fixed_issues} auto-fixed" + ) else: - await ctx.info(f"{status_emoji} Linting complete: {total_issues} issues found across {len(valid_files)} files") + await ctx.info( + f"{status_emoji} Linting complete: {total_issues} issues found across {len(valid_files)} files" + ) return results @@ -1248,60 +1426,60 @@ class DevelopmentWorkflow(MCPMixin): "json": [], "yaml": [], "markdown": [], - "other": [] + "other": [], } - + for file_path in files: suffix = file_path.suffix.lower() - - if suffix in ['.py', '.pyx', '.pyi']: + + if suffix in [".py", ".pyx", ".pyi"]: groups["python"].append(file_path) - elif suffix in ['.js', '.jsx', '.mjs']: + elif suffix in [".js", ".jsx", ".mjs"]: groups["javascript"].append(file_path) - elif suffix in ['.ts', '.tsx']: + elif suffix in [".ts", ".tsx"]: groups["typescript"].append(file_path) - elif suffix in ['.json']: + elif suffix in [".json"]: groups["json"].append(file_path) - elif suffix in ['.yaml', '.yml']: + elif suffix in [".yaml", ".yml"]: groups["yaml"].append(file_path) - elif suffix in ['.md', '.markdown']: + elif suffix in [".md", ".markdown"]: groups["markdown"].append(file_path) else: groups["other"].append(file_path) - + return {k: v for k, v in groups.items() if v} # Remove empty groups def _detect_available_linters(self, file_groups: Dict[str, List[Path]]) -> List[str]: """Detect which linters are available on the system""" available_linters = [] - + # Python linters if "python" in 
file_groups: for linter in ["flake8", "pylint", "pycodestyle", "pyflakes"]: if self._is_command_available(linter): available_linters.append(linter) - + # JavaScript/TypeScript linters if "javascript" in file_groups or "typescript" in file_groups: for linter in ["eslint", "jshint"]: if self._is_command_available(linter): available_linters.append(linter) - + # JSON linters if "json" in file_groups: if self._is_command_available("jsonlint"): available_linters.append("jsonlint") - + # YAML linters if "yaml" in file_groups: if self._is_command_available("yamllint"): available_linters.append("yamllint") - + # Markdown linters if "markdown" in file_groups: if self._is_command_available("markdownlint"): available_linters.append("markdownlint") - + return available_linters def _get_linters_for_type(self, file_type: str, available_linters: List[str]) -> List[str]: @@ -1312,28 +1490,26 @@ class DevelopmentWorkflow(MCPMixin): "typescript": ["eslint"], "json": ["jsonlint"], "yaml": ["yamllint"], - "markdown": ["markdownlint"] + "markdown": ["markdownlint"], } - + applicable = type_mapping.get(file_type, []) return [linter for linter in applicable if linter in available_linters] def _is_command_available(self, command: str) -> bool: """Check if a command is available in PATH""" try: - result = subprocess.run( - [command, "--version"], - capture_output=True, - timeout=5 - ) + result = subprocess.run([command, "--version"], capture_output=True, timeout=5) return result.returncode == 0 except (subprocess.TimeoutExpired, FileNotFoundError): return False - async def _run_linter(self, linter: str, files: List[Path], fix: bool, ctx: Context) -> Dict[str, Any]: + async def _run_linter( + self, linter: str, files: List[Path], fix: bool, ctx: Context + ) -> Dict[str, Any]: """Run a specific linter on files""" file_paths = [str(f) for f in files] - + try: if linter == "flake8": return await self._run_flake8(file_paths, fix) @@ -1351,99 +1527,109 @@ class DevelopmentWorkflow(MCPMixin): return await self._run_markdownlint(file_paths) else: return {"status": "unsupported", "linter": linter} - + except Exception as e: return {"status": "error", "linter": linter, "error": str(e)} async def _run_flake8(self, file_paths: List[str], fix: bool) -> Dict[str, Any]: """Run flake8 linter""" cmd = ["flake8", "--format=json"] + file_paths - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=60) - + issues = [] if result.stdout: try: # flake8 doesn't output valid JSON by default, parse line by line - for line in result.stdout.strip().split('\n'): + for line in result.stdout.strip().split("\n"): if line: # Format: filename:line:col: code message - parts = line.split(':', 3) + parts = line.split(":", 3) if len(parts) >= 4: - issues.append({ - "file": parts[0], - "line": int(parts[1]), - "column": int(parts[2]), - "code": parts[3].split()[0], - "message": parts[3].split(' ', 1)[1] if ' ' in parts[3] else parts[3], - "severity": "error" if parts[3].startswith(' E') else "warning" - }) + issues.append( + { + "file": parts[0], + "line": int(parts[1]), + "column": int(parts[2]), + "code": parts[3].split()[0], + "message": ( + parts[3].split(" ", 1)[1] if " " in parts[3] else parts[3] + ), + "severity": "error" if parts[3].startswith(" E") else "warning", + } + ) except Exception: # Fallback to simple parsing issues = [{"message": result.stdout, "severity": "error"}] - + return { "linter": "flake8", "status": "completed", "exit_code": result.returncode, "issues": issues, - "can_fix": False # flake8 doesn't auto-fix + 
"can_fix": False, # flake8 doesn't auto-fix } async def _run_pylint(self, file_paths: List[str], fix: bool) -> Dict[str, Any]: """Run pylint linter""" cmd = ["pylint", "--output-format=json"] + file_paths - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) - + issues = [] if result.stdout: try: pylint_output = json.loads(result.stdout) for issue in pylint_output: - issues.append({ - "file": issue.get("path", ""), - "line": issue.get("line", 0), - "column": issue.get("column", 0), - "code": issue.get("message-id", ""), - "message": issue.get("message", ""), - "severity": issue.get("type", "warning") - }) + issues.append( + { + "file": issue.get("path", ""), + "line": issue.get("line", 0), + "column": issue.get("column", 0), + "code": issue.get("message-id", ""), + "message": issue.get("message", ""), + "severity": issue.get("type", "warning"), + } + ) except json.JSONDecodeError: issues = [{"message": "Failed to parse pylint output", "severity": "error"}] - + return { "linter": "pylint", "status": "completed", "exit_code": result.returncode, "issues": issues, - "can_fix": False # pylint doesn't auto-fix + "can_fix": False, # pylint doesn't auto-fix } async def _run_pycodestyle(self, file_paths: List[str], fix: bool) -> Dict[str, Any]: """Run pycodestyle linter""" cmd = ["pycodestyle"] + file_paths - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=60) - + issues = [] fixed_count = 0 - + if result.stdout: - for line in result.stdout.strip().split('\n'): + for line in result.stdout.strip().split("\n"): if line: # Format: filename:line:col: code message - parts = line.split(':', 3) + parts = line.split(":", 3) if len(parts) >= 4: - issues.append({ - "file": parts[0], - "line": int(parts[1]), - "column": int(parts[2]), - "code": parts[3].split()[0], - "message": parts[3].split(' ', 1)[1] if ' ' in parts[3] else parts[3], - "severity": "warning" - }) - + issues.append( + { + "file": parts[0], + "line": int(parts[1]), + "column": int(parts[2]), + "code": parts[3].split()[0], + "message": ( + parts[3].split(" ", 1)[1] if " " in parts[3] else parts[3] + ), + "severity": "warning", + } + ) + # Try autopep8 for fixing if requested if fix and self._is_command_available("autopep8"): for file_path in file_paths: @@ -1451,14 +1637,14 @@ class DevelopmentWorkflow(MCPMixin): fix_result = subprocess.run(fix_cmd, capture_output=True, timeout=30) if fix_result.returncode == 0: fixed_count += 1 - + return { "linter": "pycodestyle", "status": "completed", "exit_code": result.returncode, "issues": issues, "can_fix": True, - "fixed_count": fixed_count + "fixed_count": fixed_count, } async def _run_eslint(self, file_paths: List[str], fix: bool) -> Dict[str, Any]: @@ -1467,162 +1653,182 @@ class DevelopmentWorkflow(MCPMixin): if fix: cmd.append("--fix") cmd.extend(file_paths) - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=60) - + issues = [] fixed_count = 0 - + if result.stdout: try: eslint_output = json.loads(result.stdout) for file_result in eslint_output: - fixed_count += file_result.get("fixableErrorCount", 0) + file_result.get("fixableWarningCount", 0) - + fixed_count += file_result.get("fixableErrorCount", 0) + file_result.get( + "fixableWarningCount", 0 + ) + for message in file_result.get("messages", []): - issues.append({ - "file": file_result.get("filePath", ""), - "line": message.get("line", 0), - "column": message.get("column", 0), - "code": message.get("ruleId", ""), - "message": message.get("message", ""), - "severity": 
message.get("severity", 1) == 2 and "error" or "warning" - }) + issues.append( + { + "file": file_result.get("filePath", ""), + "line": message.get("line", 0), + "column": message.get("column", 0), + "code": message.get("ruleId", ""), + "message": message.get("message", ""), + "severity": message.get("severity", 1) == 2 + and "error" + or "warning", + } + ) except json.JSONDecodeError: issues = [{"message": "Failed to parse ESLint output", "severity": "error"}] - + return { "linter": "eslint", "status": "completed", "exit_code": result.returncode, "issues": issues, "can_fix": True, - "fixed_count": fixed_count if fix else 0 + "fixed_count": fixed_count if fix else 0, } async def _run_jsonlint(self, file_paths: List[str]) -> Dict[str, Any]: """Run JSON linter""" issues = [] - + for file_path in file_paths: try: - with open(file_path, 'r') as f: + with open(file_path, "r") as f: json.load(f) except json.JSONDecodeError as e: - issues.append({ - "file": file_path, - "line": e.lineno, - "column": e.colno, - "message": str(e), - "severity": "error" - }) + issues.append( + { + "file": file_path, + "line": e.lineno, + "column": e.colno, + "message": str(e), + "severity": "error", + } + ) except Exception as e: - issues.append({ - "file": file_path, - "message": f"Failed to read file: {str(e)}", - "severity": "error" - }) - + issues.append( + { + "file": file_path, + "message": f"Failed to read file: {str(e)}", + "severity": "error", + } + ) + return { "linter": "jsonlint", "status": "completed", "exit_code": 0 if not issues else 1, "issues": issues, - "can_fix": False + "can_fix": False, } async def _run_yamllint(self, file_paths: List[str]) -> Dict[str, Any]: """Run YAML linter""" cmd = ["yamllint", "--format=parsable"] + file_paths - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) - + issues = [] if result.stdout: - for line in result.stdout.strip().split('\n'): - if line and ':' in line: + for line in result.stdout.strip().split("\n"): + if line and ":" in line: # Format: filename:line:col: [level] message - parts = line.split(':', 3) + parts = line.split(":", 3) if len(parts) >= 4: level_msg = parts[3].strip() level = "warning" if "[error]" in level_msg: level = "error" - - issues.append({ - "file": parts[0], - "line": int(parts[1]) if parts[1].isdigit() else 0, - "column": int(parts[2]) if parts[2].isdigit() else 0, - "message": level_msg.replace("[error]", "").replace("[warning]", "").strip(), - "severity": level - }) - + + issues.append( + { + "file": parts[0], + "line": int(parts[1]) if parts[1].isdigit() else 0, + "column": int(parts[2]) if parts[2].isdigit() else 0, + "message": level_msg.replace("[error]", "") + .replace("[warning]", "") + .strip(), + "severity": level, + } + ) + return { "linter": "yamllint", "status": "completed", "exit_code": result.returncode, "issues": issues, - "can_fix": False + "can_fix": False, } async def _run_markdownlint(self, file_paths: List[str]) -> Dict[str, Any]: """Run Markdown linter""" cmd = ["markdownlint"] + file_paths - + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) - + issues = [] if result.stdout: - for line in result.stdout.strip().split('\n'): - if line and ':' in line: + for line in result.stdout.strip().split("\n"): + if line and ":" in line: # Format: filename:line message - parts = line.split(':', 2) + parts = line.split(":", 2) if len(parts) >= 3: - issues.append({ - "file": parts[0], - "line": int(parts[1]) if parts[1].isdigit() else 0, - "message": parts[2].strip(), - "severity": 
"warning" - }) - + issues.append( + { + "file": parts[0], + "line": int(parts[1]) if parts[1].isdigit() else 0, + "message": parts[2].strip(), + "severity": "warning", + } + ) + return { "linter": "markdownlint", "status": "completed", "exit_code": result.returncode, "issues": issues, - "can_fix": False + "can_fix": False, } def _generate_lint_recommendations(self, results: Dict[str, Any]) -> List[str]: """Generate actionable recommendations based on lint results""" recommendations = [] summary = results["summary"] - + if summary["total_issues"] == 0: recommendations.append("โœ… No linting issues found! Code quality looks excellent.") return recommendations - + if summary["errors"] > 0: recommendations.append(f"๐Ÿšจ Fix {summary['errors']} critical errors before deployment") - + if summary["warnings"] > 10: - recommendations.append(f"โš ๏ธ Consider addressing {summary['warnings']} warnings for better code quality") + recommendations.append( + f"โš ๏ธ Consider addressing {summary['warnings']} warnings for better code quality" + ) elif summary["warnings"] > 0: recommendations.append(f"Address {summary['warnings']} minor warnings when convenient") - + if summary["fixed_issues"] > 0: recommendations.append(f"โœ… Auto-fixed {summary['fixed_issues']} issues") - + # Suggest auto-fixing if available can_fix_tools = [] for result_key, result in results["lint_results"].items(): if result.get("can_fix") and result.get("issues"): tool = result.get("linter", result_key) can_fix_tools.append(tool) - + if can_fix_tools and not results["fix_mode"]: - recommendations.append(f"๐Ÿ’ก Run with fix=True to auto-fix issues using: {', '.join(set(can_fix_tools))}") - + recommendations.append( + f"๐Ÿ’ก Run with fix=True to auto-fix issues using: {', '.join(set(can_fix_tools))}" + ) + return recommendations @mcp_tool(name="format_code", description="๐ŸŸก SAFE: Auto-format code using standard formatters") @@ -1655,7 +1861,7 @@ class DevelopmentWorkflow(MCPMixin): # Group files by type for appropriate formatter selection file_groups = self._group_files_for_formatting(valid_files) - + results = { "total_files": len(valid_files), "file_groups": {k: len(v) for k, v in file_groups.items()}, @@ -1666,8 +1872,8 @@ class DevelopmentWorkflow(MCPMixin): "formatted_files": 0, "unchanged_files": 0, "failed_files": 0, - "total_changes": 0 - } + "total_changes": 0, + }, } # Format each file group with appropriate formatter @@ -1677,11 +1883,11 @@ class DevelopmentWorkflow(MCPMixin): # Determine formatter for this file type selected_formatter = self._select_formatter_for_type(file_type, formatter) - + if not selected_formatter: results["format_results"][file_type] = { "status": "skipped", - "reason": f"No suitable formatter available for {file_type} files" + "reason": f"No suitable formatter available for {file_type} files", } continue @@ -1690,26 +1896,30 @@ class DevelopmentWorkflow(MCPMixin): results["format_results"][file_type] = { "status": "skipped", "reason": f"Formatter '{selected_formatter}' not installed", - "suggestion": self._get_install_suggestion(selected_formatter) + "suggestion": self._get_install_suggestion(selected_formatter), } continue # Run the formatter try: - format_result = await self._run_formatter(selected_formatter, files, config_file, ctx) + format_result = await self._run_formatter( + selected_formatter, files, config_file, ctx + ) results["format_results"][file_type] = format_result - + # Update summary if "files_changed" in format_result: results["summary"]["formatted_files"] += 
format_result["files_changed"] - results["summary"]["unchanged_files"] += format_result.get("files_unchanged", 0) + results["summary"]["unchanged_files"] += format_result.get( + "files_unchanged", 0 + ) results["summary"]["total_changes"] += format_result.get("total_changes", 0) except Exception as e: results["format_results"][file_type] = { "status": "failed", "formatter": selected_formatter, - "error": str(e) + "error": str(e), } results["summary"]["failed_files"] += len(files) @@ -1718,9 +1928,13 @@ class DevelopmentWorkflow(MCPMixin): if ctx: formatted = results["summary"]["formatted_files"] - total = results["summary"]["formatted_files"] + results["summary"]["unchanged_files"] + total = ( + results["summary"]["formatted_files"] + results["summary"]["unchanged_files"] + ) status_emoji = "โœ…" if results["summary"]["failed_files"] == 0 else "โš ๏ธ" - await ctx.info(f"{status_emoji} Formatting complete: {formatted}/{total} files changed") + await ctx.info( + f"{status_emoji} Formatting complete: {formatted}/{total} files changed" + ) return results @@ -1741,31 +1955,31 @@ class DevelopmentWorkflow(MCPMixin): "css": [], "html": [], "markdown": [], - "other": [] + "other": [], } - + for file_path in files: suffix = file_path.suffix.lower() - - if suffix in ['.py', '.pyx', '.pyi']: + + if suffix in [".py", ".pyx", ".pyi"]: groups["python"].append(file_path) - elif suffix in ['.js', '.jsx', '.mjs']: + elif suffix in [".js", ".jsx", ".mjs"]: groups["javascript"].append(file_path) - elif suffix in ['.ts', '.tsx']: + elif suffix in [".ts", ".tsx"]: groups["typescript"].append(file_path) - elif suffix in ['.json']: + elif suffix in [".json"]: groups["json"].append(file_path) - elif suffix in ['.yaml', '.yml']: + elif suffix in [".yaml", ".yml"]: groups["yaml"].append(file_path) - elif suffix in ['.css', '.scss', '.sass', '.less']: + elif suffix in [".css", ".scss", ".sass", ".less"]: groups["css"].append(file_path) - elif suffix in ['.html', '.htm', '.xhtml']: + elif suffix in [".html", ".htm", ".xhtml"]: groups["html"].append(file_path) - elif suffix in ['.md', '.markdown']: + elif suffix in [".md", ".markdown"]: groups["markdown"].append(file_path) else: groups["other"].append(file_path) - + return {k: v for k, v in groups.items() if v} # Remove empty groups def _select_formatter_for_type(self, file_type: str, requested_formatter: str) -> Optional[str]: @@ -1780,14 +1994,14 @@ class DevelopmentWorkflow(MCPMixin): "yaml": ["prettier"], "css": ["prettier"], "html": ["prettier"], - "markdown": ["prettier"] + "markdown": ["prettier"], } - + if file_type in type_formatters and requested_formatter in type_formatters[file_type]: return requested_formatter else: return None # Requested formatter not suitable for this file type - + # Auto-detect best formatter for file type formatter_priority = { "python": ["black", "autopep8"], @@ -1797,14 +2011,14 @@ class DevelopmentWorkflow(MCPMixin): "yaml": ["prettier"], "css": ["prettier"], "html": ["prettier"], - "markdown": ["prettier"] + "markdown": ["prettier"], } - + candidates = formatter_priority.get(file_type, []) for formatter in candidates: if self._is_command_available(formatter): return formatter - + return None def _get_install_suggestion(self, formatter: str) -> str: @@ -1812,14 +2026,16 @@ class DevelopmentWorkflow(MCPMixin): suggestions = { "black": "pip install black", "autopep8": "pip install autopep8", - "prettier": "npm install -g prettier" + "prettier": "npm install -g prettier", } return suggestions.get(formatter, f"Install {formatter}") - 
async def _run_formatter(self, formatter: str, files: List[Path], config_file: Optional[str], ctx: Context) -> Dict[str, Any]: + async def _run_formatter( + self, formatter: str, files: List[Path], config_file: Optional[str], ctx: Context + ) -> Dict[str, Any]: """Run a specific formatter on files""" file_paths = [str(f) for f in files] - + try: if formatter == "black": return await self._run_black(file_paths, config_file) @@ -1829,33 +2045,33 @@ class DevelopmentWorkflow(MCPMixin): return await self._run_prettier(file_paths, config_file) else: return {"status": "unsupported", "formatter": formatter} - + except Exception as e: return {"status": "error", "formatter": formatter, "error": str(e)} async def _run_black(self, file_paths: List[str], config_file: Optional[str]) -> Dict[str, Any]: """Run Black Python formatter""" cmd = ["black", "--diff", "--color"] - + if config_file: cmd.extend(["--config", config_file]) - + # First run with --diff to see what would change diff_cmd = cmd + file_paths diff_result = subprocess.run(diff_cmd, capture_output=True, text=True, timeout=60) - + # Count changes by counting diff sections changes = diff_result.stdout.count("--- ") if diff_result.stdout else 0 - + # Run actual formatting format_cmd = ["black"] + (["--config", config_file] if config_file else []) + file_paths format_result = subprocess.run(format_cmd, capture_output=True, text=True, timeout=60) - + # Count files that were actually changed files_changed = 0 if format_result.stderr: files_changed = format_result.stderr.count("reformatted") - + return { "formatter": "black", "status": "completed", @@ -1863,25 +2079,29 @@ class DevelopmentWorkflow(MCPMixin): "files_changed": files_changed, "files_unchanged": len(file_paths) - files_changed, "total_changes": changes, - "diff_preview": diff_result.stdout[:1000] if diff_result.stdout else None # First 1000 chars + "diff_preview": ( + diff_result.stdout[:1000] if diff_result.stdout else None + ), # First 1000 chars } - async def _run_autopep8(self, file_paths: List[str], config_file: Optional[str]) -> Dict[str, Any]: + async def _run_autopep8( + self, file_paths: List[str], config_file: Optional[str] + ) -> Dict[str, Any]: """Run autopep8 Python formatter""" cmd = ["autopep8", "--in-place", "--aggressive", "--aggressive"] - + if config_file: cmd.extend(["--global-config", config_file]) - + # Run diff first to see changes diff_cmd = ["autopep8", "--diff"] + file_paths diff_result = subprocess.run(diff_cmd, capture_output=True, text=True, timeout=60) changes = diff_result.stdout.count("@@") if diff_result.stdout else 0 - + # Run actual formatting format_cmd = cmd + file_paths format_result = subprocess.run(format_cmd, capture_output=True, text=True, timeout=60) - + return { "formatter": "autopep8", "status": "completed", @@ -1889,26 +2109,30 @@ class DevelopmentWorkflow(MCPMixin): "files_changed": len(file_paths) if format_result.returncode == 0 else 0, "files_unchanged": 0 if format_result.returncode == 0 else len(file_paths), "total_changes": changes, - "diff_preview": diff_result.stdout[:1000] if diff_result.stdout else None + "diff_preview": diff_result.stdout[:1000] if diff_result.stdout else None, } - async def _run_prettier(self, file_paths: List[str], config_file: Optional[str]) -> Dict[str, Any]: + async def _run_prettier( + self, file_paths: List[str], config_file: Optional[str] + ) -> Dict[str, Any]: """Run Prettier formatter""" cmd = ["prettier", "--write"] - + if config_file: cmd.extend(["--config", config_file]) - + # Check what files 
would be changed check_cmd = ["prettier", "--list-different"] + file_paths check_result = subprocess.run(check_cmd, capture_output=True, text=True, timeout=60) - - files_to_change = len(check_result.stdout.strip().split('\n')) if check_result.stdout.strip() else 0 - + + files_to_change = ( + len(check_result.stdout.strip().split("\n")) if check_result.stdout.strip() else 0 + ) + # Run actual formatting format_cmd = cmd + file_paths format_result = subprocess.run(format_cmd, capture_output=True, text=True, timeout=60) - + return { "formatter": "prettier", "status": "completed", @@ -1916,38 +2140,42 @@ class DevelopmentWorkflow(MCPMixin): "files_changed": files_to_change if format_result.returncode == 0 else 0, "files_unchanged": len(file_paths) - files_to_change, "total_changes": files_to_change, - "changed_files": check_result.stdout.strip().split('\n') if check_result.stdout.strip() else [] + "changed_files": ( + check_result.stdout.strip().split("\n") if check_result.stdout.strip() else [] + ), } def _generate_format_recommendations(self, results: Dict[str, Any]) -> List[str]: """Generate actionable recommendations based on format results""" recommendations = [] summary = results["summary"] - + if summary["formatted_files"] == 0 and summary["failed_files"] == 0: recommendations.append("โœ… All files are already properly formatted!") return recommendations - + if summary["formatted_files"] > 0: recommendations.append(f"โœ… Successfully formatted {summary['formatted_files']} files") - + if summary["failed_files"] > 0: - recommendations.append(f"โš ๏ธ Failed to format {summary['failed_files']} files - check error details") - + recommendations.append( + f"โš ๏ธ Failed to format {summary['failed_files']} files - check error details" + ) + # Check for missing formatters skipped_types = [] for file_type, result in results["format_results"].items(): if result.get("status") == "skipped" and "not installed" in result.get("reason", ""): skipped_types.append((file_type, result.get("suggestion", ""))) - + if skipped_types: recommendations.append("๐Ÿ’ก Install missing formatters:") for file_type, suggestion in skipped_types: recommendations.append(f" - {suggestion} (for {file_type} files)") - + if summary["total_changes"] > 50: recommendations.append("๐Ÿ“‹ Many changes applied - review diff output carefully") - + return recommendations @@ -1969,29 +2197,29 @@ class NetworkAPITools(MCPMixin): if requests is None: return { "error": "requests library not available", - "install": "pip install requests" + "install": "pip install requests", } # Prepare headers request_headers = headers or {} - + # Prepare body based on type request_data = None request_json = None - + if body is not None: if isinstance(body, dict): request_json = body - if 'Content-Type' not in request_headers: - request_headers['Content-Type'] = 'application/json' + if "Content-Type" not in request_headers: + request_headers["Content-Type"] = "application/json" else: request_data = body - if 'Content-Type' not in request_headers: - request_headers['Content-Type'] = 'text/plain' + if "Content-Type" not in request_headers: + request_headers["Content-Type"] = "text/plain" # Make the request start_time = time.time() - + response = requests.request( method=method, url=url, @@ -1999,18 +2227,18 @@ class NetworkAPITools(MCPMixin): data=request_data, json=request_json, timeout=timeout, - allow_redirects=True + allow_redirects=True, ) - + end_time = time.time() response_time = round((end_time - start_time) * 1000, 2) # ms # Parse response body 
safely response_body = None - content_type = response.headers.get('Content-Type', '').lower() - + content_type = response.headers.get("Content-Type", "").lower() + try: - if 'application/json' in content_type: + if "application/json" in content_type: response_body = response.json() else: response_body = response.text @@ -2022,28 +2250,25 @@ class NetworkAPITools(MCPMixin): # Build response object result = { - "request": { - "method": method, - "url": url, - "headers": request_headers, - "body": body - }, + "request": {"method": method, "url": url, "headers": request_headers, "body": body}, "response": { "status_code": response.status_code, "status_text": response.reason, "headers": dict(response.headers), "body": response_body, "size_bytes": len(response.content), - "response_time_ms": response_time + "response_time_ms": response_time, }, "success": 200 <= response.status_code < 300, "redirected": len(response.history) > 0, - "final_url": response.url + "final_url": response.url, } if ctx: status_emoji = "โœ…" if result["success"] else "โŒ" - await ctx.info(f"{status_emoji} {method} {url} โ†’ {response.status_code} ({response_time}ms)") + await ctx.info( + f"{status_emoji} {method} {url} โ†’ {response.status_code} ({response_time}ms)" + ) return result @@ -2052,19 +2277,19 @@ class NetworkAPITools(MCPMixin): if ctx: await ctx.error(error_msg) return {"error": error_msg, "type": "timeout"} - + except requests.exceptions.ConnectionError as e: error_msg = f"Connection error: {str(e)}" if ctx: await ctx.error(error_msg) return {"error": error_msg, "type": "connection_error"} - + except requests.exceptions.RequestException as e: error_msg = f"Request failed: {str(e)}" if ctx: await ctx.error(error_msg) return {"error": error_msg, "type": "request_error"} - + except Exception as e: error_msg = f"HTTP request failed: {str(e)}" if ctx: @@ -2131,7 +2356,7 @@ class EnvironmentProcessManagement(MCPMixin): """Environment and process management tools""" @mcp_tool( - name="environment_info", + name="environment_info", description="""๐Ÿ” Get comprehensive system diagnostics with smart auto-detection. 
USAGE EXAMPLES: @@ -2142,53 +2367,55 @@ class EnvironmentProcessManagement(MCPMixin): - Specific sections: include_sections=["python", "git"] PERFORMANCE: ~0.1-0.2s execution time, safe for frequent use - RETURNS: Structured data + LLM hints for next actions""" + RETURNS: Structured data + LLM hints for next actions""", ) def environment_info( - self, - include_sections: List[Literal["auto", "all", "dev", "system", "python", "node", "git", "env_vars"]] = ["auto"], - detail_level: Literal["basic", "detailed", "comprehensive"] = "detailed" + self, + include_sections: List[ + Literal["auto", "all", "dev", "system", "python", "node", "git", "env_vars"] + ] = ["auto"], + detail_level: Literal["basic", "detailed", "comprehensive"] = "detailed", ) -> Dict[str, Any]: """Get detailed environment information with smart auto-detection and LLM-friendly guidance""" try: start_time = time.time() # Track performance - + # Smart section selection based on LLM-friendly modes actual_sections = [] - + if "auto" in include_sections: # Auto-detect available and relevant sections actual_sections = ["system", "python"] # Always available # Check for git availability try: - subprocess.run(['git', '--version'], capture_output=True, timeout=2) + subprocess.run(["git", "--version"], capture_output=True, timeout=2) actual_sections.append("git") except: pass - # Check for node availability + # Check for node availability try: - subprocess.run(['node', '--version'], capture_output=True, timeout=2) + subprocess.run(["node", "--version"], capture_output=True, timeout=2) actual_sections.append("node") except: pass - + elif "all" in include_sections: actual_sections = ["system", "python", "node", "git", "env_vars"] - + elif "dev" in include_sections: # Development-focused essentials actual_sections = ["python", "git"] # Add node if available try: - subprocess.run(['node', '--version'], capture_output=True, timeout=2) + subprocess.run(["node", "--version"], capture_output=True, timeout=2) actual_sections.append("node") except: pass - + else: # Use specified sections directly actual_sections = [s for s in include_sections if s not in ["auto", "all", "dev"]] - + result = { "timestamp": datetime.now().isoformat(), "sections_requested": include_sections, @@ -2200,10 +2427,10 @@ class EnvironmentProcessManagement(MCPMixin): "llm_hints": { "suggested_next_actions": [], "common_workflows": [], - "related_tools": [] - } + "related_tools": [], + }, } - + # System information section if "system" in actual_sections: try: @@ -2222,25 +2449,39 @@ class EnvironmentProcessManagement(MCPMixin): "python_version": platform.python_version(), "python_compiler": platform.python_compiler(), "python_build": platform.python_build(), - } + }, } - + # Add psutil system info if available if psutil: try: system_info["hardware"] = { "cpu_count_logical": psutil.cpu_count(logical=True), "cpu_count_physical": psutil.cpu_count(logical=False), - "cpu_freq": psutil.cpu_freq()._asdict() if psutil.cpu_freq() else None, + "cpu_freq": ( + psutil.cpu_freq()._asdict() if psutil.cpu_freq() else None + ), "memory": { "total": psutil.virtual_memory().total, "available": psutil.virtual_memory().available, "percent_used": psutil.virtual_memory().percent, }, "disk_usage": { - "total": psutil.disk_usage('/').total if os.name != 'nt' else psutil.disk_usage('C:').total, - "used": psutil.disk_usage('/').used if os.name != 'nt' else psutil.disk_usage('C:').used, - "free": psutil.disk_usage('/').free if os.name != 'nt' else psutil.disk_usage('C:').free, + "total": ( + 
psutil.disk_usage("/").total + if os.name != "nt" + else psutil.disk_usage("C:").total + ), + "used": ( + psutil.disk_usage("/").used + if os.name != "nt" + else psutil.disk_usage("C:").used + ), + "free": ( + psutil.disk_usage("/").free + if os.name != "nt" + else psutil.disk_usage("C:").free + ), }, "boot_time": datetime.fromtimestamp(psutil.boot_time()).isoformat(), } @@ -2248,12 +2489,12 @@ class EnvironmentProcessManagement(MCPMixin): result["warnings"].append(f"Failed to get hardware info: {str(e)}") else: result["warnings"].append("psutil not available - hardware info limited") - + result["sections_data"]["system"] = system_info - + except Exception as e: result["errors"].append(f"Failed to get system info: {str(e)}") - + # Python environment section if "python" in actual_sections: try: @@ -2269,17 +2510,29 @@ class EnvironmentProcessManagement(MCPMixin): "executable": sys.executable, "path": sys.path[:10], # Limit to first 10 entries for readability "modules": { - "builtin_module_names": list(sys.builtin_module_names)[:20], # Limit for readability + "builtin_module_names": list(sys.builtin_module_names)[ + :20 + ], # Limit for readability "loaded_modules_count": len(sys.modules), }, "prefix": sys.prefix, - "base_prefix": getattr(sys, 'base_prefix', sys.prefix), - "real_prefix": getattr(sys, 'real_prefix', None), - "in_virtualenv": hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix), + "base_prefix": getattr(sys, "base_prefix", sys.prefix), + "real_prefix": getattr(sys, "real_prefix", None), + "in_virtualenv": hasattr(sys, "real_prefix") + or (hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix), } - + # Check for common development packages - common_packages = ['pip', 'setuptools', 'wheel', 'pytest', 'numpy', 'pandas', 'requests', 'fastmcp'] + common_packages = [ + "pip", + "setuptools", + "wheel", + "pytest", + "numpy", + "pandas", + "requests", + "fastmcp", + ] installed_packages = {} for pkg in common_packages: try: @@ -2287,152 +2540,205 @@ class EnvironmentProcessManagement(MCPMixin): installed_packages[pkg] = "available" except ImportError: installed_packages[pkg] = "not_installed" - + python_info["common_packages"] = installed_packages result["sections_data"]["python"] = python_info - + except Exception as e: result["errors"].append(f"Failed to get Python info: {str(e)}") - + # Node.js environment section if "node" in actual_sections: try: node_info = {"available": False} - + # Check for Node.js try: - node_result = subprocess.run(['node', '--version'], - capture_output=True, text=True, timeout=5) + node_result = subprocess.run( + ["node", "--version"], capture_output=True, text=True, timeout=5 + ) if node_result.returncode == 0: node_info["available"] = True node_info["version"] = node_result.stdout.strip() - + # Get npm version - npm_result = subprocess.run(['npm', '--version'], - capture_output=True, text=True, timeout=5) + npm_result = subprocess.run( + ["npm", "--version"], capture_output=True, text=True, timeout=5 + ) if npm_result.returncode == 0: node_info["npm_version"] = npm_result.stdout.strip() - + # Check for package.json in current directory - if Path('package.json').exists(): + if Path("package.json").exists(): try: - with open('package.json', 'r') as f: + with open("package.json", "r") as f: package_json = json.load(f) node_info["local_project"] = { "name": package_json.get("name"), "version": package_json.get("version"), - "dependencies_count": len(package_json.get("dependencies", {})), - 
"dev_dependencies_count": len(package_json.get("devDependencies", {})), + "dependencies_count": len( + package_json.get("dependencies", {}) + ), + "dev_dependencies_count": len( + package_json.get("devDependencies", {}) + ), } except Exception as e: - result["warnings"].append(f"Failed to read package.json: {str(e)}") - + result["warnings"].append( + f"Failed to read package.json: {str(e)}" + ) + except (subprocess.TimeoutExpired, FileNotFoundError): node_info["error"] = "Node.js not found or not accessible" - + result["sections_data"]["node"] = node_info - + except Exception as e: result["errors"].append(f"Failed to get Node.js info: {str(e)}") - + # Git environment section if "git" in actual_sections: try: git_info = {"available": False} - + try: # Check git version - git_result = subprocess.run(['git', '--version'], - capture_output=True, text=True, timeout=5) + git_result = subprocess.run( + ["git", "--version"], capture_output=True, text=True, timeout=5 + ) if git_result.returncode == 0: git_info["available"] = True git_info["version"] = git_result.stdout.strip() - + # Get git config - config_items = ['user.name', 'user.email', 'core.editor', 'init.defaultBranch'] + config_items = [ + "user.name", + "user.email", + "core.editor", + "init.defaultBranch", + ] git_config = {} for item in config_items: try: - config_result = subprocess.run(['git', 'config', '--get', item], - capture_output=True, text=True, timeout=3) + config_result = subprocess.run( + ["git", "config", "--get", item], + capture_output=True, + text=True, + timeout=3, + ) if config_result.returncode == 0: git_config[item] = config_result.stdout.strip() except subprocess.TimeoutExpired: git_config[item] = "timeout" except Exception: git_config[item] = "not_set" - + git_info["config"] = git_config - + # Check if we're in a git repository try: - repo_result = subprocess.run(['git', 'rev-parse', '--git-dir'], - capture_output=True, text=True, timeout=3) + repo_result = subprocess.run( + ["git", "rev-parse", "--git-dir"], + capture_output=True, + text=True, + timeout=3, + ) if repo_result.returncode == 0: git_info["repository"] = { "in_repo": True, - "git_dir": repo_result.stdout.strip() + "git_dir": repo_result.stdout.strip(), } - + # Get current branch - branch_result = subprocess.run(['git', 'branch', '--show-current'], - capture_output=True, text=True, timeout=3) + branch_result = subprocess.run( + ["git", "branch", "--show-current"], + capture_output=True, + text=True, + timeout=3, + ) if branch_result.returncode == 0: - git_info["repository"]["current_branch"] = branch_result.stdout.strip() + git_info["repository"][ + "current_branch" + ] = branch_result.stdout.strip() else: git_info["repository"] = {"in_repo": False} except Exception: git_info["repository"] = {"in_repo": False} - + except (subprocess.TimeoutExpired, FileNotFoundError): git_info["error"] = "Git not found or not accessible" - + result["sections_data"]["git"] = git_info - + except Exception as e: result["errors"].append(f"Failed to get Git info: {str(e)}") - + # Environment variables section (filtered for security) if "env_vars" in actual_sections: try: # SACRED TRUST: Filter sensitive environment variables sensitive_patterns = [ - 'password', 'secret', 'key', 'token', 'auth', 'credential', - 'private', 'aws_', 'api_', 'database_url', 'db_pass' + "password", + "secret", + "key", + "token", + "auth", + "credential", + "private", + "aws_", + "api_", + "database_url", + "db_pass", ] - + safe_env_vars = {} development_env_vars = {} - + for key, value in 
os.environ.items(): key_lower = key.lower() - + # Check if potentially sensitive is_sensitive = any(pattern in key_lower for pattern in sensitive_patterns) - + if is_sensitive: safe_env_vars[key] = f"[FILTERED - {len(value)} chars]" elif len(value) > 200: - safe_env_vars[key] = f"[TRUNCATED - {len(value)} chars]: {value[:100]}..." + safe_env_vars[key] = ( + f"[TRUNCATED - {len(value)} chars]: {value[:100]}..." + ) else: safe_env_vars[key] = value - + # Collect development-relevant variables - if any(dev_key in key_lower for dev_key in ['path', 'python', 'node', 'npm', 'git', 'editor', 'shell', 'term', 'lang', 'lc_']): + if any( + dev_key in key_lower + for dev_key in [ + "path", + "python", + "node", + "npm", + "git", + "editor", + "shell", + "term", + "lang", + "lc_", + ] + ): development_env_vars[key] = value if not is_sensitive else "[FILTERED]" - + env_info = { "total_count": len(os.environ), "development_relevant": development_env_vars, "all_variables": safe_env_vars, - "security_note": "Sensitive variables filtered for security" + "security_note": "Sensitive variables filtered for security", } - + result["sections_data"]["env_vars"] = env_info - + except Exception as e: result["errors"].append(f"Failed to get environment variables: {str(e)}") - + # Add summary result["summary"] = { "sections_completed": len(result["sections_data"]), @@ -2440,59 +2746,81 @@ class EnvironmentProcessManagement(MCPMixin): "sections_selected": len(actual_sections), "errors_count": len(result["errors"]), "warnings_count": len(result["warnings"]), - "success": len(result["errors"]) == 0 + "success": len(result["errors"]) == 0, } - + # Add LLM hints based on discovered environment llm_hints = result["llm_hints"] - + # Suggest next actions based on what was found if result["summary"]["success"]: if "python" in result["sections_data"]: python_info = result["sections_data"]["python"] if python_info["in_virtualenv"]: - llm_hints["suggested_next_actions"].append("Environment ready for development") + llm_hints["suggested_next_actions"].append( + "Environment ready for development" + ) else: - llm_hints["suggested_next_actions"].append("Consider creating virtual environment with manage_virtual_env") + llm_hints["suggested_next_actions"].append( + "Consider creating virtual environment with manage_virtual_env" + ) llm_hints["related_tools"].append("manage_virtual_env") - + if "git" in result["sections_data"]: git_info = result["sections_data"]["git"] if git_info.get("available") and git_info.get("repository", {}).get("in_repo"): - llm_hints["suggested_next_actions"].append("Git repository detected - ready for version control operations") + llm_hints["suggested_next_actions"].append( + "Git repository detected - ready for version control operations" + ) llm_hints["related_tools"].extend(["git_git_status", "git_git_diff"]) elif git_info.get("available"): - llm_hints["suggested_next_actions"].append("Git available but not in repository") - + llm_hints["suggested_next_actions"].append( + "Git available but not in repository" + ) + if "node" in result["sections_data"]: node_info = result["sections_data"]["node"] if node_info.get("available") and node_info.get("local_project"): - llm_hints["suggested_next_actions"].append("Node.js project detected - ready for npm/yarn operations") + llm_hints["suggested_next_actions"].append( + "Node.js project detected - ready for npm/yarn operations" + ) llm_hints["related_tools"].append("execute_command_enhanced") - + # Common workflows based on environment if "python" in 
actual_sections and "git" in actual_sections: - llm_hints["common_workflows"].append("Python development: setup โ†’ code โ†’ test โ†’ commit") - + llm_hints["common_workflows"].append( + "Python development: setup โ†’ code โ†’ test โ†’ commit" + ) + if len(result["errors"]) == 0: - llm_hints["common_workflows"].append("Environment analysis complete - ready for development tasks") - + llm_hints["common_workflows"].append( + "Environment analysis complete - ready for development tasks" + ) + # Performance hints result["performance_hints"] = { - "execution_time_ms": round((time.time() - start_time) * 1000, 1) if 'start_time' in locals() else None, + "execution_time_ms": ( + round((time.time() - start_time) * 1000, 1) + if "start_time" in locals() + else None + ), "detail_level_used": detail_level, "sections_auto_detected": "auto" in include_sections, - "recommendation": "Use 'dev' mode for faster Python/Git focus" if len(actual_sections) > 3 else "Current selection optimal" + "recommendation": ( + "Use 'dev' mode for faster Python/Git focus" + if len(actual_sections) > 3 + else "Current selection optimal" + ), } - + return result - + except Exception as e: return { "error": f"Critical error in environment_info: {str(e)}", "timestamp": datetime.now().isoformat(), "sections_requested": include_sections, - "success": False + "success": False, } @mcp_tool(name="process_tree", description="Show process hierarchy and relationships") @@ -2505,9 +2833,9 @@ class EnvironmentProcessManagement(MCPMixin): return { "error": "psutil not available - process monitoring requires psutil package", "timestamp": datetime.now().isoformat(), - "success": False + "success": False, } - + result = { "timestamp": datetime.now().isoformat(), "root_pid": root_pid, @@ -2516,9 +2844,9 @@ class EnvironmentProcessManagement(MCPMixin): "tree_structure": {}, "summary": {}, "errors": [], - "warnings": [] + "warnings": [], } - + # Get all processes or start from specific root try: if root_pid: @@ -2532,17 +2860,17 @@ class EnvironmentProcessManagement(MCPMixin): return { "error": f"Process with PID {root_pid} not found", "timestamp": datetime.now().isoformat(), - "success": False + "success": False, } else: # Get all processes process_list = list(psutil.process_iter()) - + # Collect process information total_cpu = 0 total_memory = 0 process_count = 0 - + for proc in process_list: try: # Get process info with error handling for each field @@ -2564,41 +2892,45 @@ class EnvironmentProcessManagement(MCPMixin): "connections": 0, "open_files": 0, } - + # Safely get each piece of information try: proc_info["name"] = proc.name() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: cmdline = proc.cmdline() - proc_info["cmdline"] = cmdline[:5] if len(cmdline) > 5 else cmdline # Limit for readability + proc_info["cmdline"] = ( + cmdline[:5] if len(cmdline) > 5 else cmdline + ) # Limit for readability except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["status"] = proc.status() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: - proc_info["create_time"] = datetime.fromtimestamp(proc.create_time()).isoformat() + proc_info["create_time"] = datetime.fromtimestamp( + proc.create_time() + ).isoformat() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["cpu_percent"] = proc.cpu_percent() total_cpu += proc_info["cpu_percent"] except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["memory_percent"] = proc.memory_percent() total_memory += 
proc_info["memory_percent"] except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: memory_info = proc.memory_info() proc_info["memory_info"] = { @@ -2609,101 +2941,97 @@ class EnvironmentProcessManagement(MCPMixin): } except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["ppid"] = proc.ppid() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: children = proc.children() proc_info["children_pids"] = [child.pid for child in children] except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["num_threads"] = proc.num_threads() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["username"] = proc.username() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["cwd"] = proc.cwd() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["exe"] = proc.exe() except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["connections"] = len(proc.connections()) except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + try: proc_info["open_files"] = len(proc.open_files()) except (psutil.AccessDenied, psutil.NoSuchProcess): pass - + result["processes"][proc.pid] = proc_info process_count += 1 - + except psutil.NoSuchProcess: # Process disappeared during iteration result["warnings"].append(f"Process {proc.pid} disappeared during scan") except Exception as e: result["warnings"].append(f"Error processing PID {proc.pid}: {str(e)}") - + # Build tree structure tree_structure = {} orphans = [] - + for pid, proc_info in result["processes"].items(): ppid = proc_info["ppid"] - + if ppid is None or ppid not in result["processes"]: # Root process or orphan if ppid is not None and ppid not in result["processes"]: orphans.append(pid) - tree_structure[pid] = { - "process": proc_info, - "children": {}, - "depth": 0 - } - + tree_structure[pid] = {"process": proc_info, "children": {}, "depth": 0} + # Build parent-child relationships def add_children(parent_pid, depth=0): if parent_pid not in tree_structure: tree_structure[parent_pid] = { "process": result["processes"].get(parent_pid, {}), "children": {}, - "depth": depth + "depth": depth, } - + parent_node = tree_structure[parent_pid] - + for pid, proc_info in result["processes"].items(): if proc_info["ppid"] == parent_pid and pid != parent_pid: if pid not in parent_node["children"]: parent_node["children"][pid] = { "process": proc_info, "children": {}, - "depth": depth + 1 + "depth": depth + 1, } add_children(pid, depth + 1) - + # Build tree for each root process for pid in list(tree_structure.keys()): add_children(pid) - + result["tree_structure"] = tree_structure - + # Generate summary statistics summary = { "total_processes": process_count, @@ -2714,33 +3042,29 @@ class EnvironmentProcessManagement(MCPMixin): "status_breakdown": {}, "top_cpu_processes": [], "top_memory_processes": [], - "user_breakdown": {} + "user_breakdown": {}, } - + # Status breakdown status_counts = {} user_counts = {} - + # Top processes by resource usage processes_by_cpu = sorted( - result["processes"].values(), - key=lambda x: x["cpu_percent"], - reverse=True + result["processes"].values(), key=lambda x: x["cpu_percent"], reverse=True )[:10] - + processes_by_memory = sorted( - result["processes"].values(), - key=lambda x: x["memory_percent"], - reverse=True + result["processes"].values(), key=lambda x: x["memory_percent"], reverse=True )[:10] - + for proc_info in result["processes"].values(): status = proc_info["status"] username = 
proc_info["username"] - + status_counts[status] = status_counts.get(status, 0) + 1 user_counts[username] = user_counts.get(username, 0) + 1 - + summary["status_breakdown"] = status_counts summary["user_breakdown"] = user_counts summary["top_cpu_processes"] = [ @@ -2748,7 +3072,7 @@ class EnvironmentProcessManagement(MCPMixin): "pid": proc["pid"], "name": proc["name"], "cpu_percent": proc["cpu_percent"], - "cmdline": " ".join(proc["cmdline"][:3]) if proc["cmdline"] else "" + "cmdline": " ".join(proc["cmdline"][:3]) if proc["cmdline"] else "", } for proc in processes_by_cpu ] @@ -2758,27 +3082,27 @@ class EnvironmentProcessManagement(MCPMixin): "name": proc["name"], "memory_percent": proc["memory_percent"], "memory_mb": proc["memory_info"].get("rss_mb", 0), - "cmdline": " ".join(proc["cmdline"][:3]) if proc["cmdline"] else "" + "cmdline": " ".join(proc["cmdline"][:3]) if proc["cmdline"] else "", } for proc in processes_by_memory ] - + result["summary"] = summary result["success"] = True - + except Exception as e: result["errors"].append(f"Failed to build process tree: {str(e)}") result["success"] = False - + return result - + except Exception as e: return { "error": f"Critical error in process_tree: {str(e)}", "timestamp": datetime.now().isoformat(), "root_pid": root_pid, "include_children": include_children, - "success": False + "success": False, } @mcp_tool( @@ -2792,7 +3116,7 @@ class EnvironmentProcessManagement(MCPMixin): - Safe removal: action="remove" (includes confirmation) PERFORMANCE: ~0.01s creation time with UV, graceful fallback to venv if needed - RETURNS: Includes LLM guidance for next steps and workflow suggestions""" + RETURNS: Includes LLM guidance for next steps and workflow suggestions""", ) def manage_virtual_env( self, @@ -2805,17 +3129,17 @@ class EnvironmentProcessManagement(MCPMixin): """Manage Python virtual environments with UV enhancement, auto-naming, and LLM guidance""" try: start_time = time.time() # Track performance - + # Smart environment name handling actual_env_name = env_name if auto_name or env_name == "auto": # Generate name from current directory current_dir = Path.cwd().name # Clean name for valid environment name - actual_env_name = re.sub(r'[^a-zA-Z0-9_-]', '_', current_dir.lower()) + actual_env_name = re.sub(r"[^a-zA-Z0-9_-]", "_", current_dir.lower()) if not actual_env_name or actual_env_name[0].isdigit(): actual_env_name = f"env_{actual_env_name}" - + # Workspace detection for better guidance workspace_info = {} if workspace_detection and action in ["create", "list"]: @@ -2825,9 +3149,9 @@ class EnvironmentProcessManagement(MCPMixin): "has_setup_py": Path("setup.py").exists(), "has_git": Path(".git").exists(), "current_directory": str(Path.cwd()), - "suggested_env_name": actual_env_name + "suggested_env_name": actual_env_name, } - + result = { "timestamp": datetime.now().isoformat(), "action": action, @@ -2844,13 +3168,13 @@ class EnvironmentProcessManagement(MCPMixin): "llm_hints": { "suggested_next_actions": [], "common_workflows": [], - "related_tools": [] - } + "related_tools": [], + }, } - + # Determine platform-specific paths and commands - is_windows = os.name == 'nt' - + is_windows = os.name == "nt" + # Common virtual environment directories venv_base_dirs = [] if is_windows: @@ -2869,20 +3193,21 @@ class EnvironmentProcessManagement(MCPMixin): os.path.join(os.getcwd(), ".venv"), os.path.join(os.getcwd(), "venv"), ] - + # Add conda environments if available conda_envs_dir = None try: - conda_info = subprocess.run(['conda', 'info', 
'--json'], - capture_output=True, text=True, timeout=5) + conda_info = subprocess.run( + ["conda", "info", "--json"], capture_output=True, text=True, timeout=5 + ) if conda_info.returncode == 0: conda_data = json.loads(conda_info.stdout) - conda_envs_dir = conda_data.get('envs_dirs', [None])[0] + conda_envs_dir = conda_data.get("envs_dirs", [None])[0] if conda_envs_dir: venv_base_dirs.append(conda_envs_dir) except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError): pass - + # Helper function to find environment def find_env_path(env_name_to_find): possible_paths = [] @@ -2893,41 +3218,46 @@ class EnvironmentProcessManagement(MCPMixin): if os.path.exists(env_path): return env_path return None - + # Helper function to get Python executable path in venv def get_venv_python_path(env_path): if is_windows: return os.path.join(env_path, "Scripts", "python.exe") else: return os.path.join(env_path, "bin", "python") - + # Helper function to get activation script path def get_activation_script(env_path): if is_windows: return os.path.join(env_path, "Scripts", "activate.bat") else: return os.path.join(env_path, "bin", "activate") - + # ACTION: CREATE if action == "create": try: start_time = time.time() # Track creation timing - + # Determine Python executable to use python_cmd = "python" if python_version: # Try version-specific Python - version_cmds = [f"python{python_version}", f"python{python_version[:3]}", "python"] + version_cmds = [ + f"python{python_version}", + f"python{python_version[:3]}", + "python", + ] for cmd in version_cmds: try: - version_check = subprocess.run([cmd, '--version'], - capture_output=True, text=True, timeout=5) + version_check = subprocess.run( + [cmd, "--version"], capture_output=True, text=True, timeout=5 + ) if version_check.returncode == 0: python_cmd = cmd break except (subprocess.TimeoutExpired, FileNotFoundError): continue - + # Choose creation location (prefer ~/.virtualenvs) base_dir = os.path.expanduser("~/.virtualenvs") if not os.path.exists(base_dir): @@ -2936,41 +3266,49 @@ class EnvironmentProcessManagement(MCPMixin): except OSError: # Fallback to current directory base_dir = os.getcwd() - result["warnings"].append(f"Could not create ~/.virtualenvs, using {base_dir}") - + result["warnings"].append( + f"Could not create ~/.virtualenvs, using {base_dir}" + ) + env_path = os.path.join(base_dir, actual_env_name) - + # Check if environment already exists if os.path.exists(env_path): - result["errors"].append(f"Virtual environment '{actual_env_name}' already exists at {env_path}") + result["errors"].append( + f"Virtual environment '{actual_env_name}' already exists at {env_path}" + ) return result - + # Create virtual environment with uv (much faster) or fallback to venv uv_available = False try: # Check if uv is available - uv_check = subprocess.run(['uv', '--version'], capture_output=True, text=True, timeout=5) + uv_check = subprocess.run( + ["uv", "--version"], capture_output=True, text=True, timeout=5 + ) if uv_check.returncode == 0: uv_available = True result["details"]["uv_version"] = uv_check.stdout.strip() except (subprocess.TimeoutExpired, FileNotFoundError): pass - + if uv_available: # Use uv for much faster virtual environment creation if python_version: - create_cmd = ['uv', 'venv', env_path, '--python', python_version] + create_cmd = ["uv", "venv", env_path, "--python", python_version] else: - create_cmd = ['uv', 'venv', env_path] + create_cmd = ["uv", "venv", env_path] creation_method = "uv" else: # Fallback to standard venv - 
create_cmd = [python_cmd, '-m', 'venv', env_path] + create_cmd = [python_cmd, "-m", "venv", env_path] creation_method = "venv" result["warnings"].append("uv not available, using standard venv (slower)") - - create_result = subprocess.run(create_cmd, capture_output=True, text=True, timeout=120) - + + create_result = subprocess.run( + create_cmd, capture_output=True, text=True, timeout=120 + ) + if create_result.returncode == 0: result["success"] = True result["details"] = { @@ -2979,45 +3317,57 @@ class EnvironmentProcessManagement(MCPMixin): "activation_script": get_activation_script(env_path), "creation_command": " ".join(create_cmd), "creation_method": creation_method, - "creation_time": round(time.time() - start_time, 3) if 'start_time' in locals() else None + "creation_time": ( + round(time.time() - start_time, 3) + if "start_time" in locals() + else None + ), } - + # Verify Python version in created environment venv_python = get_venv_python_path(env_path) if os.path.exists(venv_python): try: - version_result = subprocess.run([venv_python, '--version'], - capture_output=True, text=True, timeout=5) + version_result = subprocess.run( + [venv_python, "--version"], + capture_output=True, + text=True, + timeout=5, + ) if version_result.returncode == 0: - result["details"]["actual_python_version"] = version_result.stdout.strip() + result["details"][ + "actual_python_version" + ] = version_result.stdout.strip() except (subprocess.TimeoutExpired, FileNotFoundError): pass - + # Provide activation instructions if is_windows: result["instructions"] = [ f"To activate: {env_path}\\Scripts\\activate.bat", f"Or in PowerShell: & '{env_path}\\Scripts\\Activate.ps1'", f"To deactivate: deactivate", - f"Created using: {creation_method} ({'ultra-fast' if creation_method == 'uv' else 'standard'})" + f"Created using: {creation_method} ({'ultra-fast' if creation_method == 'uv' else 'standard'})", ] else: result["instructions"] = [ f"To activate: source {env_path}/bin/activate", f"To deactivate: deactivate", - f"Created using: {creation_method} ({'ultra-fast' if creation_method == 'uv' else 'standard'})" + f"Created using: {creation_method} ({'ultra-fast' if creation_method == 'uv' else 'standard'})", ] else: - result["errors"].append(f"Failed to create virtual environment: {create_result.stderr}") - + result["errors"].append( + f"Failed to create virtual environment: {create_result.stderr}" + ) + except Exception as e: result["errors"].append(f"Error creating virtual environment: {str(e)}") - + # ACTION: LIST elif action == "list": try: environments = [] - + for base_dir in venv_base_dirs: if base_dir and os.path.exists(base_dir): try: @@ -3027,199 +3377,258 @@ class EnvironmentProcessManagement(MCPMixin): # Check if it's a valid virtual environment python_path = get_venv_python_path(env_path) activation_script = get_activation_script(env_path) - - if os.path.exists(python_path) or os.path.exists(activation_script): + + if os.path.exists(python_path) or os.path.exists( + activation_script + ): env_info = { "name": item, "path": env_path, "base_dir": base_dir, - "python_executable": python_path if os.path.exists(python_path) else None, - "activation_script": activation_script if os.path.exists(activation_script) else None, + "python_executable": ( + python_path + if os.path.exists(python_path) + else None + ), + "activation_script": ( + activation_script + if os.path.exists(activation_script) + else None + ), "created": None, "python_version": None, - "packages_count": None + "packages_count": None, } - + # 
Get creation time try: stat = os.stat(env_path) - env_info["created"] = datetime.fromtimestamp(stat.st_ctime).isoformat() + env_info["created"] = datetime.fromtimestamp( + stat.st_ctime + ).isoformat() except OSError: pass - + # Get Python version if env_info["python_executable"]: try: - version_result = subprocess.run([env_info["python_executable"], '--version'], - capture_output=True, text=True, timeout=5) + version_result = subprocess.run( + [ + env_info["python_executable"], + "--version", + ], + capture_output=True, + text=True, + timeout=5, + ) if version_result.returncode == 0: - env_info["python_version"] = version_result.stdout.strip() - except (subprocess.TimeoutExpired, FileNotFoundError): + env_info["python_version"] = ( + version_result.stdout.strip() + ) + except ( + subprocess.TimeoutExpired, + FileNotFoundError, + ): pass - + # Get installed packages count if env_info["python_executable"]: try: - pip_list = subprocess.run([env_info["python_executable"], '-m', 'pip', 'list'], - capture_output=True, text=True, timeout=10) + pip_list = subprocess.run( + [ + env_info["python_executable"], + "-m", + "pip", + "list", + ], + capture_output=True, + text=True, + timeout=10, + ) if pip_list.returncode == 0: - lines = pip_list.stdout.strip().split('\n') + lines = pip_list.stdout.strip().split("\n") # Subtract header lines - env_info["packages_count"] = max(0, len(lines) - 2) - except (subprocess.TimeoutExpired, FileNotFoundError): + env_info["packages_count"] = max( + 0, len(lines) - 2 + ) + except ( + subprocess.TimeoutExpired, + FileNotFoundError, + ): pass - + environments.append(env_info) except PermissionError: result["warnings"].append(f"Permission denied accessing {base_dir}") - + result["success"] = True result["details"] = { "environments": environments, "total_count": len(environments), - "searched_directories": venv_base_dirs + "searched_directories": venv_base_dirs, } - + except Exception as e: result["errors"].append(f"Error listing virtual environments: {str(e)}") - + # ACTION: REMOVE elif action == "remove": try: env_path = find_env_path(actual_env_name) - + if not env_path: - result["errors"].append(f"Virtual environment '{actual_env_name}' not found") + result["errors"].append( + f"Virtual environment '{actual_env_name}' not found" + ) return result - + if not os.path.exists(env_path): - result["errors"].append(f"Virtual environment path does not exist: {env_path}") + result["errors"].append( + f"Virtual environment path does not exist: {env_path}" + ) return result - + # SACRED TRUST: Safety check - ensure it's actually a virtual environment python_path = get_venv_python_path(env_path) activation_script = get_activation_script(env_path) - + if not (os.path.exists(python_path) or os.path.exists(activation_script)): - result["errors"].append(f"Path '{env_path}' does not appear to be a virtual environment") + result["errors"].append( + f"Path '{env_path}' does not appear to be a virtual environment" + ) return result - + # Remove the environment try: shutil.rmtree(env_path) result["success"] = True - result["details"] = { - "removed_path": env_path, - "env_name": env_name - } - result["instructions"] = [f"Virtual environment '{env_name}' has been removed successfully"] + result["details"] = {"removed_path": env_path, "env_name": env_name} + result["instructions"] = [ + f"Virtual environment '{env_name}' has been removed successfully" + ] except OSError as e: result["errors"].append(f"Failed to remove virtual environment: {str(e)}") - + except Exception as e: 
result["errors"].append(f"Error removing virtual environment: {str(e)}") - + # ACTION: ACTIVATE elif action == "activate": try: env_path = find_env_path(actual_env_name) - + if not env_path: - result["errors"].append(f"Virtual environment '{actual_env_name}' not found") + result["errors"].append( + f"Virtual environment '{actual_env_name}' not found" + ) return result - + activation_script = get_activation_script(env_path) - + if not os.path.exists(activation_script): result["errors"].append(f"Activation script not found: {activation_script}") return result - + result["success"] = True result["details"] = { "env_path": env_path, - "activation_script": activation_script + "activation_script": activation_script, } - + if is_windows: result["instructions"] = [ f"Command Prompt: {activation_script}", f"PowerShell: & '{env_path}\\Scripts\\Activate.ps1'", f"Git Bash: source '{env_path}/Scripts/activate'", - "Note: Activation must be done in your shell session" + "Note: Activation must be done in your shell session", ] else: result["instructions"] = [ f"source {activation_script}", - "Note: Activation must be done in your shell session" + "Note: Activation must be done in your shell session", ] - + except Exception as e: result["errors"].append(f"Error preparing activation: {str(e)}") - + # ACTION: DEACTIVATE elif action == "deactivate": try: result["success"] = True result["instructions"] = [ "To deactivate any active virtual environment, run: deactivate", - "Note: This command must be run in your shell session" + "Note: This command must be run in your shell session", ] result["details"] = { "note": "Deactivation is universal across all virtual environments" } - + except Exception as e: result["errors"].append(f"Error preparing deactivation: {str(e)}") - + else: result["errors"].append(f"Unknown action: {action}") - + # Add LLM hints based on action and results llm_hints = result["llm_hints"] - + if result["success"]: if action == "create": llm_hints["suggested_next_actions"] = [ f"Activate environment: manage_virtual_env('activate', '{actual_env_name}')", "Install packages with execute_command_enhanced", - "Check environment with environment_info(['python'])" + "Check environment with environment_info(['python'])", ] llm_hints["related_tools"] = ["execute_command_enhanced", "environment_info"] - + if workspace_info.get("has_requirements_txt"): - llm_hints["common_workflows"] = ["create โ†’ activate โ†’ pip install -r requirements.txt"] + llm_hints["common_workflows"] = [ + "create โ†’ activate โ†’ pip install -r requirements.txt" + ] elif workspace_info.get("has_pyproject_toml"): llm_hints["common_workflows"] = ["create โ†’ activate โ†’ pip install -e ."] else: - llm_hints["common_workflows"] = ["create โ†’ activate โ†’ pip install "] - + llm_hints["common_workflows"] = [ + "create โ†’ activate โ†’ pip install " + ] + elif action == "list": if result["details"]["total_count"] > 0: - llm_hints["suggested_next_actions"] = ["Activate existing environment or create new one"] + llm_hints["suggested_next_actions"] = [ + "Activate existing environment or create new one" + ] else: - llm_hints["suggested_next_actions"] = ["Create first environment with manage_virtual_env('create', 'myproject')"] + llm_hints["suggested_next_actions"] = [ + "Create first environment with manage_virtual_env('create', 'myproject')" + ] llm_hints["related_tools"] = ["environment_info"] - + elif action == "activate": - llm_hints["suggested_next_actions"] = ["Use the provided activation command in your terminal"] - 
llm_hints["common_workflows"] = ["activate โ†’ install packages โ†’ start development"] - + llm_hints["suggested_next_actions"] = [ + "Use the provided activation command in your terminal" + ] + llm_hints["common_workflows"] = [ + "activate โ†’ install packages โ†’ start development" + ] + elif action == "remove": llm_hints["suggested_next_actions"] = ["Environment removed successfully"] if workspace_info.get("has_requirements_txt"): - llm_hints["suggested_next_actions"].append("Consider creating new environment for this project") - + llm_hints["suggested_next_actions"].append( + "Consider creating new environment for this project" + ) + # Performance tracking result["performance_hints"] = { "execution_time_ms": round((time.time() - start_time) * 1000, 1), "creation_method": result["details"].get("creation_method", "n/a"), "uv_available": "uv" in str(result.get("details", {})), "workspace_detected": bool(workspace_info), - "auto_naming_used": result["auto_name_used"] + "auto_naming_used": result["auto_name_used"], } - + return result - + except Exception as e: return { "error": f"Critical error in manage_virtual_env: {str(e)}", @@ -3227,7 +3636,7 @@ class EnvironmentProcessManagement(MCPMixin): "action": action, "env_name": env_name, "python_version": python_version, - "success": False + "success": False, } @@ -3260,13 +3669,17 @@ class EnhancedExistingTools(MCPMixin): "execution_details": {}, "attempts": [], "errors": [], - "warnings": [] + "warnings": [], } - + # Validate and prepare command if isinstance(command, str): # String command - parse for shell execution - command_list = command.split() if not any(char in command for char in ['|', '&', '>', '<', ';']) else None + command_list = ( + command.split() + if not any(char in command for char in ["|", "&", ">", "<", ";"]) + else None + ) shell_mode = command_list is None exec_command = command if shell_mode else command_list elif isinstance(command, list): @@ -3277,30 +3690,36 @@ class EnhancedExistingTools(MCPMixin): else: result["errors"].append("Command must be string or list") return result - + # Validate working directory original_cwd = os.getcwd() if working_directory: if not os.path.exists(working_directory): - result["errors"].append(f"Working directory does not exist: {working_directory}") + result["errors"].append( + f"Working directory does not exist: {working_directory}" + ) return result if not os.path.isdir(working_directory): - result["errors"].append(f"Working directory is not a directory: {working_directory}") + result["errors"].append( + f"Working directory is not a directory: {working_directory}" + ) return result - + # Prepare environment exec_env = os.environ.copy() if environment_vars: # SACRED TRUST: Validate environment variables for key, value in environment_vars.items(): if not isinstance(key, str) or not isinstance(value, str): - result["warnings"].append(f"Skipping non-string environment variable: {key}") + result["warnings"].append( + f"Skipping non-string environment variable: {key}" + ) continue exec_env[key] = value - + # Execute with retry mechanism max_attempts = retry_count + 1 - + for attempt in range(max_attempts): attempt_result = { "attempt": attempt + 1, @@ -3310,16 +3729,16 @@ class EnhancedExistingTools(MCPMixin): "stdout": "", "stderr": "", "execution_time": 0.0, - "error": None + "error": None, } - + try: # Change working directory if specified if working_directory: os.chdir(working_directory) - + start_time = time.time() - + # Configure output capture if capture_output == "none": 
stdout_capture = subprocess.DEVNULL @@ -3333,7 +3752,7 @@ class EnhancedExistingTools(MCPMixin): else: # "all" stdout_capture = subprocess.PIPE stderr_capture = subprocess.PIPE - + # Execute command if shell_mode: process = subprocess.run( @@ -3343,7 +3762,7 @@ class EnhancedExistingTools(MCPMixin): stderr=stderr_capture, env=exec_env, text=True, - timeout=300 # 5 minute timeout + timeout=300, # 5 minute timeout ) else: process = subprocess.run( @@ -3352,109 +3771,130 @@ class EnhancedExistingTools(MCPMixin): stderr=stderr_capture, env=exec_env, text=True, - timeout=300 # 5 minute timeout + timeout=300, # 5 minute timeout ) - + end_time = time.time() execution_time = end_time - start_time - + # Collect results - attempt_result.update({ - "success": process.returncode == 0, - "return_code": process.returncode, - "stdout": process.stdout or "", - "stderr": process.stderr or "", - "execution_time": round(execution_time, 3) - }) - + attempt_result.update( + { + "success": process.returncode == 0, + "return_code": process.returncode, + "stdout": process.stdout or "", + "stderr": process.stderr or "", + "execution_time": round(execution_time, 3), + } + ) + # Simulate streaming callback if provided if stream_callback is not None: - attempt_result["streaming_note"] = "Streaming callback would be called with real-time output" - + attempt_result["streaming_note"] = ( + "Streaming callback would be called with real-time output" + ) + # Success case if process.returncode == 0: result["success"] = True result["execution_details"] = { "final_attempt": attempt + 1, - "total_execution_time": sum(a["execution_time"] for a in result["attempts"]) + execution_time, + "total_execution_time": sum( + a["execution_time"] for a in result["attempts"] + ) + + execution_time, "return_code": process.returncode, "stdout": process.stdout or "", "stderr": process.stderr or "", "command_type": "shell" if shell_mode else "direct", "working_directory_used": working_directory or original_cwd, - "environment_vars_applied": len(environment_vars) if environment_vars else 0 + "environment_vars_applied": ( + len(environment_vars) if environment_vars else 0 + ), } - + result["attempts"].append(attempt_result) break - + # Failure case - prepare for retry else: - attempt_result["error"] = f"Command failed with return code {process.returncode}" + attempt_result["error"] = ( + f"Command failed with return code {process.returncode}" + ) result["attempts"].append(attempt_result) - + if attempt < max_attempts - 1: # Wait before retry (exponential backoff) - wait_time = min(2 ** attempt, 10) # Max 10 seconds + wait_time = min(2**attempt, 10) # Max 10 seconds time.sleep(wait_time) attempt_result["retry_wait"] = wait_time - + except subprocess.TimeoutExpired: attempt_result["error"] = "Command timed out after 300 seconds" attempt_result["execution_time"] = 300.0 result["attempts"].append(attempt_result) - + except subprocess.CalledProcessError as e: - attempt_result.update({ - "error": f"Command failed: {str(e)}", - "return_code": e.returncode, - "execution_time": round(time.time() - start_time, 3) - }) + attempt_result.update( + { + "error": f"Command failed: {str(e)}", + "return_code": e.returncode, + "execution_time": round(time.time() - start_time, 3), + } + ) result["attempts"].append(attempt_result) - + except FileNotFoundError: attempt_result["error"] = "Command not found" result["attempts"].append(attempt_result) break # Don't retry for command not found - + except PermissionError: attempt_result["error"] = "Permission denied" 
result["attempts"].append(attempt_result) break # Don't retry for permission errors - + except Exception as e: attempt_result["error"] = f"Unexpected error: {str(e)}" - attempt_result["execution_time"] = round(time.time() - start_time, 3) if 'start_time' in locals() else 0.0 + attempt_result["execution_time"] = ( + round(time.time() - start_time, 3) if "start_time" in locals() else 0.0 + ) result["attempts"].append(attempt_result) - + finally: # Always restore original working directory try: os.chdir(original_cwd) except OSError: result["warnings"].append("Failed to restore original working directory") - + # Final result processing if not result["success"]: # Collect all errors from attempts - all_errors = [attempt["error"] for attempt in result["attempts"] if attempt.get("error")] + all_errors = [ + attempt["error"] for attempt in result["attempts"] if attempt.get("error") + ] result["errors"].extend(all_errors) - + # Set final execution details from last attempt if result["attempts"]: last_attempt = result["attempts"][-1] result["execution_details"] = { "final_attempt": len(result["attempts"]), - "total_execution_time": sum(a["execution_time"] for a in result["attempts"]), + "total_execution_time": sum( + a["execution_time"] for a in result["attempts"] + ), "return_code": last_attempt.get("return_code"), "stdout": last_attempt.get("stdout", ""), "stderr": last_attempt.get("stderr", ""), "command_type": "shell" if shell_mode else "direct", "working_directory_used": working_directory or original_cwd, - "environment_vars_applied": len(environment_vars) if environment_vars else 0, - "final_error": last_attempt.get("error") + "environment_vars_applied": ( + len(environment_vars) if environment_vars else 0 + ), + "final_error": last_attempt.get("error"), } - + # Add summary statistics result["summary"] = { "total_attempts": len(result["attempts"]), @@ -3463,18 +3903,18 @@ class EnhancedExistingTools(MCPMixin): "total_execution_time": sum(a["execution_time"] for a in result["attempts"]), "retry_used": len(result["attempts"]) > 1, "command_length": len(str(command)), - "environment_vars_count": len(environment_vars) if environment_vars else 0 + "environment_vars_count": len(environment_vars) if environment_vars else 0, } - + return result - + except Exception as e: return { "error": f"Critical error in execute_command_enhanced: {str(e)}", "timestamp": datetime.now().isoformat(), "command": command, "working_directory": working_directory, - "success": False + "success": False, } @mcp_tool( @@ -3489,13 +3929,15 @@ class EnhancedExistingTools(MCPMixin): - "smart": Tries multiple modes, returns best results (recommended) PERFORMANCE: Searches 50+ files in under 0.5s, includes LLM workflow hints - RETURNS: Rich context + suggested_next_actions for common development tasks""" + RETURNS: Rich context + suggested_next_actions for common development tasks""", ) def search_code_enhanced( self, query: str, directory: str = ".", # Default to current directory - LLM friendly - search_type: Optional[Literal["smart", "text", "semantic", "ast", "cross-reference"]] = "smart", + search_type: Optional[ + Literal["smart", "text", "semantic", "ast", "cross-reference"] + ] = "smart", file_pattern: Optional[str] = None, save_to_history: Optional[bool] = True, max_results: int = 20, # Limit results for better LLM processing @@ -3514,20 +3956,20 @@ class EnhancedExistingTools(MCPMixin): "total_matches": 0, "search_duration": 0.0, "errors": [], - "warnings": [] + "warnings": [], } - + start_time = time.time() - + # 
Validate directory if not os.path.exists(directory): search_metadata["errors"].append(f"Directory does not exist: {directory}") return [{"search_metadata": search_metadata}] - + if not os.path.isdir(directory): search_metadata["errors"].append(f"Path is not a directory: {directory}") return [{"search_metadata": search_metadata}] - + # Determine file patterns to search if file_pattern: # Use provided pattern @@ -3537,11 +3979,35 @@ class EnhancedExistingTools(MCPMixin): if search_type == "ast": patterns = ["*.py"] # AST search limited to Python else: - patterns = ["*.py", "*.js", "*.ts", "*.java", "*.cpp", "*.c", "*.h", - "*.cs", "*.php", "*.rb", "*.go", "*.rs", "*.kt", "*.swift", - "*.html", "*.css", "*.sql", "*.yaml", "*.yml", "*.json", - "*.xml", "*.md", "*.txt", "*.sh", "*.ps1", "*.bat"] - + patterns = [ + "*.py", + "*.js", + "*.ts", + "*.java", + "*.cpp", + "*.c", + "*.h", + "*.cs", + "*.php", + "*.rb", + "*.go", + "*.rs", + "*.kt", + "*.swift", + "*.html", + "*.css", + "*.sql", + "*.yaml", + "*.yml", + "*.json", + "*.xml", + "*.md", + "*.txt", + "*.sh", + "*.ps1", + "*.bat", + ] + # Collect files to search files_to_search = [] for pattern in patterns: @@ -3550,45 +4016,69 @@ class EnhancedExistingTools(MCPMixin): if file_path.is_file(): # Skip binary files and common excluded directories relative_path = str(file_path.relative_to(directory)) - if not any(exclude in relative_path for exclude in - ['.git/', '__pycache__/', 'node_modules/', '.venv/', 'venv/', - '.pytest_cache/', 'dist/', 'build/', '.tox/', '.coverage']): + if not any( + exclude in relative_path + for exclude in [ + ".git/", + "__pycache__/", + "node_modules/", + ".venv/", + "venv/", + ".pytest_cache/", + "dist/", + "build/", + ".tox/", + ".coverage", + ] + ): files_to_search.append(file_path) except Exception as e: - search_metadata["warnings"].append(f"Error collecting files for pattern {pattern}: {str(e)}") - + search_metadata["warnings"].append( + f"Error collecting files for pattern {pattern}: {str(e)}" + ) + files_to_search = list(set(files_to_search)) # Remove duplicates search_metadata["total_files_searched"] = len(files_to_search) - + # Perform search based on type if search_type == "smart": # Smart search: try multiple modes and combine best results all_results = [] - + # Start with text search (fastest) text_results = self._search_text(query, files_to_search, search_metadata) - for result in text_results[:max_results//3]: # Limit each mode + for result in text_results[: max_results // 3]: # Limit each mode result["search_mode_used"] = "text" all_results.append(result) - + # Add semantic search if query looks like code - if any(char in query for char in ['(', ')', '.', '_']) or len(query.split()) == 1: - semantic_results = self._search_semantic(query, files_to_search, search_metadata) - for result in semantic_results[:max_results//3]: + if any(char in query for char in ["(", ")", ".", "_"]) or len(query.split()) == 1: + semantic_results = self._search_semantic( + query, files_to_search, search_metadata + ) + for result in semantic_results[: max_results // 3]: result["search_mode_used"] = "semantic" all_results.append(result) - + # Add AST search for Python files if appropriate - python_files = [f for f in files_to_search if f.suffix == '.py'] - if python_files and query.replace('_', '').isalnum(): + python_files = [f for f in files_to_search if f.suffix == ".py"] + if python_files and query.replace("_", "").isalnum(): ast_results = self._search_ast(query, python_files, search_metadata) - for result in 
ast_results[:max_results//3]: + for result in ast_results[: max_results // 3]: result["search_mode_used"] = "ast" all_results.append(result) - + results.extend(all_results[:max_results]) - search_metadata["smart_modes_used"] = list(set([r.get("search_mode_used") for r in all_results if r.get("search_mode_used")])) - + search_metadata["smart_modes_used"] = list( + set( + [ + r.get("search_mode_used") + for r in all_results + if r.get("search_mode_used") + ] + ) + ) + elif search_type == "text": results.extend(self._search_text(query, files_to_search, search_metadata)) elif search_type == "semantic": @@ -3596,19 +4086,21 @@ class EnhancedExistingTools(MCPMixin): elif search_type == "ast": results.extend(self._search_ast(query, files_to_search, search_metadata)) elif search_type == "cross-reference": - results.extend(self._search_cross_reference(query, files_to_search, search_metadata)) + results.extend( + self._search_cross_reference(query, files_to_search, search_metadata) + ) else: search_metadata["errors"].append(f"Unknown search type: {search_type}") - + # Limit results for better LLM processing if len(results) > max_results: results = results[:max_results] search_metadata["results_limited"] = True - + # Finalize metadata search_metadata["search_duration"] = round(time.time() - start_time, 3) search_metadata["total_matches"] = len([r for r in results if "match" in r]) - + # Save to history if requested if save_to_history: try: @@ -3618,72 +4110,88 @@ class EnhancedExistingTools(MCPMixin): "search_type": search_type, "directory": directory, "matches_found": search_metadata["total_matches"], - "duration": search_metadata["search_duration"] + "duration": search_metadata["search_duration"], } # In a real implementation, this would save to a persistent store search_metadata["history_saved"] = True except Exception as e: search_metadata["warnings"].append(f"Failed to save to history: {str(e)}") - + # Add LLM hints based on search results search_metadata["llm_hints"] = { "suggested_next_actions": [], "common_workflows": [], - "related_tools": [] + "related_tools": [], } - + if search_metadata["total_matches"] > 0: # Suggest actions based on what was found if search_type == "smart" or search_type == "text": search_metadata["llm_hints"]["suggested_next_actions"] = [ "Review found matches for relevant code", "Use execute_command_enhanced to run related commands", - "Consider ast search for deeper code structure analysis" + "Consider ast search for deeper code structure analysis", ] elif search_type == "ast": search_metadata["llm_hints"]["suggested_next_actions"] = [ "Analyze function/class structure", "Use cross-reference search to find usage patterns", - "Consider refactoring with edit_block_enhanced" + "Consider refactoring with edit_block_enhanced", ] elif search_type == "cross-reference": search_metadata["llm_hints"]["suggested_next_actions"] = [ "Assess impact of potential changes", "Plan refactoring strategy", - "Use git_git_grep for version history context" + "Use git_git_grep for version history context", ] - - search_metadata["llm_hints"]["related_tools"] = ["execute_command_enhanced", "edit_block_enhanced", "git_git_grep"] + + search_metadata["llm_hints"]["related_tools"] = [ + "execute_command_enhanced", + "edit_block_enhanced", + "git_git_grep", + ] search_metadata["llm_hints"]["common_workflows"] = [ "search โ†’ analyze โ†’ edit โ†’ test", - "search โ†’ cross-reference โ†’ plan refactoring" + "search โ†’ cross-reference โ†’ plan refactoring", ] else: 
search_metadata["llm_hints"]["suggested_next_actions"] = [ "Try different search terms or patterns", "Use semantic search for concept-based discovery", - "Check if search directory contains expected files" + "Check if search directory contains expected files", ] if search_type != "smart": - search_metadata["llm_hints"]["suggested_next_actions"].append("Try smart search mode for comprehensive results") - + search_metadata["llm_hints"]["suggested_next_actions"].append( + "Try smart search mode for comprehensive results" + ) + # Performance and optimization hints search_metadata["performance_hints"] = { "files_processed": search_metadata["total_files_searched"], - "search_efficiency": "excellent" if search_metadata["search_duration"] < 0.2 else "good" if search_metadata["search_duration"] < 0.5 else "consider filtering", - "optimization_suggestions": [] + "search_efficiency": ( + "excellent" + if search_metadata["search_duration"] < 0.2 + else ( + "good" if search_metadata["search_duration"] < 0.5 else "consider filtering" + ) + ), + "optimization_suggestions": [], } - + if search_metadata["total_files_searched"] > 100: - search_metadata["performance_hints"]["optimization_suggestions"].append("Consider using file_pattern to limit scope") + search_metadata["performance_hints"]["optimization_suggestions"].append( + "Consider using file_pattern to limit scope" + ) if search_metadata["search_duration"] > 1.0: - search_metadata["performance_hints"]["optimization_suggestions"].append("Large search - consider breaking into smaller queries") - + search_metadata["performance_hints"]["optimization_suggestions"].append( + "Large search - consider breaking into smaller queries" + ) + # Add metadata as first result results.insert(0, {"search_metadata": search_metadata}) - + return results - + except Exception as e: error_metadata = { "timestamp": datetime.now().isoformat(), @@ -3691,17 +4199,22 @@ class EnhancedExistingTools(MCPMixin): "directory": directory, "search_type": search_type, "critical_error": str(e), - "success": False + "success": False, } return [{"search_metadata": error_metadata}] - - def _search_text(self, query: str, files: List[Path], metadata: Dict[str, Any]) -> List[Dict[str, Any]]: + + def _search_text( + self, query: str, files: List[Path], metadata: Dict[str, Any] + ) -> List[Dict[str, Any]]: """Traditional text/regex search""" results = [] - + try: # Compile regex if query looks like regex (contains special chars) - use_regex = any(char in query for char in ['.', '*', '+', '?', '^', '$', '[', ']', '(', ')', '|', '\\']) + use_regex = any( + char in query + for char in [".", "*", "+", "?", "^", "$", "[", "]", "(", ")", "|", "\\"] + ) if use_regex: try: pattern = re.compile(query, re.IGNORECASE | re.MULTILINE) @@ -3709,12 +4222,12 @@ class EnhancedExistingTools(MCPMixin): # Fall back to literal search use_regex = False metadata["warnings"].append("Invalid regex pattern, using literal search") - + for file_path in files: try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: lines = f.readlines() - + for line_num, line in enumerate(lines, 1): matches = [] if use_regex: @@ -3729,61 +4242,73 @@ class EnhancedExistingTools(MCPMixin): if pos == -1: break # Create match-like object - match_obj = type('Match', (), { - 'start': lambda: pos, - 'end': lambda: pos + len(query), - 'group': lambda: line[pos:pos + len(query)] - })() + match_obj = type( + "Match", + (), + { + "start": lambda: pos, + "end": 
lambda: pos + len(query), + "group": lambda: line[pos : pos + len(query)], + }, + )() matches.append(match_obj) start = pos + 1 - + if matches: # Get context lines - context_before = lines[max(0, line_num - 3):line_num - 1] - context_after = lines[line_num:min(len(lines), line_num + 2)] - + context_before = lines[max(0, line_num - 3) : line_num - 1] + context_after = lines[line_num : min(len(lines), line_num + 2)] + result = { "match": { "file_path": str(file_path), - "relative_path": str(file_path.relative_to(Path(file_path).anchor)), + "relative_path": str( + file_path.relative_to(Path(file_path).anchor) + ), "line_number": line_num, "line_content": line.rstrip(), "matches_in_line": len(matches), "match_positions": [(m.start(), m.end()) for m in matches], - "matched_text": [m.group() for m in matches] + "matched_text": [m.group() for m in matches], }, "context": { "before": [l.rstrip() for l in context_before], - "after": [l.rstrip() for l in context_after] + "after": [l.rstrip() for l in context_after], }, "file_info": { "extension": file_path.suffix, "size_bytes": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat() + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), }, - "search_type": "text" + "search_type": "text", } results.append(result) - + except Exception as e: metadata["warnings"].append(f"Error searching file {file_path}: {str(e)}") - + except Exception as e: metadata["errors"].append(f"Error in text search: {str(e)}") - + return results - - def _search_semantic(self, query: str, files: List[Path], metadata: Dict[str, Any]) -> List[Dict[str, Any]]: + + def _search_semantic( + self, query: str, files: List[Path], metadata: Dict[str, Any] + ) -> List[Dict[str, Any]]: """Semantic code search with context awareness""" results = [] - + try: # Define semantic patterns for common code constructs semantic_patterns = { "function_definition": [ r"def\s+\w*" + re.escape(query) + r"\w*\s*\(", # Python r"function\s+\w*" + re.escape(query) + r"\w*\s*\(", # JavaScript - r"(public|private|protected)?\s*(static)?\s*\w+\s+\w*" + re.escape(query) + r"\w*\s*\(", # Java/C# + r"(public|private|protected)?\s*(static)?\s*\w+\s+\w*" + + re.escape(query) + + r"\w*\s*\(", # Java/C# ], "class_definition": [ r"class\s+\w*" + re.escape(query) + r"\w*\s*[\(:]", # Python/Java @@ -3799,173 +4324,185 @@ class EnhancedExistingTools(MCPMixin): "method_call": [ r"\.\s*\w*" + re.escape(query) + r"\w*\s*\(", # Method calls r"\b\w*" + re.escape(query) + r"\w*\s*\(", # Function calls - ] + ], } - + # Try to detect query intent query_lower = query.lower() search_patterns = [] - + # Add all patterns for comprehensive search for pattern_type, patterns in semantic_patterns.items(): search_patterns.extend([(p, pattern_type) for p in patterns]) - + # Also include literal search as fallback search_patterns.append((re.escape(query), "literal")) - + for file_path in files: try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() lines = content.splitlines() - + for pattern, pattern_type in search_patterns: try: regex = re.compile(pattern, re.IGNORECASE | re.MULTILINE) for match in regex.finditer(content): # Find line number - line_num = content[:match.start()].count('\\n') + 1 + line_num = content[: match.start()].count("\\n") + 1 line_content = lines[line_num - 1] if line_num <= len(lines) else "" - + # Get context - context_before = 
lines[max(0, line_num - 3):line_num - 1] - context_after = lines[line_num:min(len(lines), line_num + 2)] - + context_before = lines[max(0, line_num - 3) : line_num - 1] + context_after = lines[line_num : min(len(lines), line_num + 2)] + result = { "match": { "file_path": str(file_path), - "relative_path": str(file_path.relative_to(Path(file_path).anchor)), + "relative_path": str( + file_path.relative_to(Path(file_path).anchor) + ), "line_number": line_num, "line_content": line_content, "matched_text": match.group(), "semantic_type": pattern_type, - "match_start": match.start() - content[:match.start()].rfind('\\n') - 1, - "match_end": match.end() - content[:match.start()].rfind('\\n') - 1 - }, - "context": { - "before": context_before, - "after": context_after + "match_start": match.start() + - content[: match.start()].rfind("\\n") + - 1, + "match_end": match.end() + - content[: match.start()].rfind("\\n") + - 1, }, + "context": {"before": context_before, "after": context_after}, "file_info": { "extension": file_path.suffix, "size_bytes": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat() + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), }, - "search_type": "semantic" + "search_type": "semantic", } results.append(result) - + except re.error: continue # Skip invalid patterns - + except Exception as e: - metadata["warnings"].append(f"Error in semantic search of {file_path}: {str(e)}") - + metadata["warnings"].append( + f"Error in semantic search of {file_path}: {str(e)}" + ) + except Exception as e: metadata["errors"].append(f"Error in semantic search: {str(e)}") - + return results - - def _search_ast(self, query: str, files: List[Path], metadata: Dict[str, Any]) -> List[Dict[str, Any]]: + + def _search_ast( + self, query: str, files: List[Path], metadata: Dict[str, Any] + ) -> List[Dict[str, Any]]: """AST-based search for Python files""" results = [] - + try: - python_files = [f for f in files if f.suffix == '.py'] - + python_files = [f for f in files if f.suffix == ".py"] + for file_path in python_files: try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: content = f.read() lines = content.splitlines() - + try: tree = ast.parse(content) except SyntaxError as e: metadata["warnings"].append(f"Syntax error in {file_path}: {str(e)}") continue - + class ASTSearchVisitor(ast.NodeVisitor): def __init__(self): self.matches = [] - + def visit_FunctionDef(self, node): if query.lower() in node.name.lower(): self.matches.append(("function", node.name, node.lineno)) self.generic_visit(node) - + def visit_ClassDef(self, node): if query.lower() in node.name.lower(): self.matches.append(("class", node.name, node.lineno)) self.generic_visit(node) - + def visit_Name(self, node): if query.lower() in node.id.lower(): self.matches.append(("variable", node.id, node.lineno)) self.generic_visit(node) - + def visit_Attribute(self, node): if query.lower() in node.attr.lower(): self.matches.append(("attribute", node.attr, node.lineno)) self.generic_visit(node) - + visitor = ASTSearchVisitor() visitor.visit(tree) - + for match_type, name, line_num in visitor.matches: if line_num <= len(lines): line_content = lines[line_num - 1] - context_before = lines[max(0, line_num - 3):line_num - 1] - context_after = lines[line_num:min(len(lines), line_num + 2)] - + context_before = lines[max(0, line_num - 3) : line_num - 1] + context_after = lines[line_num 
: min(len(lines), line_num + 2)] + result = { "match": { "file_path": str(file_path), - "relative_path": str(file_path.relative_to(Path(file_path).anchor)), + "relative_path": str( + file_path.relative_to(Path(file_path).anchor) + ), "line_number": line_num, "line_content": line_content, "ast_node_type": match_type, "node_name": name, - "matched_text": name - }, - "context": { - "before": context_before, - "after": context_after + "matched_text": name, }, + "context": {"before": context_before, "after": context_after}, "file_info": { "extension": file_path.suffix, "size_bytes": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat() + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), }, - "search_type": "ast" + "search_type": "ast", } results.append(result) - + except Exception as e: metadata["warnings"].append(f"Error in AST search of {file_path}: {str(e)}") - + except Exception as e: metadata["errors"].append(f"Error in AST search: {str(e)}") - + return results - - def _search_cross_reference(self, query: str, files: List[Path], metadata: Dict[str, Any]) -> List[Dict[str, Any]]: + + def _search_cross_reference( + self, query: str, files: List[Path], metadata: Dict[str, Any] + ) -> List[Dict[str, Any]]: """Cross-reference search for tracking usage patterns""" results = [] - + try: # First pass: find definitions definitions = [] usages = [] - + for file_path in files: try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: lines = f.readlines() - + for line_num, line in enumerate(lines, 1): line_stripped = line.strip() - + # Look for definitions (simplified) definition_patterns = [ (r"def\s+" + re.escape(query) + r"\s*\(", "function"), @@ -3975,42 +4512,52 @@ class EnhancedExistingTools(MCPMixin): (r"let\s+" + re.escape(query) + r"\s*=", "variable"), (r"var\s+" + re.escape(query) + r"\s*=", "variable"), ] - + for pattern, def_type in definition_patterns: if re.search(pattern, line, re.IGNORECASE): - definitions.append({ + definitions.append( + { + "file_path": str(file_path), + "line_number": line_num, + "line_content": line.rstrip(), + "definition_type": def_type, + } + ) + + # Look for usages + if re.search(r"\b" + re.escape(query) + r"\b", line, re.IGNORECASE): + usages.append( + { "file_path": str(file_path), "line_number": line_num, "line_content": line.rstrip(), - "definition_type": def_type - }) - - # Look for usages - if re.search(r"\b" + re.escape(query) + r"\b", line, re.IGNORECASE): - usages.append({ - "file_path": str(file_path), - "line_number": line_num, - "line_content": line.rstrip() - }) - + } + ) + except Exception as e: - metadata["warnings"].append(f"Error in cross-reference search of {file_path}: {str(e)}") - + metadata["warnings"].append( + f"Error in cross-reference search of {file_path}: {str(e)}" + ) + # Combine definitions and usages all_references = definitions + usages - + for ref in all_references: file_path = Path(ref["file_path"]) line_num = ref["line_number"] - + # Get context try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: lines = f.readlines() - - context_before = [l.rstrip() for l in lines[max(0, line_num - 3):line_num - 1]] - context_after = [l.rstrip() for l in lines[line_num:min(len(lines), line_num + 2)]] - + + context_before = [ + l.rstrip() for l in lines[max(0, line_num - 3) : line_num - 1] + ] 
+ context_after = [ + l.rstrip() for l in lines[line_num : min(len(lines), line_num + 2)] + ] + result = { "match": { "file_path": str(file_path), @@ -4018,27 +4565,28 @@ class EnhancedExistingTools(MCPMixin): "line_number": line_num, "line_content": ref["line_content"], "reference_type": ref.get("definition_type", "usage"), - "matched_text": query - }, - "context": { - "before": context_before, - "after": context_after + "matched_text": query, }, + "context": {"before": context_before, "after": context_after}, "file_info": { "extension": file_path.suffix, "size_bytes": file_path.stat().st_size, - "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat() + "modified": datetime.fromtimestamp( + file_path.stat().st_mtime + ).isoformat(), }, - "search_type": "cross-reference" + "search_type": "cross-reference", } results.append(result) - + except Exception as e: - metadata["warnings"].append(f"Error getting context for {file_path}:{line_num}: {str(e)}") - + metadata["warnings"].append( + f"Error getting context for {file_path}:{line_num}: {str(e)}" + ) + except Exception as e: metadata["errors"].append(f"Error in cross-reference search: {str(e)}") - + return results @mcp_tool( @@ -4080,7 +4628,9 @@ class UtilityTools(MCPMixin): """Generate project from template""" raise NotImplementedError("project_template not implemented") - @mcp_tool(name="dependency_check", description="๐ŸŸก SAFE: Analyze and update project dependencies") + @mcp_tool( + name="dependency_check", description="๐ŸŸก SAFE: Analyze and update project dependencies" + ) async def dependency_check( self, project_path: str, @@ -4101,20 +4651,20 @@ class UtilityTools(MCPMixin): "updates_available": [], "security_issues": [], "recommendations": [], - "summary": {} + "summary": {}, } # Detect project type and dependency files dependency_files = [] - + # Python projects pyproject_toml = project_path_obj / "pyproject.toml" requirements_txt = project_path_obj / "requirements.txt" pipfile = project_path_obj / "Pipfile" - + # Node.js projects package_json = project_path_obj / "package.json" - + if pyproject_toml.exists(): results["project_type"] = "python-pyproject" dependency_files.append(("pyproject.toml", pyproject_toml)) @@ -4128,7 +4678,9 @@ class UtilityTools(MCPMixin): results["project_type"] = "nodejs" dependency_files.append(("package.json", package_json)) else: - return {"error": "No supported dependency files found (pyproject.toml, requirements.txt, package.json)"} + return { + "error": "No supported dependency files found (pyproject.toml, requirements.txt, package.json)" + } # Parse dependency files for file_type, file_path in dependency_files: @@ -4143,9 +4695,9 @@ class UtilityTools(MCPMixin): deps = self._parse_pipfile(file_path) else: deps = {} - + results["dependencies"][file_type] = deps - + except Exception as e: results["dependencies"][file_type] = {"error": f"Failed to parse: {str(e)}"} @@ -4160,25 +4712,31 @@ class UtilityTools(MCPMixin): # Basic security checks if check_security: - security_issues = await self._check_security_issues(project_path_obj, results["project_type"], ctx) + security_issues = await self._check_security_issues( + project_path_obj, results["project_type"], ctx + ) results["security_issues"] = security_issues # Generate recommendations results["recommendations"] = self._generate_recommendations(results) # Create summary - total_deps = sum(len(deps) if isinstance(deps, dict) and "error" not in deps else 0 - for deps in results["dependencies"].values()) - + total_deps = sum( + 
len(deps) if isinstance(deps, dict) and "error" not in deps else 0 + for deps in results["dependencies"].values() + ) + results["summary"] = { "total_dependencies": total_deps, "updates_available": len(results["updates_available"]), "security_issues": len(results["security_issues"]), - "project_type": results["project_type"] + "project_type": results["project_type"], } if ctx: - await ctx.info(f"Dependency check complete: {total_deps} deps, {len(results['updates_available'])} updates, {len(results['security_issues'])} security issues") + await ctx.info( + f"Dependency check complete: {total_deps} deps, {len(results['updates_available'])} updates, {len(results['security_issues'])} security issues" + ) return results @@ -4197,27 +4755,42 @@ class UtilityTools(MCPMixin): import tomli as tomllib except ImportError: return {"error": "tomllib/tomli not available for parsing pyproject.toml"} - + try: - with open(file_path, 'rb') as f: + with open(file_path, "rb") as f: data = tomllib.load(f) - + deps = {} - + # Get dependencies from different sections - if 'project' in data and 'dependencies' in data['project']: - for dep in data['project']['dependencies']: - name = dep.split('>=')[0].split('==')[0].split('~=')[0].split('>')[0].split('<')[0].strip() + if "project" in data and "dependencies" in data["project"]: + for dep in data["project"]["dependencies"]: + name = ( + dep.split(">=")[0] + .split("==")[0] + .split("~=")[0] + .split(">")[0] + .split("<")[0] + .strip() + ) deps[name] = dep - - if 'tool' in data and 'poetry' in data['tool'] and 'dependencies' in data['tool']['poetry']: - poetry_deps = data['tool']['poetry']['dependencies'] + + if ( + "tool" in data + and "poetry" in data["tool"] + and "dependencies" in data["tool"]["poetry"] + ): + poetry_deps = data["tool"]["poetry"]["dependencies"] for name, version in poetry_deps.items(): - if name != 'python': - deps[name] = str(version) if not isinstance(version, dict) else version.get('version', 'latest') - + if name != "python": + deps[name] = ( + str(version) + if not isinstance(version, dict) + else version.get("version", "latest") + ) + return deps - + except Exception as e: return {"error": f"Failed to parse pyproject.toml: {str(e)}"} @@ -4225,11 +4798,18 @@ class UtilityTools(MCPMixin): """Parse requirements.txt for dependencies""" try: deps = {} - with open(file_path, 'r') as f: + with open(file_path, "r") as f: for line in f: line = line.strip() - if line and not line.startswith('#'): - name = line.split('>=')[0].split('==')[0].split('~=')[0].split('>')[0].split('<')[0].strip() + if line and not line.startswith("#"): + name = ( + line.split(">=")[0] + .split("==")[0] + .split("~=")[0] + .split(">")[0] + .split("<")[0] + .strip() + ) deps[name] = line return deps except Exception as e: @@ -4238,15 +4818,15 @@ class UtilityTools(MCPMixin): def _parse_package_json(self, file_path: Path) -> Dict[str, str]: """Parse package.json for dependencies""" try: - with open(file_path, 'r') as f: + with open(file_path, "r") as f: data = json.load(f) - + deps = {} - if 'dependencies' in data: - deps.update(data['dependencies']) - if 'devDependencies' in data: - deps.update(data['devDependencies']) - + if "dependencies" in data: + deps.update(data["dependencies"]) + if "devDependencies" in data: + deps.update(data["devDependencies"]) + return deps except Exception as e: return {"error": f"Failed to parse package.json: {str(e)}"} @@ -4256,14 +4836,14 @@ class UtilityTools(MCPMixin): try: # Simple parsing for Pipfile - would need toml parser for full 
support deps = {} - with open(file_path, 'r') as f: + with open(file_path, "r") as f: content = f.read() # Basic extraction - this is simplified - if '[packages]' in content: - lines = content.split('[packages]')[1].split('[')[0].strip().split('\n') + if "[packages]" in content: + lines = content.split("[packages]")[1].split("[")[0].strip().split("\n") for line in lines: - if '=' in line and line.strip(): - name, version = line.split('=', 1) + if "=" in line and line.strip(): + name, version = line.split("=", 1) deps[name.strip()] = version.strip().strip('"') return deps except Exception as e: @@ -4277,9 +4857,9 @@ class UtilityTools(MCPMixin): cwd=project_path, capture_output=True, text=True, - timeout=60 + timeout=60, ) - + if result.returncode == 0: try: outdated = json.loads(result.stdout) @@ -4288,7 +4868,7 @@ class UtilityTools(MCPMixin): "package": pkg["name"], "current_version": pkg["version"], "latest_version": pkg["latest_version"], - "type": pkg.get("latest_filetype", "wheel") + "type": pkg.get("latest_filetype", "wheel"), } for pkg in outdated ] @@ -4306,9 +4886,9 @@ class UtilityTools(MCPMixin): cwd=project_path, capture_output=True, text=True, - timeout=60 + timeout=60, ) - + # npm outdated returns exit code 1 when there are outdated packages if result.stdout: try: @@ -4318,7 +4898,7 @@ class UtilityTools(MCPMixin): "package": name, "current_version": info.get("current"), "latest_version": info.get("latest"), - "wanted_version": info.get("wanted") + "wanted_version": info.get("wanted"), } for name, info in outdated.items() ] @@ -4328,10 +4908,12 @@ class UtilityTools(MCPMixin): except Exception: return [] - async def _check_security_issues(self, project_path: Path, project_type: str, ctx: Context) -> List[Dict[str, Any]]: + async def _check_security_issues( + self, project_path: Path, project_type: str, ctx: Context + ) -> List[Dict[str, Any]]: """Check for known security vulnerabilities""" issues = [] - + try: if project_type.startswith("python"): # Try to use pip-audit if available @@ -4339,18 +4921,18 @@ class UtilityTools(MCPMixin): ["python", "-m", "pip", "install", "pip-audit"], cwd=project_path, capture_output=True, - timeout=30 + timeout=30, ) - + if result.returncode == 0: audit_result = subprocess.run( ["python", "-m", "pip-audit", "--format=json"], cwd=project_path, capture_output=True, text=True, - timeout=60 + timeout=60, ) - + if audit_result.returncode == 0: try: audit_data = json.loads(audit_result.stdout) @@ -4358,7 +4940,7 @@ class UtilityTools(MCPMixin): issues.extend(audit_data) except json.JSONDecodeError: pass - + elif project_type == "nodejs": # Try npm audit audit_result = subprocess.run( @@ -4366,51 +4948,62 @@ class UtilityTools(MCPMixin): cwd=project_path, capture_output=True, text=True, - timeout=60 + timeout=60, ) - + if audit_result.stdout: try: audit_data = json.loads(audit_result.stdout) if "vulnerabilities" in audit_data: for vuln_name, vuln_info in audit_data["vulnerabilities"].items(): - issues.append({ - "package": vuln_name, - "severity": vuln_info.get("severity", "unknown"), - "description": vuln_info.get("via", [{}])[0].get("title", "Unknown vulnerability") - }) + issues.append( + { + "package": vuln_name, + "severity": vuln_info.get("severity", "unknown"), + "description": vuln_info.get("via", [{}])[0].get( + "title", "Unknown vulnerability" + ), + } + ) except json.JSONDecodeError: pass - + except Exception: pass - + return issues def _generate_recommendations(self, results: Dict[str, Any]) -> List[str]: """Generate actionable 
recommendations""" recommendations = [] - + if results["updates_available"]: recommendations.append(f"Update {len(results['updates_available'])} outdated packages") - + if results["security_issues"]: - critical_issues = [issue for issue in results["security_issues"] - if issue.get("severity") in ["critical", "high"]] + critical_issues = [ + issue + for issue in results["security_issues"] + if issue.get("severity") in ["critical", "high"] + ] if critical_issues: - recommendations.append(f"๐Ÿšจ Address {len(critical_issues)} critical/high security vulnerabilities immediately") + recommendations.append( + f"๐Ÿšจ Address {len(critical_issues)} critical/high security vulnerabilities immediately" + ) else: recommendations.append(f"Review {len(results['security_issues'])} security issues") - + project_type = results.get("project_type") if project_type == "python-requirements": - recommendations.append("Consider migrating to pyproject.toml for better dependency management") + recommendations.append( + "Consider migrating to pyproject.toml for better dependency management" + ) elif project_type == "nodejs": recommendations.append("Run 'npm update' to install available updates") elif project_type and project_type.startswith("python"): recommendations.append("Run 'pip install --upgrade' for packages that need updates") - + if not results["updates_available"] and not results["security_issues"]: recommendations.append("โœ… All dependencies are up to date and secure") - + return recommendations