Claude hooks auto-backup: manual (backup_20250720_091250)

This commit is contained in:
Ryan Malloy 2025-07-20 03:12:51 -06:00
parent 9445e09c48
commit 392833187e
135 changed files with 16151 additions and 3439 deletions

View File

@ -76,8 +76,7 @@ cd claude-hooks && ./scripts/install.sh
## Requirements
- **Node.js** 16+ (for npm installation)
- **Python** 3.8+ (for hook scripts)
- **Node.js** 16+ (only runtime required)
- **Claude Code** (hooks integrate with Claude's native system)
## License

273
bin/claude-hooks.js Normal file → Executable file
View File

@ -2,41 +2,107 @@
const { Command } = require('commander');
const chalk = require('chalk');
const { execSync, spawn } = require('child_process');
const path = require('path');
const fs = require('fs');
const fs = require('fs-extra');
const os = require('os');
const program = new Command();
const packageRoot = path.dirname(__dirname);
// Helper to run Python scripts
function runPythonScript(scriptName, args = []) {
const scriptPath = path.join(packageRoot, 'scripts', scriptName);
return spawn('python3', [scriptPath, ...args], {
stdio: 'inherit',
cwd: packageRoot
// Helper to create hooks configuration
// Writes ~/.config/claude/hooks.json pointing each Claude hook event at the
// matching Node script shipped in this package's hooks/ directory.
async function createHooksConfig() {
  const hookCommand = (scriptFile) =>
    `node ${path.join(packageRoot, 'hooks', scriptFile)}`;
  const configDir = path.join(os.homedir(), '.config', 'claude');
  const configFile = path.join(configDir, 'hooks.json');
  await fs.ensureDir(configDir);
  const config = {
    hooks: {
      UserPromptSubmit: hookCommand('context-monitor.js'),
      PreToolUse: {
        Bash: hookCommand('command-validator.js')
      },
      PostToolUse: {
        '*': hookCommand('session-logger.js')
      },
      Stop: hookCommand('session-finalizer.js')
    }
  };
  await fs.writeJson(configFile, config, { spaces: 2 });
  return configFile;
}
// Helper to test a hook script
/**
 * Run a hook script under node, feeding JSON test input on stdin.
 *
 * @param {string} scriptName - File name inside the package's hooks/ directory.
 * @param {object} testInput - JSON-serializable payload piped to the hook's stdin.
 * @returns {Promise<{success: boolean, stdout?: string, stderr?: string, error?: string}>}
 *   Never rejects; all failure modes (non-zero exit, spawn error, 10s timeout)
 *   are reported via `success: false`.
 */
async function testHookScript(scriptName, testInput) {
  return new Promise((resolve) => {
    // `spawn` is already imported at the top of this file; no local require needed.
    const scriptPath = path.join(packageRoot, 'hooks', scriptName);
    const child = spawn('node', [scriptPath], {
      stdio: ['pipe', 'pipe', 'pipe'],
      cwd: packageRoot
    });
    let stdout = '';
    let stderr = '';
    // Resolve at most once: the timeout kill and the subsequent 'close'
    // event would otherwise both call resolve.
    let settled = false;
    const finish = (result) => {
      if (!settled) {
        settled = true;
        resolve(result);
      }
    };
    child.stdout.on('data', (data) => {
      stdout += data.toString();
    });
    child.stderr.on('data', (data) => {
      stderr += data.toString();
    });
    const timeout = setTimeout(() => {
      child.kill();
      finish({ success: false, error: 'Timeout' });
    }, 10000);
    child.on('close', (code) => {
      clearTimeout(timeout);
      finish({
        success: code === 0,
        stdout: stdout.trim(),
        stderr: stderr.trim()
      });
    });
    child.on('error', (error) => {
      clearTimeout(timeout);
      finish({ success: false, error: error.message });
    });
    // Swallow EPIPE on stdin: if the child fails to spawn or exits before the
    // write lands, the stream error would otherwise crash the process.
    child.stdin.on('error', () => {});
    child.stdin.write(JSON.stringify(testInput));
    child.stdin.end();
  });
}
// Helper to check installation status
function checkStatus() {
async function checkStatus() {
const hooksConfig = path.join(os.homedir(), '.config', 'claude', 'hooks.json');
const hasHooks = fs.existsSync(hooksConfig);
const hasHooks = await fs.pathExists(hooksConfig);
console.log(chalk.blue('Claude Hooks Status:'));
console.log(hasHooks ? chalk.green('✓ Hooks configuration installed') : chalk.red('✗ No hooks configuration found'));
try {
execSync('python3 --version', { stdio: 'pipe' });
console.log(chalk.green('✓ Python 3 available'));
} catch {
console.log(chalk.red('✗ Python 3 not found'));
}
console.log(chalk.green('✓ Node.js runtime available'));
return hasHooks;
}
// Helper to remove hooks configuration
// Deletes ~/.config/claude/hooks.json if present; returns whether anything
// was actually removed.
async function removeHooksConfig() {
  const configPath = path.join(os.homedir(), '.config', 'claude', 'hooks.json');
  const exists = await fs.pathExists(configPath);
  if (!exists) {
    return false;
  }
  await fs.remove(configPath);
  return true;
}
program
.name('claude-hooks')
.description('Intelligent hooks system for Claude Code')
@ -46,82 +112,173 @@ program
.command('init')
.description('Initialize Claude Hooks (run after npm install)')
.option('--force', 'Force reinstallation even if already configured')
.action((options) => {
console.log(chalk.blue('Initializing Claude Hooks...'));
if (!options.force && checkStatus()) {
console.log(chalk.yellow('⚠️ Claude Hooks already configured. Use --force to reinstall.'));
return;
.option('--auto-setup', 'Auto-setup during npm postinstall')
.option('--quiet', 'Minimal output')
.action(async (options) => {
if (!options.quiet) {
console.log(chalk.blue('Initializing Claude Hooks...'));
}
const initScript = runPythonScript('install.py');
initScript.on('close', (code) => {
if (code === 0) {
console.log(chalk.green('\n🎉 Claude Hooks initialized successfully!'));
try {
// Check for existing installation
if (!options.force && await checkStatus()) {
if (!options.quiet) {
console.log(chalk.yellow('⚠️ Claude Hooks already configured. Use --force to reinstall.'));
}
return;
}
// Create runtime directories
const runtimeDir = path.join(packageRoot, '.claude_hooks');
await fs.ensureDir(path.join(runtimeDir, 'backups'));
await fs.ensureDir(path.join(runtimeDir, 'logs'));
await fs.ensureDir(path.join(runtimeDir, 'patterns'));
// Create hooks configuration
const configFile = await createHooksConfig();
if (!options.quiet) {
console.log(chalk.green('✅ Claude Hooks initialized successfully!'));
console.log(chalk.blue('Configuration file:'), configFile);
console.log(chalk.blue('Next steps:'));
console.log('1. Restart Claude Code to activate hooks');
console.log('2. Try: claude-hooks test');
} else {
console.log(chalk.red('❌ Initialization failed'));
process.exit(1);
}
});
} catch (error) {
console.error(chalk.red('❌ Initialization failed:'), error.message);
process.exit(1);
}
});
program
.command('status')
.description('Show Claude Hooks installation status')
.action(() => {
checkStatus();
.action(async () => {
await checkStatus();
});
program
.command('test')
.description('Run Claude Hooks tests')
.action(() => {
.action(async () => {
console.log(chalk.blue('Running Claude Hooks tests...'));
const testScript = runPythonScript('test.py');
testScript.on('close', (code) => {
if (code === 0) {
console.log(chalk.green('✅ All tests passed!'));
} else {
console.log(chalk.red('❌ Some tests failed'));
process.exit(1);
let passed = 0;
let total = 0;
// Test 1: Check configuration
total++;
console.log('Testing configuration...');
if (await checkStatus()) {
console.log(chalk.green('✓ Configuration exists'));
passed++;
} else {
console.log(chalk.red('✗ Configuration missing'));
}
// Test 2: Test hook scripts
const hookTests = [
{
name: 'context-monitor.js',
input: { prompt: 'test prompt', context_size: 1000 }
},
{
name: 'command-validator.js',
input: { tool: 'Bash', parameters: { command: 'pip install requests' } }
},
{
name: 'session-logger.js',
input: { tool: 'Bash', parameters: { command: 'echo test' }, success: true }
},
{
name: 'session-finalizer.js',
input: {}
}
});
];
for (const test of hookTests) {
total++;
console.log(`Testing ${test.name}...`);
const result = await testHookScript(test.name, test.input);
if (result.success) {
console.log(chalk.green(`${test.name} working`));
passed++;
} else {
console.log(chalk.red(`${test.name} failed: ${result.error || result.stderr}`));
}
}
// Summary
console.log();
if (passed === total) {
console.log(chalk.green(`✅ All ${total} tests passed!`));
console.log(chalk.green('Claude Hooks is ready to use!'));
} else {
console.log(chalk.yellow(`⚠️ ${passed}/${total} tests passed`));
console.log(chalk.blue('Try: claude-hooks init --force'));
process.exit(1);
}
});
program
.command('uninstall')
.description('Remove Claude Hooks configuration')
.action(() => {
console.log(chalk.blue('Uninstalling Claude Hooks...'));
const uninstallScript = runPythonScript('uninstall.py');
uninstallScript.on('close', (code) => {
if (code === 0) {
console.log(chalk.green('✅ Claude Hooks uninstalled successfully'));
console.log(chalk.yellow('Note: To completely remove, run: npm uninstall -g claude-hooks'));
.option('--quiet', 'Minimal output')
.action(async (options) => {
if (!options.quiet) {
console.log(chalk.blue('Uninstalling Claude Hooks...'));
}
try {
const removed = await removeHooksConfig();
if (removed) {
if (!options.quiet) {
console.log(chalk.green('✅ Claude Hooks configuration removed'));
console.log(chalk.yellow('Note: To completely remove the package: npm uninstall -g claude-hooks'));
}
} else {
console.log(chalk.red('❌ Uninstall failed'));
process.exit(1);
if (!options.quiet) {
console.log(chalk.yellow('⚠️ No Claude Hooks configuration found'));
}
}
});
} catch (error) {
console.error(chalk.red('❌ Uninstall failed:'), error.message);
process.exit(1);
}
});
program
.command('backup')
.description('Manually trigger a backup')
.action(() => {
.action(async () => {
console.log(chalk.blue('Creating manual backup...'));
const backupScript = runPythonScript('manual-backup.py');
backupScript.on('close', (code) => {
if (code === 0) {
console.log(chalk.green('✅ Backup created successfully'));
try {
// Import backup manager
const { BackupManager } = require(path.join(packageRoot, 'lib', 'backup-manager'));
const backupManager = new BackupManager();
const backupId = await backupManager.createBackup(process.cwd(), {
trigger: 'manual',
timestamp: new Date().toISOString()
});
if (backupId) {
console.log(chalk.green(`✅ Backup created: ${backupId}`));
} else {
console.log(chalk.red('❌ Backup failed'));
process.exit(1);
}
});
} catch (error) {
console.error(chalk.red('❌ Backup failed:'), error.message);
process.exit(1);
}
});
program.parse();

View File

@ -1,12 +1,12 @@
{
"hooks": {
"UserPromptSubmit": "python3 {{INSTALL_PATH}}/hooks/context_monitor.py",
"UserPromptSubmit": "node {{INSTALL_PATH}}/hooks/context-monitor.js",
"PreToolUse": {
"Bash": "python3 {{INSTALL_PATH}}/hooks/command_validator.py"
"Bash": "node {{INSTALL_PATH}}/hooks/command-validator.js"
},
"PostToolUse": {
"*": "python3 {{INSTALL_PATH}}/hooks/session_logger.py"
"*": "node {{INSTALL_PATH}}/hooks/session-logger.js"
},
"Stop": "python3 {{INSTALL_PATH}}/hooks/session_finalizer.py"
"Stop": "node {{INSTALL_PATH}}/hooks/session-finalizer.js"
}
}

View File

@ -23,17 +23,17 @@ If either check shows existing hooks, **STOP** and run the uninstall process fir
- `claude-hooks uninstall` (if available)
- Or manually remove: `rm -rf ~/.config/claude/hooks.json`
**Installation (choose one method):**
**Installation:**
**Method 1 - NPM (Recommended):**
**NPM (Recommended):**
1. `npm install -g claude-hooks`
2. `claude-hooks init`
3. `claude-hooks test`
2. Hooks auto-configure during install
3. `claude-hooks test` to verify
**Method 2 - Git (For developers):**
**Git (For developers):**
1. `git clone https://git.supported.systems/rsp2k/claude-hooks.git`
2. `cd claude-hooks && ./scripts/install.sh`
3. `./scripts/test.sh`
2. `cd claude-hooks && npm install && npm run postinstall`
3. `claude-hooks test`
**After installation:**
- Restart Claude Code to activate the hooks
@ -42,6 +42,6 @@ If either check shows existing hooks, **STOP** and run the uninstall process fir
**To uninstall later:** `claude-hooks uninstall` or `npm uninstall -g claude-hooks`
Please guide me through this installation step-by-step, including the pre-installation checks. Use the NPM method unless I specifically request git installation. Handle any errors that come up and make sure everything works. After installation, help me test it by trying some commands that commonly fail on different systems (like `pip` vs `pip3`, `python` vs `python3`) so I can see the intelligence in action.
Please guide me through this installation step-by-step, including the pre-installation checks. Use the NPM method unless I specifically request git installation. Handle any errors that come up and make sure everything works. The installation now auto-configures during npm install - no manual steps needed! After installation, help me test it by trying some commands that commonly fail on different systems (like `pip` vs `pip3`, `python` vs `python3`) so I can see the intelligence in action.
The goal is to have a working Claude Hooks system that starts learning from my environment immediately.

View File

@ -150,27 +150,30 @@ This feedback loop is what transforms Claude from a stateless assistant into an
The shadow learner implements a classic observer pattern, but with sophisticated intelligence:
```python
class ShadowLearner:
def observe(self, execution: ToolExecution):
# Extract patterns from execution
patterns = self.extract_patterns(execution)
```javascript
class ShadowLearner {
observe(execution) {
// Extract patterns from execution
const patterns = this.extractPatterns(execution);
# Update confidence scores
self.update_confidence(patterns)
// Update confidence scores
this.updateConfidence(patterns);
# Store new knowledge
self.knowledge_base.update(patterns)
// Store new knowledge
this.knowledgeBase.update(patterns);
}
def predict(self, proposed_action):
# Match against known patterns
similar_patterns = self.find_similar(proposed_action)
predict(proposedAction) {
// Match against known patterns
const similarPatterns = this.findSimilar(proposedAction);
# Calculate confidence
confidence = self.calculate_confidence(similar_patterns)
// Calculate confidence
const confidence = this.calculateConfidence(similarPatterns);
# Return prediction
return Prediction(confidence, similar_patterns)
// Return prediction
return new Prediction(confidence, similarPatterns);
}
}
```
The key insight is that the learner doesn't just record what happened - it actively builds predictive models that can guide future decisions.
@ -179,26 +182,29 @@ The key insight is that the learner doesn't just record what happened - it activ
The context monitor implements a resource management pattern, treating Claude's context as a finite resource that must be carefully managed:
```python
class ContextMonitor:
def estimate_usage(self):
# Multiple estimation strategies
estimates = [
self.token_based_estimate(),
self.activity_based_estimate(),
self.time_based_estimate()
]
```javascript
class ContextMonitor {
estimateUsage() {
// Multiple estimation strategies
const estimates = [
this.tokenBasedEstimate(),
this.activityBasedEstimate(),
this.timeBasedEstimate()
];
# Weighted combination
return self.combine_estimates(estimates)
// Weighted combination
return this.combineEstimates(estimates);
}
def should_backup(self):
usage = self.estimate_usage()
shouldBackup() {
const usage = this.estimateUsage();
# Adaptive thresholds based on session complexity
threshold = self.calculate_threshold()
// Adaptive thresholds based on session complexity
const threshold = this.calculateThreshold();
return usage > threshold
return usage > threshold;
}
}
```
This architectural approach means the system can make intelligent decisions about when to intervene, rather than using simple rule-based triggers.
@ -207,25 +213,31 @@ This architectural approach means the system can make intelligent decisions abou
The backup manager implements a strategy pattern, using different backup approaches based on circumstances:
```python
class BackupManager:
def __init__(self):
self.strategies = [
GitBackupStrategy(),
FilesystemBackupStrategy(),
EmergencyBackupStrategy()
]
```javascript
class BackupManager {
constructor() {
this.strategies = [
new GitBackupStrategy(),
new FilesystemBackupStrategy(),
new EmergencyBackupStrategy()
];
}
def execute_backup(self, context):
for strategy in self.strategies:
try:
result = strategy.backup(context)
if result.success:
return result
except Exception:
continue # Try next strategy
async executeBackup(context) {
for (const strategy of this.strategies) {
try {
const result = await strategy.backup(context);
if (result.success) {
return result;
}
} catch (error) {
continue; // Try next strategy
}
}
return self.emergency_backup(context)
return this.emergencyBackup(context);
}
}
```
This ensures that backups almost always succeed, gracefully degrading to simpler approaches when sophisticated methods fail.

View File

@ -55,14 +55,14 @@ Traditional ML models are trained once and deployed. Shadow learners improve inc
The shadow learner identifies several types of patterns:
**Command Patterns**: Which commands tend to succeed or fail in your environment
- `pip install` fails 90% of the time → suggest `pip3 install`
- `python script.py` fails on your system → suggest `python3 script.py`
- `npm install` without `--save` in certain projects → warn about dependency tracking
- `npm install` fails 90% of the time → suggest `npm ci` or check package-lock.json
- `node script.js` fails on your system → suggest `node --version` check or use `npx`
- `npm install` without `--save-dev` for dev dependencies → warn about production vs development packages
**Sequence Patterns**: Common workflows and command chains
- `git add . && git commit` often follows file edits
- `npm install` typically precedes `npm test`
- Reading config files often precedes configuration changes
- Reading package.json often precedes dependency updates
**Context Patterns**: Environmental factors that affect command success
- Commands fail differently in Docker containers vs. native environments
@ -70,9 +70,9 @@ The shadow learner identifies several types of patterns:
- Time-of-day patterns (builds failing during peak hours due to resource contention)
**Error Patterns**: Common failure modes and their solutions
- "Permission denied" errors often require sudo or chmod
- "Command not found" errors have specific alternative commands
- Network timeouts suggest retry strategies
- "Permission denied" errors often require sudo or npm config set prefix
- "Command not found" errors suggest missing global packages or PATH issues
- Network timeouts suggest retry strategies or alternative registries
### Confidence Building
@ -100,8 +100,8 @@ The shadow learner doesn't just record patterns - it builds confidence scores ba
The shadow learner develops deep knowledge about your specific development environment:
- Which Python version is actually available
- How package managers are configured
- Which Node.js version is actually available
- How npm/yarn/pnpm package managers are configured
- What development tools are installed and working
- How permissions are set up
- What network restrictions exist

View File

@ -14,7 +14,7 @@ This creates a jarring experience: you're deep in a debugging session, making pr
### The Repetitive Failure Problem
Human developers naturally learn from mistakes. Try a command that fails, remember not to do it again, adapt. But each new Claude session starts with no memory of previous failures. You find yourself watching Claude repeat the same mistakes - `pip` instead of `pip3`, `python` instead of `python3`, dangerous operations that you know will fail.
Human developers naturally learn from mistakes. Try a command that fails, remember not to do it again, adapt. But each new Claude session starts with no memory of previous failures. You find yourself watching Claude repeat the same mistakes - `npm install` instead of `npm ci`, `node` without proper version checks, dangerous operations that you know will fail.
This isn't Claude's fault - it's a fundamental limitation of the stateless conversation model. But it creates frustration and inefficiency.
@ -62,7 +62,7 @@ You might wonder: why not just train Claude to be better at avoiding these probl
The answer lies in the fundamental difference between general intelligence and environmental adaptation:
**General intelligence** (what Claude provides) is knowledge that applies across all contexts - how to write Python, how to use git, how to debug problems.
**General intelligence** (what Claude provides) is knowledge that applies across all contexts - how to write JavaScript, how to use git, how to debug problems.
**Environmental adaptation** (what shadow learning provides) is knowledge specific to your setup - which commands work on your system, what your typical workflows are, what mistakes you commonly make.

View File

@ -8,31 +8,31 @@ If you have commands that should never be run in your environment:
1. **Edit the command validator**:
```bash
nano hooks/command_validator.py
nano hooks/command-validator.js
```
2. **Find the dangerous_patterns list** (around line 23):
```python
self.dangerous_patterns = [
r'rm\s+-rf\s+/', # Delete root
r'mkfs\.', # Format filesystem
# Add your pattern here
]
2. **Find the dangerousPatterns array** (around line 23):
```javascript
this.dangerousPatterns = [
/rm\s+-rf\s+\//, // Delete root
/mkfs\./, // Format filesystem
// Add your pattern here
];
```
3. **Add your pattern**:
```python
self.dangerous_patterns = [
r'rm\s+-rf\s+/', # Delete root
r'mkfs\.', # Format filesystem
r'docker\s+system\s+prune\s+--all', # Delete all Docker data
r'kubectl\s+delete\s+namespace\s+production', # Delete prod namespace
]
```javascript
this.dangerousPatterns = [
/rm\s+-rf\s+\//, // Delete root
/mkfs\./, // Format filesystem
/docker\s+system\s+prune\s+--all/, // Delete all Docker data
/kubectl\s+delete\s+namespace\s+production/, // Delete prod namespace
];
```
4. **Test your pattern**:
```bash
echo '{"tool": "Bash", "parameters": {"command": "docker system prune --all"}}' | python3 hooks/command_validator.py
echo '{"tool": "Bash", "parameters": {"command": "docker system prune --all"}}' | node hooks/command-validator.js
```
Should return: `{"allow": false, "message": "⛔ Command blocked: Dangerous command pattern detected"}`
@ -41,23 +41,23 @@ If you have commands that should never be run in your environment:
For commands that are risky but sometimes legitimate:
1. **Find the suspicious_patterns list**:
```python
self.suspicious_patterns = [
r'sudo\s+rm', # Sudo with rm
r'chmod\s+777', # Overly permissive
# Add your pattern here
]
1. **Find the suspiciousPatterns array**:
```javascript
this.suspiciousPatterns = [
/sudo\s+rm/, // Sudo with rm
/chmod\s+777/, // Overly permissive
// Add your pattern here
];
```
2. **Add patterns that should warn but not block**:
```python
self.suspicious_patterns = [
r'sudo\s+rm', # Sudo with rm
r'chmod\s+777', # Overly permissive
r'npm\s+install\s+.*--global', # Global npm installs
r'pip\s+install.*--user', # User pip installs
]
```javascript
this.suspiciousPatterns = [
/sudo\s+rm/, // Sudo with rm
/chmod\s+777/, // Overly permissive
/npm\s+install\s+.*--global/, // Global npm installs
/pip\s+install.*--user/, // User pip installs
];
```
## Customize for Your Tech Stack
@ -65,66 +65,66 @@ For commands that are risky but sometimes legitimate:
### For Docker Environments
Add Docker-specific protections:
```python
# In dangerous_patterns:
r'docker\s+rm\s+.*-f.*', # Force remove containers
r'docker\s+rmi\s+.*-f.*', # Force remove images
```javascript
// In dangerousPatterns:
/docker\s+rm\s+.*-f.*/, // Force remove containers
/docker\s+rmi\s+.*-f.*/, // Force remove images
# In suspicious_patterns:
r'docker\s+run.*--privileged', # Privileged containers
r'docker.*-v\s+/:/.*', # Mount root filesystem
// In suspiciousPatterns:
/docker\s+run.*--privileged/, // Privileged containers
/docker.*-v\s+\/:\/.*/, // Mount root filesystem
```
### For Kubernetes
Protect production namespaces:
```python
# In dangerous_patterns:
r'kubectl\s+delete\s+.*production.*',
r'kubectl\s+delete\s+.*prod.*',
r'helm\s+delete\s+.*production.*',
```javascript
// In dangerousPatterns:
/kubectl\s+delete\s+.*production.*/,
/kubectl\s+delete\s+.*prod.*/,
/helm\s+delete\s+.*production.*/,
# In suspicious_patterns:
r'kubectl\s+apply.*production.*',
r'kubectl.*--all-namespaces.*delete',
// In suspiciousPatterns:
/kubectl\s+apply.*production.*/,
/kubectl.*--all-namespaces.*delete/,
```
### For Database Operations
Prevent destructive database commands:
```python
# In dangerous_patterns:
r'DROP\s+DATABASE.*',
r'TRUNCATE\s+TABLE.*',
r'DELETE\s+FROM.*WHERE\s+1=1',
```javascript
// In dangerousPatterns:
/DROP\s+DATABASE.*/i,
/TRUNCATE\s+TABLE.*/i,
/DELETE\s+FROM.*WHERE\s+1=1/i,
# In suspicious_patterns:
r'UPDATE.*SET.*WHERE\s+1=1',
r'ALTER\s+TABLE.*DROP.*',
// In suspiciousPatterns:
/UPDATE.*SET.*WHERE\s+1=1/i,
/ALTER\s+TABLE.*DROP.*/i,
```
## Environment-Specific Patterns
### For Production Servers
```python
# In dangerous_patterns:
r'systemctl\s+stop\s+(nginx|apache|mysql)',
r'service\s+(nginx|apache|mysql)\s+stop',
r'killall\s+-9.*',
```javascript
// In dangerousPatterns:
/systemctl\s+stop\s+(nginx|apache|mysql)/,
/service\s+(nginx|apache|mysql)\s+stop/,
/killall\s+-9.*/,
# In suspicious_patterns:
r'sudo\s+systemctl\s+restart.*',
r'sudo\s+service.*restart.*',
// In suspiciousPatterns:
/sudo\s+systemctl\s+restart.*/,
/sudo\s+service.*restart.*/,
```
### For Development Machines
```python
# In suspicious_patterns:
r'rm\s+-rf\s+node_modules', # Can break local dev
r'git\s+reset\s+--hard\s+HEAD~[0-9]+', # Lose multiple commits
r'git\s+push\s+.*--force.*', # Force push
```javascript
// In suspiciousPatterns:
/rm\s+-rf\s+node_modules/, // Can break local dev
/git\s+reset\s+--hard\s+HEAD~[0-9]+/, // Lose multiple commits
/git\s+push\s+.*--force.*/, // Force push
```
## Test Your Custom Patterns
@ -137,15 +137,15 @@ cat > test_patterns.sh << 'EOF'
# Test dangerous pattern (should block)
echo "Testing dangerous pattern..."
echo '{"tool": "Bash", "parameters": {"command": "docker system prune --all"}}' | python3 hooks/command_validator.py
echo '{"tool": "Bash", "parameters": {"command": "docker system prune --all"}}' | node hooks/command-validator.js
# Test suspicious pattern (should warn)
echo "Testing suspicious pattern..."
echo '{"tool": "Bash", "parameters": {"command": "npm install -g dangerous-package"}}' | python3 hooks/command_validator.py
echo '{"tool": "Bash", "parameters": {"command": "npm install -g dangerous-package"}}' | node hooks/command-validator.js
# Test normal command (should pass)
echo "Testing normal command..."
echo '{"tool": "Bash", "parameters": {"command": "ls -la"}}' | python3 hooks/command_validator.py
echo '{"tool": "Bash", "parameters": {"command": "ls -la"}}' | node hooks/command-validator.js
EOF
chmod +x test_patterns.sh
@ -157,29 +157,34 @@ chmod +x test_patterns.sh
For patterns that depend on file context:
1. **Edit the validation function** to check current directory or files:
```python
def validate_command_safety(self, command: str) -> ValidationResult:
# Your existing patterns...
```javascript
validateCommandSafety(command) {
// Your existing patterns...
# Context-aware validation
if "git push" in command.lower():
# Check if we're in a production branch
try:
current_branch = subprocess.check_output(['git', 'branch', '--show-current'],
text=True).strip()
if current_branch in ['main', 'master', 'production']:
return ValidationResult(
allowed=True,
reason="⚠️ Pushing to protected branch",
severity="warning"
)
except:
pass
// Context-aware validation
if (command.toLowerCase().includes("git push")) {
// Check if we're in a production branch
try {
const { execSync } = require('child_process');
const currentBranch = execSync('git branch --show-current',
{ encoding: 'utf8' }).trim();
if (['main', 'master', 'production'].includes(currentBranch)) {
return {
allowed: true,
reason: "⚠️ Pushing to protected branch",
severity: "warning"
};
}
} catch {
// Ignore errors
}
}
}
```
## Pattern Syntax Reference
Use Python regex patterns:
Use JavaScript regex patterns:
- `\s+` - One or more whitespace characters
- `.*` - Any characters (greedy)
@ -188,11 +193,12 @@ Use Python regex patterns:
- `(option1|option2)` - Either option1 or option2
- `^` - Start of string
- `$` - End of string
- `i` flag - Case insensitive matching
**Examples**:
- `r'rm\s+-rf\s+/'` - Matches "rm -rf /"
- `r'git\s+push.*--force'` - Matches "git push" followed by "--force" anywhere
- `r'^sudo\s+'` - Matches commands starting with "sudo"
- `/rm\s+-rf\s+\//` - Matches "rm -rf /"
- `/git\s+push.*--force/` - Matches "git push" followed by "--force" anywhere
- `/^sudo\s+/` - Matches commands starting with "sudo"
## Reload Changes

View File

@ -103,10 +103,11 @@ Export all hook data to a directory.
## Hook Scripts
### context_monitor.py
### context-monitor.js
**Type**: UserPromptSubmit hook
**Purpose**: Monitor context usage and trigger backups
**Purpose**: Monitor context usage and trigger backups
**Runtime**: Node.js
**Input format**:
```json
@ -128,10 +129,11 @@ Export all hook data to a directory.
---
### command_validator.py
### command-validator.js
**Type**: PreToolUse[Bash] hook
**Purpose**: Validate bash commands for safety and success probability
**Purpose**: Validate bash commands for safety and success probability
**Runtime**: Node.js
**Input format**:
```json
@ -163,10 +165,11 @@ Export all hook data to a directory.
---
### session_logger.py
### session-logger.js
**Type**: PostToolUse[*] hook
**Purpose**: Log tool executions and update learning data
**Purpose**: Log tool executions and update learning data
**Runtime**: Node.js
**Input format**:
```json
@ -192,10 +195,11 @@ Export all hook data to a directory.
---
### session_finalizer.py
### session-finalizer.js
**Type**: Stop hook
**Purpose**: Create session documentation and save state
**Purpose**: Create session documentation and save state
**Runtime**: Node.js
**Input format**:
```json

View File

@ -263,52 +263,62 @@ Hooks are designed to fail safely:
Always validate hook input:
```python
def validate_input(input_data):
if not isinstance(input_data, dict):
raise ValueError("Input must be JSON object")
```javascript
function validateInput(inputData) {
if (typeof inputData !== 'object' || inputData === null || Array.isArray(inputData)) {
throw new Error('Input must be JSON object');
}
tool = input_data.get("tool", "")
if not isinstance(tool, str):
raise ValueError("Tool must be string")
const tool = inputData.tool || '';
if (typeof tool !== 'string') {
throw new Error('Tool must be string');
}
# Validate other fields...
// Validate other fields...
}
```
### Output Sanitization
Ensure hook output is safe:
```python
def safe_message(text):
# Remove potential injection characters
return text.replace('\x00', '').replace('\r', '').replace('\n', '\\n')
response = {
"allow": True,
"message": safe_message(user_input)
```javascript
function safeMessage(text) {
// Remove potential injection characters
return text.replace(/\x00/g, '').replace(/\r/g, '').replace(/\n/g, '\\n');
}
const response = {
allow: true,
message: safeMessage(userInput)
};
```
### File Path Validation
For hooks that access files:
```python
def validate_file_path(path):
# Convert to absolute path
abs_path = os.path.abspath(path)
```javascript
const path = require('path');
function validateFilePath(filePath) {
// Convert to absolute path
const absPath = path.resolve(filePath);
# Check if within project boundaries
project_root = os.path.abspath(".")
if not abs_path.startswith(project_root):
raise ValueError("Path outside project directory")
// Check if within project boundaries
const projectRoot = path.resolve('.');
if (!absPath.startsWith(projectRoot)) {
throw new Error('Path outside project directory');
}
# Check for system files
system_paths = ['/etc', '/usr', '/var', '/sys', '/proc']
for sys_path in system_paths:
if abs_path.startswith(sys_path):
raise ValueError("System file access denied")
// Check for system files
const systemPaths = ['/etc', '/usr', '/var', '/sys', '/proc'];
for (const sysPath of systemPaths) {
if (absPath.startsWith(sysPath)) {
throw new Error('System file access denied');
}
}
}
```
---
@ -319,27 +329,32 @@ def validate_file_path(path):
Test hooks with sample inputs:
```python
def test_command_validator():
import subprocess
import json
```javascript
const { spawn } = require('child_process');
function testCommandValidator() {
// Test dangerous command
const inputData = {
tool: 'Bash',
parameters: { command: 'rm -rf /' }
};
# Test dangerous command
input_data = {
"tool": "Bash",
"parameters": {"command": "rm -rf /"}
}
const process = spawn('node', ['hooks/command-validator.js'], {
stdio: 'pipe'
});
process = subprocess.run(
["python3", "hooks/command_validator.py"],
input=json.dumps(input_data),
capture_output=True,
text=True
)
process.stdin.write(JSON.stringify(inputData));
process.stdin.end();
assert process.returncode == 1 # Should block
response = json.loads(process.stdout)
assert response["allow"] == False
process.on('exit', (code) => {
console.assert(code === 1, 'Should block'); // Should block
});
process.stdout.on('data', (data) => {
const response = JSON.parse(data.toString());
console.assert(response.allow === false);
});
}
```
### Integration Testing
@ -348,7 +363,7 @@ Test with Claude Code directly:
```bash
# Test in development environment
echo '{"tool": "Bash", "parameters": {"command": "ls"}}' | python3 hooks/command_validator.py
echo '{"tool": "Bash", "parameters": {"command": "ls"}}' | node hooks/command-validator.js
# Test hook registration
claude-hooks status
@ -358,25 +373,33 @@ claude-hooks status
Measure hook execution time:
```python
import time
import subprocess
import json
```javascript
const { spawn } = require('child_process');
def benchmark_hook(hook_script, input_data, iterations=100):
times = []
function benchmarkHook(hookScript, inputData, iterations = 100) {
const times = [];
let completed = 0;
for _ in range(iterations):
start = time.time()
subprocess.run(
["python3", hook_script],
input=json.dumps(input_data),
capture_output=True
)
times.append(time.time() - start)
avg_time = sum(times) / len(times)
max_time = max(times)
print(f"Average: {avg_time*1000:.1f}ms, Max: {max_time*1000:.1f}ms")
for (let i = 0; i < iterations; i++) {
const start = Date.now();
const process = spawn('node', [hookScript], {
stdio: 'pipe'
});
process.stdin.write(JSON.stringify(inputData));
process.stdin.end();
process.on('exit', () => {
times.push(Date.now() - start);
completed++;
if (completed === iterations) {
const avgTime = times.reduce((a, b) => a + b, 0) / times.length;
const maxTime = Math.max(...times);
console.log(`Average: ${avgTime.toFixed(1)}ms, Max: ${maxTime.toFixed(1)}ms`);
}
});
}
}
```

View File

@ -30,12 +30,12 @@ You should see output like this:
```
Claude Code Hooks Installation
==================================
Checking Python version... Python 3.11 found
Python version is compatible
Installing Python dependencies... SUCCESS
Checking Node.js version... Node.js v18.17.0 found
Node.js version is compatible
Installing dependencies... SUCCESS
```
**Notice** that the installer found your Python version and installed dependencies automatically.
**Notice** that the installer found your Node.js version and installed dependencies automatically.
The installer will ask if you want to automatically configure Claude Code. Say **yes** - we want to see this working right away:
@ -59,26 +59,24 @@ Let's deliberately try a command that often fails to see the validation in actio
Start a new Claude conversation and try this:
> "Run `npm install express` to add the express library"
Watch what happens. You should see the command execute normally. Now try a command that commonly fails:
> "Run `pip install requests` to add the requests library"
Watch what happens. You should see something like:
You should see something like:
```
⚠️ Warning: pip commands often fail (confidence: 88%)
💡 Suggestion: Use "pip3 install requests"
💡 Suggestion: Use "pip3 install requests" or "npm install requests"
```
**Notice** that Claude Hooks warned you about the command before it ran. The system doesn't have any learned patterns yet (it's brand new), but it has built-in knowledge about common failures.
Now try the suggested command:
> "Run `pip3 install requests`"
This time it should work without warnings. The system is learning that `pip3` succeeds where `pip` fails on your system.
**Notice** that Claude Hooks warned you about the command before it ran. The system has built-in knowledge about common command failures and suggests working alternatives.
## Step 4: Experience the Shadow Learner
Let's make another common mistake and watch the system learn from it.
Let's see the system learn from a failure pattern.
Try this command:
@ -92,7 +90,7 @@ If you're on a system where `python` isn't available, you'll see it fail. Now tr
```
⛔ Blocked: python commands often fail (confidence: 95%)
💡 Suggestion: Use "python3 --version"
💡 Suggestion: Use "python3 --version" or "node --version"
```
The shadow learner observed that `python` failed and is now protecting you from repeating the same mistake. This is intelligence building in real-time.
@ -103,11 +101,11 @@ Let's trigger the context monitoring system. The hooks track how much of Claude'
Create several files to simulate a longer session:
> "Create a file called `test1.py` with a simple hello world script"
> "Create a file called `test1.js` with a simple hello world script"
> "Now create `test2.py` with a different example"
> "Now create `test2.js` with a different example"
> "Create `test3.py` with some more code"
> "Create `test3.js` with some more code"
> "Show me the current git status"
@ -142,9 +140,9 @@ You should see a file that looks like:
**Duration**: 2024-01-15T14:30:00 → 2024-01-15T14:45:00
## Files Modified (3)
- test1.py
- test2.py
- test3.py
- test1.js
- test2.js
- test3.js
## Tools Used (8 total)
- Write: 3 times
@ -152,8 +150,8 @@ You should see a file that looks like:
- Read: 3 times
## Recent Commands (5)
- `pip3 install requests` (2024-01-15T14:32:00)
- `python3 --version` (2024-01-15T14:35:00)
- `npm install express` (2024-01-15T14:32:00)
- `node --version` (2024-01-15T14:35:00)
...
```
@ -190,7 +188,7 @@ You should see entries like:
Success Rate: 100%
```
**This is the intelligence you've built**. The system now knows that on your machine, `pip` fails but `pip3` works, and `python3` works better than `python`.
**This is the intelligence you've built**. The system now knows command patterns that work reliably in your environment versus ones that commonly fail.
## What You've Experienced

222
hooks/command-validator.js Executable file
View File

@ -0,0 +1,222 @@
#!/usr/bin/env node
/**
* Command Validator Hook - PreToolUse[Bash] hook
* Validates bash commands using shadow learner insights
*/
const fs = require('fs-extra');
const path = require('path');
// Add lib directory to require path
const libPath = path.join(__dirname, '..', 'lib');
const { ShadowLearner } = require(path.join(libPath, 'shadow-learner'));
/**
 * Validates Bash commands before Claude Code executes them.
 *
 * Two layers of validation:
 *  1. Static safety patterns — a blacklist of destructive / remote-code
 *     patterns that are always blocked, plus suspicious patterns that are
 *     allowed with a warning.
 *  2. Shadow-learner prediction — historical success/failure data used to
 *     block commands that are very likely to fail.
 */
class CommandValidator {
  constructor() {
    this.shadowLearner = new ShadowLearner();

    // Commands matching any of these are blocked outright.
    this.dangerousPatterns = [
      /rm\s+-rf\s+\//, // Delete root
      /mkfs\./, // Format filesystem
      /dd\s+if=.*of=\/dev\//, // Overwrite devices
      /:\(\){ :\|:& };:/, // Fork bomb
      /curl.*\|\s*bash/, // Pipe to shell
      /wget.*\|\s*sh/, // Pipe to shell
      /;.*rm\s+-rf/, // Command chaining with rm
      /&&.*rm\s+-rf/, // Command chaining with rm
    ];

    // Commands matching these are allowed but flagged with a warning.
    this.suspiciousPatterns = [
      /sudo\s+rm/, // Sudo with rm
      /chmod\s+777/, // Overly permissive
      /\/etc\/passwd/, // System files
      /\/etc\/shadow/, // System files
      /nc.*-l.*-p/, // Netcat listener
    ];
  }

  /**
   * Static safety validation against the dangerous/suspicious pattern lists.
   *
   * @param {string} command - Raw Bash command text.
   * @returns {{allowed: boolean, reason: string, severity?: string}}
   */
  validateCommandSafety(command) {
    // Normalize for analysis; all patterns are written lower-case.
    const normalized = command.toLowerCase().trim();

    for (const pattern of this.dangerousPatterns) {
      if (pattern.test(normalized)) {
        return {
          allowed: false,
          reason: 'Dangerous command pattern detected',
          severity: 'critical'
        };
      }
    }

    for (const pattern of this.suspiciousPatterns) {
      if (pattern.test(normalized)) {
        return {
          allowed: true, // Allow but warn
          reason: 'Suspicious command pattern detected',
          severity: 'warning'
        };
      }
    }

    return { allowed: true, reason: 'Command appears safe' };
  }

  /**
   * Predictive validation: ask the shadow learner whether this command is
   * likely to fail based on previously observed executions.
   *
   * @param {string} command - Raw Bash command text.
   * @returns {{allowed: boolean, reason: string, severity?: string, suggestions?: string[]}}
   */
  validateWithShadowLearner(command) {
    try {
      const prediction = this.shadowLearner.predictCommandOutcome(command);

      if (!prediction.likelySuccess && prediction.confidence > 0.8) {
        // High-confidence failure prediction: block and surface a fix.
        const suggestions = prediction.suggestions || [];
        const suggestionText = suggestions.length > 0 ? ` Try: ${suggestions[0]}` : '';

        return {
          allowed: false,
          reason: `Command likely to fail (confidence: ${Math.round(prediction.confidence * 100)}%)${suggestionText}`,
          severity: 'medium',
          suggestions
        };
      } else if (prediction.warnings && prediction.warnings.length > 0) {
        return {
          allowed: true,
          reason: prediction.warnings[0],
          severity: 'warning',
          suggestions: prediction.suggestions || []
        };
      }
    } catch (error) {
      // If shadow learner fails, don't block
    }

    return { allowed: true, reason: 'No issues detected' };
  }

  /**
   * Main validation entry point: combines safety and predictive checks.
   *
   * @param {string} command - Raw Bash command text.
   * @returns {{allowed: boolean, reason: string, severity?: string, suggestions?: string[]}}
   */
  validateCommand(command) {
    // Safety validation (blocking)
    const safetyResult = this.validateCommandSafety(command);
    if (!safetyResult.allowed) {
      return safetyResult;
    }

    // Shadow learner validation (predictive)
    const predictionResult = this.validateWithShadowLearner(command);

    // FIX: a blocking prediction must always win. Previously, a command that
    // both matched a "suspicious" safety pattern AND was predicted to fail
    // (allowed=false, severity 'medium') had its block masked by the
    // allow-with-warning safety result.
    if (!predictionResult.allowed) {
      return predictionResult;
    }

    // Otherwise surface the most significant non-blocking result.
    if (['high', 'critical'].includes(predictionResult.severity)) {
      return predictionResult;
    } else if (['warning', 'medium'].includes(safetyResult.severity)) {
      return safetyResult;
    } else {
      return predictionResult;
    }
  }
}
/**
 * Hook entry point: reads the tool-use payload from stdin, validates any
 * Bash command it contains, and prints a JSON allow/deny response.
 * Exit code 1 blocks the operation; exit code 0 allows it.
 */
async function main() {
  // Print the response object ({allow, message}) and terminate.
  const respond = (allow, message, code) => {
    console.log(JSON.stringify({ allow, message }));
    process.exit(code);
  };

  try {
    let raw = '';

    if (process.stdin.isTTY) {
      // Invoked directly from a terminal: use a canned payload for testing.
      raw = JSON.stringify({
        tool: 'Bash',
        parameters: { command: 'pip install requests' }
      });
    } else {
      // Normal hook invocation: the payload arrives on stdin.
      process.stdin.setEncoding('utf8');
      for await (const chunk of process.stdin) {
        raw += chunk;
      }
    }

    const payload = JSON.parse(raw);
    const tool = payload.tool || '';
    const command = (payload.parameters || {}).command || '';

    // Only Bash commands are validated; everything else passes through.
    if (tool !== 'Bash' || !command) {
      respond(true, 'Not a bash command', 0);
    }

    const result = new CommandValidator().validateCommand(command);

    if (!result.allowed) {
      // Exit code 1 = block operation
      respond(false, `⛔ Command blocked: ${result.reason}`, 1);
    }

    if (['warning', 'medium'].includes(result.severity)) {
      // Allow, but surface the warning (and a suggestion when available).
      const warningEmoji = result.severity === 'warning' ? '⚠️' : '🚨';
      let message = `${warningEmoji} ${result.reason}`;
      if (result.suggestions && result.suggestions.length > 0) {
        message += `\n💡 Suggestion: ${result.suggestions[0]}`;
      }
      respond(true, message, 0);
    }

    respond(true, 'Command validated', 0);
  } catch (error) {
    // Never block on validation errors - always allow operation
    respond(true, `Validation error: ${error.message}`, 0);
  }
}
// Safety net: a rejected promise that escapes main() must still produce a
// valid allow-response on stdout — the hook must never crash and block Claude.
process.on('unhandledRejection', (error) => {
  const response = {
    allow: true,
    message: `Validation error: ${error.message}`
  };
  console.log(JSON.stringify(response));
  process.exit(0);
});

main();

View File

@ -1,184 +0,0 @@
#!/usr/bin/env python3
"""
Command Validator Hook - PreToolUse[Bash] hook
Validates bash commands using shadow learner insights
"""
import sys
import json
import re
import os
from pathlib import Path
# Add lib directory to path
sys.path.insert(0, str(Path(__file__).parent.parent / "lib"))
from shadow_learner import ShadowLearner
from models import ValidationResult
class CommandValidator:
"""Validates bash commands for safety and success probability"""
def __init__(self):
self.shadow_learner = ShadowLearner()
# Dangerous command patterns
self.dangerous_patterns = [
r'rm\s+-rf\s+/', # Delete root
r'mkfs\.', # Format filesystem
r'dd\s+if=.*of=/dev/', # Overwrite devices
r':(){ :|:& };:', # Fork bomb
r'curl.*\|\s*bash', # Pipe to shell
r'wget.*\|\s*sh', # Pipe to shell
r';.*rm\s+-rf', # Command chaining with rm
r'&&.*rm\s+-rf', # Command chaining with rm
]
self.suspicious_patterns = [
r'sudo\s+rm', # Sudo with rm
r'chmod\s+777', # Overly permissive
r'/etc/passwd', # System files
r'/etc/shadow', # System files
r'nc.*-l.*-p', # Netcat listener
]
def validate_command_safety(self, command: str) -> ValidationResult:
"""Comprehensive command safety validation"""
# Normalize command for analysis
normalized = command.lower().strip()
# Check for dangerous patterns
for pattern in self.dangerous_patterns:
if re.search(pattern, normalized):
return ValidationResult(
allowed=False,
reason=f"Dangerous command pattern detected",
severity="critical"
)
# Check for suspicious patterns
for pattern in self.suspicious_patterns:
if re.search(pattern, normalized):
return ValidationResult(
allowed=True, # Allow but warn
reason=f"Suspicious command pattern detected",
severity="warning"
)
return ValidationResult(allowed=True, reason="Command appears safe")
def validate_with_shadow_learner(self, command: str) -> ValidationResult:
"""Use shadow learner to predict command success"""
try:
prediction = self.shadow_learner.predict_command_outcome(command)
if not prediction["likely_success"] and prediction["confidence"] > 0.8:
suggestions = prediction.get("suggestions", [])
suggestion_text = f" Try: {suggestions[0]}" if suggestions else ""
return ValidationResult(
allowed=False,
reason=f"Command likely to fail (confidence: {prediction['confidence']:.0%}){suggestion_text}",
severity="medium",
suggestions=suggestions
)
elif prediction["warnings"]:
return ValidationResult(
allowed=True,
reason=prediction["warnings"][0],
severity="warning",
suggestions=prediction.get("suggestions", [])
)
except Exception:
# If shadow learner fails, don't block
pass
return ValidationResult(allowed=True, reason="No issues detected")
def validate_command(self, command: str) -> ValidationResult:
"""Main validation entry point"""
# Safety validation (blocking)
safety_result = self.validate_command_safety(command)
if not safety_result.allowed:
return safety_result
# Shadow learner validation (predictive)
prediction_result = self.validate_with_shadow_learner(command)
# Return most significant result
if prediction_result.severity in ["high", "critical"]:
return prediction_result
elif safety_result.severity in ["warning", "medium"]:
return safety_result
else:
return prediction_result
def main():
"""Main hook entry point"""
try:
# Read input from Claude Code
input_data = json.loads(sys.stdin.read())
# Extract command from parameters
tool = input_data.get("tool", "")
parameters = input_data.get("parameters", {})
command = parameters.get("command", "")
if tool != "Bash" or not command:
# Not a bash command, allow it
response = {"allow": True, "message": "Not a bash command"}
print(json.dumps(response))
sys.exit(0)
# Validate command
validator = CommandValidator()
result = validator.validate_command(command)
if not result.allowed:
# Block the command
response = {
"allow": False,
"message": f"⛔ Command blocked: {result.reason}"
}
print(json.dumps(response))
sys.exit(1) # Exit code 1 = block operation
elif result.severity in ["warning", "medium"]:
# Allow with warning
warning_emoji = "⚠️" if result.severity == "warning" else "🚨"
message = f"{warning_emoji} {result.reason}"
if result.suggestions:
message += f"\n💡 Suggestion: {result.suggestions[0]}"
response = {
"allow": True,
"message": message
}
print(json.dumps(response))
sys.exit(0)
else:
# Allow without warning
response = {"allow": True, "message": "Command validated"}
print(json.dumps(response))
sys.exit(0)
except Exception as e:
# Never block on validation errors - always allow operation
response = {
"allow": True,
"message": f"Validation error: {str(e)}"
}
print(json.dumps(response))
sys.exit(0)
if __name__ == "__main__":
main()

109
hooks/context-monitor.js Executable file
View File

@ -0,0 +1,109 @@
#!/usr/bin/env node
/**
* Context Monitor Hook - UserPromptSubmit hook
* Monitors context usage and triggers backups when needed
*/
const fs = require('fs-extra');
const path = require('path');
// Add lib directory to require path
const libPath = path.join(__dirname, '..', 'lib');
const { ContextMonitor } = require(path.join(libPath, 'context-monitor'));
const { BackupManager } = require(path.join(libPath, 'backup-manager'));
const { SessionStateManager } = require(path.join(libPath, 'session-state'));
/**
 * UserPromptSubmit hook entry point.
 *
 * Reads the prompt payload from stdin, updates context-usage estimates,
 * triggers an automatic backup when the monitor says thresholds are crossed,
 * and always responds with `{ allow: true }` — this hook only observes,
 * never blocks.
 */
async function main() {
  try {
    // Read input from Claude Code
    let inputData = '';

    // Handle stdin input
    if (process.stdin.isTTY) {
      // If called directly for testing
      inputData = JSON.stringify({ prompt: 'test prompt', context_size: 1000 });
    } else {
      // Read from stdin
      process.stdin.setEncoding('utf8');
      for await (const chunk of process.stdin) {
        inputData += chunk;
      }
    }

    const input = JSON.parse(inputData);

    // Initialize components
    const contextMonitor = new ContextMonitor();
    const backupManager = new BackupManager();
    const sessionManager = new SessionStateManager();

    // Update context estimates from prompt
    contextMonitor.updateFromPrompt(input);

    // Check if backup should be triggered
    const backupDecision = contextMonitor.checkBackupTriggers('UserPromptSubmit', input);

    let message;
    if (backupDecision.shouldBackup) {
      // Execute backup; the session summary is captured alongside so the
      // backup can be tied back to what was happening in the session.
      const sessionState = await sessionManager.getSessionSummary();
      const backupResult = await backupManager.executeBackup(backupDecision, sessionState);

      // Record backup in session
      await sessionManager.addBackup(backupResult.backupId, {
        reason: backupDecision.reason,
        success: backupResult.success
      });

      // Add context snapshot
      await sessionManager.addContextSnapshot({
        usageRatio: contextMonitor.getContextUsageRatio(),
        promptCount: contextMonitor.promptCount,
        toolExecutions: contextMonitor.toolExecutions
      });

      // Notify about backup (usage ratio rendered as a percentage)
      if (backupResult.success) {
        message = `Auto-backup created: ${backupDecision.reason} (usage: ${(contextMonitor.getContextUsageRatio() * 100).toFixed(1)}%)`;
      } else {
        message = `Backup attempted but failed: ${backupResult.error}`;
      }
    } else {
      message = `Context usage: ${(contextMonitor.getContextUsageRatio() * 100).toFixed(1)}%`;
    }

    // Always allow operation (this is a monitoring hook)
    const response = {
      allow: true,
      message
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  } catch (error) {
    // Never block on errors - always allow operation
    const response = {
      allow: true,
      message: `Context monitor error: ${error.message}`
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  }
}
// Safety net: a rejected promise that escapes main() must still produce a
// valid allow-response on stdout — the hook must never crash and block Claude.
process.on('unhandledRejection', (error) => {
  const response = {
    allow: true,
    message: `Context monitor error: ${error.message}`
  };
  console.log(JSON.stringify(response));
  process.exit(0);
});

main();

View File

@ -1,83 +0,0 @@
#!/usr/bin/env python3
"""
Context Monitor Hook - UserPromptSubmit hook
Monitors context usage and triggers backups when needed
"""
import sys
import json
import os
from pathlib import Path
# Add lib directory to path
sys.path.insert(0, str(Path(__file__).parent.parent / "lib"))
from context_monitor import ContextMonitor
from backup_manager import BackupManager
from session_state import SessionStateManager
def main():
"""Main hook entry point"""
try:
# Read input from Claude Code
input_data = json.loads(sys.stdin.read())
# Initialize components
context_monitor = ContextMonitor()
backup_manager = BackupManager()
session_manager = SessionStateManager()
# Update context estimates from prompt
context_monitor.update_from_prompt(input_data)
# Check if backup should be triggered
backup_decision = context_monitor.check_backup_triggers("UserPromptSubmit", input_data)
if backup_decision.should_backup:
# Execute backup
session_state = session_manager.get_session_summary()
backup_result = backup_manager.execute_backup(backup_decision, session_state)
# Record backup in session
session_manager.add_backup(backup_result.backup_id, {
"reason": backup_decision.reason,
"success": backup_result.success
})
# Add context snapshot
session_manager.add_context_snapshot({
"usage_ratio": context_monitor.get_context_usage_ratio(),
"prompt_count": context_monitor.prompt_count,
"tool_executions": context_monitor.tool_executions
})
# Notify about backup
if backup_result.success:
message = f"Auto-backup created: {backup_decision.reason} (usage: {context_monitor.get_context_usage_ratio():.1%})"
else:
message = f"Backup attempted but failed: {backup_result.error}"
else:
message = f"Context usage: {context_monitor.get_context_usage_ratio():.1%}"
# Always allow operation (this is a monitoring hook)
response = {
"allow": True,
"message": message
}
print(json.dumps(response))
sys.exit(0)
except Exception as e:
# Never block on errors - always allow operation
response = {
"allow": True,
"message": f"Context monitor error: {str(e)}"
}
print(json.dumps(response))
sys.exit(0)
if __name__ == "__main__":
main()

210
hooks/session-finalizer.js Executable file
View File

@ -0,0 +1,210 @@
#!/usr/bin/env node
/**
* Session Finalizer Hook - Stop hook
* Finalizes session, creates documentation, and saves state
*/
const fs = require('fs-extra');
const path = require('path');
// Add lib directory to require path
const libPath = path.join(__dirname, '..', 'lib');
const { SessionStateManager } = require(path.join(libPath, 'session-state'));
const { ShadowLearner } = require(path.join(libPath, 'shadow-learner'));
const { ContextMonitor } = require(path.join(libPath, 'context-monitor'));
/**
 * Write RECOVERY_GUIDE.md when the session ended with high context usage
 * (> 80%), so the user can pick up interrupted work.
 *
 * Best-effort: all errors are swallowed so guide creation can never break
 * session finalization.
 *
 * @param {Object} sessionSummary - Summary from SessionStateManager; both
 *   camelCase and snake_case field names are tolerated (presumably for
 *   compatibility across lib versions — TODO confirm).
 * @param {Object} contextMonitor - ContextMonitor instance used to read the
 *   final context-usage ratio.
 */
async function createRecoveryInfo(sessionSummary, contextMonitor) {
  try {
    const contextUsage = contextMonitor.getContextUsageRatio();

    // If context was high when session ended, create recovery guide
    if (contextUsage > 0.8) {
      let recoveryContent = `# Session Recovery Information
## Context Status
- **Context Usage**: ${(contextUsage * 100).toFixed(1)}% when session ended
- **Reason**: Session ended with high context usage
## What This Means
Your Claude session ended while using a significant amount of context. This could mean:
1. You were working on a complex task
2. Context limits were approaching
3. Session was interrupted
## Recovery Steps
### 1. Check Your Progress
Review these recently modified files:
`;

      // List modified files (either field-name convention)
      for (const filePath of sessionSummary.modifiedFiles || sessionSummary.modified_files || []) {
        recoveryContent += `- ${filePath}\n`;
      }

      recoveryContent += `
### 2. Review Last Actions
Recent commands executed:
`;

      // Only the last five commands are shown
      const recentCommands = (sessionSummary.commandsExecuted || sessionSummary.commands_executed || []).slice(-5);
      for (const cmdInfo of recentCommands) {
        recoveryContent += `- \`${cmdInfo.command}\`\n`;
      }

      recoveryContent += `
### 3. Continue Your Work
1. Check \`ACTIVE_TODOS.md\` for pending tasks
2. Review \`LAST_SESSION.md\` for complete session history
3. Use \`git status\` to see current file changes
4. Consider committing your progress: \`git add -A && git commit -m "Work in progress"\`
### 4. Available Backups
`;

      // One line per backup, with a pass/fail marker
      for (const backup of sessionSummary.backupHistory || sessionSummary.backup_history || []) {
        const status = backup.success ? '✅' : '❌';
        recoveryContent += `- ${status} ${backup.backup_id || backup.backupId} - ${backup.reason}\n`;
      }

      recoveryContent += `
## Quick Recovery Commands
\`\`\`bash
# Check current status
git status
# View recent changes
git diff
# List available backups
ls .claude_hooks/backups/
# View active todos
cat ACTIVE_TODOS.md
# View last session summary
cat LAST_SESSION.md
\`\`\`
*This recovery guide was created because your session ended with ${(contextUsage * 100).toFixed(1)}% context usage.*
`;

      // Written to the current working directory
      await fs.writeFile('RECOVERY_GUIDE.md', recoveryContent);
    }
  } catch (error) {
    // Don't let recovery guide creation break session finalization
  }
}
/**
 * Append a one-line JSON summary of the completed session to
 * .claude_hooks/logs/session_completions.jsonl for later analysis.
 *
 * Best-effort: any error is swallowed so finalization cannot fail here.
 *
 * @param {Object} sessionSummary - Summary from SessionStateManager; both
 *   camelCase and snake_case field names are tolerated.
 */
async function logSessionCompletion(sessionSummary) {
  try {
    const logDir = path.join('.claude_hooks', 'logs');
    await fs.ensureDir(logDir);

    // Missing fields default to 'unknown' / 0 / empty rather than failing.
    const completionLog = {
      timestamp: new Date().toISOString(),
      type: 'session_completion',
      session_id: sessionSummary.sessionId || sessionSummary.session_id || 'unknown',
      duration_minutes: (sessionSummary.sessionStats || sessionSummary.session_stats || {}).duration_minutes || 0,
      total_tools: (sessionSummary.sessionStats || sessionSummary.session_stats || {}).total_tool_calls || 0,
      files_modified: (sessionSummary.modifiedFiles || sessionSummary.modified_files || []).length,
      commands_executed: (sessionSummary.sessionStats || sessionSummary.session_stats || {}).total_commands || 0,
      backups_created: (sessionSummary.backupHistory || sessionSummary.backup_history || []).length
    };

    // JSONL: one JSON object per line
    const logFile = path.join(logDir, 'session_completions.jsonl');
    await fs.appendFile(logFile, JSON.stringify(completionLog) + '\n');
  } catch (error) {
    // Don't let logging errors break finalization
  }
}
/**
 * Stop hook entry point.
 *
 * Finalizes the session: writes continuation docs, persists learned
 * patterns, creates a recovery guide for high-context sessions, cleans up
 * session state, and logs a completion record. Always responds with
 * `{ allow: true }` — finalization must never block Claude.
 */
async function main() {
  try {
    let inputData = {};

    // Handle stdin input (the Stop hook may be invoked with no payload)
    if (!process.stdin.isTTY) {
      try {
        let input = '';
        process.stdin.setEncoding('utf8');
        for await (const chunk of process.stdin) {
          input += chunk;
        }
        if (input.trim()) {
          inputData = JSON.parse(input);
        }
      } catch (error) {
        // If input parsing fails, use empty object
        inputData = {};
      }
    }
    // NOTE(review): inputData is parsed but not currently used below.

    // Initialize components
    const sessionManager = new SessionStateManager();
    const shadowLearner = new ShadowLearner();
    const contextMonitor = new ContextMonitor();

    // Create session documentation
    await sessionManager.createContinuationDocs();

    // Save all learned patterns
    await shadowLearner.saveDatabase();

    // Get session summary for logging
    const sessionSummary = await sessionManager.getSessionSummary();

    // Create recovery guide if session was interrupted
    await createRecoveryInfo(sessionSummary, contextMonitor);

    // Clean up session
    await sessionManager.cleanupSession();

    // Log session completion
    await logSessionCompletion(sessionSummary);

    // Summary tolerates both camelCase and snake_case field names
    const modifiedFiles = sessionSummary.modifiedFiles || sessionSummary.modified_files || [];
    const totalTools = (sessionSummary.sessionStats || sessionSummary.session_stats || {}).total_tool_calls || 0;

    // Always allow - this is a cleanup hook
    const response = {
      allow: true,
      message: `Session finalized. Modified ${modifiedFiles.length} files, used ${totalTools} tools.`
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  } catch (error) {
    // Session finalization should never block
    const response = {
      allow: true,
      message: `Session finalization error: ${error.message}`
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  }
}
// Safety net: a rejected promise that escapes main() must still produce a
// valid allow-response on stdout — the hook must never crash and block Claude.
process.on('unhandledRejection', (error) => {
  const response = {
    allow: true,
    message: `Session finalization error: ${error.message}`
  };
  console.log(JSON.stringify(response));
  process.exit(0);
});

main();

159
hooks/session-logger.js Executable file
View File

@ -0,0 +1,159 @@
#!/usr/bin/env node
/**
* Session Logger Hook - PostToolUse[*] hook
* Logs all tool usage and feeds data to shadow learner
*/
const fs = require('fs-extra');
const path = require('path');
// Add lib directory to require path
const libPath = path.join(__dirname, '..', 'lib');
const { ShadowLearner } = require(path.join(libPath, 'shadow-learner'));
const { SessionStateManager } = require(path.join(libPath, 'session-state'));
const { ContextMonitor } = require(path.join(libPath, 'context-monitor'));
/**
 * Append one tool-execution record to today's JSONL log file, then prune
 * logs older than a week. Errors are swallowed so logging can never break
 * the calling hook.
 *
 * @param {Object} execution - Tool execution record to persist.
 */
async function logExecution(execution) {
  try {
    const logDir = path.join('.claude_hooks', 'logs');
    await fs.ensureDir(logDir);

    // One log file per UTC day: executions_YYYYMMDD.jsonl
    const stamp = new Date().toISOString().slice(0, 10).split('-').join('');
    const target = path.join(logDir, `executions_${stamp}.jsonl`);

    // JSONL: one record per line
    const line = JSON.stringify(execution) + '\n';
    await fs.appendFile(target, line);

    // Keep only the last 7 days of logs
    await cleanupOldLogs(logDir);
  } catch (error) {
    // Don't let logging errors break the hook
  }
}
/**
 * Delete execution log files in `logDir` older than seven days.
 *
 * Best-effort: all failures (missing directory, races with concurrent hook
 * processes deleting the same file) are ignored so cleanup can never break
 * the calling hook.
 *
 * @param {string} logDir - Directory containing executions_YYYYMMDD.jsonl files.
 */
async function cleanupOldLogs(logDir) {
  const cutoffTime = Date.now() - (7 * 24 * 60 * 60 * 1000); // 7 days ago

  try {
    const files = await fs.readdir(logDir);
    const logFiles = files.filter(file => file.match(/^executions_\d{8}\.jsonl$/));

    // Stat/delete each file independently and in parallel. The previous
    // sequential loop aborted the entire cleanup pass on the first per-file
    // error (e.g. another hook instance deleting the same file), leaving
    // remaining stale logs behind.
    await Promise.all(
      logFiles.map(async (logFile) => {
        try {
          const filePath = path.join(logDir, logFile);
          const stats = await fs.stat(filePath);
          if (stats.mtime.getTime() < cutoffTime) {
            await fs.unlink(filePath);
          }
        } catch (error) {
          // Ignore per-file cleanup errors
        }
      })
    );
  } catch (error) {
    // Ignore cleanup errors (e.g. log directory does not exist yet)
  }
}
/**
 * PostToolUse hook entry point.
 *
 * Records every tool execution: feeds it to the shadow learner, updates
 * session state and context-usage counters, and appends a JSONL log entry.
 * Always responds with `{ allow: true }` — this runs after the tool, so it
 * can only observe.
 */
async function main() {
  try {
    let inputData = '';

    // Handle stdin input
    if (process.stdin.isTTY) {
      // If called directly for testing
      inputData = JSON.stringify({
        tool: 'Bash',
        parameters: { command: 'echo test' },
        success: true,
        execution_time: 0.1
      });
    } else {
      // Read from stdin
      process.stdin.setEncoding('utf8');
      for await (const chunk of process.stdin) {
        inputData += chunk;
      }
    }

    const input = JSON.parse(inputData);

    // Extract tool execution data (missing fields default to benign values)
    const tool = input.tool || '';
    const parameters = input.parameters || {};
    const success = input.success !== undefined ? input.success : true;
    const error = input.error || '';
    const executionTime = input.execution_time || 0.0;

    // Create tool execution record
    const execution = {
      timestamp: new Date(),
      tool,
      parameters,
      success,
      errorMessage: error || null,
      executionTime,
      context: {}
    };

    // Initialize components
    const shadowLearner = new ShadowLearner();
    const sessionManager = new SessionStateManager();
    const contextMonitor = new ContextMonitor();

    // Feed execution to shadow learner
    // NOTE(review): not awaited — assumed synchronous; confirm in lib/shadow-learner.
    shadowLearner.learnFromExecution(execution);

    // Update session state
    await sessionManager.updateFromToolUse(input);

    // Update context monitor
    contextMonitor.updateFromToolUse(input);

    // Save learned patterns periodically
    // (Only save every 10 executions to avoid too much disk I/O)
    // NOTE(review): assumes contextMonitor.toolExecutions persists across
    // hook invocations (each invocation constructs a fresh ContextMonitor) —
    // TODO confirm it loads its counters from disk.
    if (contextMonitor.toolExecutions % 10 === 0) {
      await shadowLearner.saveDatabase();
    }

    // Log execution to file for debugging (optional)
    await logExecution(execution);

    // Always allow - this is a post-execution hook
    const response = {
      allow: true,
      message: `Logged ${tool} execution`
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  } catch (error) {
    // Post-execution hooks should never block
    const response = {
      allow: true,
      message: `Logging error: ${error.message}`
    };
    console.log(JSON.stringify(response));
    process.exit(0);
  }
}
// Safety net: a rejected promise that escapes main() must still produce a
// valid allow-response on stdout — the hook must never crash and block Claude.
process.on('unhandledRejection', (error) => {
  const response = {
    allow: true,
    message: `Logging error: ${error.message}`
  };
  console.log(JSON.stringify(response));
  process.exit(0);
});

main();

View File

@ -1,180 +0,0 @@
#!/usr/bin/env python3
"""
Session Finalizer Hook - Stop hook
Finalizes session, creates documentation, and saves state
"""
import sys
import json
import os
from pathlib import Path
# Add lib directory to path
sys.path.insert(0, str(Path(__file__).parent.parent / "lib"))
from session_state import SessionStateManager
from shadow_learner import ShadowLearner
from context_monitor import ContextMonitor
def main():
"""Main hook entry point"""
try:
# Read input from Claude Code (if any)
try:
input_data = json.loads(sys.stdin.read())
except:
input_data = {}
# Initialize components
session_manager = SessionStateManager()
shadow_learner = ShadowLearner()
context_monitor = ContextMonitor()
# Create session documentation
session_manager.create_continuation_docs()
# Save all learned patterns
shadow_learner.save_database()
# Get session summary for logging
session_summary = session_manager.get_session_summary()
# Create recovery guide if session was interrupted
create_recovery_info(session_summary, context_monitor)
# Clean up session
session_manager.cleanup_session()
# Log session completion
log_session_completion(session_summary)
# Always allow - this is a cleanup hook
response = {
"allow": True,
"message": f"Session finalized. Modified {len(session_summary.get('modified_files', []))} files, used {session_summary.get('session_stats', {}).get('total_tool_calls', 0)} tools."
}
print(json.dumps(response))
sys.exit(0)
except Exception as e:
# Session finalization should never block
response = {
"allow": True,
"message": f"Session finalization error: {str(e)}"
}
print(json.dumps(response))
sys.exit(0)
def create_recovery_info(session_summary: dict, context_monitor: ContextMonitor):
"""Create recovery information if needed"""
try:
context_usage = context_monitor.get_context_usage_ratio()
# If context was high when session ended, create recovery guide
if context_usage > 0.8:
recovery_content = f"""# Session Recovery Information
## Context Status
- **Context Usage**: {context_usage:.1%} when session ended
- **Reason**: Session ended with high context usage
## What This Means
Your Claude session ended while using a significant amount of context. This could mean:
1. You were working on a complex task
2. Context limits were approaching
3. Session was interrupted
## Recovery Steps
### 1. Check Your Progress
Review these recently modified files:
"""
for file_path in session_summary.get('modified_files', []):
recovery_content += f"- {file_path}\n"
recovery_content += f"""
### 2. Review Last Actions
Recent commands executed:
"""
recent_commands = session_summary.get('commands_executed', [])[-5:]
for cmd_info in recent_commands:
recovery_content += f"- `{cmd_info['command']}`\n"
recovery_content += f"""
### 3. Continue Your Work
1. Check `ACTIVE_TODOS.md` for pending tasks
2. Review `LAST_SESSION.md` for complete session history
3. Use `git status` to see current file changes
4. Consider committing your progress: `git add -A && git commit -m "Work in progress"`
### 4. Available Backups
"""
for backup in session_summary.get('backup_history', []):
status = "" if backup['success'] else ""
recovery_content += f"- {status} {backup['backup_id']} - {backup['reason']}\n"
recovery_content += f"""
## Quick Recovery Commands
```bash
# Check current status
git status
# View recent changes
git diff
# List available backups
ls .claude_hooks/backups/
# View active todos
cat ACTIVE_TODOS.md
# View last session summary
cat LAST_SESSION.md
```
*This recovery guide was created because your session ended with {context_usage:.1%} context usage.*
"""
with open("RECOVERY_GUIDE.md", 'w') as f:
f.write(recovery_content)
except Exception:
pass # Don't let recovery guide creation break session finalization
def log_session_completion(session_summary: dict):
"""Log session completion for analysis"""
try:
log_dir = Path(".claude_hooks/logs")
log_dir.mkdir(parents=True, exist_ok=True)
from datetime import datetime
completion_log = {
"timestamp": datetime.now().isoformat(),
"type": "session_completion",
"session_id": session_summary.get("session_id", "unknown"),
"duration_minutes": session_summary.get("session_stats", {}).get("duration_minutes", 0),
"total_tools": session_summary.get("session_stats", {}).get("total_tool_calls", 0),
"files_modified": len(session_summary.get("modified_files", [])),
"commands_executed": session_summary.get("session_stats", {}).get("total_commands", 0),
"backups_created": len(session_summary.get("backup_history", []))
}
log_file = log_dir / "session_completions.jsonl"
with open(log_file, 'a') as f:
f.write(json.dumps(completion_log) + "\n")
except Exception:
pass # Don't let logging errors break finalization
if __name__ == "__main__":
main()

View File

@ -1,124 +0,0 @@
#!/usr/bin/env python3
"""
Session Logger Hook - PostToolUse[*] hook
Logs all tool usage and feeds data to shadow learner
"""
import sys
import json
import os
from datetime import datetime
from pathlib import Path
# Add lib directory to path
sys.path.insert(0, str(Path(__file__).parent.parent / "lib"))
from shadow_learner import ShadowLearner
from session_state import SessionStateManager
from context_monitor import ContextMonitor
from models import ToolExecution
def main():
"""Main hook entry point"""
try:
# Read input from Claude Code
input_data = json.loads(sys.stdin.read())
# Extract tool execution data
tool = input_data.get("tool", "")
parameters = input_data.get("parameters", {})
success = input_data.get("success", True)
error = input_data.get("error", "")
execution_time = input_data.get("execution_time", 0.0)
# Create tool execution record
execution = ToolExecution(
timestamp=datetime.now(),
tool=tool,
parameters=parameters,
success=success,
error_message=error if error else None,
execution_time=execution_time,
context={}
)
# Initialize components
shadow_learner = ShadowLearner()
session_manager = SessionStateManager()
context_monitor = ContextMonitor()
# Feed execution to shadow learner
shadow_learner.learn_from_execution(execution)
# Update session state
session_manager.update_from_tool_use(input_data)
# Update context monitor
context_monitor.update_from_tool_use(input_data)
# Save learned patterns periodically
# (Only save every 10 executions to avoid too much disk I/O)
if context_monitor.tool_executions % 10 == 0:
shadow_learner.save_database()
# Log execution to file for debugging (optional)
log_execution(execution)
# Always allow - this is a post-execution hook
response = {
"allow": True,
"message": f"Logged {tool} execution"
}
print(json.dumps(response))
sys.exit(0)
except Exception as e:
# Post-execution hooks should never block
response = {
"allow": True,
"message": f"Logging error: {str(e)}"
}
print(json.dumps(response))
sys.exit(0)
def log_execution(execution: ToolExecution):
"""Log execution to file for debugging and analysis"""
try:
log_dir = Path(".claude_hooks/logs")
log_dir.mkdir(parents=True, exist_ok=True)
# Create daily log file
log_file = log_dir / f"executions_{datetime.now().strftime('%Y%m%d')}.jsonl"
# Append execution record
with open(log_file, 'a') as f:
f.write(json.dumps(execution.to_dict()) + "\n")
# Clean up old log files (keep last 7 days)
cleanup_old_logs(log_dir)
except Exception:
# Don't let logging errors break the hook
pass
def cleanup_old_logs(log_dir: Path):
"""Clean up log files older than 7 days"""
try:
import time
cutoff_time = time.time() - (7 * 24 * 3600) # 7 days ago
for log_file in log_dir.glob("executions_*.jsonl"):
if log_file.stat().st_mtime < cutoff_time:
log_file.unlink()
except Exception:
pass
if __name__ == "__main__":
main()

View File

@ -1,21 +0,0 @@
"""Claude Code Hooks System - Core Library"""
__version__ = "1.0.0"
__author__ = "Claude Code Hooks Contributors"
from .models import *
from .shadow_learner import ShadowLearner
from .context_monitor import ContextMonitor
from .backup_manager import BackupManager
from .session_state import SessionStateManager
__all__ = [
"ShadowLearner",
"ContextMonitor",
"BackupManager",
"SessionStateManager",
"Pattern",
"ToolExecution",
"HookResult",
"ValidationResult"
]

536
lib/backup-manager.js Normal file
View File

@ -0,0 +1,536 @@
/**
* Backup Manager - Resilient backup execution system
* Node.js implementation
*/
const fs = require('fs-extra');
const path = require('path');
const { spawn } = require('child_process');
class BackupManager {
/**
 * Create a BackupManager rooted at the given project directory.
 *
 * Backups live under <projectRoot>/.claude_hooks/backups; the directory is
 * created eagerly so the instance is usable immediately after construction.
 *
 * @param {string} [projectRoot='.'] - project directory to back up
 */
constructor(projectRoot = '.') {
  // Resolve once so every later path.join() is absolute and cwd-independent.
  this.projectRoot = path.resolve(projectRoot);
  this.backupDir = path.join(this.projectRoot, '.claude_hooks', 'backups');
  // Synchronous on purpose: constructors cannot await.
  fs.ensureDirSync(this.backupDir);
  // Backup settings
  this.maxBackups = 10; // backups beyond this count are pruned, oldest first
  this.logFile = path.join(this.backupDir, 'backup.log');
}
/**
 * Execute a backup with comprehensive error handling.
 *
 * Runs four phases in order — directory structure, git commit, file copies,
 * session-state snapshot — then prunes old backups and logs the outcome.
 * Never throws: on any error a minimal "emergency" backup is attempted and
 * a { success: false } result is returned instead.
 *
 * @param {{reason: string, urgency: string}} decision - why this backup fires
 * @param {Object} sessionState - session snapshot to persist alongside files
 * @returns {Promise<Object>} result with success flag, backupId, backupPath
 *   and per-component results (git / filesystem / sessionState)
 */
async executeBackup(decision, sessionState) {
  const backupId = this._generateBackupId();
  const backupPath = path.join(this.backupDir, backupId);
  try {
    // Create backup structure (also writes metadata.json).
    // NOTE(review): backupInfo is currently unused — kept for the side effect.
    const backupInfo = await this._createBackupStructure(backupPath, sessionState);
    // Git backup (if possible); failure here is reported, not thrown.
    const gitResult = await this._attemptGitBackup(backupId, decision.reason);
    // File system backup of modified + important project files
    const fsResult = await this._createFilesystemBackup(backupPath, sessionState);
    // Session state backup (state/, logs/, patterns/)
    const stateResult = await this._backupSessionState(backupPath, sessionState);
    // Clean up old backups beyond maxBackups
    await this._cleanupOldBackups();
    // Log successful backup to backup.log
    await this._logBackup(backupId, decision, true);
    return {
      success: true,
      backupId,
      backupPath,
      gitSuccess: gitResult.success,
      components: {
        git: gitResult,
        filesystem: fsResult,
        sessionState: stateResult
      }
    };
  } catch (error) {
    // Backup failures should never break the session — fall back to a
    // minimal JSON dump of the session state and report the failure.
    const fallbackResult = await this._createMinimalBackup(sessionState);
    await this._logBackup(backupId, decision, false, error.message);
    return {
      success: false,
      backupId,
      error: error.message,
      fallbackPerformed: fallbackResult
    };
  }
}
/**
* Create backup from project path (external API)
*/
async createBackup(projectPath, context = {}, force = false) {
const decision = {
reason: context.trigger || 'manual',
urgency: 'medium',
force
};
const sessionState = {
modifiedFiles: context.modified_files || [],
toolUsage: context.tool_usage || {},
timestamp: new Date().toISOString(),
...context
};
const result = await this.executeBackup(decision, sessionState);
return result.success ? result.backupId : null;
}
/**
 * Get backup information for a single backup.
 *
 * Reads <backupDir>/<backupId>/metadata.json; when a files/ directory is
 * present the metadata is augmented with a recursive file listing under
 * the `files_backed_up` key.
 *
 * @param {string} backupId - backup directory name, e.g. "backup_20250720_091250"
 * @returns {Promise<Object|null>} metadata, or null when missing/unreadable
 */
async getBackupInfo(backupId) {
  try {
    const backupPath = path.join(this.backupDir, backupId);
    const metadataFile = path.join(backupPath, 'metadata.json');
    if (await fs.pathExists(metadataFile)) {
      const metadata = await fs.readJson(metadataFile);
      // Add file list if available
      const filesDir = path.join(backupPath, 'files');
      if (await fs.pathExists(filesDir)) {
        const files = await this._getBackupFiles(filesDir);
        metadata.files_backed_up = files;
      }
      return metadata;
    }
  } catch (error) {
    // Read/parse problems degrade to "no info" rather than throwing.
    console.error('Error reading backup info:', error.message);
  }
  return null;
}
/**
* Generate unique backup identifier
*/
_generateBackupId() {
const timestamp = new Date().toISOString()
.replace(/[:-]/g, '')
.replace(/\.\d{3}Z$/, '')
.replace('T', '_');
return `backup_${timestamp}`;
}
/**
 * Create the basic backup directory structure.
 *
 * Layout: <backupPath>/{files,logs,state}/ plus a metadata.json snapshot
 * recording the backup id, timestamp, session state and project root.
 *
 * @param {string} backupPath - absolute directory for this backup
 * @param {Object} sessionState - session snapshot embedded in the metadata
 * @returns {Promise<Object>} the metadata object that was written
 */
async _createBackupStructure(backupPath, sessionState) {
  await fs.ensureDir(backupPath);
  // Create subdirectories
  await fs.ensureDir(path.join(backupPath, 'files'));
  await fs.ensureDir(path.join(backupPath, 'logs'));
  await fs.ensureDir(path.join(backupPath, 'state'));
  // Create backup metadata
  const metadata = {
    backup_id: path.basename(backupPath),
    timestamp: new Date().toISOString(),
    session_state: sessionState,
    project_root: this.projectRoot
  };
  await fs.writeJson(path.join(backupPath, 'metadata.json'), metadata, { spaces: 2 });
  return metadata;
}
/**
 * Attempt a git backup (stage everything and commit) with proper error handling.
 *
 * Initializes a repository if none exists, stages all changes, and commits
 * with a message embedding the reason and backup id. Never throws: every
 * failure mode is returned as { success: false, error }.
 *
 * @param {string} backupId - id embedded in the commit message
 * @param {string} reason - human-readable trigger for this backup
 * @returns {Promise<Object>} { success, commitId?, message?, error? }
 */
async _attemptGitBackup(backupId, reason) {
  try {
    // Check if git repo exists
    if (!(await fs.pathExists(path.join(this.projectRoot, '.git')))) {
      // Initialize repo if none exists
      const initResult = await this._runGitCommand(['init']);
      if (!initResult.success) {
        return {
          success: false,
          error: `Git init failed: ${initResult.error}`
        };
      }
    }
    // Add all changes
    const addResult = await this._runGitCommand(['add', '-A']);
    if (!addResult.success) {
      return {
        success: false,
        error: `Git add failed: ${addResult.error}`
      };
    }
    // Check if there are changes to commit; empty porcelain output means
    // a clean tree, which counts as success (nothing to do).
    const statusResult = await this._runGitCommand(['status', '--porcelain']);
    if (statusResult.success && !statusResult.stdout.trim()) {
      return {
        success: true,
        message: 'No changes to commit'
      };
    }
    // Create commit
    const commitMsg = `Claude hooks auto-backup: ${reason} (${backupId})`;
    const commitResult = await this._runGitCommand(['commit', '-m', commitMsg]);
    if (!commitResult.success) {
      return {
        success: false,
        error: `Git commit failed: ${commitResult.error}`
      };
    }
    // Get commit ID for the result message (may be 'unknown' on failure)
    const commitId = await this._getLatestCommit();
    return {
      success: true,
      commitId,
      message: `Committed as ${commitId.substring(0, 8)}`
    };
  } catch (error) {
    return {
      success: false,
      error: `Unexpected git error: ${error.message}`
    };
  }
}
/**
 * Run a git command with timeout and error handling.
 *
 * Always resolves (never rejects) with { success, stdout, stderr, error }
 * so callers can branch on `success`. On timeout the child is killed
 * (default SIGTERM) and a failure result is returned; the subsequent
 * 'close' event's resolve() is then a no-op, since a promise settles once.
 *
 * @param {string[]} args - git CLI arguments
 * @param {number} [timeoutMs=60000] - wall-clock limit for the command
 * @returns {Promise<{success: boolean, stdout?: string, stderr?: string, error: ?string}>}
 */
_runGitCommand(args, timeoutMs = 60000) {
  return new Promise((resolve) => {
    const child = spawn('git', args, {
      cwd: this.projectRoot,
      stdio: ['pipe', 'pipe', 'pipe']
    });
    let stdout = '';
    let stderr = '';
    child.stdout.on('data', (data) => {
      stdout += data.toString();
    });
    child.stderr.on('data', (data) => {
      stderr += data.toString();
    });
    const timeout = setTimeout(() => {
      child.kill();
      resolve({
        success: false,
        error: 'Git operation timed out'
      });
    }, timeoutMs);
    child.on('close', (code) => {
      clearTimeout(timeout);
      resolve({
        success: code === 0,
        stdout: stdout.trim(),
        stderr: stderr.trim(),
        error: code !== 0 ? stderr.trim() : null
      });
    });
    // 'error' fires e.g. when the git binary cannot be spawned at all.
    child.on('error', (error) => {
      clearTimeout(timeout);
      resolve({
        success: false,
        error: error.message
      });
    });
  });
}
/**
* Get the latest commit ID
*/
async _getLatestCommit() {
try {
const result = await this._runGitCommand(['rev-parse', 'HEAD'], 10000);
return result.success ? result.stdout : 'unknown';
} catch (error) {
return 'unknown';
}
}
/**
* Create filesystem backup of important files
*/
async _createFilesystemBackup(backupPath, sessionState) {
try {
const filesDir = path.join(backupPath, 'files');
await fs.ensureDir(filesDir);
// Backup modified files mentioned in session
const modifiedFiles = sessionState.modifiedFiles || sessionState.modified_files || [];
const filesBackedUp = [];
for (const filePath of modifiedFiles) {
try {
const src = path.resolve(filePath);
if (await fs.pathExists(src) && (await fs.stat(src)).isFile()) {
// Create relative path structure
const relativePath = path.relative(this.projectRoot, src);
const dst = path.join(filesDir, relativePath);
await fs.ensureDir(path.dirname(dst));
await fs.copy(src, dst, { preserveTimestamps: true });
filesBackedUp.push(src);
}
} catch (error) {
// Log error but continue with other files
await this._logFileBackupError(filePath, error);
}
}
// Backup important project files
const importantFiles = [
'package.json', 'requirements.txt', 'Cargo.toml',
'pyproject.toml', 'setup.py', '.gitignore',
'README.md', 'CLAUDE.md'
];
for (const fileName of importantFiles) {
const filePath = path.join(this.projectRoot, fileName);
if (await fs.pathExists(filePath)) {
try {
const dst = path.join(filesDir, fileName);
await fs.copy(filePath, dst, { preserveTimestamps: true });
filesBackedUp.push(filePath);
} catch (error) {
// Not critical
}
}
}
return {
success: true,
message: `Backed up ${filesBackedUp.length} files`,
metadata: { files: filesBackedUp }
};
} catch (error) {
return { success: false, error: error.message };
}
}
/**
 * Backup session state and context.
 *
 * Writes the session snapshot to state/session.json and copies the hook
 * logs (.claude_hooks/logs -> logs/) and learned-pattern database
 * (.claude_hooks/patterns -> state/patterns/) into the backup when present.
 *
 * @param {string} backupPath - backup directory created by _createBackupStructure
 * @param {Object} sessionState - session snapshot to persist
 * @returns {Promise<Object>} { success, message?, error? }
 */
async _backupSessionState(backupPath, sessionState) {
  try {
    const stateDir = path.join(backupPath, 'state');
    // Save session state
    await fs.writeJson(path.join(stateDir, 'session.json'), sessionState, { spaces: 2 });
    // Copy hook logs if they exist
    const logsSource = path.join(this.projectRoot, '.claude_hooks', 'logs');
    if (await fs.pathExists(logsSource)) {
      const logsDest = path.join(backupPath, 'logs');
      await fs.copy(logsSource, logsDest);
    }
    // Copy patterns database
    const patternsSource = path.join(this.projectRoot, '.claude_hooks', 'patterns');
    if (await fs.pathExists(patternsSource)) {
      const patternsDest = path.join(stateDir, 'patterns');
      await fs.copy(patternsSource, patternsDest);
    }
    return {
      success: true,
      message: 'Session state backed up'
    };
  } catch (error) {
    return { success: false, error: error.message };
  }
}
/**
* Create minimal backup when full backup fails
*/
async _createMinimalBackup(sessionState) {
try {
// At minimum, save session state to a simple file
const emergencyFile = path.join(this.backupDir, 'emergency_backup.json');
const emergencyData = {
timestamp: new Date().toISOString(),
session_state: sessionState,
type: 'emergency_backup'
};
await fs.writeJson(emergencyFile, emergencyData, { spaces: 2 });
return true;
} catch (error) {
return false;
}
}
/**
 * Remove old backups to save space.
 *
 * Keeps the newest `this.maxBackups` backup_* directories (ordered by
 * mtime) and deletes the rest. Any failure is swallowed — cleanup must
 * never break a backup in progress.
 *
 * @returns {Promise<void>}
 */
async _cleanupOldBackups() {
  try {
    // Get all backup directories
    const entries = await fs.readdir(this.backupDir, { withFileTypes: true });
    const backupDirs = entries
      .filter(entry => entry.isDirectory() && entry.name.startsWith('backup_'))
      .map(entry => ({
        name: entry.name,
        path: path.join(this.backupDir, entry.name)
      }));
    // Sort by creation time (newest first)
    const backupsWithStats = await Promise.all(
      backupDirs.map(async (backup) => {
        const stats = await fs.stat(backup.path);
        return { ...backup, mtime: stats.mtime };
      })
    );
    backupsWithStats.sort((a, b) => b.mtime - a.mtime);
    // Remove old backups beyond maxBackups
    const oldBackups = backupsWithStats.slice(this.maxBackups);
    for (const oldBackup of oldBackups) {
      await fs.remove(oldBackup.path);
    }
  } catch (error) {
    // Cleanup failures shouldn't break backup
  }
}
/**
 * Log a backup operation as one JSON line appended to backup.log.
 *
 * @param {string} backupId - id of the backup being logged
 * @param {{reason: string, urgency: string}} decision - backup trigger info
 * @param {boolean} success - whether the backup succeeded
 * @param {string} [error=''] - error message on failure
 * @returns {Promise<void>}
 */
async _logBackup(backupId, decision, success, error = '') {
  try {
    const logEntry = {
      timestamp: new Date().toISOString(),
      backup_id: backupId,
      reason: decision.reason,
      urgency: decision.urgency,
      success,
      error
    };
    // Append to log file (JSON Lines format)
    await fs.appendFile(this.logFile, JSON.stringify(logEntry) + '\n');
  } catch (error) {
    // Logging failures shouldn't break backup
  }
}
/**
 * Log a per-file backup error as one JSON line appended to backup.log.
 *
 * @param {string} filePath - the file that failed to copy
 * @param {Error} error - the failure
 * @returns {Promise<void>}
 */
async _logFileBackupError(filePath, error) {
  try {
    const errorEntry = {
      timestamp: new Date().toISOString(),
      type: 'file_backup_error',
      file_path: filePath,
      error: error.message
    };
    await fs.appendFile(this.logFile, JSON.stringify(errorEntry) + '\n');
  } catch (error) {
    // Ignore logging errors
  }
}
/**
 * List available backups.
 *
 * Scans backupDir for backup_* directories, loads each metadata.json
 * (skipping corrupted ones), and returns the metadata objects sorted by
 * timestamp, newest first. Errors degrade to an empty/partial list.
 *
 * @returns {Promise<Object[]>} backup metadata objects
 */
async listBackups() {
  const backups = [];
  try {
    const entries = await fs.readdir(this.backupDir, { withFileTypes: true });
    const backupDirs = entries
      .filter(entry => entry.isDirectory() && entry.name.startsWith('backup_'))
      .map(entry => path.join(this.backupDir, entry.name));
    for (const backupDir of backupDirs) {
      const metadataFile = path.join(backupDir, 'metadata.json');
      if (await fs.pathExists(metadataFile)) {
        try {
          const metadata = await fs.readJson(metadataFile);
          backups.push(metadata);
        } catch (error) {
          // Skip corrupted metadata
        }
      }
    }
  } catch (error) {
    // Return empty list on error
  }
  // ISO-8601 timestamps sort correctly as strings; newest first.
  return backups.sort((a, b) => {
    const timeA = a.timestamp || '';
    const timeB = b.timestamp || '';
    return timeB.localeCompare(timeA);
  });
}
/**
 * Get the list of files in a backup's files/ directory, recursively.
 *
 * @param {string} filesDir - directory to walk
 * @param {string} [relativeTo=filesDir] - base for the returned relative paths
 * @returns {Promise<string[]>} relative file paths (partial list on error)
 */
async _getBackupFiles(filesDir, relativeTo = filesDir) {
  const files = [];
  try {
    const entries = await fs.readdir(filesDir, { withFileTypes: true });
    for (const entry of entries) {
      const fullPath = path.join(filesDir, entry.name);
      const relativePath = path.relative(relativeTo, fullPath);
      if (entry.isDirectory()) {
        // Recurse, keeping paths relative to the original root.
        const subFiles = await this._getBackupFiles(fullPath, relativeTo);
        files.push(...subFiles);
      } else {
        files.push(relativePath);
      }
    }
  } catch (error) {
    // Return what we have
  }
  return files;
}
}
module.exports = { BackupManager };

View File

@ -1,388 +0,0 @@
#!/usr/bin/env python3
"""Backup Manager - Resilient backup execution system"""
import os
import json
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List
try:
from .models import BackupResult, GitBackupResult, BackupDecision
except ImportError:
from models import BackupResult, GitBackupResult, BackupDecision
class BackupManager:
"""Handles backup execution with comprehensive error handling"""
def __init__(self, project_root: str = "."):
self.project_root = Path(project_root).resolve()
self.backup_dir = self.project_root / ".claude_hooks" / "backups"
self.backup_dir.mkdir(parents=True, exist_ok=True)
# Backup settings
self.max_backups = 10
self.log_file = self.backup_dir / "backup.log"
def execute_backup(self, decision: BackupDecision,
session_state: Dict[str, Any]) -> BackupResult:
"""Execute backup with comprehensive error handling"""
backup_id = self._generate_backup_id()
backup_path = self.backup_dir / backup_id
try:
# Create backup structure
backup_info = self._create_backup_structure(backup_path, session_state)
# Git backup (if possible)
git_result = self._attempt_git_backup(backup_id, decision.reason)
# File system backup
fs_result = self._create_filesystem_backup(backup_path, session_state)
# Session state backup
state_result = self._backup_session_state(backup_path, session_state)
# Clean up old backups
self._cleanup_old_backups()
# Log successful backup
self._log_backup(backup_id, decision, success=True)
return BackupResult(
success=True,
backup_id=backup_id,
backup_path=str(backup_path),
git_success=git_result.success,
components={
"git": git_result,
"filesystem": fs_result,
"session_state": state_result
}
)
except Exception as e:
# Backup failures should never break the session
fallback_result = self._create_minimal_backup(session_state)
self._log_backup(backup_id, decision, success=False, error=str(e))
return BackupResult(
success=False,
backup_id=backup_id,
error=str(e),
fallback_performed=fallback_result
)
def _generate_backup_id(self) -> str:
"""Generate unique backup identifier"""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
return f"backup_{timestamp}"
def _create_backup_structure(self, backup_path: Path, session_state: Dict[str, Any]) -> Dict[str, Any]:
"""Create basic backup directory structure"""
backup_path.mkdir(parents=True, exist_ok=True)
# Create subdirectories
(backup_path / "files").mkdir(exist_ok=True)
(backup_path / "logs").mkdir(exist_ok=True)
(backup_path / "state").mkdir(exist_ok=True)
# Create backup metadata
metadata = {
"backup_id": backup_path.name,
"timestamp": datetime.now().isoformat(),
"session_state": session_state,
"project_root": str(self.project_root)
}
with open(backup_path / "metadata.json", 'w') as f:
json.dump(metadata, f, indent=2)
return metadata
def _attempt_git_backup(self, backup_id: str, reason: str) -> GitBackupResult:
"""Attempt git backup with proper error handling"""
try:
# Check if git repo exists
if not (self.project_root / ".git").exists():
# Initialize repo if none exists
result = subprocess.run(
["git", "init"],
cwd=self.project_root,
capture_output=True,
text=True,
timeout=30
)
if result.returncode != 0:
return GitBackupResult(
success=False,
error=f"Git init failed: {result.stderr}"
)
# Add all changes
result = subprocess.run(
["git", "add", "-A"],
cwd=self.project_root,
capture_output=True,
text=True,
timeout=60
)
if result.returncode != 0:
return GitBackupResult(
success=False,
error=f"Git add failed: {result.stderr}"
)
# Check if there are changes to commit
result = subprocess.run(
["git", "status", "--porcelain"],
cwd=self.project_root,
capture_output=True,
text=True,
timeout=30
)
if not result.stdout.strip():
return GitBackupResult(
success=True,
message="No changes to commit"
)
# Create commit
commit_msg = f"Claude hooks auto-backup: {reason} ({backup_id})"
result = subprocess.run(
["git", "commit", "-m", commit_msg],
cwd=self.project_root,
capture_output=True,
text=True,
timeout=60
)
if result.returncode != 0:
return GitBackupResult(
success=False,
error=f"Git commit failed: {result.stderr}"
)
# Get commit ID
commit_id = self._get_latest_commit()
return GitBackupResult(
success=True,
commit_id=commit_id,
message=f"Committed as {commit_id[:8]}"
)
except subprocess.TimeoutExpired:
return GitBackupResult(
success=False,
error="Git operation timed out"
)
except subprocess.CalledProcessError as e:
return GitBackupResult(
success=False,
error=f"Git error: {e}"
)
except Exception as e:
return GitBackupResult(
success=False,
error=f"Unexpected git error: {e}"
)
def _get_latest_commit(self) -> str:
"""Get the latest commit ID"""
try:
result = subprocess.run(
["git", "rev-parse", "HEAD"],
cwd=self.project_root,
capture_output=True,
text=True,
timeout=10
)
if result.returncode == 0:
return result.stdout.strip()
except Exception:
pass
return "unknown"
def _create_filesystem_backup(self, backup_path: Path,
session_state: Dict[str, Any]) -> BackupResult:
"""Create filesystem backup of important files"""
try:
files_dir = backup_path / "files"
files_dir.mkdir(exist_ok=True)
# Backup modified files mentioned in session
modified_files = session_state.get("modified_files", [])
files_backed_up = []
for file_path in modified_files:
try:
src = Path(file_path)
if src.exists() and src.is_file():
# Create relative path structure
rel_path = src.relative_to(self.project_root) if src.is_relative_to(self.project_root) else src.name
dst = files_dir / rel_path
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(src, dst)
files_backed_up.append(str(src))
except Exception as e:
# Log error but continue with other files
self._log_file_backup_error(file_path, e)
# Backup important project files
important_files = [
"package.json", "requirements.txt", "Cargo.toml",
"pyproject.toml", "setup.py", ".gitignore",
"README.md", "CLAUDE.md"
]
for file_name in important_files:
file_path = self.project_root / file_name
if file_path.exists():
try:
dst = files_dir / file_name
shutil.copy2(file_path, dst)
files_backed_up.append(str(file_path))
except Exception:
pass # Not critical
return BackupResult(
success=True,
message=f"Backed up {len(files_backed_up)} files",
metadata={"files": files_backed_up}
)
except Exception as e:
return BackupResult(success=False, error=str(e))
def _backup_session_state(self, backup_path: Path,
session_state: Dict[str, Any]) -> BackupResult:
"""Backup session state and context"""
try:
state_dir = backup_path / "state"
# Save session state
with open(state_dir / "session.json", 'w') as f:
json.dump(session_state, f, indent=2)
# Copy hook logs if they exist
logs_source = self.project_root / ".claude_hooks" / "logs"
if logs_source.exists():
logs_dest = backup_path / "logs"
shutil.copytree(logs_source, logs_dest, exist_ok=True)
# Copy patterns database
patterns_source = self.project_root / ".claude_hooks" / "patterns"
if patterns_source.exists():
patterns_dest = state_dir / "patterns"
shutil.copytree(patterns_source, patterns_dest, exist_ok=True)
return BackupResult(
success=True,
message="Session state backed up"
)
except Exception as e:
return BackupResult(success=False, error=str(e))
def _create_minimal_backup(self, session_state: Dict[str, Any]) -> bool:
"""Create minimal backup when full backup fails"""
try:
# At minimum, save session state to a simple file
emergency_file = self.backup_dir / "emergency_backup.json"
emergency_data = {
"timestamp": datetime.now().isoformat(),
"session_state": session_state,
"type": "emergency_backup"
}
with open(emergency_file, 'w') as f:
json.dump(emergency_data, f, indent=2)
return True
except Exception:
return False
def _cleanup_old_backups(self):
"""Remove old backups to save space"""
try:
# Get all backup directories
backup_dirs = [d for d in self.backup_dir.iterdir()
if d.is_dir() and d.name.startswith("backup_")]
# Sort by creation time (newest first)
backup_dirs.sort(key=lambda d: d.stat().st_mtime, reverse=True)
# Remove old backups beyond max_backups
for old_backup in backup_dirs[self.max_backups:]:
shutil.rmtree(old_backup)
except Exception:
pass # Cleanup failures shouldn't break backup
def _log_backup(self, backup_id: str, decision: BackupDecision,
success: bool, error: str = ""):
"""Log backup operation"""
try:
log_entry = {
"timestamp": datetime.now().isoformat(),
"backup_id": backup_id,
"reason": decision.reason,
"urgency": decision.urgency,
"success": success,
"error": error
}
# Append to log file
with open(self.log_file, 'a') as f:
f.write(json.dumps(log_entry) + "\n")
except Exception:
pass # Logging failures shouldn't break backup
def _log_file_backup_error(self, file_path: str, error: Exception):
"""Log file backup errors"""
try:
error_entry = {
"timestamp": datetime.now().isoformat(),
"type": "file_backup_error",
"file_path": file_path,
"error": str(error)
}
with open(self.log_file, 'a') as f:
f.write(json.dumps(error_entry) + "\n")
except Exception:
pass
def list_backups(self) -> List[Dict[str, Any]]:
"""List available backups"""
backups = []
try:
backup_dirs = [d for d in self.backup_dir.iterdir()
if d.is_dir() and d.name.startswith("backup_")]
for backup_dir in backup_dirs:
metadata_file = backup_dir / "metadata.json"
if metadata_file.exists():
try:
with open(metadata_file, 'r') as f:
metadata = json.load(f)
backups.append(metadata)
except Exception:
pass
except Exception:
pass
return sorted(backups, key=lambda b: b.get("timestamp", ""), reverse=True)

View File

@ -1,202 +0,0 @@
#!/usr/bin/env python3
"""
Claude Hooks CLI - Command line interface for managing hooks
"""
import argparse
import json
import sys
from datetime import datetime
from pathlib import Path
from .backup_manager import BackupManager
from .session_state import SessionStateManager
from .shadow_learner import ShadowLearner
from .context_monitor import ContextMonitor
def list_backups():
"""List available backups"""
backup_manager = BackupManager()
backups = backup_manager.list_backups()
if not backups:
print("No backups found.")
return
print("Available Backups:")
print("==================")
for backup in backups:
timestamp = backup.get("timestamp", "unknown")
backup_id = backup.get("backup_id", "unknown")
reason = backup.get("session_state", {}).get("backup_history", [])
if reason:
reason = reason[-1].get("reason", "unknown")
else:
reason = "unknown"
print(f"🗂️ {backup_id}")
print(f" 📅 {timestamp}")
print(f" 📝 {reason}")
print()
def show_session_status():
"""Show current session status"""
session_manager = SessionStateManager()
context_monitor = ContextMonitor()
summary = session_manager.get_session_summary()
context_summary = context_monitor.get_session_summary()
print("Session Status:")
print("===============")
print(f"Session ID: {summary.get('session_id', 'unknown')}")
print(f"Duration: {summary.get('session_stats', {}).get('duration_minutes', 0)} minutes")
print(f"Context Usage: {context_summary.get('context_usage_ratio', 0):.1%}")
print(f"Tool Calls: {summary.get('session_stats', {}).get('total_tool_calls', 0)}")
print(f"Files Modified: {len(summary.get('modified_files', []))}")
print(f"Commands Executed: {summary.get('session_stats', {}).get('total_commands', 0)}")
print(f"Backups Created: {len(summary.get('backup_history', []))}")
print()
if summary.get('modified_files'):
print("Modified Files:")
for file_path in summary['modified_files']:
print(f" - {file_path}")
print()
if context_summary.get('should_backup'):
print("⚠️ Backup recommended (high context usage)")
else:
print("✅ No backup needed currently")
def show_patterns():
"""Show learned patterns"""
shadow_learner = ShadowLearner()
print("Learned Patterns:")
print("=================")
# Command patterns
command_patterns = shadow_learner.db.command_patterns
if command_patterns:
print("\n🖥️ Command Patterns:")
for pattern_id, pattern in list(command_patterns.items())[:10]: # Show top 10
cmd = pattern.trigger.get("command", "unknown")
confidence = pattern.confidence
evidence = pattern.evidence_count
success_rate = pattern.success_rate
print(f" {cmd}")
print(f" Confidence: {confidence:.1%}")
print(f" Evidence: {evidence} samples")
print(f" Success Rate: {success_rate:.1%}")
# Context patterns
context_patterns = shadow_learner.db.context_patterns
if context_patterns:
print("\n🔍 Context Patterns:")
for pattern_id, pattern in list(context_patterns.items())[:5]: # Show top 5
error_type = pattern.trigger.get("error_type", "unknown")
confidence = pattern.confidence
evidence = pattern.evidence_count
print(f" {error_type}")
print(f" Confidence: {confidence:.1%}")
print(f" Evidence: {evidence} samples")
if not command_patterns and not context_patterns:
print("No patterns learned yet. Use Claude Code to start building the knowledge base!")
def clear_patterns():
"""Clear learned patterns"""
response = input("Are you sure you want to clear all learned patterns? (y/N): ")
if response.lower() == 'y':
shadow_learner = ShadowLearner()
shadow_learner.db = shadow_learner._load_database() # Reset to empty
shadow_learner.save_database()
print("✅ Patterns cleared successfully")
else:
print("Operation cancelled")
def export_data():
"""Export all hook data"""
export_dir = Path("claude_hooks_export")
export_dir.mkdir(exist_ok=True)
# Export session state
session_manager = SessionStateManager()
summary = session_manager.get_session_summary()
with open(export_dir / "session_data.json", 'w') as f:
json.dump(summary, f, indent=2)
# Export patterns
shadow_learner = ShadowLearner()
with open(export_dir / "patterns.json", 'w') as f:
json.dump(shadow_learner.db.to_dict(), f, indent=2)
# Export logs
logs_dir = Path(".claude_hooks/logs")
if logs_dir.exists():
import shutil
shutil.copytree(logs_dir, export_dir / "logs", dirs_exist_ok=True)
print(f"✅ Data exported to {export_dir}")
def main():
    """Main CLI entry point: parse the sub-command and dispatch to a handler."""
    parser = argparse.ArgumentParser(description="Claude Code Hooks CLI")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # Register one subcommand per CLI action.
    subparsers.add_parser("list-backups", help="List available backups")
    subparsers.add_parser("status", help="Show current session status")
    subparsers.add_parser("patterns", help="Show learned patterns")
    subparsers.add_parser("clear-patterns", help="Clear all learned patterns")
    subparsers.add_parser("export", help="Export all hook data")

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        return

    # Dispatch table keeps command names next to their handlers.
    handlers = {
        "list-backups": list_backups,
        "status": show_session_status,
        "patterns": show_patterns,
        "clear-patterns": clear_patterns,
        "export": export_data,
    }

    try:
        handler = handlers.get(args.command)
        if handler is None:
            print(f"Unknown command: {args.command}")
            parser.print_help()
        else:
            handler()
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

156
lib/context-monitor.js Normal file
View File

@ -0,0 +1,156 @@
/**
* Context Monitor - Estimates context usage and triggers backups
*/
const fs = require('fs-extra');
const path = require('path');
class ContextMonitor {
  /**
   * Tracks heuristic (character-based) token estimates for the current
   * session and decides when a backup should be triggered. Estimates are
   * deliberately conservative — they only need to be accurate enough to
   * fire backups early rather than late.
   */
  constructor() {
    // Running totals for the current session.
    this.estimatedTokens = 0;
    this.promptCount = 0;
    this.toolExecutions = 0;
    this.sessionStartTime = Date.now();

    // Configuration
    this.maxContextTokens = 200000; // Conservative estimate of the context limit
    this.backupThreshold = 0.85; // Back up at 85% of max context
    this.timeThresholdMinutes = 30; // Back up every 30 minutes
    this.toolThreshold = 25; // Back up every 25 tool executions

    // Markers of the most recent backup, consumed by checkBackupTriggers().
    this.lastBackupTime = Date.now();
    this.lastBackupToolCount = 0;
  }

  /**
   * Update context estimates from a user prompt.
   *
   * @param {{prompt?: string, context_size?: number}} promptData - prompt text
   *   plus an optional pre-computed context size (already in tokens) that is
   *   added directly to the running total.
   */
  updateFromPrompt(promptData) {
    this.promptCount++;

    const prompt = promptData.prompt || '';
    this.estimatedTokens += this._estimateTokens(prompt);

    // Caller may supply an explicit context size (in tokens).
    if (promptData.context_size) {
      this.estimatedTokens += promptData.context_size;
    }
  }

  /**
   * Update context estimates from a tool execution.
   *
   * @param {{tool?: string, parameters?: object, output?: string, error?: string}} toolData
   */
  updateFromToolUse(toolData) {
    this.toolExecutions++;

    // Estimate tokens from tool parameters and output.
    const parameters = JSON.stringify(toolData.parameters || {});
    const output = toolData.output || '';
    const error = toolData.error || '';
    this.estimatedTokens += this._estimateTokens(parameters + output + error);

    // File operations pull file contents into context; shell commands add
    // their output. Flat buffers approximate those costs.
    if (toolData.tool === 'Read' || toolData.tool === 'Edit') {
      this.estimatedTokens += 2000; // Typical file size estimate
    } else if (toolData.tool === 'Bash') {
      this.estimatedTokens += 500; // Command output estimate
    }
  }

  /**
   * Check whether a backup should be triggered.
   *
   * Evaluates three independent triggers (context usage, time since last
   * backup, tools since last backup) and returns the highest-urgency one.
   *
   * @param {string} hookType - hook event name (unused; kept for API shape)
   * @param {object} data - hook payload (unused; kept for API shape)
   * @returns {{shouldBackup: boolean, reason?: string, urgency?: string}}
   */
  checkBackupTriggers(hookType, data) {
    const decisions = [];

    // Context threshold trigger
    const contextRatio = this.getContextUsageRatio();
    if (contextRatio > this.backupThreshold) {
      decisions.push({
        shouldBackup: true,
        reason: `Context usage ${(contextRatio * 100).toFixed(1)}%`,
        urgency: 'high'
      });
    }

    // Time-based trigger (measured from the last backup, not session start).
    const timeSinceBackup = (Date.now() - this.lastBackupTime) / (1000 * 60);
    if (timeSinceBackup > this.timeThresholdMinutes) {
      decisions.push({
        shouldBackup: true,
        reason: `${this.timeThresholdMinutes} minutes since last backup`,
        urgency: 'medium'
      });
    }

    // Tool-based trigger
    const toolsSinceBackup = this.toolExecutions - this.lastBackupToolCount;
    if (toolsSinceBackup >= this.toolThreshold) {
      decisions.push({
        shouldBackup: true,
        reason: `${this.toolThreshold} tools since last backup`,
        urgency: 'medium'
      });
    }

    // Return the highest-priority decision, if any trigger fired.
    if (decisions.length > 0) {
      const urgencyOrder = { high: 3, medium: 2, low: 1 };
      decisions.sort((a, b) => urgencyOrder[b.urgency] - urgencyOrder[a.urgency]);
      return decisions[0];
    }

    return { shouldBackup: false };
  }

  /**
   * Get current context usage ratio (0.0 to 1.0), clamped at 1.0.
   */
  getContextUsageRatio() {
    return Math.min(1.0, this.estimatedTokens / this.maxContextTokens);
  }

  /**
   * Mark that a backup was performed, resetting the time and tool-count
   * trigger baselines.
   */
  markBackupPerformed() {
    this.lastBackupTime = Date.now();
    this.lastBackupToolCount = this.toolExecutions;
  }

  /**
   * Estimate tokens from text (rough approximation).
   *
   * English text averages ~4 characters per token; dividing by 3.5 instead
   * deliberately over-estimates, leaving headroom for formatting and
   * special tokens.
   */
  _estimateTokens(text) {
    if (!text) return 0;
    return Math.ceil(text.length / 3.5);
  }

  /**
   * Get context usage statistics for reporting.
   *
   * @returns {{estimatedTokens: number, contextUsageRatio: number,
   *   promptCount: number, toolExecutions: number, sessionMinutes: number,
   *   lastBackupMinutesAgo: number}}
   */
  getStats() {
    const sessionMinutes = (Date.now() - this.sessionStartTime) / (1000 * 60);

    return {
      estimatedTokens: this.estimatedTokens,
      contextUsageRatio: this.getContextUsageRatio(),
      promptCount: this.promptCount,
      toolExecutions: this.toolExecutions,
      sessionMinutes: Math.round(sessionMinutes),
      lastBackupMinutesAgo: Math.round((Date.now() - this.lastBackupTime) / (1000 * 60))
    };
  }
}
module.exports = { ContextMonitor };

View File

@ -1,321 +0,0 @@
#!/usr/bin/env python3
"""Context Monitor - Token estimation and backup trigger system"""
import json
import time
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Any, Optional
try:
from .models import BackupDecision
except ImportError:
from models import BackupDecision
class ContextMonitor:
"""Monitors conversation context and predicts token usage"""
def __init__(self, storage_path: str = ".claude_hooks"):
self.storage_path = Path(storage_path)
self.storage_path.mkdir(parents=True, exist_ok=True)
self.session_start = datetime.now()
self.prompt_count = 0
self.estimated_tokens = 0
self.tool_executions = 0
self.file_operations = 0
# Token estimation constants (conservative estimates)
self.TOKENS_PER_CHAR = 0.25 # Average for English text
self.TOOL_OVERHEAD = 200 # Tokens per tool call
self.SYSTEM_OVERHEAD = 500 # Base conversation overhead
self.MAX_CONTEXT = 200000 # Claude's context limit
# Backup thresholds
self.backup_threshold = 0.85
self.emergency_threshold = 0.95
# Error tracking
self.estimation_errors = 0
self.max_errors = 5
self._last_good_estimate = 0.5
# Load previous session state if available
self._load_session_state()
def estimate_prompt_tokens(self, prompt_data: Dict[str, Any]) -> int:
"""Estimate tokens in user prompt"""
try:
prompt_text = prompt_data.get("prompt", "")
# Basic character count estimation
base_tokens = len(prompt_text) * self.TOKENS_PER_CHAR
# Add overhead for system prompts, context, etc.
overhead_tokens = self.SYSTEM_OVERHEAD
return int(base_tokens + overhead_tokens)
except Exception:
# Fallback estimation
return 1000
def estimate_conversation_tokens(self) -> int:
"""Estimate total conversation tokens"""
try:
# Base conversation context
base_tokens = self.estimated_tokens
# Add tool execution overhead
tool_tokens = self.tool_executions * self.TOOL_OVERHEAD
# Add file operation overhead (file contents in context)
file_tokens = self.file_operations * 1000 # Average file size
# Conversation history grows over time
history_tokens = self.prompt_count * 300 # Average response size
total = base_tokens + tool_tokens + file_tokens + history_tokens
return min(total, self.MAX_CONTEXT)
except Exception:
return self._handle_estimation_failure()
def get_context_usage_ratio(self) -> float:
"""Get estimated context usage as ratio (0.0 to 1.0)"""
try:
estimated = self.estimate_conversation_tokens()
ratio = min(1.0, estimated / self.MAX_CONTEXT)
# Reset error counter on success
self.estimation_errors = 0
self._last_good_estimate = ratio
return ratio
except Exception:
self.estimation_errors += 1
# Too many errors - use conservative fallback
if self.estimation_errors >= self.max_errors:
return 0.7 # Conservative threshold
# Single error - use last known good value
return self._last_good_estimate
def should_trigger_backup(self, threshold: Optional[float] = None) -> bool:
"""Check if backup should be triggered"""
try:
if threshold is None:
threshold = self.backup_threshold
usage = self.get_context_usage_ratio()
# Edge case: Very early in session
if self.prompt_count < 2:
return False
# Edge case: Already near context limit
if usage > self.emergency_threshold:
# Emergency backup - don't wait for other conditions
return True
# Session duration factor
session_hours = (datetime.now() - self.session_start).total_seconds() / 3600
complexity_factor = (self.tool_executions + self.file_operations) / 20
# Trigger earlier for complex sessions
adjusted_threshold = threshold - (complexity_factor * 0.1)
# Multiple trigger conditions
return (
usage > adjusted_threshold or
session_hours > 2.0 or
(usage > 0.7 and session_hours > 1.0)
)
except Exception:
# When in doubt, backup (better safe than sorry)
return True
def update_from_prompt(self, prompt_data: Dict[str, Any]):
"""Update estimates when user submits prompt"""
try:
self.prompt_count += 1
prompt_tokens = self.estimate_prompt_tokens(prompt_data)
self.estimated_tokens += prompt_tokens
# Save state periodically
if self.prompt_count % 5 == 0:
self._save_session_state()
except Exception:
pass # Don't let tracking errors break the system
def update_from_tool_use(self, tool_data: Dict[str, Any]):
"""Update estimates when tools are used"""
try:
self.tool_executions += 1
tool_name = tool_data.get("tool", "")
# File operations add content to context
if tool_name in ["Read", "Edit", "Write", "Glob", "MultiEdit"]:
self.file_operations += 1
# Large outputs add to context
parameters = tool_data.get("parameters", {})
if "file_path" in parameters:
self.estimated_tokens += 500 # Estimated file content
# Save state periodically
if self.tool_executions % 10 == 0:
self._save_session_state()
except Exception:
pass # Don't let tracking errors break the system
def check_backup_triggers(self, hook_event: str, data: Dict[str, Any]) -> BackupDecision:
"""Check all backup trigger conditions"""
try:
# Context-based triggers
if self.should_trigger_backup():
usage = self.get_context_usage_ratio()
urgency = "high" if usage > self.emergency_threshold else "medium"
return BackupDecision(
should_backup=True,
reason="context_threshold",
urgency=urgency,
metadata={"usage_ratio": usage}
)
# Activity-based triggers
if self._should_backup_by_activity():
return BackupDecision(
should_backup=True,
reason="activity_threshold",
urgency="medium"
)
# Critical operation triggers
if self._is_critical_operation(data):
return BackupDecision(
should_backup=True,
reason="critical_operation",
urgency="high"
)
return BackupDecision(should_backup=False, reason="no_trigger")
except Exception:
# If trigger checking fails, err on side of safety
return BackupDecision(
should_backup=True,
reason="trigger_check_failed",
urgency="medium"
)
def _should_backup_by_activity(self) -> bool:
"""Activity-based backup triggers"""
# Backup after significant file modifications
if (self.file_operations % 10 == 0 and self.file_operations > 0):
return True
# Backup after many tool executions
if (self.tool_executions % 25 == 0 and self.tool_executions > 0):
return True
return False
def _is_critical_operation(self, data: Dict[str, Any]) -> bool:
"""Detect operations that should trigger immediate backup"""
tool = data.get("tool", "")
params = data.get("parameters", {})
# Git operations
if tool == "Bash":
command = params.get("command", "").lower()
if any(git_cmd in command for git_cmd in ["git commit", "git push", "git merge"]):
return True
# Package installations
if any(pkg_cmd in command for pkg_cmd in ["npm install", "pip install", "cargo install"]):
return True
# Major file operations
if tool in ["Write", "MultiEdit"]:
content = params.get("content", "")
if len(content) > 5000: # Large file changes
return True
return False
def _handle_estimation_failure(self) -> int:
"""Fallback estimation when primary method fails"""
# Method 1: Time-based estimation
session_duration = (datetime.now() - self.session_start).total_seconds() / 3600
if session_duration > 1.0: # 1 hour = likely high usage
return int(self.MAX_CONTEXT * 0.8)
# Method 2: Activity-based estimation
total_activity = self.tool_executions + self.file_operations
if total_activity > 50: # High activity = likely high context
return int(self.MAX_CONTEXT * 0.75)
# Method 3: Conservative default
return int(self.MAX_CONTEXT * 0.5)
def _save_session_state(self):
"""Save current session state to disk"""
try:
state_file = self.storage_path / "session_state.json"
state = {
"session_start": self.session_start.isoformat(),
"prompt_count": self.prompt_count,
"estimated_tokens": self.estimated_tokens,
"tool_executions": self.tool_executions,
"file_operations": self.file_operations,
"last_updated": datetime.now().isoformat()
}
with open(state_file, 'w') as f:
json.dump(state, f, indent=2)
except Exception:
pass # Don't let state saving errors break the system
def _load_session_state(self):
"""Load previous session state if available"""
try:
state_file = self.storage_path / "session_state.json"
if state_file.exists():
with open(state_file, 'r') as f:
state = json.load(f)
# Only load if session is recent (within last hour)
last_updated = datetime.fromisoformat(state["last_updated"])
if datetime.now() - last_updated < timedelta(hours=1):
self.prompt_count = state.get("prompt_count", 0)
self.estimated_tokens = state.get("estimated_tokens", 0)
self.tool_executions = state.get("tool_executions", 0)
self.file_operations = state.get("file_operations", 0)
except Exception:
pass # If loading fails, start fresh
def get_session_summary(self) -> Dict[str, Any]:
"""Get current session summary"""
return {
"session_duration": str(datetime.now() - self.session_start),
"prompt_count": self.prompt_count,
"tool_executions": self.tool_executions,
"file_operations": self.file_operations,
"estimated_tokens": self.estimate_conversation_tokens(),
"context_usage_ratio": self.get_context_usage_ratio(),
"should_backup": self.should_trigger_backup()
}

View File

@ -1,198 +0,0 @@
#!/usr/bin/env python3
"""Data models for Claude Code Hooks system"""
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Any
import json
@dataclass
class ToolExecution:
"""Single tool execution record"""
timestamp: datetime
tool: str
parameters: Dict[str, Any]
success: bool
error_message: Optional[str] = None
execution_time: float = 0.0
context: Dict[str, Any] = field(default_factory=dict)
def to_dict(self) -> Dict[str, Any]:
return {
"timestamp": self.timestamp.isoformat(),
"tool": self.tool,
"parameters": self.parameters,
"success": self.success,
"error_message": self.error_message,
"execution_time": self.execution_time,
"context": self.context
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'ToolExecution':
return cls(
timestamp=datetime.fromisoformat(data["timestamp"]),
tool=data["tool"],
parameters=data["parameters"],
success=data["success"],
error_message=data.get("error_message"),
execution_time=data.get("execution_time", 0.0),
context=data.get("context", {})
)
@dataclass
class Pattern:
"""Learned pattern with confidence scoring"""
pattern_id: str
pattern_type: str # "command_failure", "tool_sequence", "context_error"
trigger: Dict[str, Any] # What triggers this pattern
prediction: Dict[str, Any] # What we predict will happen
confidence: float # 0.0 to 1.0
evidence_count: int # How many times we've seen this
last_seen: datetime
success_rate: float = 0.0
def to_dict(self) -> Dict[str, Any]:
return {
"pattern_id": self.pattern_id,
"pattern_type": self.pattern_type,
"trigger": self.trigger,
"prediction": self.prediction,
"confidence": self.confidence,
"evidence_count": self.evidence_count,
"last_seen": self.last_seen.isoformat(),
"success_rate": self.success_rate
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'Pattern':
return cls(
pattern_id=data["pattern_id"],
pattern_type=data["pattern_type"],
trigger=data["trigger"],
prediction=data["prediction"],
confidence=data["confidence"],
evidence_count=data["evidence_count"],
last_seen=datetime.fromisoformat(data["last_seen"]),
success_rate=data.get("success_rate", 0.0)
)
@dataclass
class HookResult:
"""Result of hook execution"""
allow: bool
message: str = ""
warning: bool = False
metadata: Dict[str, Any] = field(default_factory=dict)
@classmethod
def success(cls, message: str = "Operation allowed") -> 'HookResult':
return cls(allow=True, message=message)
@classmethod
def blocked(cls, reason: str) -> 'HookResult':
return cls(allow=False, message=reason)
@classmethod
def allow_with_warning(cls, warning: str) -> 'HookResult':
return cls(allow=True, message=warning, warning=True)
def to_claude_response(self) -> Dict[str, Any]:
"""Convert to Claude Code hook response format"""
response = {
"allow": self.allow,
"message": self.message
}
if self.metadata:
response.update(self.metadata)
return response
@dataclass
class ValidationResult:
"""Result of validation operations"""
allowed: bool
reason: str = ""
severity: str = "info" # info, warning, medium, high, critical
suggestions: List[str] = field(default_factory=list)
@property
def is_critical(self) -> bool:
return self.severity == "critical"
@property
def is_blocking(self) -> bool:
return not self.allowed
@dataclass
class BackupDecision:
"""Decision about whether to trigger backup"""
should_backup: bool
reason: str
urgency: str = "medium" # low, medium, high
metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class BackupResult:
"""Result of backup operation"""
success: bool
backup_id: str = ""
backup_path: str = ""
error: str = ""
git_success: bool = False
fallback_performed: bool = False
components: Dict[str, Any] = field(default_factory=dict)
@dataclass
class GitBackupResult:
"""Result of git backup operation"""
success: bool
commit_id: str = ""
message: str = ""
error: str = ""
class PatternDatabase:
"""Fast lookup database for learned patterns"""
def __init__(self):
self.command_patterns: Dict[str, Pattern] = {}
self.sequence_patterns: List[Pattern] = []
self.context_patterns: Dict[str, Pattern] = {}
self.execution_history: List[ToolExecution] = []
def to_dict(self) -> Dict[str, Any]:
return {
"command_patterns": {k: v.to_dict() for k, v in self.command_patterns.items()},
"sequence_patterns": [p.to_dict() for p in self.sequence_patterns],
"context_patterns": {k: v.to_dict() for k, v in self.context_patterns.items()},
"execution_history": [e.to_dict() for e in self.execution_history[-100:]] # Keep last 100
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'PatternDatabase':
db = cls()
# Load command patterns
for k, v in data.get("command_patterns", {}).items():
db.command_patterns[k] = Pattern.from_dict(v)
# Load sequence patterns
for p in data.get("sequence_patterns", []):
db.sequence_patterns.append(Pattern.from_dict(p))
# Load context patterns
for k, v in data.get("context_patterns", {}).items():
db.context_patterns[k] = Pattern.from_dict(v)
# Load execution history
for e in data.get("execution_history", []):
db.execution_history.append(ToolExecution.from_dict(e))
return db

375
lib/session-state.js Normal file
View File

@ -0,0 +1,375 @@
/**
* Session State Manager - Tracks session state and creates continuation docs
*/
const fs = require('fs-extra');
const path = require('path');
class SessionStateManager {
  /**
   * Tracks session activity (tool usage, file modifications, commands,
   * backups) and produces continuation documents (LAST_SESSION.md,
   * ACTIVE_TODOS.md) so a new session can pick up where the last one ended.
   *
   * @param {string} stateDir - directory for persisted session state.
   */
  constructor(stateDir = '.claude_hooks') {
    this.stateDir = path.resolve(stateDir);
    this.sessionId = this._generateSessionId();
    this.startTime = Date.now();

    // Session data
    this.modifiedFiles = new Set();
    this.commandsExecuted = [];
    this.toolUsage = {};
    this.backupHistory = [];
    this.contextSnapshots = [];

    fs.ensureDirSync(this.stateDir);
    // NOTE: fire-and-forget — constructors cannot await, so persisted state
    // may appear shortly after construction rather than synchronously.
    this._loadPersistentState();
  }

  /**
   * Update state from tool usage.
   *
   * @param {{tool?: string, parameters?: object, success?: boolean}} toolData
   */
  async updateFromToolUse(toolData) {
    const tool = toolData.tool || 'Unknown';

    // Track per-tool usage counts.
    this.toolUsage[tool] = (this.toolUsage[tool] || 0) + 1;

    // Track file modifications.
    if (tool === 'Edit' || tool === 'Write' || tool === 'MultiEdit') {
      const filePath = toolData.parameters?.file_path;
      if (filePath) {
        this.modifiedFiles.add(filePath);
      }
    }

    // Track shell commands with outcome and timestamp.
    if (tool === 'Bash') {
      const command = toolData.parameters?.command;
      if (command) {
        this.commandsExecuted.push({
          command,
          timestamp: new Date().toISOString(),
          success: toolData.success !== false
        });
      }
    }

    // Persist state every 5 commands to bound write frequency.
    if (this.commandsExecuted.length % 5 === 0) {
      await this._savePersistentState();
    }
  }

  /**
   * Add a backup record to history and persist immediately.
   *
   * @param {string} backupId
   * @param {object} info - extra fields (e.g. reason, success) merged in.
   */
  async addBackup(backupId, info) {
    this.backupHistory.push({
      backupId,
      timestamp: new Date().toISOString(),
      ...info
    });
    await this._savePersistentState();
  }

  /**
   * Add a context usage snapshot (kept as a rolling window of 10).
   *
   * @param {object} snapshot - e.g. usageRatio, promptCount, toolExecutions.
   */
  async addContextSnapshot(snapshot) {
    this.contextSnapshots.push({
      timestamp: new Date().toISOString(),
      ...snapshot
    });

    // Keep only the last 10 snapshots.
    if (this.contextSnapshots.length > 10) {
      this.contextSnapshots = this.contextSnapshots.slice(-10);
    }
  }

  /**
   * Get a comprehensive session summary.
   *
   * @returns {Promise<object>} session metadata plus aggregate stats.
   */
  async getSessionSummary() {
    const duration = Date.now() - this.startTime;
    const durationMinutes = Math.round(duration / (1000 * 60));

    return {
      sessionId: this.sessionId,
      startTime: new Date(this.startTime).toISOString(),
      duration: duration,
      modifiedFiles: Array.from(this.modifiedFiles),
      commandsExecuted: this.commandsExecuted,
      toolUsage: this.toolUsage,
      backupHistory: this.backupHistory,
      contextSnapshots: this.contextSnapshots,
      sessionStats: {
        durationMinutes,
        totalToolCalls: Object.values(this.toolUsage).reduce((sum, count) => sum + count, 0),
        totalCommands: this.commandsExecuted.length,
        filesModified: this.modifiedFiles.size
      }
    };
  }

  /**
   * Create continuation documentation files (LAST_SESSION.md and, when
   * applicable, ACTIVE_TODOS.md). Errors are logged, never thrown.
   */
  async createContinuationDocs() {
    try {
      const summary = await this.getSessionSummary();

      // Create LAST_SESSION.md
      await this._createLastSessionDoc(summary);

      // Create/update ACTIVE_TODOS.md (if todos exist)
      await this._updateActiveTodos();

    } catch (error) {
      console.error('Error creating continuation docs:', error.message);
    }
  }

  /**
   * Create LAST_SESSION.md with the session summary.
   */
  async _createLastSessionDoc(summary) {
    let content = `# Last Claude Session Summary
## Session Overview
- **Session ID**: ${summary.sessionId}
- **Started**: ${summary.startTime}
- **Duration**: ${summary.sessionStats.durationMinutes} minutes
- **Total Tools Used**: ${summary.sessionStats.totalToolCalls}
- **Commands Executed**: ${summary.sessionStats.totalCommands}
- **Files Modified**: ${summary.sessionStats.filesModified}
## Files Modified
`;

    if (summary.modifiedFiles.length > 0) {
      for (const file of summary.modifiedFiles) {
        content += `- ${file}\n`;
      }
    } else {
      content += '*No files were modified in this session*\n';
    }

    content += `
## Tools Used
`;
    for (const [tool, count] of Object.entries(summary.toolUsage)) {
      content += `- **${tool}**: ${count} times\n`;
    }

    content += `
## Recent Commands
`;
    const recentCommands = summary.commandsExecuted.slice(-10);
    if (recentCommands.length > 0) {
      for (const cmd of recentCommands) {
        const status = cmd.success ? '✅' : '❌';
        const time = new Date(cmd.timestamp).toLocaleTimeString();
        content += `- ${status} ${time}: \`${cmd.command}\`\n`;
      }
    } else {
      content += '*No commands executed in this session*\n';
    }

    if (summary.backupHistory.length > 0) {
      content += `
## Backups Created
`;
      for (const backup of summary.backupHistory) {
        const status = backup.success ? '✅' : '❌';
        const time = new Date(backup.timestamp).toLocaleTimeString();
        content += `- ${status} ${time}: ${backup.backupId} - ${backup.reason}\n`;
      }
    }

    content += `
## Context Usage Timeline
`;
    if (summary.contextSnapshots.length > 0) {
      for (const snapshot of summary.contextSnapshots) {
        const time = new Date(snapshot.timestamp).toLocaleTimeString();
        const usage = ((snapshot.usageRatio || 0) * 100).toFixed(1);
        content += `- ${time}: ${usage}% (${snapshot.promptCount || 0} prompts, ${snapshot.toolExecutions || 0} tools)\n`;
      }
    }

    content += `
## Quick Recovery
\`\`\`bash
# Check current project status
git status
# View recent changes
git diff
# List backup directories
ls .claude_hooks/backups/
\`\`\`
*Generated by Claude Hooks on ${new Date().toISOString()}*
`;

    await fs.writeFile('LAST_SESSION.md', content);
  }

  /**
   * Update ACTIVE_TODOS.md if no todo file already exists.
   */
  async _updateActiveTodos() {
    // Check if there's an existing ACTIVE_TODOS.md or any todo-related files.
    const todoFiles = ['ACTIVE_TODOS.md', 'TODO.md', 'todos.md'];

    for (const todoFile of todoFiles) {
      if (await fs.pathExists(todoFile)) {
        // File exists, don't overwrite it.
        return;
      }
    }

    // Look for todo comments in recently modified files.
    const todos = await this._extractTodosFromFiles();

    if (todos.length > 0) {
      let content = `# Active TODOs
*Auto-generated from code comments and session analysis*
`;

      for (const todo of todos) {
        content += `- [ ] ${todo.text} (${todo.file}:${todo.line})\n`;
      }

      content += `
*Update this file manually or use Claude to manage your todos*
`;

      await fs.writeFile('ACTIVE_TODOS.md', content);
    }
  }

  /**
   * Extract TODO/FIXME/HACK/XXX/NOTE comments from modified files.
   *
   * @returns {Promise<Array<{text: string, file: string, line: number}>>}
   */
  async _extractTodosFromFiles() {
    const todos = [];
    // BUG FIX: the previous pattern used the /g flag with .exec(), which is
    // stateful (lastIndex persists between calls) and silently skipped
    // matches on subsequent lines. A non-global regex with String.match is
    // stateless and returns the capture group directly.
    const todoPattern = /(?:TODO|FIXME|HACK|XXX|NOTE):\s*(.+)/i;

    for (const filePath of this.modifiedFiles) {
      try {
        if (await fs.pathExists(filePath)) {
          const content = await fs.readFile(filePath, 'utf8');
          const lines = content.split('\n');

          lines.forEach((line, index) => {
            const match = line.match(todoPattern);
            if (match) {
              todos.push({
                text: match[1].trim(),
                file: filePath,
                line: index + 1
              });
            }
          });
        }
      } catch (error) {
        // Skip files that can't be read.
      }
    }

    return todos;
  }

  /**
   * Clean up session resources at end of session.
   */
  async cleanupSession() {
    // Save final state
    await this._savePersistentState();

    // Clean up old session files (keep last 5)
    await this._cleanupOldSessions();
  }

  /**
   * Generate a unique, timestamp-based session ID (sess_YYYYMMDD_HHMMSS).
   */
  _generateSessionId() {
    const timestamp = new Date().toISOString()
      .replace(/[:-]/g, '')
      .replace(/\.\d{3}Z$/, '')
      .replace('T', '_');
    return `sess_${timestamp}`;
  }

  /**
   * Load persistent state from disk; only states younger than 24 hours are
   * resumed. Failures fall through to a fresh session.
   */
  async _loadPersistentState() {
    try {
      const stateFile = path.join(this.stateDir, 'session_state.json');

      if (await fs.pathExists(stateFile)) {
        const state = await fs.readJson(stateFile);

        // Only load if session is recent (within 24 hours).
        const stateAge = Date.now() - new Date(state.startTime).getTime();
        if (stateAge < 24 * 60 * 60 * 1000) {
          this.modifiedFiles = new Set(state.modifiedFiles || []);
          this.commandsExecuted = state.commandsExecuted || [];
          this.toolUsage = state.toolUsage || {};
          this.backupHistory = state.backupHistory || [];
          this.contextSnapshots = state.contextSnapshots || [];
        }
      }
    } catch (error) {
      // Start fresh if loading fails.
    }
  }

  /**
   * Save persistent state to disk. Failures are ignored so that persistence
   * problems never break the session itself.
   */
  async _savePersistentState() {
    try {
      const stateFile = path.join(this.stateDir, 'session_state.json');

      const state = {
        sessionId: this.sessionId,
        startTime: new Date(this.startTime).toISOString(),
        modifiedFiles: Array.from(this.modifiedFiles),
        commandsExecuted: this.commandsExecuted,
        toolUsage: this.toolUsage,
        backupHistory: this.backupHistory,
        contextSnapshots: this.contextSnapshots,
        lastUpdated: new Date().toISOString()
      };

      await fs.writeJson(stateFile, state, { spaces: 2 });
    } catch (error) {
      // Don't let save failures break the session.
    }
  }

  /**
   * Clean up old session state files.
   *
   * Placeholder: a full implementation would glob
   * `${this.stateDir}/session_*.json` and delete all but the newest few.
   */
  async _cleanupOldSessions() {
    // Intentionally a no-op for now; kept async so callers' await is stable.
  }
}
module.exports = { SessionStateManager };

View File

@ -1,348 +0,0 @@
#!/usr/bin/env python3
"""Session State Manager - Persistent session state and continuity"""
import json
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Set
try:
from .models import ToolExecution
except ImportError:
from models import ToolExecution
class SessionStateManager:
"""Manages persistent session state across Claude interactions"""
def __init__(self, state_dir: str = ".claude_hooks"):
self.state_dir = Path(state_dir)
self.state_dir.mkdir(parents=True, exist_ok=True)
self.state_file = self.state_dir / "session_state.json"
self.todos_file = Path("ACTIVE_TODOS.md")
self.last_session_file = Path("LAST_SESSION.md")
# Initialize session
self.session_id = str(uuid.uuid4())[:8]
self.current_state = self._load_or_create_state()
def _load_or_create_state(self) -> Dict[str, Any]:
"""Load existing state or create new session state"""
try:
if self.state_file.exists():
with open(self.state_file, 'r') as f:
state = json.load(f)
# Check if this is a continuation of recent session
last_activity = datetime.fromisoformat(state.get("last_activity", "1970-01-01"))
if (datetime.now() - last_activity).total_seconds() < 3600: # Within 1 hour
# Continue existing session
return state
# Create new session
return self._create_new_session()
except Exception:
# If loading fails, create new session
return self._create_new_session()
def _create_new_session(self) -> Dict[str, Any]:
"""Create new session state"""
return {
"session_id": self.session_id,
"start_time": datetime.now().isoformat(),
"last_activity": datetime.now().isoformat(),
"modified_files": [],
"commands_executed": [],
"tool_usage": {},
"backup_history": [],
"todos": [],
"context_snapshots": []
}
def update_from_tool_use(self, tool_data: Dict[str, Any]):
"""Update session state from tool usage"""
try:
tool = tool_data.get("tool", "")
params = tool_data.get("parameters", {})
timestamp = datetime.now().isoformat()
# Track file modifications
if tool in ["Edit", "Write", "MultiEdit"]:
file_path = params.get("file_path", "")
if file_path and file_path not in self.current_state["modified_files"]:
self.current_state["modified_files"].append(file_path)
# Track commands executed
if tool == "Bash":
command = params.get("command", "")
if command:
self.current_state["commands_executed"].append({
"command": command,
"timestamp": timestamp
})
# Keep only last 50 commands
if len(self.current_state["commands_executed"]) > 50:
self.current_state["commands_executed"] = self.current_state["commands_executed"][-50:]
# Track tool usage statistics
self.current_state["tool_usage"][tool] = self.current_state["tool_usage"].get(tool, 0) + 1
self.current_state["last_activity"] = timestamp
# Save state periodically
self._save_state()
except Exception:
pass # Don't let state tracking errors break the system
def add_backup(self, backup_id: str, backup_info: Dict[str, Any]):
"""Record backup in session history"""
try:
backup_record = {
"backup_id": backup_id,
"timestamp": datetime.now().isoformat(),
"reason": backup_info.get("reason", "unknown"),
"success": backup_info.get("success", False)
}
self.current_state["backup_history"].append(backup_record)
# Keep only last 10 backups
if len(self.current_state["backup_history"]) > 10:
self.current_state["backup_history"] = self.current_state["backup_history"][-10:]
self._save_state()
except Exception:
pass
def add_context_snapshot(self, context_data: Dict[str, Any]):
"""Add context snapshot for recovery"""
try:
snapshot = {
"timestamp": datetime.now().isoformat(),
"context_ratio": context_data.get("usage_ratio", 0.0),
"prompt_count": context_data.get("prompt_count", 0),
"tool_count": context_data.get("tool_executions", 0)
}
self.current_state["context_snapshots"].append(snapshot)
# Keep only last 20 snapshots
if len(self.current_state["context_snapshots"]) > 20:
self.current_state["context_snapshots"] = self.current_state["context_snapshots"][-20:]
except Exception:
pass
def update_todos(self, todos: List[Dict[str, Any]]):
"""Update active todos list"""
try:
self.current_state["todos"] = todos
self._save_state()
self._update_todos_file()
except Exception:
pass
def get_session_summary(self) -> Dict[str, Any]:
"""Generate comprehensive session summary"""
try:
return {
"session_id": self.current_state.get("session_id", "unknown"),
"start_time": self.current_state.get("start_time", "unknown"),
"last_activity": self.current_state.get("last_activity", "unknown"),
"modified_files": self.current_state.get("modified_files", []),
"tool_usage": self.current_state.get("tool_usage", {}),
"commands_executed": self.current_state.get("commands_executed", []),
"backup_history": self.current_state.get("backup_history", []),
"todos": self.current_state.get("todos", []),
"session_stats": self._calculate_session_stats()
}
except Exception:
return {"error": "Failed to generate session summary"}
def _calculate_session_stats(self) -> Dict[str, Any]:
    """Calculate aggregate statistics for the current session.

    Returns a dict with session duration (minutes), total tool calls,
    total commands, total modified files, and the top tools. On any
    unexpected failure an empty dict is returned.
    """
    try:
        total_tools = sum(self.current_state.get("tool_usage", {}).values())
        total_commands = len(self.current_state.get("commands_executed", []))
        total_files = len(self.current_state.get("modified_files", []))
        # Bug fix: previously a malformed "start_time" (e.g. the "unknown"
        # placeholder used elsewhere in this class) raised inside
        # fromisoformat() and the broad except discarded ALL stats.
        # Parse defensively so the counters above still get reported.
        duration_minutes = 0.0
        try:
            start_time = datetime.fromisoformat(
                self.current_state.get("start_time", datetime.now().isoformat()))
            duration_minutes = round(
                (datetime.now() - start_time).total_seconds() / 60, 1)
        except (TypeError, ValueError):
            pass  # unparsable start_time: report 0-minute duration
        return {
            "duration_minutes": duration_minutes,
            "total_tool_calls": total_tools,
            "total_commands": total_commands,
            "total_files_modified": total_files,
            "most_used_tools": self._get_top_tools(3)
        }
    except Exception:
        return {}
def _get_top_tools(self, count: int) -> List[Dict[str, Any]]:
    """Return the ``count`` most frequently used tools, most-used first.

    Each entry is ``{"tool": name, "count": usage}``. Returns an empty
    list on any failure.
    """
    try:
        usage = self.current_state.get("tool_usage", {})
        ranked = sorted(usage.items(), key=lambda item: item[1], reverse=True)[:count]
        return [{"tool": name, "count": times} for name, times in ranked]
    except Exception:
        return []
def create_continuation_docs(self):
    """Write the LAST_SESSION.md and ACTIVE_TODOS.md continuation docs.

    Best-effort: document-generation errors are swallowed so they never
    break the surrounding system.
    """
    try:
        self._create_last_session_doc()
        self._update_todos_file()
    except Exception:
        pass
def _create_last_session_doc(self):
    """Create LAST_SESSION.md with session summary.

    Renders the summary from get_session_summary() into a markdown file at
    self.last_session_file. If rendering fails, falls back to writing a
    minimal document so a LAST_SESSION.md always exists.
    """
    try:
        summary = self.get_session_summary()
        # Header section; keys below come straight from get_session_summary().
        content = f"""# Last Claude Session Summary
**Session ID**: {summary['session_id']}
**Duration**: {summary['start_time']} {summary['last_activity']}
**Session Length**: {summary.get('session_stats', {}).get('duration_minutes', 0)} minutes
## Files Modified ({len(summary['modified_files'])})
"""
        for file_path in summary['modified_files']:
            content += f"- {file_path}\n"
        content += f"\n## Tools Used ({summary.get('session_stats', {}).get('total_tool_calls', 0)} total)\n"
        for tool, count in summary['tool_usage'].items():
            content += f"- {tool}: {count} times\n"
        content += f"\n## Recent Commands ({len(summary['commands_executed'])})\n"
        # Show last 10 commands
        recent_commands = summary['commands_executed'][-10:]
        for cmd_info in recent_commands:
            timestamp = cmd_info['timestamp'][:19]  # Remove microseconds
            content += f"- `{cmd_info['command']}` ({timestamp})\n"
        content += f"\n## Backup History\n"
        for backup in summary['backup_history']:
            # NOTE(review): both branches are empty strings — the status
            # glyphs (likely a check/cross mark) appear lost; confirm intent.
            status = "" if backup['success'] else ""
            content += f"- {status} {backup['backup_id']} - {backup['reason']} ({backup['timestamp'][:19]})\n"
        # Footer with continuation instructions; references the most recent
        # backup id when one exists, otherwise the literal 'latest'.
        content += f"""
## To Continue This Session
1. **Review Modified Files**: Check the files listed above for your recent changes
2. **Check Active Tasks**: Review `ACTIVE_TODOS.md` for pending work
3. **Restore Context**: Reference the commands and tools used above
4. **Use Backups**: If needed, restore from backup using `claude-hooks restore {summary['backup_history'][-1]['backup_id'] if summary['backup_history'] else 'latest'}`
## Quick Commands
```bash
# View current project status
git status
# Check for any uncommitted changes
git diff
# List available backups
claude-hooks list-backups
# Continue with active todos
cat ACTIVE_TODOS.md
```
"""
        with open(self.last_session_file, 'w') as f:
            f.write(content)
    except Exception as e:
        # Create minimal doc on error
        try:
            with open(self.last_session_file, 'w') as f:
                f.write(f"# Last Session\n\nSession ended at {datetime.now().isoformat()}\n\nError creating summary: {e}\n")
        except Exception:
            pass
def _update_todos_file(self):
    """Update ACTIVE_TODOS.md file.

    Renders self.current_state["todos"] grouped by status (in-progress,
    pending, last 5 completed) into self.todos_file; writes a help stub
    when there are no todos. Best-effort: errors are swallowed.
    """
    try:
        todos = self.current_state.get("todos", [])
        if not todos:
            # No todos: write a static help document instead of an empty list.
            content = """# Active TODOs
*No active todos. Add some to track your progress!*
## How to Add TODOs
Use Claude's TodoWrite tool to manage your task list:
- Track progress across sessions
- Break down complex tasks
- Never lose track of what you're working on
"""
        else:
            content = f"""# Active TODOs
*Updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*
"""
            # Group by status
            pending_todos = [t for t in todos if t.get('status') == 'pending']
            in_progress_todos = [t for t in todos if t.get('status') == 'in_progress']
            completed_todos = [t for t in todos if t.get('status') == 'completed']
            if in_progress_todos:
                content += "## 🚀 In Progress\n\n"
                for todo in in_progress_todos:
                    priority = todo.get('priority', 'medium')
                    # NOTE(review): the 'medium' glyph is an empty string —
                    # likely lost in extraction; confirm intended symbol.
                    priority_emoji = {'high': '🔥', 'medium': '', 'low': '📝'}.get(priority, '')
                    content += f"- {priority_emoji} {todo.get('content', 'Unknown task')}\n"
                content += "\n"
            if pending_todos:
                content += "## 📋 Pending\n\n"
                for todo in pending_todos:
                    priority = todo.get('priority', 'medium')
                    priority_emoji = {'high': '🔥', 'medium': '', 'low': '📝'}.get(priority, '')
                    content += f"- {priority_emoji} {todo.get('content', 'Unknown task')}\n"
                content += "\n"
            if completed_todos:
                content += "## ✅ Completed\n\n"
                for todo in completed_todos[-5:]:  # Show last 5 completed
                    content += f"- ✅ {todo.get('content', 'Unknown task')}\n"
                content += "\n"
        with open(self.todos_file, 'w') as f:
            f.write(content)
    except Exception:
        pass  # Don't let todo file creation break the system
def _save_state(self):
    """Persist current_state to self.state_file as pretty-printed JSON."""
    try:
        with open(self.state_file, 'w') as f:
            f.write(json.dumps(self.current_state, indent=2))
    except Exception:
        pass  # persistence is best-effort; never break the session
def cleanup_session(self):
    """Finalize the session: write continuation docs, then persist state."""
    try:
        self.create_continuation_docs()
        self._save_state()
    except Exception:
        pass  # cleanup is best-effort

579
lib/shadow-learner.js Normal file
View File

@ -0,0 +1,579 @@
/**
* Shadow Learner - Pattern learning and prediction system
* Node.js implementation
*/
const fs = require('fs-extra');
const path = require('path');
class ConfidenceCalculator {
  /**
   * Confidence that a command-failure pattern is real.
   * Combines failure rate, sample size (plateaus at 10 attempts), and a
   * recency factor (0..1, recent evidence weighs more), clamped to [0.1, 0.99].
   * @param {number} successCount
   * @param {number} failureCount
   * @param {number} recencyFactor - 1.0 = very recent, 0.0 = very old
   * @returns {number} confidence in [0, 0.99]; 0.0 when there is no evidence
   */
  static calculateCommandConfidence(successCount, failureCount, recencyFactor) {
    const totalAttempts = successCount + failureCount;
    if (totalAttempts === 0) return 0.0;
    const failureRate = failureCount / totalAttempts;
    const sampleFactor = Math.min(1.0, totalAttempts / 10.0); // plateau at 10 samples
    const raw = failureRate * sampleFactor * (0.5 + 0.5 * recencyFactor);
    return Math.min(0.99, Math.max(0.1, raw));
  }

  /**
   * Confidence for a tool-sequence pattern: success rate scaled by sample
   * size (plateaus at 5 observed sequences).
   * @param {number} successfulSequences
   * @param {number} totalSequences
   * @returns {number} confidence in [0, 1]; 0.0 when there is no evidence
   */
  static calculateSequenceConfidence(successfulSequences, totalSequences) {
    if (totalSequences === 0) return 0.0;
    const successRate = successfulSequences / totalSequences;
    const sampleFactor = Math.min(1.0, totalSequences / 5.0);
    return successRate * sampleFactor;
  }
}
class PatternMatcher {
  /**
   * Pattern matching over the learned database.
   * @param {object} db - database with `commandPatterns` / `contextPatterns` maps
   */
  constructor(db) {
    this.db = db;
  }

  /**
   * Find command patterns similar to `command`, sorted by confidence (desc).
   * Matches by: exact base command, character-set similarity, or the pattern
   * command appearing inside any token of the input.
   * @param {string} command - full shell command line
   * @param {number} [threshold=0.8] - similarity cutoff for fuzzy matches
   * @returns {Array<object>} matching patterns, highest confidence first
   */
  fuzzyCommandMatch(command, threshold = 0.8) {
    const cmdTokens = command.toLowerCase().split(' ');
    if (cmdTokens.length === 0) return [];
    const baseCmd = cmdTokens[0];
    const matches = [];
    for (const pattern of Object.values(this.db.commandPatterns)) {
      const patternCmd = (pattern.trigger.command || '').toLowerCase();
      // Exact match
      if (patternCmd === baseCmd) {
        matches.push(pattern);
      }
      // Fuzzy match on command name
      else if (this._similarity(patternCmd, baseCmd) > threshold) {
        matches.push(pattern);
      }
      // Partial match (e.g., "pip3" matches the "pip" pattern).
      // Bug fix: the containment was inverted (`patternCmd.includes(token)`),
      // so pattern "pip" never matched command "pip3" despite the comment;
      // the `patternCmd &&` guard prevents an empty pattern command from
      // matching every token (''.includes is trivially true in reverse).
      else if (patternCmd && cmdTokens.some(token => token.includes(patternCmd))) {
        matches.push(pattern);
      }
    }
    return matches.sort((a, b) => b.confidence - a.confidence);
  }

  /**
   * Match context patterns whose trigger conditions all hold in `currentContext`.
   * @param {object} currentContext
   * @returns {Array<object>} matching patterns, highest confidence first
   */
  contextPatternMatch(currentContext) {
    const matches = [];
    for (const pattern of Object.values(this.db.contextPatterns)) {
      if (this._contextMatches(currentContext, pattern.trigger)) {
        matches.push(pattern);
      }
    }
    return matches.sort((a, b) => b.confidence - a.confidence);
  }

  /**
   * Jaccard similarity over character sets (not sequence-aware).
   * @returns {number} similarity in [0, 1]
   */
  _similarity(str1, str2) {
    const set1 = new Set(str1.split(''));
    const set2 = new Set(str2.split(''));
    const intersection = new Set([...set1].filter(x => set2.has(x)));
    const union = new Set([...set1, ...set2]);
    return intersection.size / union.size;
  }

  /**
   * True when every trigger key exists in `current` and matches: strings
   * match by case-insensitive substring, everything else by strict equality.
   */
  _contextMatches(current, trigger) {
    for (const [key, expectedValue] of Object.entries(trigger)) {
      if (!(key in current)) return false;
      const currentValue = current[key];
      if (typeof expectedValue === 'string' && typeof currentValue === 'string') {
        if (!currentValue.toLowerCase().includes(expectedValue.toLowerCase())) {
          return false;
        }
      } else if (expectedValue !== currentValue) {
        return false;
      }
    }
    return true;
  }
}
class LearningEngine {
  /**
   * Core learning algorithms: mutates the shared pattern database from
   * each observed tool execution.
   * @param {object} db - mutable database (commandPatterns, contextPatterns,
   *   executionHistory); shared with PatternMatcher
   */
  constructor(db) {
    this.db = db;
    this.confidenceCalc = ConfidenceCalculator;
  }

  /**
   * Main learning entry point.
   * @param {{tool: string, parameters: object, success: boolean,
   *          timestamp: Date, errorMessage?: string}} execution
   */
  learnFromExecution(execution) {
    // Learn command patterns (only Bash executions carry a shell command)
    if (execution.tool === 'Bash') {
      this._learnCommandPattern(execution);
    }
    // Learn tool sequences
    this._learnSequencePattern(execution);
    // Learn context patterns from failures only
    if (!execution.success) {
      this._learnFailureContext(execution);
    }
  }

  /**
   * Learn from bash command executions: per-base-command success/failure
   * counters with recency-weighted confidence.
   */
  _learnCommandPattern(execution) {
    const command = execution.parameters.command || '';
    if (!command) return;
    const baseCmd = command.split(' ')[0];
    const patternId = `cmd_${baseCmd}`;
    if (patternId in this.db.commandPatterns) {
      const pattern = this.db.commandPatterns[patternId];
      // Update statistics
      if (execution.success) {
        pattern.prediction.successCount = (pattern.prediction.successCount || 0) + 1;
      } else {
        pattern.prediction.failureCount = (pattern.prediction.failureCount || 0) + 1;
      }
      // Recalculate confidence (recent evidence weighs more)
      const recency = this._calculateRecency(execution.timestamp);
      pattern.confidence = this.confidenceCalc.calculateCommandConfidence(
        pattern.prediction.successCount || 0,
        pattern.prediction.failureCount || 0,
        recency
      );
      pattern.lastSeen = execution.timestamp;
      pattern.evidenceCount += 1;
      // NOTE(review): pattern.successRate is set only at creation and never
      // refreshed here — confirm whether it should track the counters.
    } else {
      // Create new pattern with deliberately low starting confidence
      this.db.commandPatterns[patternId] = {
        patternId,
        patternType: 'command_execution',
        trigger: { command: baseCmd },
        prediction: {
          successCount: execution.success ? 1 : 0,
          failureCount: execution.success ? 0 : 1,
          commonErrors: execution.errorMessage ? [execution.errorMessage] : []
        },
        confidence: 0.3, // Start with low confidence
        evidenceCount: 1,
        lastSeen: execution.timestamp,
        successRate: execution.success ? 1.0 : 0.0
      };
    }
  }

  /**
   * Learn from tool sequence patterns (2- and 3-tool windows).
   * NOTE(review): this currently computes sequence ids but stores nothing —
   * the update step is an acknowledged stub (see inline comment).
   */
  _learnSequencePattern(execution) {
    // Get recent tool history (last 5 tools)
    const recentTools = this.db.executionHistory.slice(-5).map(e => e.tool);
    recentTools.push(execution.tool);
    // Look for sequences of 2-3 tools
    for (let seqLen = 2; seqLen <= 3; seqLen++) {
      if (recentTools.length >= seqLen) {
        const sequence = recentTools.slice(-seqLen);
        const patternId = `seq_${sequence.join('_')}`;
        // Update or create sequence pattern
        // (Simplified implementation - could be expanded)
      }
    }
  }

  /**
   * Learn from failure contexts: bucket failures by a coarse error type
   * and grow confidence with repeated observations (capped at 0.95).
   */
  _learnFailureContext(execution) {
    if (!execution.errorMessage) return;
    // Extract key error indicators
    const errorKey = this._extractErrorKey(execution.errorMessage);
    if (!errorKey) return;
    const patternId = `ctx_error_${errorKey}`;
    if (patternId in this.db.contextPatterns) {
      const pattern = this.db.contextPatterns[patternId];
      pattern.evidenceCount += 1;
      pattern.lastSeen = execution.timestamp;
      // Update confidence based on repeated failures
      pattern.confidence = Math.min(0.95, pattern.confidence + 0.05);
    } else {
      // Create new context pattern
      this.db.contextPatterns[patternId] = {
        patternId,
        patternType: 'context_error',
        trigger: {
          tool: execution.tool,
          errorType: errorKey
        },
        prediction: {
          likelyError: execution.errorMessage,
          suggestions: this._generateSuggestions(execution)
        },
        confidence: 0.4,
        evidenceCount: 1,
        lastSeen: execution.timestamp,
        successRate: 0.0
      };
    }
  }

  /**
   * Calculate recency factor (1.0 = very recent, 0.0 = very old).
   * @param {Date|string|number} timestamp - anything `new Date()` accepts
   * @returns {number} exponential decay with a 24-hour time constant
   */
  _calculateRecency(timestamp) {
    const now = new Date();
    const ageHours = (now - new Date(timestamp)) / (1000 * 60 * 60);
    // Exponential decay: recent events matter more
    return Math.max(0.0, Math.exp(-ageHours / 24.0)); // 24 hour half-life
  }

  /**
   * Extract a coarse error-type key from an error message, or null when
   * the message matches none of the known substrings.
   * @returns {?string}
   */
  _extractErrorKey(errorMessage) {
    const message = errorMessage.toLowerCase();
    const errorPatterns = {
      'command_not_found': ['command not found', 'not found'],
      'permission_denied': ['permission denied', 'access denied'],
      'file_not_found': ['no such file', 'file not found'],
      'connection_error': ['connection refused', 'network unreachable'],
      'syntax_error': ['syntax error', 'invalid syntax']
    };
    for (const [errorType, patterns] of Object.entries(errorPatterns)) {
      if (patterns.some(pattern => message.includes(pattern))) {
        return errorType;
      }
    }
    return null;
  }

  /**
   * Generate "Try '...'" suggestions for a failed execution based on a
   * hard-coded table of common command alternatives (Bash only).
   * @returns {string[]} possibly empty list of suggestion strings
   */
  _generateSuggestions(execution) {
    const suggestions = [];
    if (execution.tool === 'Bash') {
      const command = execution.parameters.command || '';
      if (command) {
        const baseCmd = command.split(' ')[0];
        // Common command alternatives
        const alternatives = {
          'pip': ['pip3', 'python -m pip', 'python3 -m pip'],
          'python': ['python3'],
          'node': ['nodejs'],
          'vim': ['nvim', 'nano']
        };
        if (baseCmd in alternatives) {
          const remainingArgs = command.split(' ').slice(1).join(' ');
          suggestions.push(
            ...alternatives[baseCmd].map(alt => `Try '${alt} ${remainingArgs}'`)
          );
        }
      }
    }
    return suggestions;
  }
}
class PredictionEngine {
  /**
   * Generates success predictions and suggestions for commands.
   * @param {PatternMatcher} matcher - supplies fuzzyCommandMatch / contextPatternMatch
   */
  constructor(matcher) {
    this.matcher = matcher;
  }

  /**
   * Predict whether a command is likely to succeed and collect warnings
   * and alternative suggestions from high-confidence failure patterns.
   * @param {string} command - full command line
   * @param {object} [context={}]
   * @returns {{likelySuccess: boolean, confidence: number,
   *            warnings: string[], suggestions: string[]}}
   */
  predictCommandOutcome(command, context = {}) {
    const result = {
      likelySuccess: true,
      confidence: 0.5,
      warnings: [],
      suggestions: []
    };
    const commandPatterns = this.matcher.fuzzyCommandMatch(command);
    // Context patterns are looked up for parity with the learner, but the
    // result is not folded into the prediction yet.
    this.matcher.contextPatternMatch(context);
    // Only the three best command matches influence the prediction.
    for (const pattern of commandPatterns.slice(0, 3)) {
      if (pattern.confidence <= 0.7) continue;
      const failureRate =
        (pattern.prediction.failureCount || 0) / Math.max(1, pattern.evidenceCount);
      if (failureRate <= 0.6) continue; // only flag commands that usually fail
      result.likelySuccess = false;
      result.confidence = pattern.confidence;
      result.warnings.push(`Command '${command.split(' ')[0]}' often fails`);
      result.suggestions.push(...(pattern.prediction.suggestions || []));
    }
    return result;
  }
}
class ShadowLearner {
  /**
   * Main shadow-learner facade: wires the database, matcher, learning and
   * prediction engines together, and adds persistence plus an in-memory
   * prediction cache.
   * @param {string} [storagePath='.claude_hooks/patterns'] - directory for
   *   patterns.json; created synchronously if absent
   */
  constructor(storagePath = '.claude_hooks/patterns') {
    this.storagePath = path.resolve(storagePath);
    fs.ensureDirSync(this.storagePath);
    this.db = this._loadDatabase();
    this.matcher = new PatternMatcher(this.db);
    this.learningEngine = new LearningEngine(this.db);
    this.predictionEngine = new PredictionEngine(this.matcher);
    // Performance cache (simple in-memory cache)
    this.predictionCache = new Map();
    this.cacheTimeout = 5 * 60 * 1000; // 5 minutes
  }

  /**
   * Learn from tool execution and append it to the bounded history.
   * Never throws: learning failures are logged and swallowed.
   * @param {{tool: string, parameters: object, success: boolean,
   *          timestamp: Date, errorMessage?: string}} execution
   */
  learnFromExecution(execution) {
    try {
      this.learningEngine.learnFromExecution(execution);
      this.db.executionHistory.push(execution);
      // Trim history to keep memory usage reasonable
      if (this.db.executionHistory.length > 1000) {
        this.db.executionHistory = this.db.executionHistory.slice(-500);
      }
    } catch (error) {
      // Learning failures shouldn't break the system
      console.error('Shadow learner error:', error.message);
    }
  }

  /**
   * Predict command outcome with caching (5-minute TTL).
   * NOTE(review): the cache key hashes only `command`, so calls with
   * different `context` share one cached prediction — confirm acceptable.
   * @param {string} command
   * @param {object} [context={}]
   * @returns {object} prediction from PredictionEngine
   */
  predictCommandOutcome(command, context = {}) {
    const cacheKey = `cmd_pred:${this._hash(command)}`;
    const now = Date.now();
    // Check cache
    if (this.predictionCache.has(cacheKey)) {
      const cached = this.predictionCache.get(cacheKey);
      if (now - cached.timestamp < this.cacheTimeout) {
        return cached.prediction;
      }
    }
    const prediction = this.predictionEngine.predictCommandOutcome(command, context);
    // Cache result
    this.predictionCache.set(cacheKey, { prediction, timestamp: now });
    // Clean old cache entries
    this._cleanCache();
    return prediction;
  }

  /**
   * Quick method for command failure learning (backward compatibility).
   * Records a synthetic failed Bash execution and attaches `suggestion`
   * to the matching command pattern (deduplicated).
   * @param {string} command
   * @param {string} suggestion
   * @param {number} confidence - accepted for compatibility; not used here
   */
  learnCommandFailure(command, suggestion, confidence) {
    const execution = {
      tool: 'Bash',
      parameters: { command },
      success: false,
      timestamp: new Date(),
      errorMessage: `Command failed: ${command}`
    };
    this.learnFromExecution(execution);
    // Also store the specific suggestion
    const baseCmd = command.split(' ')[0];
    const patternId = `cmd_${baseCmd}`;
    if (patternId in this.db.commandPatterns) {
      const pattern = this.db.commandPatterns[patternId];
      pattern.prediction.suggestions = pattern.prediction.suggestions || [];
      if (!pattern.prediction.suggestions.includes(suggestion)) {
        pattern.prediction.suggestions.push(suggestion);
      }
    }
  }

  /**
   * Get suggestion for a command (backward compatibility).
   * @param {string} command
   * @returns {?{suggestion: string, confidence: number}} first suggestion
   *   with the "Try '...'" wrapper stripped, or null when none applies
   */
  getSuggestion(command) {
    const prediction = this.predictCommandOutcome(command);
    if (!prediction.likelySuccess && prediction.suggestions.length > 0) {
      return {
        suggestion: prediction.suggestions[0].replace(/^Try '|'$/g, ''), // Clean format
        confidence: prediction.confidence
      };
    }
    return null;
  }

  /**
   * Save learned patterns to disk, keeping the previous file as
   * patterns.backup.json. Never throws: failures are logged and swallowed.
   */
  async saveDatabase() {
    try {
      const patternsFile = path.join(this.storagePath, 'patterns.json');
      const backupFile = path.join(this.storagePath, 'patterns.backup.json');
      // Create backup of existing data
      if (await fs.pathExists(patternsFile)) {
        await fs.move(patternsFile, backupFile, { overwrite: true });
      }
      // Save new data
      await fs.writeJson(patternsFile, this._serializeDatabase(), { spaces: 2 });
    } catch (error) {
      // Save failures shouldn't break the system
      console.error('Failed to save shadow learner database:', error.message);
    }
  }

  /**
   * Load the patterns database from disk, falling back to an empty
   * database when the file is missing or unreadable.
   */
  _loadDatabase() {
    const patternsFile = path.join(this.storagePath, 'patterns.json');
    try {
      if (fs.existsSync(patternsFile)) {
        const data = fs.readJsonSync(patternsFile);
        return this._deserializeDatabase(data);
      }
    } catch (error) {
      // If loading fails, start with empty database
      console.error('Failed to load shadow learner database, starting fresh:', error.message);
    }
    return {
      commandPatterns: {},
      contextPatterns: {},
      sequencePatterns: {},
      executionHistory: []
    };
  }

  /**
   * Serialize the database for JSON storage; history is truncated to the
   * last 100 executions and versioned metadata is attached.
   */
  _serializeDatabase() {
    return {
      commandPatterns: this.db.commandPatterns,
      contextPatterns: this.db.contextPatterns,
      sequencePatterns: this.db.sequencePatterns || {},
      executionHistory: this.db.executionHistory.slice(-100), // Keep last 100 executions
      metadata: {
        version: '1.0.0',
        lastSaved: new Date().toISOString()
      }
    };
  }

  /**
   * Deserialize the database from JSON, reviving execution timestamps
   * into Date objects and defaulting missing sections.
   */
  _deserializeDatabase(data) {
    return {
      commandPatterns: data.commandPatterns || {},
      contextPatterns: data.contextPatterns || {},
      sequencePatterns: data.sequencePatterns || {},
      executionHistory: (data.executionHistory || []).map(e => ({
        ...e,
        timestamp: new Date(e.timestamp)
      }))
    };
  }

  /**
   * Simple 32-bit string hash (djb2-like) for cache keys; not
   * collision-resistant, which is acceptable for a short-lived cache.
   * @param {string} str
   * @returns {string} decimal string form of the signed 32-bit hash
   */
  _hash(str) {
    let hash = 0;
    for (let i = 0; i < str.length; i++) {
      const char = str.charCodeAt(i);
      hash = ((hash << 5) - hash) + char;
      hash = hash & hash; // Convert to 32bit integer
    }
    return hash.toString();
  }

  /**
   * Clean expired cache entries (older than cacheTimeout).
   */
  _cleanCache() {
    const now = Date.now();
    for (const [key, value] of this.predictionCache.entries()) {
      if (now - value.timestamp > this.cacheTimeout) {
        this.predictionCache.delete(key);
      }
    }
  }
}
// Public API: ShadowLearner is the primary entry point; the remaining
// classes are exported for testing and advanced composition.
module.exports = {
  ShadowLearner,
  ConfidenceCalculator,
  PatternMatcher,
  LearningEngine,
  PredictionEngine
};

View File

@ -1,395 +0,0 @@
#!/usr/bin/env python3
"""Shadow Learner - Pattern learning and prediction system"""
import json
import math
import time
import difflib
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any
from cachetools import TTLCache, LRUCache
try:
from .models import Pattern, ToolExecution, PatternDatabase, ValidationResult
except ImportError:
from models import Pattern, ToolExecution, PatternDatabase, ValidationResult
class ConfidenceCalculator:
"""Calculate confidence scores for learned patterns"""
@staticmethod
def calculate_command_confidence(success_count: int, failure_count: int,
recency_factor: float) -> float:
"""Calculate confidence for command failure patterns"""
total_attempts = success_count + failure_count
if total_attempts == 0:
return 0.0
# Base confidence from failure rate
failure_rate = failure_count / total_attempts
# Sample size adjustment (more data = more confidence)
sample_factor = min(1.0, total_attempts / 10.0) # Plateau at 10 samples
# Time decay (recent failures are more relevant)
confidence = (failure_rate * sample_factor * (0.5 + 0.5 * recency_factor))
return min(0.99, max(0.1, confidence)) # Clamp between 0.1 and 0.99
@staticmethod
def calculate_sequence_confidence(successful_sequences: int,
total_sequences: int) -> float:
"""Calculate confidence for tool sequence patterns"""
if total_sequences == 0:
return 0.0
success_rate = successful_sequences / total_sequences
sample_factor = min(1.0, total_sequences / 5.0)
return success_rate * sample_factor
class PatternMatcher:
"""Advanced pattern matching with fuzzy logic"""
def __init__(self, db: PatternDatabase):
self.db = db
def fuzzy_command_match(self, command: str, threshold: float = 0.8) -> List[Pattern]:
"""Find similar command patterns using fuzzy matching"""
cmd_tokens = command.lower().split()
if not cmd_tokens:
return []
base_cmd = cmd_tokens[0]
matches = []
for pattern in self.db.command_patterns.values():
pattern_cmd = pattern.trigger.get("command", "").lower()
# Exact match
if pattern_cmd == base_cmd:
matches.append(pattern)
# Fuzzy match on command name
elif difflib.SequenceMatcher(None, pattern_cmd, base_cmd).ratio() > threshold:
matches.append(pattern)
# Partial match (e.g., "pip3" matches "pip install")
elif any(pattern_cmd in token for token in cmd_tokens):
matches.append(pattern)
return sorted(matches, key=lambda p: p.confidence, reverse=True)
def context_pattern_match(self, current_context: Dict[str, Any]) -> List[Pattern]:
"""Match patterns based on current context"""
matches = []
for pattern in self.db.context_patterns.values():
trigger = pattern.trigger
# Check if all trigger conditions are met
if self._context_matches(current_context, trigger):
matches.append(pattern)
return sorted(matches, key=lambda p: p.confidence, reverse=True)
def _context_matches(self, current: Dict[str, Any], trigger: Dict[str, Any]) -> bool:
"""Check if current context matches trigger conditions"""
for key, expected_value in trigger.items():
if key not in current:
return False
current_value = current[key]
# Handle different value types
if isinstance(expected_value, str) and isinstance(current_value, str):
if expected_value.lower() not in current_value.lower():
return False
elif expected_value != current_value:
return False
return True
class LearningEngine:
"""Core learning algorithms"""
def __init__(self, db: PatternDatabase):
self.db = db
self.confidence_calc = ConfidenceCalculator()
def learn_from_execution(self, execution: ToolExecution):
"""Main learning entry point"""
# Learn command patterns
if execution.tool == "Bash":
self._learn_command_pattern(execution)
# Learn tool sequences
self._learn_sequence_pattern(execution)
# Learn context patterns
if not execution.success:
self._learn_failure_context(execution)
def _learn_command_pattern(self, execution: ToolExecution):
"""Learn from bash command executions"""
command = execution.parameters.get("command", "")
if not command:
return
base_cmd = command.split()[0]
pattern_id = f"cmd_{base_cmd}"
if pattern_id in self.db.command_patterns:
pattern = self.db.command_patterns[pattern_id]
# Update statistics
if execution.success:
pattern.prediction["success_count"] = pattern.prediction.get("success_count", 0) + 1
else:
pattern.prediction["failure_count"] = pattern.prediction.get("failure_count", 0) + 1
# Recalculate confidence
recency = self._calculate_recency(execution.timestamp)
pattern.confidence = self.confidence_calc.calculate_command_confidence(
pattern.prediction.get("success_count", 0),
pattern.prediction.get("failure_count", 0),
recency
)
pattern.last_seen = execution.timestamp
pattern.evidence_count += 1
else:
# Create new pattern
self.db.command_patterns[pattern_id] = Pattern(
pattern_id=pattern_id,
pattern_type="command_execution",
trigger={"command": base_cmd},
prediction={
"success_count": 1 if execution.success else 0,
"failure_count": 0 if execution.success else 1,
"common_errors": [execution.error_message] if execution.error_message else []
},
confidence=0.3, # Start with low confidence
evidence_count=1,
last_seen=execution.timestamp,
success_rate=1.0 if execution.success else 0.0
)
def _learn_sequence_pattern(self, execution: ToolExecution):
"""Learn from tool sequence patterns"""
# Get recent tool history (last 5 tools)
recent_tools = [e.tool for e in self.db.execution_history[-5:]]
recent_tools.append(execution.tool)
# Look for sequences of 2-3 tools
for seq_len in [2, 3]:
if len(recent_tools) >= seq_len:
sequence = tuple(recent_tools[-seq_len:])
pattern_id = f"seq_{'_'.join(sequence)}"
# Update or create sequence pattern
# (Simplified implementation - could be expanded)
pass
def _learn_failure_context(self, execution: ToolExecution):
"""Learn from failure contexts"""
if not execution.error_message:
return
# Extract key error indicators
error_key = self._extract_error_key(execution.error_message)
if not error_key:
return
pattern_id = f"ctx_error_{error_key}"
if pattern_id in self.db.context_patterns:
pattern = self.db.context_patterns[pattern_id]
pattern.evidence_count += 1
pattern.last_seen = execution.timestamp
# Update confidence based on repeated failures
pattern.confidence = min(0.95, pattern.confidence + 0.05)
else:
# Create new context pattern
self.db.context_patterns[pattern_id] = Pattern(
pattern_id=pattern_id,
pattern_type="context_error",
trigger={
"tool": execution.tool,
"error_type": error_key
},
prediction={
"likely_error": execution.error_message,
"suggestions": self._generate_suggestions(execution)
},
confidence=0.4,
evidence_count=1,
last_seen=execution.timestamp,
success_rate=0.0
)
def _calculate_recency(self, timestamp: datetime) -> float:
"""Calculate recency factor (1.0 = very recent, 0.0 = very old)"""
now = datetime.now()
age_hours = (now - timestamp).total_seconds() / 3600
# Exponential decay: recent events matter more
return max(0.0, math.exp(-age_hours / 24.0)) # 24 hour half-life
def _extract_error_key(self, error_message: str) -> Optional[str]:
"""Extract key error indicators from error messages"""
error_message = error_message.lower()
error_patterns = {
"command_not_found": ["command not found", "not found"],
"permission_denied": ["permission denied", "access denied"],
"file_not_found": ["no such file", "file not found"],
"connection_error": ["connection refused", "network unreachable"],
"syntax_error": ["syntax error", "invalid syntax"]
}
for error_type, patterns in error_patterns.items():
if any(pattern in error_message for pattern in patterns):
return error_type
return None
def _generate_suggestions(self, execution: ToolExecution) -> List[str]:
"""Generate suggestions based on failed execution"""
suggestions = []
if execution.tool == "Bash":
command = execution.parameters.get("command", "")
if command:
base_cmd = command.split()[0]
# Common command alternatives
alternatives = {
"pip": ["pip3", "python -m pip", "python3 -m pip"],
"python": ["python3"],
"node": ["nodejs"],
"vim": ["nvim", "nano"],
}
if base_cmd in alternatives:
suggestions.extend([f"Try '{alt} {' '.join(command.split()[1:])}'"
for alt in alternatives[base_cmd]])
return suggestions
class PredictionEngine:
"""Generate predictions and suggestions"""
def __init__(self, matcher: PatternMatcher):
self.matcher = matcher
def predict_command_outcome(self, command: str, context: Dict[str, Any]) -> Dict[str, Any]:
"""Predict if a command will succeed and suggest alternatives"""
# Find matching patterns
command_patterns = self.matcher.fuzzy_command_match(command)
context_patterns = self.matcher.context_pattern_match(context)
prediction = {
"likely_success": True,
"confidence": 0.5,
"warnings": [],
"suggestions": []
}
# Analyze command patterns
for pattern in command_patterns[:3]: # Top 3 matches
if pattern.confidence > 0.7:
failure_rate = pattern.prediction.get("failure_count", 0) / max(1, pattern.evidence_count)
if failure_rate > 0.6: # High failure rate
prediction["likely_success"] = False
prediction["confidence"] = pattern.confidence
prediction["warnings"].append(f"Command '{command.split()[0]}' often fails")
# Add suggestions from pattern
suggestions = pattern.prediction.get("suggestions", [])
prediction["suggestions"].extend(suggestions)
return prediction
class ShadowLearner:
"""Main shadow learner interface"""
def __init__(self, storage_path: str = ".claude_hooks/patterns"):
self.storage_path = Path(storage_path)
self.storage_path.mkdir(parents=True, exist_ok=True)
self.db = self._load_database()
self.matcher = PatternMatcher(self.db)
self.learning_engine = LearningEngine(self.db)
self.prediction_engine = PredictionEngine(self.matcher)
# Performance caches
self.prediction_cache = TTLCache(maxsize=1000, ttl=300) # 5-minute cache
def learn_from_execution(self, execution: ToolExecution):
"""Learn from tool execution"""
try:
self.learning_engine.learn_from_execution(execution)
self.db.execution_history.append(execution)
# Trim history to keep memory usage reasonable
if len(self.db.execution_history) > 1000:
self.db.execution_history = self.db.execution_history[-500:]
except Exception as e:
# Learning failures shouldn't break the system
pass
def predict_command_outcome(self, command: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
"""Predict command outcome with caching"""
cache_key = f"cmd_pred:{hash(command)}"
if cache_key in self.prediction_cache:
return self.prediction_cache[cache_key]
prediction = self.prediction_engine.predict_command_outcome(
command, context or {}
)
self.prediction_cache[cache_key] = prediction
return prediction
def save_database(self):
"""Save learned patterns to disk"""
try:
patterns_file = self.storage_path / "patterns.json"
backup_file = self.storage_path / "patterns.backup.json"
# Create backup of existing data
if patterns_file.exists():
patterns_file.rename(backup_file)
# Save new data
with open(patterns_file, 'w') as f:
json.dump(self.db.to_dict(), f, indent=2)
except Exception as e:
# Save failures shouldn't break the system
pass
def _load_database(self) -> PatternDatabase:
"""Load patterns database from disk"""
patterns_file = self.storage_path / "patterns.json"
try:
if patterns_file.exists():
with open(patterns_file, 'r') as f:
data = json.load(f)
return PatternDatabase.from_dict(data)
except Exception:
# If loading fails, start with empty database
pass
return PatternDatabase()

128
node_modules/.package-lock.json generated vendored Normal file
View File

@ -0,0 +1,128 @@
{
"name": "claude-hooks",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"license": "MIT",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"license": "MIT",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"license": "MIT"
},
"node_modules/commander": {
"version": "11.1.0",
"resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz",
"integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==",
"license": "MIT",
"engines": {
"node": ">=16"
}
},
"node_modules/fs-extra": {
"version": "11.3.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz",
"integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==",
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=14.14"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
"license": "ISC"
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jsonfile": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
"integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
"license": "MIT",
"dependencies": {
"universalify": "^2.0.0"
},
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/universalify": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
"integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
"license": "MIT",
"engines": {
"node": ">= 10.0.0"
}
}
}
}

345
node_modules/ansi-styles/index.d.ts generated vendored Normal file
View File

@ -0,0 +1,345 @@
declare type CSSColor =
| 'aliceblue'
| 'antiquewhite'
| 'aqua'
| 'aquamarine'
| 'azure'
| 'beige'
| 'bisque'
| 'black'
| 'blanchedalmond'
| 'blue'
| 'blueviolet'
| 'brown'
| 'burlywood'
| 'cadetblue'
| 'chartreuse'
| 'chocolate'
| 'coral'
| 'cornflowerblue'
| 'cornsilk'
| 'crimson'
| 'cyan'
| 'darkblue'
| 'darkcyan'
| 'darkgoldenrod'
| 'darkgray'
| 'darkgreen'
| 'darkgrey'
| 'darkkhaki'
| 'darkmagenta'
| 'darkolivegreen'
| 'darkorange'
| 'darkorchid'
| 'darkred'
| 'darksalmon'
| 'darkseagreen'
| 'darkslateblue'
| 'darkslategray'
| 'darkslategrey'
| 'darkturquoise'
| 'darkviolet'
| 'deeppink'
| 'deepskyblue'
| 'dimgray'
| 'dimgrey'
| 'dodgerblue'
| 'firebrick'
| 'floralwhite'
| 'forestgreen'
| 'fuchsia'
| 'gainsboro'
| 'ghostwhite'
| 'gold'
| 'goldenrod'
| 'gray'
| 'green'
| 'greenyellow'
| 'grey'
| 'honeydew'
| 'hotpink'
| 'indianred'
| 'indigo'
| 'ivory'
| 'khaki'
| 'lavender'
| 'lavenderblush'
| 'lawngreen'
| 'lemonchiffon'
| 'lightblue'
| 'lightcoral'
| 'lightcyan'
| 'lightgoldenrodyellow'
| 'lightgray'
| 'lightgreen'
| 'lightgrey'
| 'lightpink'
| 'lightsalmon'
| 'lightseagreen'
| 'lightskyblue'
| 'lightslategray'
| 'lightslategrey'
| 'lightsteelblue'
| 'lightyellow'
| 'lime'
| 'limegreen'
| 'linen'
| 'magenta'
| 'maroon'
| 'mediumaquamarine'
| 'mediumblue'
| 'mediumorchid'
| 'mediumpurple'
| 'mediumseagreen'
| 'mediumslateblue'
| 'mediumspringgreen'
| 'mediumturquoise'
| 'mediumvioletred'
| 'midnightblue'
| 'mintcream'
| 'mistyrose'
| 'moccasin'
| 'navajowhite'
| 'navy'
| 'oldlace'
| 'olive'
| 'olivedrab'
| 'orange'
| 'orangered'
| 'orchid'
| 'palegoldenrod'
| 'palegreen'
| 'paleturquoise'
| 'palevioletred'
| 'papayawhip'
| 'peachpuff'
| 'peru'
| 'pink'
| 'plum'
| 'powderblue'
| 'purple'
| 'rebeccapurple'
| 'red'
| 'rosybrown'
| 'royalblue'
| 'saddlebrown'
| 'salmon'
| 'sandybrown'
| 'seagreen'
| 'seashell'
| 'sienna'
| 'silver'
| 'skyblue'
| 'slateblue'
| 'slategray'
| 'slategrey'
| 'snow'
| 'springgreen'
| 'steelblue'
| 'tan'
| 'teal'
| 'thistle'
| 'tomato'
| 'turquoise'
| 'violet'
| 'wheat'
| 'white'
| 'whitesmoke'
| 'yellow'
| 'yellowgreen';
declare namespace ansiStyles {
interface ColorConvert {
/**
The RGB color space.
@param red - (`0`-`255`)
@param green - (`0`-`255`)
@param blue - (`0`-`255`)
*/
rgb(red: number, green: number, blue: number): string;
/**
The RGB HEX color space.
@param hex - A hexadecimal string containing RGB data.
*/
hex(hex: string): string;
/**
@param keyword - A CSS color name.
*/
keyword(keyword: CSSColor): string;
/**
The HSL color space.
@param hue - (`0`-`360`)
@param saturation - (`0`-`100`)
@param lightness - (`0`-`100`)
*/
hsl(hue: number, saturation: number, lightness: number): string;
/**
The HSV color space.
@param hue - (`0`-`360`)
@param saturation - (`0`-`100`)
@param value - (`0`-`100`)
*/
hsv(hue: number, saturation: number, value: number): string;
/**
The HSV color space.
@param hue - (`0`-`360`)
@param whiteness - (`0`-`100`)
@param blackness - (`0`-`100`)
*/
hwb(hue: number, whiteness: number, blackness: number): string;
/**
Use a [4-bit unsigned number](https://en.wikipedia.org/wiki/ANSI_escape_code#3/4-bit) to set text color.
*/
ansi(ansi: number): string;
/**
Use an [8-bit unsigned number](https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit) to set text color.
*/
ansi256(ansi: number): string;
}
interface CSPair {
/**
The ANSI terminal control sequence for starting this style.
*/
readonly open: string;
/**
The ANSI terminal control sequence for ending this style.
*/
readonly close: string;
}
interface ColorBase {
readonly ansi: ColorConvert;
readonly ansi256: ColorConvert;
readonly ansi16m: ColorConvert;
/**
The ANSI terminal control sequence for ending this color.
*/
readonly close: string;
}
interface Modifier {
/**
Resets the current color chain.
*/
readonly reset: CSPair;
/**
Make text bold.
*/
readonly bold: CSPair;
/**
Emitting only a small amount of light.
*/
readonly dim: CSPair;
/**
Make text italic. (Not widely supported)
*/
readonly italic: CSPair;
/**
Make text underline. (Not widely supported)
*/
readonly underline: CSPair;
/**
Inverse background and foreground colors.
*/
readonly inverse: CSPair;
/**
Prints the text, but makes it invisible.
*/
readonly hidden: CSPair;
/**
Puts a horizontal line through the center of the text. (Not widely supported)
*/
readonly strikethrough: CSPair;
}
interface ForegroundColor {
readonly black: CSPair;
readonly red: CSPair;
readonly green: CSPair;
readonly yellow: CSPair;
readonly blue: CSPair;
readonly cyan: CSPair;
readonly magenta: CSPair;
readonly white: CSPair;
/**
Alias for `blackBright`.
*/
readonly gray: CSPair;
/**
Alias for `blackBright`.
*/
readonly grey: CSPair;
readonly blackBright: CSPair;
readonly redBright: CSPair;
readonly greenBright: CSPair;
readonly yellowBright: CSPair;
readonly blueBright: CSPair;
readonly cyanBright: CSPair;
readonly magentaBright: CSPair;
readonly whiteBright: CSPair;
}
interface BackgroundColor {
readonly bgBlack: CSPair;
readonly bgRed: CSPair;
readonly bgGreen: CSPair;
readonly bgYellow: CSPair;
readonly bgBlue: CSPair;
readonly bgCyan: CSPair;
readonly bgMagenta: CSPair;
readonly bgWhite: CSPair;
/**
Alias for `bgBlackBright`.
*/
readonly bgGray: CSPair;
/**
Alias for `bgBlackBright`.
*/
readonly bgGrey: CSPair;
readonly bgBlackBright: CSPair;
readonly bgRedBright: CSPair;
readonly bgGreenBright: CSPair;
readonly bgYellowBright: CSPair;
readonly bgBlueBright: CSPair;
readonly bgCyanBright: CSPair;
readonly bgMagentaBright: CSPair;
readonly bgWhiteBright: CSPair;
}
}
declare const ansiStyles: {
readonly modifier: ansiStyles.Modifier;
readonly color: ansiStyles.ForegroundColor & ansiStyles.ColorBase;
readonly bgColor: ansiStyles.BackgroundColor & ansiStyles.ColorBase;
readonly codes: ReadonlyMap<number, number>;
} & ansiStyles.BackgroundColor & ansiStyles.ForegroundColor & ansiStyles.Modifier;
export = ansiStyles;

163
node_modules/ansi-styles/index.js generated vendored Normal file
View File

@ -0,0 +1,163 @@
'use strict';
// Wrap a color-conversion function so its result is emitted as a basic
// 16-color SGR escape. `offset` 10 selects the background variant.
const wrapAnsi16 = (fn, offset) => (...args) => `\u001B[${fn(...args) + offset}m`;

// 256-color escape: ESC[38;5;<n>m for foreground, ESC[48;5;<n>m for background.
const wrapAnsi256 = (fn, offset) => (...args) => `\u001B[${38 + offset};5;${fn(...args)}m`;

// Truecolor escape: ESC[38;2;<r>;<g>;<b>m (48 for background).
const wrapAnsi16m = (fn, offset) => (...args) => {
	const [red, green, blue] = fn(...args);
	return `\u001B[${38 + offset};2;${red};${green};${blue}m`;
};

// Identity conversions used when source and target color spaces match.
const ansi2ansi = code => code;
const rgb2rgb = (red, green, blue) => [red, green, blue];
// Define `property` on `object` as a getter that computes its value once on
// first access, then redefines itself as a plain cached data property so the
// (potentially expensive) `get` callback runs at most one time.
const setLazyProperty = (object, property, get) => {
	Object.defineProperty(object, property, {
		enumerable: true,
		configurable: true,
		get() {
			const value = get();
			// Replace the accessor with the computed value; later reads
			// hit the data property directly.
			Object.defineProperty(object, property, {
				value,
				enumerable: true,
				configurable: true
			});
			return value;
		}
	});
};
/** @type {typeof import('color-convert')} */
let colorConvert;
// Build the dynamic color models (rgb/hex/hsl/…) for one target space.
// `color-convert` is required lazily so it is only loaded the first time a
// dynamic color is actually used.
const makeDynamicStyles = (wrap, targetSpace, identity, isBackground) => {
	if (colorConvert === undefined) {
		colorConvert = require('color-convert');
	}
	// Background SGR codes are the foreground codes shifted by 10.
	const offset = isBackground ? 10 : 0;
	const styles = {};
	for (const [sourceSpace, suite] of Object.entries(colorConvert)) {
		// color-convert names 16-color ansi "ansi16"; expose it as "ansi".
		const name = sourceSpace === 'ansi16' ? 'ansi' : sourceSpace;
		if (sourceSpace === targetSpace) {
			// Same space: no conversion needed, pass values straight through.
			styles[name] = wrap(identity, offset);
		} else if (typeof suite === 'object') {
			// Use color-convert's sourceSpace -> targetSpace converter.
			styles[name] = wrap(suite[targetSpace], offset);
		}
	}
	return styles;
};
// Build the full ansi-styles export: a flat map of {open, close} escape
// pairs, grouped (non-enumerable) views, the raw code map, and lazy
// dynamic color models.
function assembleStyles() {
	// open-code -> close-code pairs, exposed below as the non-enumerable
	// `styles.codes` map.
	const codes = new Map();
	// Each entry is [openCode, closeCode] for the SGR escape sequence.
	const styles = {
		modifier: {
			reset: [0, 0],
			// 21 isn't widely supported and 22 does the same thing
			bold: [1, 22],
			dim: [2, 22],
			italic: [3, 23],
			underline: [4, 24],
			inverse: [7, 27],
			hidden: [8, 28],
			strikethrough: [9, 29]
		},
		color: {
			black: [30, 39],
			red: [31, 39],
			green: [32, 39],
			yellow: [33, 39],
			blue: [34, 39],
			magenta: [35, 39],
			cyan: [36, 39],
			white: [37, 39],
			// Bright color
			blackBright: [90, 39],
			redBright: [91, 39],
			greenBright: [92, 39],
			yellowBright: [93, 39],
			blueBright: [94, 39],
			magentaBright: [95, 39],
			cyanBright: [96, 39],
			whiteBright: [97, 39]
		},
		bgColor: {
			bgBlack: [40, 49],
			bgRed: [41, 49],
			bgGreen: [42, 49],
			bgYellow: [43, 49],
			bgBlue: [44, 49],
			bgMagenta: [45, 49],
			bgCyan: [46, 49],
			bgWhite: [47, 49],
			// Bright color
			bgBlackBright: [100, 49],
			bgRedBright: [101, 49],
			bgGreenBright: [102, 49],
			bgYellowBright: [103, 49],
			bgBlueBright: [104, 49],
			bgMagentaBright: [105, 49],
			bgCyanBright: [106, 49],
			bgWhiteBright: [107, 49]
		}
	};
	// Alias bright black as gray (and grey)
	styles.color.gray = styles.color.blackBright;
	styles.bgColor.bgGray = styles.bgColor.bgBlackBright;
	styles.color.grey = styles.color.blackBright;
	styles.bgColor.bgGrey = styles.bgColor.bgBlackBright;
	// Flatten: every style becomes a top-level {open, close} pair, while the
	// grouped view (modifier/color/bgColor) is re-attached non-enumerably and
	// shares the very same pair objects.
	for (const [groupName, group] of Object.entries(styles)) {
		for (const [styleName, style] of Object.entries(group)) {
			styles[styleName] = {
				open: `\u001B[${style[0]}m`,
				close: `\u001B[${style[1]}m`
			};
			// Group entry now aliases the flat entry (same object).
			group[styleName] = styles[styleName];
			codes.set(style[0], style[1]);
		}
		// Non-enumerable so iterating the styles map yields only individual
		// styles, never the groups themselves.
		Object.defineProperty(styles, groupName, {
			value: group,
			enumerable: false
		});
	}
	Object.defineProperty(styles, 'codes', {
		value: codes,
		enumerable: false
	});
	// All foreground colors share one close code; likewise backgrounds.
	styles.color.close = '\u001B[39m';
	styles.bgColor.close = '\u001B[49m';
	// Dynamic color models are built lazily on first access because they pull
	// in the `color-convert` dependency (see makeDynamicStyles).
	setLazyProperty(styles.color, 'ansi', () => makeDynamicStyles(wrapAnsi16, 'ansi16', ansi2ansi, false));
	setLazyProperty(styles.color, 'ansi256', () => makeDynamicStyles(wrapAnsi256, 'ansi256', ansi2ansi, false));
	setLazyProperty(styles.color, 'ansi16m', () => makeDynamicStyles(wrapAnsi16m, 'rgb', rgb2rgb, false));
	setLazyProperty(styles.bgColor, 'ansi', () => makeDynamicStyles(wrapAnsi16, 'ansi16', ansi2ansi, true));
	setLazyProperty(styles.bgColor, 'ansi256', () => makeDynamicStyles(wrapAnsi256, 'ansi256', ansi2ansi, true));
	setLazyProperty(styles.bgColor, 'ansi16m', () => makeDynamicStyles(wrapAnsi16m, 'rgb', rgb2rgb, true));
	return styles;
}
// Make the export immutable
// Exposing `module.exports` through a getter (with no setter) both defers
// construction until the module is first consumed and prevents reassignment.
// NOTE(review): the getter invokes `assembleStyles` on each access of
// `module.exports`; confirm Node's module caching makes this effectively
// once-per-process before relying on the export's object identity.
Object.defineProperty(module, 'exports', {
	enumerable: true,
	get: assembleStyles
});

9
node_modules/ansi-styles/license generated vendored Normal file
View File

@ -0,0 +1,9 @@
MIT License
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

56
node_modules/ansi-styles/package.json generated vendored Normal file
View File

@ -0,0 +1,56 @@
{
"name": "ansi-styles",
"version": "4.3.0",
"description": "ANSI escape codes for styling strings in the terminal",
"license": "MIT",
"repository": "chalk/ansi-styles",
"funding": "https://github.com/chalk/ansi-styles?sponsor=1",
"author": {
"name": "Sindre Sorhus",
"email": "sindresorhus@gmail.com",
"url": "sindresorhus.com"
},
"engines": {
"node": ">=8"
},
"scripts": {
"test": "xo && ava && tsd",
"screenshot": "svg-term --command='node screenshot' --out=screenshot.svg --padding=3 --width=55 --height=3 --at=1000 --no-cursor"
},
"files": [
"index.js",
"index.d.ts"
],
"keywords": [
"ansi",
"styles",
"color",
"colour",
"colors",
"terminal",
"console",
"cli",
"string",
"tty",
"escape",
"formatting",
"rgb",
"256",
"shell",
"xterm",
"log",
"logging",
"command-line",
"text"
],
"dependencies": {
"color-convert": "^2.0.1"
},
"devDependencies": {
"@types/color-convert": "^1.9.0",
"ava": "^2.3.0",
"svg-term-cli": "^2.1.1",
"tsd": "^0.11.0",
"xo": "^0.25.3"
}
}

152
node_modules/ansi-styles/readme.md generated vendored Normal file
View File

@ -0,0 +1,152 @@
# ansi-styles [![Build Status](https://travis-ci.org/chalk/ansi-styles.svg?branch=master)](https://travis-ci.org/chalk/ansi-styles)
> [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code#Colors_and_Styles) for styling strings in the terminal
You probably want the higher-level [chalk](https://github.com/chalk/chalk) module for styling your strings.
<img src="screenshot.svg" width="900">
## Install
```
$ npm install ansi-styles
```
## Usage
```js
const style = require('ansi-styles');
console.log(`${style.green.open}Hello world!${style.green.close}`);
// Color conversion between 16/256/truecolor
// NOTE: If conversion goes to 16 colors or 256 colors, the original color
// may be degraded to fit that color palette. This means terminals
// that do not support 16 million colors will best-match the
// original color.
console.log(style.bgColor.ansi.hsl(120, 80, 72) + 'Hello world!' + style.bgColor.close);
console.log(style.color.ansi256.rgb(199, 20, 250) + 'Hello world!' + style.color.close);
console.log(style.color.ansi16m.hex('#abcdef') + 'Hello world!' + style.color.close);
```
## API
Each style has an `open` and `close` property.
## Styles
### Modifiers
- `reset`
- `bold`
- `dim`
- `italic` *(Not widely supported)*
- `underline`
- `inverse`
- `hidden`
- `strikethrough` *(Not widely supported)*
### Colors
- `black`
- `red`
- `green`
- `yellow`
- `blue`
- `magenta`
- `cyan`
- `white`
- `blackBright` (alias: `gray`, `grey`)
- `redBright`
- `greenBright`
- `yellowBright`
- `blueBright`
- `magentaBright`
- `cyanBright`
- `whiteBright`
### Background colors
- `bgBlack`
- `bgRed`
- `bgGreen`
- `bgYellow`
- `bgBlue`
- `bgMagenta`
- `bgCyan`
- `bgWhite`
- `bgBlackBright` (alias: `bgGray`, `bgGrey`)
- `bgRedBright`
- `bgGreenBright`
- `bgYellowBright`
- `bgBlueBright`
- `bgMagentaBright`
- `bgCyanBright`
- `bgWhiteBright`
## Advanced usage
By default, you get a map of styles, but the styles are also available as groups. They are non-enumerable so they don't show up unless you access them explicitly. This makes it easier to expose only a subset in a higher-level module.
- `style.modifier`
- `style.color`
- `style.bgColor`
###### Example
```js
console.log(style.color.green.open);
```
Raw escape codes (i.e. without the CSI escape prefix `\u001B[` and render mode postfix `m`) are available under `style.codes`, which returns a `Map` with the open codes as keys and close codes as values.
###### Example
```js
console.log(style.codes.get(36));
//=> 39
```
## [256 / 16 million (TrueColor) support](https://gist.github.com/XVilka/8346728)
`ansi-styles` uses the [`color-convert`](https://github.com/Qix-/color-convert) package to allow for converting between various colors and ANSI escapes, with support for 256 and 16 million colors.
The following color spaces from `color-convert` are supported:
- `rgb`
- `hex`
- `keyword`
- `hsl`
- `hsv`
- `hwb`
- `ansi`
- `ansi256`
To use these, call the associated conversion function with the intended output, for example:
```js
style.color.ansi.rgb(100, 200, 15); // RGB to 16 color ansi foreground code
style.bgColor.ansi.rgb(100, 200, 15); // RGB to 16 color ansi background code
style.color.ansi256.hsl(120, 100, 60); // HSL to 256 color ansi foreground code
style.bgColor.ansi256.hsl(120, 100, 60); // HSL to 256 color ansi background code
style.color.ansi16m.hex('#C0FFEE'); // Hex (RGB) to 16 million color foreground code
style.bgColor.ansi16m.hex('#C0FFEE'); // Hex (RGB) to 16 million color background code
```
## Related
- [ansi-escapes](https://github.com/sindresorhus/ansi-escapes) - ANSI escape codes for manipulating the terminal
## Maintainers
- [Sindre Sorhus](https://github.com/sindresorhus)
- [Josh Junon](https://github.com/qix-)
## For enterprise
Available as part of the Tidelift Subscription.
The maintainers of `ansi-styles` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. [Learn more.](https://tidelift.com/subscription/pkg/npm-ansi-styles?utm_source=npm-ansi-styles&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)

415
node_modules/chalk/index.d.ts generated vendored Normal file
View File

@ -0,0 +1,415 @@
/**
Basic foreground colors.
[More colors here.](https://github.com/chalk/chalk/blob/master/readme.md#256-and-truecolor-color-support)
*/
declare type ForegroundColor =
| 'black'
| 'red'
| 'green'
| 'yellow'
| 'blue'
| 'magenta'
| 'cyan'
| 'white'
| 'gray'
| 'grey'
| 'blackBright'
| 'redBright'
| 'greenBright'
| 'yellowBright'
| 'blueBright'
| 'magentaBright'
| 'cyanBright'
| 'whiteBright';
/**
Basic background colors.
[More colors here.](https://github.com/chalk/chalk/blob/master/readme.md#256-and-truecolor-color-support)
*/
declare type BackgroundColor =
| 'bgBlack'
| 'bgRed'
| 'bgGreen'
| 'bgYellow'
| 'bgBlue'
| 'bgMagenta'
| 'bgCyan'
| 'bgWhite'
| 'bgGray'
| 'bgGrey'
| 'bgBlackBright'
| 'bgRedBright'
| 'bgGreenBright'
| 'bgYellowBright'
| 'bgBlueBright'
| 'bgMagentaBright'
| 'bgCyanBright'
| 'bgWhiteBright';
/**
Basic colors.
[More colors here.](https://github.com/chalk/chalk/blob/master/readme.md#256-and-truecolor-color-support)
*/
declare type Color = ForegroundColor | BackgroundColor;
declare type Modifiers =
| 'reset'
| 'bold'
| 'dim'
| 'italic'
| 'underline'
| 'inverse'
| 'hidden'
| 'strikethrough'
| 'visible';
declare namespace chalk {
/**
Levels:
- `0` - All colors disabled.
- `1` - Basic 16 colors support.
- `2` - ANSI 256 colors support.
- `3` - Truecolor 16 million colors support.
*/
type Level = 0 | 1 | 2 | 3;
interface Options {
/**
Specify the color support for Chalk.
By default, color support is automatically detected based on the environment.
Levels:
- `0` - All colors disabled.
- `1` - Basic 16 colors support.
- `2` - ANSI 256 colors support.
- `3` - Truecolor 16 million colors support.
*/
level?: Level;
}
/**
Return a new Chalk instance.
*/
type Instance = new (options?: Options) => Chalk;
/**
Detect whether the terminal supports color.
*/
interface ColorSupport {
/**
The color level used by Chalk.
*/
level: Level;
/**
Return whether Chalk supports basic 16 colors.
*/
hasBasic: boolean;
/**
Return whether Chalk supports ANSI 256 colors.
*/
has256: boolean;
/**
Return whether Chalk supports Truecolor 16 million colors.
*/
has16m: boolean;
}
interface ChalkFunction {
/**
Use a template string.
@remarks Template literals are unsupported for nested calls (see [issue #341](https://github.com/chalk/chalk/issues/341))
@example
```
import chalk = require('chalk');
log(chalk`
CPU: {red ${cpu.totalPercent}%}
RAM: {green ${ram.used / ram.total * 100}%}
DISK: {rgb(255,131,0) ${disk.used / disk.total * 100}%}
`);
```
@example
```
import chalk = require('chalk');
log(chalk.red.bgBlack`2 + 3 = {bold ${2 + 3}}`)
```
*/
(text: TemplateStringsArray, ...placeholders: unknown[]): string;
(...text: unknown[]): string;
}
interface Chalk extends ChalkFunction {
/**
Return a new Chalk instance.
*/
Instance: Instance;
/**
The color support for Chalk.
By default, color support is automatically detected based on the environment.
Levels:
- `0` - All colors disabled.
- `1` - Basic 16 colors support.
- `2` - ANSI 256 colors support.
- `3` - Truecolor 16 million colors support.
*/
level: Level;
/**
Use HEX value to set text color.
@param color - Hexadecimal value representing the desired color.
@example
```
import chalk = require('chalk');
chalk.hex('#DEADED');
```
*/
hex(color: string): Chalk;
/**
Use keyword color value to set text color.
@param color - Keyword value representing the desired color.
@example
```
import chalk = require('chalk');
chalk.keyword('orange');
```
*/
keyword(color: string): Chalk;
/**
Use RGB values to set text color.
*/
rgb(red: number, green: number, blue: number): Chalk;
/**
Use HSL values to set text color.
*/
hsl(hue: number, saturation: number, lightness: number): Chalk;
/**
Use HSV values to set text color.
*/
hsv(hue: number, saturation: number, value: number): Chalk;
/**
Use HWB values to set text color.
*/
hwb(hue: number, whiteness: number, blackness: number): Chalk;
/**
Use a [Select/Set Graphic Rendition](https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters) (SGR) [color code number](https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit) to set text color.
30 <= code && code < 38 || 90 <= code && code < 98
For example, 31 for red, 91 for redBright.
*/
ansi(code: number): Chalk;
/**
Use a [8-bit unsigned number](https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit) to set text color.
*/
ansi256(index: number): Chalk;
/**
Use HEX value to set background color.
@param color - Hexadecimal value representing the desired color.
@example
```
import chalk = require('chalk');
chalk.bgHex('#DEADED');
```
*/
bgHex(color: string): Chalk;
/**
Use keyword color value to set background color.
@param color - Keyword value representing the desired color.
@example
```
import chalk = require('chalk');
chalk.bgKeyword('orange');
```
*/
bgKeyword(color: string): Chalk;
/**
Use RGB values to set background color.
*/
bgRgb(red: number, green: number, blue: number): Chalk;
/**
Use HSL values to set background color.
*/
bgHsl(hue: number, saturation: number, lightness: number): Chalk;
/**
Use HSV values to set background color.
*/
bgHsv(hue: number, saturation: number, value: number): Chalk;
/**
Use HWB values to set background color.
*/
bgHwb(hue: number, whiteness: number, blackness: number): Chalk;
/**
Use a [Select/Set Graphic Rendition](https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters) (SGR) [color code number](https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit) to set background color.
30 <= code && code < 38 || 90 <= code && code < 98
For example, 31 for red, 91 for redBright.
Use the foreground code, not the background code (for example, not 41, nor 101).
*/
bgAnsi(code: number): Chalk;
/**
Use a [8-bit unsigned number](https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit) to set background color.
*/
bgAnsi256(index: number): Chalk;
/**
Modifier: Resets the current color chain.
*/
readonly reset: Chalk;
/**
Modifier: Make text bold.
*/
readonly bold: Chalk;
/**
Modifier: Emitting only a small amount of light.
*/
readonly dim: Chalk;
/**
Modifier: Make text italic. (Not widely supported)
*/
readonly italic: Chalk;
/**
Modifier: Make text underline. (Not widely supported)
*/
readonly underline: Chalk;
/**
Modifier: Inverse background and foreground colors.
*/
readonly inverse: Chalk;
/**
Modifier: Prints the text, but makes it invisible.
*/
readonly hidden: Chalk;
/**
Modifier: Puts a horizontal line through the center of the text. (Not widely supported)
*/
readonly strikethrough: Chalk;
/**
Modifier: Prints the text only when Chalk has a color support level > 0.
Can be useful for things that are purely cosmetic.
*/
readonly visible: Chalk;
readonly black: Chalk;
readonly red: Chalk;
readonly green: Chalk;
readonly yellow: Chalk;
readonly blue: Chalk;
readonly magenta: Chalk;
readonly cyan: Chalk;
readonly white: Chalk;
/*
Alias for `blackBright`.
*/
readonly gray: Chalk;
/*
Alias for `blackBright`.
*/
readonly grey: Chalk;
readonly blackBright: Chalk;
readonly redBright: Chalk;
readonly greenBright: Chalk;
readonly yellowBright: Chalk;
readonly blueBright: Chalk;
readonly magentaBright: Chalk;
readonly cyanBright: Chalk;
readonly whiteBright: Chalk;
readonly bgBlack: Chalk;
readonly bgRed: Chalk;
readonly bgGreen: Chalk;
readonly bgYellow: Chalk;
readonly bgBlue: Chalk;
readonly bgMagenta: Chalk;
readonly bgCyan: Chalk;
readonly bgWhite: Chalk;
/*
Alias for `bgBlackBright`.
*/
readonly bgGray: Chalk;
/*
Alias for `bgBlackBright`.
*/
readonly bgGrey: Chalk;
readonly bgBlackBright: Chalk;
readonly bgRedBright: Chalk;
readonly bgGreenBright: Chalk;
readonly bgYellowBright: Chalk;
readonly bgBlueBright: Chalk;
readonly bgMagentaBright: Chalk;
readonly bgCyanBright: Chalk;
readonly bgWhiteBright: Chalk;
}
}
/**
Main Chalk object that allows to chain styles together.
Call the last one as a method with a string argument.
Order doesn't matter, and later styles take precedent in case of a conflict.
This simply means that `chalk.red.yellow.green` is equivalent to `chalk.green`.
*/
declare const chalk: chalk.Chalk & chalk.ChalkFunction & {
supportsColor: chalk.ColorSupport | false;
Level: chalk.Level;
Color: Color;
ForegroundColor: ForegroundColor;
BackgroundColor: BackgroundColor;
Modifiers: Modifiers;
stderr: chalk.Chalk & {supportsColor: chalk.ColorSupport | false};
};
export = chalk;

9
node_modules/chalk/license generated vendored Normal file
View File

@ -0,0 +1,9 @@
MIT License
Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

68
node_modules/chalk/package.json generated vendored Normal file
View File

@ -0,0 +1,68 @@
{
"name": "chalk",
"version": "4.1.2",
"description": "Terminal string styling done right",
"license": "MIT",
"repository": "chalk/chalk",
"funding": "https://github.com/chalk/chalk?sponsor=1",
"main": "source",
"engines": {
"node": ">=10"
},
"scripts": {
"test": "xo && nyc ava && tsd",
"bench": "matcha benchmark.js"
},
"files": [
"source",
"index.d.ts"
],
"keywords": [
"color",
"colour",
"colors",
"terminal",
"console",
"cli",
"string",
"str",
"ansi",
"style",
"styles",
"tty",
"formatting",
"rgb",
"256",
"shell",
"xterm",
"log",
"logging",
"command-line",
"text"
],
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"devDependencies": {
"ava": "^2.4.0",
"coveralls": "^3.0.7",
"execa": "^4.0.0",
"import-fresh": "^3.1.0",
"matcha": "^0.7.0",
"nyc": "^15.0.0",
"resolve-from": "^5.0.0",
"tsd": "^0.7.4",
"xo": "^0.28.2"
},
"xo": {
"rules": {
"unicorn/prefer-string-slice": "off",
"unicorn/prefer-includes": "off",
"@typescript-eslint/member-ordering": "off",
"no-redeclare": "off",
"unicorn/string-content": "off",
"unicorn/better-regex": "off"
}
}
}

341
node_modules/chalk/readme.md generated vendored Normal file
View File

@ -0,0 +1,341 @@
<h1 align="center">
<br>
<br>
<img width="320" src="media/logo.svg" alt="Chalk">
<br>
<br>
<br>
</h1>
> Terminal string styling done right
[![Build Status](https://travis-ci.org/chalk/chalk.svg?branch=master)](https://travis-ci.org/chalk/chalk) [![Coverage Status](https://coveralls.io/repos/github/chalk/chalk/badge.svg?branch=master)](https://coveralls.io/github/chalk/chalk?branch=master) [![npm dependents](https://badgen.net/npm/dependents/chalk)](https://www.npmjs.com/package/chalk?activeTab=dependents) [![Downloads](https://badgen.net/npm/dt/chalk)](https://www.npmjs.com/package/chalk) [![](https://img.shields.io/badge/unicorn-approved-ff69b4.svg)](https://www.youtube.com/watch?v=9auOCbH5Ns4) [![XO code style](https://img.shields.io/badge/code_style-XO-5ed9c7.svg)](https://github.com/xojs/xo) ![TypeScript-ready](https://img.shields.io/npm/types/chalk.svg) [![run on repl.it](https://repl.it/badge/github/chalk/chalk)](https://repl.it/github/chalk/chalk)
<img src="https://cdn.jsdelivr.net/gh/chalk/ansi-styles@8261697c95bf34b6c7767e2cbe9941a851d59385/screenshot.svg" width="900">
<br>
---
<div align="center">
<p>
<p>
<sup>
Sindre Sorhus' open source work is supported by the community on <a href="https://github.com/sponsors/sindresorhus">GitHub Sponsors</a> and <a href="https://stakes.social/0x44d871aebF0126Bf646753E2C976Aa7e68A66c15">Dev</a>
</sup>
</p>
<sup>Special thanks to:</sup>
<br>
<br>
<a href="https://standardresume.co/tech">
<img src="https://sindresorhus.com/assets/thanks/standard-resume-logo.svg" width="160"/>
</a>
<br>
<br>
<a href="https://retool.com/?utm_campaign=sindresorhus">
<img src="https://sindresorhus.com/assets/thanks/retool-logo.svg" width="230"/>
</a>
<br>
<br>
<a href="https://doppler.com/?utm_campaign=github_repo&utm_medium=referral&utm_content=chalk&utm_source=github">
<div>
<img src="https://dashboard.doppler.com/imgs/logo-long.svg" width="240" alt="Doppler">
</div>
<b>All your environment variables, in one place</b>
<div>
<span>Stop struggling with scattered API keys, hacking together home-brewed tools,</span>
<br>
<span>and avoiding access controls. Keep your team and servers in sync with Doppler.</span>
</div>
</a>
<br>
<a href="https://uibakery.io/?utm_source=chalk&utm_medium=sponsor&utm_campaign=github">
<div>
<img src="https://sindresorhus.com/assets/thanks/uibakery-logo.jpg" width="270" alt="UI Bakery">
</div>
</a>
</p>
</div>
---
<br>
## Highlights
- Expressive API
- Highly performant
- Ability to nest styles
- [256/Truecolor color support](#256-and-truecolor-color-support)
- Auto-detects color support
- Doesn't extend `String.prototype`
- Clean and focused
- Actively maintained
- [Used by ~50,000 packages](https://www.npmjs.com/browse/depended/chalk) as of January 1, 2020
## Install
```console
$ npm install chalk
```
## Usage
```js
const chalk = require('chalk');
console.log(chalk.blue('Hello world!'));
```
Chalk comes with an easy to use composable API where you just chain and nest the styles you want.
```js
const chalk = require('chalk');
const log = console.log;
// Combine styled and normal strings
log(chalk.blue('Hello') + ' World' + chalk.red('!'));
// Compose multiple styles using the chainable API
log(chalk.blue.bgRed.bold('Hello world!'));
// Pass in multiple arguments
log(chalk.blue('Hello', 'World!', 'Foo', 'bar', 'biz', 'baz'));
// Nest styles
log(chalk.red('Hello', chalk.underline.bgBlue('world') + '!'));
// Nest styles of the same type even (color, underline, background)
log(chalk.green(
'I am a green line ' +
chalk.blue.underline.bold('with a blue substring') +
' that becomes green again!'
));
// ES2015 template literal
log(`
CPU: ${chalk.red('90%')}
RAM: ${chalk.green('40%')}
DISK: ${chalk.yellow('70%')}
`);
// ES2015 tagged template literal
log(chalk`
CPU: {red ${cpu.totalPercent}%}
RAM: {green ${ram.used / ram.total * 100}%}
DISK: {rgb(255,131,0) ${disk.used / disk.total * 100}%}
`);
// Use RGB colors in terminal emulators that support it.
log(chalk.keyword('orange')('Yay for orange colored text!'));
log(chalk.rgb(123, 45, 67).underline('Underlined reddish color'));
log(chalk.hex('#DEADED').bold('Bold gray!'));
```
Easily define your own themes:
```js
const chalk = require('chalk');
const error = chalk.bold.red;
const warning = chalk.keyword('orange');
console.log(error('Error!'));
console.log(warning('Warning!'));
```
Take advantage of console.log [string substitution](https://nodejs.org/docs/latest/api/console.html#console_console_log_data_args):
```js
const name = 'Sindre';
console.log(chalk.green('Hello %s'), name);
//=> 'Hello Sindre'
```
## API
### chalk.`<style>[.<style>...](string, [string...])`
Example: `chalk.red.bold.underline('Hello', 'world');`
Chain [styles](#styles) and call the last one as a method with a string argument. Order doesn't matter, and later styles take precedent in case of a conflict. This simply means that `chalk.red.yellow.green` is equivalent to `chalk.green`.
Multiple arguments will be separated by space.
### chalk.level
Specifies the level of color support.
Color support is automatically detected, but you can override it by setting the `level` property. You should however only do this in your own code as it applies globally to all Chalk consumers.
If you need to change this in a reusable module, create a new instance:
```js
const ctx = new chalk.Instance({level: 0});
```
| Level | Description |
| :---: | :--- |
| `0` | All colors disabled |
| `1` | Basic color support (16 colors) |
| `2` | 256 color support |
| `3` | Truecolor support (16 million colors) |
### chalk.supportsColor
Detect whether the terminal [supports color](https://github.com/chalk/supports-color). Used internally and handled for you, but exposed for convenience.
Can be overridden by the user with the flags `--color` and `--no-color`. For situations where using `--color` is not possible, use the environment variable `FORCE_COLOR=1` (level 1), `FORCE_COLOR=2` (level 2), or `FORCE_COLOR=3` (level 3) to forcefully enable color, or `FORCE_COLOR=0` to forcefully disable. The use of `FORCE_COLOR` overrides all other color support checks.
Explicit 256/Truecolor mode can be enabled using the `--color=256` and `--color=16m` flags, respectively.
### chalk.stderr and chalk.stderr.supportsColor
`chalk.stderr` contains a separate instance configured with color support detected for `stderr` stream instead of `stdout`. Override rules from `chalk.supportsColor` apply to this too. `chalk.stderr.supportsColor` is exposed for convenience.
## Styles
### Modifiers
- `reset` - Resets the current color chain.
- `bold` - Make text bold.
- `dim` - Emitting only a small amount of light.
- `italic` - Make text italic. *(Not widely supported)*
- `underline` - Make text underline. *(Not widely supported)*
- `inverse` - Inverse background and foreground colors.
- `hidden` - Prints the text, but makes it invisible.
- `strikethrough` - Puts a horizontal line through the center of the text. *(Not widely supported)*
- `visible` - Prints the text only when Chalk has a color level > 0. Can be useful for things that are purely cosmetic.
### Colors
- `black`
- `red`
- `green`
- `yellow`
- `blue`
- `magenta`
- `cyan`
- `white`
- `blackBright` (alias: `gray`, `grey`)
- `redBright`
- `greenBright`
- `yellowBright`
- `blueBright`
- `magentaBright`
- `cyanBright`
- `whiteBright`
### Background colors
- `bgBlack`
- `bgRed`
- `bgGreen`
- `bgYellow`
- `bgBlue`
- `bgMagenta`
- `bgCyan`
- `bgWhite`
- `bgBlackBright` (alias: `bgGray`, `bgGrey`)
- `bgRedBright`
- `bgGreenBright`
- `bgYellowBright`
- `bgBlueBright`
- `bgMagentaBright`
- `bgCyanBright`
- `bgWhiteBright`
## Tagged template literal
Chalk can be used as a [tagged template literal](https://exploringjs.com/es6/ch_template-literals.html#_tagged-template-literals).
```js
const chalk = require('chalk');
const miles = 18;
const calculateFeet = miles => miles * 5280;
console.log(chalk`
There are {bold 5280 feet} in a mile.
In {bold ${miles} miles}, there are {green.bold ${calculateFeet(miles)} feet}.
`);
```
Blocks are delimited by an opening curly brace (`{`), a style, some content, and a closing curly brace (`}`).
Template styles are chained exactly like normal Chalk styles. The following three statements are equivalent:
```js
console.log(chalk.bold.rgb(10, 100, 200)('Hello!'));
console.log(chalk.bold.rgb(10, 100, 200)`Hello!`);
console.log(chalk`{bold.rgb(10,100,200) Hello!}`);
```
Note that function styles (`rgb()`, `hsl()`, `keyword()`, etc.) may not contain spaces between parameters.
All interpolated values (`` chalk`${foo}` ``) are converted to strings via the `.toString()` method. All curly braces (`{` and `}`) in interpolated value strings are escaped.
## 256 and Truecolor color support
Chalk supports 256 colors and [Truecolor](https://gist.github.com/XVilka/8346728) (16 million colors) on supported terminal apps.
Colors are downsampled from 16 million RGB values to an ANSI color format that is supported by the terminal emulator (or by specifying `{level: n}` as a Chalk option). For example, Chalk configured to run at level 1 (basic color support) will downsample an RGB value of #FF0000 (red) to 31 (ANSI escape for red).
Examples:
- `chalk.hex('#DEADED').underline('Hello, world!')`
- `chalk.keyword('orange')('Some orange text')`
- `chalk.rgb(15, 100, 204).inverse('Hello!')`
Background versions of these models are prefixed with `bg` and the first level of the module capitalized (e.g. `keyword` for foreground colors and `bgKeyword` for background colors).
- `chalk.bgHex('#DEADED').underline('Hello, world!')`
- `chalk.bgKeyword('orange')('Some orange text')`
- `chalk.bgRgb(15, 100, 204).inverse('Hello!')`
The following color models can be used:
- [`rgb`](https://en.wikipedia.org/wiki/RGB_color_model) - Example: `chalk.rgb(255, 136, 0).bold('Orange!')`
- [`hex`](https://en.wikipedia.org/wiki/Web_colors#Hex_triplet) - Example: `chalk.hex('#FF8800').bold('Orange!')`
- [`keyword`](https://www.w3.org/wiki/CSS/Properties/color/keywords) (CSS keywords) - Example: `chalk.keyword('orange').bold('Orange!')`
- [`hsl`](https://en.wikipedia.org/wiki/HSL_and_HSV) - Example: `chalk.hsl(32, 100, 50).bold('Orange!')`
- [`hsv`](https://en.wikipedia.org/wiki/HSL_and_HSV) - Example: `chalk.hsv(32, 100, 100).bold('Orange!')`
- [`hwb`](https://en.wikipedia.org/wiki/HWB_color_model) - Example: `chalk.hwb(32, 0, 50).bold('Orange!')`
- [`ansi`](https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit) - Example: `chalk.ansi(31).bgAnsi(93)('red on yellowBright')`
- [`ansi256`](https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit) - Example: `chalk.bgAnsi256(194)('Honeydew, more or less')`
## Windows
If you're on Windows, do yourself a favor and use [Windows Terminal](https://github.com/microsoft/terminal) instead of `cmd.exe`.
## Origin story
[colors.js](https://github.com/Marak/colors.js) used to be the most popular string styling module, but it has serious deficiencies like extending `String.prototype` which causes all kinds of [problems](https://github.com/yeoman/yo/issues/68) and the package is unmaintained. Although there are other packages, they either do too much or not enough. Chalk is a clean and focused alternative.
## chalk for enterprise
Available as part of the Tidelift Subscription.
The maintainers of chalk and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source dependencies you use to build your applications. Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use. [Learn more.](https://tidelift.com/subscription/pkg/npm-chalk?utm_source=npm-chalk&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)
## Related
- [chalk-cli](https://github.com/chalk/chalk-cli) - CLI for this module
- [ansi-styles](https://github.com/chalk/ansi-styles) - ANSI escape codes for styling strings in the terminal
- [supports-color](https://github.com/chalk/supports-color) - Detect whether a terminal supports color
- [strip-ansi](https://github.com/chalk/strip-ansi) - Strip ANSI escape codes
- [strip-ansi-stream](https://github.com/chalk/strip-ansi-stream) - Strip ANSI escape codes from a stream
- [has-ansi](https://github.com/chalk/has-ansi) - Check if a string has ANSI escape codes
- [ansi-regex](https://github.com/chalk/ansi-regex) - Regular expression for matching ANSI escape codes
- [wrap-ansi](https://github.com/chalk/wrap-ansi) - Wordwrap a string with ANSI escape codes
- [slice-ansi](https://github.com/chalk/slice-ansi) - Slice a string with ANSI escape codes
- [color-convert](https://github.com/qix-/color-convert) - Converts colors between different models
- [chalk-animation](https://github.com/bokub/chalk-animation) - Animate strings in the terminal
- [gradient-string](https://github.com/bokub/gradient-string) - Apply color gradients to strings
- [chalk-pipe](https://github.com/LitoMore/chalk-pipe) - Create chalk style schemes with simpler style strings
- [terminal-link](https://github.com/sindresorhus/terminal-link) - Create clickable links in the terminal
## Maintainers
- [Sindre Sorhus](https://github.com/sindresorhus)
- [Josh Junon](https://github.com/qix-)

229
node_modules/chalk/source/index.js generated vendored Normal file
View File

@ -0,0 +1,229 @@
'use strict';
const ansiStyles = require('ansi-styles');
const {stdout: stdoutColor, stderr: stderrColor} = require('supports-color');
const {
stringReplaceAll,
stringEncaseCRLFWithFirstIndex
} = require('./util');
const {isArray} = Array;

// `supportsColor.level` → `ansiStyles.color[name]` mapping
const levelMapping = [
	'ansi',
	'ansi',
	'ansi256',
	'ansi16m'
];

// Property descriptors for every style getter; installed on Chalk.prototype below.
const styles = Object.create(null);

// Copy the colour-support `level` option onto `object`, validating it and
// falling back to the auto-detected stdout level when not supplied.
const applyOptions = (object, options = {}) => {
	if (options.level && !(Number.isInteger(options.level) && options.level >= 0 && options.level <= 3)) {
		throw new Error('The `level` option should be an integer from 0 to 3');
	}

	// Detect level if not set manually
	const colorLevel = stdoutColor ? stdoutColor.level : 0;
	object.level = options.level === undefined ? colorLevel : options.level;
};

// Public `new chalk.Instance(options)` entry point. The constructor
// deliberately returns a fresh chalk template function instead of `this`.
class ChalkClass {
	constructor(options) {
		// eslint-disable-next-line no-constructor-return
		return chalkFactory(options);
	}
}

// Build a chalk instance: a callable tagged-template function whose prototype
// chain is `template → chalk object → Chalk.prototype`, so the lazy style
// getters defined in `styles` resolve on it.
const chalkFactory = options => {
	const chalk = {};
	applyOptions(chalk, options);

	chalk.template = (...arguments_) => chalkTag(chalk.template, ...arguments_);

	Object.setPrototypeOf(chalk, Chalk.prototype);
	Object.setPrototypeOf(chalk.template, chalk);

	chalk.template.constructor = () => {
		throw new Error('`chalk.constructor()` is deprecated. Use `new chalk.Instance()` instead.');
	};

	chalk.template.Instance = ChalkClass;

	return chalk.template;
};

// Kept as a function declaration: `Chalk.prototype` receives the style
// descriptors below and anchors every instance's prototype chain.
function Chalk(options) {
	return chalkFactory(options);
}
// Install a lazy getter for every ansi-styles style (e.g. `red`, `bold`).
// The built chain is memoised on first access via defineProperty.
for (const [styleName, style] of Object.entries(ansiStyles)) {
	styles[styleName] = {
		get() {
			const builder = createBuilder(this, createStyler(style.open, style.close, this._styler), this._isEmpty);
			Object.defineProperty(this, styleName, {value: builder});
			return builder;
		}
	};
}

// `visible` adds no ANSI codes; it only flags the builder so its output is
// dropped entirely when the colour level is 0 (see applyStyle).
styles.visible = {
	get() {
		const builder = createBuilder(this, this._styler, true);
		Object.defineProperty(this, 'visible', {value: builder});
		return builder;
	}
};

const usedModels = ['rgb', 'hex', 'keyword', 'hsl', 'hsv', 'hwb', 'ansi', 'ansi256'];

// Foreground colour-model methods, e.g. `chalk.rgb(255, 136, 0)`. The ANSI
// escape generator is chosen via `levelMapping` from the current level.
for (const model of usedModels) {
	styles[model] = {
		get() {
			const {level} = this;
			return function (...arguments_) {
				const styler = createStyler(ansiStyles.color[levelMapping[level]][model](...arguments_), ansiStyles.color.close, this._styler);
				return createBuilder(this, styler, this._isEmpty);
			};
		}
	};
}

// Background variants of the same models, e.g. `chalk.bgRgb(255, 136, 0)`.
for (const model of usedModels) {
	const bgModel = 'bg' + model[0].toUpperCase() + model.slice(1);
	styles[bgModel] = {
		get() {
			const {level} = this;
			return function (...arguments_) {
				const styler = createStyler(ansiStyles.bgColor[levelMapping[level]][model](...arguments_), ansiStyles.bgColor.close, this._styler);
				return createBuilder(this, styler, this._isEmpty);
			};
		}
	};
}

// Shared prototype for builder functions: every style getter plus a `level`
// accessor that proxies to the owning chalk instance (`_generator`).
const proto = Object.defineProperties(() => {}, {
	...styles,
	level: {
		enumerable: true,
		get() {
			return this._generator.level;
		},
		set(level) {
			this._generator.level = level;
		}
	}
});
// Build a styler record chaining a new open/close ANSI pair onto an optional
// parent styler. `openAll`/`closeAll` accumulate the codes of the entire
// chain so nested styles can be applied with a single concatenation.
const createStyler = (open, close, parent) => {
	const openAll = parent === undefined ? open : parent.openAll + open;
	const closeAll = parent === undefined ? close : close + parent.closeAll;
	return {open, close, openAll, closeAll, parent};
};
// Make the callable builder for one style chain. The builder accepts either
// plain arguments or a tagged template literal.
const createBuilder = (self, _styler, _isEmpty) => {
	const builder = (...arguments_) => {
		if (isArray(arguments_[0]) && isArray(arguments_[0].raw)) {
			// Called as a template literal, for example: chalk.red`2 + 3 = {bold ${2+3}}`
			return applyStyle(builder, chalkTag(builder, ...arguments_));
		}

		// Single argument is hot path, implicit coercion is faster than anything
		// eslint-disable-next-line no-implicit-coercion
		return applyStyle(builder, (arguments_.length === 1) ? ('' + arguments_[0]) : arguments_.join(' '));
	};

	// We alter the prototype because we must return a function, but there is
	// no way to create a function with a different prototype
	Object.setPrototypeOf(builder, proto);

	builder._generator = self;
	builder._styler = _styler;
	builder._isEmpty = _isEmpty;

	return builder;
};

// Wrap `string` in the builder's accumulated open/close codes, repairing any
// embedded close codes and line breaks so styling survives both.
const applyStyle = (self, string) => {
	if (self.level <= 0 || !string) {
		// `_isEmpty` comes from `.visible`: drop the text entirely at level 0.
		return self._isEmpty ? '' : string;
	}

	let styler = self._styler;

	if (styler === undefined) {
		return string;
	}

	const {openAll, closeAll} = styler;
	if (string.indexOf('\u001B') !== -1) {
		while (styler !== undefined) {
			// Replace any instances already present with a re-opening code
			// otherwise only the part of the string until said closing code
			// will be colored, and the rest will simply be 'plain'.
			string = stringReplaceAll(string, styler.close, styler.open);

			styler = styler.parent;
		}
	}

	// We can move both next actions out of loop, because remaining actions in loop won't have
	// any/visible effect on parts we add here. Close the styling before a linebreak and reopen
	// after next line to fix a bleed issue on macOS: https://github.com/chalk/chalk/pull/92
	const lfIndex = string.indexOf('\n');
	if (lfIndex !== -1) {
		string = stringEncaseCRLFWithFirstIndex(string, closeAll, openAll, lfIndex);
	}

	return openAll + string + closeAll;
};
// Template parser (./templates), loaded lazily on first tagged-template use.
let template;

// Tagged-template entry point. When called with plain values rather than a
// template literal, the arguments are simply joined with spaces.
const chalkTag = (chalk, ...strings) => {
	const [firstString] = strings;

	if (!isArray(firstString) || !isArray(firstString.raw)) {
		// If chalk() was called by itself or with a string,
		// return the string itself as a string.
		return strings.join(' ');
	}

	const arguments_ = strings.slice(1);
	const parts = [firstString.raw[0]];

	// Interpolated values are stringified with `{`, `}` and `\` escaped so
	// they can never be parsed as template style markup.
	for (let i = 1; i < firstString.length; i++) {
		parts.push(
			String(arguments_[i - 1]).replace(/[{}\\]/g, '\\$&'),
			String(firstString.raw[i])
		);
	}

	if (template === undefined) {
		template = require('./templates');
	}

	return template(chalk, parts.join(''));
};

Object.defineProperties(Chalk.prototype, styles);

// Default export: a chalk instance bound to stdout's colour support, plus a
// separate `chalk.stderr` instance detected against stderr.
const chalk = Chalk(); // eslint-disable-line new-cap
chalk.supportsColor = stdoutColor;
chalk.stderr = Chalk({level: stderrColor ? stderrColor.level : 0}); // eslint-disable-line new-cap
chalk.stderr.supportsColor = stderrColor;

module.exports = chalk;

134
node_modules/chalk/source/templates.js generated vendored Normal file
View File

@ -0,0 +1,134 @@
'use strict';
// Tokeniser for chalk template strings: matches (1) escape sequences,
// (2) `{style.style(...) ` openers with optional `~` inverse flag,
// (3) `}` closers, or (4) runs of plain characters.
const TEMPLATE_REGEX = /(?:\\(u(?:[a-f\d]{4}|\{[a-f\d]{1,6}\})|x[a-f\d]{2}|.))|(?:\{(~)?(\w+(?:\([^)]*\))?(?:\.\w+(?:\([^)]*\))?)*)(?:[ \t]|(?=\r?\n)))|(\})|((?:.|[\r\n\f])+?)/gi;
// One `.`-separated style segment with an optional `(args)` list.
const STYLE_REGEX = /(?:^|\.)(\w+)(?:\(([^)]*)\))?/g;
// A complete single- or double-quoted string argument.
const STRING_REGEX = /^(['"])((?:\\.|(?!\1)[^\\])*)\1$/;
// Escape sequences inside quoted string arguments.
const ESCAPE_REGEX = /\\(u(?:[a-f\d]{4}|{[a-f\d]{1,6}})|x[a-f\d]{2}|.)|([^\\])/gi;
// Single-character escape names (`\n`, `\t`, …) → their literal values.
const ESCAPES = new Map([
	['n', '\n'],
	['r', '\r'],
	['t', '\t'],
	['b', '\b'],
	['f', '\f'],
	['v', '\v'],
	['0', '\0'],
	['\\', '\\'],
	['e', '\u001B'],
	['a', '\u0007']
]);

// Decode one escape-sequence body (without the leading backslash): braced
// `u{…}` code points, fixed-width `uXXXX`/`xXX` char codes, the named
// escapes above, or — failing all of those — the input itself.
function unescape(c) {
	const isUnicode = c[0] === 'u';
	const isBraced = c[1] === '{';

	if (isUnicode && isBraced) {
		return String.fromCodePoint(Number.parseInt(c.slice(2, -1), 16));
	}

	if ((isUnicode && !isBraced && c.length === 5) || (c[0] === 'x' && c.length === 3)) {
		return String.fromCharCode(Number.parseInt(c.slice(1), 16));
	}

	return ESCAPES.get(c) || c;
}
// Parse one style's argument list (the text inside its parentheses) into an
// array of numbers and unescaped strings.
// Throws when a chunk is neither a number nor a quoted string.
function parseArguments(name, arguments_) {
	const results = [];
	const chunks = arguments_.trim().split(/\s*,\s*/g);
	let matches;

	for (const chunk of chunks) {
		const number = Number(chunk);
		if (!Number.isNaN(number)) {
			results.push(number);
		} else if ((matches = chunk.match(STRING_REGEX))) {
			// Quoted string: resolve `\n`, `\uXXXX`, etc. inside it.
			results.push(matches[2].replace(ESCAPE_REGEX, (m, escape, character) => escape ? unescape(escape) : character));
		} else {
			throw new Error(`Invalid Chalk template style argument: ${chunk} (in style '${name}')`);
		}
	}

	return results;
}

// Split a dotted style chain such as `red.bgBlue.rgb(1,2,3)` into
// `[['red'], ['bgBlue'], ['rgb', 1, 2, 3]]`.
function parseStyle(style) {
	// STYLE_REGEX is global and therefore stateful; reset before reuse.
	STYLE_REGEX.lastIndex = 0;

	const results = [];
	let matches;

	while ((matches = STYLE_REGEX.exec(style)) !== null) {
		const name = matches[1];

		if (matches[2]) {
			const args = parseArguments(name, matches[2]);
			results.push([name].concat(args));
		} else {
			results.push([name]);
		}
	}

	return results;
}
// Collapse a stack of open template layers into one chained chalk builder.
// Later layers override earlier ones per style name; an inverse layer
// (`{~style ...}`) records `null`, which disables that style entirely.
function buildStyle(chalk, styles) {
	const enabled = {};

	for (const layer of styles) {
		for (const [name, ...args] of layer.styles) {
			enabled[name] = layer.inverse ? null : args;
		}
	}

	let current = chalk;
	for (const [styleName, args] of Object.entries(enabled)) {
		if (!Array.isArray(args)) {
			// Disabled by an inverse layer.
			continue;
		}

		if (!(styleName in current)) {
			throw new Error(`Unknown Chalk style: ${styleName}`);
		}

		current = args.length > 0 ? current[styleName](...args) : current[styleName];
	}

	return current;
}
// Parse a chalk template string and return the fully styled result.
// `chalk` resolves style names; `temporary` is the raw template text
// assembled by chalkTag in ./index.js.
module.exports = (chalk, temporary) => {
	const styles = []; // Stack of currently-open `{style` layers.
	const chunks = []; // Finished, already-styled output pieces.
	let chunk = []; // Characters of the piece currently being collected.

	// eslint-disable-next-line max-params
	temporary.replace(TEMPLATE_REGEX, (m, escapeCharacter, inverse, style, close, character) => {
		if (escapeCharacter) {
			chunk.push(unescape(escapeCharacter));
		} else if (style) {
			// `{style ` opener: flush the pending text, then push the layer.
			const string = chunk.join('');
			chunk = [];
			chunks.push(styles.length === 0 ? string : buildStyle(chalk, styles)(string));
			styles.push({inverse, styles: parseStyle(style)});
		} else if (close) {
			// `}` closer: style the collected text with all open layers.
			if (styles.length === 0) {
				throw new Error('Found extraneous } in Chalk template literal');
			}

			chunks.push(buildStyle(chalk, styles)(chunk.join('')));
			chunk = [];
			styles.pop();
		} else {
			chunk.push(character);
		}
	});

	chunks.push(chunk.join(''));

	if (styles.length > 0) {
		const errMessage = `Chalk template literal is missing ${styles.length} closing bracket${styles.length === 1 ? '' : 's'} (\`}\`)`;
		throw new Error(errMessage);
	}

	return chunks.join('');
};

39
node_modules/chalk/source/util.js generated vendored Normal file
View File

@ -0,0 +1,39 @@
'use strict';
// Insert `replacer` immediately AFTER every occurrence of `substring` in
// `string` — the match itself is kept. (Chalk uses this to re-open a style
// right after each embedded close code.)
const stringReplaceAll = (string, substring, replacer) => {
	let index = string.indexOf(substring);
	if (index === -1) {
		return string;
	}

	const step = substring.length;
	const pieces = [];
	let cursor = 0;

	while (index !== -1) {
		pieces.push(string.slice(cursor, index), substring, replacer);
		cursor = index + step;
		index = string.indexOf(substring, cursor);
	}

	pieces.push(string.slice(cursor));
	return pieces.join('');
};
// Wrap every line break in `string` with `prefix` before it and `postfix`
// after it, preserving CRLF pairs. `index` is the position of the first
// `\n`, which the caller has already located.
const stringEncaseCRLFWithFirstIndex = (string, prefix, postfix, index) => {
	const pieces = [];
	let cursor = 0;

	do {
		const hasCR = string[index - 1] === '\r';
		const lineEnd = hasCR ? index - 1 : index;
		pieces.push(string.slice(cursor, lineEnd), prefix, hasCR ? '\r\n' : '\n', postfix);
		cursor = index + 1;
		index = string.indexOf('\n', cursor);
	} while (index !== -1);

	pieces.push(string.slice(cursor));
	return pieces.join('');
};
// String helpers consumed by ../index.js (applyStyle).
module.exports = {
	stringReplaceAll,
	stringEncaseCRLFWithFirstIndex
};

54
node_modules/color-convert/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,54 @@
# 1.0.0 - 2016-01-07
- Removed: unused speed test
- Added: Automatic routing between previously unsupported conversions
([#27](https://github.com/Qix-/color-convert/pull/27))
- Removed: `xxx2xxx()` and `xxx2xxxRaw()` functions
([#27](https://github.com/Qix-/color-convert/pull/27))
- Removed: `convert()` class
([#27](https://github.com/Qix-/color-convert/pull/27))
- Changed: all functions to lookup dictionary
([#27](https://github.com/Qix-/color-convert/pull/27))
- Changed: `ansi` to `ansi256`
([#27](https://github.com/Qix-/color-convert/pull/27))
- Fixed: argument grouping for functions requiring only one argument
([#27](https://github.com/Qix-/color-convert/pull/27))
# 0.6.0 - 2015-07-23
- Added: methods to handle
[ANSI](https://en.wikipedia.org/wiki/ANSI_escape_code#Colors) 16/256 colors:
- rgb2ansi16
- rgb2ansi
- hsl2ansi16
- hsl2ansi
- hsv2ansi16
- hsv2ansi
- hwb2ansi16
- hwb2ansi
- cmyk2ansi16
- cmyk2ansi
- keyword2ansi16
- keyword2ansi
- ansi162rgb
- ansi162hsl
- ansi162hsv
- ansi162hwb
- ansi162cmyk
- ansi162keyword
- ansi2rgb
- ansi2hsl
- ansi2hsv
- ansi2hwb
- ansi2cmyk
- ansi2keyword
([#18](https://github.com/harthur/color-convert/pull/18))
# 0.5.3 - 2015-06-02
- Fixed: hsl2hsv does not return `NaN` anymore when using `[0,0,0]`
([#15](https://github.com/harthur/color-convert/issues/15))
---
Check out commit logs for older releases

21
node_modules/color-convert/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
Copyright (c) 2011-2016 Heather Arthur <fayearthur@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

68
node_modules/color-convert/README.md generated vendored Normal file
View File

@ -0,0 +1,68 @@
# color-convert
[![Build Status](https://travis-ci.org/Qix-/color-convert.svg?branch=master)](https://travis-ci.org/Qix-/color-convert)
Color-convert is a color conversion library for JavaScript and node.
It converts all ways between `rgb`, `hsl`, `hsv`, `hwb`, `cmyk`, `ansi`, `ansi16`, `hex` strings, and CSS `keyword`s (will round to closest):
```js
var convert = require('color-convert');
convert.rgb.hsl(140, 200, 100); // [96, 48, 59]
convert.keyword.rgb('blue'); // [0, 0, 255]
var rgbChannels = convert.rgb.channels; // 3
var cmykChannels = convert.cmyk.channels; // 4
var ansiChannels = convert.ansi16.channels; // 1
```
# Install
```console
$ npm install color-convert
```
# API
Simply get the property of the _from_ and _to_ conversion that you're looking for.
All functions have a rounded and unrounded variant. By default, return values are rounded. To get the unrounded (raw) results, simply tack on `.raw` to the function.
All 'from' functions have a hidden property called `.channels` that indicates the number of channels the function expects (not including alpha).
```js
var convert = require('color-convert');
// Hex to LAB
convert.hex.lab('DEADBF'); // [ 76, 21, -2 ]
convert.hex.lab.raw('DEADBF'); // [ 75.56213190997677, 20.653827952644754, -2.290532499330533 ]
// RGB to CMYK
convert.rgb.cmyk(167, 255, 4); // [ 35, 0, 98, 0 ]
convert.rgb.cmyk.raw(167, 255, 4); // [ 34.509803921568626, 0, 98.43137254901961, 0 ]
```
### Arrays
All functions that accept multiple arguments also support passing an array.
Note that this does **not** apply to functions that convert from a color that only requires one value (e.g. `keyword`, `ansi256`, `hex`, etc.)
```js
var convert = require('color-convert');
convert.rgb.hex(123, 45, 67); // '7B2D43'
convert.rgb.hex([123, 45, 67]); // '7B2D43'
```
## Routing
Conversions that don't have an _explicitly_ defined conversion (in [conversions.js](conversions.js)), but can be converted by means of sub-conversions (e.g. XYZ -> **RGB** -> CMYK), are automatically routed together. This allows just about any color model supported by `color-convert` to be converted to any other model, so long as a sub-conversion path exists. This is also true for conversions requiring more than one step in between (e.g. LCH -> **LAB** -> **XYZ** -> **RGB** -> Hex).
Keep in mind that extensive conversions _may_ result in a loss of precision, and exist only to be complete. For a list of "direct" (single-step) conversions, see [conversions.js](conversions.js).
# Contribute
If there is a new model you would like to support, or want to add a direct conversion between two existing models, please send us a pull request.
# License
Copyright &copy; 2011-2016, Heather Arthur and Josh Junon. Licensed under the [MIT License](LICENSE).

839
node_modules/color-convert/conversions.js generated vendored Normal file
View File

@ -0,0 +1,839 @@
/* MIT license */
/* eslint-disable no-mixed-operators */
const cssKeywords = require('color-name');

// NOTE: conversions should only return primitive values (i.e. arrays, or
// values that give correct `typeof` results).
// do not use box values types (i.e. Number(), String(), etc.)

// Inverse lookup: an RGB triple (coerced to its "r,g,b" string key) → CSS keyword.
const reverseKeywords = {};
for (const key of Object.keys(cssKeywords)) {
	reverseKeywords[cssKeywords[key]] = key;
}

// Registry of colour models; conversion functions are attached below as
// `convert.<from>.<to>`.
const convert = {
	rgb: {channels: 3, labels: 'rgb'},
	hsl: {channels: 3, labels: 'hsl'},
	hsv: {channels: 3, labels: 'hsv'},
	hwb: {channels: 3, labels: 'hwb'},
	cmyk: {channels: 4, labels: 'cmyk'},
	xyz: {channels: 3, labels: 'xyz'},
	lab: {channels: 3, labels: 'lab'},
	lch: {channels: 3, labels: 'lch'},
	hex: {channels: 1, labels: ['hex']},
	keyword: {channels: 1, labels: ['keyword']},
	ansi16: {channels: 1, labels: ['ansi16']},
	ansi256: {channels: 1, labels: ['ansi256']},
	hcg: {channels: 3, labels: ['h', 'c', 'g']},
	apple: {channels: 3, labels: ['r16', 'g16', 'b16']},
	gray: {channels: 1, labels: ['gray']}
};

module.exports = convert;

// Hide .channels and .labels properties
// (re-defined as non-enumerable so they don't show up as conversion targets).
for (const model of Object.keys(convert)) {
	if (!('channels' in convert[model])) {
		throw new Error('missing channels property: ' + model);
	}

	if (!('labels' in convert[model])) {
		throw new Error('missing channel labels property: ' + model);
	}

	if (convert[model].labels.length !== convert[model].channels) {
		throw new Error('channel and label counts mismatch: ' + model);
	}

	const {channels, labels} = convert[model];
	delete convert[model].channels;
	delete convert[model].labels;
	Object.defineProperty(convert[model], 'channels', {value: channels});
	Object.defineProperty(convert[model], 'labels', {value: labels});
}
// RGB (each channel 0–255) → HSL (hue in degrees, sat/lightness in percent).
convert.rgb.hsl = function (rgb) {
	const r = rgb[0] / 255;
	const g = rgb[1] / 255;
	const b = rgb[2] / 255;
	const max = Math.max(r, g, b);
	const min = Math.min(r, g, b);
	const delta = max - min;

	// Hue: which channel is dominant decides the sextant.
	let h;
	if (delta === 0) {
		h = 0;
	} else if (max === r) {
		h = (g - b) / delta;
	} else if (max === g) {
		h = 2 + (b - r) / delta;
	} else {
		h = 4 + (r - g) / delta;
	}

	h = Math.min(h * 60, 360);
	if (h < 0) {
		h += 360;
	}

	const l = (min + max) / 2;

	// Saturation depends on which half of the lightness range we're in.
	let s;
	if (delta === 0) {
		s = 0;
	} else if (l <= 0.5) {
		s = delta / (max + min);
	} else {
		s = delta / (2 - max - min);
	}

	return [h, s * 100, l * 100];
};
// RGB (each channel 0–255) → HSV (hue in degrees, sat/value in percent).
convert.rgb.hsv = function (rgb) {
	const r = rgb[0] / 255;
	const g = rgb[1] / 255;
	const b = rgb[2] / 255;
	const v = Math.max(r, g, b);
	const diff = v - Math.min(r, g, b);

	let h = 0;
	let s = 0;

	if (diff !== 0) {
		s = diff / v;

		// Distance of each channel from the max, normalised to sixths of a turn.
		const diffc = c => (v - c) / 6 / diff + 1 / 2;
		const rdif = diffc(r);
		const gdif = diffc(g);
		const bdif = diffc(b);

		if (r === v) {
			h = bdif - gdif;
		} else if (g === v) {
			h = (1 / 3) + rdif - bdif;
		} else {
			h = (2 / 3) + gdif - rdif;
		}

		// Wrap hue into [0, 1].
		if (h < 0) {
			h += 1;
		} else if (h > 1) {
			h -= 1;
		}
	}

	return [
		h * 360,
		s * 100,
		v * 100
	];
};
// RGB → HWB: hue (via HSL), whiteness %, blackness %.
convert.rgb.hwb = function (rgb) {
	const [r, g, b] = rgb;
	const h = convert.rgb.hsl(rgb)[0];
	const w = 1 / 255 * Math.min(r, Math.min(g, b));
	const blackness = 1 - 1 / 255 * Math.max(r, Math.max(g, b));

	return [h, w * 100, blackness * 100];
};
convert.rgb.cmyk = function (rgb) {
const r = rgb[0] / 255;
const g = rgb[1] / 255;
const b = rgb[2] / 255;
const k = Math.min(1 - r, 1 - g, 1 - b);
const c = (1 - r - k) / (1 - k) || 0;
const m = (1 - g - k) / (1 - k) || 0;
const y = (1 - b - k) / (1 - k) || 0;
return [c * 100, m * 100, y * 100, k * 100];
};
/**
 * Squared Euclidean distance between two RGB triples (no square root —
 * only relative ordering is needed by callers).
 * See https://en.m.wikipedia.org/wiki/Euclidean_distance#Squared_Euclidean_distance
 *
 * @param {number[]} x - First [r, g, b] triple.
 * @param {number[]} y - Second [r, g, b] triple.
 * @return {number} Sum of squared per-channel differences.
 */
function comparativeDistance(x, y) {
  let total = 0;
  for (let i = 0; i < 3; i++) {
    const diff = x[i] - y[i];
    total += diff * diff;
  }
  return total;
}
// RGB -> closest CSS keyword. Exact matches hit the reverse lookup table;
// otherwise every keyword is scanned for the smallest squared distance.
convert.rgb.keyword = function (rgb) {
// The array is used as an object key, i.e. its string form (e.g.
// '255,0,0'); presumably reverseKeywords (built earlier in this file,
// not shown here) is keyed the same way — verify against its builder.
const reversed = reverseKeywords[rgb];
if (reversed) {
return reversed;
}
let currentClosestDistance = Infinity;
let currentClosestKeyword;
for (const keyword of Object.keys(cssKeywords)) {
const value = cssKeywords[keyword];
// Compute comparative distance
const distance = comparativeDistance(rgb, value);
// Check if it's less; if so, set as closest
if (distance < currentClosestDistance) {
currentClosestDistance = distance;
currentClosestKeyword = keyword;
}
}
return currentClosestKeyword;
};
// Direct table lookup; returns undefined for unknown keywords.
convert.keyword.rgb = function (keyword) {
return cssKeywords[keyword];
};
// sRGB (0-255) -> CIE XYZ, components scaled to 0-100. Applies the sRGB
// inverse-gamma transfer, then the linear RGB -> XYZ matrix.
convert.rgb.xyz = function (rgb) {
let r = rgb[0] / 255;
let g = rgb[1] / 255;
let b = rgb[2] / 255;
// Assume sRGB
r = r > 0.04045 ? (((r + 0.055) / 1.055) ** 2.4) : (r / 12.92);
g = g > 0.04045 ? (((g + 0.055) / 1.055) ** 2.4) : (g / 12.92);
b = b > 0.04045 ? (((b + 0.055) / 1.055) ** 2.4) : (b / 12.92);
// Linear RGB -> XYZ matrix multiplication.
const x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805);
const y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722);
const z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505);
return [x * 100, y * 100, z * 100];
};
// RGB -> CIELAB via XYZ, normalized against the D65 reference white
// (95.047, 100, 108.883).
convert.rgb.lab = function (rgb) {
const xyz = convert.rgb.xyz(rgb);
let x = xyz[0];
let y = xyz[1];
let z = xyz[2];
x /= 95.047;
y /= 100;
z /= 108.883;
// Piecewise cube-root transfer from the CIELAB definition.
x = x > 0.008856 ? (x ** (1 / 3)) : (7.787 * x) + (16 / 116);
y = y > 0.008856 ? (y ** (1 / 3)) : (7.787 * y) + (16 / 116);
z = z > 0.008856 ? (z ** (1 / 3)) : (7.787 * z) + (16 / 116);
const l = (116 * y) - 16;
const a = 500 * (x - y);
const b = 200 * (y - z);
return [l, a, b];
};
// HSL -> RGB (0-255) via the standard two-threshold interpolation. The
// loop derives R, G, B from hue offsets of +1/3, 0 and -1/3 respectively.
convert.hsl.rgb = function (hsl) {
const h = hsl[0] / 360;
const s = hsl[1] / 100;
const l = hsl[2] / 100;
let t2;
let t3;
let val;
// Achromatic shortcut: every channel equals the lightness.
if (s === 0) {
val = l * 255;
return [val, val, val];
}
if (l < 0.5) {
t2 = l * (1 + s);
} else {
t2 = l + s - l * s;
}
const t1 = 2 * l - t2;
const rgb = [0, 0, 0];
for (let i = 0; i < 3; i++) {
// Per-channel hue: +1/3 for red (i=0), 0 for green, -1/3 for blue,
// wrapped back into [0, 1].
t3 = h + 1 / 3 * -(i - 1);
if (t3 < 0) {
t3++;
}
if (t3 > 1) {
t3--;
}
// Piecewise interpolation between t1 and t2 depending on the sector.
if (6 * t3 < 1) {
val = t1 + (t2 - t1) * 6 * t3;
} else if (2 * t3 < 1) {
val = t2;
} else if (3 * t3 < 2) {
val = t1 + (t2 - t1) * (2 / 3 - t3) * 6;
} else {
val = t1;
}
rgb[i] = val * 255;
}
return rgb;
};
// HSL -> HSV. `lmin`/`smin` repeat the computation with lightness clamped
// to at least 0.01 so the saturation ratio stays finite near black.
convert.hsl.hsv = function (hsl) {
const h = hsl[0];
let s = hsl[1] / 100;
let l = hsl[2] / 100;
let smin = s;
const lmin = Math.max(l, 0.01);
l *= 2;
s *= (l <= 1) ? l : 2 - l;
smin *= lmin <= 1 ? lmin : 2 - lmin;
const v = (l + s) / 2;
// At l === 0 fall back to the clamped variant to avoid 0/0.
const sv = l === 0 ? (2 * smin) / (lmin + smin) : (2 * s) / (l + s);
return [h, sv * 100, v * 100];
};
// HSV -> RGB (0-255) using the sector method: `hi` selects one of six hue
// sectors; p/q/t are the interpolated secondary channel values.
convert.hsv.rgb = function (hsv) {
const h = hsv[0] / 60;
const s = hsv[1] / 100;
let v = hsv[2] / 100;
const hi = Math.floor(h) % 6;
const f = h - Math.floor(h);
const p = 255 * v * (1 - s);
const q = 255 * v * (1 - (s * f));
const t = 255 * v * (1 - (s * (1 - f)));
v *= 255;
// Arrange v (dominant), p, q, t according to the hue sector.
switch (hi) {
case 0:
return [v, t, p];
case 1:
return [q, v, p];
case 2:
return [p, v, t];
case 3:
return [p, q, v];
case 4:
return [t, p, v];
case 5:
return [v, p, q];
}
};
// HSV -> HSL. `vmin` clamps tiny values to 0.01 so the saturation
// division below stays finite; `sl || 0` coerces a falsy result to 0.
convert.hsv.hsl = function (hsv) {
const h = hsv[0];
const s = hsv[1] / 100;
const v = hsv[2] / 100;
const vmin = Math.max(v, 0.01);
let sl;
let l;
l = (2 - s) * v;
const lmin = (2 - s) * vmin;
sl = s * vmin;
sl /= (lmin <= 1) ? lmin : 2 - lmin;
sl = sl || 0;
l /= 2;
return [h, sl * 100, l * 100];
};
// http://dev.w3.org/csswg/css-color/#hwb-to-rgb
// HWB -> RGB per the CSSWG algorithm above: normalize whiteness+blackness
// to at most 1, then interpolate within the hue sector.
convert.hwb.rgb = function (hwb) {
const h = hwb[0] / 360;
let wh = hwb[1] / 100;
let bl = hwb[2] / 100;
const ratio = wh + bl;
let f;
// Wh + bl cant be > 1
if (ratio > 1) {
wh /= ratio;
bl /= ratio;
}
const i = Math.floor(6 * h);
const v = 1 - bl;
f = 6 * h - i;
// Mirror the fractional part in odd sectors.
if ((i & 0x01) !== 0) {
f = 1 - f;
}
const n = wh + f * (v - wh); // Linear interpolation
let r;
let g;
let b;
/* eslint-disable max-statements-per-line,no-multi-spaces */
switch (i) {
default:
case 6:
case 0: r = v; g = n; b = wh; break;
case 1: r = n; g = v; b = wh; break;
case 2: r = wh; g = v; b = n; break;
case 3: r = wh; g = n; b = v; break;
case 4: r = n; g = wh; b = v; break;
case 5: r = v; g = wh; b = n; break;
}
/* eslint-enable max-statements-per-line,no-multi-spaces */
return [r * 255, g * 255, b * 255];
};
// CMYK percentages -> RGB (0-255). Each channel is the complement of its
// ink coverage, with black (k) mixed in and the sum capped at full ink.
convert.cmyk.rgb = function (cmyk) {
  // Normalize all four components from 0-100 to 0-1.
  const [c, m, y, k] = cmyk.map(component => component / 100);
  const red = 1 - Math.min(1, c * (1 - k) + k);
  const green = 1 - Math.min(1, m * (1 - k) + k);
  const blue = 1 - Math.min(1, y * (1 - k) + k);
  return [red * 255, green * 255, blue * 255];
};
// CIE XYZ (0-100) -> sRGB (0-255): linear matrix transform, forward sRGB
// gamma, then clamping each channel to [0, 1] before scaling.
convert.xyz.rgb = function (xyz) {
const x = xyz[0] / 100;
const y = xyz[1] / 100;
const z = xyz[2] / 100;
let r;
let g;
let b;
// XYZ -> linear RGB matrix multiplication.
r = (x * 3.2406) + (y * -1.5372) + (z * -0.4986);
g = (x * -0.9689) + (y * 1.8758) + (z * 0.0415);
b = (x * 0.0557) + (y * -0.2040) + (z * 1.0570);
// Assume sRGB
r = r > 0.0031308
? ((1.055 * (r ** (1.0 / 2.4))) - 0.055)
: r * 12.92;
g = g > 0.0031308
? ((1.055 * (g ** (1.0 / 2.4))) - 0.055)
: g * 12.92;
b = b > 0.0031308
? ((1.055 * (b ** (1.0 / 2.4))) - 0.055)
: b * 12.92;
// Out-of-gamut values are clamped rather than rescaled.
r = Math.min(Math.max(0, r), 1);
g = Math.min(Math.max(0, g), 1);
b = Math.min(Math.max(0, b), 1);
return [r * 255, g * 255, b * 255];
};
// XYZ -> CIELAB: normalize against the D65 reference white, then apply
// the piecewise cube-root transfer (same math as rgb.lab's second half).
convert.xyz.lab = function (xyz) {
let x = xyz[0];
let y = xyz[1];
let z = xyz[2];
x /= 95.047;
y /= 100;
z /= 108.883;
x = x > 0.008856 ? (x ** (1 / 3)) : (7.787 * x) + (16 / 116);
y = y > 0.008856 ? (y ** (1 / 3)) : (7.787 * y) + (16 / 116);
z = z > 0.008856 ? (z ** (1 / 3)) : (7.787 * z) + (16 / 116);
const l = (116 * y) - 16;
const a = 500 * (x - y);
const b = 200 * (y - z);
return [l, a, b];
};
// CIELAB -> XYZ: invert the piecewise transfer (cube when above the
// threshold, linear below), then rescale by the D65 reference white.
convert.lab.xyz = function (lab) {
const l = lab[0];
const a = lab[1];
const b = lab[2];
let x;
let y;
let z;
y = (l + 16) / 116;
x = a / 500 + y;
z = y - b / 200;
const y2 = y ** 3;
const x2 = x ** 3;
const z2 = z ** 3;
y = y2 > 0.008856 ? y2 : (y - 16 / 116) / 7.787;
x = x2 > 0.008856 ? x2 : (x - 16 / 116) / 7.787;
z = z2 > 0.008856 ? z2 : (z - 16 / 116) / 7.787;
x *= 95.047;
y *= 100;
z *= 108.883;
return [x, y, z];
};
// CIELAB <-> CIELCh: rectangular (a, b) versus polar (chroma, hue) form
// of the same space; lightness passes through unchanged.
convert.lab.lch = function (lab) {
  const [l, a, b] = lab;
  // Hue is the angle of the (a, b) vector, in degrees, wrapped to [0, 360).
  let h = Math.atan2(b, a) * 360 / 2 / Math.PI;
  if (h < 0) {
    h += 360;
  }
  // Chroma is the length of the (a, b) vector.
  const c = Math.sqrt(a * a + b * b);
  return [l, c, h];
};
convert.lch.lab = function (lch) {
  const [l, c, h] = lch;
  // Hue back to radians, then project chroma onto the a/b axes.
  const hr = h / 360 * 2 * Math.PI;
  return [l, c * Math.cos(hr), c * Math.sin(hr)];
};
// RGB -> 16-color ANSI code. NOTE: despite its name, `saturation` carries
// the HSV *value* component when supplied (see convert.hsv.ansi16 below),
// letting callers skip the rgb->hsv recomputation.
convert.rgb.ansi16 = function (args, saturation = null) {
const [r, g, b] = args;
let value = saturation === null ? convert.rgb.hsv(args)[2] : saturation; // Hsv -> ansi16 optimization
// Quantize value to 0 (black), 1 (normal) or 2 (bright).
value = Math.round(value / 50);
if (value === 0) {
return 30;
}
// Base code 30 plus a 3-bit channel mask: blue<<2 | green<<1 | red.
let ansi = 30
+ ((Math.round(b / 255) << 2)
| (Math.round(g / 255) << 1)
| Math.round(r / 255));
if (value === 2) {
// Bright variants occupy 90-97.
ansi += 60;
}
return ansi;
};
convert.hsv.ansi16 = function (args) {
// Optimization here; we already know the value and don't need to get
// it converted for us.
return convert.rgb.ansi16(convert.hsv.rgb(args), args[2]);
};
// RGB -> xterm 256-color index: greys use the extended ramp (232-255,
// except near-black/near-white), colors use the 6x6x6 cube (16-231).
convert.rgb.ansi256 = function (args) {
const r = args[0];
const g = args[1];
const b = args[2];
// We use the extended greyscale palette here, with the exception of
// black and white. normal palette only has 4 greyscale shades.
if (r === g && g === b) {
if (r < 8) {
return 16;
}
if (r > 248) {
return 231;
}
return Math.round(((r - 8) / 247) * 24) + 232;
}
// 6x6x6 cube: each channel quantized to 0-5.
const ansi = 16
+ (36 * Math.round(r / 255 * 5))
+ (6 * Math.round(g / 255 * 5))
+ Math.round(b / 255 * 5);
return ansi;
};
// ANSI 16 code -> RGB. Codes above 50 are treated as bright variants.
convert.ansi16.rgb = function (args) {
let color = args % 10;
// Handle greyscale
if (color === 0 || color === 7) {
if (args > 50) {
// Bright grey: boost the level before scaling.
color += 3.5;
}
color = color / 10.5 * 255;
return [color, color, color];
}
// Normal colors get half intensity, bright ones full.
const mult = (~~(args > 50) + 1) * 0.5;
const r = ((color & 1) * mult) * 255;
const g = (((color >> 1) & 1) * mult) * 255;
const b = (((color >> 2) & 1) * mult) * 255;
return [r, g, b];
};
// xterm 256 index -> RGB: decode the grey ramp above 232, otherwise the
// 6x6x6 color cube.
convert.ansi256.rgb = function (args) {
// Handle greyscale
if (args >= 232) {
const c = (args - 232) * 10 + 8;
return [c, c, c];
}
args -= 16;
let rem;
const r = Math.floor(args / 36) / 5 * 255;
const g = Math.floor((rem = args % 36) / 6) / 5 * 255;
const b = (rem % 6) / 5 * 255;
return [r, g, b];
};
// RGB -> 6-digit uppercase hex string (no leading '#').
convert.rgb.hex = function (args) {
const integer = ((Math.round(args[0]) & 0xFF) << 16)
+ ((Math.round(args[1]) & 0xFF) << 8)
+ (Math.round(args[2]) & 0xFF);
const string = integer.toString(16).toUpperCase();
// Left-pad with zeros to six digits.
return '000000'.substring(string.length) + string;
};
// Parse a 3- or 6-digit hex string (number inputs are stringified first)
// to RGB; input without a hex match yields black.
convert.hex.rgb = function (args) {
const match = args.toString(16).match(/[a-f0-9]{6}|[a-f0-9]{3}/i);
if (!match) {
return [0, 0, 0];
}
let colorString = match[0];
// Expand shorthand by doubling each digit (e.g. 'f80' -> 'ff8800').
if (match[0].length === 3) {
colorString = colorString.split('').map(char => {
return char + char;
}).join('');
}
const integer = parseInt(colorString, 16);
const r = (integer >> 16) & 0xFF;
const g = (integer >> 8) & 0xFF;
const b = integer & 0xFF;
return [r, g, b];
};
// RGB -> HCG (hue, chroma, grayscale). Chroma is max-min; grayscale is
// the residual level once chroma is factored out.
convert.rgb.hcg = function (rgb) {
const r = rgb[0] / 255;
const g = rgb[1] / 255;
const b = rgb[2] / 255;
const max = Math.max(Math.max(r, g), b);
const min = Math.min(Math.min(r, g), b);
const chroma = (max - min);
let grayscale;
let hue;
// At full chroma the grayscale component carries no information.
if (chroma < 1) {
grayscale = min / (1 - chroma);
} else {
grayscale = 0;
}
// Hue sector from the dominant channel, as in the HSL/HSV conversions.
if (chroma <= 0) {
hue = 0;
} else
if (max === r) {
hue = ((g - b) / chroma) % 6;
} else
if (max === g) {
hue = 2 + (b - r) / chroma;
} else {
hue = 4 + (r - g) / chroma;
}
hue /= 6;
hue %= 1;
return [hue * 360, chroma * 100, grayscale * 100];
};
// HSL -> HCG: chroma via the standard HSL chroma formula; gray is the
// lightness left over once chroma is removed (guarding c === 1).
convert.hsl.hcg = function (hsl) {
const s = hsl[1] / 100;
const l = hsl[2] / 100;
const c = l < 0.5 ? (2.0 * s * l) : (2.0 * s * (1.0 - l));
let f = 0;
if (c < 1.0) {
f = (l - 0.5 * c) / (1.0 - c);
}
return [hsl[0], c * 100, f * 100];
};
// HSV -> HCG: chroma is s*v; gray fills the remainder of the value.
convert.hsv.hcg = function (hsv) {
const s = hsv[1] / 100;
const v = hsv[2] / 100;
const c = s * v;
let f = 0;
if (c < 1.0) {
f = (v - c) / (1 - c);
}
return [hsv[0], c * 100, f * 100];
};
// HCG -> RGB: build the fully-saturated "pure" hue color for the sector,
// then blend it toward gray by the (1 - chroma) * g term.
convert.hcg.rgb = function (hcg) {
const h = hcg[0] / 360;
const c = hcg[1] / 100;
const g = hcg[2] / 100;
// Zero chroma is pure gray.
if (c === 0.0) {
return [g * 255, g * 255, g * 255];
}
const pure = [0, 0, 0];
const hi = (h % 1) * 6;
const v = hi % 1;
const w = 1 - v;
let mg = 0;
/* eslint-disable max-statements-per-line */
switch (Math.floor(hi)) {
case 0:
pure[0] = 1; pure[1] = v; pure[2] = 0; break;
case 1:
pure[0] = w; pure[1] = 1; pure[2] = 0; break;
case 2:
pure[0] = 0; pure[1] = 1; pure[2] = v; break;
case 3:
pure[0] = 0; pure[1] = w; pure[2] = 1; break;
case 4:
pure[0] = v; pure[1] = 0; pure[2] = 1; break;
default:
pure[0] = 1; pure[1] = 0; pure[2] = w;
}
/* eslint-enable max-statements-per-line */
// Gray contribution added uniformly to every channel.
mg = (1.0 - c) * g;
return [
(c * pure[0] + mg) * 255,
(c * pure[1] + mg) * 255,
(c * pure[2] + mg) * 255
];
};
// HCG -> HSV: value is chroma plus the gray-filled remainder; the v > 0
// guard avoids 0/0 for pure black.
convert.hcg.hsv = function (hcg) {
const c = hcg[1] / 100;
const g = hcg[2] / 100;
const v = c + g * (1.0 - c);
let f = 0;
if (v > 0.0) {
f = c / v;
}
return [hcg[0], f * 100, v * 100];
};
// HCG -> HSL: saturation formula switches at 50% lightness, with guards
// at the degenerate black/white ends.
convert.hcg.hsl = function (hcg) {
const c = hcg[1] / 100;
const g = hcg[2] / 100;
const l = g * (1.0 - c) + 0.5 * c;
let s = 0;
if (l > 0.0 && l < 0.5) {
s = c / (2 * l);
} else
if (l >= 0.5 && l < 1.0) {
s = c / (2 * (1 - l));
}
return [hcg[0], s * 100, l * 100];
};
// HCG -> HWB: whiteness is value minus chroma, blackness is 1 - value.
convert.hcg.hwb = function (hcg) {
const c = hcg[1] / 100;
const g = hcg[2] / 100;
const v = c + g * (1.0 - c);
return [hcg[0], (v - c) * 100, (1 - v) * 100];
};
// HWB -> HCG: invert the above; the c < 1 guard avoids 0/0 at full chroma.
convert.hwb.hcg = function (hwb) {
const w = hwb[1] / 100;
const b = hwb[2] / 100;
const v = 1 - b;
const c = v - w;
let g = 0;
if (c < 1) {
g = (v - c) / (1 - c);
}
return [hwb[0], c * 100, g * 100];
};
// Apple RGB uses 16-bit channels (0-65535); these scale linearly to and
// from 8-bit RGB.
convert.apple.rgb = function (apple) {
return [(apple[0] / 65535) * 255, (apple[1] / 65535) * 255, (apple[2] / 65535) * 255];
};
convert.rgb.apple = function (rgb) {
return [(rgb[0] / 255) * 65535, (rgb[1] / 255) * 65535, (rgb[2] / 255) * 65535];
};
// Gray (a single 0-100 level) conversions: replicate the level across
// channels, or map it onto each model's lightness/value-like component.
convert.gray.rgb = function (args) {
return [args[0] / 100 * 255, args[0] / 100 * 255, args[0] / 100 * 255];
};
convert.gray.hsl = function (args) {
return [0, 0, args[0]];
};
// gray.hsv shares gray.hsl's implementation: both yield [0, 0, level].
convert.gray.hsv = convert.gray.hsl;
convert.gray.hwb = function (gray) {
return [0, 100, gray[0]];
};
convert.gray.cmyk = function (gray) {
return [0, 0, 0, gray[0]];
};
convert.gray.lab = function (gray) {
return [gray[0], 0, 0];
};
// Gray -> hex: expand the level to one byte and repeat it three times.
convert.gray.hex = function (gray) {
const val = Math.round(gray[0] / 100 * 255) & 0xFF;
const integer = (val << 16) + (val << 8) + val;
const string = integer.toString(16).toUpperCase();
return '000000'.substring(string.length) + string;
};
// RGB -> gray: plain channel average as a percentage.
convert.rgb.gray = function (rgb) {
const val = (rgb[0] + rgb[1] + rgb[2]) / 3;
return [val / 255 * 100];
};

81
node_modules/color-convert/index.js generated vendored Normal file
View File

@ -0,0 +1,81 @@
const conversions = require('./conversions');
const route = require('./route');
const convert = {};
const models = Object.keys(conversions);
/**
 * Wrap a conversion function so callers may pass either a single array of
 * channels or the channels spread as separate arguments; null/undefined
 * pass straight through. The wrapped function's `.conversion` route
 * metadata is preserved.
 *
 * @param {Function} fn - Conversion taking one array of channel values.
 * @return {Function} Wrapper with the flexible call signature.
 */
function wrapRaw(fn) {
  const wrappedFn = (...args) => {
    const first = args[0];
    // Pass null/undefined through untouched.
    if (first === undefined || first === null) {
      return first;
    }
    // A first argument with length > 1 (e.g. an array) is taken as the
    // complete channel list.
    if (first.length > 1) {
      args = first;
    }
    return fn(args);
  };
  // Preserve .conversion property if there is one
  if ('conversion' in fn) {
    wrappedFn.conversion = fn.conversion;
  }
  return wrappedFn;
}
/**
 * Like wrapRaw, but additionally rounds every element of an array result
 * in place before returning it. Conversions return plain arrays (see the
 * notice in conversions.js), so a typeof 'object' check is sufficient.
 *
 * @param {Function} fn - Conversion taking one array of channel values.
 * @return {Function} Wrapper returning integer channel values.
 */
function wrapRounded(fn) {
  const wrappedFn = (...args) => {
    const first = args[0];
    // Pass null/undefined through untouched.
    if (first === undefined || first === null) {
      return first;
    }
    // A first argument with length > 1 (e.g. an array) is taken as the
    // complete channel list.
    if (first.length > 1) {
      args = first;
    }
    const result = fn(args);
    if (typeof result === 'object') {
      for (let i = 0; i < result.length; i++) {
        result[i] = Math.round(result[i]);
      }
    }
    return result;
  };
  // Preserve .conversion property if there is one
  if ('conversion' in fn) {
    wrappedFn.conversion = fn.conversion;
  }
  return wrappedFn;
}
// Build the public API: for each source model expose its channel metadata
// (non-enumerable, via defineProperty) plus a rounded and a raw variant of
// every conversion reachable through the route graph.
models.forEach(fromModel => {
convert[fromModel] = {};
Object.defineProperty(convert[fromModel], 'channels', {value: conversions[fromModel].channels});
Object.defineProperty(convert[fromModel], 'labels', {value: conversions[fromModel].labels});
const routes = route(fromModel);
const routeModels = Object.keys(routes);
routeModels.forEach(toModel => {
const fn = routes[toModel];
// Rounded results by default; exact values via the `.raw` variant.
convert[fromModel][toModel] = wrapRounded(fn);
convert[fromModel][toModel].raw = wrapRaw(fn);
});
});
module.exports = convert;

48
node_modules/color-convert/package.json generated vendored Normal file
View File

@ -0,0 +1,48 @@
{
"name": "color-convert",
"description": "Plain color conversion functions",
"version": "2.0.1",
"author": "Heather Arthur <fayearthur@gmail.com>",
"license": "MIT",
"repository": "Qix-/color-convert",
"scripts": {
"pretest": "xo",
"test": "node test/basic.js"
},
"engines": {
"node": ">=7.0.0"
},
"keywords": [
"color",
"colour",
"convert",
"converter",
"conversion",
"rgb",
"hsl",
"hsv",
"hwb",
"cmyk",
"ansi",
"ansi16"
],
"files": [
"index.js",
"conversions.js",
"route.js"
],
"xo": {
"rules": {
"default-case": 0,
"no-inline-comments": 0,
"operator-linebreak": 0
}
},
"devDependencies": {
"chalk": "^2.4.2",
"xo": "^0.24.0"
},
"dependencies": {
"color-name": "~1.1.4"
}
}

97
node_modules/color-convert/route.js generated vendored Normal file
View File

@ -0,0 +1,97 @@
const conversions = require('./conversions');
/*
This function routes a model to all other models.
all functions that are routed have a property `.conversion` attached
to the returned synthetic function. This property is an array
of strings, each with the steps in between the 'from' and 'to'
color models (inclusive).
conversions that are not possible simply are not included.
*/
// Create an empty BFS bookkeeping node for every color model:
// distance -1 marks "unvisited"; parent links reconstruct paths later.
function buildGraph() {
const graph = {};
// https://jsperf.com/object-keys-vs-for-in-with-closure/3
const models = Object.keys(conversions);
for (let len = models.length, i = 0; i < len; i++) {
graph[models[i]] = {
// http://jsperf.com/1-vs-infinity
// micro-opt, but this is simple.
distance: -1,
parent: null
};
}
return graph;
}
// https://en.wikipedia.org/wiki/Breadth-first_search
// Breadth-first search over the direct-conversion graph starting at
// `fromModel`, recording each model's shortest-path parent and distance.
function deriveBFS(fromModel) {
const graph = buildGraph();
const queue = [fromModel]; // Unshift -> queue -> pop
graph[fromModel].distance = 0;
while (queue.length) {
const current = queue.pop();
// Models directly convertible from `current`.
const adjacents = Object.keys(conversions[current]);
for (let len = adjacents.length, i = 0; i < len; i++) {
const adjacent = adjacents[i];
const node = graph[adjacent];
// distance === -1 means this model has not been visited yet.
if (node.distance === -1) {
node.distance = graph[current].distance + 1;
node.parent = current;
queue.unshift(adjacent);
}
}
}
return graph;
}
/**
 * Compose two conversion steps into one: feed the input through `from`,
 * then hand the intermediate result to `to`.
 *
 * @param {Function} from - First conversion applied.
 * @param {Function} to - Second conversion applied.
 * @return {Function} The composed conversion.
 */
function link(from, to) {
  return (args) => to(from(args));
}
// Walk parent links from `toModel` back to the BFS root, composing the
// per-hop conversion functions into a single function, and attach the
// traversed model path as `.conversion` for introspection.
function wrapConversion(toModel, graph) {
const path = [graph[toModel].parent, toModel];
let fn = conversions[graph[toModel].parent][toModel];
let cur = graph[toModel].parent;
while (graph[cur].parent) {
path.unshift(graph[cur].parent);
fn = link(conversions[graph[cur].parent][cur], fn);
cur = graph[cur].parent;
}
fn.conversion = path;
return fn;
}
// Build every reachable conversion from `fromModel`: run BFS over the
// conversion graph, then wrap each destination's path into one function.
module.exports = function (fromModel) {
const graph = deriveBFS(fromModel);
const conversion = {};
const models = Object.keys(graph);
for (let len = models.length, i = 0; i < len; i++) {
const toModel = models[i];
const node = graph[toModel];
if (node.parent === null) {
// No possible conversion, or this node is the source model.
continue;
}
conversion[toModel] = wrapConversion(toModel, graph);
}
return conversion;
};

8
node_modules/color-name/LICENSE generated vendored Normal file
View File

@ -0,0 +1,8 @@
The MIT License (MIT)
Copyright (c) 2015 Dmitry Ivanov
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

11
node_modules/color-name/README.md generated vendored Normal file
View File

@ -0,0 +1,11 @@
A JSON with color names and their values. Based on http://dev.w3.org/csswg/css-color/#named-colors.
[![NPM](https://nodei.co/npm/color-name.png?mini=true)](https://nodei.co/npm/color-name/)
```js
var colors = require('color-name');
colors.red //[255,0,0]
```
<a href="LICENSE"><img src="https://upload.wikimedia.org/wikipedia/commons/0/0c/MIT_logo.svg" width="120"/></a>

152
node_modules/color-name/index.js generated vendored Normal file
View File

@ -0,0 +1,152 @@
'use strict'
// CSS named colors mapped to their [r, g, b] values (0-255), per the CSS
// Color specification's named-colors list (see README).
module.exports = {
"aliceblue": [240, 248, 255],
"antiquewhite": [250, 235, 215],
"aqua": [0, 255, 255],
"aquamarine": [127, 255, 212],
"azure": [240, 255, 255],
"beige": [245, 245, 220],
"bisque": [255, 228, 196],
"black": [0, 0, 0],
"blanchedalmond": [255, 235, 205],
"blue": [0, 0, 255],
"blueviolet": [138, 43, 226],
"brown": [165, 42, 42],
"burlywood": [222, 184, 135],
"cadetblue": [95, 158, 160],
"chartreuse": [127, 255, 0],
"chocolate": [210, 105, 30],
"coral": [255, 127, 80],
"cornflowerblue": [100, 149, 237],
"cornsilk": [255, 248, 220],
"crimson": [220, 20, 60],
"cyan": [0, 255, 255],
"darkblue": [0, 0, 139],
"darkcyan": [0, 139, 139],
"darkgoldenrod": [184, 134, 11],
"darkgray": [169, 169, 169],
"darkgreen": [0, 100, 0],
"darkgrey": [169, 169, 169],
"darkkhaki": [189, 183, 107],
"darkmagenta": [139, 0, 139],
"darkolivegreen": [85, 107, 47],
"darkorange": [255, 140, 0],
"darkorchid": [153, 50, 204],
"darkred": [139, 0, 0],
"darksalmon": [233, 150, 122],
"darkseagreen": [143, 188, 143],
"darkslateblue": [72, 61, 139],
"darkslategray": [47, 79, 79],
"darkslategrey": [47, 79, 79],
"darkturquoise": [0, 206, 209],
"darkviolet": [148, 0, 211],
"deeppink": [255, 20, 147],
"deepskyblue": [0, 191, 255],
"dimgray": [105, 105, 105],
"dimgrey": [105, 105, 105],
"dodgerblue": [30, 144, 255],
"firebrick": [178, 34, 34],
"floralwhite": [255, 250, 240],
"forestgreen": [34, 139, 34],
"fuchsia": [255, 0, 255],
"gainsboro": [220, 220, 220],
"ghostwhite": [248, 248, 255],
"gold": [255, 215, 0],
"goldenrod": [218, 165, 32],
"gray": [128, 128, 128],
"green": [0, 128, 0],
"greenyellow": [173, 255, 47],
"grey": [128, 128, 128],
"honeydew": [240, 255, 240],
"hotpink": [255, 105, 180],
"indianred": [205, 92, 92],
"indigo": [75, 0, 130],
"ivory": [255, 255, 240],
"khaki": [240, 230, 140],
"lavender": [230, 230, 250],
"lavenderblush": [255, 240, 245],
"lawngreen": [124, 252, 0],
"lemonchiffon": [255, 250, 205],
"lightblue": [173, 216, 230],
"lightcoral": [240, 128, 128],
"lightcyan": [224, 255, 255],
"lightgoldenrodyellow": [250, 250, 210],
"lightgray": [211, 211, 211],
"lightgreen": [144, 238, 144],
"lightgrey": [211, 211, 211],
"lightpink": [255, 182, 193],
"lightsalmon": [255, 160, 122],
"lightseagreen": [32, 178, 170],
"lightskyblue": [135, 206, 250],
"lightslategray": [119, 136, 153],
"lightslategrey": [119, 136, 153],
"lightsteelblue": [176, 196, 222],
"lightyellow": [255, 255, 224],
"lime": [0, 255, 0],
"limegreen": [50, 205, 50],
"linen": [250, 240, 230],
"magenta": [255, 0, 255],
"maroon": [128, 0, 0],
"mediumaquamarine": [102, 205, 170],
"mediumblue": [0, 0, 205],
"mediumorchid": [186, 85, 211],
"mediumpurple": [147, 112, 219],
"mediumseagreen": [60, 179, 113],
"mediumslateblue": [123, 104, 238],
"mediumspringgreen": [0, 250, 154],
"mediumturquoise": [72, 209, 204],
"mediumvioletred": [199, 21, 133],
"midnightblue": [25, 25, 112],
"mintcream": [245, 255, 250],
"mistyrose": [255, 228, 225],
"moccasin": [255, 228, 181],
"navajowhite": [255, 222, 173],
"navy": [0, 0, 128],
"oldlace": [253, 245, 230],
"olive": [128, 128, 0],
"olivedrab": [107, 142, 35],
"orange": [255, 165, 0],
"orangered": [255, 69, 0],
"orchid": [218, 112, 214],
"palegoldenrod": [238, 232, 170],
"palegreen": [152, 251, 152],
"paleturquoise": [175, 238, 238],
"palevioletred": [219, 112, 147],
"papayawhip": [255, 239, 213],
"peachpuff": [255, 218, 185],
"peru": [205, 133, 63],
"pink": [255, 192, 203],
"plum": [221, 160, 221],
"powderblue": [176, 224, 230],
"purple": [128, 0, 128],
"rebeccapurple": [102, 51, 153],
"red": [255, 0, 0],
"rosybrown": [188, 143, 143],
"royalblue": [65, 105, 225],
"saddlebrown": [139, 69, 19],
"salmon": [250, 128, 114],
"sandybrown": [244, 164, 96],
"seagreen": [46, 139, 87],
"seashell": [255, 245, 238],
"sienna": [160, 82, 45],
"silver": [192, 192, 192],
"skyblue": [135, 206, 235],
"slateblue": [106, 90, 205],
"slategray": [112, 128, 144],
"slategrey": [112, 128, 144],
"snow": [255, 250, 250],
"springgreen": [0, 255, 127],
"steelblue": [70, 130, 180],
"tan": [210, 180, 140],
"teal": [0, 128, 128],
"thistle": [216, 191, 216],
"tomato": [255, 99, 71],
"turquoise": [64, 224, 208],
"violet": [238, 130, 238],
"wheat": [245, 222, 179],
"white": [255, 255, 255],
"whitesmoke": [245, 245, 245],
"yellow": [255, 255, 0],
"yellowgreen": [154, 205, 50]
};

28
node_modules/color-name/package.json generated vendored Normal file
View File

@ -0,0 +1,28 @@
{
"name": "color-name",
"version": "1.1.4",
"description": "A list of color names and its values",
"main": "index.js",
"files": [
"index.js"
],
"scripts": {
"test": "node test.js"
},
"repository": {
"type": "git",
"url": "git@github.com:colorjs/color-name.git"
},
"keywords": [
"color-name",
"color",
"color-keyword",
"keyword"
],
"author": "DY <dfcreative@gmail.com>",
"license": "MIT",
"bugs": {
"url": "https://github.com/colorjs/color-name/issues"
},
"homepage": "https://github.com/colorjs/color-name"
}

22
node_modules/commander/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
(The MIT License)
Copyright (c) 2011 TJ Holowaychuk <tj@vision-media.ca>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

1148
node_modules/commander/Readme.md generated vendored Normal file

File diff suppressed because it is too large Load Diff

16
node_modules/commander/esm.mjs generated vendored Normal file
View File

@ -0,0 +1,16 @@
import commander from './index.js';
// wrapper to provide named exports for ESM.
// Destructures the CommonJS singleton so ESM consumers get the same
// objects (program, classes, errors) as require('commander') callers.
export const {
program,
createCommand,
createArgument,
createOption,
CommanderError,
InvalidArgumentError,
InvalidOptionArgumentError, // deprecated old name
Command,
Argument,
Option,
Help
} = commander;

26
node_modules/commander/index.js generated vendored Normal file
View File

@ -0,0 +1,26 @@
const { Argument } = require('./lib/argument.js');
const { Command } = require('./lib/command.js');
const { CommanderError, InvalidArgumentError } = require('./lib/error.js');
const { Help } = require('./lib/help.js');
const { Option } = require('./lib/option.js');
/**
 * Expose the root command.
 *
 * The module itself is a ready-to-use Command instance, so both
 * `require('commander')` and `require('commander').program` work.
 */
exports = module.exports = new Command();
exports.program = exports; // More explicit access to global command.
// createArgument, createCommand, and createOption are implicitly available as they are methods on program.
/**
 * Expose classes
 */
exports.Command = Command;
exports.Option = Option;
exports.Argument = Argument;
exports.Help = Help;
exports.CommanderError = CommanderError;
exports.InvalidArgumentError = InvalidArgumentError;
// Deprecated alias: same class as InvalidArgumentError, kept for back-compat.
exports.InvalidOptionArgumentError = InvalidArgumentError; // Deprecated

145
node_modules/commander/lib/argument.js generated vendored Normal file
View File

@ -0,0 +1,145 @@
const { InvalidArgumentError } = require('./error.js');
/**
 * A positional command argument. `<name>` syntax marks it required,
 * `[name]` optional, and a trailing `...` marks it variadic.
 */
class Argument {
  /**
   * Initialize a new command argument with the given name and description.
   * The default is that the argument is required, and you can explicitly
   * indicate this with <> around the name. Put [] around the name for an optional argument.
   *
   * @param {string} name
   * @param {string} [description]
   */
  constructor(name, description) {
    this.description = description || '';
    this.variadic = false;
    this.parseArg = undefined;
    this.defaultValue = undefined;
    this.defaultValueDescription = undefined;
    this.argChoices = undefined;
    const marker = name[0];
    if (marker === '<') {
      // e.g. <required>
      this.required = true;
      this._name = name.slice(1, -1);
    } else if (marker === '[') {
      // e.g. [optional]
      this.required = false;
      this._name = name.slice(1, -1);
    } else {
      // Bare name: required by default, used verbatim.
      this.required = true;
      this._name = name;
    }
    // A trailing '...' on a non-empty name marks the argument variadic.
    if (this._name.length > 3 && this._name.slice(-3) === '...') {
      this.variadic = true;
      this._name = this._name.slice(0, -3);
    }
  }

  /**
   * Return argument name.
   *
   * @return {string}
   */
  name() {
    return this._name;
  }

  /**
   * Accumulate a variadic value: start a fresh array when nothing has been
   * collected yet (previous is still the default, or not an array).
   *
   * @api private
   */
  _concatValue(value, previous) {
    return (previous === this.defaultValue || !Array.isArray(previous))
      ? [value]
      : previous.concat(value);
  }

  /**
   * Set the default value, and optionally supply the description to be displayed in the help.
   *
   * @param {*} value
   * @param {string} [description]
   * @return {Argument}
   */
  default(value, description) {
    this.defaultValue = value;
    this.defaultValueDescription = description;
    return this;
  }

  /**
   * Set the custom handler for processing CLI command arguments into argument values.
   *
   * @param {Function} [fn]
   * @return {Argument}
   */
  argParser(fn) {
    this.parseArg = fn;
    return this;
  }

  /**
   * Only allow argument value to be one of choices.
   *
   * @param {string[]} values
   * @return {Argument}
   */
  choices(values) {
    this.argChoices = values.slice();
    // Install a parser that rejects values outside the choice list and
    // accumulates values when the argument is variadic.
    this.parseArg = (arg, previous) => {
      if (!this.argChoices.includes(arg)) {
        throw new InvalidArgumentError(`Allowed choices are ${this.argChoices.join(', ')}.`);
      }
      return this.variadic ? this._concatValue(arg, previous) : arg;
    };
    return this;
  }

  /**
   * Make argument required.
   */
  argRequired() {
    this.required = true;
    return this;
  }

  /**
   * Make argument optional.
   */
  argOptional() {
    this.required = false;
    return this;
  }
}
/**
 * Takes an argument and returns its human readable equivalent for help usage:
 * `<name>` when required, `[name]` when optional, with `...` appended to
 * the name for variadic arguments.
 *
 * @param {Argument} arg
 * @return {string}
 * @api private
 */
function humanReadableArgName(arg) {
  const suffix = arg.variadic === true ? '...' : '';
  const display = arg.name() + suffix;
  return arg.required ? `<${display}>` : `[${display}]`;
}
// Public Argument class plus the help-formatting helper (also consumed by
// help.js).
exports.Argument = Argument;
exports.humanReadableArgName = humanReadableArgName;

2179
node_modules/commander/lib/command.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

43
node_modules/commander/lib/error.js generated vendored Normal file
View File

@ -0,0 +1,43 @@
/**
 * CommanderError class — base error for all commander failures, carrying
 * a machine-readable `code` and a suggested process `exitCode`.
 * @class
 */
class CommanderError extends Error {
  /**
   * Constructs the CommanderError class
   * @param {number} exitCode suggested exit code which could be used with process.exit
   * @param {string} code an id string representing the error
   * @param {string} message human-readable description of the error
   * @constructor
   */
  constructor(exitCode, code, message) {
    super(message);
    // properly capture stack trace in Node.js
    Error.captureStackTrace(this, this.constructor);
    this.name = this.constructor.name;
    this.exitCode = exitCode;
    this.code = code;
    // Populated by callers that wrap a lower-level error.
    this.nestedError = undefined;
  }
}

/**
 * InvalidArgumentError class — a CommanderError specialized for bad
 * option/argument values (fixed exit code 1, code 'commander.invalidArgument').
 * @class
 */
class InvalidArgumentError extends CommanderError {
  /**
   * Constructs the InvalidArgumentError class
   * @param {string} [message] explanation of why argument is invalid
   * @constructor
   */
  constructor(message) {
    super(1, 'commander.invalidArgument', message);
    // properly capture stack trace in Node.js
    Error.captureStackTrace(this, this.constructor);
    this.name = this.constructor.name;
  }
}
// Both error classes are re-exported from the package root (see index.js).
exports.CommanderError = CommanderError;
exports.InvalidArgumentError = InvalidArgumentError;

462
node_modules/commander/lib/help.js generated vendored Normal file
View File

@ -0,0 +1,462 @@
const { humanReadableArgName } = require('./argument.js');
/**
* TypeScript import types for JSDoc, used by Visual Studio Code IntelliSense and `npm run typescript-checkJS`
* https://www.typescriptlang.org/docs/handbook/jsdoc-supported-types.html#import-types
* @typedef { import("./argument.js").Argument } Argument
* @typedef { import("./command.js").Command } Command
* @typedef { import("./option.js").Option } Option
*/
// Although this is a class, methods are static in style to allow override using subclass or just functions.
/**
 * Formats the built-in help output for a Command.
 *
 * Methods are written in a static style (taking `cmd`/`helper` parameters)
 * so behaviour can be overridden either by subclassing or by supplying
 * plain replacement functions. Configuration fields (`helpWidth`,
 * `sortSubcommands`, `sortOptions`, `showGlobalOptions`) are instance state.
 *
 * NOTE(review): several methods read private Command fields (`_hidden`,
 * `_hasHelpOption`, `_helpShortFlag`, ...) declared in command.js, which is
 * outside this view — their exact semantics are assumed from usage here.
 */
class Help {
  constructor() {
    this.helpWidth = undefined; // undefined means "use default" (80 in formatHelp)
    this.sortSubcommands = false;
    this.sortOptions = false;
    this.showGlobalOptions = false;
  }
  /**
   * Get an array of the visible subcommands. Includes a placeholder for the implicit help command, if there is one.
   *
   * @param {Command} cmd
   * @returns {Command[]}
   */
  visibleCommands(cmd) {
    const visibleCommands = cmd.commands.filter(cmd => !cmd._hidden);
    if (cmd._hasImplicitHelpCommand()) {
      // Create a command matching the implicit help command.
      const [, helpName, helpArgs] = cmd._helpCommandnameAndArgs.match(/([^ ]+) *(.*)/);
      const helpCommand = cmd.createCommand(helpName)
        .helpOption(false);
      helpCommand.description(cmd._helpCommandDescription);
      if (helpArgs) helpCommand.arguments(helpArgs);
      visibleCommands.push(helpCommand);
    }
    if (this.sortSubcommands) {
      visibleCommands.sort((a, b) => {
        // @ts-ignore: overloaded return type
        return a.name().localeCompare(b.name());
      });
    }
    return visibleCommands;
  }
  /**
   * Compare options for sort.
   *
   * @param {Option} a
   * @param {Option} b
   * @returns number
   */
  compareOptions(a, b) {
    const getSortKey = (option) => {
      // WYSIWYG for order displayed in help. Short used for comparison if present. No special handling for negated.
      return option.short ? option.short.replace(/^-/, '') : option.long.replace(/^--/, '');
    };
    return getSortKey(a).localeCompare(getSortKey(b));
  }
  /**
   * Get an array of the visible options. Includes a placeholder for the implicit help option, if there is one.
   *
   * @param {Command} cmd
   * @returns {Option[]}
   */
  visibleOptions(cmd) {
    const visibleOptions = cmd.options.filter((option) => !option.hidden);
    // Implicit help: only synthesize flags the user has not already claimed.
    const showShortHelpFlag = cmd._hasHelpOption && cmd._helpShortFlag && !cmd._findOption(cmd._helpShortFlag);
    const showLongHelpFlag = cmd._hasHelpOption && !cmd._findOption(cmd._helpLongFlag);
    if (showShortHelpFlag || showLongHelpFlag) {
      let helpOption;
      if (!showShortHelpFlag) {
        helpOption = cmd.createOption(cmd._helpLongFlag, cmd._helpDescription);
      } else if (!showLongHelpFlag) {
        helpOption = cmd.createOption(cmd._helpShortFlag, cmd._helpDescription);
      } else {
        helpOption = cmd.createOption(cmd._helpFlags, cmd._helpDescription);
      }
      visibleOptions.push(helpOption);
    }
    if (this.sortOptions) {
      visibleOptions.sort(this.compareOptions);
    }
    return visibleOptions;
  }
  /**
   * Get an array of the visible global options. (Not including help.)
   *
   * @param {Command} cmd
   * @returns {Option[]}
   */
  visibleGlobalOptions(cmd) {
    if (!this.showGlobalOptions) return [];
    const globalOptions = [];
    // Walk up the parent chain collecting each ancestor's visible options.
    for (let ancestorCmd = cmd.parent; ancestorCmd; ancestorCmd = ancestorCmd.parent) {
      const visibleOptions = ancestorCmd.options.filter((option) => !option.hidden);
      globalOptions.push(...visibleOptions);
    }
    if (this.sortOptions) {
      globalOptions.sort(this.compareOptions);
    }
    return globalOptions;
  }
  /**
   * Get an array of the arguments if any have a description.
   *
   * @param {Command} cmd
   * @returns {Argument[]}
   */
  visibleArguments(cmd) {
    // Side effect! Apply the legacy descriptions before the arguments are displayed.
    if (cmd._argsDescription) {
      cmd.registeredArguments.forEach(argument => {
        argument.description = argument.description || cmd._argsDescription[argument.name()] || '';
      });
    }
    // If there are any arguments with a description then return all the arguments.
    if (cmd.registeredArguments.find(argument => argument.description)) {
      return cmd.registeredArguments;
    }
    return [];
  }
  /**
   * Get the command term to show in the list of subcommands.
   *
   * @param {Command} cmd
   * @returns {string}
   */
  subcommandTerm(cmd) {
    // Legacy. Ignores custom usage string, and nested commands.
    const args = cmd.registeredArguments.map(arg => humanReadableArgName(arg)).join(' ');
    return cmd._name +
      (cmd._aliases[0] ? '|' + cmd._aliases[0] : '') +
      (cmd.options.length ? ' [options]' : '') + // simplistic check for non-help option
      (args ? ' ' + args : '');
  }
  /**
   * Get the option term to show in the list of options.
   *
   * @param {Option} option
   * @returns {string}
   */
  optionTerm(option) {
    return option.flags;
  }
  /**
   * Get the argument term to show in the list of arguments.
   *
   * @param {Argument} argument
   * @returns {string}
   */
  argumentTerm(argument) {
    return argument.name();
  }
  /**
   * Get the longest command term length.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {number}
   */
  longestSubcommandTermLength(cmd, helper) {
    return helper.visibleCommands(cmd).reduce((max, command) => {
      return Math.max(max, helper.subcommandTerm(command).length);
    }, 0);
  }
  /**
   * Get the longest option term length.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {number}
   */
  longestOptionTermLength(cmd, helper) {
    return helper.visibleOptions(cmd).reduce((max, option) => {
      return Math.max(max, helper.optionTerm(option).length);
    }, 0);
  }
  /**
   * Get the longest global option term length.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {number}
   */
  longestGlobalOptionTermLength(cmd, helper) {
    return helper.visibleGlobalOptions(cmd).reduce((max, option) => {
      return Math.max(max, helper.optionTerm(option).length);
    }, 0);
  }
  /**
   * Get the longest argument term length.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {number}
   */
  longestArgumentTermLength(cmd, helper) {
    return helper.visibleArguments(cmd).reduce((max, argument) => {
      return Math.max(max, helper.argumentTerm(argument).length);
    }, 0);
  }
  /**
   * Get the command usage to be displayed at the top of the built-in help.
   *
   * @param {Command} cmd
   * @returns {string}
   */
  commandUsage(cmd) {
    // Usage: "<ancestors...> <name>|<first alias> <usage>"
    let cmdName = cmd._name;
    if (cmd._aliases[0]) {
      cmdName = cmdName + '|' + cmd._aliases[0];
    }
    let ancestorCmdNames = '';
    for (let ancestorCmd = cmd.parent; ancestorCmd; ancestorCmd = ancestorCmd.parent) {
      ancestorCmdNames = ancestorCmd.name() + ' ' + ancestorCmdNames;
    }
    return ancestorCmdNames + cmdName + ' ' + cmd.usage();
  }
  /**
   * Get the description for the command.
   *
   * @param {Command} cmd
   * @returns {string}
   */
  commandDescription(cmd) {
    // @ts-ignore: overloaded return type
    return cmd.description();
  }
  /**
   * Get the subcommand summary to show in the list of subcommands.
   * (Fallback to description for backwards compatibility.)
   *
   * @param {Command} cmd
   * @returns {string}
   */
  subcommandDescription(cmd) {
    // @ts-ignore: overloaded return type
    return cmd.summary() || cmd.description();
  }
  /**
   * Get the option description to show in the list of options.
   * Appends extra info (choices, default, preset, env var) in parentheses.
   *
   * @param {Option} option
   * @return {string}
   */
  optionDescription(option) {
    const extraInfo = [];
    if (option.argChoices) {
      extraInfo.push(
        // use stringify to match the display of the default value
        `choices: ${option.argChoices.map((choice) => JSON.stringify(choice)).join(', ')}`);
    }
    if (option.defaultValue !== undefined) {
      // default for boolean and negated more for programmer than end user,
      // but show true/false for boolean option as may be for hand-rolled env or config processing.
      const showDefault = option.required || option.optional ||
        (option.isBoolean() && typeof option.defaultValue === 'boolean');
      if (showDefault) {
        extraInfo.push(`default: ${option.defaultValueDescription || JSON.stringify(option.defaultValue)}`);
      }
    }
    // preset for boolean and negated are more for programmer than end user
    if (option.presetArg !== undefined && option.optional) {
      extraInfo.push(`preset: ${JSON.stringify(option.presetArg)}`);
    }
    if (option.envVar !== undefined) {
      extraInfo.push(`env: ${option.envVar}`);
    }
    if (extraInfo.length > 0) {
      return `${option.description} (${extraInfo.join(', ')})`;
    }
    return option.description;
  }
  /**
   * Get the argument description to show in the list of arguments.
   * Appends extra info (choices, default) in parentheses.
   *
   * @param {Argument} argument
   * @return {string}
   */
  argumentDescription(argument) {
    const extraInfo = [];
    if (argument.argChoices) {
      extraInfo.push(
        // use stringify to match the display of the default value
        `choices: ${argument.argChoices.map((choice) => JSON.stringify(choice)).join(', ')}`);
    }
    if (argument.defaultValue !== undefined) {
      extraInfo.push(`default: ${argument.defaultValueDescription || JSON.stringify(argument.defaultValue)}`);
    }
    if (extraInfo.length > 0) {
      const extraDescripton = `(${extraInfo.join(', ')})`;
      if (argument.description) {
        return `${argument.description} ${extraDescripton}`;
      }
      return extraDescripton;
    }
    return argument.description;
  }
  /**
   * Generate the built-in help text.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {string}
   */
  formatHelp(cmd, helper) {
    const termWidth = helper.padWidth(cmd, helper);
    const helpWidth = helper.helpWidth || 80;
    const itemIndentWidth = 2;
    const itemSeparatorWidth = 2; // between term and description
    function formatItem(term, description) {
      if (description) {
        const fullText = `${term.padEnd(termWidth + itemSeparatorWidth)}${description}`;
        return helper.wrap(fullText, helpWidth - itemIndentWidth, termWidth + itemSeparatorWidth);
      }
      return term;
    }
    function formatList(textArray) {
      return textArray.join('\n').replace(/^/gm, ' '.repeat(itemIndentWidth));
    }
    // Usage
    let output = [`Usage: ${helper.commandUsage(cmd)}`, ''];
    // Description
    const commandDescription = helper.commandDescription(cmd);
    if (commandDescription.length > 0) {
      output = output.concat([helper.wrap(commandDescription, helpWidth, 0), '']);
    }
    // Arguments
    const argumentList = helper.visibleArguments(cmd).map((argument) => {
      return formatItem(helper.argumentTerm(argument), helper.argumentDescription(argument));
    });
    if (argumentList.length > 0) {
      output = output.concat(['Arguments:', formatList(argumentList), '']);
    }
    // Options
    const optionList = helper.visibleOptions(cmd).map((option) => {
      return formatItem(helper.optionTerm(option), helper.optionDescription(option));
    });
    if (optionList.length > 0) {
      output = output.concat(['Options:', formatList(optionList), '']);
    }
    // NOTE(review): reads this.showGlobalOptions rather than helper.showGlobalOptions,
    // unlike the helper.* calls above — matches the vendored upstream code; confirm intended.
    if (this.showGlobalOptions) {
      const globalOptionList = helper.visibleGlobalOptions(cmd).map((option) => {
        return formatItem(helper.optionTerm(option), helper.optionDescription(option));
      });
      if (globalOptionList.length > 0) {
        output = output.concat(['Global Options:', formatList(globalOptionList), '']);
      }
    }
    // Commands
    const commandList = helper.visibleCommands(cmd).map((cmd) => {
      return formatItem(helper.subcommandTerm(cmd), helper.subcommandDescription(cmd));
    });
    if (commandList.length > 0) {
      output = output.concat(['Commands:', formatList(commandList), '']);
    }
    return output.join('\n');
  }
  /**
   * Calculate the pad width from the maximum term length.
   *
   * @param {Command} cmd
   * @param {Help} helper
   * @returns {number}
   */
  padWidth(cmd, helper) {
    return Math.max(
      helper.longestOptionTermLength(cmd, helper),
      helper.longestGlobalOptionTermLength(cmd, helper),
      helper.longestSubcommandTermLength(cmd, helper),
      helper.longestArgumentTermLength(cmd, helper)
    );
  }
  /**
   * Wrap the given string to width characters per line, with lines after the first indented.
   * Do not wrap if insufficient room for wrapping (minColumnWidth), or string is manually formatted.
   *
   * @param {string} str
   * @param {number} width
   * @param {number} indent
   * @param {number} [minColumnWidth=40]
   * @return {string}
   *
   */
  wrap(str, width, indent, minColumnWidth = 40) {
    // Full \s characters, minus the linefeeds.
    const indents = ' \\f\\t\\v\u00a0\u1680\u2000-\u200a\u202f\u205f\u3000\ufeff';
    // Detect manually wrapped and indented strings by searching for line break followed by spaces.
    const manualIndent = new RegExp(`[\\n][${indents}]+`);
    if (str.match(manualIndent)) return str;
    // Do not wrap if not enough room for a wrapped column of text (as could end up with a word per line).
    const columnWidth = width - indent;
    if (columnWidth < minColumnWidth) return str;
    const leadingStr = str.slice(0, indent);
    const columnText = str.slice(indent).replace('\r\n', '\n');
    const indentString = ' '.repeat(indent);
    const zeroWidthSpace = '\u200B';
    const breaks = `\\s${zeroWidthSpace}`;
    // Match line end (so empty lines don't collapse),
    // or as much text as will fit in column, or excess text up to first break.
    const regex = new RegExp(`\n|.{1,${columnWidth - 1}}([${breaks}]|$)|[^${breaks}]+?([${breaks}]|$)`, 'g');
    const lines = columnText.match(regex) || [];
    return leadingStr + lines.map((line, i) => {
      if (line === '\n') return ''; // preserve empty lines
      return ((i > 0) ? indentString : '') + line.trimEnd();
    }).join('\n');
  }
}
exports.Help = Help;

329
node_modules/commander/lib/option.js generated vendored Normal file
View File

@ -0,0 +1,329 @@
const { InvalidArgumentError } = require('./error.js');
/**
 * A single command-line option: its flags, parsing behaviour, and metadata
 * used by help output. Most setters return `this` so configuration chains.
 *
 * NOTE(review): parsing/consumption of these fields happens in command.js,
 * outside this view; comments describe only what is visible here.
 */
class Option {
  /**
   * Initialize a new `Option` with the given `flags` and `description`.
   *
   * @param {string} flags
   * @param {string} [description]
   */
  constructor(flags, description) {
    this.flags = flags;
    this.description = description || '';
    this.required = flags.includes('<'); // A value must be supplied when the option is specified.
    this.optional = flags.includes('['); // A value is optional when the option is specified.
    // variadic test ignores <value,...> et al which might be used to describe custom splitting of single argument
    this.variadic = /\w\.\.\.[>\]]$/.test(flags); // The option can take multiple values.
    this.mandatory = false; // The option must have a value after parsing, which usually means it must be specified on command line.
    const optionFlags = splitOptionFlags(flags);
    this.short = optionFlags.shortFlag;
    this.long = optionFlags.longFlag;
    this.negate = false;
    if (this.long) {
      this.negate = this.long.startsWith('--no-');
    }
    this.defaultValue = undefined;
    this.defaultValueDescription = undefined;
    this.presetArg = undefined;
    this.envVar = undefined;
    this.parseArg = undefined;
    this.hidden = false;
    this.argChoices = undefined;
    this.conflictsWith = [];
    this.implied = undefined;
  }
  /**
   * Set the default value, and optionally supply the description to be displayed in the help.
   *
   * @param {*} value
   * @param {string} [description]
   * @return {Option}
   */
  default(value, description) {
    this.defaultValue = value;
    this.defaultValueDescription = description;
    return this;
  }
  /**
   * Preset to use when option used without option-argument, especially optional but also boolean and negated.
   * The custom processing (parseArg) is called.
   *
   * @example
   * new Option('--color').default('GREYSCALE').preset('RGB');
   * new Option('--donate [amount]').preset('20').argParser(parseFloat);
   *
   * @param {*} arg
   * @return {Option}
   */
  preset(arg) {
    this.presetArg = arg;
    return this;
  }
  /**
   * Add option name(s) that conflict with this option.
   * An error will be displayed if conflicting options are found during parsing.
   *
   * @example
   * new Option('--rgb').conflicts('cmyk');
   * new Option('--js').conflicts(['ts', 'jsx']);
   *
   * @param {string | string[]} names
   * @return {Option}
   */
  conflicts(names) {
    // concat accepts either a single name or an array of names.
    this.conflictsWith = this.conflictsWith.concat(names);
    return this;
  }
  /**
   * Specify implied option values for when this option is set and the implied options are not.
   *
   * The custom processing (parseArg) is not called on the implied values.
   *
   * @example
   * program
   *   .addOption(new Option('--log', 'write logging information to file'))
   *   .addOption(new Option('--trace', 'log extra details').implies({ log: 'trace.txt' }));
   *
   * @param {Object} impliedOptionValues
   * @return {Option}
   */
  implies(impliedOptionValues) {
    let newImplied = impliedOptionValues;
    if (typeof impliedOptionValues === 'string') {
      // string is not documented, but easy mistake and we can do what user probably intended.
      newImplied = { [impliedOptionValues]: true };
    }
    // merge: later implies() calls overwrite earlier keys
    this.implied = Object.assign(this.implied || {}, newImplied);
    return this;
  }
  /**
   * Set environment variable to check for option value.
   *
   * An environment variable is only used if, when processed, the current option value is
   * undefined, or the source of the current value is 'default' or 'config' or 'env'.
   *
   * @param {string} name
   * @return {Option}
   */
  env(name) {
    this.envVar = name;
    return this;
  }
  /**
   * Set the custom handler for processing CLI option arguments into option values.
   *
   * @param {Function} [fn]
   * @return {Option}
   */
  argParser(fn) {
    this.parseArg = fn;
    return this;
  }
  /**
   * Whether the option is mandatory and must have a value after parsing.
   *
   * @param {boolean} [mandatory=true]
   * @return {Option}
   */
  makeOptionMandatory(mandatory = true) {
    this.mandatory = !!mandatory;
    return this;
  }
  /**
   * Hide option in help.
   *
   * @param {boolean} [hide=true]
   * @return {Option}
   */
  hideHelp(hide = true) {
    this.hidden = !!hide;
    return this;
  }
  /**
   * Accumulate a variadic value: start a fresh array when previous is the
   * default (or not an array), otherwise append.
   *
   * @api private
   */
  _concatValue(value, previous) {
    if (previous === this.defaultValue || !Array.isArray(previous)) {
      return [value];
    }
    return previous.concat(value);
  }
  /**
   * Only allow option value to be one of choices.
   *
   * @param {string[]} values
   * @return {Option}
   */
  choices(values) {
    this.argChoices = values.slice();
    // Installs a parseArg that validates membership (replaces any custom parser).
    this.parseArg = (arg, previous) => {
      if (!this.argChoices.includes(arg)) {
        throw new InvalidArgumentError(`Allowed choices are ${this.argChoices.join(', ')}.`);
      }
      if (this.variadic) {
        return this._concatValue(arg, previous);
      }
      return arg;
    };
    return this;
  }
  /**
   * Return option name.
   *
   * @return {string}
   */
  name() {
    if (this.long) {
      return this.long.replace(/^--/, '');
    }
    return this.short.replace(/^-/, '');
  }
  /**
   * Return option name, in a camelcase format that can be used
   * as a object attribute key.
   *
   * @return {string}
   * @api private
   */
  attributeName() {
    // strip a negation prefix so --no-color and --color share the key 'color'
    return camelcase(this.name().replace(/^no-/, ''));
  }
  /**
   * Check if `arg` matches the short or long flag.
   *
   * @param {string} arg
   * @return {boolean}
   * @api private
   */
  is(arg) {
    return this.short === arg || this.long === arg;
  }
  /**
   * Return whether a boolean option.
   *
   * Options are one of boolean, negated, required argument, or optional argument.
   *
   * @return {boolean}
   * @api private
   */
  isBoolean() {
    return !this.required && !this.optional && !this.negate;
  }
}
/**
* This class is to make it easier to work with dual options, without changing the existing
* implementation. We support separate dual options for separate positive and negative options,
* like `--build` and `--no-build`, which share a single option value. This works nicely for some
* use cases, but is tricky for others where we want separate behaviours despite
* the single shared option value.
*/
/**
 * Helper for working with dual options (a positive/negative pair such as
 * `--build` / `--no-build` sharing one option value) without changing the
 * existing Option implementation. Lets callers decide whether a value was
 * produced by a given option or by its dual counterpart.
 */
class DualOptions {
  /**
   * @param {Option[]} options
   */
  constructor(options) {
    this.positiveOptions = new Map();
    this.negativeOptions = new Map();
    this.dualOptions = new Set();
    // Bucket each option by polarity, keyed by its shared attribute name.
    for (const option of options) {
      const key = option.attributeName();
      if (option.negate) {
        this.negativeOptions.set(key, option);
      } else {
        this.positiveOptions.set(key, option);
      }
    }
    // A key present in both buckets identifies a dual pair.
    for (const key of this.negativeOptions.keys()) {
      if (this.positiveOptions.has(key)) {
        this.dualOptions.add(key);
      }
    }
  }
  /**
   * Did the value come from the option, and not from possible matching dual option?
   *
   * @param {*} value
   * @param {Option} option
   * @returns {boolean}
   */
  valueFromOption(value, option) {
    const optionKey = option.attributeName();
    if (!this.dualOptions.has(optionKey)) return true;
    // The negative option stores either its preset or plain false; if the
    // shared value matches that, it (probably) came from the negative option.
    const preset = this.negativeOptions.get(optionKey).presetArg;
    const negativeValue = preset === undefined ? false : preset;
    return option.negate === (negativeValue === value);
  }
}
/**
* Convert string from kebab-case to camelCase.
*
* @param {string} str
* @return {string}
* @api private
*/
/**
 * Convert string from kebab-case to camelCase.
 *
 * The first segment is kept as-is; each later segment is capitalized.
 * Empty segments (from doubled, leading, or trailing hyphens) are skipped —
 * the original implementation threw a TypeError on `word[0]` for those.
 *
 * @param {string} str
 * @return {string}
 * @api private
 */
function camelcase(str) {
  return str.split('-').reduce((acc, word) => {
    if (word.length === 0) return acc; // tolerate '--', leading or trailing '-'
    return acc + word[0].toUpperCase() + word.slice(1);
  });
}
/**
* Split the short and long flag out of something like '-m,--mixed <value>'
*
* @api private
*/
/**
 * Split the short and long flag out of something like '-m,--mixed <value>'.
 * Returns `{ shortFlag, longFlag }`; either slot may be undefined.
 *
 * @api private
 */
function splitOptionFlags(flags) {
  // Deliberately loose split (space, '|', or ',') kept for backwards
  // compatibility — it also accepts e.g. `-sw, --short-word` [sic].
  const parts = flags.split(/[ |,]+/);
  let shortFlag;
  // The first token is the short flag only when another flag-like token follows.
  if (parts.length > 1 && !/^[[<]/.test(parts[1])) {
    shortFlag = parts.shift();
  }
  let longFlag = parts.shift();
  // Support a lone short flag ('-s'): it lands in the long slot, so relocate it.
  if (!shortFlag && /^-[^-]$/.test(longFlag)) {
    [shortFlag, longFlag] = [longFlag, undefined];
  }
  return { shortFlag, longFlag };
}
exports.Option = Option;
exports.splitOptionFlags = splitOptionFlags;
exports.DualOptions = DualOptions;

100
node_modules/commander/lib/suggestSimilar.js generated vendored Normal file
View File

@ -0,0 +1,100 @@
// Candidates further than this many edits away are never suggested.
const maxDistance = 3;

/**
 * Optimal string alignment distance (restricted Damerau–Levenshtein):
 * insertions, deletions, substitutions, and adjacent transpositions, with
 * no substring edited more than once.
 * https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
 *
 * @param {string} a
 * @param {string} b
 * @returns {number}
 */
function editDistance(a, b) {
  // Quick early exit: lengths differ by more than we'd ever suggest, so
  // return the worst case rather than filling the matrix.
  if (Math.abs(a.length - b.length) > maxDistance) return Math.max(a.length, b.length);
  // d[i][j] = distance between the first i chars of a and first j chars of b
  const d = [];
  // pure deletions turn a into empty string
  for (let i = 0; i <= a.length; i++) {
    d[i] = [i];
  }
  // pure insertions turn empty string into b
  for (let j = 0; j <= b.length; j++) {
    d[0][j] = j;
  }
  // fill matrix
  for (let j = 1; j <= b.length; j++) {
    for (let i = 1; i <= a.length; i++) {
      const cost = (a[i - 1] === b[j - 1]) ? 0 : 1;
      d[i][j] = Math.min(
        d[i - 1][j] + 1, // deletion
        d[i][j - 1] + 1, // insertion
        d[i - 1][j - 1] + cost // substitution
      );
      // transposition of adjacent characters
      if (i > 1 && j > 1 && a[i - 1] === b[j - 2] && a[i - 2] === b[j - 1]) {
        d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + 1);
      }
    }
  }
  return d[a.length][b.length];
}

/**
 * Find close matches, restricted to same number of edits.
 * Returns a ready-to-append hint string such as '\n(Did you mean commit?)',
 * or '' when nothing is similar enough.
 *
 * @param {string} word
 * @param {string[]} candidates
 * @returns {string}
 */
function suggestSimilar(word, candidates) {
  if (!candidates || candidates.length === 0) return '';
  // remove possible duplicates
  candidates = Array.from(new Set(candidates));
  // Compare option names without their '--' prefix, restoring it on output.
  const searchingOptions = word.startsWith('--');
  if (searchingOptions) {
    word = word.slice(2);
    candidates = candidates.map(candidate => candidate.slice(2));
  }
  let similar = [];
  let bestDistance = maxDistance;
  const minSimilarity = 0.4; // reject matches where most characters differ
  candidates.forEach((candidate) => {
    if (candidate.length <= 1) return; // no one character guesses
    const distance = editDistance(word, candidate);
    const length = Math.max(word.length, candidate.length);
    const similarity = (length - distance) / length;
    if (similarity > minSimilarity) {
      if (distance < bestDistance) {
        // better edit distance, throw away previous worse matches
        bestDistance = distance;
        similar = [candidate];
      } else if (distance === bestDistance) {
        similar.push(candidate);
      }
    }
  });
  similar.sort((a, b) => a.localeCompare(b));
  if (searchingOptions) {
    similar = similar.map(candidate => `--${candidate}`);
  }
  if (similar.length > 1) {
    return `\n(Did you mean one of ${similar.join(', ')}?)`;
  }
  if (similar.length === 1) {
    return `\n(Did you mean ${similar[0]}?)`;
  }
  return '';
}
exports.suggestSimilar = suggestSimilar;

16
node_modules/commander/package-support.json generated vendored Normal file
View File

@ -0,0 +1,16 @@
{
"versions": [
{
"version": "*",
"target": {
"node": "supported"
},
"response": {
"type": "time-permitting"
},
"backing": {
"npm-funding": true
}
}
]
}

80
node_modules/commander/package.json generated vendored Normal file
View File

@ -0,0 +1,80 @@
{
"name": "commander",
"version": "11.1.0",
"description": "the complete solution for node.js command-line programs",
"keywords": [
"commander",
"command",
"option",
"parser",
"cli",
"argument",
"args",
"argv"
],
"author": "TJ Holowaychuk <tj@vision-media.ca>",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/tj/commander.js.git"
},
"scripts": {
"lint": "npm run lint:javascript && npm run lint:typescript",
"lint:javascript": "eslint index.js esm.mjs \"lib/*.js\" \"tests/**/*.js\"",
"lint:typescript": "eslint typings/*.ts tests/*.ts",
"test": "jest && npm run typecheck-ts",
"test-esm": "node ./tests/esm-imports-test.mjs",
"typecheck-ts": "tsd && tsc -p tsconfig.ts.json",
"typecheck-js": "tsc -p tsconfig.js.json",
"test-all": "npm run test && npm run lint && npm run typecheck-js && npm run test-esm"
},
"files": [
"index.js",
"lib/*.js",
"esm.mjs",
"typings/index.d.ts",
"typings/esm.d.mts",
"package-support.json"
],
"type": "commonjs",
"main": "./index.js",
"exports": {
".": {
"require": {
"types": "./typings/index.d.ts",
"default": "./index.js"
},
"import": {
"types": "./typings/esm.d.mts",
"default": "./esm.mjs"
},
"default": "./index.js"
},
"./esm.mjs": {
"types": "./typings/esm.d.mts",
"import": "./esm.mjs"
}
},
"devDependencies": {
"@types/jest": "^29.2.4",
"@types/node": "^20.2.5",
"@typescript-eslint/eslint-plugin": "^5.47.1",
"@typescript-eslint/parser": "^5.47.1",
"eslint": "^8.30.0",
"eslint-config-standard": "^17.0.0",
"eslint-config-standard-with-typescript": "^33.0.0",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-jest": "^27.1.7",
"eslint-plugin-n": "^15.6.0",
"eslint-plugin-promise": "^6.1.1",
"jest": "^29.3.1",
"ts-jest": "^29.0.3",
"tsd": "^0.28.1",
"typescript": "^5.0.4"
},
"types": "typings/index.d.ts",
"engines": {
"node": ">=16"
},
"support": true
}

3
node_modules/commander/typings/esm.d.mts generated vendored Normal file
View File

@ -0,0 +1,3 @@
// Just reexport the types from cjs
// This is a bit indirect. There is not an index.js, but TypeScript will look for index.d.ts for types.
export * from './index.js';

884
node_modules/commander/typings/index.d.ts generated vendored Normal file
View File

@ -0,0 +1,884 @@
// Type definitions for commander
// Original definitions by: Alan Agius <https://github.com/alan-agius4>, Marcelo Dezem <https://github.com/mdezem>, vvakame <https://github.com/vvakame>, Jules Randolph <https://github.com/sveinburne>
// Using method rather than property for method-signature-style, to document method overloads separately. Allow either.
/* eslint-disable @typescript-eslint/method-signature-style */
/* eslint-disable @typescript-eslint/no-explicit-any */
// This is a trick to encourage editor to suggest the known literals while still
// allowing any BaseType value.
// References:
// - https://github.com/microsoft/TypeScript/issues/29729
// - https://github.com/sindresorhus/type-fest/blob/main/source/literal-union.d.ts
// - https://github.com/sindresorhus/type-fest/blob/main/source/primitive.d.ts
type LiteralUnion<LiteralType, BaseType extends string | number> = LiteralType | (BaseType & Record<never, never>);
/** Error raised by Commander, carrying exit-code and error-id metadata. */
export class CommanderError extends Error {
  code: string;
  exitCode: number;
  message: string;
  // set later by callers that wrap an underlying failure
  nestedError?: string;
  /**
   * Constructs the CommanderError class
   * @param exitCode - suggested exit code which could be used with process.exit
   * @param code - an id string representing the error
   * @param message - human-readable description of the error
   * @constructor
   */
  constructor(exitCode: number, code: string, message: string);
}
/** Error raised for an invalid command-line argument or option-argument. */
export class InvalidArgumentError extends CommanderError {
  /**
   * Constructs the InvalidArgumentError class
   * @param message - explanation of why argument is invalid
   * @constructor
   */
  constructor(message: string);
}
export { InvalidArgumentError as InvalidOptionArgumentError }; // deprecated old name
/** Optional parameter for `error()`, overriding the exit metadata. */
export interface ErrorOptions { // optional parameter for error()
  /** an id string representing the error */
  code?: string;
  /** suggested exit code which could be used with process.exit */
  exitCode?: number;
}
/** A positional command-line argument: name, description, and parsing behaviour. */
export class Argument {
  description: string;
  required: boolean;
  variadic: boolean;
  defaultValue?: any;
  defaultValueDescription?: string;
  argChoices?: string[];
  /**
   * Initialize a new command argument with the given name and description.
   * The default is that the argument is required, and you can explicitly
   * indicate this with <> around the name. Put [] around the name for an optional argument.
   */
  constructor(arg: string, description?: string);
  /**
   * Return argument name.
   */
  name(): string;
  /**
   * Set the default value, and optionally supply the description to be displayed in the help.
   */
  default(value: unknown, description?: string): this;
  /**
   * Set the custom handler for processing CLI command arguments into argument values.
   */
  argParser<T>(fn: (value: string, previous: T) => T): this;
  /**
   * Only allow argument value to be one of choices.
   */
  choices(values: readonly string[]): this;
  /**
   * Make argument required.
   */
  argRequired(): this;
  /**
   * Make argument optional.
   */
  argOptional(): this;
}
/** A command-line option: flags, parsing behaviour, and help metadata. */
export class Option {
  flags: string;
  description: string;
  required: boolean; // A value must be supplied when the option is specified.
  optional: boolean; // A value is optional when the option is specified.
  variadic: boolean;
  mandatory: boolean; // The option must have a value after parsing, which usually means it must be specified on command line.
  short?: string;
  long?: string;
  negate: boolean;
  defaultValue?: any;
  defaultValueDescription?: string;
  presetArg?: unknown;
  envVar?: string;
  parseArg?: <T>(value: string, previous: T) => T;
  hidden: boolean;
  argChoices?: string[];
  constructor(flags: string, description?: string);
  /**
   * Set the default value, and optionally supply the description to be displayed in the help.
   */
  default(value: unknown, description?: string): this;
  /**
   * Preset to use when option used without option-argument, especially optional but also boolean and negated.
   * The custom processing (parseArg) is called.
   *
   * @example
   * ```ts
   * new Option('--color').default('GREYSCALE').preset('RGB');
   * new Option('--donate [amount]').preset('20').argParser(parseFloat);
   * ```
   */
  preset(arg: unknown): this;
  /**
   * Add option name(s) that conflict with this option.
   * An error will be displayed if conflicting options are found during parsing.
   *
   * @example
   * ```ts
   * new Option('--rgb').conflicts('cmyk');
   * new Option('--js').conflicts(['ts', 'jsx']);
   * ```
   */
  conflicts(names: string | string[]): this;
  /**
   * Specify implied option values for when this option is set and the implied options are not.
   *
   * The custom processing (parseArg) is not called on the implied values.
   *
   * @example
   * program
   *   .addOption(new Option('--log', 'write logging information to file'))
   *   .addOption(new Option('--trace', 'log extra details').implies({ log: 'trace.txt' }));
   */
  implies(optionValues: OptionValues): this;
  /**
   * Set environment variable to check for option value.
   *
   * An environment variable is only used if, when processed, the current option value is
   * undefined, or the source of the current value is 'default' or 'config' or 'env'.
   */
  env(name: string): this;
  /**
   * Calculate the full description, including defaultValue etc.
   */
  fullDescription(): string;
  /**
   * Set the custom handler for processing CLI option arguments into option values.
   */
  argParser<T>(fn: (value: string, previous: T) => T): this;
  /**
   * Whether the option is mandatory and must have a value after parsing.
   */
  makeOptionMandatory(mandatory?: boolean): this;
  /**
   * Hide option in help.
   */
  hideHelp(hide?: boolean): this;
  /**
   * Only allow option value to be one of choices.
   */
  choices(values: readonly string[]): this;
  /**
   * Return option name.
   */
  name(): string;
  /**
   * Return option name, in a camelcase format that can be used
   * as a object attribute key.
   */
  attributeName(): string;
  /**
   * Return whether a boolean option.
   *
   * Options are one of boolean, negated, required argument, or optional argument.
   */
  isBoolean(): boolean;
}
export class Help {
/** output helpWidth, long lines are wrapped to fit */
helpWidth?: number;
sortSubcommands: boolean;
sortOptions: boolean;
showGlobalOptions: boolean;
constructor();
/** Get the command term to show in the list of subcommands. */
subcommandTerm(cmd: Command): string;
/** Get the command summary to show in the list of subcommands. */
subcommandDescription(cmd: Command): string;
/** Get the option term to show in the list of options. */
optionTerm(option: Option): string;
/** Get the option description to show in the list of options. */
optionDescription(option: Option): string;
/** Get the argument term to show in the list of arguments. */
argumentTerm(argument: Argument): string;
/** Get the argument description to show in the list of arguments. */
argumentDescription(argument: Argument): string;
/** Get the command usage to be displayed at the top of the built-in help. */
commandUsage(cmd: Command): string;
/** Get the description for the command. */
commandDescription(cmd: Command): string;
/** Get an array of the visible subcommands. Includes a placeholder for the implicit help command, if there is one. */
visibleCommands(cmd: Command): Command[];
/** Get an array of the visible options. Includes a placeholder for the implicit help option, if there is one. */
visibleOptions(cmd: Command): Option[];
/** Get an array of the visible global options. (Not including help.) */
visibleGlobalOptions(cmd: Command): Option[];
/** Get an array of the arguments which have descriptions. */
visibleArguments(cmd: Command): Argument[];
/** Get the longest command term length. */
longestSubcommandTermLength(cmd: Command, helper: Help): number;
/** Get the longest option term length. */
longestOptionTermLength(cmd: Command, helper: Help): number;
/** Get the longest global option term length. */
longestGlobalOptionTermLength(cmd: Command, helper: Help): number;
/** Get the longest argument term length. */
longestArgumentTermLength(cmd: Command, helper: Help): number;
/** Calculate the pad width from the maximum term length. */
padWidth(cmd: Command, helper: Help): number;
/**
* Wrap the given string to width characters per line, with lines after the first indented.
* Do not wrap if insufficient room for wrapping (minColumnWidth), or string is manually formatted.
*/
wrap(str: string, width: number, indent: number, minColumnWidth?: number): string;
/** Generate the built-in help text. */
formatHelp(cmd: Command, helper: Help): string;
}
export type HelpConfiguration = Partial<Help>;
export interface ParseOptions {
from: 'node' | 'electron' | 'user';
}
export interface HelpContext { // optional parameter for .help() and .outputHelp()
error: boolean;
}
export interface AddHelpTextContext { // passed to text function used with .addHelpText()
error: boolean;
command: Command;
}
export interface OutputConfiguration {
writeOut?(str: string): void;
writeErr?(str: string): void;
getOutHelpWidth?(): number;
getErrHelpWidth?(): number;
outputError?(str: string, write: (str: string) => void): void;
}
export type AddHelpTextPosition = 'beforeAll' | 'before' | 'after' | 'afterAll';
export type HookEvent = 'preSubcommand' | 'preAction' | 'postAction';
// The source is a string so author can define their own too.
export type OptionValueSource = LiteralUnion<'default' | 'config' | 'env' | 'cli' | 'implied', string> | undefined;
export type OptionValues = Record<string, any>;
export class Command {
args: string[];
processedArgs: any[];
readonly commands: readonly Command[];
readonly options: readonly Option[];
readonly registeredArguments: readonly Argument[];
parent: Command | null;
constructor(name?: string);
/**
* Set the program version to `str`.
*
* This method auto-registers the "-V, --version" flag
* which will print the version number when passed.
*
* You can optionally supply the flags and description to override the defaults.
*/
version(str: string, flags?: string, description?: string): this;
/**
* Get the program version.
*/
version(): string | undefined;
/**
* Define a command, implemented using an action handler.
*
* @remarks
* The command description is supplied using `.description`, not as a parameter to `.command`.
*
* @example
* ```ts
* program
* .command('clone <source> [destination]')
* .description('clone a repository into a newly created directory')
* .action((source, destination) => {
* console.log('clone command called');
* });
* ```
*
* @param nameAndArgs - command name and arguments, args are `<required>` or `[optional]` and last may also be `variadic...`
* @param opts - configuration options
* @returns new command
*/
command(nameAndArgs: string, opts?: CommandOptions): ReturnType<this['createCommand']>;
/**
* Define a command, implemented in a separate executable file.
*
* @remarks
* The command description is supplied as the second parameter to `.command`.
*
* @example
* ```ts
* program
* .command('start <service>', 'start named service')
* .command('stop [service]', 'stop named service, or all if no name supplied');
* ```
*
* @param nameAndArgs - command name and arguments, args are `<required>` or `[optional]` and last may also be `variadic...`
* @param description - description of executable command
* @param opts - configuration options
* @returns `this` command for chaining
*/
command(nameAndArgs: string, description: string, opts?: ExecutableCommandOptions): this;
/**
* Factory routine to create a new unattached command.
*
* See .command() for creating an attached subcommand, which uses this routine to
* create the command. You can override createCommand to customise subcommands.
*/
createCommand(name?: string): Command;
/**
* Add a prepared subcommand.
*
* See .command() for creating an attached subcommand which inherits settings from its parent.
*
* @returns `this` command for chaining
*/
addCommand(cmd: Command, opts?: CommandOptions): this;
/**
* Factory routine to create a new unattached argument.
*
* See .argument() for creating an attached argument, which uses this routine to
* create the argument. You can override createArgument to return a custom argument.
*/
createArgument(name: string, description?: string): Argument;
/**
* Define argument syntax for command.
*
* The default is that the argument is required, and you can explicitly
* indicate this with <> around the name. Put [] around the name for an optional argument.
*
* @example
* ```
* program.argument('<input-file>');
* program.argument('[output-file]');
* ```
*
* @returns `this` command for chaining
*/
argument<T>(flags: string, description: string, fn: (value: string, previous: T) => T, defaultValue?: T): this;
argument(name: string, description?: string, defaultValue?: unknown): this;
/**
* Define argument syntax for command, adding a prepared argument.
*
* @returns `this` command for chaining
*/
addArgument(arg: Argument): this;
/**
* Define argument syntax for command, adding multiple at once (without descriptions).
*
* See also .argument().
*
* @example
* ```
* program.arguments('<cmd> [env]');
* ```
*
* @returns `this` command for chaining
*/
arguments(names: string): this;
/**
* Override default decision whether to add implicit help command.
*
* @example
* ```
* addHelpCommand() // force on
* addHelpCommand(false); // force off
* addHelpCommand('help [cmd]', 'display help for [cmd]'); // force on with custom details
* ```
*
* @returns `this` command for chaining
*/
addHelpCommand(enableOrNameAndArgs?: string | boolean, description?: string): this;
/**
* Add hook for life cycle event.
*/
hook(event: HookEvent, listener: (thisCommand: Command, actionCommand: Command) => void | Promise<void>): this;
/**
* Register callback to use as replacement for calling process.exit.
*/
exitOverride(callback?: (err: CommanderError) => never | void): this;
/**
* Display error message and exit (or call exitOverride).
*/
error(message: string, errorOptions?: ErrorOptions): never;
/**
* You can customise the help with a subclass of Help by overriding createHelp,
* or by overriding Help properties using configureHelp().
*/
createHelp(): Help;
/**
* You can customise the help by overriding Help properties using configureHelp(),
* or with a subclass of Help by overriding createHelp().
*/
configureHelp(configuration: HelpConfiguration): this;
/** Get configuration */
configureHelp(): HelpConfiguration;
/**
* The default output goes to stdout and stderr. You can customise this for special
* applications. You can also customise the display of errors by overriding outputError.
*
* The configuration properties are all functions:
* ```
* // functions to change where being written, stdout and stderr
* writeOut(str)
* writeErr(str)
* // matching functions to specify width for wrapping help
* getOutHelpWidth()
* getErrHelpWidth()
* // functions based on what is being written out
* outputError(str, write) // used for displaying errors, and not used for displaying help
* ```
*/
configureOutput(configuration: OutputConfiguration): this;
/** Get configuration */
configureOutput(): OutputConfiguration;
/**
* Copy settings that are useful to have in common across root command and subcommands.
*
* (Used internally when adding a command using `.command()` so subcommands inherit parent settings.)
*/
copyInheritedSettings(sourceCommand: Command): this;
/**
* Display the help or a custom message after an error occurs.
*/
showHelpAfterError(displayHelp?: boolean | string): this;
/**
* Display suggestion of similar commands for unknown commands, or options for unknown options.
*/
showSuggestionAfterError(displaySuggestion?: boolean): this;
/**
* Register callback `fn` for the command.
*
* @example
* ```
* program
* .command('serve')
* .description('start service')
* .action(function() {
* // do work here
* });
* ```
*
* @returns `this` command for chaining
*/
action(fn: (...args: any[]) => void | Promise<void>): this;
/**
* Define option with `flags`, `description`, and optional argument parsing function or `defaultValue` or both.
*
* The `flags` string contains the short and/or long flags, separated by comma, a pipe or space. A required
* option-argument is indicated by `<>` and an optional option-argument by `[]`.
*
* See the README for more details, and see also addOption() and requiredOption().
*
* @example
*
* ```js
* program
* .option('-p, --pepper', 'add pepper')
* .option('-p, --pizza-type <TYPE>', 'type of pizza') // required option-argument
* .option('-c, --cheese [CHEESE]', 'add extra cheese', 'mozzarella') // optional option-argument with default
* .option('-t, --tip <VALUE>', 'add tip to purchase cost', parseFloat) // custom parse function
* ```
*
* @returns `this` command for chaining
*/
option(flags: string, description?: string, defaultValue?: string | boolean | string[]): this;
option<T>(flags: string, description: string, parseArg: (value: string, previous: T) => T, defaultValue?: T): this;
/** @deprecated since v7, instead use choices or a custom function */
option(flags: string, description: string, regexp: RegExp, defaultValue?: string | boolean | string[]): this;
/**
* Define a required option, which must have a value after parsing. This usually means
* the option must be specified on the command line. (Otherwise the same as .option().)
*
* The `flags` string contains the short and/or long flags, separated by comma, a pipe or space.
*/
requiredOption(flags: string, description?: string, defaultValue?: string | boolean | string[]): this;
requiredOption<T>(flags: string, description: string, parseArg: (value: string, previous: T) => T, defaultValue?: T): this;
/** @deprecated since v7, instead use choices or a custom function */
requiredOption(flags: string, description: string, regexp: RegExp, defaultValue?: string | boolean | string[]): this;
/**
* Factory routine to create a new unattached option.
*
* See .option() for creating an attached option, which uses this routine to
* create the option. You can override createOption to return a custom option.
*/
createOption(flags: string, description?: string): Option;
/**
* Add a prepared Option.
*
* See .option() and .requiredOption() for creating and attaching an option in a single call.
*/
addOption(option: Option): this;
/**
* Whether to store option values as properties on command object,
* or store separately (specify false). In both cases the option values can be accessed using .opts().
*
* @returns `this` command for chaining
*/
storeOptionsAsProperties<T extends OptionValues>(): this & T;
storeOptionsAsProperties<T extends OptionValues>(storeAsProperties: true): this & T;
storeOptionsAsProperties(storeAsProperties?: boolean): this;
/**
* Retrieve option value.
*/
getOptionValue(key: string): any;
/**
* Store option value.
*/
setOptionValue(key: string, value: unknown): this;
/**
* Store option value and where the value came from.
*/
setOptionValueWithSource(key: string, value: unknown, source: OptionValueSource): this;
/**
* Get source of option value.
*/
getOptionValueSource(key: string): OptionValueSource | undefined;
/**
* Get source of option value. See also .optsWithGlobals().
*/
getOptionValueSourceWithGlobals(key: string): OptionValueSource | undefined;
/**
* Alter parsing of short flags with optional values.
*
* @example
* ```
* // for `.option('-f,--flag [value]'):
* .combineFlagAndOptionalValue(true) // `-f80` is treated like `--flag=80`, this is the default behaviour
* .combineFlagAndOptionalValue(false) // `-fb` is treated like `-f -b`
* ```
*
* @returns `this` command for chaining
*/
combineFlagAndOptionalValue(combine?: boolean): this;
/**
* Allow unknown options on the command line.
*
* @returns `this` command for chaining
*/
allowUnknownOption(allowUnknown?: boolean): this;
/**
* Allow excess command-arguments on the command line. Pass false to make excess arguments an error.
*
* @returns `this` command for chaining
*/
allowExcessArguments(allowExcess?: boolean): this;
/**
* Enable positional options. Positional means global options are specified before subcommands which lets
* subcommands reuse the same option names, and also enables subcommands to turn on passThroughOptions.
*
* The default behaviour is non-positional and global options may appear anywhere on the command line.
*
* @returns `this` command for chaining
*/
enablePositionalOptions(positional?: boolean): this;
/**
* Pass through options that come after command-arguments rather than treat them as command-options,
* so actual command-options come before command-arguments. Turning this on for a subcommand requires
* positional options to have been enabled on the program (parent commands).
*
* The default behaviour is non-positional and options may appear before or after command-arguments.
*
* @returns `this` command for chaining
*/
passThroughOptions(passThrough?: boolean): this;
/**
* Parse `argv`, setting options and invoking commands when defined.
*
* The default expectation is that the arguments are from node and have the application as argv[0]
* and the script being run in argv[1], with user parameters after that.
*
* @example
* ```
* program.parse(process.argv);
* program.parse(); // implicitly use process.argv and auto-detect node vs electron conventions
* program.parse(my-args, { from: 'user' }); // just user supplied arguments, nothing special about argv[0]
* ```
*
* @returns `this` command for chaining
*/
parse(argv?: readonly string[], options?: ParseOptions): this;
/**
* Parse `argv`, setting options and invoking commands when defined.
*
* Use parseAsync instead of parse if any of your action handlers are async. Returns a Promise.
*
* The default expectation is that the arguments are from node and have the application as argv[0]
* and the script being run in argv[1], with user parameters after that.
*
* @example
* ```
* program.parseAsync(process.argv);
* program.parseAsync(); // implicitly use process.argv and auto-detect node vs electron conventions
* program.parseAsync(my-args, { from: 'user' }); // just user supplied arguments, nothing special about argv[0]
* ```
*
* @returns Promise
*/
parseAsync(argv?: readonly string[], options?: ParseOptions): Promise<this>;
/**
* Parse options from `argv` removing known options,
* and return argv split into operands and unknown arguments.
*
* argv => operands, unknown
* --known kkk op => [op], []
* op --known kkk => [op], []
* sub --unknown uuu op => [sub], [--unknown uuu op]
* sub -- --unknown uuu op => [sub --unknown uuu op], []
*/
parseOptions(argv: string[]): ParseOptionsResult;
/**
* Return an object containing local option values as key-value pairs
*/
opts<T extends OptionValues>(): T;
/**
* Return an object containing merged local and global option values as key-value pairs.
*/
optsWithGlobals<T extends OptionValues>(): T;
/**
* Set the description.
*
* @returns `this` command for chaining
*/
description(str: string): this;
/** @deprecated since v8, instead use .argument to add command argument with description */
description(str: string, argsDescription: Record<string, string>): this;
/**
* Get the description.
*/
description(): string;
/**
* Set the summary. Used when listed as subcommand of parent.
*
* @returns `this` command for chaining
*/
summary(str: string): this;
/**
* Get the summary.
*/
summary(): string;
/**
* Set an alias for the command.
*
* You may call more than once to add multiple aliases. Only the first alias is shown in the auto-generated help.
*
* @returns `this` command for chaining
*/
alias(alias: string): this;
/**
* Get alias for the command.
*/
alias(): string;
/**
* Set aliases for the command.
*
* Only the first alias is shown in the auto-generated help.
*
* @returns `this` command for chaining
*/
aliases(aliases: readonly string[]): this;
/**
* Get aliases for the command.
*/
aliases(): string[];
/**
* Set the command usage.
*
* @returns `this` command for chaining
*/
usage(str: string): this;
/**
* Get the command usage.
*/
usage(): string;
/**
* Set the name of the command.
*
* @returns `this` command for chaining
*/
name(str: string): this;
/**
* Get the name of the command.
*/
name(): string;
/**
* Set the name of the command from script filename, such as process.argv[1],
* or require.main.filename, or __filename.
*
* (Used internally and public although not documented in README.)
*
* @example
* ```ts
* program.nameFromFilename(require.main.filename);
* ```
*
* @returns `this` command for chaining
*/
nameFromFilename(filename: string): this;
/**
* Set the directory for searching for executable subcommands of this command.
*
* @example
* ```ts
* program.executableDir(__dirname);
* // or
* program.executableDir('subcommands');
* ```
*
* @returns `this` command for chaining
*/
executableDir(path: string): this;
/**
* Get the executable search directory.
*/
executableDir(): string | null;
/**
* Output help information for this command.
*
* Outputs built-in help, and custom text added using `.addHelpText()`.
*
*/
outputHelp(context?: HelpContext): void;
/** @deprecated since v7 */
outputHelp(cb?: (str: string) => string): void;
/**
* Return command help documentation.
*/
helpInformation(context?: HelpContext): string;
/**
* You can pass in flags and a description to override the help
* flags and help description for your command. Pass in false
* to disable the built-in help option.
*/
helpOption(flags?: string | boolean, description?: string): this;
/**
* Output help information and exit.
*
* Outputs built-in help, and custom text added using `.addHelpText()`.
*/
help(context?: HelpContext): never;
/** @deprecated since v7 */
help(cb?: (str: string) => string): never;
/**
* Add additional text to be displayed with the built-in help.
*
* Position is 'before' or 'after' to affect just this command,
* and 'beforeAll' or 'afterAll' to affect this command and all its subcommands.
*/
addHelpText(position: AddHelpTextPosition, text: string): this;
addHelpText(position: AddHelpTextPosition, text: (context: AddHelpTextContext) => string): this;
/**
* Add a listener (callback) for when events occur. (Implemented using EventEmitter.)
*/
on(event: string | symbol, listener: (...args: any[]) => void): this;
}
export interface CommandOptions {
hidden?: boolean;
isDefault?: boolean;
/** @deprecated since v7, replaced by hidden */
noHelp?: boolean;
}
export interface ExecutableCommandOptions extends CommandOptions {
executableFile?: string;
}
export interface ParseOptionsResult {
operands: string[];
unknown: string[];
}
export function createCommand(name?: string): Command;
export function createOption(flags: string, description?: string): Option;
export function createArgument(name: string, description?: string): Argument;
export const program: Command;

15
node_modules/fs-extra/LICENSE generated vendored Normal file
View File

@ -0,0 +1,15 @@
(The MIT License)
Copyright (c) 2011-2024 JP Richardson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

292
node_modules/fs-extra/README.md generated vendored Normal file
View File

@ -0,0 +1,292 @@
Node.js: fs-extra
=================
`fs-extra` adds file system methods that aren't included in the native `fs` module and adds promise support to the `fs` methods. It also uses [`graceful-fs`](https://github.com/isaacs/node-graceful-fs) to prevent `EMFILE` errors. It should be a drop in replacement for `fs`.
[![npm Package](https://img.shields.io/npm/v/fs-extra.svg)](https://www.npmjs.org/package/fs-extra)
[![License](https://img.shields.io/npm/l/fs-extra.svg)](https://github.com/jprichardson/node-fs-extra/blob/master/LICENSE)
[![build status](https://img.shields.io/github/actions/workflow/status/jprichardson/node-fs-extra/ci.yml?branch=master)](https://github.com/jprichardson/node-fs-extra/actions/workflows/ci.yml?query=branch%3Amaster)
[![downloads per month](http://img.shields.io/npm/dm/fs-extra.svg)](https://www.npmjs.org/package/fs-extra)
[![JavaScript Style Guide](https://img.shields.io/badge/code_style-standard-brightgreen.svg)](https://standardjs.com)
Why?
----
I got tired of including `mkdirp`, `rimraf`, and `ncp` in most of my projects.
Installation
------------
npm install fs-extra
Usage
-----
### CommonJS
`fs-extra` is a drop in replacement for native `fs`. All methods in `fs` are attached to `fs-extra`. All `fs` methods return promises if the callback isn't passed.
You don't ever need to include the original `fs` module again:
```js
const fs = require('fs') // this is no longer necessary
```
you can now do this:
```js
const fs = require('fs-extra')
```
or if you prefer to make it clear that you're using `fs-extra` and not `fs`, you may want
to name your `fs` variable `fse` like so:
```js
const fse = require('fs-extra')
```
you can also keep both, but it's redundant:
```js
const fs = require('fs')
const fse = require('fs-extra')
```
### ESM
There is also an `fs-extra/esm` import that supports both default and named exports. However, note that `fs` methods are not included in `fs-extra/esm`; you still need to import `fs` and/or `fs/promises` separately:
```js
import { readFileSync } from 'fs'
import { readFile } from 'fs/promises'
import { outputFile, outputFileSync } from 'fs-extra/esm'
```
Default exports are supported:
```js
import fs from 'fs'
import fse from 'fs-extra/esm'
// fse.readFileSync is not a function; must use fs.readFileSync
```
but you probably want to just use regular `fs-extra` instead of `fs-extra/esm` for default exports:
```js
import fs from 'fs-extra'
// both fs and fs-extra methods are defined
```
Sync vs Async vs Async/Await
-------------
Most methods are async by default. All async methods will return a promise if the callback isn't passed.
Sync methods on the other hand will throw if an error occurs.
Also Async/Await will throw an error if one occurs.
Example:
```js
const fs = require('fs-extra')
// Async with promises:
fs.copy('/tmp/myfile', '/tmp/mynewfile')
.then(() => console.log('success!'))
.catch(err => console.error(err))
// Async with callbacks:
fs.copy('/tmp/myfile', '/tmp/mynewfile', err => {
if (err) return console.error(err)
console.log('success!')
})
// Sync:
try {
fs.copySync('/tmp/myfile', '/tmp/mynewfile')
console.log('success!')
} catch (err) {
console.error(err)
}
// Async/Await:
async function copyFiles () {
try {
await fs.copy('/tmp/myfile', '/tmp/mynewfile')
console.log('success!')
} catch (err) {
console.error(err)
}
}
copyFiles()
```
Methods
-------
### Async
- [copy](docs/copy.md)
- [emptyDir](docs/emptyDir.md)
- [ensureFile](docs/ensureFile.md)
- [ensureDir](docs/ensureDir.md)
- [ensureLink](docs/ensureLink.md)
- [ensureSymlink](docs/ensureSymlink.md)
- [mkdirp](docs/ensureDir.md)
- [mkdirs](docs/ensureDir.md)
- [move](docs/move.md)
- [outputFile](docs/outputFile.md)
- [outputJson](docs/outputJson.md)
- [pathExists](docs/pathExists.md)
- [readJson](docs/readJson.md)
- [remove](docs/remove.md)
- [writeJson](docs/writeJson.md)
### Sync
- [copySync](docs/copy-sync.md)
- [emptyDirSync](docs/emptyDir-sync.md)
- [ensureFileSync](docs/ensureFile-sync.md)
- [ensureDirSync](docs/ensureDir-sync.md)
- [ensureLinkSync](docs/ensureLink-sync.md)
- [ensureSymlinkSync](docs/ensureSymlink-sync.md)
- [mkdirpSync](docs/ensureDir-sync.md)
- [mkdirsSync](docs/ensureDir-sync.md)
- [moveSync](docs/move-sync.md)
- [outputFileSync](docs/outputFile-sync.md)
- [outputJsonSync](docs/outputJson-sync.md)
- [pathExistsSync](docs/pathExists-sync.md)
- [readJsonSync](docs/readJson-sync.md)
- [removeSync](docs/remove-sync.md)
- [writeJsonSync](docs/writeJson-sync.md)
**NOTE:** You can still use the native Node.js methods. They are promisified and copied over to `fs-extra`. See [notes on `fs.read()`, `fs.write()`, & `fs.writev()`](docs/fs-read-write-writev.md)
### What happened to `walk()` and `walkSync()`?
They were removed from `fs-extra` in v2.0.0. If you need the functionality, `walk` and `walkSync` are available as separate packages, [`klaw`](https://github.com/jprichardson/node-klaw) and [`klaw-sync`](https://github.com/manidlou/node-klaw-sync).
Third Party
-----------
### CLI
[fse-cli](https://www.npmjs.com/package/@atao60/fse-cli) allows you to run `fs-extra` from a console or from [npm](https://www.npmjs.com) scripts.
### TypeScript
If you like TypeScript, you can use `fs-extra` with it: https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/fs-extra
### File / Directory Watching
If you want to watch for changes to files or directories, then you should use [chokidar](https://github.com/paulmillr/chokidar).
### Obtain Filesystem (Devices, Partitions) Information
[fs-filesystem](https://github.com/arthurintelligence/node-fs-filesystem) allows you to read the state of the filesystem of the host on which it is run. It returns information about both the devices and the partitions (volumes) of the system.
### Misc.
- [fs-extra-debug](https://github.com/jdxcode/fs-extra-debug) - Send your fs-extra calls to [debug](https://npmjs.org/package/debug).
- [mfs](https://github.com/cadorn/mfs) - Monitor your fs-extra calls.
Hacking on fs-extra
-------------------
Wanna hack on `fs-extra`? Great! Your help is needed! [fs-extra is one of the most depended upon Node.js packages](http://nodei.co/npm/fs-extra.png?downloads=true&downloadRank=true&stars=true). This project
uses [JavaScript Standard Style](https://github.com/feross/standard) - if the name or style choices bother you,
you're gonna have to get over it :) If `standard` is good enough for `npm`, it's good enough for `fs-extra`.
[![js-standard-style](https://cdn.rawgit.com/feross/standard/master/badge.svg)](https://github.com/feross/standard)
What's needed?
- First, take a look at existing issues. Those are probably going to be where the priority lies.
- More tests for edge cases. Specifically on different platforms. There can never be enough tests.
- Improve test coverage.
Note: If you make any big changes, **you should definitely file an issue for discussion first.**
### Running the Test Suite
fs-extra contains hundreds of tests.
- `npm run lint`: runs the linter ([standard](http://standardjs.com/))
- `npm run unit`: runs the unit tests
- `npm run unit-esm`: runs tests for `fs-extra/esm` exports
- `npm test`: runs the linter and all tests
When running unit tests, set the environment variable `CROSS_DEVICE_PATH` to the absolute path of an empty directory on another device (like a thumb drive) to enable cross-device move tests.
### Windows
If you run the tests on the Windows and receive a lot of symbolic link `EPERM` permission errors, it's
because on Windows you need elevated privilege to create symbolic links. You can add this to your Windows's
account by following the instructions here: http://superuser.com/questions/104845/permission-to-make-symbolic-links-in-windows-7
However, I didn't have much luck doing this.
Since I develop on Mac OS X, I use VMWare Fusion for Windows testing. I create a shared folder that I map to a drive on Windows.
I open the `Node.js command prompt` and run as `Administrator`. I then map the network drive running the following command:
net use z: "\\vmware-host\Shared Folders"
I can then navigate to my `fs-extra` directory and run the tests.
Naming
------
I put a lot of thought into the naming of these functions. Inspired by @coolaj86's request. So he deserves much of the credit for raising the issue. See discussion(s) here:
* https://github.com/jprichardson/node-fs-extra/issues/2
* https://github.com/flatiron/utile/issues/11
* https://github.com/ryanmcgrath/wrench-js/issues/29
* https://github.com/substack/node-mkdirp/issues/17
First, I believe that in as many cases as possible, the [Node.js naming schemes](http://nodejs.org/api/fs.html) should be chosen. However, there are problems with the Node.js own naming schemes.
For example, `fs.readFile()` and `fs.readdir()`: the **F** is capitalized in *File* and the **d** is not capitalized in *dir*. Perhaps a bit pedantic, but they should still be consistent. Also, Node.js has chosen a lot of POSIX naming schemes, which I believe is great. See: `fs.mkdir()`, `fs.rmdir()`, `fs.chown()`, etc.
We have a dilemma though. How do you consistently name methods that perform the following POSIX commands: `cp`, `cp -r`, `mkdir -p`, and `rm -rf`?
My perspective: when in doubt, err on the side of simplicity. A directory is just a hierarchical grouping of directories and files. Consider that for a moment. So when you want to copy it or remove it, in most cases you'll want to copy or remove all of its contents. When you want to create a directory, if the directory that it's supposed to be contained in does not exist, then in most cases you'll want to create that too.
So, if you want to remove a file or a directory regardless of whether it has contents, just call `fs.remove(path)`. If you want to copy a file or a directory whether it has contents, just call `fs.copy(source, destination)`. If you want to create a directory regardless of whether its parent directories exist, just call `fs.mkdirs(path)` or `fs.mkdirp(path)`.
Credit
------
`fs-extra` wouldn't be possible without using the modules from the following authors:
- [Isaac Schlueter](https://github.com/isaacs)
- [Charlie McConnel](https://github.com/avianflu)
- [James Halliday](https://github.com/substack)
- [Andrew Kelley](https://github.com/andrewrk)
License
-------
Licensed under MIT
Copyright (c) 2011-2024 [JP Richardson](https://github.com/jprichardson)
[1]: http://nodejs.org/docs/latest/api/fs.html
[jsonfile]: https://github.com/jprichardson/node-jsonfile

171
node_modules/fs-extra/lib/copy/copy-sync.js generated vendored Normal file
View File

@ -0,0 +1,171 @@
'use strict'
// Synchronous recursive copy (fs-extra's copySync). Vendored third-party
// code: behaviour must stay exactly compatible with upstream fs-extra.
const fs = require('graceful-fs')
const path = require('path')
const mkdirsSync = require('../mkdirs').mkdirsSync
const utimesMillisSync = require('../util/utimes').utimesMillisSync
const stat = require('../util/stat')

// Entry point: normalize options, validate the src/dest relationship,
// ensure dest's parent exists, then dispatch on the type of `src`.
function copySync (src, dest, opts) {
  // A bare function argument is shorthand for { filter: fn }
  if (typeof opts === 'function') {
    opts = { filter: opts }
  }
  opts = opts || {}
  opts.clobber = 'clobber' in opts ? !!opts.clobber : true // default to true for now
  opts.overwrite = 'overwrite' in opts ? !!opts.overwrite : opts.clobber // overwrite falls back to clobber
  // Warn about using preserveTimestamps on 32-bit node
  if (opts.preserveTimestamps && process.arch === 'ia32') {
    process.emitWarning(
      'Using the preserveTimestamps option in 32-bit node is not recommended;\n\n' +
      '\tsee https://github.com/jprichardson/node-fs-extra/issues/269',
      'Warning', 'fs-extra-WARN0002'
    )
  }
  // checkPathsSync rejects copying onto the same inode / into a subdir of src
  const { srcStat, destStat } = stat.checkPathsSync(src, dest, 'copy', opts)
  stat.checkParentPathsSync(src, srcStat, dest, 'copy')
  // Filter applies to the top-level path too, not just directory entries
  if (opts.filter && !opts.filter(src, dest)) return
  const destParent = path.dirname(dest)
  if (!fs.existsSync(destParent)) mkdirsSync(destParent)
  return getStats(destStat, src, dest, opts)
}

// Stat `src` (following symlinks only when opts.dereference) and route to
// the handler for its file type; sockets and FIFOs cannot be copied.
function getStats (destStat, src, dest, opts) {
  const statSync = opts.dereference ? fs.statSync : fs.lstatSync
  const srcStat = statSync(src)
  if (srcStat.isDirectory()) return onDir(srcStat, destStat, src, dest, opts)
  else if (srcStat.isFile() ||
           srcStat.isCharacterDevice() ||
           srcStat.isBlockDevice()) return onFile(srcStat, destStat, src, dest, opts)
  else if (srcStat.isSymbolicLink()) return onLink(destStat, src, dest, opts)
  else if (srcStat.isSocket()) throw new Error(`Cannot copy a socket file: ${src}`)
  else if (srcStat.isFIFO()) throw new Error(`Cannot copy a FIFO pipe: ${src}`)
  throw new Error(`Unknown file: ${src}`)
}

// Copy a regular file; when dest already exists defer to overwrite handling.
function onFile (srcStat, destStat, src, dest, opts) {
  if (!destStat) return copyFile(srcStat, src, dest, opts)
  return mayCopyFile(srcStat, src, dest, opts)
}

// Dest exists: overwrite replaces it, errorOnExist throws, otherwise no-op.
function mayCopyFile (srcStat, src, dest, opts) {
  if (opts.overwrite) {
    fs.unlinkSync(dest)
    return copyFile(srcStat, src, dest, opts)
  } else if (opts.errorOnExist) {
    throw new Error(`'${dest}' already exists`)
  }
}

// Perform the actual file copy, then optionally preserve timestamps and
// always replicate the source mode onto dest.
function copyFile (srcStat, src, dest, opts) {
  fs.copyFileSync(src, dest)
  if (opts.preserveTimestamps) handleTimestamps(srcStat.mode, src, dest)
  return setDestMode(dest, srcStat.mode)
}

function handleTimestamps (srcMode, src, dest) {
  // Make sure the file is writable before setting the timestamp
  // otherwise open fails with EPERM when invoked with 'r+'
  // (through utimes call)
  if (fileIsNotWritable(srcMode)) makeFileWritable(dest, srcMode)
  return setDestTimestamps(src, dest)
}

// True when the owner-write bit (0o200) is not set on the mode.
function fileIsNotWritable (srcMode) {
  return (srcMode & 0o200) === 0
}

function makeFileWritable (dest, srcMode) {
  return setDestMode(dest, srcMode | 0o200)
}

function setDestMode (dest, srcMode) {
  return fs.chmodSync(dest, srcMode)
}

function setDestTimestamps (src, dest) {
  // The initial srcStat.atime cannot be trusted
  // because it is modified by the read(2) system call
  // (See https://nodejs.org/api/fs.html#fs_stat_time_values)
  const updatedSrcStat = fs.statSync(src)
  return utimesMillisSync(dest, updatedSrcStat.atime, updatedSrcStat.mtime)
}

// Copy a directory: create it first if missing, then copy its entries.
function onDir (srcStat, destStat, src, dest, opts) {
  if (!destStat) return mkDirAndCopy(srcStat.mode, src, dest, opts)
  return copyDir(src, dest, opts)
}

// Mode is applied after the contents are copied so a read-only source
// directory does not block writing its children into dest.
function mkDirAndCopy (srcMode, src, dest, opts) {
  fs.mkdirSync(dest)
  copyDir(src, dest, opts)
  return setDestMode(dest, srcMode)
}

// Iterate directory entries with opendirSync to avoid buffering the whole
// listing; the dir handle is always closed, even if a copy throws.
function copyDir (src, dest, opts) {
  const dir = fs.opendirSync(src)
  try {
    let dirent
    while ((dirent = dir.readSync()) !== null) {
      copyDirItem(dirent.name, src, dest, opts)
    }
  } finally {
    dir.closeSync()
  }
}

// Copy one directory entry, honouring the filter and re-validating paths.
function copyDirItem (item, src, dest, opts) {
  const srcItem = path.join(src, item)
  const destItem = path.join(dest, item)
  if (opts.filter && !opts.filter(srcItem, destItem)) return
  const { destStat } = stat.checkPathsSync(srcItem, destItem, 'copy', opts)
  return getStats(destStat, srcItem, destItem, opts)
}

// Copy a symlink. With dereference, link targets are resolved against the
// current working directory before comparison/creation.
function onLink (destStat, src, dest, opts) {
  let resolvedSrc = fs.readlinkSync(src)
  if (opts.dereference) {
    resolvedSrc = path.resolve(process.cwd(), resolvedSrc)
  }
  if (!destStat) {
    return fs.symlinkSync(resolvedSrc, dest)
  } else {
    let resolvedDest
    try {
      resolvedDest = fs.readlinkSync(dest)
    } catch (err) {
      // dest exists and is a regular file or directory,
      // Windows may throw UNKNOWN error. If dest already exists,
      // fs throws error anyway, so no need to guard against it here.
      if (err.code === 'EINVAL' || err.code === 'UNKNOWN') return fs.symlinkSync(resolvedSrc, dest)
      throw err
    }
    if (opts.dereference) {
      resolvedDest = path.resolve(process.cwd(), resolvedDest)
    }
    if (stat.isSrcSubdir(resolvedSrc, resolvedDest)) {
      throw new Error(`Cannot copy '${resolvedSrc}' to a subdirectory of itself, '${resolvedDest}'.`)
    }
    // prevent copy if src is a subdir of dest since unlinking
    // dest in this case would result in removing src contents
    // and therefore a broken symlink would be created.
    if (stat.isSrcSubdir(resolvedDest, resolvedSrc)) {
      throw new Error(`Cannot overwrite '${resolvedDest}' with '${resolvedSrc}'.`)
    }
    return copyLink(resolvedSrc, dest)
  }
}

// Replace an existing symlink at dest with one pointing at resolvedSrc.
function copyLink (resolvedSrc, dest) {
  fs.unlinkSync(dest)
  return fs.symlinkSync(resolvedSrc, dest)
}

module.exports = copySync

182
node_modules/fs-extra/lib/copy/copy.js generated vendored Normal file
View File

@ -0,0 +1,182 @@
'use strict'
// Asynchronous recursive copy (fs-extra's copy). Vendored third-party
// code: behaviour must stay exactly compatible with upstream fs-extra.
const fs = require('../fs')
const path = require('path')
const { mkdirs } = require('../mkdirs')
const { pathExists } = require('../path-exists')
const { utimesMillis } = require('../util/utimes')
const stat = require('../util/stat')

// Entry point: normalize options, validate the src/dest relationship,
// ensure dest's parent exists, then dispatch on the type of `src`.
async function copy (src, dest, opts = {}) {
  // A bare function argument is shorthand for { filter: fn }
  if (typeof opts === 'function') {
    opts = { filter: opts }
  }
  opts.clobber = 'clobber' in opts ? !!opts.clobber : true // default to true for now
  opts.overwrite = 'overwrite' in opts ? !!opts.overwrite : opts.clobber // overwrite falls back to clobber
  // Warn about using preserveTimestamps on 32-bit node
  if (opts.preserveTimestamps && process.arch === 'ia32') {
    process.emitWarning(
      'Using the preserveTimestamps option in 32-bit node is not recommended;\n\n' +
      '\tsee https://github.com/jprichardson/node-fs-extra/issues/269',
      'Warning', 'fs-extra-WARN0001'
    )
  }
  // checkPaths rejects copying onto the same inode / into a subdir of src
  const { srcStat, destStat } = await stat.checkPaths(src, dest, 'copy', opts)
  await stat.checkParentPaths(src, srcStat, dest, 'copy')
  const include = await runFilter(src, dest, opts)
  if (!include) return
  // check if the parent of dest exists, and create it if it doesn't exist
  const destParent = path.dirname(dest)
  const dirExists = await pathExists(destParent)
  if (!dirExists) {
    await mkdirs(destParent)
  }
  await getStatsAndPerformCopy(destStat, src, dest, opts)
}

// Apply the user filter; absence of a filter means "include everything".
// The filter may return a boolean or a promise of one.
async function runFilter (src, dest, opts) {
  if (!opts.filter) return true
  return opts.filter(src, dest)
}

// Stat `src` (following symlinks only when opts.dereference) and route to
// the handler for its file type; sockets and FIFOs cannot be copied.
async function getStatsAndPerformCopy (destStat, src, dest, opts) {
  const statFn = opts.dereference ? fs.stat : fs.lstat
  const srcStat = await statFn(src)
  if (srcStat.isDirectory()) return onDir(srcStat, destStat, src, dest, opts)
  if (
    srcStat.isFile() ||
    srcStat.isCharacterDevice() ||
    srcStat.isBlockDevice()
  ) return onFile(srcStat, destStat, src, dest, opts)
  if (srcStat.isSymbolicLink()) return onLink(destStat, src, dest, opts)
  if (srcStat.isSocket()) throw new Error(`Cannot copy a socket file: ${src}`)
  if (srcStat.isFIFO()) throw new Error(`Cannot copy a FIFO pipe: ${src}`)
  throw new Error(`Unknown file: ${src}`)
}

// Copy a regular file. Dest present: overwrite replaces it, errorOnExist
// throws, otherwise silently do nothing.
async function onFile (srcStat, destStat, src, dest, opts) {
  if (!destStat) return copyFile(srcStat, src, dest, opts)
  if (opts.overwrite) {
    await fs.unlink(dest)
    return copyFile(srcStat, src, dest, opts)
  }
  if (opts.errorOnExist) {
    throw new Error(`'${dest}' already exists`)
  }
}

// Perform the actual file copy, then optionally preserve timestamps and
// always replicate the source mode onto dest.
async function copyFile (srcStat, src, dest, opts) {
  await fs.copyFile(src, dest)
  if (opts.preserveTimestamps) {
    // Make sure the file is writable before setting the timestamp
    // otherwise open fails with EPERM when invoked with 'r+'
    // (through utimes call)
    if (fileIsNotWritable(srcStat.mode)) {
      await makeFileWritable(dest, srcStat.mode)
    }
    // Set timestamps and mode correspondingly
    // Note that The initial srcStat.atime cannot be trusted
    // because it is modified by the read(2) system call
    // (See https://nodejs.org/api/fs.html#fs_stat_time_values)
    const updatedSrcStat = await fs.stat(src)
    await utimesMillis(dest, updatedSrcStat.atime, updatedSrcStat.mtime)
  }
  return fs.chmod(dest, srcStat.mode)
}

// True when the owner-write bit (0o200) is not set on the mode.
function fileIsNotWritable (srcMode) {
  return (srcMode & 0o200) === 0
}

function makeFileWritable (dest, srcMode) {
  return fs.chmod(dest, srcMode | 0o200)
}

// Copy a directory: create it if missing, copy entries concurrently, and
// only apply the source mode when we created the directory ourselves.
async function onDir (srcStat, destStat, src, dest, opts) {
  // the dest directory might not exist, create it
  if (!destStat) {
    await fs.mkdir(dest)
  }
  const promises = []
  // loop through the files in the current directory to copy everything
  for await (const item of await fs.opendir(src)) {
    const srcItem = path.join(src, item.name)
    const destItem = path.join(dest, item.name)
    promises.push(
      runFilter(srcItem, destItem, opts).then(include => {
        if (include) {
          // only copy the item if it matches the filter function
          return stat.checkPaths(srcItem, destItem, 'copy', opts).then(({ destStat }) => {
            // If the item is a copyable file, `getStatsAndPerformCopy` will copy it
            // If the item is a directory, `getStatsAndPerformCopy` will call `onDir` recursively
            return getStatsAndPerformCopy(destStat, srcItem, destItem, opts)
          })
        }
      })
    )
  }
  await Promise.all(promises)
  if (!destStat) {
    await fs.chmod(dest, srcStat.mode)
  }
}

// Copy a symlink. With dereference, link targets are resolved against the
// current working directory before comparison/creation.
async function onLink (destStat, src, dest, opts) {
  let resolvedSrc = await fs.readlink(src)
  if (opts.dereference) {
    resolvedSrc = path.resolve(process.cwd(), resolvedSrc)
  }
  if (!destStat) {
    return fs.symlink(resolvedSrc, dest)
  }
  let resolvedDest = null
  try {
    resolvedDest = await fs.readlink(dest)
  } catch (e) {
    // dest exists and is a regular file or directory,
    // Windows may throw UNKNOWN error. If dest already exists,
    // fs throws error anyway, so no need to guard against it here.
    if (e.code === 'EINVAL' || e.code === 'UNKNOWN') return fs.symlink(resolvedSrc, dest)
    throw e
  }
  if (opts.dereference) {
    resolvedDest = path.resolve(process.cwd(), resolvedDest)
  }
  if (stat.isSrcSubdir(resolvedSrc, resolvedDest)) {
    throw new Error(`Cannot copy '${resolvedSrc}' to a subdirectory of itself, '${resolvedDest}'.`)
  }
  // do not copy if src is a subdir of dest since unlinking
  // dest in this case would result in removing src contents
  // and therefore a broken symlink would be created.
  if (stat.isSrcSubdir(resolvedDest, resolvedSrc)) {
    throw new Error(`Cannot overwrite '${resolvedDest}' with '${resolvedSrc}'.`)
  }
  // copy the link
  await fs.unlink(dest)
  return fs.symlink(resolvedSrc, dest)
}

module.exports = copy

7
node_modules/fs-extra/lib/copy/index.js generated vendored Normal file
View File

@ -0,0 +1,7 @@
'use strict'
// Expose the async copy (universalified: callback- or promise-style) and
// its synchronous twin under one module.
const { fromPromise } = require('universalify')
const copy = fromPromise(require('./copy'))
const copySync = require('./copy-sync')
module.exports = { copy, copySync }

39
node_modules/fs-extra/lib/empty/index.js generated vendored Normal file
View File

@ -0,0 +1,39 @@
'use strict'
const u = require('universalify').fromPromise
const fs = require('../fs')
const path = require('path')
const mkdir = require('../mkdirs')
const remove = require('../remove')
const emptyDir = u(async function emptyDir (dir) {
let items
try {
items = await fs.readdir(dir)
} catch {
return mkdir.mkdirs(dir)
}
return Promise.all(items.map(item => remove.remove(path.join(dir, item))))
})
function emptyDirSync (dir) {
let items
try {
items = fs.readdirSync(dir)
} catch {
return mkdir.mkdirsSync(dir)
}
items.forEach(item => {
item = path.join(dir, item)
remove.removeSync(item)
})
}
module.exports = {
emptyDirSync,
emptydirSync: emptyDirSync,
emptyDir,
emptydir: emptyDir
}

66
node_modules/fs-extra/lib/ensure/file.js generated vendored Normal file
View File

@ -0,0 +1,66 @@
'use strict'
const u = require('universalify').fromPromise
const path = require('path')
const fs = require('../fs')
const mkdir = require('../mkdirs')

// Ensure that `file` exists as a regular file: create it (and any missing
// parent directories) when absent; do nothing when it already exists.
async function createFile (file) {
  let stats
  try {
    stats = await fs.stat(file)
  } catch { }
  // Already a regular file: nothing to do. (A non-file at this path falls
  // through and is handled via the parent-directory checks below.)
  if (stats && stats.isFile()) return
  const dir = path.dirname(file)
  let dirStats = null
  try {
    dirStats = await fs.stat(dir)
  } catch (err) {
    // if the directory doesn't exist, make it
    if (err.code === 'ENOENT') {
      await mkdir.mkdirs(dir)
      await fs.writeFile(file, '')
      return
    } else {
      throw err
    }
  }
  if (dirStats.isDirectory()) {
    await fs.writeFile(file, '')
  } else {
    // parent is not a directory
    // This is just to cause an internal ENOTDIR error to be thrown
    await fs.readdir(dir)
  }
}

// Synchronous variant of createFile with the same semantics.
function createFileSync (file) {
  let stats
  try {
    stats = fs.statSync(file)
  } catch { }
  if (stats && stats.isFile()) return
  const dir = path.dirname(file)
  try {
    if (!fs.statSync(dir).isDirectory()) {
      // parent is not a directory
      // This is just to cause an internal ENOTDIR error to be thrown
      fs.readdirSync(dir)
    }
  } catch (err) {
    // If the stat call above failed because the directory doesn't exist, create it
    if (err && err.code === 'ENOENT') mkdir.mkdirsSync(dir)
    else throw err
  }
  fs.writeFileSync(file, '')
}

module.exports = {
  createFile: u(createFile),
  createFileSync
}

23
node_modules/fs-extra/lib/ensure/index.js generated vendored Normal file
View File

@ -0,0 +1,23 @@
'use strict'
const { createFile, createFileSync } = require('./file')
const { createLink, createLinkSync } = require('./link')
const { createSymlink, createSymlinkSync } = require('./symlink')
module.exports = {
// file
createFile,
createFileSync,
ensureFile: createFile,
ensureFileSync: createFileSync,
// link
createLink,
createLinkSync,
ensureLink: createLink,
ensureLinkSync: createLinkSync,
// symlink
createSymlink,
createSymlinkSync,
ensureSymlink: createSymlink,
ensureSymlinkSync: createSymlinkSync
}

64
node_modules/fs-extra/lib/ensure/link.js generated vendored Normal file
View File

@ -0,0 +1,64 @@
'use strict'
const u = require('universalify').fromPromise
const path = require('path')
const fs = require('../fs')
const mkdir = require('../mkdirs')
const { pathExists } = require('../path-exists')
const { areIdentical } = require('../util/stat')

// Ensure a hard link at `dstpath` pointing to `srcpath`, creating any
// missing parent directories. A no-op when dstpath already refers to the
// same inode as srcpath.
async function createLink (srcpath, dstpath) {
  let dstStat
  try {
    dstStat = await fs.lstat(dstpath)
  } catch {
    // ignore error
  }
  let srcStat
  try {
    srcStat = await fs.lstat(srcpath)
  } catch (err) {
    // Rewrite the syscall name so the error reads as coming from ensureLink
    err.message = err.message.replace('lstat', 'ensureLink')
    throw err
  }
  // dev+ino comparison: both paths already point at the same file
  if (dstStat && areIdentical(srcStat, dstStat)) return
  const dir = path.dirname(dstpath)
  const dirExists = await pathExists(dir)
  if (!dirExists) {
    await mkdir.mkdirs(dir)
  }
  await fs.link(srcpath, dstpath)
}

// Synchronous variant of createLink with the same semantics.
function createLinkSync (srcpath, dstpath) {
  let dstStat
  try {
    dstStat = fs.lstatSync(dstpath)
  } catch {}
  try {
    const srcStat = fs.lstatSync(srcpath)
    if (dstStat && areIdentical(srcStat, dstStat)) return
  } catch (err) {
    // Rewrite the syscall name so the error reads as coming from ensureLink
    err.message = err.message.replace('lstat', 'ensureLink')
    throw err
  }
  const dir = path.dirname(dstpath)
  const dirExists = fs.existsSync(dir)
  if (dirExists) return fs.linkSync(srcpath, dstpath)
  mkdir.mkdirsSync(dir)
  return fs.linkSync(srcpath, dstpath)
}

module.exports = {
  createLink: u(createLink),
  createLinkSync
}

101
node_modules/fs-extra/lib/ensure/symlink-paths.js generated vendored Normal file
View File

@ -0,0 +1,101 @@
'use strict'
const path = require('path')
const fs = require('../fs')
const { pathExists } = require('../path-exists')
const u = require('universalify').fromPromise

/**
 * Resolve a symlink source path into two forms: one relative to the current
 * working directory (`toCwd`, used for existence/type checks) and one to
 * actually store in the link (`toDst`).
 *
 * Three kinds of `srcpath` are handled:
 * 1. Absolute: verified to exist (lstat), then used as-is for both forms.
 * 2. Relative to the symlink's own directory (Node's native `fs.symlink`
 *    semantics): preferred when a file exists at `dirname(dstpath)/srcpath`.
 * 3. Relative to the current working directory: used as a fallback when (2)
 *    does not exist; `toDst` is then re-expressed relative to the symlink's
 *    directory so the created link is not broken.
 *
 * This preserves the expectations of the original fs.symlink spec while
 * also accepting CWD-relative source paths.
 */
async function symlinkPaths (srcpath, dstpath) {
  if (path.isAbsolute(srcpath)) {
    try {
      await fs.lstat(srcpath)
    } catch (err) {
      // Rewrite the syscall name so the error reads as coming from ensureSymlink
      err.message = err.message.replace('lstat', 'ensureSymlink')
      throw err
    }
    return {
      toCwd: srcpath,
      toDst: srcpath
    }
  }
  const dstdir = path.dirname(dstpath)
  const relativeToDst = path.join(dstdir, srcpath)
  const exists = await pathExists(relativeToDst)
  if (exists) {
    // Case 2: srcpath is relative to the symlink location
    return {
      toCwd: relativeToDst,
      toDst: srcpath
    }
  }
  // Case 3: srcpath must exist relative to the current working directory
  try {
    await fs.lstat(srcpath)
  } catch (err) {
    err.message = err.message.replace('lstat', 'ensureSymlink')
    throw err
  }
  return {
    toCwd: srcpath,
    toDst: path.relative(dstdir, srcpath)
  }
}

// Synchronous variant of symlinkPaths (uses existsSync and throws plain
// Errors instead of rewritten lstat errors).
function symlinkPathsSync (srcpath, dstpath) {
  if (path.isAbsolute(srcpath)) {
    const exists = fs.existsSync(srcpath)
    if (!exists) throw new Error('absolute srcpath does not exist')
    return {
      toCwd: srcpath,
      toDst: srcpath
    }
  }
  const dstdir = path.dirname(dstpath)
  const relativeToDst = path.join(dstdir, srcpath)
  const exists = fs.existsSync(relativeToDst)
  if (exists) {
    return {
      toCwd: relativeToDst,
      toDst: srcpath
    }
  }
  const srcExists = fs.existsSync(srcpath)
  if (!srcExists) throw new Error('relative srcpath does not exist')
  return {
    toCwd: srcpath,
    toDst: path.relative(dstdir, srcpath)
  }
}

module.exports = {
  symlinkPaths: u(symlinkPaths),
  symlinkPathsSync
}

34
node_modules/fs-extra/lib/ensure/symlink-type.js generated vendored Normal file
View File

@ -0,0 +1,34 @@
'use strict'
const fs = require('../fs')
const u = require('universalify').fromPromise
async function symlinkType (srcpath, type) {
if (type) return type
let stats
try {
stats = await fs.lstat(srcpath)
} catch {
return 'file'
}
return (stats && stats.isDirectory()) ? 'dir' : 'file'
}
function symlinkTypeSync (srcpath, type) {
if (type) return type
let stats
try {
stats = fs.lstatSync(srcpath)
} catch {
return 'file'
}
return (stats && stats.isDirectory()) ? 'dir' : 'file'
}
module.exports = {
symlinkType: u(symlinkType),
symlinkTypeSync
}

67
node_modules/fs-extra/lib/ensure/symlink.js generated vendored Normal file
View File

@ -0,0 +1,67 @@
'use strict'
const u = require('universalify').fromPromise
const path = require('path')
const fs = require('../fs')
const { mkdirs, mkdirsSync } = require('../mkdirs')
const { symlinkPaths, symlinkPathsSync } = require('./symlink-paths')
const { symlinkType, symlinkTypeSync } = require('./symlink-type')
const { pathExists } = require('../path-exists')
const { areIdentical } = require('../util/stat')

// Ensure a symlink at `dstpath` pointing to `srcpath`, creating missing
// parent directories. A no-op when dstpath is already a symlink resolving
// to the same file as srcpath. `type` ('dir'|'file') is a Windows hint.
async function createSymlink (srcpath, dstpath, type) {
  let stats
  try {
    stats = await fs.lstat(dstpath)
  } catch { }
  if (stats && stats.isSymbolicLink()) {
    // stat (not lstat) both paths so the existing link's target is compared
    const [srcStat, dstStat] = await Promise.all([
      fs.stat(srcpath),
      fs.stat(dstpath)
    ])
    if (areIdentical(srcStat, dstStat)) return
  }
  // Work out the link text relative to the symlink's own directory
  const relative = await symlinkPaths(srcpath, dstpath)
  srcpath = relative.toDst
  const toType = await symlinkType(relative.toCwd, type)
  const dir = path.dirname(dstpath)
  if (!(await pathExists(dir))) {
    await mkdirs(dir)
  }
  return fs.symlink(srcpath, dstpath, toType)
}

// Synchronous variant of createSymlink with the same semantics.
function createSymlinkSync (srcpath, dstpath, type) {
  let stats
  try {
    stats = fs.lstatSync(dstpath)
  } catch { }
  if (stats && stats.isSymbolicLink()) {
    const srcStat = fs.statSync(srcpath)
    const dstStat = fs.statSync(dstpath)
    if (areIdentical(srcStat, dstStat)) return
  }
  const relative = symlinkPathsSync(srcpath, dstpath)
  srcpath = relative.toDst
  type = symlinkTypeSync(relative.toCwd, type)
  const dir = path.dirname(dstpath)
  const exists = fs.existsSync(dir)
  if (exists) return fs.symlinkSync(srcpath, dstpath, type)
  mkdirsSync(dir)
  return fs.symlinkSync(srcpath, dstpath, type)
}

module.exports = {
  createSymlink: u(createSymlink),
  createSymlinkSync
}

68
node_modules/fs-extra/lib/esm.mjs generated vendored Normal file
View File

@ -0,0 +1,68 @@
// ESM entry point: re-exports fs-extra's own helpers as named exports.
import _copy from './copy/index.js'
import _empty from './empty/index.js'
import _ensure from './ensure/index.js'
import _json from './json/index.js'
import _mkdirs from './mkdirs/index.js'
import _move from './move/index.js'
import _outputFile from './output-file/index.js'
import _pathExists from './path-exists/index.js'
import _remove from './remove/index.js'
// NOTE: Only exports fs-extra's functions; fs functions must be imported from "node:fs" or "node:fs/promises"
// copy
export const copy = _copy.copy
export const copySync = _copy.copySync
// empty (lower-case forms are legacy aliases)
export const emptyDirSync = _empty.emptyDirSync
export const emptydirSync = _empty.emptydirSync
export const emptyDir = _empty.emptyDir
export const emptydir = _empty.emptydir
// ensure (create* and ensure* are aliases of the same functions)
export const createFile = _ensure.createFile
export const createFileSync = _ensure.createFileSync
export const ensureFile = _ensure.ensureFile
export const ensureFileSync = _ensure.ensureFileSync
export const createLink = _ensure.createLink
export const createLinkSync = _ensure.createLinkSync
export const ensureLink = _ensure.ensureLink
export const ensureLinkSync = _ensure.ensureLinkSync
export const createSymlink = _ensure.createSymlink
export const createSymlinkSync = _ensure.createSymlinkSync
export const ensureSymlink = _ensure.ensureSymlink
export const ensureSymlinkSync = _ensure.ensureSymlinkSync
// json (Json/JSON spellings are aliases)
export const readJson = _json.readJson
export const readJSON = _json.readJSON
export const readJsonSync = _json.readJsonSync
export const readJSONSync = _json.readJSONSync
export const writeJson = _json.writeJson
export const writeJSON = _json.writeJSON
export const writeJsonSync = _json.writeJsonSync
export const writeJSONSync = _json.writeJSONSync
export const outputJson = _json.outputJson
export const outputJSON = _json.outputJSON
export const outputJsonSync = _json.outputJsonSync
export const outputJSONSync = _json.outputJSONSync
// mkdirs (mkdirp/ensureDir are aliases)
export const mkdirs = _mkdirs.mkdirs
export const mkdirsSync = _mkdirs.mkdirsSync
export const mkdirp = _mkdirs.mkdirp
export const mkdirpSync = _mkdirs.mkdirpSync
export const ensureDir = _mkdirs.ensureDir
export const ensureDirSync = _mkdirs.ensureDirSync
// move
export const move = _move.move
export const moveSync = _move.moveSync
// output-file
export const outputFile = _outputFile.outputFile
export const outputFileSync = _outputFile.outputFileSync
// path-exists
export const pathExists = _pathExists.pathExists
export const pathExistsSync = _pathExists.pathExistsSync
// remove
export const remove = _remove.remove
export const removeSync = _remove.removeSync
// Default export mirrors the CommonJS shape (later spreads win on collisions)
export default {
  ..._copy,
  ..._empty,
  ..._ensure,
  ..._json,
  ..._mkdirs,
  ..._move,
  ..._outputFile,
  ..._pathExists,
  ..._remove
}

146
node_modules/fs-extra/lib/fs/index.js generated vendored Normal file
View File

@ -0,0 +1,146 @@
'use strict'
// This is adapted from https://github.com/normalize/mz
// Copyright (c) 2014-2016 Jonathan Ong me@jongleberry.com and Contributors
// Wraps graceful-fs so every async method supports BOTH callback and
// promise styles (via universalify.fromCallback).
const u = require('universalify').fromCallback
const fs = require('graceful-fs')
// Async fs methods with a single-value callback that can be universalified
// mechanically. Methods with multi-value callbacks are handled manually below.
const api = [
  'access',
  'appendFile',
  'chmod',
  'chown',
  'close',
  'copyFile',
  'cp',
  'fchmod',
  'fchown',
  'fdatasync',
  'fstat',
  'fsync',
  'ftruncate',
  'futimes',
  'glob',
  'lchmod',
  'lchown',
  'lutimes',
  'link',
  'lstat',
  'mkdir',
  'mkdtemp',
  'open',
  'opendir',
  'readdir',
  'readFile',
  'readlink',
  'realpath',
  'rename',
  'rm',
  'rmdir',
  'stat',
  'statfs',
  'symlink',
  'truncate',
  'unlink',
  'utimes',
  'writeFile'
].filter(key => {
  // Some commands are not available on some systems. Ex:
  // fs.cp was added in Node.js v16.7.0
  // fs.statfs was added in Node v19.6.0, v18.15.0
  // fs.glob was added in Node.js v22.0.0
  // fs.lchown is not available on at least some Linux
  return typeof fs[key] === 'function'
})
// Export cloned fs:
Object.assign(exports, fs)
// Universalify async methods:
api.forEach(method => {
  exports[method] = u(fs[method])
})
// We differ from mz/fs in that we still ship the old, broken, fs.exists()
// since we are a drop-in replacement for the native module
exports.exists = function (filename, callback) {
  if (typeof callback === 'function') {
    return fs.exists(filename, callback)
  }
  // fs.exists has no error argument, so resolve (never reject) with the boolean
  return new Promise(resolve => {
    return fs.exists(filename, resolve)
  })
}
// fs.read(), fs.write(), fs.readv(), & fs.writev() need special treatment due to multiple callback args
exports.read = function (fd, buffer, offset, length, position, callback) {
  if (typeof callback === 'function') {
    return fs.read(fd, buffer, offset, length, position, callback)
  }
  // Promise form resolves with an object carrying both callback values
  return new Promise((resolve, reject) => {
    fs.read(fd, buffer, offset, length, position, (err, bytesRead, buffer) => {
      if (err) return reject(err)
      resolve({ bytesRead, buffer })
    })
  })
}
// Function signature can be
// fs.write(fd, buffer[, offset[, length[, position]]], callback)
// OR
// fs.write(fd, string[, position[, encoding]], callback)
// We need to handle both cases, so we use ...args
exports.write = function (fd, buffer, ...args) {
  if (typeof args[args.length - 1] === 'function') {
    return fs.write(fd, buffer, ...args)
  }
  return new Promise((resolve, reject) => {
    fs.write(fd, buffer, ...args, (err, bytesWritten, buffer) => {
      if (err) return reject(err)
      resolve({ bytesWritten, buffer })
    })
  })
}
// Function signature is
// fs.readv(fd, buffers[, position], callback)
// We need to handle the optional arg, so we use ...args
exports.readv = function (fd, buffers, ...args) {
  if (typeof args[args.length - 1] === 'function') {
    return fs.readv(fd, buffers, ...args)
  }
  return new Promise((resolve, reject) => {
    fs.readv(fd, buffers, ...args, (err, bytesRead, buffers) => {
      if (err) return reject(err)
      resolve({ bytesRead, buffers })
    })
  })
}
// Function signature is
// fs.writev(fd, buffers[, position], callback)
// We need to handle the optional arg, so we use ...args
exports.writev = function (fd, buffers, ...args) {
  if (typeof args[args.length - 1] === 'function') {
    return fs.writev(fd, buffers, ...args)
  }
  return new Promise((resolve, reject) => {
    fs.writev(fd, buffers, ...args, (err, bytesWritten, buffers) => {
      if (err) return reject(err)
      resolve({ bytesWritten, buffers })
    })
  })
}
// fs.realpath.native sometimes not available if fs is monkey-patched
if (typeof fs.realpath.native === 'function') {
  exports.realpath.native = u(fs.realpath.native)
} else {
  process.emitWarning(
    'fs.realpath.native is not a function. Is fs being monkey-patched?',
    'Warning', 'fs-extra-WARN0003'
  )
}

16
node_modules/fs-extra/lib/index.js generated vendored Normal file
View File

@ -0,0 +1,16 @@
'use strict'
// Aggregate CommonJS export: promisified graceful-fs plus all fs-extra
// extras. Spread order matters — later modules win on key collisions, so
// the extra methods intentionally shadow any same-named fs entries.
module.exports = {
  // Export promiseified graceful-fs:
  ...require('./fs'),
  // Export extra methods:
  ...require('./copy'),
  ...require('./empty'),
  ...require('./ensure'),
  ...require('./json'),
  ...require('./mkdirs'),
  ...require('./move'),
  ...require('./output-file'),
  ...require('./path-exists'),
  ...require('./remove')
}

16
node_modules/fs-extra/lib/json/index.js generated vendored Normal file
View File

@ -0,0 +1,16 @@
'use strict'
const u = require('universalify').fromPromise
// Start from the jsonfile wrappers and attach the outputJson helpers plus
// the capitalised JSON aliases, mutating the object before exporting it.
const jsonFile = require('./jsonfile')
jsonFile.outputJson = u(require('./output-json'))
jsonFile.outputJsonSync = require('./output-json-sync')
// aliases
jsonFile.outputJSON = jsonFile.outputJson
jsonFile.outputJSONSync = jsonFile.outputJsonSync
jsonFile.writeJSON = jsonFile.writeJson
jsonFile.writeJSONSync = jsonFile.writeJsonSync
jsonFile.readJSON = jsonFile.readJson
jsonFile.readJSONSync = jsonFile.readJsonSync
module.exports = jsonFile

11
node_modules/fs-extra/lib/json/jsonfile.js generated vendored Normal file
View File

@ -0,0 +1,11 @@
'use strict'
// Thin rename layer over the `jsonfile` package: its readFile/writeFile
// become fs-extra's readJson/writeJson (plus sync forms).
const jsonFile = require('jsonfile')
module.exports = {
  // jsonfile exports
  readJson: jsonFile.readFile,
  readJsonSync: jsonFile.readFileSync,
  writeJson: jsonFile.writeFile,
  writeJsonSync: jsonFile.writeFileSync
}

12
node_modules/fs-extra/lib/json/output-json-sync.js generated vendored Normal file
View File

@ -0,0 +1,12 @@
'use strict'
const { stringify } = require('jsonfile/utils')
const { outputFileSync } = require('../output-file')

// Serialize `data` with jsonfile's stringify (honours spaces/EOL options)
// and write it via outputFileSync, which creates missing parent directories.
const outputJsonSync = (file, data, options) => {
  outputFileSync(file, stringify(data, options), options)
}

module.exports = outputJsonSync

12
node_modules/fs-extra/lib/json/output-json.js generated vendored Normal file
View File

'use strict'
const { stringify } = require('jsonfile/utils')
const { outputFile } = require('../output-file')

// Async twin of outputJsonSync: serialize `data` (honouring spaces/EOL
// options) and write it via outputFile, creating parent dirs as needed.
async function outputJson (file, data, options = {}) {
  const serialized = stringify(data, options)
  await outputFile(file, serialized, options)
}

module.exports = outputJson

14
node_modules/fs-extra/lib/mkdirs/index.js generated vendored Normal file
View File

@ -0,0 +1,14 @@
'use strict'
const u = require('universalify').fromPromise
const { makeDir: _makeDir, makeDirSync } = require('./make-dir')
// Universalified once here so all aliases share the same wrapper instance.
const makeDir = u(_makeDir)
module.exports = {
  mkdirs: makeDir,
  mkdirsSync: makeDirSync,
  // alias
  mkdirp: makeDir,
  mkdirpSync: makeDirSync,
  ensureDir: makeDir,
  ensureDirSync: makeDirSync
}

27
node_modules/fs-extra/lib/mkdirs/make-dir.js generated vendored Normal file
View File

@ -0,0 +1,27 @@
'use strict'
const fs = require('../fs')
const { checkPath } = require('./utils')

// Resolve the requested directory mode: a bare number is used as-is,
// otherwise `options.mode` is merged over the 0o777 default.
function getMode (options) {
  if (typeof options === 'number') return options
  return { mode: 0o777, ...options }.mode
}

// mkdir -p: validate the path (Windows character rules), then delegate to
// fs.mkdir with recursive: true.
module.exports.makeDir = async (dir, options) => {
  checkPath(dir)
  return fs.mkdir(dir, { recursive: true, mode: getMode(options) })
}

// Synchronous variant of makeDir.
module.exports.makeDirSync = (dir, options) => {
  checkPath(dir)
  return fs.mkdirSync(dir, { recursive: true, mode: getMode(options) })
}

21
node_modules/fs-extra/lib/mkdirs/utils.js generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Adapted from https://github.com/sindresorhus/make-dir
// Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'use strict'
const path = require('path')
// Windows forbids < > : " | ? * in path components (the drive-letter colon
// in the root is exempt, hence the root is stripped first); raise EINVAL
// early to match libuv's behaviour.
// https://github.com/nodejs/node/issues/8987
// https://github.com/libuv/libuv/pull/1088
module.exports.checkPath = function checkPath (pth) {
  if (process.platform !== 'win32') return
  const withoutRoot = pth.replace(path.parse(pth).root, '')
  if (/[<>:"|?*]/.test(withoutRoot)) {
    const error = new Error(`Path contains invalid characters: ${pth}`)
    error.code = 'EINVAL'
    throw error
  }
}

7
node_modules/fs-extra/lib/move/index.js generated vendored Normal file
View File

@ -0,0 +1,7 @@
'use strict'
const u = require('universalify').fromPromise
// Public surface: promise/callback-capable move, plus its sync twin.
const move = u(require('./move'))
const moveSync = require('./move-sync')

module.exports = { move, moveSync }

55
node_modules/fs-extra/lib/move/move-sync.js generated vendored Normal file
View File

@ -0,0 +1,55 @@
'use strict'
const fs = require('graceful-fs')
const path = require('path')
const copySync = require('../copy').copySync
const removeSync = require('../remove').removeSync
const mkdirpSync = require('../mkdirs').mkdirpSync
const stat = require('../util/stat')
// Move src to dest synchronously. Uses fs.renameSync when possible and
// falls back to copy + delete when src and dest are on different devices
// (renameSync raises EXDEV in that case).
function moveSync (src, dest, opts) {
  opts = opts || {}
  const overwrite = opts.overwrite || opts.clobber || false

  // Validate the pair: throws on identical paths (except a pure case
  // rename), dir/non-dir overwrite mismatches, and dest inside src.
  const { srcStat, isChangingCase = false } = stat.checkPathsSync(src, dest, 'move', opts)
  stat.checkParentPathsSync(src, srcStat, dest, 'move')
  // Ensure dest's parent directory exists (skipped when it is the root).
  if (!isParentRoot(dest)) mkdirpSync(path.dirname(dest))
  return doRename(src, dest, overwrite, isChangingCase)
}

// True when dest's parent directory is the filesystem root.
function isParentRoot (dest) {
  const parent = path.dirname(dest)
  const parsedPath = path.parse(parent)
  return parsedPath.root === parent
}

// Apply overwrite semantics, then perform the rename.
function doRename (src, dest, overwrite, isChangingCase) {
  // A case-only rename ("foo" -> "FOO") must skip the existence check:
  // on case-insensitive filesystems dest "exists" as src itself.
  if (isChangingCase) return rename(src, dest, overwrite)
  if (overwrite) {
    removeSync(dest)
    return rename(src, dest, overwrite)
  }
  if (fs.existsSync(dest)) throw new Error('dest already exists.')
  return rename(src, dest, overwrite)
}

// renameSync, with a copy+remove fallback for cross-device moves (EXDEV).
function rename (src, dest, overwrite) {
  try {
    fs.renameSync(src, dest)
  } catch (err) {
    if (err.code !== 'EXDEV') throw err
    return moveAcrossDevice(src, dest, overwrite)
  }
}

// Cross-device move: copy everything (preserving timestamps), then delete src.
function moveAcrossDevice (src, dest, overwrite) {
  const opts = {
    overwrite,
    errorOnExist: true,
    preserveTimestamps: true
  }
  copySync(src, dest, opts)
  return removeSync(src)
}

module.exports = moveSync

59
node_modules/fs-extra/lib/move/move.js generated vendored Normal file
View File

@ -0,0 +1,59 @@
'use strict'
const fs = require('../fs')
const path = require('path')
const { copy } = require('../copy')
const { remove } = require('../remove')
const { mkdirp } = require('../mkdirs')
const { pathExists } = require('../path-exists')
const stat = require('../util/stat')
/**
 * Move src to dest. Uses fs.rename when src and dest share a device and
 * transparently falls back to copy + remove on EXDEV. opts.overwrite
 * (alias: clobber) allows replacing an existing dest.
 */
async function move (src, dest, opts = {}) {
  const overwrite = opts.overwrite || opts.clobber || false

  // Validate the pair and detect a case-only rename up front.
  const { srcStat, isChangingCase = false } = await stat.checkPaths(src, dest, 'move', opts)
  await stat.checkParentPaths(src, srcStat, dest, 'move')

  // Create dest's parent directory unless that parent is the root.
  const destParent = path.dirname(dest)
  if (path.parse(destParent).root !== destParent) {
    await mkdirp(destParent)
  }

  return doRename(src, dest, overwrite, isChangingCase)
}

// Enforce overwrite semantics, then rename (or copy+remove across devices).
async function doRename (src, dest, overwrite, isChangingCase) {
  // For a pure case change, dest "exists" as src itself on case-insensitive
  // filesystems, so the existence/overwrite handling must be skipped.
  if (!isChangingCase) {
    if (overwrite) {
      await remove(dest)
    } else if (await pathExists(dest)) {
      throw new Error('dest already exists.')
    }
  }

  try {
    await fs.rename(src, dest)
  } catch (err) {
    if (err.code !== 'EXDEV') throw err
    await moveAcrossDevice(src, dest, overwrite)
  }
}

// Cross-device fallback: copy with timestamps preserved, then delete src.
async function moveAcrossDevice (src, dest, overwrite) {
  await copy(src, dest, { overwrite, errorOnExist: true, preserveTimestamps: true })
  return remove(src)
}

module.exports = move

31
node_modules/fs-extra/lib/output-file/index.js generated vendored Normal file
View File

@ -0,0 +1,31 @@
'use strict'
const u = require('universalify').fromPromise
const fs = require('../fs')
const path = require('path')
const mkdir = require('../mkdirs')
const pathExists = require('../path-exists').pathExists
async function outputFile (file, data, encoding = 'utf-8') {
const dir = path.dirname(file)
if (!(await pathExists(dir))) {
await mkdir.mkdirs(dir)
}
return fs.writeFile(file, data, encoding)
}
function outputFileSync (file, ...args) {
const dir = path.dirname(file)
if (!fs.existsSync(dir)) {
mkdir.mkdirsSync(dir)
}
fs.writeFileSync(file, ...args)
}
module.exports = {
outputFile: u(outputFile),
outputFileSync
}

12
node_modules/fs-extra/lib/path-exists/index.js generated vendored Normal file
View File

@ -0,0 +1,12 @@
'use strict'
const u = require('universalify').fromPromise
const fs = require('../fs')
function pathExists (path) {
return fs.access(path).then(() => true).catch(() => false)
}
module.exports = {
pathExists: u(pathExists),
pathExistsSync: fs.existsSync
}

17
node_modules/fs-extra/lib/remove/index.js generated vendored Normal file
View File

@ -0,0 +1,17 @@
'use strict'
const fs = require('graceful-fs')
const u = require('universalify').fromCallback
function remove (path, callback) {
fs.rm(path, { recursive: true, force: true }, callback)
}
function removeSync (path) {
fs.rmSync(path, { recursive: true, force: true })
}
module.exports = {
remove: u(remove),
removeSync
}

158
node_modules/fs-extra/lib/util/stat.js generated vendored Normal file
View File

@ -0,0 +1,158 @@
'use strict'
const fs = require('../fs')
const path = require('path')
const u = require('universalify').fromPromise
// Stat src and dest in parallel. A missing dest yields destStat = null;
// any other stat failure rejects. bigint stats keep inode/device
// comparisons exact on every platform.
async function getStats (src, dest, opts) {
  const doStat = opts.dereference
    ? (file) => fs.stat(file, { bigint: true })
    : (file) => fs.lstat(file, { bigint: true })
  const srcStatP = doStat(src)
  const destStatP = doStat(dest).catch(err => {
    if (err.code === 'ENOENT') return null
    throw err
  })
  const [srcStat, destStat] = await Promise.all([srcStatP, destStatP])
  return { srcStat, destStat }
}
// Synchronous twin of getStats: stat src (throws on failure), then dest,
// mapping a missing dest to destStat = null.
function getStatsSync (src, dest, opts) {
  const doStat = opts.dereference
    ? (file) => fs.statSync(file, { bigint: true })
    : (file) => fs.lstatSync(file, { bigint: true })
  const srcStat = doStat(src)
  let destStat = null
  try {
    destStat = doStat(dest)
  } catch (err) {
    if (err.code !== 'ENOENT') throw err
  }
  return { srcStat, destStat }
}
// Validate a (src, dest) pair before a copy/move.
// Resolves with { srcStat, destStat } (destStat is null when dest does not
// exist), plus isChangingCase: true for a move that only changes filename
// case. Throws when src and dest are the same file, when a directory would
// overwrite a non-directory (or vice versa), or when dest is inside src.
async function checkPaths (src, dest, funcName, opts) {
  const { srcStat, destStat } = await getStats(src, dest, opts)
  if (destStat) {
    if (areIdentical(srcStat, destStat)) {
      // Same inode/device: normally an error, but a move that merely
      // changes the basename's case is legal on case-insensitive
      // filesystems, where src and dest resolve to the same file.
      const srcBaseName = path.basename(src)
      const destBaseName = path.basename(dest)
      if (funcName === 'move' &&
        srcBaseName !== destBaseName &&
        srcBaseName.toLowerCase() === destBaseName.toLowerCase()) {
        return { srcStat, destStat, isChangingCase: true }
      }
      throw new Error('Source and destination must not be the same.')
    }
    if (srcStat.isDirectory() && !destStat.isDirectory()) {
      throw new Error(`Cannot overwrite non-directory '${dest}' with directory '${src}'.`)
    }
    if (!srcStat.isDirectory() && destStat.isDirectory()) {
      throw new Error(`Cannot overwrite directory '${dest}' with non-directory '${src}'.`)
    }
  }
  // String-level containment check: a directory cannot be copied/moved
  // into one of its own subdirectories.
  if (srcStat.isDirectory() && isSrcSubdir(src, dest)) {
    throw new Error(errMsg(src, dest, funcName))
  }
  return { srcStat, destStat }
}
// Synchronous twin of checkPaths: same validation ladder and same return
// shape ({ srcStat, destStat } plus isChangingCase for case-only moves),
// but throws directly instead of rejecting.
function checkPathsSync (src, dest, funcName, opts) {
  const { srcStat, destStat } = getStatsSync(src, dest, opts)
  if (destStat) {
    if (areIdentical(srcStat, destStat)) {
      // Same inode/device: allowed only for a move that merely changes
      // the basename's case (case-insensitive filesystems).
      const srcBaseName = path.basename(src)
      const destBaseName = path.basename(dest)
      if (funcName === 'move' &&
        srcBaseName !== destBaseName &&
        srcBaseName.toLowerCase() === destBaseName.toLowerCase()) {
        return { srcStat, destStat, isChangingCase: true }
      }
      throw new Error('Source and destination must not be the same.')
    }
    if (srcStat.isDirectory() && !destStat.isDirectory()) {
      throw new Error(`Cannot overwrite non-directory '${dest}' with directory '${src}'.`)
    }
    if (!srcStat.isDirectory() && destStat.isDirectory()) {
      throw new Error(`Cannot overwrite directory '${dest}' with non-directory '${src}'.`)
    }
  }
  // String-level containment check: a directory cannot be copied/moved
  // into one of its own subdirectories.
  if (srcStat.isDirectory() && isSrcSubdir(src, dest)) {
    throw new Error(errMsg(src, dest, funcName))
  }
  return { srcStat, destStat }
}
// Guard against copying/moving src into one of its own descendants even
// when symlinks make the path strings look unrelated: walk dest's ancestor
// chain and compare each ancestor's inode/device against src's. The walk
// stops at src's own parent or at the filesystem root, and ends early if
// an ancestor does not exist.
async function checkParentPaths (src, srcStat, dest, funcName) {
  const srcParent = path.resolve(path.dirname(src))
  let ancestor = path.resolve(path.dirname(dest))
  while (ancestor !== srcParent && ancestor !== path.parse(ancestor).root) {
    let ancestorStat
    try {
      ancestorStat = await fs.stat(ancestor, { bigint: true })
    } catch (err) {
      if (err.code === 'ENOENT') return // chain broken: nothing to collide with
      throw err
    }
    if (areIdentical(srcStat, ancestorStat)) {
      throw new Error(errMsg(src, dest, funcName))
    }
    ancestor = path.resolve(path.dirname(ancestor))
  }
}
// Synchronous twin of checkParentPaths: iteratively stat each ancestor of
// dest and throw if any of them is the same inode/device as src.
function checkParentPathsSync (src, srcStat, dest, funcName) {
  const srcParent = path.resolve(path.dirname(src))
  let ancestor = path.resolve(path.dirname(dest))
  while (ancestor !== srcParent && ancestor !== path.parse(ancestor).root) {
    let ancestorStat
    try {
      ancestorStat = fs.statSync(ancestor, { bigint: true })
    } catch (err) {
      if (err.code === 'ENOENT') return // chain broken: nothing to collide with
      throw err
    }
    if (areIdentical(srcStat, ancestorStat)) {
      throw new Error(errMsg(src, dest, funcName))
    }
    ancestor = path.resolve(path.dirname(ancestor))
  }
}
// Two stat results refer to the same file when both inode and device
// numbers are present and equal (short-circuit preserves the original's
// falsy return values when either field is missing/zero).
function areIdentical (srcStat, destStat) {
  const bothTracked = destStat.ino && destStat.dev
  return bothTracked && destStat.ino === srcStat.ino && destStat.dev === srcStat.dev
}

// True if dest is src or lies under src, judged purely by path strings
// (symlinks are not resolved — see checkParentPaths for the inode check).
function isSrcSubdir (src, dest) {
  const srcParts = path.resolve(src).split(path.sep).filter(Boolean)
  const destParts = path.resolve(dest).split(path.sep).filter(Boolean)
  return srcParts.every((segment, i) => destParts[i] === segment)
}
// Standard error text for "cannot operate into own subdirectory" failures.
function errMsg (src, dest, funcName) {
  const detail = `'${src}' to a subdirectory of itself, '${dest}'.`
  return `Cannot ${funcName} ${detail}`
}
module.exports = {
  // Pair validation (async form is universalify-wrapped: promise or callback)
  checkPaths: u(checkPaths),
  checkPathsSync,
  // Ancestor inode checks against symlinked self-containment
  checkParentPaths: u(checkParentPaths),
  checkParentPathsSync,
  // Path/stat helpers shared by copy and move
  isSrcSubdir,
  areIdentical
}

36
node_modules/fs-extra/lib/util/utimes.js generated vendored Normal file
View File

@ -0,0 +1,36 @@
'use strict'
const fs = require('../fs')
const u = require('universalify').fromPromise
async function utimesMillis (path, atime, mtime) {
// if (!HAS_MILLIS_RES) return fs.utimes(path, atime, mtime, callback)
const fd = await fs.open(path, 'r+')
let closeErr = null
try {
await fs.futimes(fd, atime, mtime)
} finally {
try {
await fs.close(fd)
} catch (e) {
closeErr = e
}
}
if (closeErr) {
throw closeErr
}
}
function utimesMillisSync (path, atime, mtime) {
const fd = fs.openSync(path, 'r+')
fs.futimesSync(fd, atime, mtime)
return fs.closeSync(fd)
}
module.exports = {
utimesMillis: u(utimesMillis),
utimesMillisSync
}

71
node_modules/fs-extra/package.json generated vendored Normal file
View File

@ -0,0 +1,71 @@
{
"name": "fs-extra",
"version": "11.3.0",
"description": "fs-extra contains methods that aren't included in the vanilla Node.js fs package. Such as recursive mkdir, copy, and remove.",
"engines": {
"node": ">=14.14"
},
"homepage": "https://github.com/jprichardson/node-fs-extra",
"repository": {
"type": "git",
"url": "https://github.com/jprichardson/node-fs-extra"
},
"keywords": [
"fs",
"file",
"file system",
"copy",
"directory",
"extra",
"mkdirp",
"mkdir",
"mkdirs",
"recursive",
"json",
"read",
"write",
"extra",
"delete",
"remove",
"touch",
"create",
"text",
"output",
"move",
"promise"
],
"author": "JP Richardson <jprichardson@gmail.com>",
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"devDependencies": {
"klaw": "^2.1.1",
"klaw-sync": "^3.0.2",
"minimist": "^1.1.1",
"mocha": "^10.1.0",
"nyc": "^15.0.0",
"proxyquire": "^2.0.1",
"read-dir-files": "^0.1.1",
"standard": "^17.0.0"
},
"main": "./lib/index.js",
"exports": {
".": "./lib/index.js",
"./esm": "./lib/esm.mjs"
},
"files": [
"lib/",
"!lib/**/__tests__/"
],
"scripts": {
"lint": "standard",
"test-find": "find ./lib/**/__tests__ -name *.test.js | xargs mocha",
"test": "npm run lint && npm run unit && npm run unit-esm",
"unit": "nyc node test.js",
"unit-esm": "node test.mjs"
},
"sideEffects": false
}

15
node_modules/graceful-fs/LICENSE generated vendored Normal file
View File

@ -0,0 +1,15 @@
The ISC License
Copyright (c) 2011-2022 Isaac Z. Schlueter, Ben Noordhuis, and Contributors
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

Some files were not shown because too many files have changed in this diff Show More