Compare commits

..

No commits in common. "eb59cd5e9a490f03cfca99c648ea3a280b4ed7a3" and "694d221a30eea245adac92d3266ab00a6fa894e8" have entirely different histories.

39 changed files with 894 additions and 10265 deletions

View File

@ -1,80 +0,0 @@
# Git related files
.git
.gitignore
.gitattributes
# Python related
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# Virtual environments
venv/
env/
ENV/
.venv/
# IDEs and editors
.vscode/
.idea/
*.swp
*.swo
*~
# Operating systems
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Log files
logs/
*.log
# Temporary files
tmp/
temp/
.tmp/
# Docker related
Dockerfile*
.dockerignore
docker-compose*.yml
Makefile
# Documentation
*.md
docs/
# Configuration files (avoid including sensitive information)
config.yaml
*.yaml
!config.yaml.sample
config/
.env
.env.*
# Backup files
*.bak
*.backup

View File

@ -1,52 +0,0 @@
# ESXi MCP Server Configuration
# Copy this file to .env and fill in your values
# Docker Compose project name (prevents environment clashes)
COMPOSE_PROJECT=esxi-mcp
# ─────────────────────────────────────────────────────────────────────────────
# VMware vCenter/ESXi Connection (Required)
# ─────────────────────────────────────────────────────────────────────────────
VCENTER_HOST=your-vcenter-ip-or-hostname
VCENTER_USER=administrator@vsphere.local
VCENTER_PASSWORD=your-password
# ─────────────────────────────────────────────────────────────────────────────
# VMware Optional Settings
# ─────────────────────────────────────────────────────────────────────────────
# Datacenter name (auto-selects first if not specified)
# VCENTER_DATACENTER=your-datacenter-name
# Cluster name (auto-selects first if not specified)
# VCENTER_CLUSTER=your-cluster-name
# Datastore name (auto-selects largest if not specified)
# VCENTER_DATASTORE=your-datastore-name
# Default network for VMs
VCENTER_NETWORK=VM Network
# Skip SSL certificate verification (set to true for self-signed certs)
VCENTER_INSECURE=true
# ─────────────────────────────────────────────────────────────────────────────
# MCP Server Settings
# ─────────────────────────────────────────────────────────────────────────────
# API key for authentication (optional, but recommended for production)
# MCP_API_KEY=your-secret-api-key
# Transport type: stdio (for Claude Desktop) or sse (for web/Docker)
MCP_TRANSPORT=sse
# Server binding (only used with SSE transport)
MCP_HOST=0.0.0.0
MCP_PORT=8080
# ─────────────────────────────────────────────────────────────────────────────
# Logging Configuration
# ─────────────────────────────────────────────────────────────────────────────
# Log level: DEBUG, INFO, WARNING, ERROR
LOG_LEVEL=INFO
# Log file path (logs to console if not specified)
# LOG_FILE=/app/logs/esxi-mcp.log

14
.gitignore vendored
View File

@ -21,21 +21,10 @@ wheels/
*.egg *.egg
# Virtual Environment # Virtual Environment
.venv/
venv/ venv/
ENV/ ENV/
env/ env/
# uv
.uv/
# Testing & Linting
.pytest_cache/
.ruff_cache/
.mypy_cache/
.coverage
htmlcov/
# IDE # IDE
.idea/ .idea/
.vscode/ .vscode/
@ -46,10 +35,9 @@ htmlcov/
logs/ logs/
*.log *.log
# Config (secrets) # Config
config.yaml config.yaml
config.json config.json
.env
# OS # OS
.DS_Store .DS_Store

View File

@ -1,67 +0,0 @@
# mcvsphere - Modern Python with uv
# Multi-stage build for optimal image size
# Build stage
FROM ghcr.io/astral-sh/uv:python3.11-bookworm-slim AS builder
WORKDIR /app
# Enable bytecode compilation for production
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
# Install dependencies first (cached layer)
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
uv sync --frozen --no-install-project --no-dev
# Install project
COPY pyproject.toml uv.lock ./
COPY src ./src
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --no-dev --no-editable
# Production stage
FROM python:3.11-slim-bookworm AS production
# Create non-root user
RUN groupadd -r mcpuser && useradd -r -g mcpuser mcpuser
WORKDIR /app
# Install runtime dependencies only
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
# Copy virtual environment from builder
COPY --from=builder /app/.venv /app/.venv
# Create directories
RUN mkdir -p /app/logs /app/config \
&& chown -R mcpuser:mcpuser /app
# Environment configuration
ENV PATH="/app/.venv/bin:$PATH"
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
# Default to SSE transport for Docker
ENV MCP_TRANSPORT=sse
ENV MCP_HOST=0.0.0.0
ENV MCP_PORT=8080
# Switch to non-root user
USER mcpuser
EXPOSE 8080
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8080')" || exit 1
# Run the MCP server
ENTRYPOINT ["mcvsphere"]
CMD ["--transport", "sse"]

View File

@ -1,33 +0,0 @@
# ESXi MCP Server - Development with hot-reload
FROM ghcr.io/astral-sh/uv:python3.11-bookworm-slim
WORKDIR /app
# Install watchfiles for hot-reload
ENV UV_COMPILE_BYTECODE=0
ENV UV_LINK_MODE=copy
# Install dependencies
COPY pyproject.toml uv.lock ./
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --no-install-project
# Copy source
COPY src ./src
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen
# Install watchfiles for development
RUN uv pip install watchfiles
# Create directories
RUN mkdir -p /app/logs /app/config
# Environment
ENV PATH="/app/.venv/bin:$PATH"
ENV PYTHONUNBUFFERED=1
EXPOSE 8080
# Run with watchfiles for hot-reload
CMD ["watchfiles", "--filter", "python", "esxi-mcp-server --transport sse", "src"]

132
Makefile
View File

@ -1,132 +0,0 @@
# ESXi MCP Server Makefile
# Modern Python with uv, Docker Compose for containerization
.PHONY: help install dev test lint format build run run-dev stop logs clean
# Default target
help: ## Show this help
@echo "ESXi MCP Server"
@echo "==============="
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
# ─────────────────────────────────────────────────────────────────────────────
# Development Commands
# ─────────────────────────────────────────────────────────────────────────────
install: ## Install dependencies with uv
uv sync
dev: ## Install with dev dependencies
uv sync --all-extras
test: ## Run tests
uv run pytest
lint: ## Run ruff linter
uv run ruff check src/
format: ## Format code with ruff
uv run ruff format src/
uv run ruff check --fix src/
typecheck: ## Run type checking
uv run mypy src/
# ─────────────────────────────────────────────────────────────────────────────
# Local Run Commands
# ─────────────────────────────────────────────────────────────────────────────
run-local: ## Run MCP server locally (stdio mode for Claude Desktop)
uv run esxi-mcp-server
run-local-sse: ## Run MCP server locally with SSE transport
uv run esxi-mcp-server --transport sse
# ─────────────────────────────────────────────────────────────────────────────
# Docker Commands
# ─────────────────────────────────────────────────────────────────────────────
setup: ## Initial setup - create .env from example
@if [ ! -f .env ]; then \
cp .env.example .env; \
echo "Created .env from .env.example"; \
echo "Please edit .env with your vCenter credentials"; \
else \
echo ".env already exists"; \
fi
@mkdir -p logs
build: ## Build Docker image
docker compose build esxi-mcp-server
build-dev: ## Build development Docker image
docker compose build esxi-mcp-server-dev
run: setup ## Run production container
docker compose --profile prod up -d
@echo "Waiting for container to start..."
@sleep 3
docker compose --profile prod logs --tail=20
run-dev: setup ## Run development container with hot-reload
docker compose --profile dev up -d
@echo "Waiting for container to start..."
@sleep 3
docker compose --profile dev logs --tail=20
stop: ## Stop all containers
docker compose --profile prod --profile dev down
restart: stop run ## Restart production container
logs: ## Show container logs
docker compose --profile prod --profile dev logs -f
logs-prod: ## Show production logs only
docker compose --profile prod logs -f
logs-dev: ## Show development logs only
docker compose --profile dev logs -f
shell: ## Open shell in running container
docker compose --profile prod exec esxi-mcp-server bash || \
docker compose --profile dev exec esxi-mcp-server-dev bash
status: ## Show container status
docker compose --profile prod --profile dev ps
health: ## Check container health
@docker compose --profile prod exec esxi-mcp-server \
python -c "import urllib.request; urllib.request.urlopen('http://localhost:8080')" \
&& echo "✅ Health check passed" || echo "❌ Health check failed"
# ─────────────────────────────────────────────────────────────────────────────
# Maintenance Commands
# ─────────────────────────────────────────────────────────────────────────────
clean: ## Remove containers and images
docker compose --profile prod --profile dev down -v --rmi local
rm -rf .venv __pycache__ .pytest_cache .ruff_cache
clean-all: clean ## Remove everything including uv cache
rm -rf .uv
lock: ## Update uv.lock file
uv lock
update: ## Update dependencies
uv lock --upgrade
uv sync
# ─────────────────────────────────────────────────────────────────────────────
# Release Commands
# ─────────────────────────────────────────────────────────────────────────────
build-release: ## Build release package
uv build
publish-test: ## Publish to TestPyPI
uv publish --repository testpypi
publish: ## Publish to PyPI
uv publish

489
README.md
View File

@ -1,345 +1,188 @@
# mcvsphere # ESXi MCP Server
A comprehensive VMware vSphere management server implementing the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/), enabling AI assistants like Claude to manage virtual infrastructure through natural language. A VMware ESXi/vCenter management server based on MCP (Model Context Protocol), providing simple REST API interfaces for virtual machine management.
## Why mcvsphere?
Traditional VMware management requires navigating complex UIs or writing scripts. With mcvsphere, you can simply ask:
> "Create a new VM with 4 CPUs and 8GB RAM, then take a snapshot before installing the OS"
And watch it happen. The server exposes **94 tools** covering every aspect of vSphere management.
## Features ## Features
- **94 MCP Tools** - Complete vSphere management capabilities - Support for ESXi and vCenter Server connections
- **6 MCP Resources** - Real-time access to VMs, hosts, datastores, networks, and clusters - Real-time communication based on SSE (Server-Sent Events)
- **Modular Architecture** - 13 specialized mixins organized by function - RESTful API interface with JSON-RPC support
- **Full vCenter & ESXi Support** - Works with standalone hosts or vCenter Server - API key authentication
- **Guest Operations** - Execute commands, transfer files inside VMs via VMware Tools - Complete virtual machine lifecycle management
- **Serial Console Access** - Network serial ports for headless VMs and network appliances - Real-time performance monitoring
- **VM Screenshots** - Capture console screenshots for monitoring or documentation - SSL/TLS secure connection support
- Flexible configuration options (YAML/JSON/Environment Variables)
## Quick Start ## Core Functions
### Installation - Virtual Machine Management
- Create VM
```bash - Clone VM
# Install with uv (recommended) - Delete VM
uvx mcvsphere - Power On/Off operations
- List all VMs
# Or install with pip - Performance Monitoring
pip install mcvsphere - CPU usage
``` - Memory usage
- Storage usage
### Configuration - Network traffic statistics
Create a `.env` file or set environment variables:
```bash
VCENTER_HOST=vcenter.example.com
VCENTER_USER=administrator@vsphere.local
VCENTER_PASSWORD=your-password
VCENTER_INSECURE=true # Skip SSL verification (dev only)
```
### Run the Server
```bash
# Using uvx
uvx mcvsphere
# Or if installed
mcvsphere
```
### Add to Claude Code
```bash
claude mcp add esxi "uvx mcvsphere"
```
## Available Tools (94 Total)
### VM Lifecycle (6 tools)
| Tool | Description |
|------|-------------|
| `list_vms` | List all virtual machines |
| `get_vm_info` | Get detailed VM information |
| `create_vm` | Create a new virtual machine |
| `clone_vm` | Clone from template or existing VM |
| `delete_vm` | Delete a virtual machine |
| `reconfigure_vm` | Modify CPU, memory, annotation |
| `rename_vm` | Rename a virtual machine |
### Power Operations (6 tools)
| Tool | Description |
|------|-------------|
| `power_on_vm` | Power on a VM |
| `power_off_vm` | Power off a VM (hard) |
| `shutdown_guest` | Graceful guest OS shutdown |
| `reboot_guest` | Graceful guest OS reboot |
| `suspend_vm` | Suspend a VM |
| `reset_vm` | Hard reset a VM |
### Snapshots (5 tools)
| Tool | Description |
|------|-------------|
| `list_snapshots` | List all snapshots |
| `create_snapshot` | Create a new snapshot |
| `revert_to_snapshot` | Revert to a snapshot |
| `delete_snapshot` | Delete a snapshot |
| `delete_all_snapshots` | Remove all snapshots |
### Guest Operations (7 tools)
*Requires VMware Tools running in the guest*
| Tool | Description |
|------|-------------|
| `list_guest_processes` | List processes in guest OS |
| `run_command_in_guest` | Execute command in guest |
| `read_guest_file` | Read file from guest OS |
| `write_guest_file` | Write file to guest OS |
| `list_guest_directory` | List directory contents |
| `create_guest_directory` | Create directory in guest |
| `delete_guest_file` | Delete file or directory |
### Console & Monitoring (5 tools)
| Tool | Description |
|------|-------------|
| `vm_screenshot` | Capture VM console screenshot |
| `wait_for_vm_tools` | Wait for VMware Tools to be ready |
| `get_vm_tools_status` | Get VMware Tools status |
| `get_vm_stats` | Get VM performance statistics |
| `get_host_stats` | Get host performance statistics |
### Serial Port Management (5 tools)
*For network appliances and headless VMs*
| Tool | Description |
|------|-------------|
| `get_serial_port` | Get serial port configuration |
| `setup_serial_port` | Configure network serial port |
| `connect_serial_port` | Connect/disconnect serial port |
| `clear_serial_port` | Reset serial port connection |
| `remove_serial_port` | Remove serial port from VM |
### Disk Management (5 tools)
| Tool | Description |
|------|-------------|
| `list_disks` | List VM disks |
| `add_disk` | Add a new disk |
| `remove_disk` | Remove a disk |
| `resize_disk` | Expand disk size |
| `get_disk_info` | Get disk details |
### NIC Management (6 tools)
| Tool | Description |
|------|-------------|
| `list_nics` | List VM network adapters |
| `add_nic` | Add a network adapter |
| `remove_nic` | Remove a network adapter |
| `connect_nic` | Connect/disconnect NIC |
| `change_nic_network` | Change NIC network |
| `get_nic_info` | Get NIC details |
### OVF/OVA Management (5 tools)
| Tool | Description |
|------|-------------|
| `deploy_ovf` | Deploy VM from OVF template |
| `export_ovf` | Export VM to OVF |
| `list_ovf_networks` | List OVF network mappings |
| `upload_to_datastore` | Upload file to datastore |
| `download_from_datastore` | Download file from datastore |
### Host Management (10 tools)
| Tool | Description |
|------|-------------|
| `list_hosts` | List ESXi hosts |
| `get_host_info` | Get host details |
| `get_host_hardware` | Get hardware information |
| `get_host_networking` | Get network configuration |
| `list_services` | List host services |
| `get_service_status` | Get service status |
| `start_service` | Start a host service |
| `stop_service` | Stop a host service |
| `restart_service` | Restart a host service |
| `get_ntp_config` | Get NTP configuration |
### Datastore & Resources (8 tools)
| Tool | Description |
|------|-------------|
| `get_datastore_info` | Get datastore details |
| `browse_datastore` | Browse datastore files |
| `get_vcenter_info` | Get vCenter information |
| `get_resource_pool_info` | Get resource pool details |
| `get_network_info` | Get network details |
| `list_templates` | List VM templates |
| `get_alarms` | Get active alarms |
| `get_recent_events` | Get recent events |
### vCenter Operations (18 tools)
*Available when connected to vCenter Server*
| Tool | Description |
|------|-------------|
| `list_folders` | List VM folders |
| `create_folder` | Create a folder |
| `delete_folder` | Delete a folder |
| `move_vm_to_folder` | Move VM to folder |
| `list_clusters` | List clusters |
| `get_cluster_info` | Get cluster details |
| `list_resource_pools` | List resource pools |
| `create_resource_pool` | Create resource pool |
| `delete_resource_pool` | Delete resource pool |
| `move_vm_to_resource_pool` | Move VM to resource pool |
| `list_tags` | List tags |
| `get_vm_tags` | Get tags on a VM |
| `apply_tag_to_vm` | Apply tag to VM |
| `remove_tag_from_vm` | Remove tag from VM |
| `migrate_vm` | Migrate VM to another host |
| `list_recent_tasks` | List recent tasks |
| `list_recent_events` | List recent events |
| `cancel_task` | Cancel a running task |
## MCP Resources
Access real-time data through MCP resources:
| Resource URI | Description |
|--------------|-------------|
| `esxi://vms` | All virtual machines |
| `esxi://hosts` | All ESXi hosts |
| `esxi://datastores` | All datastores |
| `esxi://networks` | All networks |
| `esxi://clusters` | All clusters |
| `esxi://resource-pools` | All resource pools |
## Architecture
The server uses a modular mixin architecture:
```
mcvsphere/
├── server.py # FastMCP server setup
├── connection.py # VMware connection management
├── config.py # Settings and configuration
└── mixins/
├── vm_lifecycle.py # VM CRUD operations
├── power_ops.py # Power management
├── snapshots.py # Snapshot management
├── guest_ops.py # Guest OS operations
├── console.py # Screenshots & Tools monitoring
├── serial_port.py # Serial console access
├── disk_management.py # Disk operations
├── nic_management.py # Network adapter operations
├── ovf_management.py # OVF/OVA handling
├── host_management.py # Host operations
├── monitoring.py # Performance monitoring
├── resources.py # MCP resources
└── vcenter_ops.py # vCenter-specific operations
```
## Configuration Options
| Variable | Description | Default |
|----------|-------------|---------|
| `VCENTER_HOST` | vCenter/ESXi hostname or IP | *required* |
| `VCENTER_USER` | Username | *required* |
| `VCENTER_PASSWORD` | Password | *required* |
| `VCENTER_INSECURE` | Skip SSL verification | `false` |
| `VCENTER_DATACENTER` | Target datacenter | *auto-detect* |
| `VCENTER_CLUSTER` | Target cluster | *auto-detect* |
| `VCENTER_DATASTORE` | Default datastore | *auto-detect* |
| `VCENTER_NETWORK` | Default network | *auto-detect* |
| `MCP_TRANSPORT` | Transport mode (`stdio` or `sse`) | `stdio` |
| `LOG_LEVEL` | Logging level | `INFO` |
## Docker Support
```bash
# Build
docker build -t mcvsphere .
# Run
docker run -d \
-e VCENTER_HOST=vcenter.example.com \
-e VCENTER_USER=admin@vsphere.local \
-e VCENTER_PASSWORD=secret \
mcvsphere
```
## Examples
### Create a VM and Install an OS
```
User: Create a new VM called "web-server" with 4 CPUs, 8GB RAM, and a 100GB disk
Claude: I'll create that VM for you.
[Calls create_vm with name="web-server", cpu=4, memory_mb=8192, disk_gb=100]
VM 'web-server' created successfully.
User: Power it on and take a screenshot
Claude: [Calls power_on_vm, then vm_screenshot]
The VM is now running. Here's the console screenshot showing the BIOS boot screen.
```
### Guest Operations
```
User: Run "uname -a" on the linux-server VM
Claude: [Calls run_command_in_guest with command="/usr/bin/uname", arguments="-a"]
The command returned:
Linux linux-server 5.15.0-generic #1 SMP x86_64 GNU/Linux
```
### Serial Console for Network Appliances
```
User: Set up a serial console on my Cisco router VM
Claude: [Calls setup_serial_port with name="cisco-router", protocol="telnet"]
Serial port configured. You can connect via:
telnet://10.20.0.22:4521
```
## Requirements ## Requirements
- Python 3.11+ - Python 3.7+
- VMware vSphere 7.0+ (ESXi or vCenter) - pyVmomi
- VMware Tools (for guest operations) - PyYAML
- uvicorn
- mcp-core (Model Context Protocol core library)
## Development ## Quick Start
1. Install dependencies:
```bash ```bash
# Clone the repo pip install pyvmomi pyyaml uvicorn mcp-core
git clone https://github.com/yourusername/mcvsphere.git
cd mcvsphere
# Install dependencies
uv sync
# Run tests
uv run python test_client.py
``` ```
2. Create configuration file `config.yaml`:
```yaml
vcenter_host: "your-vcenter-ip"
vcenter_user: "administrator@vsphere.local"
vcenter_password: "your-password"
datacenter: "your-datacenter" # Optional
cluster: "your-cluster" # Optional
datastore: "your-datastore" # Optional
network: "VM Network" # Optional
insecure: true # Skip SSL certificate verification
api_key: "your-api-key" # API access key
log_file: "./logs/vmware_mcp.log" # Log file path
log_level: "INFO" # Log level
```
3. Run the server:
```bash
python server.py -c config.yaml
```
## API Interface
### Authentication
All privileged operations require authentication first:
```http
POST /sse/messages
Authorization: Bearer your-api-key
```
### Main Tool Interfaces
1. Create VM
```json
{
"name": "vm-name",
"cpu": 2,
"memory": 4096,
"datastore": "datastore-name",
"network": "network-name"
}
```
2. Clone VM
```json
{
"template_name": "source-vm",
"new_name": "new-vm-name"
}
```
3. Delete VM
```json
{
"name": "vm-name"
}
```
4. Power Operations
```json
{
"name": "vm-name"
}
```
### Resource Monitoring Interface
Get VM performance data:
```http
GET vmstats://{vm_name}
```
## Configuration
| Parameter | Description | Required | Default |
|-----------|-------------|----------|---------|
| vcenter_host | vCenter/ESXi server address | Yes | - |
| vcenter_user | Login username | Yes | - |
| vcenter_password | Login password | Yes | - |
| datacenter | Datacenter name | No | Auto-select first |
| cluster | Cluster name | No | Auto-select first |
| datastore | Storage name | No | Auto-select largest available |
| network | Network name | No | VM Network |
| insecure | Skip SSL verification | No | false |
| api_key | API access key | No | - |
| log_file | Log file path | No | Console output |
| log_level | Log level | No | INFO |
## Environment Variables
All configuration items support environment variable settings, following these naming rules:
- VCENTER_HOST
- VCENTER_USER
- VCENTER_PASSWORD
- VCENTER_DATACENTER
- VCENTER_CLUSTER
- VCENTER_DATASTORE
- VCENTER_NETWORK
- VCENTER_INSECURE
- MCP_API_KEY
- MCP_LOG_FILE
- MCP_LOG_LEVEL
## Security Recommendations
1. Production Environment:
- Use valid SSL certificates
- Enable API key authentication
- Set appropriate log levels
- Restrict API access scope
2. Testing Environment:
- Set insecure: true to skip SSL verification
- Use more detailed log level (DEBUG)
## License ## License
MIT License - See [LICENSE](LICENSE) for details. MIT License
## Contributing ## Contributing
Contributions welcome! Please read the contributing guidelines and submit a PR. Issues and Pull Requests are welcome!
## Changelog
### v0.0.1
- Initial release
- Basic VM management functionality
- SSE communication support
- API key authentication
- Performance monitoring
## Author
Bright8192
## Acknowledgments ## Acknowledgments
- Built with [FastMCP](https://github.com/jlowin/fastmcp) - VMware pyvmomi team
- Uses [pyVmomi](https://github.com/vmware/pyvmomi) for vSphere API - MCP Protocol development team
- Inspired by the Model Context Protocol specification

View File

@ -1,286 +0,0 @@
# mcvsphere - Docker Guide
This guide provides instructions for running the mcvsphere using Docker and Docker Compose.
## Quick Start
### Prerequisites
- Docker 20.10+
- Docker Compose 2.0+
- Access to a VMware vCenter Server or ESXi host
### 1. Setup
```bash
# Clone the repository
git clone <repository-url>
cd mcvsphere
# Create necessary directories and configuration
make setup
# Create environment variables file (optional)
make env-example
cp .env.example .env
```
### 2. Configuration
You have two options for configuration:
#### Option A: Configuration File (Recommended)
Edit `config/config.yaml`:
```yaml
vcenter_host: "your-vcenter-ip"
vcenter_user: "administrator@vsphere.local"
vcenter_password: "your-password"
datacenter: "your-datacenter"
cluster: "your-cluster"
datastore: "your-datastore"
network: "VM Network"
insecure: true
api_key: "your-api-key"
log_level: "INFO"
```
#### Option B: Environment Variables
Edit `.env` file:
```bash
VCENTER_HOST=your-vcenter-ip
VCENTER_USER=administrator@vsphere.local
VCENTER_PASSWORD=your-password
VCENTER_DATACENTER=your-datacenter
VCENTER_CLUSTER=your-cluster
VCENTER_DATASTORE=your-datastore
VCENTER_NETWORK=VM Network
VCENTER_INSECURE=true
MCP_API_KEY=your-api-key
MCP_LOG_LEVEL=INFO
```
### 3. Run the Server
```bash
# Build and run
make dev
# Or run in background
make run
# Check status
make status
# View logs
make logs
```
## Available Commands
Use `make help` to see all available commands:
```bash
make help
```
### Build Commands
- `make build` - Build Docker image
- `make build-no-cache` - Build without cache
### Run Commands
- `make run` - Run in background
- `make run-logs` - Run with logs
- `make stop` - Stop containers
- `make restart` - Restart containers
### Development Commands
- `make dev` - Development mode (build + run with logs)
- `make logs` - Show logs
- `make shell` - Open bash shell in container
- `make status` - Show container status
- `make health` - Check container health
### Maintenance Commands
- `make clean` - Remove containers and volumes
- `make clean-all` - Remove everything
- `make update` - Rebuild and restart
## Docker Architecture
### Multi-stage Build
The Dockerfile uses a multi-stage build process:
1. **Builder Stage**: Installs build dependencies and Python packages
2. **Production Stage**: Creates a minimal runtime image
### Security Features
- Runs as non-root user (`mcpuser`)
- Minimal base image (python:3.11-slim)
- Only necessary runtime dependencies
- Configurable resource limits
### Directory Structure
```
/app/
├── server.py # Main application
├── config.yaml.sample # Configuration template
├── docker-entrypoint.sh # Startup script
├── config/ # Configuration directory (mounted)
│ └── config.yaml # Runtime configuration
└── logs/ # Log directory (mounted)
└── vmware_mcp.log # Application logs
```
## Configuration Options
### Volume Mounts
- `./config.yaml:/app/config/config.yaml:ro` - Configuration file (read-only)
- `./logs:/app/logs` - Log directory
### Environment Variables
All configuration options can be set via environment variables:
| Variable | Description | Default |
|----------|-------------|---------|
| `VCENTER_HOST` | vCenter/ESXi hostname | Required |
| `VCENTER_USER` | Username | Required |
| `VCENTER_PASSWORD` | Password | Required |
| `VCENTER_DATACENTER` | Datacenter name | Auto-detect |
| `VCENTER_CLUSTER` | Cluster name | Auto-detect |
| `VCENTER_DATASTORE` | Datastore name | Auto-detect |
| `VCENTER_NETWORK` | Network name | VM Network |
| `VCENTER_INSECURE` | Skip SSL verification | true |
| `MCP_API_KEY` | API authentication key | None |
| `MCP_LOG_LEVEL` | Log level | INFO |
### Resource Limits
Default resource limits in docker-compose.yml:
- **Memory**: 512MB limit, 256MB reserved
- **CPU**: 0.5 cores limit, 0.25 cores reserved
## Health Checks
The container includes automatic health checks:
- **Interval**: 30 seconds
- **Timeout**: 10 seconds
- **Retries**: 3
- **Start Period**: 40 seconds
Check health manually:
```bash
make health
```
## Networking
The server exposes:
- **Port 8080**: HTTP API endpoint
- **Path `/sse`**: Server-Sent Events endpoint
- **Path `/sse/messages`**: JSON-RPC messages endpoint
## Troubleshooting
### Check Logs
```bash
make logs
```
### Check Container Status
```bash
make status
```
### Open Shell in Container
```bash
make shell
```
### Common Issues
1. **Configuration not found**: Ensure `config/config.yaml` exists or environment variables are set
2. **Permission denied**: Check that the `logs` directory is writable
3. **Connection failed**: Verify vCenter/ESXi connectivity and credentials
4. **Health check failed**: Check if the server is responding on port 8080
### Debug Mode
Run with debug logging:
```bash
# Set in .env file
MCP_LOG_LEVEL=DEBUG
# Or in config.yaml
log_level: "DEBUG"
```
## Production Deployment
### Security Recommendations
1. Use a dedicated user account for vCenter access
2. Enable API key authentication
3. Use valid SSL certificates (set `insecure: false`)
4. Limit container resources
5. Use Docker secrets for sensitive data
### High Availability
For production deployments, consider:
- Running multiple container instances
- Using a load balancer
- Implementing persistent storage for logs
- Setting up monitoring and alerting
## Examples
### Basic Usage
```bash
# Start the server
make run
# Check if it's working
curl http://localhost:8080/sse
```
### API Authentication
```bash
# With API key
curl -H "Authorization: Bearer your-api-key" http://localhost:8080/sse
```
### Development
```bash
# Development workflow
make build
make dev
# Make changes to code
# Rebuild and restart
make update
```

View File

@ -1,6 +1,6 @@
# ESXi MCP Server # ESXi MCP Server
一个基于 MCP (Model Context Protocol) 的 VMware ESXi/vCenter 管理服务器,提供简单的 REST API 接口来管理虚拟机。 一个基于 MCP (Model Context Protocol) 的 VMware ESXi/vCenter 管理服务器,提供简单的 REST API 接口来管理虚拟机。
## 功能特性 ## 功能特性

View File

@ -1,63 +0,0 @@
# ESXi MCP Server Docker Compose Configuration
# Supports dev (hot-reload) and prod modes via COMPOSE_PROFILES
services:
mcvsphere:
build:
context: .
dockerfile: Dockerfile
container_name: mcvsphere
restart: unless-stopped
profiles: ["prod"]
ports:
- "${MCP_PORT:-8080}:8080"
volumes:
- ./logs:/app/logs
env_file:
- .env
environment:
- MCP_TRANSPORT=sse
- MCP_HOST=0.0.0.0
- MCP_PORT=8080
networks:
- mcp-network
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 15s
deploy:
resources:
limits:
memory: 512M
cpus: '0.5'
reservations:
memory: 256M
cpus: '0.25'
# Development mode with hot-reload
mcvsphere-dev:
build:
context: .
dockerfile: Dockerfile.dev
container_name: mcvsphere-dev
profiles: ["dev"]
ports:
- "${MCP_PORT:-8080}:8080"
volumes:
- ./src:/app/src:ro
- ./logs:/app/logs
env_file:
- .env
environment:
- MCP_TRANSPORT=sse
- MCP_HOST=0.0.0.0
- MCP_PORT=8080
- LOG_LEVEL=DEBUG
networks:
- mcp-network
networks:
mcp-network:
driver: bridge

View File

@ -1,79 +0,0 @@
[project]
name = "mcvsphere"
version = "0.2.0"
description = "Model Control for vSphere - AI-driven VMware virtual machine management via MCP"
readme = "README.md"
license = "MIT"
requires-python = ">=3.11"
authors = [
{name = "Ryan Malloy", email = "ryan@supported.systems"},
]
keywords = ["mcp", "vmware", "esxi", "vcenter", "vsphere", "fastmcp", "virtualization", "model-context-protocol"]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: System :: Systems Administration",
]
dependencies = [
"fastmcp>=2.14.1",
"pyvmomi>=8.0",
"pyyaml>=6.0",
"pydantic>=2.0",
"pydantic-settings>=2.0",
]
[project.optional-dependencies]
dev = [
"pytest>=8.0",
"pytest-asyncio>=0.24",
"ruff>=0.8",
]
[project.scripts]
mcvsphere = "mcvsphere:main"
[project.urls]
Homepage = "https://git.supported.systems/MCP/mcvsphere"
Repository = "https://git.supported.systems/MCP/mcvsphere"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src/mcvsphere"]
[tool.ruff]
target-version = "py311"
line-length = 100
src = ["src"]
[tool.ruff.lint]
select = [
"E", # pycodestyle errors
"W", # pycodestyle warnings
"F", # Pyflakes
"I", # isort
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"UP", # pyupgrade
"ARG", # flake8-unused-arguments
"SIM", # flake8-simplify
]
ignore = [
"E501", # line too long (handled by formatter)
"B008", # do not perform function calls in argument defaults
]
[tool.ruff.lint.isort]
known-first-party = ["mcvsphere"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

5
requirements.txt Normal file
View File

@ -0,0 +1,5 @@
pyvmomi>=7.0
pyyaml>=6.0
uvicorn>=0.15.0
mcp
pytest>=7.0.0

View File

@ -1,4 +0,0 @@
#!/bin/bash
# ESXi MCP Server wrapper for Claude Code
cd "$(dirname "$0")"
exec uv run esxi-mcp-server "$@"

720
server.py Normal file
View File

@ -0,0 +1,720 @@
import argparse
import hmac
import json
import logging
import os
import ssl
import time
from dataclasses import dataclass
from typing import Optional, Dict, Any

# MCP protocol related imports
from mcp.server.lowlevel import Server  # MCP server base class
from mcp.server.sse import SseServerTransport  # SSE transport support
from mcp import types  # MCP type definitions

# pyVmomi VMware API imports
from pyVim import connect
from pyVmomi import vim, vmodl
# Configuration data class for storing configuration options
@dataclass
class Config:
    """Runtime configuration for the MCP VMware server.

    Only the three vCenter credentials are required; every other field
    either has a default or triggers auto-selection in VMwareManager.
    """
    vcenter_host: str                 # vCenter/ESXi hostname or IP (required)
    vcenter_user: str                 # Login user, e.g. administrator@vsphere.local (required)
    vcenter_password: str             # Login password (required)
    datacenter: Optional[str] = None  # Datacenter name (optional)
    cluster: Optional[str] = None     # Cluster name (optional)
    datastore: Optional[str] = None   # Datastore name (optional)
    network: Optional[str] = None     # Virtual network name (optional)
    insecure: bool = False            # Whether to skip SSL certificate verification (default: False)
    api_key: Optional[str] = None     # API access key for authentication
    log_file: Optional[str] = None    # Log file path (if not specified, output to console)
    log_level: str = "INFO"           # Log level
# VMware management class, encapsulating pyVmomi operations for vSphere
class VMwareManager:
    """Manages VMware vSphere/ESXi resources through pyVmomi.

    The constructor connects eagerly: it resolves the datacenter, resource
    pool, datastore and optional network named in ``config``, falling back
    to auto-selected defaults when a name is omitted, and raises if any
    named object cannot be found.

    All task-based operations (create/clone/delete/power) wait for the
    vSphere task via :meth:`_wait_for_task`, which sleeps between polls
    instead of busy-spinning on ``task.info.state``.
    """

    # Seconds to sleep between vSphere task-state polls.
    TASK_POLL_INTERVAL = 0.5

    def __init__(self, config: Config):
        self.config = config
        self.si = None              # ServiceInstance handle from SmartConnect
        self.content = None         # vSphere content root
        self.datacenter_obj = None
        self.resource_pool = None
        self.datastore_obj = None
        self.network_obj = None
        self.authenticated = False  # Flipped to True once the API key is verified
        self._connect_vcenter()

    def _connect_vcenter(self):
        """Connect to vCenter/ESXi and retrieve main resource object references."""
        try:
            if self.config.insecure:
                # Connection method without SSL certificate verification
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                context.check_hostname = False  # Disable hostname checking
                context.verify_mode = ssl.CERT_NONE
                self.si = connect.SmartConnect(
                    host=self.config.vcenter_host,
                    user=self.config.vcenter_user,
                    pwd=self.config.vcenter_password,
                    sslContext=context)
            else:
                # Standard SSL verification connection
                self.si = connect.SmartConnect(
                    host=self.config.vcenter_host,
                    user=self.config.vcenter_user,
                    pwd=self.config.vcenter_password)
        except Exception as e:
            logging.error(f"Failed to connect to vCenter/ESXi: {e}")
            raise
        # Retrieve content root object
        self.content = self.si.RetrieveContent()
        logging.info("Successfully connected to VMware vCenter/ESXi API")
        self._resolve_datacenter()
        self._resolve_resource_pool()
        self._resolve_datastore()
        self._resolve_network()

    def _resolve_datacenter(self):
        """Locate the configured datacenter, or default to the first one found."""
        datacenters = [dc for dc in self.content.rootFolder.childEntity
                       if isinstance(dc, vim.Datacenter)]
        if self.config.datacenter:
            self.datacenter_obj = next((dc for dc in datacenters
                                        if dc.name == self.config.datacenter), None)
            if not self.datacenter_obj:
                logging.error(f"Datacenter named {self.config.datacenter} not found")
                raise Exception(f"Datacenter {self.config.datacenter} not found")
        else:
            self.datacenter_obj = datacenters[0] if datacenters else None
            if not self.datacenter_obj:
                raise Exception("No datacenter object found")

    def _resolve_resource_pool(self):
        """Pick the resource pool of the configured cluster, or of the first compute resource."""
        compute_resource = None
        if self.config.cluster:
            for child in self.datacenter_obj.hostFolder.childEntity:
                if isinstance(child, vim.ClusterComputeResource) and child.name == self.config.cluster:
                    compute_resource = child
                    break
            if not compute_resource:
                logging.error(f"Cluster named {self.config.cluster} not found")
                raise Exception(f"Cluster {self.config.cluster} not found")
        else:
            # Default to the first ComputeResource (cluster or standalone host)
            compute_resource = next((cr for cr in self.datacenter_obj.hostFolder.childEntity
                                     if isinstance(cr, vim.ComputeResource)), None)
            if not compute_resource:
                raise Exception("No compute resource (cluster or host) found")
        self.resource_pool = compute_resource.resourcePool
        logging.info(f"Using resource pool: {self.resource_pool.name}")

    def _resolve_datastore(self):
        """Locate the configured datastore, or default to the one with the most free space."""
        if self.config.datastore:
            self.datastore_obj = next((ds for ds in self.datacenter_obj.datastoreFolder.childEntity
                                       if isinstance(ds, vim.Datastore) and ds.name == self.config.datastore), None)
            if not self.datastore_obj:
                logging.error(f"Datastore named {self.config.datastore} not found")
                raise Exception(f"Datastore {self.config.datastore} not found")
        else:
            datastores = [ds for ds in self.datacenter_obj.datastoreFolder.childEntity
                          if isinstance(ds, vim.Datastore)]
            if not datastores:
                raise Exception("No available datastore found in the datacenter")
            # Select the one with the maximum free space
            self.datastore_obj = max(datastores, key=lambda ds: ds.summary.freeSpace)
        logging.info(f"Using datastore: {self.datastore_obj.name}")

    def _resolve_network(self):
        """Locate the configured network (standard or DVS portgroup); None when unset."""
        if self.config.network:
            networks = self.datacenter_obj.networkFolder.childEntity
            self.network_obj = next((net for net in networks if net.name == self.config.network), None)
            if not self.network_obj:
                logging.error(f"Network {self.config.network} not found")
                raise Exception(f"Network {self.config.network} not found")
            logging.info(f"Using network: {self.network_obj.name}")
        else:
            # If no network is specified, VM creation may skip the NIC entirely
            self.network_obj = None

    def _wait_for_task(self, task):
        """Block until the vSphere task finishes; raise its error on failure.

        Sleeps between polls instead of busy-spinning on ``task.info.state``
        (the original loop pegged a CPU core for the entire duration of
        long-running operations such as CreateVM or Clone).
        """
        while task.info.state not in (vim.TaskInfo.State.success, vim.TaskInfo.State.error):
            time.sleep(self.TASK_POLL_INTERVAL)
        if task.info.state == vim.TaskInfo.State.error:
            raise task.info.error

    def list_vms(self) -> list:
        """List all virtual machine names."""
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [vim.VirtualMachine], True)
        try:
            return [vm.name for vm in container.view]
        finally:
            # Always release the server-side view, even if iteration fails.
            container.Destroy()

    def find_vm(self, name: str) -> Optional[vim.VirtualMachine]:
        """Find virtual machine object by name; returns None when absent."""
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [vim.VirtualMachine], True)
        try:
            return next((vm for vm in container.view if vm.name == name), None)
        finally:
            container.Destroy()

    def get_vm_performance(self, vm_name: str) -> Dict[str, Any]:
        """Retrieve performance data (CPU, memory, storage, and network) for the specified VM.

        Raises:
            Exception: if no VM with ``vm_name`` exists.
        """
        vm = self.find_vm(vm_name)
        if not vm:
            raise Exception(f"VM {vm_name} not found")
        stats = {}
        # CPU and memory usage come straight from quickStats.
        qs = vm.summary.quickStats
        stats["cpu_usage"] = qs.overallCpuUsage      # MHz
        stats["memory_usage"] = qs.guestMemoryUsage  # MB
        # Committed storage, converted to GB.
        committed = vm.summary.storage.committed if vm.summary.storage else 0
        stats["storage_usage"] = round(committed / (1024**3), 2)
        # Latest network I/O sample; best-effort only.
        net_tx = 0
        net_rx = 0
        try:
            pm = self.content.perfManager
            # Map counter key -> direction. The original collected keys into a
            # list and assumed index 0 was "transmitted", but perfCounter
            # order is not guaranteed, which could silently swap tx and rx.
            wanted = {"net.transmitted.average": "tx", "net.received.average": "rx"}
            id_to_dir = {}
            for c in pm.perfCounter:
                full_name = f"{c.groupInfo.key}.{c.nameInfo.key}.{c.rollupType}"
                if full_name in wanted:
                    id_to_dir[c.key] = wanted[full_name]
            if id_to_dir:
                metric_ids = [vim.PerformanceManager.MetricId(counterId=cid, instance="*")
                              for cid in id_to_dir]
                query = vim.PerformanceManager.QuerySpec(maxSample=1, entity=vm, metricId=metric_ids)
                stats_res = pm.QueryStats(querySpec=[query])
                for series in stats_res[0].value:
                    # Sum data from each network interface
                    direction = id_to_dir.get(series.id.counterId)
                    if direction == "tx":
                        net_tx = sum(series.value)
                    elif direction == "rx":
                        net_rx = sum(series.value)
            stats["network_transmit_KBps"] = net_tx
            stats["network_receive_KBps"] = net_rx
        except Exception as e:
            # If obtaining performance counters fails, log but do not terminate.
            logging.warning(f"Failed to retrieve network performance data: {e}")
            stats["network_transmit_KBps"] = None
            stats["network_receive_KBps"] = None
        return stats

    def create_vm(self, name: str, cpus: int, memory_mb: int, datastore: Optional[str] = None, network: Optional[str] = None) -> str:
        """Create a new virtual machine (from scratch, with an empty disk and optional network).

        Args:
            name: Name for the new VM.
            cpus: Number of virtual CPUs.
            memory_mb: Memory size in MB.
            datastore: Optional datastore name overriding the manager default.
            network: Optional network name overriding the manager default.

        Returns:
            A confirmation string.
        """
        # Resolve per-call datastore/network overrides.
        datastore_obj = self.datastore_obj
        network_obj = self.network_obj
        if datastore:
            datastore_obj = next((ds for ds in self.datacenter_obj.datastoreFolder.childEntity
                                  if isinstance(ds, vim.Datastore) and ds.name == datastore), None)
            if not datastore_obj:
                raise Exception(f"Specified datastore {datastore} not found")
        if network:
            networks = self.datacenter_obj.networkFolder.childEntity
            network_obj = next((net for net in networks if net.name == network), None)
            if not network_obj:
                raise Exception(f"Specified network {network} not found")
        # Build VM configuration specification (guestId can be adjusted as needed)
        vm_spec = vim.vm.ConfigSpec(name=name, memoryMB=memory_mb, numCPUs=cpus, guestId="otherGuest")
        device_specs = []
        # Add a ParaVirtual SCSI controller with a temporary negative key so the
        # disk spec below can reference it before the VM exists.
        controller_spec = vim.vm.device.VirtualDeviceSpec()
        controller_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        controller_spec.device = vim.vm.device.ParaVirtualSCSIController()
        controller_spec.device.deviceInfo = vim.Description(label="SCSI Controller", summary="ParaVirtual SCSI Controller")
        controller_spec.device.busNumber = 0
        controller_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
        controller_spec.device.key = -101
        device_specs.append(controller_spec)
        # Add a thin-provisioned 10 GB disk attached to that controller.
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.capacityInKB = 1024 * 1024 * 10  # 10 GB
        disk_spec.device.deviceInfo = vim.Description(label="Hard Disk 1", summary="10 GB disk")
        disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = "persistent"
        disk_spec.device.backing.thinProvisioned = True  # Thin provisioning
        disk_spec.device.backing.datastore = datastore_obj
        disk_spec.device.controllerKey = controller_spec.device.key
        disk_spec.device.unitNumber = 0
        device_specs.append(disk_spec)
        # If a network is available, add a VMXNET3 network adapter.
        if network_obj:
            nic_spec = vim.vm.device.VirtualDeviceSpec()
            nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            nic_spec.device = vim.vm.device.VirtualVmxnet3()
            nic_spec.device.deviceInfo = vim.Description(label="Network Adapter 1", summary=network_obj.name)
            if isinstance(network_obj, vim.Network):
                nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(network=network_obj, deviceName=network_obj.name)
            elif isinstance(network_obj, vim.dvs.DistributedVirtualPortgroup):
                # Distributed virtual switch portgroup
                dvs_uuid = network_obj.config.distributedVirtualSwitch.uuid
                port_key = network_obj.key
                nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo(
                    port=vim.dvs.PortConnection(portgroupKey=port_key, switchUuid=dvs_uuid)
                )
            nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo(startConnected=True, allowGuestControl=True)
            device_specs.append(nic_spec)
        vm_spec.deviceChange = device_specs
        # Place the VM in the datacenter's default vmFolder.
        vm_folder = self.datacenter_obj.vmFolder
        try:
            task = vm_folder.CreateVM_Task(config=vm_spec, pool=self.resource_pool)
            self._wait_for_task(task)
        except Exception as e:
            logging.error(f"Failed to create virtual machine: {e}")
            raise
        logging.info(f"Virtual machine created: {name}")
        return f"VM '{name}' created."

    def clone_vm(self, template_name: str, new_name: str) -> str:
        """Clone a new virtual machine from an existing template or VM."""
        template_vm = self.find_vm(template_name)
        if not template_vm:
            raise Exception(f"Template virtual machine {template_name} not found")
        # Place the new VM in the same folder as the template when possible.
        vm_folder = template_vm.parent
        if not isinstance(vm_folder, vim.Folder):
            vm_folder = self.datacenter_obj.vmFolder
        # Use the resource pool of the host/cluster where the template is located
        resource_pool = template_vm.resourcePool or self.resource_pool
        relocate_spec = vim.vm.RelocateSpec(pool=resource_pool, datastore=self.datastore_obj)
        clone_spec = vim.vm.CloneSpec(powerOn=False, template=False, location=relocate_spec)
        try:
            task = template_vm.Clone(folder=vm_folder, name=new_name, spec=clone_spec)
            self._wait_for_task(task)
        except Exception as e:
            logging.error(f"Failed to clone virtual machine: {e}")
            raise
        logging.info(f"Cloned virtual machine {template_name} to new VM: {new_name}")
        return f"VM '{new_name}' cloned from '{template_name}'."

    def delete_vm(self, name: str) -> str:
        """Delete the specified virtual machine."""
        vm = self.find_vm(name)
        if not vm:
            raise Exception(f"Virtual machine {name} not found")
        try:
            task = vm.Destroy_Task()
            self._wait_for_task(task)
        except Exception as e:
            logging.error(f"Failed to delete virtual machine: {e}")
            raise
        logging.info(f"Virtual machine deleted: {name}")
        return f"VM '{name}' deleted."

    def power_on_vm(self, name: str) -> str:
        """Power on the specified virtual machine (no-op if already on)."""
        vm = self.find_vm(name)
        if not vm:
            raise Exception(f"Virtual machine {name} not found")
        if vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOn:
            return f"VM '{name}' is already powered on."
        task = vm.PowerOnVM_Task()
        self._wait_for_task(task)
        logging.info(f"Virtual machine powered on: {name}")
        return f"VM '{name}' powered on."

    def power_off_vm(self, name: str) -> str:
        """Power off the specified virtual machine (no-op if already off)."""
        vm = self.find_vm(name)
        if not vm:
            raise Exception(f"Virtual machine {name} not found")
        if vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOff:
            return f"VM '{name}' is already powered off."
        task = vm.PowerOffVM_Task()
        self._wait_for_task(task)
        logging.info(f"Virtual machine powered off: {name}")
        return f"VM '{name}' powered off."
# ---------------- MCP Server Definition ----------------
# Initialize MCP Server object
# NOTE(review): mcp.server.lowlevel.Server is constructed positionally as
# Server(name) in some SDK versions; confirm the name=/version= keyword
# signature against the installed `mcp` package.
mcp_server = Server(name="VMware-MCP-Server", version="0.0.1")
# Define supported tools (executable operations) and resources (data interfaces)
# The implementation of tools and resources will call methods in VMwareManager
# Note: For each operation, perform API key authentication check, and only execute sensitive operations if the authenticated flag is True
# If not authenticated, an exception is raised
# Tool 1: Authentication (via API Key)
def tool_authenticate(key: str) -> str:
    """Validate the API key and enable subsequent operations upon success.

    Uses hmac.compare_digest instead of `==` so the comparison takes the
    same time regardless of how many leading characters match, closing a
    timing side channel on the API key.

    Raises:
        Exception: when no API key is configured or the key does not match.
    """
    if config.api_key and hmac.compare_digest(key, config.api_key):
        manager.authenticated = True
        logging.info("API key verification successful, client is authorized")
        return "Authentication successful."
    logging.warning("API key verification failed")
    raise Exception("Authentication failed: invalid API key.")
# Tool 2: Create virtual machine
def tool_create_vm(name: str, cpu: int, memory: int, datastore: Optional[str] = None, network: Optional[str] = None) -> str:
    """Create a new virtual machine.

    Args:
        name: Name of the VM to create.
        cpu: Number of virtual CPUs.
        memory: Memory size in MB.
        datastore: Optional datastore name; manager default when omitted.
        network: Optional network name; manager default when omitted.
    """
    _check_auth()  # Check access permissions
    return manager.create_vm(name, cpu, memory, datastore, network)
# Tool 3: Clone virtual machine
def tool_clone_vm(template_name: str, new_name: str) -> str:
    """Clone a virtual machine from a template or existing VM.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.clone_vm(template_name, new_name)
# Tool 4: Delete virtual machine
def tool_delete_vm(name: str) -> str:
    """Delete the specified virtual machine.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.delete_vm(name)
# Tool 5: Power on virtual machine
def tool_power_on(name: str) -> str:
    """Power on the specified virtual machine.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.power_on_vm(name)
# Tool 6: Power off virtual machine
def tool_power_off(name: str) -> str:
    """Power off the specified virtual machine.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.power_off_vm(name)
# Tool 7: List all virtual machines
def tool_list_vms() -> list:
    """Return a list of all virtual machine names.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.list_vms()
# Resource 1: Retrieve virtual machine performance data
def resource_vm_performance(vm_name: str) -> dict:
    """Retrieve CPU, memory, storage, and network usage for the specified virtual machine.

    Requires prior authentication when an API key is configured.
    """
    _check_auth()
    return manager.get_vm_performance(vm_name)
# Internal helper: Check API access permissions
def _check_auth():
    """Raise unless the client has authenticated (only enforced when an API key is configured)."""
    if config.api_key and not manager.authenticated:
        raise Exception("Unauthorized: API key required.")
# Register the above functions as tools and resources for the MCP Server
# Encapsulate using mcp.types.Tool and mcp.types.Resource
# NOTE(review): the official `mcp` SDK's types.Tool is a pydantic model with
# name/description/inputSchema fields; the extra `parameters=` and `handler=`
# keyword arguments used below may be rejected by strict model validation —
# confirm against the installed mcp version.
tools = {
    "authenticate": types.Tool(
        name="authenticate",
        description="Authenticate using API key to enable privileged operations",
        parameters={"key": str},
        handler=lambda params: tool_authenticate(**params),
        inputSchema={"type": "object", "properties": {"key": {"type": "string"}}, "required": ["key"]}
    ),
    "createVM": types.Tool(
        name="createVM",
        description="Create a new virtual machine",
        parameters={"name": str, "cpu": int, "memory": int, "datastore": Optional[str], "network": Optional[str]},
        handler=lambda params: tool_create_vm(**params),
        inputSchema={
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "cpu": {"type": "integer"},
                "memory": {"type": "integer"},
                "datastore": {"type": "string", "nullable": True},
                "network": {"type": "string", "nullable": True}
            },
            "required": ["name", "cpu", "memory"]
        }
    ),
    "cloneVM": types.Tool(
        name="cloneVM",
        description="Clone a virtual machine from a template or existing VM",
        parameters={"template_name": str, "new_name": str},
        handler=lambda params: tool_clone_vm(**params),
        inputSchema={
            "type": "object",
            "properties": {
                "template_name": {"type": "string"},
                "new_name": {"type": "string"}
            },
            "required": ["template_name", "new_name"]
        }
    ),
    "deleteVM": types.Tool(
        name="deleteVM",
        description="Delete a virtual machine",
        parameters={"name": str},
        handler=lambda params: tool_delete_vm(**params),
        inputSchema={
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"]
        }
    ),
    "powerOn": types.Tool(
        name="powerOn",
        description="Power on a virtual machine",
        parameters={"name": str},
        handler=lambda params: tool_power_on(**params),
        inputSchema={
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"]
        }
    ),
    "powerOff": types.Tool(
        name="powerOff",
        description="Power off a virtual machine",
        parameters={"name": str},
        handler=lambda params: tool_power_off(**params),
        inputSchema={
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"]
        }
    ),
    "listVMs": types.Tool(
        name="listVMs",
        description="List all virtual machines",
        parameters={},
        handler=lambda params: tool_list_vms(),
        inputSchema={"type": "object", "properties": {}}
    )
}
resources = {
    "vmStats": types.Resource(
        name="vmStats",
        uri="vmstats://{vm_name}",
        description="Get CPU, memory, storage, network usage of a VM",
        parameters={"vm_name": str},
        handler=lambda params: resource_vm_performance(**params),
        inputSchema={
            "type": "object",
            "properties": {
                "vm_name": {"type": "string"}
            },
            "required": ["vm_name"]
        }
    )
}
# Add tools and resources to the MCP Server object
# NOTE(review): attaching Tool/Resource objects via setattr is not how the
# lowlevel Server discovers handlers (it uses list_tools/call_tool
# decorators); verify these registrations are actually picked up.
for name, tool in tools.items():
    setattr(mcp_server, f"tool_{name}", tool)
for name, res in resources.items():
    setattr(mcp_server, f"resource_{name}", res)
# Set the MCP Server capabilities, declaring that the tools and resources list is available
mcp_server.capabilities = {
    "tools": {"listChanged": True},
    "resources": {"listChanged": True}
}
# Maintain a global SSE transport instance for sending events during POST request processing
active_transport: Optional[SseServerTransport] = None

# SSE initialization request handler (HTTP GET /sse)
async def sse_endpoint(scope, receive, send):
    """Handle SSE connection initialization requests. Establish an MCP SSE session.

    When an API key is configured, the client must present it either as
    'Authorization: Bearer <key>' or in an 'X-API-Key' header; otherwise
    the connection is rejected with 401.
    """
    global active_transport
    # Construct response headers to establish an event stream
    headers = [(b"content-type", b"text/event-stream")]
    # ASGI supplies headers as a list of (name, value) byte tuples; normalize
    # to a lower-cased str dict for lookups.
    headers_dict = {k.lower().decode(): v.decode() for (k, v) in scope.get("headers", [])}
    # BUG FIX: the original tested `b"authorization" in scope["headers"]`,
    # i.e. membership against the list of (name, value) tuples — never true,
    # so provided_key stayed None and EVERY connection was rejected whenever
    # an API key was configured.
    provided_key = headers_dict.get("authorization") or headers_dict.get("x-api-key")
    if config.api_key and provided_key != f"Bearer {config.api_key}" and provided_key != config.api_key:
        # If the correct API key is not provided, return 401
        await send({"type": "http.response.start", "status": 401, "headers": [(b"content-type", b"text/plain")]})
        await send({"type": "http.response.body", "body": b"Unauthorized"})
        logging.warning("No valid API key provided, rejecting SSE connection")
        return
    # Establish SSE transport and connect to the MCP Server
    active_transport = SseServerTransport("/sse/messages")
    logging.info("Established new SSE session")
    # Send SSE response headers to the client, preparing to start sending events
    await send({"type": "http.response.start", "status": 200, "headers": headers})
    try:
        async with active_transport.connect_sse(scope, receive, send) as (read_stream, write_stream):
            init_opts = mcp_server.create_initialization_options()
            # Run MCP Server, passing the read/write streams to the Server
            await mcp_server.run(read_stream, write_stream, init_opts)
    except Exception as e:
        logging.error(f"SSE session encountered an error: {e}")
    finally:
        active_transport = None
    # SSE session ended, send an empty message to indicate completion
    await send({"type": "http.response.body", "body": b"", "more_body": False})
# JSON-RPC message handler (HTTP POST /sse/messages)
async def messages_endpoint(scope, receive, send):
    """Handle JSON-RPC requests sent by the client (via POST).

    Reads the full ASGI request body, validates it is JSON, and forwards it
    to the currently active SSE transport. Returns 400 when the body is not
    JSON or no SSE session is active, 500 when the transport raises.
    """
    global active_transport
    # Read request body data (may arrive in multiple http.request events)
    body_bytes = b''
    more_body = True
    while more_body:
        event = await receive()
        if event["type"] == "http.request":
            body_bytes += event.get("body", b'')
            more_body = event.get("more_body", False)
    # Parse JSON-RPC request — the parsed `msg` is only used for validation;
    # the raw bytes are handed to the transport below.
    try:
        body_str = body_bytes.decode('utf-8')
        msg = json.loads(body_str)
    except Exception as e:
        logging.error(f"JSON parsing failed: {e}")
        await send({"type": "http.response.start", "status": 400,
                    "headers": [(b"content-type", b"text/plain")]})
        await send({"type": "http.response.body", "body": b"Invalid JSON"})
        return
    # Only accept requests sent through an established SSE transport
    if not active_transport:
        await send({"type": "http.response.start", "status": 400,
                    "headers": [(b"content-type", b"text/plain")]})
        await send({"type": "http.response.body", "body": b"No active session"})
        return
    # Pass the POST request content to active_transport to trigger the corresponding MCP Server operation
    try:
        # Handle the POST message through SseServerTransport, which injects the request into the MCP session
        # NOTE(review): the mcp SDK's SseServerTransport exposes
        # handle_post_message(scope, receive, send); confirm that a
        # handle_post(scope, body) method exists in the installed version.
        await active_transport.handle_post(scope, body_bytes)
        status = 200
        response_body = b""
    except Exception as e:
        logging.error(f"Error handling POST message: {e}")
        status = 500
        response_body = str(e).encode('utf-8')
    # Reply to the client with HTTP status
    await send({"type": "http.response.start", "status": status,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": response_body})
# Simple ASGI application routing: dispatch requests to the appropriate handler based on the path and method
async def app(scope, receive, send):
    """Minimal ASGI router for the MCP SSE endpoints.

    Routes GET /sse to the SSE handler, POST/OPTIONS /sse/messages to the
    JSON-RPC handler (answering CORS preflight inline), and everything
    else with 404. Non-HTTP scopes are ignored.
    """
    if scope["type"] != "http":
        # Ignore non-HTTP events (lifespan, websocket, ...)
        return
    path = scope.get("path", "")
    method = scope.get("method", "").upper()
    if path == "/sse" and method == "GET":
        # SSE initialization request
        await sse_endpoint(scope, receive, send)
        return
    if path == "/sse/messages" and method == "OPTIONS":
        # CORS preflight: advertise allowed methods/headers
        cors_headers = [
            (b"access-control-allow-methods", b"POST, OPTIONS"),
            (b"access-control-allow-headers", b"Content-Type, Authorization, X-API-Key"),
            (b"access-control-allow-origin", b"*")
        ]
        await send({"type": "http.response.start", "status": 204, "headers": cors_headers})
        await send({"type": "http.response.body", "body": b""})
        return
    if path == "/sse/messages" and method == "POST":
        # JSON-RPC message request
        await messages_endpoint(scope, receive, send)
        return
    # Route not found
    await send({"type": "http.response.start", "status": 404,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": b"Not Found"})
# Parse command-line arguments and environment variables, and load configuration
parser = argparse.ArgumentParser(description="MCP VMware ESXi Management Server")
parser.add_argument("--config", "-c", help="Configuration file path (JSON or YAML)", default=None)
args = parser.parse_args()
# Attempt to load configuration from a file or environment variables
config_data = {}
config_path = args.config or os.environ.get("MCP_CONFIG_FILE")
if config_path:
    # Parse JSON or YAML based on the file extension
    # NOTE(review): yaml.safe_load returns None for an empty file, which
    # would make the env-override assignments below raise TypeError —
    # confirm config files are always non-empty mappings.
    if config_path.endswith((".yml", ".yaml")):
        import yaml
        with open(config_path, 'r') as f:
            config_data = yaml.safe_load(f)
    elif config_path.endswith(".json"):
        with open(config_path, 'r') as f:
            config_data = json.load(f)
    else:
        raise ValueError("Unsupported configuration file format. Please use JSON or YAML")
# Override configuration from environment variables (higher priority than file)
env_map = {
    "VCENTER_HOST": "vcenter_host",
    "VCENTER_USER": "vcenter_user",
    "VCENTER_PASSWORD": "vcenter_password",
    "VCENTER_DATACENTER": "datacenter",
    "VCENTER_CLUSTER": "cluster",
    "VCENTER_DATASTORE": "datastore",
    "VCENTER_NETWORK": "network",
    "VCENTER_INSECURE": "insecure",
    "MCP_API_KEY": "api_key",
    "MCP_LOG_FILE": "log_file",
    "MCP_LOG_LEVEL": "log_level"
}
for env_key, cfg_key in env_map.items():
    if env_key in os.environ:
        val = os.environ[env_key]
        # Boolean type conversion
        if cfg_key == "insecure":
            config_data[cfg_key] = val.lower() in ("1", "true", "yes")
        else:
            config_data[cfg_key] = val
# Construct Config object from config_data; fail fast on missing credentials
required_keys = ["vcenter_host", "vcenter_user", "vcenter_password"]
for k in required_keys:
    if k not in config_data or not config_data[k]:
        raise Exception(f"Missing required configuration item: {k}")
config = Config(**config_data)
# Initialize logging
log_level = getattr(logging, config.log_level.upper(), logging.INFO)
logging.basicConfig(level=log_level,
                    format="%(asctime)s [%(levelname)s] %(message)s",
                    filename=config.log_file if config.log_file else None)
if not config.log_file:
    # If no log file is specified, output logs to the console
    logging.getLogger().addHandler(logging.StreamHandler())
logging.info("Starting VMware ESXi Management MCP Server...")
# Create VMware Manager instance and connect (raises if connection fails)
manager = VMwareManager(config)
# If an API key is configured, prompt that authentication is required before invoking sensitive operations
if config.api_key:
    logging.info("API key authentication is enabled. Clients must call the authenticate tool to verify the key before invoking sensitive operations")
# Start ASGI server to listen for MCP SSE connections
if __name__ == "__main__":
    # Start ASGI application using the built-in uvicorn server (listening on 0.0.0.0:8080)
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8080)

View File

@ -1,140 +0,0 @@
"""FastMCP server setup for ESXi MCP Server."""
import logging
import sys
from pathlib import Path
from fastmcp import FastMCP
from esxi_mcp_server.config import Settings, get_settings
from esxi_mcp_server.connection import VMwareConnection
from esxi_mcp_server.mixins import (
ConsoleMixin,
DiskManagementMixin,
GuestOpsMixin,
HostManagementMixin,
MonitoringMixin,
NICManagementMixin,
OVFManagementMixin,
PowerOpsMixin,
ResourcesMixin,
SerialPortMixin,
SnapshotsMixin,
VCenterOpsMixin,
VMLifecycleMixin,
)
logger = logging.getLogger(__name__)
def create_server(settings: Settings | None = None) -> FastMCP:
    """Create and configure the FastMCP server.

    Args:
        settings: Optional settings instance. If not provided, will load from
            environment variables and/or config file.

    Returns:
        Configured FastMCP server instance with VMware tools registered.
    """
    if settings is None:
        settings = get_settings()
    # Configure logging - MUST go to stderr for stdio transport compatibility
    log_level = getattr(logging, settings.log_level.upper(), logging.INFO)
    # For stdio mode, suppress most logging to avoid interference
    if settings.mcp_transport == "stdio":
        log_level = logging.WARNING
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        stream=sys.stderr,  # Explicitly use stderr
    )
    # Create FastMCP server
    mcp = FastMCP(
        name="ESXi MCP Server",
        instructions=(
            "VMware ESXi/vCenter management server via Model Context Protocol. "
            "Provides tools for VM lifecycle management, power operations, "
            "snapshots, guest OS operations, monitoring, and infrastructure resources."
        ),
    )
    # Create shared VMware connection
    logger.info("Connecting to VMware vCenter/ESXi...")
    conn = VMwareConnection(settings)
    # Create and register all mixins (each contributes a group of tools/resources)
    mixins = [
        VMLifecycleMixin(conn),
        PowerOpsMixin(conn),
        SnapshotsMixin(conn),
        MonitoringMixin(conn),
        GuestOpsMixin(conn),
        ResourcesMixin(conn),
        DiskManagementMixin(conn),
        NICManagementMixin(conn),
        OVFManagementMixin(conn),
        HostManagementMixin(conn),
        VCenterOpsMixin(conn),
        ConsoleMixin(conn),
        SerialPortMixin(conn),
    ]
    tool_count = 0
    resource_count = 0
    for mixin in mixins:
        mixin.register_all(mcp)
        tool_count += len(getattr(mixin, "_mcp_tools", []))
        resource_count += len(getattr(mixin, "_mcp_resources", []))
    # Get actual counts from MCP server
    # NOTE(review): _tool_manager/_resource_manager are private FastMCP
    # attributes and may change between fastmcp versions.
    actual_tools = len(mcp._tool_manager._tools)
    actual_resources = len(mcp._resource_manager._resources)
    logger.info(
        "ESXi MCP Server ready - %d tools, %d resources registered",
        actual_tools,
        actual_resources,
    )
    return mcp
def run_server(config_path: Path | None = None) -> None:
    """Run the ESXi MCP server.

    Args:
        config_path: Optional path to YAML/JSON config file.
    """
    # Load settings
    settings = Settings.from_yaml(config_path) if config_path else get_settings()
    # Only print banner for SSE mode (stdio must stay clean for JSON-RPC)
    if settings.mcp_transport == "sse":
        try:
            from importlib.metadata import version
            package_version = version("esxi-mcp-server")
        except Exception:
            package_version = "dev"
        print(f"ESXi MCP Server v{package_version}", file=sys.stderr)
        # NOTE(review): this prints an empty line — the separator string
        # appears garbled by extraction; presumably it was a rule such as
        # "-" * 40. Confirm against the original source.
        print("" * 40, file=sys.stderr)
        print(
            f"Starting SSE transport on {settings.mcp_host}:{settings.mcp_port}",
            file=sys.stderr,
        )
    # Create and run server
    mcp = create_server(settings)
    if settings.mcp_transport == "sse":
        mcp.run(transport="sse", host=settings.mcp_host, port=settings.mcp_port)
    else:
        # stdio mode - suppress banner to keep stdout clean for JSON-RPC
        mcp.run(show_banner=False)

View File

@ -1,73 +0,0 @@
"""ESXi MCP Server - VMware vSphere management via Model Context Protocol.
This package provides an MCP server for managing VMware ESXi/vCenter
virtual machines through AI assistants like Claude.
"""
import argparse
from pathlib import Path
from mcvsphere.config import Settings, get_settings
from mcvsphere.connection import VMwareConnection
from mcvsphere.server import create_server, run_server
__all__ = [
"Settings",
"get_settings",
"VMwareConnection",
"create_server",
"run_server",
"main",
]
def main() -> None:
    """Entry point for the mcvsphere CLI.

    Parses command-line arguments, loads configuration, and starts the
    MCP server via run_server().
    """
    parser = argparse.ArgumentParser(
        description="ESXi MCP Server - VMware vSphere management via MCP"
    )
    parser.add_argument(
        "--config",
        "-c",
        type=Path,
        help="Path to configuration file (YAML or JSON)",
        default=None,
    )
    parser.add_argument(
        "--transport",
        "-t",
        choices=["stdio", "sse"],
        help="MCP transport type (default: stdio)",
        default=None,
    )
    parser.add_argument(
        "--host",
        help="Host to bind SSE server (default: 0.0.0.0)",
        default=None,
    )
    parser.add_argument(
        "--port",
        "-p",
        type=int,
        help="Port for SSE server (default: 8080)",
        default=None,
    )
    args = parser.parse_args()
    # Load base settings
    settings = Settings.from_yaml(args.config) if args.config else get_settings()
    # Override with CLI args
    if args.transport:
        settings = settings.model_copy(update={"mcp_transport": args.transport})
    if args.host:
        settings = settings.model_copy(update={"mcp_host": args.host})
    if args.port:
        settings = settings.model_copy(update={"mcp_port": args.port})
    # NOTE(review): the overridden `settings` built above is never used —
    # run_server() re-loads settings from `args.config` (or the environment)
    # on its own, so --transport/--host/--port are silently discarded.
    # run_server would need to accept a Settings instance for these CLI
    # overrides to take effect; confirm intended behavior and fix.
    run_server(args.config)
if __name__ == "__main__":
main()

View File

@ -1,108 +0,0 @@
"""Configuration management using pydantic-settings."""
from functools import lru_cache
from pathlib import Path
from typing import Literal
import yaml
from pydantic import Field, SecretStr, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """ESXi MCP Server configuration.

    Settings are loaded from (in order of precedence):
        1. Environment variables (highest priority)
        2. Config file (YAML/JSON)
        3. Default values

    NOTE(review): pydantic-settings actually gives *init kwargs* the highest
    priority by default, so values passed to ``Settings(**data)`` (as done by
    :meth:`from_yaml`) override environment variables — the precedence list
    above may not hold in practice; confirm against pydantic-settings docs.
    """

    model_config = SettingsConfigDict(
        env_prefix="",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    # vCenter/ESXi connection settings (all three are required)
    vcenter_host: str = Field(description="vCenter or ESXi server hostname/IP")
    vcenter_user: str = Field(description="Login username")
    vcenter_password: SecretStr = Field(description="Login password")

    # Optional VMware settings
    vcenter_datacenter: str | None = Field(
        default=None, description="Datacenter name (auto-selects first if not specified)"
    )
    vcenter_cluster: str | None = Field(
        default=None, description="Cluster name (auto-selects first if not specified)"
    )
    vcenter_datastore: str | None = Field(
        default=None, description="Datastore name (auto-selects largest if not specified)"
    )
    vcenter_network: str = Field(default="VM Network", description="Default network for VMs")
    vcenter_insecure: bool = Field(default=False, description="Skip SSL certificate verification")

    # MCP server settings
    mcp_api_key: SecretStr | None = Field(
        default=None, description="API key for authentication (optional)"
    )
    mcp_host: str = Field(default="0.0.0.0", description="Server bind address")
    mcp_port: int = Field(default=8080, description="Server port")
    mcp_transport: Literal["stdio", "sse"] = Field(
        default="stdio", description="MCP transport type"
    )

    # Logging settings
    log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = Field(
        default="INFO", description="Logging level"
    )
    log_file: Path | None = Field(
        default=None, description="Log file path (logs to console if not specified)"
    )

    @field_validator("vcenter_insecure", mode="before")
    @classmethod
    def parse_bool(cls, v: str | bool | int) -> bool:
        """Coerce common truthy spellings ("true", "1", "yes", "on") to bool.

        Robustness fix: YAML may supply non-string scalars (e.g. ``insecure: 1``
        parses as an int), which previously crashed on ``v.lower()``. Any
        non-bool value is now stringified before comparison.
        """
        if isinstance(v, bool):
            return v
        return str(v).strip().lower() in ("true", "1", "yes", "on")

    @classmethod
    def from_yaml(cls, path: Path) -> "Settings":
        """Load settings from a YAML file, merging with environment defaults.

        Args:
            path: Path to the YAML config file.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")
        with path.open() as f:
            config_data = yaml.safe_load(f) or {}
        # Map old config keys to new naming convention; identity entries keep
        # support for files that already use the new names.
        key_mapping = {
            "vcenter_host": "vcenter_host",
            "vcenter_user": "vcenter_user",
            "vcenter_password": "vcenter_password",
            "datacenter": "vcenter_datacenter",
            "cluster": "vcenter_cluster",
            "datastore": "vcenter_datastore",
            "network": "vcenter_network",
            "insecure": "vcenter_insecure",
            "api_key": "mcp_api_key",
            "log_file": "log_file",
            "log_level": "log_level",
        }
        mapped_data = {}
        for old_key, new_key in key_mapping.items():
            if old_key in config_data:
                mapped_data[new_key] = config_data[old_key]
        return cls(**mapped_data)
@lru_cache
def get_settings(config_path: Path | None = None) -> Settings:
    """Return a cached Settings instance.

    When *config_path* is given, settings are loaded from that YAML file;
    otherwise they come from environment variables / .env. Results are
    memoized per path via ``lru_cache``.
    """
    if config_path is None:
        return Settings()
    return Settings.from_yaml(config_path)

View File

@ -1,217 +0,0 @@
"""VMware vSphere connection management."""
import logging
import ssl
from typing import TYPE_CHECKING
from pyVim import connect
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.config import Settings
logger = logging.getLogger(__name__)
class VMwareConnection:
    """Shared VMware vSphere connection for all MCP mixins.

    This class manages the connection to vCenter/ESXi and provides
    common helper methods used across all operation categories.
    """

    def __init__(self, settings: "Settings"):
        """Store settings and connect immediately.

        Args:
            settings: Validated server configuration (host, credentials, etc.).

        Raises:
            ConnectionError: If the vCenter/ESXi connection cannot be made.
            ValueError: If a configured datacenter/cluster/datastore is missing.
        """
        self.settings = settings
        self.si: vim.ServiceInstance | None = None
        self.content: vim.ServiceContent | None = None
        self.datacenter: vim.Datacenter | None = None
        self.resource_pool: vim.ResourcePool | None = None
        self.datastore: vim.Datastore | None = None
        self.network: vim.Network | None = None
        self._connect()

    def _connect(self) -> None:
        """Establish connection to vCenter/ESXi and resolve default objects."""
        try:
            if self.settings.vcenter_insecure:
                # Caller opted out of certificate verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
                self.si = connect.SmartConnect(
                    host=self.settings.vcenter_host,
                    user=self.settings.vcenter_user,
                    pwd=self.settings.vcenter_password.get_secret_value(),
                    sslContext=context,
                )
            else:
                self.si = connect.SmartConnect(
                    host=self.settings.vcenter_host,
                    user=self.settings.vcenter_user,
                    pwd=self.settings.vcenter_password.get_secret_value(),
                )
        except Exception as e:
            logger.exception("Failed to connect to vCenter/ESXi")
            raise ConnectionError(f"Failed to connect to vCenter/ESXi: {e}") from e
        self.content = self.si.RetrieveContent()
        logger.info("Connected to VMware vCenter/ESXi at %s", self.settings.vcenter_host)
        # Resolve default inventory objects up-front so tools can rely on them.
        self._setup_datacenter()
        self._setup_compute_resource()
        self._setup_datastore()
        self._setup_network()

    def _setup_datacenter(self) -> None:
        """Find and configure the target datacenter.

        Uses the configured name when given, otherwise the first datacenter.

        Raises:
            ValueError: If the named (or any) datacenter cannot be found.
        """
        datacenters = [
            dc for dc in self.content.rootFolder.childEntity if isinstance(dc, vim.Datacenter)
        ]
        if self.settings.vcenter_datacenter:
            self.datacenter = next(
                (dc for dc in datacenters if dc.name == self.settings.vcenter_datacenter),
                None,
            )
            if not self.datacenter:
                raise ValueError(f"Datacenter '{self.settings.vcenter_datacenter}' not found")
        else:
            self.datacenter = datacenters[0] if datacenters else None
            if not self.datacenter:
                raise ValueError("No datacenter found in vSphere inventory")
        logger.info("Using datacenter: %s", self.datacenter.name)

    def _setup_compute_resource(self) -> None:
        """Find and configure compute resource (cluster or host).

        Raises:
            ValueError: If the named cluster (or any compute resource) is missing.
        """
        compute_resources = [
            cr
            for cr in self.datacenter.hostFolder.childEntity
            if isinstance(cr, vim.ComputeResource)
        ]
        if self.settings.vcenter_cluster:
            compute_resource = next(
                (
                    cr
                    for cr in compute_resources
                    if isinstance(cr, vim.ClusterComputeResource)
                    and cr.name == self.settings.vcenter_cluster
                ),
                None,
            )
            if not compute_resource:
                raise ValueError(f"Cluster '{self.settings.vcenter_cluster}' not found")
        else:
            compute_resource = compute_resources[0] if compute_resources else None
            if not compute_resource:
                raise ValueError("No compute resource (cluster or host) found")
        # VMs are deployed into the compute resource's root resource pool.
        self.resource_pool = compute_resource.resourcePool
        logger.info("Using resource pool: %s", self.resource_pool.name)

    def _setup_datastore(self) -> None:
        """Find and configure the target datastore.

        Falls back to the datastore with the most free space when no name
        is configured.

        Raises:
            ValueError: If no (matching) datastore exists.
        """
        datastores = [
            ds
            for ds in self.datacenter.datastoreFolder.childEntity
            if isinstance(ds, vim.Datastore)
        ]
        if not datastores:
            raise ValueError("No datastore found in datacenter")
        if self.settings.vcenter_datastore:
            self.datastore = next(
                (ds for ds in datastores if ds.name == self.settings.vcenter_datastore),
                None,
            )
            if not self.datastore:
                raise ValueError(f"Datastore '{self.settings.vcenter_datastore}' not found")
        else:
            self.datastore = max(datastores, key=lambda ds: ds.summary.freeSpace)
        logger.info("Using datastore: %s", self.datastore.name)

    def _setup_network(self) -> None:
        """Find and configure the target network.

        A missing network is only a warning (not fatal): tools that need a
        network can still look one up by name later.
        """
        if not self.settings.vcenter_network:
            self.network = None
            return
        networks = self.datacenter.networkFolder.childEntity
        self.network = next(
            (net for net in networks if net.name == self.settings.vcenter_network),
            None,
        )
        if self.network:
            logger.info("Using network: %s", self.network.name)
        else:
            logger.warning("Network '%s' not found", self.settings.vcenter_network)

    # ─────────────────────────────────────────────────────────────────────────────
    # Helper Methods (shared across mixins)
    # ─────────────────────────────────────────────────────────────────────────────
    def find_vm(self, name: str) -> vim.VirtualMachine | None:
        """Find a virtual machine by name, or None if absent."""
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [vim.VirtualMachine], True
        )
        try:
            return next((vm for vm in container.view if vm.name == name), None)
        finally:
            # Container views hold server-side state; always release them.
            container.Destroy()

    def get_all_vms(self) -> list[vim.VirtualMachine]:
        """Get all virtual machines in the inventory."""
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [vim.VirtualMachine], True
        )
        try:
            return list(container.view)
        finally:
            container.Destroy()

    def find_datastore(self, name: str) -> vim.Datastore | None:
        """Find a datastore by name within the current datacenter."""
        return next(
            (
                ds
                for ds in self.datacenter.datastoreFolder.childEntity
                if isinstance(ds, vim.Datastore) and ds.name == name
            ),
            None,
        )

    def find_network(self, name: str) -> vim.Network | None:
        """Find a network by name within the current datacenter."""
        return next(
            (net for net in self.datacenter.networkFolder.childEntity if net.name == name),
            None,
        )

    def find_host(self, name: str) -> vim.HostSystem | None:
        """Find an ESXi host by name, or None if absent."""
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, [vim.HostSystem], True
        )
        try:
            return next((host for host in container.view if host.name == name), None)
        finally:
            container.Destroy()

    def wait_for_task(self, task: vim.Task) -> None:
        """Block until a vSphere task finishes.

        Raises:
            RuntimeError: If the task ends in the error state.
        """
        import time  # local import: only needed for the polling back-off

        # Bug fix: the original loop spun with `pass`, pinning a CPU core for
        # the entire duration of the task. Poll with a short sleep instead.
        while task.info.state not in (vim.TaskInfo.State.success, vim.TaskInfo.State.error):
            time.sleep(0.2)
        if task.info.state == vim.TaskInfo.State.error:
            raise RuntimeError(f"Task failed: {task.info.error}")

    def disconnect(self) -> None:
        """Disconnect from vCenter/ESXi (no-op if never connected)."""
        if self.si:
            connect.Disconnect(self.si)
            logger.info("Disconnected from VMware vCenter/ESXi")

View File

@ -1,31 +0,0 @@
"""MCP Mixins for ESXi operations organized by category."""
from mcvsphere.mixins.console import ConsoleMixin
from mcvsphere.mixins.disk_management import DiskManagementMixin
from mcvsphere.mixins.guest_ops import GuestOpsMixin
from mcvsphere.mixins.host_management import HostManagementMixin
from mcvsphere.mixins.monitoring import MonitoringMixin
from mcvsphere.mixins.nic_management import NICManagementMixin
from mcvsphere.mixins.ovf_management import OVFManagementMixin
from mcvsphere.mixins.power_ops import PowerOpsMixin
from mcvsphere.mixins.resources import ResourcesMixin
from mcvsphere.mixins.serial_port import SerialPortMixin
from mcvsphere.mixins.snapshots import SnapshotsMixin
from mcvsphere.mixins.vcenter_ops import VCenterOpsMixin
from mcvsphere.mixins.vm_lifecycle import VMLifecycleMixin
__all__ = [
"ConsoleMixin",
"DiskManagementMixin",
"GuestOpsMixin",
"HostManagementMixin",
"MonitoringMixin",
"NICManagementMixin",
"OVFManagementMixin",
"PowerOpsMixin",
"ResourcesMixin",
"SerialPortMixin",
"SnapshotsMixin",
"VCenterOpsMixin",
"VMLifecycleMixin",
]

View File

@ -1,178 +0,0 @@
"""VM Console operations - screenshots and tools monitoring."""
import base64
import time
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any
import requests
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class ConsoleMixin(MCPMixin):
    """VM console operations - screenshots and VMware Tools monitoring."""

    def __init__(self, conn: "VMwareConnection"):
        # Shared vCenter/ESXi connection used by every tool in this mixin.
        self.conn = conn

    @mcp_tool(
        name="wait_for_vm_tools",
        description="Wait for VMware Tools to become available on a VM. Useful after powering on a VM.",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def wait_for_vm_tools(
        self, name: str, timeout: int = 120, poll_interval: int = 5
    ) -> dict[str, Any]:
        """Wait for VMware Tools to become available.

        Polls the VM's guest info every ``poll_interval`` seconds until the
        tools status reaches ``toolsOk`` or ``timeout`` elapses.

        Args:
            name: VM name
            timeout: Maximum seconds to wait (default: 120)
            poll_interval: Seconds between status checks (default: 5)

        Returns:
            Dict with tools status, version, and guest info when ready;
            a ``{"status": "timeout", ...}`` dict if the deadline passes.

        Raises:
            ValueError: If no VM with the given name exists.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        start_time = datetime.now()
        end_time = start_time + timedelta(seconds=timeout)
        while datetime.now() < end_time:
            # vm.guest can be None before the guest agent has reported in.
            tools_status = vm.guest.toolsStatus if vm.guest else None
            if tools_status == vim.vm.GuestInfo.ToolsStatus.toolsOk:
                return {
                    "status": "ready",
                    "tools_status": str(tools_status),
                    "tools_version": vm.guest.toolsVersion if vm.guest else None,
                    "tools_running_status": (
                        vm.guest.toolsRunningStatus if vm.guest else None
                    ),
                    "ip_address": vm.guest.ipAddress if vm.guest else None,
                    "hostname": vm.guest.hostName if vm.guest else None,
                    "guest_os": vm.guest.guestFullName if vm.guest else None,
                    "wait_time_seconds": (datetime.now() - start_time).total_seconds(),
                }
            time.sleep(poll_interval)
        # Timeout reached
        return {
            "status": "timeout",
            "tools_status": str(vm.guest.toolsStatus) if vm.guest else None,
            "message": f"VMware Tools not ready after {timeout} seconds",
            "wait_time_seconds": timeout,
        }

    @mcp_tool(
        name="get_vm_tools_status",
        description="Get current VMware Tools status for a VM",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def get_vm_tools_status(self, name: str) -> dict[str, Any]:
        """Get VMware Tools status without waiting.

        Args:
            name: VM name

        Returns:
            Dict with current tools status and guest info (fields are None
            when the guest agent has not reported in).

        Raises:
            ValueError: If no VM with the given name exists.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        return {
            "tools_status": str(vm.guest.toolsStatus) if vm.guest else None,
            "tools_version": vm.guest.toolsVersion if vm.guest else None,
            "tools_running_status": (
                vm.guest.toolsRunningStatus if vm.guest else None
            ),
            "tools_version_status": (
                str(vm.guest.toolsVersionStatus) if vm.guest else None
            ),
            "ip_address": vm.guest.ipAddress if vm.guest else None,
            "hostname": vm.guest.hostName if vm.guest else None,
            "guest_os": vm.guest.guestFullName if vm.guest else None,
            "guest_id": vm.guest.guestId if vm.guest else None,
            "guest_state": vm.guest.guestState if vm.guest else None,
        }

    @mcp_tool(
        name="vm_screenshot",
        description="Capture a screenshot of the VM console. Returns base64-encoded PNG image.",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def vm_screenshot(
        self,
        name: str,
        width: int | None = None,
        height: int | None = None,
    ) -> dict[str, Any]:
        """Capture VM console screenshot via vSphere HTTP API.

        Args:
            name: VM name
            width: Optional width to scale the image
            height: Optional height to scale the image

        Returns:
            Dict with base64-encoded image data and metadata

        Raises:
            ValueError: If the VM is not found or the HTTP request fails.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        # Build screenshot URL
        # Format: https://{host}/screen?id={moid}
        host = self.conn.settings.vcenter_host
        moid = vm._moId
        screenshot_url = f"https://{host}/screen?id={moid}"
        # Add optional scaling parameters (joined with '&' since the URL
        # already carries the '?id=...' query string).
        params = []
        if width:
            params.append(f"w={width}")
        if height:
            params.append(f"h={height}")
        if params:
            screenshot_url += "&" + "&".join(params)
        # Build auth header (HTTP Basic with the vCenter credentials).
        username = self.conn.settings.vcenter_user
        password = self.conn.settings.vcenter_password.get_secret_value()
        auth = base64.b64encode(f"{username}:{password}".encode()).decode("ascii")
        # Make request; TLS verification mirrors the vcenter_insecure setting.
        try:
            response = requests.get(
                screenshot_url,
                headers={"Authorization": f"Basic {auth}"},
                verify=not self.conn.settings.vcenter_insecure,
                timeout=30,
            )
            response.raise_for_status()
        except requests.RequestException as e:
            raise ValueError(f"Failed to capture screenshot: {e}") from e
        # Encode image as base64 for transport in the JSON-RPC response.
        image_data = base64.b64encode(response.content).decode("ascii")
        content_type = response.headers.get("Content-Type", "image/png")
        return {
            "vm_name": name,
            "moid": moid,
            "content_type": content_type,
            "size_bytes": len(response.content),
            "image_base64": image_data,
            "width": width,
            "height": height,
        }

View File

@ -1,439 +0,0 @@
"""Virtual Disk Management - add, remove, extend disks and manage ISOs."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class DiskManagementMixin(MCPMixin):
    """Virtual disk and ISO management tools."""

    def __init__(self, conn: "VMwareConnection"):
        # Shared vCenter/ESXi connection used by every tool in this mixin.
        self.conn = conn

    def _get_next_disk_unit_number(self, vm: vim.VirtualMachine) -> tuple[int, vim.vm.device.VirtualSCSIController]:
        """Find the next available SCSI unit number and controller.

        Returns:
            Tuple of (unit number, controller) for the first free slot.

        Raises:
            ValueError: If the VM has no SCSI controller or all slots are used.
        """
        scsi_controllers = []
        used_units = {}
        # Single pass over the device list: record controllers and mark the
        # unit numbers already occupied on each.
        # NOTE(review): a device appearing in the list *before* its controller
        # would not be counted (its controllerKey is not yet in used_units);
        # a two-pass scan would be safer — confirm device ordering guarantees.
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualSCSIController):
                scsi_controllers.append(device)
                used_units[device.key] = set()
            if (
                hasattr(device, "controllerKey")
                and hasattr(device, "unitNumber")
                and device.controllerKey in used_units
            ):
                used_units[device.controllerKey].add(device.unitNumber)
        if not scsi_controllers:
            raise ValueError("No SCSI controller found on VM")
        # Find first available slot (unit 7 is reserved for controller)
        for controller in scsi_controllers:
            for unit in range(16):
                if unit == 7:  # Reserved for SCSI controller
                    continue
                if unit not in used_units.get(controller.key, set()):
                    return unit, controller
        raise ValueError("No available SCSI unit numbers (all 15 slots used)")

    def _find_disk_by_label(
        self, vm: vim.VirtualMachine, label: str
    ) -> vim.vm.device.VirtualDisk | None:
        """Find a virtual disk by its label (e.g., 'Hard disk 1').

        Comparison is case-insensitive; returns None when no disk matches.
        """
        for device in vm.config.hardware.device:
            if (
                isinstance(device, vim.vm.device.VirtualDisk)
                and device.deviceInfo.label.lower() == label.lower()
            ):
                return device
        return None

    def _find_cdrom(self, vm: vim.VirtualMachine) -> vim.vm.device.VirtualCdrom | None:
        """Find the first CD-ROM drive on the VM, or None if there is none."""
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualCdrom):
                return device
        return None

    @mcp_tool(
        name="add_disk",
        description="Add a new virtual disk to a VM",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def add_disk(
        self,
        vm_name: str,
        size_gb: int,
        thin_provisioned: bool = True,
        datastore: str | None = None,
    ) -> dict[str, Any]:
        """Add a new virtual disk to a VM.

        Args:
            vm_name: Name of the virtual machine
            size_gb: Size of the new disk in GB
            thin_provisioned: Use thin provisioning (default True)
            datastore: Datastore for the disk (default: same as VM)

        Returns:
            Dict with new disk details

        Raises:
            ValueError: If the VM or datastore is not found, or no SCSI
                slot is free.
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        # Get next available unit number and controller
        unit_number, controller = self._get_next_disk_unit_number(vm)
        # Determine datastore
        if datastore:
            ds = self.conn.find_datastore(datastore)
            if not ds:
                raise ValueError(f"Datastore '{datastore}' not found")
            ds_name = datastore
        else:
            # Use VM's datastore: parsed from the '[datastore] path' form
            # of the VM's vmx path.
            ds_name = vm.config.files.vmPathName.split("]")[0].strip("[")
        # Calculate size in KB (vSphere expresses disk capacity in KB)
        size_kb = size_gb * 1024 * 1024
        # Create disk backing
        backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        backing.diskMode = "persistent"
        backing.thinProvisioned = thin_provisioned
        backing.datastore = self.conn.find_datastore(ds_name)
        # Create the virtual disk
        disk = vim.vm.device.VirtualDisk()
        disk.backing = backing
        disk.controllerKey = controller.key
        disk.unitNumber = unit_number
        disk.capacityInKB = size_kb
        # Create device config spec; fileOperation=create makes vSphere
        # allocate a new VMDK for the device.
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
        disk_spec.device = disk
        # Create VM config spec
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [disk_spec]
        # Reconfigure VM
        task = vm.ReconfigVM_Task(spec=config_spec)
        self.conn.wait_for_task(task)
        return {
            "vm": vm_name,
            "action": "disk_added",
            "size_gb": size_gb,
            "thin_provisioned": thin_provisioned,
            "datastore": ds_name,
            "controller": controller.deviceInfo.label,
            "unit_number": unit_number,
        }

    @mcp_tool(
        name="remove_disk",
        description="Remove a virtual disk from a VM",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def remove_disk(
        self,
        vm_name: str,
        disk_label: str,
        delete_file: bool = False,
    ) -> dict[str, Any]:
        """Remove a virtual disk from a VM.

        Args:
            vm_name: Name of the virtual machine
            disk_label: Label of disk to remove (e.g., 'Hard disk 2')
            delete_file: Also delete the VMDK file (default False - keep file)

        Returns:
            Dict with removal details

        Raises:
            ValueError: If the VM or the labelled disk is not found (the
                error message lists the labels that do exist).
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        disk = self._find_disk_by_label(vm, disk_label)
        if not disk:
            # List available disks so the caller can correct the label.
            available = [
                d.deviceInfo.label
                for d in vm.config.hardware.device
                if isinstance(d, vim.vm.device.VirtualDisk)
            ]
            raise ValueError(f"Disk '{disk_label}' not found. Available: {available}")
        # Create device removal spec
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        if delete_file:
            # destroy = also delete the backing VMDK from the datastore.
            disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
        disk_spec.device = disk
        # Create VM config spec
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [disk_spec]
        # Get disk info before removal (unavailable afterwards)
        disk_path = disk.backing.fileName if hasattr(disk.backing, "fileName") else "unknown"
        disk_size_gb = disk.capacityInKB / (1024 * 1024)
        # Reconfigure VM
        task = vm.ReconfigVM_Task(spec=config_spec)
        self.conn.wait_for_task(task)
        return {
            "vm": vm_name,
            "action": "disk_removed",
            "disk_label": disk_label,
            "disk_path": disk_path,
            "size_gb": round(disk_size_gb, 2),
            "file_deleted": delete_file,
        }

    @mcp_tool(
        name="extend_disk",
        description="Extend/grow a virtual disk",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def extend_disk(
        self,
        vm_name: str,
        disk_label: str,
        new_size_gb: int,
    ) -> dict[str, Any]:
        """Extend a virtual disk to a larger size.

        Args:
            vm_name: Name of the virtual machine
            disk_label: Label of disk to extend (e.g., 'Hard disk 1')
            new_size_gb: New total size in GB (must be larger than current)

        Returns:
            Dict with extension details

        Raises:
            ValueError: If the VM/disk is not found or the new size does
                not exceed the current size (disks cannot shrink).
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        disk = self._find_disk_by_label(vm, disk_label)
        if not disk:
            available = [
                d.deviceInfo.label
                for d in vm.config.hardware.device
                if isinstance(d, vim.vm.device.VirtualDisk)
            ]
            raise ValueError(f"Disk '{disk_label}' not found. Available: {available}")
        current_size_gb = disk.capacityInKB / (1024 * 1024)
        if new_size_gb <= current_size_gb:
            raise ValueError(
                f"New size ({new_size_gb}GB) must be larger than current ({current_size_gb:.2f}GB)"
            )
        # Update disk capacity in place on the fetched device object, then
        # submit it back with an 'edit' operation.
        new_size_kb = new_size_gb * 1024 * 1024
        disk.capacityInKB = new_size_kb
        # Create device edit spec
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        disk_spec.device = disk
        # Create VM config spec
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [disk_spec]
        # Reconfigure VM
        task = vm.ReconfigVM_Task(spec=config_spec)
        self.conn.wait_for_task(task)
        return {
            "vm": vm_name,
            "action": "disk_extended",
            "disk_label": disk_label,
            "old_size_gb": round(current_size_gb, 2),
            "new_size_gb": new_size_gb,
        }

    @mcp_tool(
        name="list_disks",
        description="List all virtual disks attached to a VM",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def list_disks(self, vm_name: str) -> list[dict[str, Any]]:
        """List all virtual disks attached to a VM.

        Args:
            vm_name: Name of the virtual machine

        Returns:
            List of disk details (label, size, unit number, and backing
            info where the backing type exposes it)

        Raises:
            ValueError: If no VM with the given name exists.
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        disks = []
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualDisk):
                backing = device.backing
                disk_info = {
                    "label": device.deviceInfo.label,
                    "size_gb": round(device.capacityInKB / (1024 * 1024), 2),
                    "unit_number": device.unitNumber,
                }
                # Backing attributes vary by backing class; add them only
                # when present.
                if hasattr(backing, "fileName"):
                    disk_info["file"] = backing.fileName
                if hasattr(backing, "thinProvisioned"):
                    disk_info["thin_provisioned"] = backing.thinProvisioned
                if hasattr(backing, "diskMode"):
                    disk_info["mode"] = backing.diskMode
                disks.append(disk_info)
        return disks

    @mcp_tool(
        name="attach_iso",
        description="Attach an ISO file to a VM's CD/DVD drive",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def attach_iso(
        self,
        vm_name: str,
        iso_path: str,
        datastore: str | None = None,
    ) -> dict[str, Any]:
        """Attach an ISO file to a VM's CD/DVD drive.

        Args:
            vm_name: Name of the virtual machine
            iso_path: Path to ISO file on datastore (e.g., 'iso/ubuntu.iso')
            datastore: Datastore containing the ISO (default: first VM datastore)

        Returns:
            Dict with attachment details

        Raises:
            ValueError: If the VM is not found or has no CD/DVD drive.
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        cdrom = self._find_cdrom(vm)
        if not cdrom:
            raise ValueError(f"No CD/DVD drive found on VM '{vm_name}'")
        # Determine datastore (default: the one holding the VM's vmx file)
        if not datastore:
            datastore = vm.config.files.vmPathName.split("]")[0].strip("[")
        # Build full ISO path in vSphere's '[datastore] path' notation
        full_iso_path = f"[{datastore}] {iso_path}"
        # Create ISO backing
        backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
        backing.fileName = full_iso_path
        # Configure CD-ROM: connect now and on every power-on.
        cdrom.backing = backing
        cdrom.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        cdrom.connectable.connected = True
        cdrom.connectable.startConnected = True
        cdrom.connectable.allowGuestControl = True
        # Create device edit spec
        cdrom_spec = vim.vm.device.VirtualDeviceSpec()
        cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        cdrom_spec.device = cdrom
        # Create VM config spec
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [cdrom_spec]
        # Reconfigure VM
        task = vm.ReconfigVM_Task(spec=config_spec)
        self.conn.wait_for_task(task)
        return {
            "vm": vm_name,
            "action": "iso_attached",
            "iso_path": full_iso_path,
            "cdrom": cdrom.deviceInfo.label,
            "connected": True,
        }

    @mcp_tool(
        name="detach_iso",
        description="Detach/eject ISO from a VM's CD/DVD drive",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def detach_iso(self, vm_name: str) -> dict[str, Any]:
        """Detach/eject ISO from a VM's CD/DVD drive.

        Args:
            vm_name: Name of the virtual machine

        Returns:
            Dict with detachment details (including the previous ISO path
            when one was mounted)

        Raises:
            ValueError: If the VM is not found or has no CD/DVD drive.
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        cdrom = self._find_cdrom(vm)
        if not cdrom:
            raise ValueError(f"No CD/DVD drive found on VM '{vm_name}'")
        # Get current ISO path for reporting (only ISO backings have fileName)
        old_iso = None
        if hasattr(cdrom.backing, "fileName"):
            old_iso = cdrom.backing.fileName
        # Create empty client device backing (ejects the ISO)
        backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
        backing.deviceName = ""
        # Configure CD-ROM as disconnected
        cdrom.backing = backing
        cdrom.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        cdrom.connectable.connected = False
        cdrom.connectable.startConnected = False
        cdrom.connectable.allowGuestControl = True
        # Create device edit spec
        cdrom_spec = vim.vm.device.VirtualDeviceSpec()
        cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        cdrom_spec.device = cdrom
        # Create VM config spec
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = [cdrom_spec]
        # Reconfigure VM
        task = vm.ReconfigVM_Task(spec=config_spec)
        self.conn.wait_for_task(task)
        return {
            "vm": vm_name,
            "action": "iso_detached",
            "previous_iso": old_iso,
            "cdrom": cdrom.deviceInfo.label,
        }

View File

@ -1,378 +0,0 @@
"""Guest operations - run commands, file transfers (requires VMware Tools)."""
import base64
import time
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class GuestOpsMixin(MCPMixin):
"""Guest OS operations (requires VMware Tools running in the VM)."""
def __init__(self, conn: "VMwareConnection"):
self.conn = conn
def _get_guest_auth(
self, username: str, password: str
) -> vim.vm.guest.NamePasswordAuthentication:
"""Create guest authentication object."""
return vim.vm.guest.NamePasswordAuthentication(
username=username,
password=password,
interactiveSession=False,
)
def _check_tools_running(self, vm: vim.VirtualMachine) -> None:
"""Verify VMware Tools is running."""
if vm.runtime.powerState != vim.VirtualMachine.PowerState.poweredOn:
raise RuntimeError(f"VM '{vm.name}' is not powered on")
if vm.guest.toolsRunningStatus != "guestToolsRunning":
raise RuntimeError(
f"VMware Tools not running on '{vm.name}'. "
"Guest operations require VMware Tools to be installed and running."
)
@mcp_tool(
name="run_command_in_guest",
description="Execute a command inside a VM's guest OS (requires VMware Tools and guest credentials)",
annotations=ToolAnnotations(destructiveHint=True, idempotentHint=False),
)
def run_command_in_guest(
self,
name: str,
username: str,
password: str,
command: str,
arguments: str = "",
working_directory: str = "",
wait_for_completion: bool = True,
timeout_seconds: int = 300,
) -> dict[str, Any]:
"""Run a command in the guest OS.
Args:
name: VM name
username: Guest OS username
password: Guest OS password
command: Path to executable (e.g., /bin/bash, cmd.exe)
arguments: Command arguments (e.g., -c "echo hello")
working_directory: Working directory for the command
wait_for_completion: Wait for command to complete
timeout_seconds: Timeout in seconds (only if waiting)
"""
vm = self.conn.find_vm(name)
if not vm:
raise ValueError(f"VM '{name}' not found")
self._check_tools_running(vm)
guest_ops = self.conn.content.guestOperationsManager
process_manager = guest_ops.processManager
auth = self._get_guest_auth(username, password)
# Build program spec
program_spec = vim.vm.guest.ProcessManager.ProgramSpec(
programPath=command,
arguments=arguments,
workingDirectory=working_directory if working_directory else None,
)
# Start the process
pid = process_manager.StartProgramInGuest(vm, auth, program_spec)
result = {
"pid": pid,
"command": command,
"arguments": arguments,
"started": True,
}
if wait_for_completion:
# Poll for completion
start_time = time.time()
while time.time() - start_time < timeout_seconds:
processes = process_manager.ListProcessesInGuest(vm, auth, [pid])
if processes:
proc = processes[0]
if proc.endTime:
result["exit_code"] = proc.exitCode
result["completed"] = True
result["end_time"] = proc.endTime.isoformat()
break
time.sleep(1)
else:
result["completed"] = False
result["timeout"] = True
return result
@mcp_tool(
    name="list_guest_processes",
    description="List running processes in a VM's guest OS",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_guest_processes(
    self, name: str, username: str, password: str
) -> list[dict[str, Any]]:
    """Return one entry per process currently running in the guest OS."""
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    proc_mgr = self.conn.content.guestOperationsManager.processManager
    creds = self._get_guest_auth(username, password)
    # An empty pid list asks VMware Tools for every process.
    entries: list[dict[str, Any]] = []
    for info in proc_mgr.ListProcessesInGuest(vm, creds, pids=[]):
        entries.append(
            {
                "pid": info.pid,
                "name": info.name,
                "owner": info.owner,
                "command": info.cmdLine,
                "start_time": info.startTime.isoformat() if info.startTime else None,
            }
        )
    return entries
@mcp_tool(
    name="read_guest_file",
    description="Read a file from a VM's guest OS (returns base64 for binary files)",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def read_guest_file(
    self, name: str, username: str, password: str, guest_path: str
) -> dict[str, Any]:
    """Read a file from the guest OS.

    Args:
        name: VM name
        username: Guest OS username
        password: Guest OS password
        guest_path: Path to file in guest (e.g., /etc/hosts, C:\\Windows\\System32\\hosts)

    Returns:
        Dict with path, size, content, and encoding ("utf-8" for text,
        "base64" when the bytes do not decode as UTF-8).

    Raises:
        ValueError: If the VM or the file cannot be found.
    """
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    guest_ops = self.conn.content.guestOperationsManager
    file_manager = guest_ops.fileManager
    auth = self._get_guest_auth(username, password)
    # Get file attributes first (also validates that the path exists).
    try:
        attrs = file_manager.ListFilesInGuest(
            vm, auth, guest_path, matchPattern=None
        )
        if not attrs.files:
            raise ValueError(f"File not found: {guest_path}")
        file_info = attrs.files[0]
    except vim.fault.FileNotFound:
        raise ValueError(f"File not found: {guest_path}") from None
    # Initiate file transfer from guest; the result carries a one-time
    # HTTPS URL that serves the file's bytes.
    file_transfer = file_manager.InitiateFileTransferFromGuest(
        vm, auth, guest_path
    )
    # Download the file content via the transfer URL
    import ssl
    import urllib.request

    # Certificate checks are disabled to match the (typically self-signed)
    # ESXi/vCenter endpoint.
    # NOTE(review): some ESXi builds return a transfer URL with '*' as the
    # host placeholder; presumably the connection substitutes the real host
    # upstream — confirm against VMwareConnection.
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen(file_transfer.url, context=context) as response:
        content = response.read()
    # Try to decode as text, fall back to base64
    try:
        text_content = content.decode("utf-8")
        return {
            "path": guest_path,
            "size": file_info.size,
            "content": text_content,
            "encoding": "utf-8",
        }
    except UnicodeDecodeError:
        return {
            "path": guest_path,
            "size": file_info.size,
            "content": base64.b64encode(content).decode("ascii"),
            "encoding": "base64",
        }
@mcp_tool(
    name="write_guest_file",
    description="Write a file to a VM's guest OS",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def write_guest_file(
    self,
    name: str,
    username: str,
    password: str,
    guest_path: str,
    content: str,
    overwrite: bool = True,
) -> str:
    """Write a file to the guest OS.

    Args:
        name: VM name
        username: Guest OS username
        password: Guest OS password
        guest_path: Destination path in guest
        content: File content (text)
        overwrite: Overwrite if exists

    Returns:
        Confirmation string including the number of bytes written.

    Raises:
        ValueError: If the VM does not exist.
    """
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    guest_ops = self.conn.content.guestOperationsManager
    file_manager = guest_ops.fileManager
    auth = self._get_guest_auth(username, password)
    content_bytes = content.encode("utf-8")
    # Initiate file transfer to guest: the API returns a one-time HTTPS
    # URL that accepts the file body via PUT.
    file_attrs = vim.vm.guest.FileManager.FileAttributes()
    transfer_url = file_manager.InitiateFileTransferToGuest(
        vm,
        auth,
        guest_path,
        file_attrs,
        len(content_bytes),
        overwrite,
    )
    # Upload the content
    import ssl
    import urllib.request

    # Certificate checks disabled for the (typically self-signed) endpoint.
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    request = urllib.request.Request(
        transfer_url,
        data=content_bytes,
        method="PUT",
    )
    request.add_header("Content-Type", "application/octet-stream")
    # Response body is irrelevant; success is signalled by a non-error status.
    with urllib.request.urlopen(request, context=context):
        pass
    return f"File written to {guest_path} ({len(content_bytes)} bytes)"
@mcp_tool(
    name="list_guest_directory",
    description="List files in a directory on a VM's guest OS",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_guest_directory(
    self, name: str, username: str, password: str, guest_path: str
) -> list[dict[str, Any]]:
    """Return a dict of metadata for every entry in a guest directory."""
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    fm = self.conn.content.guestOperationsManager.fileManager
    creds = self._get_guest_auth(username, password)
    try:
        listing = fm.ListFilesInGuest(vm, creds, guest_path, matchPattern=None)
    except vim.fault.FileNotFound:
        raise ValueError(f"Directory not found: {guest_path}") from None
    # Attributes vary by guest/Tools version, so read each one defensively.
    return [
        {
            "name": entry.path,
            "size": getattr(entry, "size", None),
            "type": getattr(entry, "type", None),
            "owner": getattr(entry, "owner", None),
            "modified": mtime.isoformat()
            if (mtime := getattr(entry, "modificationTime", None))
            else None,
        }
        for entry in listing.files
    ]
@mcp_tool(
    name="create_guest_directory",
    description="Create a directory in a VM's guest OS",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
)
def create_guest_directory(
    self,
    name: str,
    username: str,
    password: str,
    guest_path: str,
    create_parents: bool = True,
) -> str:
    """Create *guest_path* inside the guest OS, optionally with parents."""
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    fm = self.conn.content.guestOperationsManager.fileManager
    creds = self._get_guest_auth(username, password)
    fm.MakeDirectoryInGuest(
        vm, creds, guest_path, createParentDirectories=create_parents
    )
    return f"Directory created: {guest_path}"
@mcp_tool(
    name="delete_guest_file",
    description="Delete a file or directory from a VM's guest OS",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def delete_guest_file(
    self, name: str, username: str, password: str, guest_path: str
) -> str:
    """Remove a file (or, recursively, a directory) from the guest OS."""
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    self._check_tools_running(vm)
    fm = self.conn.content.guestOperationsManager.fileManager
    creds = self._get_guest_auth(username, password)
    # Optimistically treat the path as a file; NotAFile tells us it is a
    # directory, which we then remove recursively.
    try:
        fm.DeleteFileInGuest(vm, creds, guest_path)
    except vim.fault.NotAFile:
        fm.DeleteDirectoryInGuest(vm, creds, guest_path, recursive=True)
        return f"Directory deleted: {guest_path}"
    return f"Deleted: {guest_path}"

View File

@ -1,592 +0,0 @@
"""ESXi Host Management - maintenance mode, services, NTP, and host configuration."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class HostManagementMixin(MCPMixin):
    """ESXi host management tools."""

    def __init__(self, conn: "VMwareConnection") -> None:
        # Shared vSphere connection used by every tool in this mixin.
        self.conn = conn
def _get_host(self) -> vim.HostSystem:
    """Return the first ESXi host found under the datacenter's host folder."""
    for child in self.conn.datacenter.hostFolder.childEntity:
        # Standalone hosts are wrapped in a ComputeResource (clusters
        # subclass it); bare HostSystem entries are returned directly.
        if isinstance(child, vim.ComputeResource) and child.host:
            return child.host[0]
        if isinstance(child, vim.HostSystem):
            return child
    raise ValueError("No ESXi host found")
@mcp_tool(
    name="get_host_info",
    description="Get detailed information about the ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_host_info(self) -> dict[str, Any]:
    """Get detailed ESXi host information.

    Returns:
        Dict with host details including hardware, software, and status
    """
    host = self._get_host()
    summary = host.summary
    hardware = summary.hardware
    config = summary.config
    return {
        "name": config.name,
        "uuid": hardware.uuid,
        "product": {
            "name": config.product.name,
            "version": config.product.version,
            "build": config.product.build,
            "full_name": config.product.fullName,
        },
        "hardware": {
            "vendor": hardware.vendor,
            "model": hardware.model,
            "cpu_model": hardware.cpuModel,
            "cpu_cores": hardware.numCpuCores,
            "cpu_threads": hardware.numCpuThreads,
            "cpu_mhz": hardware.cpuMhz,
            # memorySize is in bytes; report GiB rounded to 2 places.
            "memory_gb": round(hardware.memorySize / (1024**3), 2),
            "nics": hardware.numNics,
            "hbas": hardware.numHBAs,
        },
        "status": {
            "power_state": str(host.runtime.powerState),
            "connection_state": str(host.runtime.connectionState),
            "maintenance_mode": host.runtime.inMaintenanceMode,
            "uptime_seconds": host.summary.quickStats.uptime,
            "boot_time": str(host.runtime.bootTime) if host.runtime.bootTime else None,
        },
        # Not every summary exposes managementServerIp; default to None.
        "management_ip": getattr(config, "managementServerIp", None),
    }
@mcp_tool(
    name="enter_maintenance_mode",
    description="Put ESXi host into maintenance mode",
    annotations=ToolAnnotations(destructiveHint=True),
)
def enter_maintenance_mode(
    self,
    evacuate_vms: bool = True,
    timeout_seconds: int = 300,
) -> dict[str, Any]:
    """Place the ESXi host into maintenance mode.

    Args:
        evacuate_vms: Evacuate/suspend VMs before entering (default True)
        timeout_seconds: Timeout for the operation (default 300)

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    # Idempotent: report success without issuing a task when already there.
    if host.runtime.inMaintenanceMode:
        return {
            "host": host.name,
            "action": "already_in_maintenance_mode",
            "maintenance_mode": True,
        }
    self.conn.wait_for_task(
        host.EnterMaintenanceMode_Task(
            timeout=timeout_seconds,
            evacuatePoweredOffVms=evacuate_vms,
        )
    )
    return {
        "host": host.name,
        "action": "entered_maintenance_mode",
        "maintenance_mode": True,
        "evacuate_vms": evacuate_vms,
    }
@mcp_tool(
    name="exit_maintenance_mode",
    description="Exit ESXi host from maintenance mode",
    annotations=ToolAnnotations(destructiveHint=True),
)
def exit_maintenance_mode(
    self,
    timeout_seconds: int = 300,
) -> dict[str, Any]:
    """Take the ESXi host out of maintenance mode.

    Args:
        timeout_seconds: Timeout for the operation (default 300)

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    # Idempotent: nothing to do when the host is already out.
    if not host.runtime.inMaintenanceMode:
        return {
            "host": host.name,
            "action": "not_in_maintenance_mode",
            "maintenance_mode": False,
        }
    self.conn.wait_for_task(host.ExitMaintenanceMode_Task(timeout=timeout_seconds))
    return {
        "host": host.name,
        "action": "exited_maintenance_mode",
        "maintenance_mode": False,
    }
@mcp_tool(
    name="list_services",
    description="List all services on the ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_services(self) -> list[dict[str, Any]]:
    """Return key, label, policy and run state for every host service."""
    host = self._get_host()
    service_info = host.configManager.serviceSystem.serviceInfo
    return [
        {
            "key": svc.key,
            "label": svc.label,
            "policy": svc.policy,
            "running": svc.running,
            "required": svc.required,
            "uninstallable": svc.uninstallable,
        }
        for svc in service_info.service
    ]
@mcp_tool(
    name="start_service",
    description="Start a service on the ESXi host",
    annotations=ToolAnnotations(destructiveHint=True),
)
def start_service(self, service_key: str) -> dict[str, Any]:
    """Start a host service by key.

    Args:
        service_key: Service key (e.g., 'TSM-SSH', 'ntpd', 'sfcbd')

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    service_system = host.configManager.serviceSystem
    services = service_system.serviceInfo.service
    # Resolve the service first so unknown keys fail with a helpful list.
    target = next((s for s in services if s.key == service_key), None)
    if target is None:
        available = [s.key for s in services]
        raise ValueError(f"Service '{service_key}' not found. Available: {available}")
    if target.running:
        return {
            "host": host.name,
            "service": service_key,
            "action": "already_running",
            "running": True,
        }
    service_system.StartService(id=service_key)
    return {
        "host": host.name,
        "service": service_key,
        "action": "started",
        "running": True,
    }
@mcp_tool(
    name="stop_service",
    description="Stop a service on the ESXi host",
    annotations=ToolAnnotations(destructiveHint=True),
)
def stop_service(self, service_key: str) -> dict[str, Any]:
    """Stop a host service by key.

    Args:
        service_key: Service key (e.g., 'TSM-SSH', 'ntpd')

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    service_system = host.configManager.serviceSystem
    services = service_system.serviceInfo.service
    # Resolve the service first so unknown keys fail with a helpful list.
    target = next((s for s in services if s.key == service_key), None)
    if target is None:
        available = [s.key for s in services]
        raise ValueError(f"Service '{service_key}' not found. Available: {available}")
    if not target.running:
        return {
            "host": host.name,
            "service": service_key,
            "action": "already_stopped",
            "running": False,
        }
    service_system.StopService(id=service_key)
    return {
        "host": host.name,
        "service": service_key,
        "action": "stopped",
        "running": False,
    }
@mcp_tool(
    name="set_service_policy",
    description="Set the startup policy for a service",
    annotations=ToolAnnotations(destructiveHint=True),
)
def set_service_policy(
    self,
    service_key: str,
    policy: str,
) -> dict[str, Any]:
    """Change when a host service is started.

    Args:
        service_key: Service key (e.g., 'TSM-SSH', 'ntpd')
        policy: Startup policy - 'on' (auto), 'off' (manual), 'automatic'

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    service_system = host.configManager.serviceSystem
    valid_policies = ["on", "off", "automatic"]
    if policy not in valid_policies:
        raise ValueError(f"Invalid policy '{policy}'. Valid: {valid_policies}")
    services = service_system.serviceInfo.service
    # Resolve the service up front so we can report its previous policy.
    target = next((s for s in services if s.key == service_key), None)
    if target is None:
        available = [s.key for s in services]
        raise ValueError(f"Service '{service_key}' not found. Available: {available}")
    old_policy = target.policy
    service_system.UpdateServicePolicy(id=service_key, policy=policy)
    return {
        "host": host.name,
        "service": service_key,
        "action": "policy_updated",
        "old_policy": old_policy,
        "new_policy": policy,
    }
@mcp_tool(
    name="get_ntp_config",
    description="Get NTP configuration for the ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_ntp_config(self) -> dict[str, Any]:
    """Report the host's NTP servers, ntpd service state, and clock info."""
    host = self._get_host()
    datetime_system = host.configManager.dateTimeSystem
    ntp_config = datetime_system.dateTimeInfo.ntpConfig
    # Look up the ntpd service entry, if it exists on this host.
    ntpd = next(
        (
            s
            for s in host.configManager.serviceSystem.serviceInfo.service
            if s.key == "ntpd"
        ),
        None,
    )
    return {
        "host": host.name,
        "ntp_servers": list(ntp_config.server) if ntp_config else [],
        "service_running": ntpd.running if ntpd else False,
        "service_policy": ntpd.policy if ntpd else "unknown",
        "current_time": str(datetime_system.QueryDateTime()),
        "timezone": datetime_system.dateTimeInfo.timeZone.name,
    }
@mcp_tool(
    name="configure_ntp",
    description="Configure NTP servers for the ESXi host",
    annotations=ToolAnnotations(destructiveHint=True),
)
def configure_ntp(
    self,
    ntp_servers: list[str],
    start_service: bool = True,
) -> dict[str, Any]:
    """Configure NTP servers for the ESXi host.

    Args:
        ntp_servers: List of NTP server addresses
        start_service: Start ntpd service after configuring (default True)

    Returns:
        Dict with configuration result
    """
    host = self._get_host()
    datetime_system = host.configManager.dateTimeSystem
    # Create NTP config
    ntp_config = vim.host.NtpConfig(server=ntp_servers)
    # Create DateTime config
    datetime_config = vim.host.DateTimeConfig(ntpConfig=ntp_config)
    # Apply configuration
    datetime_system.UpdateDateTimeConfig(config=datetime_config)
    result = {
        "host": host.name,
        "action": "ntp_configured",
        "ntp_servers": ntp_servers,
    }
    if start_service:
        # Restart ntpd to pick up new config
        service_system = host.configManager.serviceSystem
        try:
            service_system.RestartService(id="ntpd")
            result["service_restarted"] = True
        except Exception:
            # Service might not be running, try to start it
            # (restart fails while the daemon is stopped).
            try:
                service_system.StartService(id="ntpd")
                result["service_started"] = True
            except Exception as e:
                # Best-effort: record the failure instead of raising so the
                # NTP server change itself is not lost.
                result["service_error"] = str(e)
    return result
@mcp_tool(
    name="reboot_host",
    description="Reboot the ESXi host (requires maintenance mode)",
    annotations=ToolAnnotations(destructiveHint=True),
)
def reboot_host(self, force: bool = False) -> dict[str, Any]:
    """Reboot the ESXi host.

    Args:
        force: Force reboot even if VMs are running (dangerous!)

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    # Refuse to reboot a live host unless the caller explicitly forces it.
    if not (host.runtime.inMaintenanceMode or force):
        raise ValueError(
            "Host must be in maintenance mode to reboot. "
            "Use enter_maintenance_mode first, or set force=True (dangerous!)."
        )
    # Fire and forget: waiting on the task is pointless, the host goes away.
    host.RebootHost_Task(force=force)
    return {
        "host": host.name,
        "action": "reboot_initiated",
        "force": force,
        "warning": "Host is rebooting. Connection will be lost.",
    }
@mcp_tool(
    name="shutdown_host",
    description="Shutdown the ESXi host (requires maintenance mode)",
    annotations=ToolAnnotations(destructiveHint=True),
)
def shutdown_host(self, force: bool = False) -> dict[str, Any]:
    """Shutdown the ESXi host.

    Args:
        force: Force shutdown even if VMs are running (dangerous!)

    Returns:
        Dict with operation result
    """
    host = self._get_host()
    # Refuse to power off a live host unless the caller explicitly forces it.
    if not (host.runtime.inMaintenanceMode or force):
        raise ValueError(
            "Host must be in maintenance mode to shutdown. "
            "Use enter_maintenance_mode first, or set force=True (dangerous!)."
        )
    # Fire and forget: waiting on the task is pointless, the host goes away.
    host.ShutdownHost_Task(force=force)
    return {
        "host": host.name,
        "action": "shutdown_initiated",
        "force": force,
        "warning": "Host is shutting down. Connection will be lost.",
    }
@mcp_tool(
    name="get_host_hardware",
    description="Get detailed hardware information for the ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_host_hardware(self) -> dict[str, Any]:
    """Get detailed hardware information.

    Returns:
        Dict with hardware details (BIOS, CPU, memory, first 10 PCI
        devices, and physical NICs)
    """
    host = self._get_host()
    hardware = host.hardware
    # CPU info
    cpu_info = {
        "packages": hardware.cpuInfo.numCpuPackages,
        "cores": hardware.cpuInfo.numCpuCores,
        "threads": hardware.cpuInfo.numCpuThreads,
        "hz": hardware.cpuInfo.hz,
    }
    # Memory info
    memory_info = {
        "total_bytes": hardware.memorySize,
        "total_gb": round(hardware.memorySize / (1024**3), 2),
    }
    # PCI devices
    pci_devices = []
    for pci in hardware.pciDevice[:10]:  # Limit to first 10 to keep output small
        pci_devices.append({
            "id": pci.id,
            "vendor_name": pci.vendorName,
            "device_name": pci.deviceName,
            "class_id": pci.classId,
        })
    # NICs (physical adapters only)
    nics = []
    for nic in host.config.network.pnic:
        nics.append({
            "device": nic.device,
            "driver": nic.driver,
            "mac": nic.mac,
            # linkSpeed is unset when the link is down.
            "link_speed": nic.linkSpeed.speedMb if nic.linkSpeed else None,
        })
    return {
        "host": host.name,
        "uuid": hardware.systemInfo.uuid,
        "bios": {
            "vendor": hardware.biosInfo.vendor,
            "version": hardware.biosInfo.biosVersion,
            "release_date": str(hardware.biosInfo.releaseDate),
        },
        "cpu": cpu_info,
        "memory": memory_info,
        "pci_devices": pci_devices,
        "network_adapters": nics,
    }
@mcp_tool(
    name="get_host_networking",
    description="Get network configuration for the ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_host_networking(self) -> dict[str, Any]:
    """Get network configuration for the ESXi host.

    Returns:
        Dict with standard vSwitches, port groups, VMkernel adapters,
        and DNS configuration
    """
    host = self._get_host()
    network_config = host.config.network
    # Virtual switches (standard vSwitches only)
    vswitches = []
    for vswitch in network_config.vswitch:
        vswitches.append({
            "name": vswitch.name,
            "ports": vswitch.numPorts,
            "ports_available": vswitch.numPortsAvailable,
            "mtu": vswitch.mtu,
            "pnics": list(vswitch.pnic) if vswitch.pnic else [],
        })
    # Port groups
    portgroups = []
    for pg in network_config.portgroup:
        portgroups.append({
            "name": pg.spec.name,
            "vswitch": pg.spec.vswitchName,
            "vlan_id": pg.spec.vlanId,
        })
    # VMkernel adapters
    # NOTE(review): assumes every vnic spec carries an ip block — confirm
    # for DHCP-less/unconfigured adapters.
    vmknics = []
    for vmk in network_config.vnic:
        vmknics.append({
            "device": vmk.device,
            "portgroup": vmk.portgroup,
            "ip": vmk.spec.ip.ipAddress,
            "netmask": vmk.spec.ip.subnetMask,
            "mac": vmk.spec.mac,
            "mtu": vmk.spec.mtu,
        })
    # DNS config
    dns = network_config.dnsConfig
    dns_info = {
        "hostname": dns.hostName,
        "domain": dns.domainName,
        "servers": list(dns.address) if dns.address else [],
        "search_domains": list(dns.searchDomain) if dns.searchDomain else [],
    }
    return {
        "host": host.name,
        "vswitches": vswitches,
        "portgroups": portgroups,
        "vmkernel_adapters": vmknics,
        "dns": dns_info,
    }

View File

@ -1,280 +0,0 @@
"""Monitoring and performance - stats, metrics, events."""
from datetime import UTC, datetime, timedelta
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class MonitoringMixin(MCPMixin):
    """VM and host monitoring tools."""

    def __init__(self, conn: "VMwareConnection") -> None:
        # Shared vSphere connection used by every tool in this mixin.
        self.conn = conn
@mcp_tool(
    name="get_vm_stats",
    description="Get current performance statistics for a virtual machine",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_vm_stats(self, name: str) -> dict[str, Any]:
    """Return the VM's quick-stats (CPU, memory, storage, uptime)."""
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    quick = vm.summary.quickStats
    disk = vm.summary.storage
    # storage summary can be absent (e.g. never powered on); report 0 then.
    committed_gb = round(disk.committed / (1024**3), 2) if disk else 0
    uncommitted_gb = round(disk.uncommitted / (1024**3), 2) if disk else 0
    uptime = quick.uptimeSeconds
    return {
        "name": name,
        "power_state": str(vm.runtime.powerState),
        "cpu_usage_mhz": quick.overallCpuUsage,
        "cpu_demand_mhz": quick.overallCpuDemand,
        "memory_usage_mb": quick.guestMemoryUsage,
        "memory_active_mb": quick.activeMemory,
        "memory_ballooned_mb": quick.balloonedMemory,
        "memory_swapped_mb": quick.swappedMemory,
        "storage_committed_gb": committed_gb,
        "storage_uncommitted_gb": uncommitted_gb,
        "uptime_seconds": uptime,
        "uptime_human": self._format_uptime(uptime) if uptime else None,
    }
@mcp_tool(
    name="get_host_stats",
    description="Get performance statistics for an ESXi host",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_host_stats(self, host_name: str | None = None) -> dict[str, Any]:
    """Get ESXi host performance statistics.

    If host_name is not provided, returns stats for the first host.

    Args:
        host_name: Optional host to query; defaults to the first host found.

    Returns:
        Dict with CPU/memory usage (absolute and percent), uptime, VM count.

    Raises:
        ValueError: If the named host (or any host at all) cannot be found.
    """
    if host_name:
        host = self.conn.find_host(host_name)
        if not host:
            raise ValueError(f"Host '{host_name}' not found")
    else:
        # Get first host via a recursive container view over the inventory.
        container = self.conn.content.viewManager.CreateContainerView(
            self.conn.content.rootFolder, [vim.HostSystem], True
        )
        try:
            hosts = list(container.view)
            if not hosts:
                raise ValueError("No ESXi hosts found")
            host = hosts[0]
        finally:
            # Views are server-side objects and must be destroyed explicitly.
            container.Destroy()
    summary = host.summary
    hardware = summary.hardware
    qs = summary.quickStats
    # NOTE(review): percent math assumes numCpuCores * cpuMhz > 0 and
    # memorySize > 0 — true for any connected host, but confirm for
    # disconnected/not-responding hosts.
    return {
        "name": host.name,
        "connection_state": str(summary.runtime.connectionState),
        "power_state": str(summary.runtime.powerState),
        "model": hardware.model,
        "vendor": hardware.vendor,
        "cpu_model": hardware.cpuModel,
        "cpu_cores": hardware.numCpuCores,
        "cpu_threads": hardware.numCpuThreads,
        "cpu_mhz": hardware.cpuMhz,
        "cpu_usage_mhz": qs.overallCpuUsage,
        "cpu_usage_percent": round(
            (qs.overallCpuUsage / (hardware.numCpuCores * hardware.cpuMhz)) * 100, 1
        )
        if qs.overallCpuUsage
        else 0,
        "memory_total_gb": round(hardware.memorySize / (1024**3), 2),
        "memory_usage_mb": qs.overallMemoryUsage,
        "memory_usage_percent": round(
            (qs.overallMemoryUsage * 1024 * 1024 / hardware.memorySize) * 100, 1
        )
        if qs.overallMemoryUsage
        else 0,
        "uptime_seconds": qs.uptime,
        "uptime_human": self._format_uptime(qs.uptime) if qs.uptime else None,
        "vm_count": len(host.vm) if host.vm else 0,
    }
@mcp_tool(
    name="list_hosts",
    description="List all ESXi hosts in the datacenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_hosts(self) -> list[dict[str, Any]]:
    """Return a basic summary for every ESXi host in the inventory."""
    view = self.conn.content.viewManager.CreateContainerView(
        self.conn.content.rootFolder, [vim.HostSystem], True
    )
    try:
        return [
            {
                "name": h.name,
                "connection_state": str(h.summary.runtime.connectionState),
                "power_state": str(h.summary.runtime.powerState),
                "model": h.summary.hardware.model,
                "cpu_cores": h.summary.hardware.numCpuCores,
                "memory_gb": round(h.summary.hardware.memorySize / (1024**3), 2),
                "vm_count": len(h.vm) if h.vm else 0,
            }
            for h in view.view
        ]
    finally:
        # Container views are server-side objects; always release them.
        view.Destroy()
@mcp_tool(
    name="get_recent_tasks",
    description="Get recent vSphere tasks (VM operations, etc.)",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_recent_tasks(self, count: int = 20) -> list[dict[str, Any]]:
    """Get recent vSphere tasks.

    Args:
        count: Maximum number of tasks to return (default 20).

    Returns:
        List of task summaries in task-manager order.
    """
    task_manager = self.conn.content.taskManager
    # recentTask can be None/empty when nothing is retained.
    recent_tasks = task_manager.recentTask[:count] if task_manager.recentTask else []
    tasks = []
    for task in recent_tasks:
        try:
            info = task.info
            tasks.append(
                {
                    "name": info.name,
                    "state": str(info.state),
                    "progress": info.progress,
                    "entity": info.entityName,
                    "queued_time": info.queueTime.isoformat()
                    if info.queueTime
                    else None,
                    "start_time": info.startTime.isoformat()
                    if info.startTime
                    else None,
                    "complete_time": info.completeTime.isoformat()
                    if info.completeTime
                    else None,
                    "description": str(info.description) if info.description else None,
                    "error": str(info.error) if info.error else None,
                }
            )
        except Exception:
            # Task may have been cleaned up server-side between listing and
            # reading its info; skip it rather than failing the whole call.
            continue
    return tasks
@mcp_tool(
    name="get_recent_events",
    description="Get recent vSphere events (alarms, changes, etc.)",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_recent_events(
    self, count: int = 50, hours: int = 24
) -> list[dict[str, Any]]:
    """Get recent vSphere events.

    Args:
        count: Maximum number of events per page (default 50).
        hours: How far back to look (default 24).

    Returns:
        List of event summaries from the collector's latest page.

    Raises:
        RuntimeError: If the event collector cannot be created or read.
    """
    event_manager = self.conn.content.eventManager
    # Create time filter restricting events to the requested window.
    time_filter = vim.event.EventFilterSpec.ByTime()
    time_filter.beginTime = datetime.now(UTC) - timedelta(hours=hours)
    filter_spec = vim.event.EventFilterSpec(time=time_filter)
    events = []
    try:
        collector = event_manager.CreateCollectorForEvents(filter_spec)
        try:
            # Page size caps how many events latestPage returns.
            collector.SetCollectorPageSize(count)
            latest_events = collector.latestPage
            for event in latest_events:
                events.append(
                    {
                        "key": event.key,
                        "type": type(event).__name__,
                        "created_time": event.createdTime.isoformat()
                        if event.createdTime
                        else None,
                        "message": event.fullFormattedMessage,
                        "username": event.userName,
                        "datacenter": event.datacenter.name
                        if event.datacenter
                        else None,
                        "host": event.host.name if event.host else None,
                        "vm": event.vm.name if event.vm else None,
                    }
                )
        finally:
            # Collectors are server-side objects; always destroy them.
            collector.DestroyCollector()
    except Exception as e:
        raise RuntimeError(f"Failed to retrieve events: {e}") from e
    return events
@mcp_tool(
    name="get_alarms",
    description="Get triggered alarms in the datacenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_alarms(self) -> list[dict[str, Any]]:
    """Collect triggered alarms on the datacenter and on every VM."""
    triggered: list[dict[str, Any]] = []
    # Datacenter-level alarms first, then per-VM alarms.
    dc_states = self.conn.datacenter.triggeredAlarmState
    if dc_states:
        triggered.extend(self._format_alarm(state) for state in dc_states)
    for vm in self.conn.get_all_vms():
        if vm.triggeredAlarmState:
            triggered.extend(
                self._format_alarm(state, vm.name)
                for state in vm.triggeredAlarmState
            )
    return triggered
def _format_alarm(
    self, alarm_state: vim.alarm.AlarmState, entity_name: str | None = None
) -> dict[str, Any]:
    """Flatten a triggered-alarm state object into a plain dict."""
    definition = alarm_state.alarm
    return {
        # The alarm definition reference may be stale/absent.
        "alarm": definition.info.name if definition else "Unknown",
        "entity": entity_name or str(alarm_state.entity),
        "status": str(alarm_state.overallStatus),
        "time": alarm_state.time.isoformat() if alarm_state.time else None,
        "acknowledged": alarm_state.acknowledged,
        "acknowledged_by": alarm_state.acknowledgedByUser,
    }
def _format_uptime(self, seconds: int | None) -> str:
"""Format uptime seconds to human readable string."""
if not seconds:
return "N/A"
days, remainder = divmod(seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, _ = divmod(remainder, 60)
if days > 0:
return f"{days}d {hours}h {minutes}m"
elif hours > 0:
return f"{hours}h {minutes}m"
else:
return f"{minutes}m"

View File

@ -1,432 +0,0 @@
"""Virtual NIC Management - add, remove, configure network adapters."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class NICManagementMixin(MCPMixin):
    """Virtual network adapter management tools."""

    def __init__(self, conn: "VMwareConnection") -> None:
        # Shared vSphere connection used by every tool in this mixin.
        self.conn = conn
def _find_nic_by_label(
    self, vm: vim.VirtualMachine, label: str
) -> vim.vm.device.VirtualEthernetCard | None:
    """Locate a NIC on *vm* by its device label, case-insensitively."""
    wanted = label.lower()
    return next(
        (
            dev
            for dev in vm.config.hardware.device
            if isinstance(dev, vim.vm.device.VirtualEthernetCard)
            and dev.deviceInfo.label.lower() == wanted
        ),
        None,
    )
def _get_network_backing(
    self, network_name: str
) -> vim.vm.device.VirtualEthernetCard.NetworkBackingInfo:
    """Build the NIC backing object appropriate for *network_name*."""
    net = self.conn.find_network(network_name)
    if not net:
        raise ValueError(f"Network '{network_name}' not found")
    if isinstance(net, vim.dvs.DistributedVirtualPortgroup):
        # Distributed portgroups are addressed by switch UUID + portgroup key.
        port = vim.dvs.PortConnection()
        port.portgroupKey = net.key
        port.switchUuid = net.config.distributedVirtualSwitch.uuid
        dvs_backing = (
            vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
        )
        dvs_backing.port = port
        return dvs_backing
    # Standard vSwitch portgroup: reference by network object and name.
    std_backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    std_backing.network = net
    std_backing.deviceName = network_name
    return std_backing
@mcp_tool(
    name="list_nics",
    description="List all network adapters attached to a VM",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_nics(self, vm_name: str) -> list[dict[str, Any]]:
    """List all virtual network adapters on a VM.

    Args:
        vm_name: Name of the virtual machine

    Returns:
        List of NIC details (label, type, MAC, connection state, network)

    Raises:
        ValueError: If the VM does not exist.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    nics = []
    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            nic_info = {
                "label": device.deviceInfo.label,
                # e.g. VirtualVmxnet3 -> "Vmxnet3"
                "type": type(device).__name__.replace("Virtual", ""),
                "mac_address": device.macAddress,
                "mac_type": device.addressType,
                "connected": device.connectable.connected if device.connectable else False,
                "start_connected": device.connectable.startConnected if device.connectable else False,
            }
            # Get network name from backing
            backing = device.backing
            if hasattr(backing, "deviceName"):
                # Standard vSwitch backing carries the portgroup name directly.
                nic_info["network"] = backing.deviceName
            elif hasattr(backing, "port") and hasattr(backing.port, "portgroupKey"):
                # For distributed switch, look up the portgroup name;
                # fall back to the raw key if no match is found.
                nic_info["network"] = f"DVS:{backing.port.portgroupKey}"
                # Try to get actual name
                for net in self.conn.datacenter.networkFolder.childEntity:
                    if hasattr(net, "key") and net.key == backing.port.portgroupKey:
                        nic_info["network"] = net.name
                        break
            nics.append(nic_info)
    return nics
@mcp_tool(
    name="add_nic",
    description="Add a new network adapter to a VM",
    annotations=ToolAnnotations(destructiveHint=True),
)
def add_nic(
    self,
    vm_name: str,
    network: str,
    nic_type: str = "vmxnet3",
    start_connected: bool = True,
) -> dict[str, Any]:
    """Add a new network adapter to a VM.

    Args:
        vm_name: Name of the virtual machine
        network: Network/portgroup name to connect to
        nic_type: Adapter type - vmxnet3 (default), vmxnet2, e1000, e1000e
        start_connected: Connect adapter when VM powers on (default True)

    Returns:
        Dict with new NIC details

    Raises:
        ValueError: If the VM is not found or nic_type is not supported.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    # Supported adapter types mapped to their pyVmomi device classes.
    nic_types = {
        "vmxnet3": vim.vm.device.VirtualVmxnet3,
        "vmxnet2": vim.vm.device.VirtualVmxnet2,
        "e1000": vim.vm.device.VirtualE1000,
        "e1000e": vim.vm.device.VirtualE1000e,
    }
    nic_class = nic_types.get(nic_type.lower())
    if not nic_class:
        raise ValueError(f"Unknown NIC type '{nic_type}'. Valid: {list(nic_types.keys())}")
    # Snapshot the keys of existing ethernet devices so the new adapter can
    # be identified by key difference after the reconfigure. (Matching on
    # backing.deviceName == network, as before, never matched distributed
    # portgroup backings and picked the wrong device when several NICs
    # shared one network.)
    existing_keys = {
        d.key
        for d in vm.config.hardware.device
        if isinstance(d, vim.vm.device.VirtualEthernetCard)
    }
    # Create the NIC
    nic = nic_class()
    nic.backing = self._get_network_backing(network)
    nic.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic.connectable.startConnected = start_connected
    nic.connectable.connected = False  # Can't connect until powered on
    nic.connectable.allowGuestControl = True
    nic.addressType = "generated"  # Let ESXi generate MAC address
    # Create device add spec
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    nic_spec.device = nic
    # Create VM config spec
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    # Reconfigure VM
    task = vm.ReconfigVM_Task(spec=config_spec)
    self.conn.wait_for_task(task)
    # Refresh the VM view and locate the adapter that was not present before.
    vm.Reload()
    new_nic = next(
        (
            d
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualEthernetCard)
            and d.key not in existing_keys
        ),
        None,
    )
    mac_address = new_nic.macAddress if new_nic else "pending"
    return {
        "vm": vm_name,
        "action": "nic_added",
        "network": network,
        "nic_type": nic_type,
        "mac_address": mac_address,
        "start_connected": start_connected,
    }
@mcp_tool(
    name="remove_nic",
    description="Remove a network adapter from a VM",
    annotations=ToolAnnotations(destructiveHint=True),
)
def remove_nic(
    self,
    vm_name: str,
    nic_label: str,
) -> dict[str, Any]:
    """Detach and delete a network adapter from a VM.

    Args:
        vm_name: Name of the virtual machine
        nic_label: Label of NIC to remove (e.g., 'Network adapter 1')

    Returns:
        Dict with removal details

    Raises:
        ValueError: If the VM or the labelled NIC cannot be found.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    nic = self._find_nic_by_label(vm, nic_label)
    if not nic:
        available = [
            d.deviceInfo.label
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualEthernetCard)
        ]
        raise ValueError(f"NIC '{nic_label}' not found. Available: {available}")
    # Capture identifying details before the device disappears.
    mac_address = nic.macAddress
    network = "unknown"
    backing = nic.backing
    if hasattr(backing, "deviceName"):
        network = backing.deviceName
    # Build a remove spec and apply it through a reconfigure task.
    device_change = vim.vm.device.VirtualDeviceSpec()
    device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
    device_change.device = nic
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [device_change]
    self.conn.wait_for_task(vm.ReconfigVM_Task(spec=spec))
    return {
        "vm": vm_name,
        "action": "nic_removed",
        "nic_label": nic_label,
        "mac_address": mac_address,
        "network": network,
    }
@mcp_tool(
    name="change_nic_network",
    description="Change which network a NIC is connected to",
    annotations=ToolAnnotations(destructiveHint=True),
)
def change_nic_network(
    self,
    vm_name: str,
    nic_label: str,
    new_network: str,
) -> dict[str, Any]:
    """Re-point an existing NIC at a different network/portgroup.

    Args:
        vm_name: Name of the virtual machine
        nic_label: Label of NIC to modify (e.g., 'Network adapter 1')
        new_network: New network/portgroup name

    Returns:
        Dict with change details

    Raises:
        ValueError: If the VM or the labelled NIC cannot be found.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    nic = self._find_nic_by_label(vm, nic_label)
    if not nic:
        available = [
            d.deviceInfo.label
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualEthernetCard)
        ]
        raise ValueError(f"NIC '{nic_label}' not found. Available: {available}")
    # Remember the current network name (standard vSwitch backings expose it).
    old_network = "unknown"
    if hasattr(nic.backing, "deviceName"):
        old_network = nic.backing.deviceName
    # Swap in the new backing, then push the edit via a reconfigure task.
    nic.backing = self._get_network_backing(new_network)
    device_change = vim.vm.device.VirtualDeviceSpec()
    device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    device_change.device = nic
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [device_change]
    self.conn.wait_for_task(vm.ReconfigVM_Task(spec=spec))
    return {
        "vm": vm_name,
        "action": "nic_network_changed",
        "nic_label": nic_label,
        "old_network": old_network,
        "new_network": new_network,
        "mac_address": nic.macAddress,
    }
@mcp_tool(
    name="connect_nic",
    description="Connect or disconnect a NIC on a running VM",
    annotations=ToolAnnotations(destructiveHint=True),
)
def connect_nic(
    self,
    vm_name: str,
    nic_label: str,
    connected: bool = True,
) -> dict[str, Any]:
    """Connect or disconnect a NIC on a running VM.

    Args:
        vm_name: Name of the virtual machine
        nic_label: Label of NIC (e.g., 'Network adapter 1')
        connected: True to connect, False to disconnect

    Returns:
        Dict with connection status

    Raises:
        ValueError: If the VM/NIC is not found or the VM is not powered on.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    nic = self._find_nic_by_label(vm, nic_label)
    if not nic:
        available = [
            d.deviceInfo.label
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualEthernetCard)
        ]
        raise ValueError(f"NIC '{nic_label}' not found. Available: {available}")
    # Connection state can only be toggled while the VM is running.
    if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
        raise ValueError("VM must be powered on to change NIC connection state")
    nic.connectable.connected = connected
    # Push the edit through a reconfigure task.
    device_change = vim.vm.device.VirtualDeviceSpec()
    device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    device_change.device = nic
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = [device_change]
    self.conn.wait_for_task(vm.ReconfigVM_Task(spec=spec))
    return {
        "vm": vm_name,
        "action": "nic_connected" if connected else "nic_disconnected",
        "nic_label": nic_label,
        "connected": connected,
    }
@mcp_tool(
    name="set_nic_mac",
    description="Set a custom MAC address for a NIC",
    annotations=ToolAnnotations(destructiveHint=True),
)
def set_nic_mac(
    self,
    vm_name: str,
    nic_label: str,
    mac_address: str,
) -> dict[str, Any]:
    """Set a custom MAC address for a NIC.

    Args:
        vm_name: Name of the virtual machine
        nic_label: Label of NIC (e.g., 'Network adapter 1')
        mac_address: MAC address in format XX:XX:XX:XX:XX:XX

    Returns:
        Dict with MAC address change details

    Raises:
        ValueError: If the MAC is malformed or the VM/NIC cannot be found.
    """
    import re

    # Validate the cheap, purely local input first, before making any
    # vSphere round-trips (previously this ran after both lookups).
    if not re.match(r"^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$", mac_address):
        raise ValueError(f"Invalid MAC address format: {mac_address}")
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    nic = self._find_nic_by_label(vm, nic_label)
    if not nic:
        available = [
            d.deviceInfo.label
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualEthernetCard)
        ]
        raise ValueError(f"NIC '{nic_label}' not found. Available: {available}")
    old_mac = nic.macAddress
    # Switch the adapter to a manually assigned MAC address.
    nic.addressType = "manual"
    nic.macAddress = mac_address
    # Create device edit spec
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    nic_spec.device = nic
    # Create VM config spec
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    # Reconfigure VM
    task = vm.ReconfigVM_Task(spec=config_spec)
    self.conn.wait_for_task(task)
    return {
        "vm": vm_name,
        "action": "mac_address_changed",
        "nic_label": nic_label,
        "old_mac": old_mac,
        "new_mac": mac_address,
    }

View File

@ -1,456 +0,0 @@
"""OVF/OVA Management - deploy and export virtual appliances."""
import ssl
import tarfile
import tempfile
import urllib.request
from pathlib import Path
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class OVFManagementMixin(MCPMixin):
    """OVF/OVA deployment and export tools.

    Mixin exposing MCP tools for deploying virtual appliances from OVF
    descriptors stored on a datastore and for exporting VMs back to OVF.
    Several transfer paths are partially implemented (see the stubbed
    ``_upload_disk_from_datastore`` and the export disk-download loop).
    """
    def __init__(self, conn: "VMwareConnection"):
        # Shared vSphere connection used by every tool in this mixin.
        self.conn = conn
    def _extract_ova(self, ova_path: str) -> tuple[str, str, list[str]]:
        """Extract OVA file and return (temp_dir, ovf_path, disk_files).

        The caller is responsible for removing the returned temp_dir.
        Raises ValueError when the archive contains no .ovf descriptor.
        """
        temp_dir = tempfile.mkdtemp(prefix="ovf_")
        with tarfile.open(ova_path, "r") as tar:
            # NOTE(review): extractall() without a member filter is exposed to
            # path-traversal entries in an untrusted OVA — consider
            # tar.extractall(temp_dir, filter="data") on Python 3.12+. Confirm
            # whether OVA inputs here are trusted.
            tar.extractall(temp_dir)
        # Find OVF descriptor and disk files
        temp_path = Path(temp_dir)
        ovf_files = list(temp_path.glob("*.ovf"))
        if not ovf_files:
            raise ValueError("No OVF descriptor found in OVA file")
        ovf_path = str(ovf_files[0])
        disk_files = [str(f) for f in temp_path.glob("*.vmdk")]
        disk_files.extend([str(f) for f in temp_path.glob("*.iso")])
        return temp_dir, ovf_path, disk_files
    def _get_ovf_descriptor(self, ovf_path: str) -> str:
        """Read OVF descriptor XML content from a local file."""
        with open(ovf_path) as f:
            return f.read()
    def _upload_disk_to_lease(
        self,
        _lease: vim.HttpNfcLease,
        disk_path: str,
        device_url: str,
    ) -> None:
        """Upload a local disk file to an NFC lease device URL via HTTP PUT.

        Args:
            _lease: The NFC lease (unused here; kept for the call signature).
            disk_path: Local path of the disk file to upload.
            device_url: Lease-provided upload URL for this device.
        """
        # Create SSL context; certificate checks are disabled only when the
        # connection settings explicitly allow insecure vCenter access.
        context = ssl.create_default_context()
        if self.conn.settings.vcenter_insecure:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        # Get file size
        file_size = Path(disk_path).stat().st_size
        # Create upload request
        request = urllib.request.Request(device_url, method="PUT")
        request.add_header("Content-Type", "application/x-vnd.vmware-streamVmdk")
        request.add_header("Content-Length", str(file_size))
        request.add_header("Connection", "Keep-Alive")
        # Reuse the authenticated pyVmomi session cookie for the HTTP upload.
        if hasattr(self.conn.service_instance, "_stub"):
            cookie = self.conn.service_instance._stub.cookie
            if cookie:
                request.add_header("Cookie", cookie)
        # Upload file. Assigning the open file object to request.data lets
        # urlopen stream the body; urlopen must run inside this 'with' block
        # so the file stays open for the duration of the transfer.
        with open(disk_path, "rb") as f:
            request.data = f
            urllib.request.urlopen(request, context=context)
    @mcp_tool(
        name="deploy_ovf",
        description="Deploy a VM from an OVF or OVA file on a datastore",
        annotations=ToolAnnotations(destructiveHint=True),
    )
    def deploy_ovf(
        self,
        ovf_path: str,
        vm_name: str,
        datastore: str,
        network: str | None = None,
        power_on: bool = False,
    ) -> dict[str, Any]:
        """Deploy a virtual machine from an OVF or OVA file.

        The OVF/OVA must already be on a datastore accessible to ESXi.

        Args:
            ovf_path: Path to OVF/OVA on datastore (e.g., 'templates/ubuntu.ova')
            vm_name: Name for the new VM
            datastore: Target datastore for VM files
            network: Network to connect VM to (optional)
            power_on: Power on VM after deployment (default False)

        Returns:
            Dict with deployment details

        Raises:
            ValueError: For missing datastore/host, OVA input (unsupported),
                OVF import errors, or NFC lease failures.
        """
        # Get OVF Manager
        ovf_manager = self.conn.content.ovfManager
        # Find target datastore
        ds = self.conn.find_datastore(datastore)
        if not ds:
            raise ValueError(f"Datastore '{datastore}' not found")
        # Pick the first child of the host folder that looks like a host
        # container (standalone host) or a resource-pool owner (cluster).
        host = None
        for h in self.conn.datacenter.hostFolder.childEntity:
            if hasattr(h, "host"):
                host = h.host[0] if h.host else None
                break
            elif hasattr(h, "resourcePool"):
                host = h
                break
        if not host:
            raise ValueError("No ESXi host found")
        # Get resource pool
        if hasattr(host, "resourcePool"):
            resource_pool = host.resourcePool
        else:
            resource_pool = host.parent.resourcePool
        # Get VM folder
        vm_folder = self.conn.datacenter.vmFolder
        # Read OVF descriptor from datastore.
        # For OVA, we would need to extract first — not supported here.
        is_ova = ovf_path.lower().endswith(".ova")
        if is_ova:
            # Download OVA to temp location for extraction
            # This is complex - for now, require OVF files directly
            raise ValueError(
                "Direct OVA deployment from datastore not yet supported. "
                "Please extract OVF first, or use local OVA deployment."
            )
        # Read OVF descriptor via datastore browser
        ovf_descriptor = self._read_datastore_file(datastore, ovf_path)
        # Create import spec params
        import_spec_params = vim.OvfManager.CreateImportSpecParams(
            entityName=vm_name,
            diskProvisioning="thin",
        )
        # If network specified, map it onto the descriptor's network.
        if network:
            net = self.conn.find_network(network)
            if net:
                network_mapping = vim.OvfManager.NetworkMapping(
                    name="VM Network",  # Common default in OVF
                    network=net,
                )
                import_spec_params.networkMapping = [network_mapping]
        # Create import spec
        import_spec = ovf_manager.CreateImportSpec(
            ovfDescriptor=ovf_descriptor,
            resourcePool=resource_pool,
            datastore=ds,
            cisp=import_spec_params,
        )
        if import_spec.error:
            errors = [str(e.msg) for e in import_spec.error]
            raise ValueError(f"OVF import errors: {errors}")
        # Import the OVF — this opens an NFC lease for disk uploads.
        lease = resource_pool.ImportVApp(
            spec=import_spec.importSpec,
            folder=vm_folder,
        )
        # Wait for lease to be ready.
        # NOTE(review): this busy-waits at 100% CPU; a short time.sleep()
        # inside the loop would be kinder.
        while lease.state == vim.HttpNfcLease.State.initializing:
            pass
        if lease.state == vim.HttpNfcLease.State.error:
            raise ValueError(f"Lease error: {lease.error}")
        # Upload disk files if the import spec requires them.
        if import_spec.fileItem:
            ovf_dir = str(Path(ovf_path).parent)
            for item in import_spec.fileItem:
                for device_url in lease.info.deviceUrl:
                    if device_url.importKey == item.deviceId:
                        disk_path = f"{ovf_dir}/{item.path}"
                        # NOTE(review): _upload_disk_from_datastore is a stub
                        # (pass), so disks are not actually transferred yet.
                        self._upload_disk_from_datastore(
                            datastore, disk_path, device_url.url
                        )
                        break
        # Complete the lease
        lease.Complete()
        # Find the newly created VM
        vm = self.conn.find_vm(vm_name)
        result = {
            "vm": vm_name,
            "action": "ovf_deployed",
            "datastore": datastore,
            "source": ovf_path,
        }
        if vm:
            result["uuid"] = vm.config.uuid
            if power_on:
                task = vm.PowerOnVM_Task()
                self.conn.wait_for_task(task)
                result["power_state"] = "poweredOn"
            else:
                result["power_state"] = "poweredOff"
        return result
    def _read_datastore_file(self, datastore: str, path: str) -> str:
        """Read a text file from a datastore via the ESXi /folder HTTP API.

        Raises ValueError when the datastore does not exist.
        """
        ds = self.conn.find_datastore(datastore)
        if not ds:
            raise ValueError(f"Datastore '{datastore}' not found")
        # Build HTTP URL for the datastore file service.
        dc_name = self.conn.datacenter.name
        url = (
            f"https://{self.conn.settings.vcenter_host}/folder/{path}"
            f"?dcPath={dc_name}&dsName={datastore}"
        )
        # Setup request; TLS verification is relaxed only for insecure mode.
        context = ssl.create_default_context()
        if self.conn.settings.vcenter_insecure:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        request = urllib.request.Request(url)
        # Reuse the authenticated pyVmomi session cookie.
        if hasattr(self.conn.service_instance, "_stub"):
            cookie = self.conn.service_instance._stub.cookie
            if cookie:
                request.add_header("Cookie", cookie)
        with urllib.request.urlopen(request, context=context) as response:
            return response.read().decode("utf-8")
    def _upload_disk_from_datastore(
        self, datastore: str, disk_path: str, target_url: str
    ) -> None:
        """Stream disk from datastore to NFC lease URL.

        Currently a no-op stub: piping from the datastore HTTP endpoint into
        an NFC upload is not implemented, so deploy_ovf cannot yet transfer
        disk contents.
        """
        # This is complex - need to pipe from datastore HTTP to NFC upload
        # For now, document this limitation
        pass
    @mcp_tool(
        name="export_vm_ovf",
        description="Export a VM to OVF format on a datastore",
        annotations=ToolAnnotations(readOnlyHint=False),
    )
    def export_vm_ovf(
        self,
        vm_name: str,
        target_path: str,
        datastore: str | None = None,
    ) -> dict[str, Any]:
        """Export a virtual machine to OVF format.

        Args:
            vm_name: Name of the VM to export
            target_path: Target directory path on datastore
            datastore: Target datastore (default: VM's datastore)

        Returns:
            Dict with export details

        Raises:
            ValueError: If the VM is missing, powered on, the datastore is
                missing, or the export lease / descriptor fails.
        """
        vm = self.conn.find_vm(vm_name)
        if not vm:
            raise ValueError(f"VM '{vm_name}' not found")
        # VM must be powered off
        if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
            raise ValueError("VM must be powered off to export")
        # Determine target datastore
        if datastore:
            ds = self.conn.find_datastore(datastore)
            if not ds:
                raise ValueError(f"Datastore '{datastore}' not found")
            ds_name = datastore
        else:
            # Derive the datastore name from "[dsname] path/vm.vmx".
            ds_name = vm.config.files.vmPathName.split("]")[0].strip("[")
        # Get export lease
        lease = vm.ExportVm()
        # Wait for lease to be ready.
        # NOTE(review): busy-wait — same CPU-spin concern as in deploy_ovf.
        while lease.state == vim.HttpNfcLease.State.initializing:
            pass
        if lease.state == vim.HttpNfcLease.State.error:
            raise ValueError(f"Export lease error: {lease.error}")
        # Get OVF descriptor
        ovf_manager = self.conn.content.ovfManager
        ovf_descriptor = ovf_manager.CreateDescriptor(
            obj=vm,
            cdp=vim.OvfManager.CreateDescriptorParams(
                name=vm_name,
                description=f"Exported from {vm_name}",
            ),
        )
        if ovf_descriptor.error:
            lease.Abort(fault=vim.LocalizedMethodFault(localizedMessage="OVF error"))
            errors = [str(e.msg) for e in ovf_descriptor.error]
            raise ValueError(f"OVF descriptor errors: {errors}")
        # Download disk files from lease.
        # NOTE(review): only the would-be paths are recorded; the actual
        # NFC-to-datastore streaming is not implemented yet.
        exported_files = []
        for device_url in lease.info.deviceUrl:
            # Get disk key for filename
            disk_key = device_url.key
            # Create output path
            disk_filename = f"{vm_name}-disk-{disk_key}.vmdk"
            output_path = f"{target_path}/{disk_filename}"
            # Download disk to datastore
            # This would need streaming from NFC to datastore HTTP PUT
            exported_files.append(output_path)
        # Write OVF descriptor
        ovf_filename = f"{vm_name}.ovf"
        ovf_output_path = f"{target_path}/{ovf_filename}"
        # Upload OVF descriptor to datastore
        self._write_datastore_file(ds_name, ovf_output_path, ovf_descriptor.ovfDescriptor)
        exported_files.append(ovf_output_path)
        # Complete the lease
        lease.Complete()
        return {
            "vm": vm_name,
            "action": "vm_exported",
            "datastore": ds_name,
            "target_path": target_path,
            "files": exported_files,
            "ovf_descriptor": ovf_filename,
        }
    def _write_datastore_file(self, datastore: str, path: str, content: str) -> None:
        """Write a text file to a datastore via the ESXi /folder HTTP API."""
        dc_name = self.conn.datacenter.name
        url = (
            f"https://{self.conn.settings.vcenter_host}/folder/{path}"
            f"?dcPath={dc_name}&dsName={datastore}"
        )
        # Setup request; TLS verification is relaxed only for insecure mode.
        context = ssl.create_default_context()
        if self.conn.settings.vcenter_insecure:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        data = content.encode("utf-8")
        request = urllib.request.Request(url, data=data, method="PUT")
        request.add_header("Content-Type", "application/xml")
        request.add_header("Content-Length", str(len(data)))
        # Reuse the authenticated pyVmomi session cookie.
        if hasattr(self.conn.service_instance, "_stub"):
            cookie = self.conn.service_instance._stub.cookie
            if cookie:
                request.add_header("Cookie", cookie)
        urllib.request.urlopen(request, context=context)
    @mcp_tool(
        name="list_ovf_networks",
        description="List networks defined in an OVF file",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def list_ovf_networks(
        self,
        ovf_path: str,
        datastore: str,
    ) -> list[dict[str, str]]:
        """List networks defined in an OVF descriptor.

        Args:
            ovf_path: Path to OVF file on datastore
            datastore: Datastore containing the OVF

        Returns:
            List of network definitions (plus any network-related warnings)

        Raises:
            ValueError: If no ESXi host can be located.
        """
        # Read OVF descriptor
        ovf_descriptor = self._read_datastore_file(datastore, ovf_path)
        # Parse network references
        ovf_manager = self.conn.content.ovfManager
        # A resource pool is required by CreateImportSpec even for parsing.
        host = None
        for h in self.conn.datacenter.hostFolder.childEntity:
            if hasattr(h, "host"):
                host = h.host[0] if h.host else None
                break
        if not host:
            raise ValueError("No ESXi host found")
        resource_pool = host.parent.resourcePool if hasattr(host, "parent") else None
        ds = self.conn.find_datastore(datastore)
        # Create parse params to extract network info
        import_spec_params = vim.OvfManager.CreateImportSpecParams()
        result = ovf_manager.CreateImportSpec(
            ovfDescriptor=ovf_descriptor,
            resourcePool=resource_pool,
            datastore=ds,
            cisp=import_spec_params,
        )
        networks = []
        if result.importSpec and hasattr(result.importSpec, "networkMapping"):
            for net in result.importSpec.networkMapping:
                networks.append({
                    "name": net.name,
                    "network": net.network.name if net.network else "Not mapped",
                })
        # Also surface network-related warnings from the parse.
        if result.warning:
            for warn in result.warning:
                if "network" in str(warn.msg).lower():
                    networks.append({
                        "name": "Warning",
                        "network": str(warn.msg),
                    })
        return networks

View File

@ -1,159 +0,0 @@
"""Power operations - power on/off, shutdown, reset, suspend."""
from typing import TYPE_CHECKING
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class PowerOpsMixin(MCPMixin):
    """VM power management tools."""

    def __init__(self, conn: "VMwareConnection"):
        # Shared vSphere connection used by every power tool.
        self.conn = conn

    @mcp_tool(
        name="power_on",
        description="Power on a virtual machine",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def power_on(self, name: str) -> str:
        """Start a VM, reporting when it was already running."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        if vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOn:
            return f"VM '{name}' is already powered on"
        self.conn.wait_for_task(vm.PowerOnVM_Task())
        return f"VM '{name}' powered on"

    @mcp_tool(
        name="power_off",
        description="Power off a virtual machine (hard shutdown, like pulling the power cord)",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
    )
    def power_off(self, name: str) -> str:
        """Hard power-off, without involving the guest OS."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        if vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOff:
            return f"VM '{name}' is already powered off"
        self.conn.wait_for_task(vm.PowerOffVM_Task())
        return f"VM '{name}' powered off"

    @mcp_tool(
        name="shutdown_guest",
        description="Gracefully shutdown the guest OS (requires VMware Tools installed and running)",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
    )
    def shutdown_guest(self, name: str) -> str:
        """Ask the guest OS, via VMware Tools, to shut down cleanly."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        if vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOff:
            return f"VM '{name}' is already powered off"
        if vm.guest.toolsRunningStatus != "guestToolsRunning":
            raise RuntimeError(
                f"VMware Tools not running on '{name}'. "
                "Use power_off for hard shutdown instead."
            )
        vm.ShutdownGuest()
        return f"Guest shutdown initiated for VM '{name}'"

    @mcp_tool(
        name="reboot_guest",
        description="Gracefully reboot the guest OS (requires VMware Tools)",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=False),
    )
    def reboot_guest(self, name: str) -> str:
        """Ask the guest OS, via VMware Tools, to restart cleanly."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        if vm.runtime.powerState != vim.VirtualMachine.PowerState.poweredOn:
            raise RuntimeError(f"VM '{name}' is not powered on")
        if vm.guest.toolsRunningStatus != "guestToolsRunning":
            raise RuntimeError(
                f"VMware Tools not running on '{name}'. "
                "Use reset_vm for hard reset instead."
            )
        vm.RebootGuest()
        return f"Guest reboot initiated for VM '{name}'"

    @mcp_tool(
        name="reset_vm",
        description="Reset (hard reboot) a virtual machine - like pressing the reset button",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=False),
    )
    def reset_vm(self, name: str) -> str:
        """Hard reset — equivalent to pressing the physical reset button."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        self.conn.wait_for_task(vm.ResetVM_Task())
        return f"VM '{name}' reset"

    @mcp_tool(
        name="suspend_vm",
        description="Suspend a virtual machine (save state to disk)",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def suspend_vm(self, name: str) -> str:
        """Suspend a running VM, saving its state to disk."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        state = vm.runtime.powerState
        if state == vim.VirtualMachine.PowerState.suspended:
            return f"VM '{name}' is already suspended"
        if state == vim.VirtualMachine.PowerState.poweredOff:
            return f"VM '{name}' is powered off, cannot suspend"
        self.conn.wait_for_task(vm.SuspendVM_Task())
        return f"VM '{name}' suspended"

    @mcp_tool(
        name="standby_guest",
        description="Put guest OS into standby mode (requires VMware Tools)",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def standby_guest(self, name: str) -> str:
        """Ask the guest OS, via VMware Tools, to enter standby."""
        if not (vm := self.conn.find_vm(name)):
            raise ValueError(f"VM '{name}' not found")
        if vm.runtime.powerState != vim.VirtualMachine.PowerState.poweredOn:
            raise RuntimeError(f"VM '{name}' is not powered on")
        if vm.guest.toolsRunningStatus != "guestToolsRunning":
            raise RuntimeError(f"VMware Tools not running on '{name}'")
        vm.StandbyGuest()
        return f"Standby initiated for VM '{name}'"

View File

@ -1,859 +0,0 @@
"""MCP Resources - datastores, networks, hosts, clusters."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_resource, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class ResourcesMixin(MCPMixin):
"""MCP Resources for vSphere infrastructure."""
def __init__(self, conn: "VMwareConnection"):
    # Shared vSphere connection used by all resource tools in this mixin.
    self.conn = conn
# ─────────────────────────────────────────────────────────────────────────────
# Datastore File Browser (templated resource)
# ─────────────────────────────────────────────────────────────────────────────
@mcp_resource(
    uri="datastore://{datastore_name}",
    name="datastore_files",
    description="Browse root files on a datastore (e.g., datastore://c1_ds-02)",
)
def browse_datastore_root(self, datastore_name: str) -> list[dict[str, Any]]:
    """Browse files and folders at the root of a datastore.

    Thin resource wrapper: delegates to _browse_datastore_path with an
    empty path, which lists the datastore root.
    """
    return self._browse_datastore_path(datastore_name, "")
def _browse_datastore_path(
    self, datastore_name: str, path: str
) -> list[dict[str, Any]]:
    """Browse files and folders on a datastore at a given path.

    Args:
        datastore_name: Datastore to browse.
        path: Folder path within the datastore ("" for the root).

    Returns:
        One dict per entry (name, type, sizes, modified time), folders
        sorted first, then by name.

    Raises:
        ValueError: If the datastore does not exist.
    """
    ds = self.conn.find_datastore(datastore_name)
    if not ds:
        raise ValueError(f"Datastore '{datastore_name}' not found")
    browser = ds.browser
    # Build the "[datastore] folder/" style datastore path.
    if path and not path.endswith("/"):
        path = path + "/"
    ds_path = f"[{datastore_name}] {path}" if path else f"[{datastore_name}]"
    # Search spec requesting type/size/mtime details for each entry.
    search_spec = vim.host.DatastoreBrowser.SearchSpec()
    search_spec.details = vim.host.DatastoreBrowser.FileInfo.Details(
        fileType=True,
        fileSize=True,
        modification=True,
        fileOwner=False,
    )
    # Match all files
    search_spec.matchPattern = ["*"]
    # Search for files
    task = browser.SearchDatastore_Task(ds_path, search_spec)
    self.conn.wait_for_task(task)
    results = []
    if task.info.result and task.info.result.file:
        for file_info in task.info.result.file:
            # Derive a short type tag from the pyVmomi class name.
            file_type = type(file_info).__name__.replace("Info", "")
            entry = {
                "name": file_info.path,
                "type": file_type,
                "size_bytes": file_info.fileSize if file_info.fileSize else 0,
                "size_human": self._format_size(file_info.fileSize)
                if file_info.fileSize
                else "0 B",
                "modified": file_info.modification.isoformat()
                if file_info.modification
                else None,
            }
            # Add type-specific info for virtual disk entries.
            if isinstance(file_info, vim.host.DatastoreBrowser.VmDiskInfo):
                entry["disk_type"] = file_info.diskType
                entry["capacity_kb"] = file_info.capacityKb
                entry["hardware_version"] = file_info.hardwareVersion
            results.append(entry)
    # Sort folders first, then alphabetically.
    # NOTE(review): this assumes folder entries stringify their type as
    # exactly "Folder"; verify against the pyVmomi class name actually
    # produced for vim.host.DatastoreBrowser folder results.
    return sorted(results, key=lambda x: (x["type"] != "Folder", x["name"]))
def _stream_from_esxi(self, datastore: str, path: str, chunk_size: int = 1024 * 1024):
    """Generator that streams file content from an ESXi datastore.

    First yields the total size as an int (or None when the server sends no
    Content-Length header), then yields raw bytes chunks as they arrive from
    the ESXi HTTP file API. Used internally for memory-efficient transfers.

    Args:
        datastore: Datastore name
        path: File path within the datastore
        chunk_size: Read size per chunk in bytes (default 1 MiB)

    Raises:
        ValueError: If the datastore or the file does not exist.
        RuntimeError: For any other HTTP failure.
    """
    import ssl
    # urllib.error must be imported explicitly: the except clause below
    # previously relied on 'import urllib.request' loading it as a side
    # effect, which is an implementation detail.
    import urllib.error
    import urllib.request
    from urllib.parse import quote

    ds = self.conn.find_datastore(datastore)
    if not ds:
        raise ValueError(f"Datastore '{datastore}' not found")
    # Build download URL for the /folder datastore file service.
    dc_name = self.conn.datacenter.name
    host = self.conn.settings.vcenter_host
    encoded_path = quote(path, safe="")
    url = (
        f"https://{host}/folder/{encoded_path}"
        f"?dcPath={quote(dc_name)}&dsName={quote(datastore)}"
    )
    # Create SSL context; verification relaxed only in insecure mode.
    context = ssl.create_default_context()
    if self.conn.settings.vcenter_insecure:
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    # Reuse the authenticated pyVmomi session cookie.
    stub = self.conn.si._stub
    cookie = stub.cookie
    request = urllib.request.Request(url, method="GET")
    request.add_header("Cookie", cookie)
    try:
        with urllib.request.urlopen(request, context=context) as response:
            # Yield total size first (or None if unknown)
            content_length = response.headers.get("Content-Length")
            yield int(content_length) if content_length else None
            # Then yield chunks
            while True:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                yield chunk
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise ValueError(f"File not found: [{datastore}] {path}") from e
        raise RuntimeError(f"Download failed: {e.code} {e.reason}") from e
@mcp_tool(
    name="browse_datastore",
    description="Browse files in a datastore folder (use path='' for root)",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def browse_datastore_tool(
    self, datastore: str, path: str = ""
) -> list[dict[str, Any]]:
    """Browse files at a specific path on a datastore.

    Thin tool wrapper around _browse_datastore_path.

    Args:
        datastore: Datastore name (e.g., c1_ds-02)
        path: Path within datastore (e.g., "rpm-desktop-1/" or "" for root)
    """
    return self._browse_datastore_path(datastore, path)
@mcp_tool(
    name="download_from_datastore",
    description="Download a file from datastore. Returns content for small files, streams to disk for large files.",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def download_from_datastore(
    self,
    datastore: str,
    path: str,
    save_to: str | None = None,
    max_memory_mb: int = 50,
) -> dict[str, Any]:
    """Download a file from a datastore using streaming.

    Streams data from ESXi as it arrives (generator-based).
    For small files: assembles chunks and returns content.
    For large files or when save_to is given: streams directly to disk.

    Args:
        datastore: Datastore name (e.g., c1_ds-02)
        path: Path to file on datastore (e.g., "iso/readme.txt")
        save_to: Local path to save file (recommended for large files)
        max_memory_mb: Max file size in MB to return in response (default 50MB)

    Returns:
        Dict with file content or save confirmation

    Raises:
        ValueError: If the file exceeds max_memory_mb and save_to is unset
            (plus whatever _stream_from_esxi raises for missing files).
    """
    import base64
    max_bytes = max_memory_mb * 1024 * 1024
    stream = self._stream_from_esxi(datastore, path)
    # Generator protocol: the first yield is the total size (or None when
    # the server sent no Content-Length); subsequent yields are byte chunks.
    total_size = next(stream)
    if save_to:
        # Stream directly to disk, chunk by chunk — never buffers the file.
        bytes_written = 0
        with open(save_to, "wb") as f:
            for chunk in stream:
                f.write(chunk)
                bytes_written += len(chunk)
        return {
            "datastore": datastore,
            "path": path,
            "size_bytes": bytes_written,
            "size_human": self._format_size(bytes_written),
            "saved_to": save_to,
        }
    # Check the advertised size limit before loading into memory.
    if total_size and total_size > max_bytes:
        raise ValueError(
            f"File too large: {self._format_size(total_size)} exceeds {max_memory_mb}MB limit. "
            f"Use save_to parameter to stream to disk."
        )
    # Assemble chunks into memory, re-checking the limit as data arrives
    # (guards against a missing or understated Content-Length).
    chunks = []
    bytes_read = 0
    for chunk in stream:
        bytes_read += len(chunk)
        if bytes_read > max_bytes:
            raise ValueError(
                f"File exceeded {max_memory_mb}MB limit during streaming. "
                f"Use save_to parameter for large files."
            )
        chunks.append(chunk)
    content = b"".join(chunks)
    # Try to decode as text; binary content falls back to base64.
    try:
        return {
            "datastore": datastore,
            "path": path,
            "size_bytes": len(content),
            "size_human": self._format_size(len(content)),
            "encoding": "utf-8",
            "content": content.decode("utf-8"),
        }
    except UnicodeDecodeError:
        return {
            "datastore": datastore,
            "path": path,
            "size_bytes": len(content),
            "size_human": self._format_size(len(content)),
            "encoding": "base64",
            "content": base64.b64encode(content).decode("ascii"),
        }
@mcp_tool(
    name="upload_to_datastore",
    description="Upload a file to a datastore from local path or base64 content. Streams large files from disk.",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def upload_to_datastore(
    self,
    datastore: str,
    remote_path: str,
    local_path: str | None = None,
    content_base64: str | None = None,
    chunk_size: int = 8 * 1024 * 1024,  # 8MB chunks
) -> dict[str, Any]:
    """Upload a file to a datastore via the vSphere HTTP /folder endpoint.

    Exactly one of local_path or content_base64 must be provided:
    - local_path: streams from disk in chunks (memory efficient for large files)
    - content_base64: decoded fully in memory and uploaded (small files only)

    Args:
        datastore: Datastore name (e.g., c1_ds-02)
        remote_path: Destination path on datastore (e.g., "iso/myfile.iso")
        local_path: Local file path to upload - streams from disk (preferred for large files)
        content_base64: Base64-encoded file content (for small files only)
        chunk_size: Chunk size for streaming uploads (default 8MB)

    Returns:
        Dict with upload details including size and whether streaming was used

    Raises:
        ValueError: On bad argument combinations, unknown datastore, or missing local file.
        RuntimeError: If the HTTP PUT fails.
    """
    import base64
    import os
    import ssl
    import urllib.error  # explicit: not guaranteed to be loaded by "import urllib.request"
    import urllib.request
    from urllib.parse import quote

    if not local_path and not content_base64:
        raise ValueError("Either local_path or content_base64 must be provided")
    if local_path and content_base64:
        raise ValueError("Only one of local_path or content_base64 can be provided")
    ds = self.conn.find_datastore(datastore)
    if not ds:
        raise ValueError(f"Datastore '{datastore}' not found")
    # Build upload URL. Keep "/" unencoded (safe="/") so nested paths such as
    # "iso/myfile.iso" remain real path segments on the /folder endpoint;
    # safe="" would encode them to %2F and flatten the path into one segment.
    dc_name = self.conn.datacenter.name
    host = self.conn.settings.vcenter_host
    encoded_path = quote(remote_path, safe="/")
    url = (
        f"https://{host}/folder/{encoded_path}"
        f"?dcPath={quote(dc_name)}&dsName={quote(datastore)}"
    )
    # SSL context honoring the same insecure flag used by the SOAP connection.
    context = ssl.create_default_context()
    if self.conn.settings.vcenter_insecure:
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    # Reuse the authenticated session cookie from the pyVmomi connection.
    cookie = self.conn.si._stub.cookie

    def _put(body: Any, size: int) -> None:
        """Issue the PUT; normalize any HTTP failure into RuntimeError."""
        request = urllib.request.Request(url, data=body, method="PUT")
        request.add_header("Content-Type", "application/octet-stream")
        request.add_header("Content-Length", str(size))
        request.add_header("Cookie", cookie)
        try:
            with urllib.request.urlopen(request, context=context) as response:
                if response.status not in (200, 201):
                    raise RuntimeError(f"Upload failed with status {response.status}")
        except urllib.error.HTTPError as e:
            raise RuntimeError(f"Upload failed: {e.code} {e.reason}") from e

    if local_path:
        # Stream from disk - never load the entire file into memory.
        if not os.path.exists(local_path):
            raise ValueError(f"Local file not found: {local_path}")
        file_size = os.path.getsize(local_path)

        class StreamingFileReader:
            """File wrapper that streams content for the HTTP upload.

            Opens the file lazily on first read(); __len__ lets urllib size
            the body (Content-Length is also set explicitly on the request).
            """

            def __init__(self, filepath: str, chunk_sz: int):
                self.filepath = filepath
                self.chunk_size = chunk_sz
                self.file_size = os.path.getsize(filepath)
                self._file = None

            def __len__(self) -> int:
                return self.file_size

            def read(self, size: int = -1) -> bytes:
                if self._file is None:
                    self._file = open(self.filepath, "rb")  # noqa: SIM115
                if size == -1:
                    size = self.chunk_size
                return self._file.read(size)

            def close(self) -> None:
                if self._file:
                    self._file.close()

        streamer = StreamingFileReader(local_path, chunk_size)
        try:
            _put(streamer, file_size)
        finally:
            streamer.close()
        return {
            "datastore": datastore,
            "path": remote_path,
            "size_bytes": file_size,
            "size_human": self._format_size(file_size),
            "source": local_path,
            "streamed": True,
        }
    # Base64 content - decoded fully in memory, so small files only.
    content = base64.b64decode(content_base64)
    file_size = len(content)
    _put(content, file_size)
    return {
        "datastore": datastore,
        "path": remote_path,
        "size_bytes": file_size,
        "size_human": self._format_size(file_size),
        "source": "base64",
        "streamed": False,
    }
@mcp_tool(
    name="delete_datastore_file",
    description="Delete a file or folder from a datastore",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def delete_datastore_file(self, datastore: str, path: str) -> str:
    """Remove a file or folder from a datastore.

    Args:
        datastore: Datastore name
        path: Path to file or folder to delete (e.g., "iso/old-file.iso")

    Returns:
        Success message

    Raises:
        ValueError: If the datastore does not exist.
    """
    if not self.conn.find_datastore(datastore):
        raise ValueError(f"Datastore '{datastore}' not found")
    # Datastore paths follow the "[name] relative/path" convention.
    full_path = f"[{datastore}] {path}"
    # FileManager performs the deletion as an async task.
    task = self.conn.content.fileManager.DeleteDatastoreFile_Task(
        name=full_path, datacenter=self.conn.datacenter
    )
    self.conn.wait_for_task(task)
    return f"Deleted [{datastore}] {path}"
@mcp_tool(
    name="create_datastore_folder",
    description="Create a folder on a datastore",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
)
def create_datastore_folder(self, datastore: str, path: str) -> str:
    """Create a folder on a datastore.

    Args:
        datastore: Datastore name
        path: Folder path to create (e.g., "iso/new-folder")

    Returns:
        Success message

    Raises:
        ValueError: If the datastore does not exist.
    """
    if not self.conn.find_datastore(datastore):
        raise ValueError(f"Datastore '{datastore}' not found")
    # Datastore paths follow the "[name] relative/path" convention.
    folder_path = f"[{datastore}] {path}"
    # MakeDirectory is synchronous - there is no task to wait on.
    self.conn.content.fileManager.MakeDirectory(
        name=folder_path, datacenter=self.conn.datacenter
    )
    return f"Created folder [{datastore}] {path}"
def _format_size(self, size_bytes: int | None) -> str:
"""Format bytes to human readable size."""
if not size_bytes:
return "0 B"
for unit in ["B", "KB", "MB", "GB", "TB"]:
if abs(size_bytes) < 1024:
return f"{size_bytes:.1f} {unit}"
size_bytes /= 1024
return f"{size_bytes:.1f} PB"
@mcp_tool(
    name="move_datastore_file",
    description="Move/rename a file or folder on a datastore",
    annotations=ToolAnnotations(destructiveHint=True),
)
def move_datastore_file(
    self,
    source_datastore: str,
    source_path: str,
    dest_datastore: str | None = None,
    dest_path: str | None = None,
) -> dict[str, Any]:
    """Move or rename a file or folder on a datastore.

    Args:
        source_datastore: Source datastore name
        source_path: Source path (e.g., "iso/old-name.iso")
        dest_datastore: Destination datastore (default: same as source)
        dest_path: Destination path (default: same as source with new name)

    Returns:
        Dict with move operation details

    Raises:
        ValueError: If the source or destination datastore does not exist.
    """
    if not self.conn.find_datastore(source_datastore):
        raise ValueError(f"Datastore '{source_datastore}' not found")
    # Destination datastore/path fall back to the source when omitted.
    if dest_datastore:
        if not self.conn.find_datastore(dest_datastore):
            raise ValueError(f"Destination datastore '{dest_datastore}' not found")
    else:
        dest_datastore = source_datastore
    dest_path = dest_path or source_path
    src = f"[{source_datastore}] {source_path}"
    dst = f"[{dest_datastore}] {dest_path}"
    dc = self.conn.datacenter
    # force=False means an existing destination is not overwritten.
    task = self.conn.content.fileManager.MoveDatastoreFile_Task(
        sourceName=src,
        sourceDatacenter=dc,
        destinationName=dst,
        destinationDatacenter=dc,
        force=False,
    )
    self.conn.wait_for_task(task)
    return {
        "action": "moved",
        "source": src,
        "destination": dst,
    }
@mcp_tool(
    name="copy_datastore_file",
    description="Copy a file or folder on a datastore",
    annotations=ToolAnnotations(destructiveHint=True),
)
def copy_datastore_file(
    self,
    source_datastore: str,
    source_path: str,
    dest_datastore: str | None = None,
    dest_path: str | None = None,
    force: bool = False,
) -> dict[str, Any]:
    """Copy a file or folder on a datastore.

    Args:
        source_datastore: Source datastore name
        source_path: Source path (e.g., "iso/original.iso")
        dest_datastore: Destination datastore (default: same as source)
        dest_path: Destination path (required)
        force: Overwrite destination if exists (default False)

    Returns:
        Dict with copy operation details

    Raises:
        ValueError: On unknown datastores or missing dest_path.
    """
    if not self.conn.find_datastore(source_datastore):
        raise ValueError(f"Datastore '{source_datastore}' not found")
    # Destination datastore falls back to the source when omitted.
    if dest_datastore:
        if not self.conn.find_datastore(dest_datastore):
            raise ValueError(f"Destination datastore '{dest_datastore}' not found")
    else:
        dest_datastore = source_datastore
    if not dest_path:
        raise ValueError("dest_path is required for copy operation")
    src = f"[{source_datastore}] {source_path}"
    dst = f"[{dest_datastore}] {dest_path}"
    dc = self.conn.datacenter
    task = self.conn.content.fileManager.CopyDatastoreFile_Task(
        sourceName=src,
        sourceDatacenter=dc,
        destinationName=dst,
        destinationDatacenter=dc,
        force=force,
    )
    self.conn.wait_for_task(task)
    return {
        "action": "copied",
        "source": src,
        "destination": dst,
        "force": force,
    }
# ─────────────────────────────────────────────────────────────────────────────
# MCP Resources (read-only data exposed as URIs)
# ─────────────────────────────────────────────────────────────────────────────
@mcp_resource(
    uri="esxi://vms",
    name="vm_list",
    description="List of all virtual machines with power state",
)
def resource_vm_list(self) -> list[dict[str, Any]]:
    """Return name, power state and guest OS for every VM in the inventory."""
    vms = []
    for vm in self.conn.get_all_vms():
        vms.append(
            {
                "name": vm.name,
                "power_state": str(vm.runtime.powerState),
                # config can be unset for VMs that are still being created.
                "guest_os": vm.config.guestFullName if vm.config else None,
            }
        )
    return vms
@mcp_resource(
    uri="esxi://datastores",
    name="datastore_list",
    description="List of all datastores with capacity information",
)
def resource_datastore_list(self) -> list[dict[str, Any]]:
    """Return capacity/usage summaries for every datastore in the datacenter."""
    result = []
    for ds in self.conn.datacenter.datastoreFolder.childEntity:
        if not isinstance(ds, vim.Datastore):
            continue
        summary = ds.summary
        # Guard against zero capacity before computing the usage ratio.
        if summary.capacity:
            used_pct = round((1 - summary.freeSpace / summary.capacity) * 100, 1)
        else:
            used_pct = 0
        result.append(
            {
                "name": ds.name,
                "capacity_gb": round(summary.capacity / (1024**3), 2),
                "free_gb": round(summary.freeSpace / (1024**3), 2),
                "used_percent": used_pct,
                "type": summary.type,
                "accessible": summary.accessible,
            }
        )
    return result
@mcp_resource(
    uri="esxi://networks",
    name="network_list",
    description="List of all available networks",
)
def resource_network_list(self) -> list[dict[str, Any]]:
    """Return every network in the datacenter; DVS portgroups also get VLAN/switch details."""
    result = []
    for net in self.conn.datacenter.networkFolder.childEntity:
        entry: dict[str, Any] = {"name": net.name, "type": type(net).__name__}
        if isinstance(net, vim.dvs.DistributedVirtualPortgroup):
            # Only VLAN-id style port configs expose vlanId; others yield None.
            entry["vlan"] = getattr(net.config.defaultPortConfig.vlan, "vlanId", None)
            entry["switch"] = net.config.distributedVirtualSwitch.name
        result.append(entry)
    return result
@mcp_resource(
    uri="esxi://hosts",
    name="host_list",
    description="List of all ESXi hosts",
)
def resource_host_list(self) -> list[dict[str, Any]]:
    """Return connection/power state and hardware totals for every ESXi host."""
    view = self.conn.content.viewManager.CreateContainerView(
        self.conn.content.rootFolder, [vim.HostSystem], True
    )
    try:
        hosts = []
        for host in view.view:
            summary = host.summary
            hosts.append(
                {
                    "name": host.name,
                    "connection_state": str(summary.runtime.connectionState),
                    "power_state": str(summary.runtime.powerState),
                    "cpu_cores": summary.hardware.numCpuCores,
                    "memory_gb": round(summary.hardware.memorySize / (1024**3), 2),
                }
            )
        return hosts
    finally:
        # Container views hold server-side resources; always clean up.
        view.Destroy()
@mcp_resource(
    uri="esxi://clusters",
    name="cluster_list",
    description="List of all clusters with DRS/HA status",
)
def resource_cluster_list(self) -> list[dict[str, Any]]:
    """Return host counts, capacity totals and DRS/HA flags for every cluster."""
    result = []
    for cluster in self.conn.datacenter.hostFolder.childEntity:
        if not isinstance(cluster, vim.ClusterComputeResource):
            continue
        summary = cluster.summary
        config = cluster.configuration
        if summary.totalMemory:
            total_mem_gb = round(summary.totalMemory / (1024**3), 2)
        else:
            total_mem_gb = 0
        result.append(
            {
                "name": cluster.name,
                "host_count": len(cluster.host) if cluster.host else 0,
                "total_cpu_cores": summary.numCpuCores,
                "total_memory_gb": total_mem_gb,
                # DRS/HA configs can be absent; report False in that case.
                "drs_enabled": config.drsConfig.enabled if config.drsConfig else False,
                "ha_enabled": config.dasConfig.enabled if config.dasConfig else False,
            }
        )
    return result
# ─────────────────────────────────────────────────────────────────────────────
# Tools for detailed resource information
# ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="get_datastore_info",
    description="Get detailed information about a specific datastore",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_datastore_info(self, name: str) -> dict[str, Any]:
    """Return capacity, usage, state and resident VMs for one datastore.

    Args:
        name: Datastore name

    Raises:
        ValueError: If the datastore does not exist.
    """
    ds = self.conn.find_datastore(name)
    if not ds:
        raise ValueError(f"Datastore '{name}' not found")
    summary = ds.summary
    # Names of VMs whose files live on this datastore.
    vm_names = [vm.name for vm in ds.vm] if ds.vm else []
    used_percent = 0
    if summary.capacity:
        used_percent = round((1 - summary.freeSpace / summary.capacity) * 100, 1)
    return {
        "name": ds.name,
        "type": summary.type,
        "capacity_gb": round(summary.capacity / (1024**3), 2),
        "free_gb": round(summary.freeSpace / (1024**3), 2),
        "used_gb": round((summary.capacity - summary.freeSpace) / (1024**3), 2),
        "used_percent": used_percent,
        "accessible": summary.accessible,
        "maintenance_mode": summary.maintenanceMode,
        "url": summary.url,
        "vm_count": len(vm_names),
        "vms": vm_names[:20],  # Limit to first 20
    }
@mcp_tool(
    name="get_network_info",
    description="Get detailed information about a specific network",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_network_info(self, name: str) -> dict[str, Any]:
    """Return usage counts for a network, plus DVS portgroup details when applicable.

    Args:
        name: Network name

    Raises:
        ValueError: If the network does not exist.
    """
    net = self.conn.find_network(name)
    if not net:
        raise ValueError(f"Network '{name}' not found")
    # Not every network type exposes a host list.
    has_hosts = hasattr(net, "host") and net.host
    info: dict[str, Any] = {
        "name": net.name,
        "type": type(net).__name__,
        "vm_count": len(net.vm) if net.vm else 0,
        "host_count": len(net.host) if has_hosts else 0,
    }
    if isinstance(net, vim.dvs.DistributedVirtualPortgroup):
        config = net.config
        info["switch"] = config.distributedVirtualSwitch.name
        info["port_binding"] = config.type
        info["num_ports"] = config.numPorts
        # vlanId only exists on VLAN-id style port configurations.
        if hasattr(config.defaultPortConfig, "vlan"):
            vlan = config.defaultPortConfig.vlan
            if hasattr(vlan, "vlanId"):
                info["vlan_id"] = vlan.vlanId
    return info
@mcp_tool(
    name="get_resource_pool_info",
    description="Get information about resource pools",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_resource_pool_info(self, name: str | None = None) -> dict[str, Any]:
    """Return CPU/memory allocation and live usage for a resource pool.

    If name is not provided, returns info for the default resource pool.

    Raises:
        ValueError: If a named pool cannot be found.
    """
    if not name:
        pool = self.conn.resource_pool
    else:
        # Search the whole inventory for a pool with the given name.
        view = self.conn.content.viewManager.CreateContainerView(
            self.conn.content.rootFolder, [vim.ResourcePool], True
        )
        try:
            pool = next((p for p in view.view if p.name == name), None)
        finally:
            view.Destroy()
        if not pool:
            raise ValueError(f"Resource pool '{name}' not found")
    runtime = pool.summary.runtime
    config = pool.summary.config
    return {
        "name": pool.name,
        "cpu_reservation_mhz": config.cpuAllocation.reservation,
        "cpu_limit_mhz": config.cpuAllocation.limit,
        "cpu_expandable": config.cpuAllocation.expandableReservation,
        "cpu_usage_mhz": runtime.cpu.overallUsage if runtime.cpu else 0,
        "memory_reservation_mb": config.memoryAllocation.reservation,
        "memory_limit_mb": config.memoryAllocation.limit,
        "memory_expandable": config.memoryAllocation.expandableReservation,
        "memory_usage_mb": runtime.memory.overallUsage if runtime.memory else 0,
        "vm_count": len(pool.vm) if pool.vm else 0,
    }
@mcp_tool(
    name="list_templates",
    description="List all VM templates in the inventory",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_templates(self) -> list[dict[str, Any]]:
    """Return name, guest OS and sizing for every VM flagged as a template."""
    return [
        {
            "name": vm.name,
            "guest_os": vm.config.guestFullName,
            "cpu": vm.config.hardware.numCPU,
            "memory_mb": vm.config.hardware.memoryMB,
        }
        for vm in self.conn.get_all_vms()
        if vm.config and vm.config.template
    ]
@mcp_tool(
    name="get_vcenter_info",
    description="Get vCenter/ESXi server information",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_vcenter_info(self) -> dict[str, Any]:
    """Return version/build details from the server's AboutInfo."""
    about = self.conn.content.about
    # Map output keys to the corresponding AboutInfo attribute names.
    fields = {
        "name": "name",
        "full_name": "fullName",
        "vendor": "vendor",
        "version": "version",
        "build": "build",
        "os_type": "osType",
        "api_type": "apiType",
        "api_version": "apiVersion",
        "instance_uuid": "instanceUuid",
    }
    return {key: getattr(about, attr) for key, attr in fields.items()}

View File

@ -1,312 +0,0 @@
"""Serial Port Management - network console access for VMs."""
import random
import socket
import time
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class SerialPortMixin(MCPMixin):
    """Serial port management for VM network console access.

    Network serial ports allow telnet/TCP connections to VM consoles,
    useful for headless VMs, network appliances, or serial console access.

    Supported protocols:
    - telnet: Telnet over TCP (can negotiate SSL)
    - telnets: Telnet over SSL over TCP
    - tcp: Unencrypted TCP
    - tcp+ssl: Encrypted SSL over TCP
    (setup_serial_port also accepts the tcp4/tcp6 variants.)
    """

    def __init__(self, conn: "VMwareConnection"):
        # Shared vSphere connection wrapper used by every tool in this mixin.
        self.conn = conn

    def _get_serial_port(self, vm: vim.VirtualMachine) -> vim.vm.device.VirtualSerialPort | None:
        """Find existing serial port with URI backing on a VM.

        Only URI-backed (network) serial ports count; ports with other
        backings are ignored. Returns the first match, or None.
        """
        if not vm.config:
            return None
        for device in vm.config.hardware.device:
            if (
                isinstance(device, vim.vm.device.VirtualSerialPort)
                and isinstance(device.backing, vim.vm.device.VirtualSerialPort.URIBackingInfo)
            ):
                return device
        return None

    def _find_unused_port(self, host_ip: str, start: int = 2000, end: int = 9000) -> int:
        """Find an unused TCP port on the ESXi host.

        NOTE(review): this probes by attempting a TCP connect from *this*
        process to host_ip:port, so a port that is firewalled or otherwise
        unreachable also reads as "free". Best-effort only — confirm the
        chosen range is actually usable on the ESXi host.
        """
        # Try random ports in range until we find one that's available
        attempts = 0
        max_attempts = 50
        while attempts < max_attempts:
            port = random.randint(start, end)
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1)
            try:
                result = sock.connect_ex((host_ip, port))
                if result != 0:  # Port not in use
                    return port
            except (OSError, TimeoutError):
                return port  # Likely available
            finally:
                sock.close()
            attempts += 1
        raise ValueError(f"Could not find unused port in range {start}-{end}")

    @mcp_tool(
        name="get_serial_port",
        description="Get current serial port configuration for a VM",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def get_serial_port(self, name: str) -> dict[str, Any]:
        """Get serial port configuration.

        Args:
            name: VM name

        Returns:
            Dict with serial port details or message if not configured

        Raises:
            ValueError: If the VM does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        serial_port = self._get_serial_port(vm)
        if not serial_port:
            return {
                "configured": False,
                "message": "No network serial port configured",
            }
        backing = serial_port.backing
        return {
            "configured": True,
            "label": serial_port.deviceInfo.label,
            "connected": serial_port.connectable.connected if serial_port.connectable else None,
            "start_connected": serial_port.connectable.startConnected if serial_port.connectable else None,
            "direction": backing.direction if backing else None,
            "service_uri": backing.serviceURI if backing else None,
            "yield_on_poll": serial_port.yieldOnPoll,
        }

    @mcp_tool(
        name="setup_serial_port",
        description="Configure a network serial port on a VM for console access. VM must be powered off.",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def setup_serial_port(
        self,
        name: str,
        protocol: str = "telnet",
        port: int | None = None,
        direction: str = "server",
        yield_on_poll: bool = True,
    ) -> dict[str, Any]:
        """Setup or update network serial port.

        Args:
            name: VM name
            protocol: Protocol to use (telnet, telnets, tcp, tcp+ssl, tcp4, tcp6). Default: telnet
            port: TCP port number. If not specified, auto-assigns unused port.
            direction: 'server' (VM listens) or 'client' (VM connects). Default: server
            yield_on_poll: Enable CPU yield behavior. Default: True

        Returns:
            Dict with configured serial port URI and details

        Raises:
            ValueError: If the VM is missing/powered on, or arguments are invalid.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        # Check VM is powered off
        if vm.runtime.powerState != vim.VirtualMachine.PowerState.poweredOff:
            raise ValueError(f"VM '{name}' must be powered off to configure serial port")
        # Validate protocol
        valid_protocols = ["telnet", "telnets", "tcp", "tcp+ssl", "tcp4", "tcp6"]
        if protocol not in valid_protocols:
            raise ValueError(f"Invalid protocol '{protocol}'. Must be one of: {valid_protocols}")
        # Validate direction
        if direction not in ["server", "client"]:
            raise ValueError("Direction must be 'server' or 'client'")
        # Find or assign port
        if port is None:
            host = vm.runtime.host
            # host.name is presumably the ESXi host's address; fall back to
            # the configured vCenter address if the VM has no host assigned.
            host_ip = host.name if host else self.conn.settings.vcenter_host
            port = self._find_unused_port(host_ip)
        # Build service URI. NOTE(review): the empty host in "proto://:port"
        # presumably means "listen on all host interfaces" for server mode —
        # confirm against the vSphere serial-port URI documentation.
        service_uri = f"{protocol}://:{port}"
        # Build spec
        serial_spec = vim.vm.device.VirtualDeviceSpec()
        existing_port = self._get_serial_port(vm)
        if existing_port:
            # Edit existing
            serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            serial_spec.device = existing_port
        else:
            # Add new
            serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            serial_spec.device = vim.vm.device.VirtualSerialPort()
        # Configure backing
        serial_spec.device.yieldOnPoll = yield_on_poll
        serial_spec.device.backing = vim.vm.device.VirtualSerialPort.URIBackingInfo()
        serial_spec.device.backing.direction = direction
        serial_spec.device.backing.serviceURI = service_uri
        # Configure connectable
        serial_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        serial_spec.device.connectable.startConnected = True
        serial_spec.device.connectable.allowGuestControl = True
        serial_spec.device.connectable.connected = False  # Will connect on power on
        # Apply config
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [serial_spec]
        task = vm.ReconfigVM_Task(spec=spec)
        self.conn.wait_for_task(task)
        # Get ESXi host info for connection string
        host = vm.runtime.host
        host_ip = host.name if host else self.conn.settings.vcenter_host
        return {
            "vm_name": name,
            "service_uri": service_uri,
            "connection_string": f"{protocol}://{host_ip}:{port}",
            "protocol": protocol,
            "port": port,
            "direction": direction,
            "yield_on_poll": yield_on_poll,
            "operation": "updated" if existing_port else "created",
        }

    @mcp_tool(
        name="connect_serial_port",
        description="Connect or disconnect an existing serial port on a VM",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def connect_serial_port(self, name: str, connected: bool = True) -> dict[str, Any]:
        """Connect or disconnect serial port.

        Args:
            name: VM name
            connected: True to connect, False to disconnect. Default: True

        Returns:
            Dict with result

        Raises:
            ValueError: If the VM or its serial port does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        serial_port = self._get_serial_port(vm)
        if not serial_port:
            raise ValueError(f"No network serial port configured on VM '{name}'")
        # Build edit spec
        serial_spec = vim.vm.device.VirtualDeviceSpec()
        serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        serial_spec.device = serial_port
        serial_spec.device.connectable.connected = connected
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [serial_spec]
        task = vm.ReconfigVM_Task(spec=spec)
        self.conn.wait_for_task(task)
        return {
            "vm_name": name,
            "connected": connected,
            "service_uri": serial_port.backing.serviceURI if serial_port.backing else None,
        }

    @mcp_tool(
        name="clear_serial_port",
        description="Reset serial port by disconnecting and reconnecting (clears stuck connections)",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def clear_serial_port(self, name: str) -> dict[str, Any]:
        """Clear serial port by cycling connection state.

        Useful for clearing stuck or stale connections.

        Args:
            name: VM name

        Returns:
            Dict with result

        Raises:
            ValueError: If the VM or its serial port does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        serial_port = self._get_serial_port(vm)
        if not serial_port:
            raise ValueError(f"No network serial port configured on VM '{name}'")
        # Disconnect
        self.connect_serial_port(name, connected=False)
        # Brief pause so the disconnect settles before reconnecting.
        time.sleep(1)
        # Reconnect
        self.connect_serial_port(name, connected=True)
        return {
            "vm_name": name,
            "status": "cleared",
            "service_uri": serial_port.backing.serviceURI if serial_port.backing else None,
            "message": "Serial port disconnected and reconnected",
        }

    @mcp_tool(
        name="remove_serial_port",
        description="Remove the network serial port from a VM. VM must be powered off.",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
    )
    def remove_serial_port(self, name: str) -> str:
        """Remove serial port from VM.

        Args:
            name: VM name

        Returns:
            Success message (also when no port was configured)

        Raises:
            ValueError: If the VM is missing or not powered off.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        # Check VM is powered off
        if vm.runtime.powerState != vim.VirtualMachine.PowerState.poweredOff:
            raise ValueError(f"VM '{name}' must be powered off to remove serial port")
        serial_port = self._get_serial_port(vm)
        if not serial_port:
            return f"No network serial port configured on VM '{name}'"
        # Build remove spec
        serial_spec = vim.vm.device.VirtualDeviceSpec()
        serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        serial_spec.device = serial_port
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [serial_spec]
        task = vm.ReconfigVM_Task(spec=spec)
        self.conn.wait_for_task(task)
        return f"Serial port removed from VM '{name}'"

View File

@ -1,270 +0,0 @@
"""Snapshot management - create, revert, delete, list snapshots."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class SnapshotsMixin(MCPMixin):
    """VM snapshot management tools."""

    def __init__(self, conn: "VMwareConnection"):
        # Shared vSphere connection wrapper used by every tool in this mixin.
        self.conn = conn

    def _get_snapshot_tree(
        self, snapshots: list, parent_path: str = ""
    ) -> list[dict[str, Any]]:
        """Recursively build snapshot tree.

        Flattens the snapshot hierarchy depth-first; each entry's "path"
        is the slash-joined chain of ancestor names (e.g. "base/child").
        """
        result = []
        for snapshot in snapshots:
            path = f"{parent_path}/{snapshot.name}" if parent_path else snapshot.name
            result.append(
                {
                    "name": snapshot.name,
                    "path": path,
                    "description": snapshot.description,
                    "created": snapshot.createTime.isoformat() if snapshot.createTime else None,
                    "state": str(snapshot.state),
                    "quiesced": snapshot.quiesced,
                    "id": snapshot.id,
                }
            )
            if snapshot.childSnapshotList:
                result.extend(self._get_snapshot_tree(snapshot.childSnapshotList, path))
        return result

    def _find_snapshot_by_name(
        self, snapshots: list, name: str
    ) -> vim.vm.Snapshot | None:
        """Find a snapshot by name in the tree.

        Depth-first search returning the first match. NOTE(review): if
        multiple snapshots share a name, only the first in tree order is
        ever found.
        """
        for snapshot in snapshots:
            if snapshot.name == name:
                return snapshot.snapshot
            if snapshot.childSnapshotList:
                found = self._find_snapshot_by_name(snapshot.childSnapshotList, name)
                if found:
                    return found
        return None

    @mcp_tool(
        name="list_snapshots",
        description="List all snapshots for a virtual machine",
        annotations=ToolAnnotations(readOnlyHint=True),
    )
    def list_snapshots(self, name: str) -> list[dict[str, Any]]:
        """List all snapshots for a VM.

        Returns a flattened snapshot tree with an extra "is_current" flag
        marking the VM's current snapshot.

        Raises:
            ValueError: If the VM does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.rootSnapshotList:
            return []
        snapshots = self._get_snapshot_tree(vm.snapshot.rootSnapshotList)
        # Mark current snapshot
        current_snapshot = vm.snapshot.currentSnapshot
        for snap in snapshots:
            snap["is_current"] = False
        if current_snapshot:
            for snap in snapshots:
                # Compare by checking if this is the current one.
                # NOTE(review): the lookup is by name, so with duplicate
                # snapshot names the wrong entry may be flagged.
                found = self._find_snapshot_by_name(
                    vm.snapshot.rootSnapshotList, snap["name"]
                )
                if found and found == current_snapshot:
                    snap["is_current"] = True
                    break
        return snapshots

    @mcp_tool(
        name="create_snapshot",
        description="Create a snapshot of a virtual machine",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=False),
    )
    def create_snapshot(
        self,
        name: str,
        snapshot_name: str,
        description: str = "",
        memory: bool = True,
        quiesce: bool = False,
    ) -> str:
        """Create a VM snapshot.

        Args:
            name: VM name
            snapshot_name: Name for the new snapshot
            description: Optional description
            memory: Include memory state (allows instant restore to running state)
            quiesce: Quiesce guest filesystem (requires VMware Tools, ensures consistent state)

        Returns:
            Success message

        Raises:
            ValueError: If the VM does not exist.
            RuntimeError: If quiesce is requested but VMware Tools is not running.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        # Can only quiesce if VM is powered on and has tools
        if (
            quiesce
            and vm.runtime.powerState == vim.VirtualMachine.PowerState.poweredOn
            and vm.guest.toolsRunningStatus != "guestToolsRunning"
        ):
            raise RuntimeError(
                "Cannot quiesce: VMware Tools not running. "
                "Set quiesce=False or install/start VMware Tools."
            )
        task = vm.CreateSnapshot_Task(
            name=snapshot_name,
            description=description,
            memory=memory,
            quiesce=quiesce,
        )
        self.conn.wait_for_task(task)
        return f"Snapshot '{snapshot_name}' created for VM '{name}'"

    @mcp_tool(
        name="revert_to_snapshot",
        description="Revert a VM to a specific snapshot",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=False),
    )
    def revert_to_snapshot(self, name: str, snapshot_name: str) -> str:
        """Revert VM to a specific snapshot.

        Raises:
            ValueError: If the VM or the named snapshot does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.rootSnapshotList:
            raise ValueError(f"VM '{name}' has no snapshots")
        snapshot = self._find_snapshot_by_name(
            vm.snapshot.rootSnapshotList, snapshot_name
        )
        if not snapshot:
            raise ValueError(f"Snapshot '{snapshot_name}' not found on VM '{name}'")
        task = snapshot.RevertToSnapshot_Task()
        self.conn.wait_for_task(task)
        return f"VM '{name}' reverted to snapshot '{snapshot_name}'"

    @mcp_tool(
        name="revert_to_current_snapshot",
        description="Revert a VM to its current (most recent) snapshot",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=False),
    )
    def revert_to_current_snapshot(self, name: str) -> str:
        """Revert VM to its current snapshot.

        Raises:
            ValueError: If the VM does not exist or has no current snapshot.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.currentSnapshot:
            raise ValueError(f"VM '{name}' has no current snapshot")
        task = vm.RevertToCurrentSnapshot_Task()
        self.conn.wait_for_task(task)
        return f"VM '{name}' reverted to current snapshot"

    @mcp_tool(
        name="delete_snapshot",
        description="Delete a specific snapshot from a VM",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
    )
    def delete_snapshot(
        self, name: str, snapshot_name: str, remove_children: bool = False
    ) -> str:
        """Delete a VM snapshot.

        Args:
            name: VM name
            snapshot_name: Name of snapshot to delete
            remove_children: If True, also delete child snapshots

        Returns:
            Success message

        Raises:
            ValueError: If the VM or the named snapshot does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.rootSnapshotList:
            raise ValueError(f"VM '{name}' has no snapshots")
        snapshot = self._find_snapshot_by_name(
            vm.snapshot.rootSnapshotList, snapshot_name
        )
        if not snapshot:
            raise ValueError(f"Snapshot '{snapshot_name}' not found on VM '{name}'")
        task = snapshot.RemoveSnapshot_Task(removeChildren=remove_children)
        self.conn.wait_for_task(task)
        msg = f"Snapshot '{snapshot_name}' deleted from VM '{name}'"
        if remove_children:
            msg += " (including children)"
        return msg

    @mcp_tool(
        name="delete_all_snapshots",
        description="Delete ALL snapshots from a VM (consolidates disk)",
        annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
    )
    def delete_all_snapshots(self, name: str) -> str:
        """Delete all snapshots from a VM.

        Raises:
            ValueError: If the VM does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.rootSnapshotList:
            return f"VM '{name}' has no snapshots to delete"
        task = vm.RemoveAllSnapshots_Task()
        self.conn.wait_for_task(task)
        return f"All snapshots deleted from VM '{name}'"

    @mcp_tool(
        name="rename_snapshot",
        description="Rename a snapshot and/or update its description",
        annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
    )
    def rename_snapshot(
        self,
        name: str,
        snapshot_name: str,
        new_name: str | None = None,
        new_description: str | None = None,
    ) -> str:
        """Rename a snapshot or update its description.

        Raises:
            ValueError: If the VM or the named snapshot does not exist.
        """
        vm = self.conn.find_vm(name)
        if not vm:
            raise ValueError(f"VM '{name}' not found")
        if not vm.snapshot or not vm.snapshot.rootSnapshotList:
            raise ValueError(f"VM '{name}' has no snapshots")
        snapshot = self._find_snapshot_by_name(
            vm.snapshot.rootSnapshotList, snapshot_name
        )
        if not snapshot:
            raise ValueError(f"Snapshot '{snapshot_name}' not found on VM '{name}'")
        # NOTE(review): passing description=None presumably omits the field
        # from the request and leaves the old description intact — confirm
        # against the pyVmomi RenameSnapshot behavior.
        snapshot.RenameSnapshot(
            name=new_name if new_name else snapshot_name,
            description=new_description if new_description else None,
        )
        changes = []
        if new_name:
            changes.append(f"renamed to '{new_name}'")
        if new_description:
            changes.append("description updated")
        return f"Snapshot '{snapshot_name}': {', '.join(changes)}"

View File

@ -1,697 +0,0 @@
"""vCenter-specific Operations - Storage vMotion, Templates, Folders, Tasks."""
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class VCenterOpsMixin(MCPMixin):
    """vCenter-specific operations (require vCenter, not just ESXi).

    Covers Storage vMotion, template management, folder organization,
    task/event inspection, and cluster queries. All tools go through the
    shared connection wrapper stored on ``self.conn``.
    """

    def __init__(self, conn: "VMwareConnection"):
        # Shared connection wrapper providing find_vm/find_datastore/
        # wait_for_task and cached datacenter/resource-pool handles.
        self.conn = conn

    # ─────────────────────────────────────────────────────────────────────────────
    # Storage vMotion (works even on single-host vCenter)
    # ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="storage_vmotion",
    description="Move a VM's disks to a different datastore (Storage vMotion). Idempotent if already on target.",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def storage_vmotion(
    self,
    vm_name: str,
    target_datastore: str,
    thin_provision: bool | None = None,
) -> dict[str, Any]:
    """Move a VM's storage to a different datastore.

    This moves all VM files (disks, config) to the target datastore.
    VM can be running during the migration.

    Args:
        vm_name: Name of the VM to migrate
        target_datastore: Target datastore name
        thin_provision: Convert to thin provisioning (None = keep current)

    Returns:
        Dict with migration details

    Raises:
        ValueError: If the VM or the target datastore cannot be found.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    ds = self.conn.find_datastore(target_datastore)
    if not ds:
        raise ValueError(f"Datastore '{target_datastore}' not found")
    # Get current datastore: vmPathName looks like "[ds1] vm/vm.vmx",
    # so the datastore name is the bracketed prefix.
    current_ds = vm.config.files.vmPathName.split("]")[0].strip("[")
    if current_ds == target_datastore:
        # Idempotent no-op: the VM already lives on the requested datastore.
        return {
            "vm": vm_name,
            "action": "no_migration_needed",
            "message": f"VM is already on datastore '{target_datastore}'",
        }
    # Create relocate spec targeting the new datastore.
    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.datastore = ds
    # Set disk provisioning if specified; "sparse" = thin, "flat" = thick.
    if thin_provision is not None:
        if thin_provision:
            relocate_spec.transform = vim.vm.RelocateSpec.Transformation.sparse
        else:
            relocate_spec.transform = vim.vm.RelocateSpec.Transformation.flat
    # Perform the relocation and block until the task completes.
    task = vm.RelocateVM_Task(spec=relocate_spec)
    self.conn.wait_for_task(task)
    return {
        "vm": vm_name,
        "action": "storage_vmotion_complete",
        "source_datastore": current_ds,
        "target_datastore": target_datastore,
        "thin_provision": thin_provision,
    }
@mcp_tool(
    name="move_vm_disk",
    description="Move a specific VM disk to a different datastore",
    annotations=ToolAnnotations(destructiveHint=True),
)
def move_vm_disk(
    self,
    vm_name: str,
    disk_label: str,
    target_datastore: str,
) -> dict[str, Any]:
    """Move a specific VM disk to a different datastore.

    Args:
        vm_name: Name of the VM
        disk_label: Label of the disk (e.g., 'Hard disk 1')
        target_datastore: Target datastore name

    Returns:
        Dict with migration details

    Raises:
        ValueError: If the VM, the datastore, or the labeled disk is missing.
    """
    vm = self.conn.find_vm(vm_name)
    if not vm:
        raise ValueError(f"VM '{vm_name}' not found")
    ds = self.conn.find_datastore(target_datastore)
    if not ds:
        raise ValueError(f"Datastore '{target_datastore}' not found")
    # Find the specific disk by its device label (case-insensitive).
    target_disk = None
    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualDisk) and device.deviceInfo.label.lower() == disk_label.lower():
            target_disk = device
            break
    if not target_disk:
        # Help the caller by listing the labels that do exist.
        available = [
            d.deviceInfo.label
            for d in vm.config.hardware.device
            if isinstance(d, vim.vm.device.VirtualDisk)
        ]
        raise ValueError(f"Disk '{disk_label}' not found. Available: {available}")
    # Get current disk location from the backing file path "[ds] path.vmdk".
    current_path = target_disk.backing.fileName
    current_ds = current_path.split("]")[0].strip("[")
    # Create disk locator for this specific disk (keyed by device key).
    disk_locator = vim.vm.RelocateSpec.DiskLocator()
    disk_locator.diskId = target_disk.key
    disk_locator.datastore = ds
    # Relocate spec with only this disk listed moves just that disk;
    # the VM's other files stay where they are.
    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.disk = [disk_locator]
    # Perform the relocation and block until the task completes.
    task = vm.RelocateVM_Task(spec=relocate_spec)
    self.conn.wait_for_task(task)
    return {
        "vm": vm_name,
        "action": "disk_moved",
        "disk": disk_label,
        "source_datastore": current_ds,
        "target_datastore": target_datastore,
    }
# ─────────────────────────────────────────────────────────────────────────────
# Template Management
# ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="convert_to_template",
    description="Convert a VM to a template (idempotent - safe to call on existing template)",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def convert_to_template(self, vm_name: str) -> dict[str, Any]:
    """Mark a powered-off VM as a template.

    Safe to call repeatedly: an existing template is reported as-is.

    Args:
        vm_name: Name of the VM to convert.

    Returns:
        Dict describing the conversion outcome.

    Raises:
        ValueError: If the VM is missing or not powered off.
    """
    machine = self.conn.find_vm(vm_name)
    if not machine:
        raise ValueError(f"VM '{vm_name}' not found")
    # Templates cannot be created from running VMs.
    if machine.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
        raise ValueError("VM must be powered off to convert to template")
    if machine.config.template:
        # Already a template — idempotent success.
        return {
            "vm": vm_name,
            "action": "already_template",
            "is_template": True,
        }
    machine.MarkAsTemplate()
    return {
        "vm": vm_name,
        "action": "converted_to_template",
        "is_template": True,
    }
@mcp_tool(
    name="convert_to_vm",
    description="Convert a template back to a VM (idempotent - safe to call on existing VM)",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def convert_to_vm(
    self,
    template_name: str,
    resource_pool: str | None = None,
) -> dict[str, Any]:
    """Convert a template back to a regular VM.

    Args:
        template_name: Name of the template
        resource_pool: Resource pool for the VM (optional)

    Returns:
        Dict with conversion details

    Raises:
        ValueError: If the template or the named resource pool is missing.
    """
    vm = self.conn.find_vm(template_name)
    if not vm:
        raise ValueError(f"Template '{template_name}' not found")
    if not vm.config.template:
        # Already a regular VM — idempotent success.
        return {
            "vm": template_name,
            "action": "already_vm",
            "is_template": False,
        }
    # Get resource pool: explicit name wins, else the connection default.
    if resource_pool:
        pool = self._find_resource_pool(resource_pool)
        if not pool:
            raise ValueError(f"Resource pool '{resource_pool}' not found")
    else:
        pool = self.conn.resource_pool
    # Get a host from the resource pool so the VM has a place to register.
    # NOTE(review): assumes pool.owner is a ComputeResource exposing .host;
    # confirm this holds for nested resource pools.
    host = None
    if hasattr(pool, "owner") and hasattr(pool.owner, "host"):
        hosts = pool.owner.host
        if hosts:
            host = hosts[0]
    vm.MarkAsVirtualMachine(pool=pool, host=host)
    return {
        "vm": template_name,
        "action": "converted_to_vm",
        "is_template": False,
    }
def _find_resource_pool(self, name: str) -> vim.ResourcePool | None:
    """Locate a resource pool anywhere under the root folder by exact name."""
    view = self.conn.content.viewManager.CreateContainerView(
        self.conn.content.rootFolder, [vim.ResourcePool], True
    )
    try:
        # First pool whose name matches, or None when nothing matches.
        return next((rp for rp in view.view if rp.name == name), None)
    finally:
        # Container views are server-side objects; always clean up.
        view.Destroy()
@mcp_tool(
    name="deploy_from_template",
    description="Deploy a new VM from a template",
    annotations=ToolAnnotations(destructiveHint=True),
)
def deploy_from_template(
    self,
    template_name: str,
    new_vm_name: str,
    datastore: str | None = None,
    power_on: bool = False,
) -> dict[str, Any]:
    """Deploy a new VM from a template.

    Args:
        template_name: Name of the template to clone
        new_vm_name: Name for the new VM
        datastore: Target datastore (default: same as template)
        power_on: Power on after deployment (default False)

    Returns:
        Dict with deployment details

    Raises:
        ValueError: If the template is missing, is not actually a template,
            the target name is taken, or the datastore does not exist.
    """
    template = self.conn.find_vm(template_name)
    if not template:
        raise ValueError(f"Template '{template_name}' not found")
    if not template.config.template:
        raise ValueError(f"'{template_name}' is not a template")
    # Check if target VM already exists — cloning would fail mid-task.
    if self.conn.find_vm(new_vm_name):
        raise ValueError(f"VM '{new_vm_name}' already exists")
    # Build clone spec; the relocate spec chooses pool and (optionally) datastore.
    relocate_spec = vim.vm.RelocateSpec()
    relocate_spec.pool = self.conn.resource_pool
    if datastore:
        ds = self.conn.find_datastore(datastore)
        if not ds:
            raise ValueError(f"Datastore '{datastore}' not found")
        relocate_spec.datastore = ds
    clone_spec = vim.vm.CloneSpec()
    clone_spec.location = relocate_spec
    clone_spec.powerOn = power_on
    clone_spec.template = False  # Create VM, not another template
    # Get target folder (datacenter's root VM folder).
    folder = self.conn.datacenter.vmFolder
    # Clone the template and block until the task completes.
    task = template.Clone(folder=folder, name=new_vm_name, spec=clone_spec)
    self.conn.wait_for_task(task)
    # Get the new VM info to report its actual power state.
    new_vm = self.conn.find_vm(new_vm_name)
    return {
        "vm": new_vm_name,
        "action": "deployed_from_template",
        "template": template_name,
        "datastore": datastore or "same as template",
        "power_state": str(new_vm.runtime.powerState) if new_vm else "unknown",
    }
# ─────────────────────────────────────────────────────────────────────────────
# Folder Organization
# ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="list_folders",
    description="List VM folders in the datacenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_folders(self) -> list[dict[str, Any]]:
    """Walk the datacenter's VM-folder tree and describe every folder.

    Returns:
        Folder details (name, slash-separated path, child count) in
        pre-order traversal starting at the root ``vm`` folder.
    """
    found: list[dict[str, Any]] = []

    def _walk(node: vim.Folder, prefix: str = "") -> None:
        # Build the slash-separated path as we descend.
        full_path = f"{prefix}/{node.name}" if prefix else node.name
        found.append({
            "name": node.name,
            "path": full_path,
            "type": "Folder",
            "children": len(node.childEntity) if hasattr(node, "childEntity") else 0,
        })
        if hasattr(node, "childEntity"):
            for entry in node.childEntity:
                if isinstance(entry, vim.Folder):
                    _walk(entry, full_path)

    _walk(self.conn.datacenter.vmFolder)
    return found
@mcp_tool(
    name="create_folder",
    description="Create a new VM folder",
    annotations=ToolAnnotations(destructiveHint=True),
)
def create_folder(
    self,
    folder_name: str,
    parent_path: str | None = None,
) -> dict[str, Any]:
    """Create a new VM folder.

    Args:
        folder_name: Name for the new folder.
        parent_path: Path to parent folder (None = root vm folder).

    Returns:
        Dict with folder details.

    Raises:
        ValueError: If the named parent folder cannot be found.
    """
    if parent_path:
        container = self._find_folder_by_path(parent_path)
        if not container:
            raise ValueError(f"Parent folder '{parent_path}' not found")
    else:
        container = self.conn.datacenter.vmFolder
    container.CreateFolder(name=folder_name)
    full_path = (
        f"{parent_path}/{folder_name}" if parent_path else f"vm/{folder_name}"
    )
    return {
        "action": "folder_created",
        "name": folder_name,
        "parent": parent_path or "vm (root)",
        "path": full_path,
    }
def _find_folder_by_path(self, path: str) -> vim.Folder | None:
    """Find a folder by its path (e.g., 'vm/Production/WebServers').

    The leading 'vm' segment (the implicit root) and empty segments
    are ignored. Returns None when any segment cannot be resolved.
    """
    node = self.conn.datacenter.vmFolder
    segments = [seg for seg in path.split("/") if seg and seg != "vm"]
    for seg in segments:
        children = node.childEntity if hasattr(node, "childEntity") else []
        node = next(
            (c for c in children if isinstance(c, vim.Folder) and c.name == seg),
            None,
        )
        if not node:
            return None
    return node
@mcp_tool(
    name="move_vm_to_folder",
    description="Move a VM to a different folder",
    annotations=ToolAnnotations(destructiveHint=True),
)
def move_vm_to_folder(
    self,
    vm_name: str,
    folder_path: str,
) -> dict[str, Any]:
    """Relocate a VM into the folder at the given path.

    Args:
        vm_name: Name of the VM to move.
        folder_path: Path to target folder.

    Returns:
        Dict with move details.

    Raises:
        ValueError: If the VM or the target folder cannot be found.
    """
    machine = self.conn.find_vm(vm_name)
    if not machine:
        raise ValueError(f"VM '{vm_name}' not found")
    destination = self._find_folder_by_path(folder_path)
    if not destination:
        raise ValueError(f"Folder '{folder_path}' not found")
    # Remember where the VM came from for the report.
    origin = machine.parent.name if machine.parent else "unknown"
    self.conn.wait_for_task(destination.MoveIntoFolder_Task([machine]))
    return {
        "vm": vm_name,
        "action": "moved_to_folder",
        "from_folder": origin,
        "to_folder": folder_path,
    }
# ─────────────────────────────────────────────────────────────────────────────
# vCenter Tasks and Events
# ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="list_recent_tasks",
    description="List recent tasks from vCenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_recent_tasks(
    self,
    max_count: int = 20,
    entity_name: str | None = None,
) -> list[dict[str, Any]]:
    """List recent tasks from vCenter.

    Args:
        max_count: Maximum number of tasks to return (default 20)
        entity_name: Filter by entity name (optional)

    Returns:
        List of task details; a single informational entry when nothing
        matches.
    """
    task_manager = self.conn.content.taskManager
    tasks: list[dict[str, Any]] = []
    for task in task_manager.recentTask:
        info = task.info
        # Bug fix: filter BEFORE counting toward max_count. The old code
        # sliced recentTask[:max_count] first, so a filtered call could
        # silently return far fewer than max_count matches; it also let
        # entity-less tasks through the entity filter.
        if entity_name and (not info.entity or info.entity.name != entity_name):
            continue
        task_info = {
            "key": info.key,
            "name": info.name or info.descriptionId,
            "state": str(info.state),
            "progress": info.progress,
            "queued_time": str(info.queueTime) if info.queueTime else None,
            "start_time": str(info.startTime) if info.startTime else None,
            "complete_time": str(info.completeTime) if info.completeTime else None,
        }
        # Add entity info if available
        if info.entity:
            task_info["entity"] = info.entity.name
            task_info["entity_type"] = type(info.entity).__name__
        # Add error info if failed
        if info.error:
            task_info["error"] = str(info.error.msg)
        tasks.append(task_info)
        if len(tasks) >= max_count:
            break
    # Ensure we return something informative even if empty.
    if not tasks:
        return [{"message": "No recent tasks found", "count": 0}]
    return tasks
@mcp_tool(
    name="list_recent_events",
    description="List recent events from vCenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_recent_events(
    self,
    max_count: int = 50,
    event_types: list[str] | None = None,
    hours_back: int = 24,
) -> list[dict[str, Any]]:
    """List recent events from vCenter.

    Args:
        max_count: Maximum number of events (default 50)
        event_types: Filter by event type names (optional)
        hours_back: How many hours back to look (default 24)

    Returns:
        List of event details
    """
    event_manager = self.conn.content.eventManager
    # Create filter spec limited to the requested time window.
    # NOTE(review): datetime.now() is naive local time while vCenter event
    # timestamps are typically UTC — confirm the window matches expectations.
    filter_spec = vim.event.EventFilterSpec()
    filter_spec.time = vim.event.EventFilterSpec.ByTime()
    filter_spec.time.beginTime = datetime.now() - timedelta(hours=hours_back)
    # Get events via a short-lived collector (destroyed in the finally below).
    event_collector = event_manager.CreateCollectorForEvents(filter=filter_spec)
    try:
        events = event_collector.ReadNextEvents(max_count)
        result = []
        for event in events:
            event_info = {
                "key": event.key,
                "type": type(event).__name__,
                "created_time": str(event.createdTime),
                "message": event.fullFormattedMessage,
                "user": event.userName if hasattr(event, "userName") else None,
            }
            # Add entity info if available
            if hasattr(event, "vm") and event.vm:
                event_info["vm"] = event.vm.name
            if hasattr(event, "host") and event.host:
                event_info["host"] = event.host.name
            # Filter by type if specified.
            # NOTE(review): the type filter runs after fetching max_count
            # events, so fewer than max_count matches may be returned.
            if event_types and type(event).__name__ not in event_types:
                continue
            result.append(event_info)
        # Ensure we return something informative even if empty.
        if not result:
            return [{"message": f"No events found in the last {hours_back} hours", "count": 0}]
        return result
    finally:
        # Collectors are server-side resources; always destroy them.
        event_collector.DestroyCollector()
# ─────────────────────────────────────────────────────────────────────────────
# Cluster Operations (for multi-host environments)
# ─────────────────────────────────────────────────────────────────────────────
@mcp_tool(
    name="list_clusters",
    description="List all clusters in the datacenter",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_clusters(self) -> list[dict[str, Any]]:
    """List all clusters in the datacenter.

    Returns:
        List of cluster details with DRS/HA status; a single
        informational entry for standalone (non-clustered) hosts.
    """
    clusters = []
    for entity in self.conn.datacenter.hostFolder.childEntity:
        if isinstance(entity, vim.ClusterComputeResource):
            drs_config = entity.configuration.drsConfig
            ha_config = entity.configuration.dasConfig
            clusters.append({
                "name": entity.name,
                "host_count": len(entity.host) if entity.host else 0,
                "total_cpu_mhz": entity.summary.totalCpu,
                # totalMemory is bytes, hence the 1024**3 divisor;
                # effectiveMemory is presumably reported in MB (hence /1024)
                # — confirm against the vSphere API reference.
                "total_memory_gb": round(entity.summary.totalMemory / (1024**3), 2),
                "effective_cpu_mhz": entity.summary.effectiveCpu,
                "effective_memory_gb": round(entity.summary.effectiveMemory / 1024, 2),
                "drs": {
                    "enabled": drs_config.enabled if drs_config else False,
                    "behavior": str(drs_config.defaultVmBehavior) if drs_config else None,
                },
                "ha": {
                    "enabled": ha_config.enabled if ha_config else False,
                    "admission_control": ha_config.admissionControlEnabled if ha_config else False,
                },
            })
    # Return informative message if no clusters found (standalone host mode)
    if not clusters:
        return [{
            "message": "No clusters found - this appears to be a standalone host or non-clustered environment",
            "count": 0,
        }]
    return clusters
@mcp_tool(
    name="get_drs_recommendations",
    description="Get DRS recommendations for a cluster",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_drs_recommendations(
    self,
    cluster_name: str,
) -> list[dict[str, Any]]:
    """Get DRS recommendations for a cluster.

    Args:
        cluster_name: Name of the cluster

    Returns:
        List of DRS recommendations; a single informational entry when
        DRS is disabled or there are no pending recommendations.

    Raises:
        ValueError: If no cluster with the given name exists.
    """
    cluster = self._find_cluster(cluster_name)
    if not cluster:
        raise ValueError(f"Cluster '{cluster_name}' not found")
    if not cluster.configuration.drsConfig.enabled:
        return [{
            "message": "DRS is not enabled for this cluster",
            "cluster": cluster_name,
        }]
    recommendations = []
    if hasattr(cluster, "recommendation") and cluster.recommendation:
        for rec in cluster.recommendation:
            rec_info = {
                "key": rec.key,
                "reason": rec.reason,
                "rating": rec.rating,
                "type": rec.reasonText,
            }
            # Add action details (e.g. proposed migrations) when present.
            if rec.action:
                rec_info["actions"] = []
                for action in rec.action:
                    if hasattr(action, "target"):
                        rec_info["actions"].append({
                            "type": type(action).__name__,
                            "target": action.target.name if action.target else "Unknown",
                        })
            recommendations.append(rec_info)
    if not recommendations:
        return [{
            "message": "No DRS recommendations at this time",
            "cluster": cluster_name,
        }]
    return recommendations
def _find_cluster(self, name: str) -> vim.ClusterComputeResource | None:
    """Return the cluster with the given name, or None if absent."""
    candidates = self.conn.datacenter.hostFolder.childEntity
    return next(
        (
            entity
            for entity in candidates
            if isinstance(entity, vim.ClusterComputeResource) and entity.name == name
        ),
        None,
    )

View File

@ -1,322 +0,0 @@
"""VM Lifecycle operations - create, clone, delete, reconfigure."""
from typing import TYPE_CHECKING, Any
from fastmcp.contrib.mcp_mixin import MCPMixin, mcp_tool
from mcp.types import ToolAnnotations
from pyVmomi import vim
if TYPE_CHECKING:
from mcvsphere.connection import VMwareConnection
class VMLifecycleMixin(MCPMixin):
    """VM lifecycle management tools - CRUD operations for virtual machines."""

    def __init__(self, conn: "VMwareConnection"):
        # Shared connection wrapper providing find_vm/find_datastore/
        # find_network/wait_for_task and cached datacenter handles.
        self.conn = conn
@mcp_tool(
    name="list_vms",
    description="List all virtual machines in the vSphere inventory",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def list_vms(self) -> list[dict[str, Any]]:
    """Summarize every VM: name, power state, CPU/memory, guest OS."""
    # config can be None for inaccessible/orphaned VMs, hence the guards.
    return [
        {
            "name": vm.name,
            "power_state": str(vm.runtime.powerState),
            "cpu": vm.config.hardware.numCPU if vm.config else None,
            "memory_mb": vm.config.hardware.memoryMB if vm.config else None,
            "guest_os": vm.config.guestFullName if vm.config else None,
        }
        for vm in self.conn.get_all_vms()
    ]
@mcp_tool(
    name="get_vm_info",
    description="Get detailed information about a specific virtual machine",
    annotations=ToolAnnotations(readOnlyHint=True),
)
def get_vm_info(self, name: str) -> dict[str, Any]:
    """Get detailed VM information including hardware, network, and storage.

    Args:
        name: Name of the virtual machine.

    Returns:
        Dict with identity, hardware, guest, disk and NIC details.
        Fields derived from ``vm.config``/``vm.guest`` are None when
        those views are unavailable (e.g. inaccessible VMs).

    Raises:
        ValueError: If no VM with the given name exists.
    """
    vm = self.conn.find_vm(name)
    if not vm:
        raise ValueError(f"VM '{name}' not found")
    # Get disk info (capacity reported by the API in KB; convert to GB).
    disks = []
    if vm.config:
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualDisk):
                disks.append(
                    {
                        "label": device.deviceInfo.label,
                        "capacity_gb": round(device.capacityInKB / (1024 * 1024), 2),
                        # Not all backings expose thinProvisioned (e.g. RDM).
                        "thin_provisioned": getattr(
                            device.backing, "thinProvisioned", None
                        ),
                    }
                )
    # Get NIC info
    nics = []
    if vm.config:
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualEthernetCard):
                nics.append(
                    {
                        "label": device.deviceInfo.label,
                        "mac_address": device.macAddress,
                        "connected": device.connectable.connected
                        if device.connectable
                        else None,
                    }
                )
    return {
        "name": vm.name,
        "power_state": str(vm.runtime.powerState),
        "cpu": vm.config.hardware.numCPU if vm.config else None,
        "memory_mb": vm.config.hardware.memoryMB if vm.config else None,
        "guest_os": vm.config.guestFullName if vm.config else None,
        "guest_id": vm.config.guestId if vm.config else None,
        "uuid": vm.config.uuid if vm.config else None,
        "instance_uuid": vm.config.instanceUuid if vm.config else None,
        "host": vm.runtime.host.name if vm.runtime.host else None,
        "datastore": [ds.name for ds in vm.datastore] if vm.datastore else [],
        # Guest fields require VMware Tools to be reporting.
        "ip_address": vm.guest.ipAddress if vm.guest else None,
        "hostname": vm.guest.hostName if vm.guest else None,
        "tools_status": str(vm.guest.toolsStatus) if vm.guest else None,
        "tools_version": vm.guest.toolsVersion if vm.guest else None,
        "disks": disks,
        "nics": nics,
        "annotation": vm.config.annotation if vm.config else None,
    }
@mcp_tool(
    name="create_vm",
    description="Create a new virtual machine with specified resources",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=False),
)
def create_vm(
    self,
    name: str,
    cpu: int = 2,
    memory_mb: int = 4096,
    disk_gb: int = 20,
    datastore: str | None = None,
    network: str | None = None,
    guest_id: str = "otherGuest64",
) -> str:
    """Create a new virtual machine with specified configuration.

    Args:
        name: Name for the new VM.
        cpu: Virtual CPU count (default 2).
        memory_mb: Memory in MB (default 4096).
        disk_gb: Size of the initial thin-provisioned disk in GB (default 20).
        datastore: Datastore name (default: connection's default datastore).
        network: Network/portgroup name (default: connection's default network).
        guest_id: vSphere guest OS identifier (default "otherGuest64").

    Returns:
        Success message.

    Raises:
        ValueError: If the named datastore or network does not exist.
    """
    # Resolve datastore (explicit name overrides connection default).
    datastore_obj = self.conn.datastore
    if datastore:
        datastore_obj = self.conn.find_datastore(datastore)
        if not datastore_obj:
            raise ValueError(f"Datastore '{datastore}' not found")
    # Resolve network (explicit name overrides connection default).
    network_obj = self.conn.network
    if network:
        network_obj = self.conn.find_network(network)
        if not network_obj:
            raise ValueError(f"Network '{network}' not found")
    # Build VM config spec with required files property.
    # "[ds]" with no path lets vSphere pick the VM directory on the datastore.
    vm_file_info = vim.vm.FileInfo(
        vmPathName=f"[{datastore_obj.name}]"
    )
    vm_spec = vim.vm.ConfigSpec(
        name=name,
        memoryMB=memory_mb,
        numCPUs=cpu,
        guestId=guest_id,
        files=vm_file_info,
    )
    device_specs = []
    # Add SCSI controller. Negative device keys are temporary placeholders
    # that the server resolves when the devices are actually created.
    controller_spec = vim.vm.device.VirtualDeviceSpec()
    controller_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    controller_spec.device = vim.vm.device.ParaVirtualSCSIController()
    controller_spec.device.busNumber = 0
    controller_spec.device.sharedBus = (
        vim.vm.device.VirtualSCSIController.Sharing.noSharing
    )
    controller_spec.device.key = -101
    device_specs.append(controller_spec)
    # Add virtual disk (thin-provisioned, attached to the controller above
    # via controllerKey; fileOperation=create allocates the backing file).
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.capacityInKB = disk_gb * 1024 * 1024
    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    disk_spec.device.backing.diskMode = "persistent"
    disk_spec.device.backing.thinProvisioned = True
    disk_spec.device.backing.fileName = f"[{datastore_obj.name}]"
    disk_spec.device.controllerKey = controller_spec.device.key
    disk_spec.device.unitNumber = 0
    disk_spec.device.key = -1  # Negative key for new device
    device_specs.append(disk_spec)
    # Add network adapter if network is available. Standard networks and
    # distributed portgroups need different backing types.
    if network_obj:
        nic_spec = vim.vm.device.VirtualDeviceSpec()
        nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nic_spec.device = vim.vm.device.VirtualVmxnet3()
        if isinstance(network_obj, vim.Network):
            nic_spec.device.backing = (
                vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(
                    network=network_obj, deviceName=network_obj.name
                )
            )
        elif isinstance(network_obj, vim.dvs.DistributedVirtualPortgroup):
            # DVS portgroups are addressed by switch UUID + portgroup key.
            dvs_uuid = network_obj.config.distributedVirtualSwitch.uuid
            port_key = network_obj.key
            nic_spec.device.backing = (
                vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo(
                    port=vim.dvs.PortConnection(
                        portgroupKey=port_key, switchUuid=dvs_uuid
                    )
                )
            )
        nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo(
            startConnected=True, allowGuestControl=True
        )
        device_specs.append(nic_spec)
    vm_spec.deviceChange = device_specs
    # Create VM in the datacenter's root VM folder and wait for completion.
    task = self.conn.datacenter.vmFolder.CreateVM_Task(
        config=vm_spec, pool=self.conn.resource_pool
    )
    self.conn.wait_for_task(task)
    return f"VM '{name}' created successfully"
@mcp_tool(
    name="clone_vm",
    description="Clone a virtual machine from an existing VM or template",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=False),
)
def clone_vm(
    self,
    template_name: str,
    new_name: str,
    power_on: bool = False,
    datastore: str | None = None,
) -> str:
    """Create a copy of an existing VM or template under a new name.

    Args:
        template_name: Source VM or template to clone from.
        new_name: Name for the clone.
        power_on: Power the clone on after creation (default False).
        datastore: Target datastore (default: connection default).

    Returns:
        Success message.

    Raises:
        ValueError: If the source VM or the datastore cannot be found.
    """
    source = self.conn.find_vm(template_name)
    if not source:
        raise ValueError(f"Template VM '{template_name}' not found")
    # Place the clone next to its source when possible, else in the root folder.
    target_folder = source.parent
    if not isinstance(target_folder, vim.Folder):
        target_folder = self.conn.datacenter.vmFolder
    # Resolve datastore (explicit name overrides connection default).
    target_ds = self.conn.datastore
    if datastore:
        target_ds = self.conn.find_datastore(datastore)
        if not target_ds:
            raise ValueError(f"Datastore '{datastore}' not found")
    pool = source.resourcePool or self.conn.resource_pool
    spec = vim.vm.CloneSpec(
        powerOn=power_on,
        template=False,
        location=vim.vm.RelocateSpec(pool=pool, datastore=target_ds),
    )
    task = source.Clone(folder=target_folder, name=new_name, spec=spec)
    self.conn.wait_for_task(task)
    return f"VM '{new_name}' cloned from '{template_name}'"
@mcp_tool(
    name="delete_vm",
    description="Delete a virtual machine permanently (powers off if running)",
    annotations=ToolAnnotations(destructiveHint=True, idempotentHint=True),
)
def delete_vm(self, name: str) -> str:
    """Destroy the named VM, powering it off first when necessary.

    Raises:
        ValueError: If no VM with the given name exists.
    """
    machine = self.conn.find_vm(name)
    if not machine:
        raise ValueError(f"VM '{name}' not found")
    # A powered-on VM cannot be destroyed; hard power-off first.
    if machine.runtime.powerState == vim.VirtualMachine.PowerState.poweredOn:
        self.conn.wait_for_task(machine.PowerOffVM_Task())
    self.conn.wait_for_task(machine.Destroy_Task())
    return f"VM '{name}' deleted"
@mcp_tool(
    name="reconfigure_vm",
    description="Reconfigure VM hardware (CPU, memory). VM should be powered off for most changes.",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
)
def reconfigure_vm(
    self,
    name: str,
    cpu: int | None = None,
    memory_mb: int | None = None,
    annotation: str | None = None,
) -> str:
    """Apply hardware/annotation changes to a VM; None means "leave as-is".

    Raises:
        ValueError: If no VM with the given name exists.
    """
    machine = self.conn.find_vm(name)
    if not machine:
        raise ValueError(f"VM '{name}' not found")
    spec = vim.vm.ConfigSpec()
    applied: list[str] = []
    if cpu is not None:
        spec.numCPUs = cpu
        applied.append(f"CPU: {cpu}")
    if memory_mb is not None:
        spec.memoryMB = memory_mb
        applied.append(f"Memory: {memory_mb}MB")
    if annotation is not None:
        spec.annotation = annotation
        applied.append("annotation updated")
    # Nothing requested — skip the (pointless) reconfigure task entirely.
    if not applied:
        return f"No changes specified for VM '{name}'"
    self.conn.wait_for_task(machine.ReconfigVM_Task(spec=spec))
    return f"VM '{name}' reconfigured: {', '.join(applied)}"
@mcp_tool(
    name="rename_vm",
    description="Rename a virtual machine",
    annotations=ToolAnnotations(destructiveHint=False, idempotentHint=True),
)
def rename_vm(self, name: str, new_name: str) -> str:
    """Rename the VM identified by *name* to *new_name*.

    Raises:
        ValueError: If no VM with the given name exists.
    """
    machine = self.conn.find_vm(name)
    if not machine:
        raise ValueError(f"VM '{name}' not found")
    self.conn.wait_for_task(machine.Rename_Task(newName=new_name))
    return f"VM renamed from '{name}' to '{new_name}'"

View File

View File

@ -1,140 +0,0 @@
"""FastMCP server setup for mcvsphere."""
import logging
import sys
from pathlib import Path
from fastmcp import FastMCP
from mcvsphere.config import Settings, get_settings
from mcvsphere.connection import VMwareConnection
from mcvsphere.mixins import (
ConsoleMixin,
DiskManagementMixin,
GuestOpsMixin,
HostManagementMixin,
MonitoringMixin,
NICManagementMixin,
OVFManagementMixin,
PowerOpsMixin,
ResourcesMixin,
SerialPortMixin,
SnapshotsMixin,
VCenterOpsMixin,
VMLifecycleMixin,
)
# Module-level logger; handlers are configured in create_server() and must
# write to stderr so stdio-transport JSON-RPC on stdout stays clean.
logger = logging.getLogger(__name__)
def create_server(settings: Settings | None = None) -> FastMCP:
    """Create and configure the FastMCP server.

    Args:
        settings: Optional settings instance. If not provided, will load from
            environment variables and/or config file.

    Returns:
        Configured FastMCP server instance with VMware tools registered.
    """
    if settings is None:
        settings = get_settings()
    # Configure logging - MUST go to stderr for stdio transport compatibility
    log_level = getattr(logging, settings.log_level.upper(), logging.INFO)
    # For stdio mode, suppress most logging to avoid interference with JSON-RPC
    if settings.mcp_transport == "stdio":
        log_level = logging.WARNING
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        stream=sys.stderr,  # Explicitly use stderr
    )
    # Create FastMCP server
    mcp = FastMCP(
        name="mcvsphere",
        instructions=(
            "Model Control for vSphere - AI-driven VMware virtual machine management. "
            "Provides tools for VM lifecycle management, power operations, "
            "snapshots, guest OS operations, monitoring, and infrastructure resources."
        ),
    )
    # Create shared VMware connection used by every mixin
    logger.info("Connecting to VMware vCenter/ESXi...")
    conn = VMwareConnection(settings)
    # Create and register all mixins
    mixins = [
        VMLifecycleMixin(conn),
        PowerOpsMixin(conn),
        SnapshotsMixin(conn),
        MonitoringMixin(conn),
        GuestOpsMixin(conn),
        ResourcesMixin(conn),
        DiskManagementMixin(conn),
        NICManagementMixin(conn),
        OVFManagementMixin(conn),
        HostManagementMixin(conn),
        VCenterOpsMixin(conn),
        ConsoleMixin(conn),
        SerialPortMixin(conn),
    ]
    # Cleanup: the old per-mixin tool_count/resource_count accumulators were
    # computed but never used; the authoritative counts come from the server.
    for mixin in mixins:
        mixin.register_all(mcp)
    # NOTE(review): these are private FastMCP attributes — confirm against
    # the installed fastmcp version if the reported counts ever look wrong.
    actual_tools = len(mcp._tool_manager._tools)
    actual_resources = len(mcp._resource_manager._resources)
    logger.info(
        "mcvsphere ready - %d tools, %d resources registered",
        actual_tools,
        actual_resources,
    )
    return mcp
def run_server(config_path: Path | None = None) -> None:
    """Run the mcvsphere server.

    Args:
        config_path: Optional path to YAML/JSON config file.
    """
    # Load settings from an explicit config file, else env vars / defaults.
    settings = Settings.from_yaml(config_path) if config_path else get_settings()
    # Only print banner for SSE mode (stdio must stay clean for JSON-RPC)
    if settings.mcp_transport == "sse":
        try:
            from importlib.metadata import version

            package_version = version("mcvsphere")
        except Exception:
            # Package metadata unavailable (e.g. running from a source checkout).
            package_version = "dev"
        print(f"mcvsphere v{package_version}", file=sys.stderr)
        # NOTE(review): "" * 40 prints an empty line; a separator glyph
        # (e.g. "─") was probably lost in an encoding pass — confirm intent.
        print("" * 40, file=sys.stderr)
        print(
            f"Starting SSE transport on {settings.mcp_host}:{settings.mcp_port}",
            file=sys.stderr,
        )
    # Create and run server
    mcp = create_server(settings)
    if settings.mcp_transport == "sse":
        mcp.run(transport="sse", host=settings.mcp_host, port=settings.mcp_port)
    else:
        # stdio mode - suppress banner to keep stdout clean for JSON-RPC
        mcp.run(show_banner=False)

View File

@ -1,290 +0,0 @@
#!/usr/bin/env python3
"""Comprehensive MCP client to test all read-only ESXi MCP server tools."""
import asyncio
import json
import os
from pathlib import Path
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
def load_env_file(path: str = ".env") -> dict[str, str]:
    """Parse KEY=VALUE pairs from a .env file.

    Blank lines and lines starting with '#' are ignored, as are lines
    without an '='. Generalization over the previous version: values
    wrapped in matching single or double quotes (a common .env
    convention) have the surrounding quotes stripped.

    Args:
        path: Path to the .env file (default ".env").

    Returns:
        Mapping of variable names to values; empty if the file is missing.
    """
    env: dict[str, str] = {}
    env_path = Path(path)
    if not env_path.exists():
        return env
    with open(env_path) as f:
        for raw in f:
            line = raw.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, _, value = line.partition("=")
            value = value.strip()
            # Strip matching surrounding quotes ('...' or "...").
            if len(value) >= 2 and value[0] == value[-1] and value[0] in "'\"":
                value = value[1:-1]
            env[key.strip()] = value
    return env
def print_result(data, indent=2, max_items=5):
    """Pretty print result data with truncation.

    Lists print up to ``max_items`` entries (dicts summarized to their
    first 4 key=value pairs), dicts print up to 8 key/value pairs with
    values clipped at 60 chars, anything else prints verbatim.

    Bug fix: the per-item ellipsis is now appended only when the summary
    was actually truncated (previously "..." was added unconditionally).

    Args:
        data: Parsed JSON payload (list, dict, or scalar).
        indent: Unused; kept for interface compatibility.
        max_items: Maximum list entries to display.
    """
    if isinstance(data, list):
        # Servers signal "no results" as [{"message": ..., "count": 0}].
        if data and isinstance(data[0], dict) and "message" in data[0] and "count" in data[0]:
            print(f" {data[0]['message']}")
            return
        print(f" Found {len(data)} items:")
        for item in data[:max_items]:
            if isinstance(item, dict):
                summary = ", ".join(f"{k}={v}" for k, v in list(item.items())[:4])
                # Only add an ellipsis when the summary was truncated.
                if len(summary) > 100:
                    summary = summary[:100] + "..."
                print(f" - {summary}")
            else:
                print(f" - {item}")
        if len(data) > max_items:
            print(f" ... and {len(data) - max_items} more")
    elif isinstance(data, dict):
        for k, v in list(data.items())[:8]:
            val_str = str(v)[:60] + "..." if len(str(v)) > 60 else str(v)
            print(f" {k}: {val_str}")
    else:
        print(f" {data}")
async def test_tool(session, name: str, args: dict = None, description: str = ""):
    """Call a single MCP tool, pretty-print its result, and return the data.

    Args:
        session: Active MCP ClientSession.
        name: Tool name to invoke.
        args: Tool arguments (defaults to no arguments).
        description: Extra text appended to the banner line.

    Returns:
        The JSON-decoded tool result, or None on error / empty content.
    """
    args = args or {}
    # FIX: the separator was "'' * 60" (an empty string repeated 60 times),
    # which printed a blank line — presumably a box-drawing character lost
    # in an encoding round-trip. Use a plain dash rule instead.
    print(f"\n{'-' * 60}")
    print(f"Testing: {name} {description}")
    print(f"{'-' * 60}")
    try:
        result = await session.call_tool(name, args)
        if result.content:
            data = json.loads(result.content[0].text)
            print_result(data)
            return data
        else:
            print(" No content returned")
            return None
    except Exception as e:
        # Keep the suite running; a failing tool is reported, not fatal.
        print(f" ERROR: {e}")
        return None
async def test_resource(session, uri: str):
    """Read one MCP resource, pretty-print its content, and return the data.

    Args:
        session: Active MCP ClientSession.
        uri: Resource URI (e.g. "esxi://vms").

    Returns:
        The JSON-decoded resource content, or None on error / empty content.
    """
    # FIX: the separator was "'' * 60" (an empty string repeated 60 times),
    # which printed a blank line — presumably a box-drawing character lost
    # in an encoding round-trip. Use a plain dash rule instead.
    print(f"\n{'-' * 60}")
    print(f"Resource: {uri}")
    print(f"{'-' * 60}")
    try:
        result = await session.read_resource(uri)
        if result.contents:
            data = json.loads(result.contents[0].text)
            print_result(data)
            return data
        else:
            print(" No content returned")
            return None
    except Exception as e:
        # Keep the suite running; a failing resource read is reported, not fatal.
        print(f" ERROR: {e}")
        return None
async def main():
    """Test all read-only ESXi MCP server tools.

    Spawns the MCP server as a stdio subprocess via ``uv run mcvsphere``
    (credentials come from .env, falling back to the inherited process
    environment), connects a ClientSession, then walks every read-only
    tool/resource section in order, printing each result. Nothing invoked
    here modifies the vSphere environment.
    """
    print("=" * 60)
    print("ESXi MCP Server - Comprehensive Read-Only Test Suite")
    print("=" * 60)
    # Load from .env file
    dotenv = load_env_file()
    server_params = StdioServerParameters(
        command="uv",
        args=["run", "mcvsphere"],
        env={
            **os.environ,
            # .env values win over the inherited environment.
            "VCENTER_HOST": dotenv.get("VCENTER_HOST", os.environ.get("VCENTER_HOST", "")),
            "VCENTER_USER": dotenv.get("VCENTER_USER", os.environ.get("VCENTER_USER", "")),
            "VCENTER_PASSWORD": dotenv.get("VCENTER_PASSWORD", os.environ.get("VCENTER_PASSWORD", "")),
            "VCENTER_INSECURE": dotenv.get("VCENTER_INSECURE", os.environ.get("VCENTER_INSECURE", "true")),
            # stdio transport keeps stdout clean for JSON-RPC framing.
            "MCP_TRANSPORT": "stdio",
        }
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            print("\n✓ Connected to ESXi MCP Server\n")
            # ─────────────────────────────────────────────────────────────
            # List available tools and resources
            # ─────────────────────────────────────────────────────────────
            tools_result = await session.list_tools()
            resources_result = await session.list_resources()
            print(f"Available: {len(tools_result.tools)} tools, {len(resources_result.resources)} resources")
            # ─────────────────────────────────────────────────────────────
            # Test MCP Resources
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 1: MCP Resources")
            print("=" * 60)
            vms = await test_resource(session, "esxi://vms")
            await test_resource(session, "esxi://hosts")
            await test_resource(session, "esxi://datastores")
            await test_resource(session, "esxi://networks")
            await test_resource(session, "esxi://clusters")
            # Get a VM name for subsequent tests
            # NOTE(review): if the vms resource returns the empty-result
            # sentinel ([{"message": ..., "count": 0}]) this indexes a dict
            # without a "name" key and raises KeyError — confirm the server
            # always returns plain VM dicts here.
            vm_name = vms[0]["name"] if vms else None
            print(f"\n>>> Using VM '{vm_name}' for subsequent tests")
            # ─────────────────────────────────────────────────────────────
            # VM Lifecycle (read-only)
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 2: VM Lifecycle Tools")
            print("=" * 60)
            await test_tool(session, "list_vms")
            if vm_name:
                await test_tool(session, "get_vm_info", {"name": vm_name})
            # ─────────────────────────────────────────────────────────────
            # Monitoring Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 3: Monitoring Tools")
            print("=" * 60)
            await test_tool(session, "list_hosts")
            await test_tool(session, "get_host_stats")
            if vm_name:
                await test_tool(session, "get_vm_stats", {"name": vm_name})
            await test_tool(session, "get_alarms")
            await test_tool(session, "get_recent_events", {"count": 5})
            await test_tool(session, "get_recent_tasks", {"count": 5})
            # ─────────────────────────────────────────────────────────────
            # Host Management Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 4: Host Management Tools")
            print("=" * 60)
            await test_tool(session, "get_host_info")
            await test_tool(session, "get_host_hardware")
            await test_tool(session, "get_host_networking")
            await test_tool(session, "list_services")
            await test_tool(session, "get_ntp_config")
            # ─────────────────────────────────────────────────────────────
            # Datastore/Resources Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 5: Datastore & Resource Tools")
            print("=" * 60)
            # Get datastore name from resources
            ds_result = await session.read_resource("esxi://datastores")
            datastores = json.loads(ds_result.contents[0].text) if ds_result.contents else []
            ds_name = datastores[0]["name"] if datastores else None
            print(f"\n>>> Using datastore '{ds_name}' for tests")
            if ds_name:
                await test_tool(session, "get_datastore_info", {"name": ds_name})
                await test_tool(session, "browse_datastore", {"datastore": ds_name, "path": ""})
            await test_tool(session, "get_vcenter_info")
            await test_tool(session, "get_resource_pool_info")
            # Get network name
            net_result = await session.read_resource("esxi://networks")
            networks = json.loads(net_result.contents[0].text) if net_result.contents else []
            net_name = networks[0]["name"] if networks else None
            if net_name:
                await test_tool(session, "get_network_info", {"name": net_name})
            await test_tool(session, "list_templates")
            # ─────────────────────────────────────────────────────────────
            # Disk Management Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 6: Disk Management Tools")
            print("=" * 60)
            if vm_name:
                await test_tool(session, "list_disks", {"vm_name": vm_name})
            # ─────────────────────────────────────────────────────────────
            # NIC Management Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 7: NIC Management Tools")
            print("=" * 60)
            if vm_name:
                await test_tool(session, "list_nics", {"vm_name": vm_name})
            # ─────────────────────────────────────────────────────────────
            # Snapshot Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 8: Snapshot Tools")
            print("=" * 60)
            if vm_name:
                await test_tool(session, "list_snapshots", {"name": vm_name})
            # ─────────────────────────────────────────────────────────────
            # OVF Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 9: OVF Tools")
            print("=" * 60)
            await test_tool(session, "list_ovf_networks")
            # ─────────────────────────────────────────────────────────────
            # vCenter-Specific Tools
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 10: vCenter-Specific Tools")
            print("=" * 60)
            await test_tool(session, "list_folders")
            await test_tool(session, "list_clusters")
            await test_tool(session, "list_recent_tasks", {"max_count": 5})
            await test_tool(session, "list_recent_events", {"max_count": 5, "hours_back": 24})
            # ─────────────────────────────────────────────────────────────
            # Guest Operations (require VMware Tools + credentials)
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("SECTION 11: Guest Operations (may fail without VMware Tools)")
            print("=" * 60)
            # These typically need a running VM with VMware Tools
            # and guest credentials - expect failures on most VMs
            if vm_name:
                await test_tool(
                    session, "list_guest_processes",
                    {"name": vm_name, "username": "root", "password": "test"},
                    "(expected to fail without valid credentials)"
                )
            # ─────────────────────────────────────────────────────────────
            # Summary
            # ─────────────────────────────────────────────────────────────
            print("\n" + "=" * 60)
            print("TEST SUMMARY")
            print("=" * 60)
            print(f"✅ Read-only test suite completed")
            print(f" Tools available: {len(tools_result.tools)}")
            print(f" Resources available: {len(resources_result.resources)}")
            print(f"\nNote: Guest operations require VMware Tools + valid credentials")
            print("Note: Some vCenter tools return empty on standalone hosts")
if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,66 +0,0 @@
# ESXi MCP Server Test Commands
Try these in a new Claude Code session:
## 1. Basic Discovery
```
List all VMs on the ESXi host
```
```
Show me the datastores and their free space
```
```
What networks are available?
```
## 2. Host Management (NEW!)
```
Get detailed info about the ESXi host
```
```
List all services on the ESXi host
```
```
Show the NTP configuration
```
```
Show me the host networking config (vswitches, portgroups)
```
## 3. VM Hardware (NEW!)
```
List the disks on VM "your-vm-name"
```
```
List the NICs on VM "your-vm-name"
```
## 4. Datastore Operations
```
Browse the datastore "your-datastore" in the iso folder
```
```
Show me what's in the root of datastore "your-datastore"
```
## 5. Advanced Operations (be careful!)
```
# Add a 10GB disk to a VM
Add a 10GB thin-provisioned disk to VM "test-vm"
# Add a NIC
Add a vmxnet3 NIC to VM "test-vm" on network "VM Network"
# Configure NTP
Configure NTP servers 0.pool.ntp.org and 1.pool.ntp.org on the ESXi host
```
---
Start a new session with: `claude`

View File

@ -1,503 +0,0 @@
#!/usr/bin/env python3
"""Destructive test suite for ESXi MCP server - creates/modifies/deletes resources.
WARNING: This test creates real VMs and modifies infrastructure!
Only run in a test environment.
Usage:
python test_destructive.py [--skip-cleanup]
--skip-cleanup: Leave test VM for inspection (default: cleanup)
"""
import asyncio
import json
import os
import sys
from datetime import datetime
from pathlib import Path
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# Test configuration
# Timestamped names keep repeated runs from colliding with leftovers of
# earlier (possibly aborted) runs.
TEST_VM_NAME = f"mcp-test-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
TEST_FOLDER_NAME = f"mcp-test-folder-{datetime.now().strftime('%H%M%S')}"
# Parsed once at import time; pass --skip-cleanup to keep the test VM around.
SKIP_CLEANUP = "--skip-cleanup" in sys.argv
def load_env_file(path: str = ".env") -> dict[str, str]:
    """Load environment variables from a .env file."""
    parsed: dict[str, str] = {}
    dotenv_path = Path(path)
    if dotenv_path.exists():
        with open(dotenv_path) as source:
            # Keep only lines shaped like KEY=VALUE; ignore blanks/comments.
            for candidate in (ln.strip() for ln in source):
                if candidate and not candidate.startswith("#") and "=" in candidate:
                    name, _, raw_value = candidate.partition("=")
                    parsed[name.strip()] = raw_value.strip()
    return parsed
class TestResult:
    """Track destructive-test outcomes and print a pass/fail summary."""

    def __init__(self):
        self.passed = 0    # count of successful steps
        self.failed = 0    # count of failed steps
        self.skipped = 0   # count of skipped steps
        self.errors = []   # (test_name, error_message) pairs for failures

    def record(self, name: str, success: bool, error: str = None):
        """Record one test outcome and print a one-line status.

        FIX: the per-result status markers were lost from the print strings
        (presumably a unicode glyph dropped in an encoding round-trip, which
        left the pass line printing only the bare name); restore visible
        pass/fail markers.
        """
        if success:
            self.passed += 1
            print(f"  ✓ {name}")
        else:
            self.failed += 1
            self.errors.append((name, error))
            print(f"  ✗ {name}: {error}")

    def skip(self, name: str, reason: str):
        """Record a skipped test with its reason."""
        self.skipped += 1
        print(f" ⏭️ {name}: {reason}")

    def summary(self):
        """Print totals plus per-test errors; return True when nothing failed."""
        total = self.passed + self.failed + self.skipped
        print(f"\n{'=' * 60}")
        print("DESTRUCTIVE TEST SUMMARY")
        print(f"{'=' * 60}")
        print(f" Passed: {self.passed}/{total}")
        print(f" Failed: {self.failed}/{total}")
        print(f" Skipped: {self.skipped}/{total}")
        if self.errors:
            # FIX: was an f-string with no placeholders.
            print("\nErrors:")
            for name, error in self.errors:
                print(f" - {name}: {error}")
        return self.failed == 0
async def call_tool(session, name: str, args: dict = None) -> tuple[bool, object]:
    """Call a tool and return ``(success, result)``.

    Args:
        session: Active MCP ClientSession.
        name: Tool name to invoke.
        args: Tool arguments (defaults to no arguments).

    Returns:
        ``(True, data)`` on success, where ``data`` is the JSON-decoded
        payload (or the raw text when it is not JSON, or None when the tool
        returned no content); ``(False, error_string)`` on any exception.

    FIX: the return annotation previously used the builtin function ``any``
    as a type (``tuple[bool, any]``); ``object`` expresses the intent.
    """
    args = args or {}
    try:
        result = await session.call_tool(name, args)
        if result.content:
            text = result.content[0].text
            # Try to parse as JSON, fall back to plain text
            try:
                data = json.loads(text)
            except json.JSONDecodeError:
                data = text
            return True, data
        return True, None
    except Exception as e:
        return False, str(e)
async def main():
    """Run destructive tests.

    Creates a throwaway VM (TEST_VM_NAME), then drives it through lifecycle,
    power, disk, NIC, snapshot, folder, datastore, and vCenter-advanced
    operations, recording every outcome in a TestResult. Unless the
    --skip-cleanup flag was given, the test VM is deleted at the end.
    Returns True when no recorded step failed (or None on early abort).
    """
    print("=" * 60)
    print("ESXi MCP Server - DESTRUCTIVE Test Suite")
    print("=" * 60)
    print(f"\n⚠️ WARNING: This test will CREATE and MODIFY resources!")
    print(f" Test VM: {TEST_VM_NAME}")
    print(f" Cleanup: {'DISABLED' if SKIP_CLEANUP else 'ENABLED'}")
    print()
    results = TestResult()
    dotenv = load_env_file()
    # Get datastore and network from env or use defaults
    default_datastore = dotenv.get("VCENTER_DATASTORE", "datastore1")
    default_network = dotenv.get("VCENTER_NETWORK", "VM Network")
    server_params = StdioServerParameters(
        command="uv",
        args=["run", "mcvsphere"],
        env={
            **os.environ,
            "VCENTER_HOST": dotenv.get("VCENTER_HOST", ""),
            "VCENTER_USER": dotenv.get("VCENTER_USER", ""),
            "VCENTER_PASSWORD": dotenv.get("VCENTER_PASSWORD", ""),
            "VCENTER_INSECURE": dotenv.get("VCENTER_INSECURE", "true"),
            # stdio transport keeps stdout clean for JSON-RPC framing.
            "MCP_TRANSPORT": "stdio",
        }
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            print("✓ Connected to ESXi MCP Server\n")
            # Get available datastores and networks for test; fall back to
            # the .env-provided defaults when the resources come back empty.
            ds_result = await session.read_resource("esxi://datastores")
            datastores = json.loads(ds_result.contents[0].text) if ds_result.contents else []
            datastore = datastores[0]["name"] if datastores else default_datastore
            net_result = await session.read_resource("esxi://networks")
            networks = json.loads(net_result.contents[0].text) if net_result.contents else []
            network = networks[0]["name"] if networks else default_network
            print(f"Using datastore: {datastore}")
            print(f"Using network: {network}")
            # ─────────────────────────────────────────────────────────────
            # SECTION 1: VM Lifecycle
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 1: VM Lifecycle")
            print(f"{'=' * 60}")
            # Create VM — everything downstream depends on this succeeding.
            print(f"\n>>> Creating test VM: {TEST_VM_NAME}")
            success, data = await call_tool(session, "create_vm", {
                "name": TEST_VM_NAME,
                "cpu": 1,
                "memory_mb": 512,
                "disk_gb": 1,
                "guest_id": "otherGuest64",
                "datastore": datastore,
                "network": network,
            })
            results.record("create_vm", success, data if not success else None)
            if not success:
                print("\n❌ Cannot continue without test VM. Aborting.")
                results.summary()
                return
            # Get VM info
            success, data = await call_tool(session, "get_vm_info", {"name": TEST_VM_NAME})
            results.record("get_vm_info (new VM)", success, data if not success else None)
            # Rename VM (and rename back)
            new_name = f"{TEST_VM_NAME}-renamed"
            success, data = await call_tool(session, "rename_vm", {
                "name": TEST_VM_NAME,
                "new_name": new_name,
            })
            results.record("rename_vm", success, data if not success else None)
            if success:
                # Rename back
                await call_tool(session, "rename_vm", {"name": new_name, "new_name": TEST_VM_NAME})
            # Reconfigure VM
            success, data = await call_tool(session, "reconfigure_vm", {
                "name": TEST_VM_NAME,
                "memory_mb": 1024,
            })
            results.record("reconfigure_vm (memory)", success, data if not success else None)
            # ─────────────────────────────────────────────────────────────
            # SECTION 2: Power Operations
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 2: Power Operations")
            print(f"{'=' * 60}")
            # Power on
            success, data = await call_tool(session, "power_on", {"name": TEST_VM_NAME})
            results.record("power_on", success, data if not success else None)
            if success:
                # Wait a moment for power state to stabilize
                await asyncio.sleep(3)
                # Suspend
                success, data = await call_tool(session, "suspend_vm", {"name": TEST_VM_NAME})
                results.record("suspend_vm", success, data if not success else None)
                if success:
                    await asyncio.sleep(2)
                    # Power on again to test power off
                    await call_tool(session, "power_on", {"name": TEST_VM_NAME})
                    await asyncio.sleep(2)
                # Power off
                success, data = await call_tool(session, "power_off", {"name": TEST_VM_NAME})
                results.record("power_off", success, data if not success else None)
            else:
                results.skip("suspend_vm", "power_on failed")
                results.skip("power_off", "power_on failed")
            # Ensure VM is off for disk operations
            await call_tool(session, "power_off", {"name": TEST_VM_NAME})
            await asyncio.sleep(2)
            # ─────────────────────────────────────────────────────────────
            # SECTION 3: Disk Management
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 3: Disk Management")
            print(f"{'=' * 60}")
            # Add disk
            success, data = await call_tool(session, "add_disk", {
                "vm_name": TEST_VM_NAME,
                "size_gb": 1,
                "thin_provisioned": True,
            })
            results.record("add_disk", success, data if not success else None)
            # List disks
            success, data = await call_tool(session, "list_disks", {"vm_name": TEST_VM_NAME})
            results.record("list_disks (after add)", success, data if not success else None)
            disk_count = len(data) if success and isinstance(data, list) else 0
            # Extend disk
            if disk_count > 0:
                success, data = await call_tool(session, "extend_disk", {
                    "vm_name": TEST_VM_NAME,
                    "disk_label": "Hard disk 1",
                    "new_size_gb": 2,
                })
                results.record("extend_disk", success, data if not success else None)
            # Remove the added disk (Hard disk 2)
            if disk_count >= 2:
                success, data = await call_tool(session, "remove_disk", {
                    "vm_name": TEST_VM_NAME,
                    "disk_label": "Hard disk 2",
                })
                results.record("remove_disk", success, data if not success else None)
            else:
                results.skip("remove_disk", "Not enough disks")
            # ─────────────────────────────────────────────────────────────
            # SECTION 4: NIC Management
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 4: NIC Management")
            print(f"{'=' * 60}")
            # Add NIC
            success, data = await call_tool(session, "add_nic", {
                "vm_name": TEST_VM_NAME,
                "network": network,
                "nic_type": "vmxnet3",
            })
            results.record("add_nic", success, data if not success else None)
            # List NICs
            success, data = await call_tool(session, "list_nics", {"vm_name": TEST_VM_NAME})
            results.record("list_nics (after add)", success, data if not success else None)
            nic_count = len(data) if success and isinstance(data, list) else 0
            # Connect/disconnect NIC
            if nic_count > 0:
                success, data = await call_tool(session, "connect_nic", {
                    "vm_name": TEST_VM_NAME,
                    "nic_label": "Network adapter 1",
                    "connected": False,
                })
                results.record("connect_nic (disconnect)", success, data if not success else None)
            # Remove added NIC (Network adapter 2)
            if nic_count >= 2:
                success, data = await call_tool(session, "remove_nic", {
                    "vm_name": TEST_VM_NAME,
                    "nic_label": "Network adapter 2",
                })
                results.record("remove_nic", success, data if not success else None)
            else:
                results.skip("remove_nic", "Not enough NICs")
            # ─────────────────────────────────────────────────────────────
            # SECTION 5: Snapshots
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 5: Snapshots")
            print(f"{'=' * 60}")
            # Create snapshot
            success, data = await call_tool(session, "create_snapshot", {
                "name": TEST_VM_NAME,
                "snapshot_name": "test-snapshot-1",
                "description": "MCP test snapshot",
            })
            results.record("create_snapshot", success, data if not success else None)
            # List snapshots
            success, data = await call_tool(session, "list_snapshots", {"name": TEST_VM_NAME})
            results.record("list_snapshots", success, data if not success else None)
            # Rename snapshot
            success, data = await call_tool(session, "rename_snapshot", {
                "name": TEST_VM_NAME,
                "snapshot_name": "test-snapshot-1",
                "new_name": "renamed-snapshot",
                "new_description": "Renamed by MCP test",
            })
            results.record("rename_snapshot", success, data if not success else None)
            # Revert to snapshot
            success, data = await call_tool(session, "revert_to_snapshot", {
                "name": TEST_VM_NAME,
                "snapshot_name": "renamed-snapshot",
            })
            results.record("revert_to_snapshot", success, data if not success else None)
            # Delete snapshot
            success, data = await call_tool(session, "delete_snapshot", {
                "name": TEST_VM_NAME,
                "snapshot_name": "renamed-snapshot",
            })
            results.record("delete_snapshot", success, data if not success else None)
            # ─────────────────────────────────────────────────────────────
            # SECTION 6: Folder Operations (vCenter)
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 6: Folder Operations (vCenter)")
            print(f"{'=' * 60}")
            # Create folder
            success, data = await call_tool(session, "create_folder", {
                "folder_name": TEST_FOLDER_NAME,
            })
            results.record("create_folder", success, data if not success else None)
            # folder_created is referenced again by the cleanup block below;
            # it is always bound because this section runs unconditionally.
            folder_created = success
            # Move VM to folder
            if folder_created:
                success, data = await call_tool(session, "move_vm_to_folder", {
                    "vm_name": TEST_VM_NAME,
                    "folder_path": f"vm/{TEST_FOLDER_NAME}",
                })
                results.record("move_vm_to_folder", success, data if not success else None)
                # Move back to root for cleanup
                if success:
                    await call_tool(session, "move_vm_to_folder", {
                        "vm_name": TEST_VM_NAME,
                        "folder_path": "vm",
                    })
            else:
                results.skip("move_vm_to_folder", "folder creation failed")
            # ─────────────────────────────────────────────────────────────
            # SECTION 7: Datastore Operations
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 7: Datastore Operations")
            print(f"{'=' * 60}")
            # Create folder in datastore
            test_ds_folder = f"mcp-test-{datetime.now().strftime('%H%M%S')}"
            success, data = await call_tool(session, "create_datastore_folder", {
                "datastore": datastore,
                "path": test_ds_folder,
            })
            results.record("create_datastore_folder", success, data if not success else None)
            ds_folder_created = success
            # Delete datastore folder
            if ds_folder_created:
                success, data = await call_tool(session, "delete_datastore_file", {
                    "datastore": datastore,
                    "path": test_ds_folder,
                })
                results.record("delete_datastore_file (folder)", success, data if not success else None)
            else:
                results.skip("delete_datastore_file", "folder creation failed")
            # ─────────────────────────────────────────────────────────────
            # SECTION 8: vCenter Advanced Operations
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("SECTION 8: vCenter Advanced Operations")
            print(f"{'=' * 60}")
            # Storage vMotion - move VM to different datastore
            # Get list of datastores to find a second one
            if len(datastores) >= 2:
                # Find a different datastore
                other_datastore = None
                for ds in datastores:
                    if ds["name"] != datastore:
                        other_datastore = ds["name"]
                        break
                if other_datastore:
                    # NOTE(review): the separator between the two names looks
                    # lost (likely a unicode arrow dropped in an encoding
                    # round-trip) — the banner prints them run together.
                    print(f"\n>>> Storage vMotion: {datastore}{other_datastore}")
                    success, data = await call_tool(session, "storage_vmotion", {
                        "vm_name": TEST_VM_NAME,
                        "target_datastore": other_datastore,
                    })
                    results.record("storage_vmotion", success, data if not success else None)
                    # Move back to original datastore
                    if success:
                        await call_tool(session, "storage_vmotion", {
                            "vm_name": TEST_VM_NAME,
                            "target_datastore": datastore,
                        })
                else:
                    results.skip("storage_vmotion", "No alternate datastore found")
            else:
                results.skip("storage_vmotion", "Only one datastore available")
            # Convert VM to template
            print(f"\n>>> Converting VM to template: {TEST_VM_NAME}")
            success, data = await call_tool(session, "convert_to_template", {
                "vm_name": TEST_VM_NAME,
            })
            results.record("convert_to_template", success, data if not success else None)
            is_template = success
            # Deploy from template
            deployed_vm_name = f"{TEST_VM_NAME}-deployed"
            if is_template:
                success, data = await call_tool(session, "deploy_from_template", {
                    "template_name": TEST_VM_NAME,
                    "new_vm_name": deployed_vm_name,
                    "datastore": datastore,
                })
                results.record("deploy_from_template", success, data if not success else None)
                deployed_vm_created = success
                # Clean up deployed VM
                if deployed_vm_created:
                    await call_tool(session, "delete_vm", {"name": deployed_vm_name})
            else:
                results.skip("deploy_from_template", "template conversion failed")
            # Convert template back to VM
            if is_template:
                success, data = await call_tool(session, "convert_to_vm", {
                    "template_name": TEST_VM_NAME,
                })
                results.record("convert_to_vm", success, data if not success else None)
            else:
                results.skip("convert_to_vm", "template conversion failed")
            # ─────────────────────────────────────────────────────────────
            # CLEANUP
            # ─────────────────────────────────────────────────────────────
            print(f"\n{'=' * 60}")
            print("CLEANUP")
            print(f"{'=' * 60}")
            if SKIP_CLEANUP:
                print(f"\n⚠️ Cleanup SKIPPED. Test VM '{TEST_VM_NAME}' remains.")
                if folder_created:
                    print(f" Test folder '{TEST_FOLDER_NAME}' remains.")
            else:
                # Delete test VM
                print(f"\n>>> Deleting test VM: {TEST_VM_NAME}")
                success, data = await call_tool(session, "delete_vm", {"name": TEST_VM_NAME})
                results.record("delete_vm (cleanup)", success, data if not success else None)
                # Note: Folder deletion would require empty folder
                # In a real scenario, you'd need to handle this
                if folder_created:
                    print(f" Note: Test folder '{TEST_FOLDER_NAME}' may need manual cleanup")
            # Print summary
            return results.summary()
if __name__ == "__main__":
    success = asyncio.run(main())
    sys.exit(0 if success else 1)

View File

@ -1,467 +0,0 @@
#!/usr/bin/env python3
"""Extended test suite for ESXi MCP Server - covers tools not in main test suites.
Uses the Photon OS guest VM for testing guest operations, serial ports, etc.
Skips host management operations for safety.
Usage:
python test_extended.py
"""
import asyncio
import base64
import json
import os
import sys
from datetime import datetime
from pathlib import Path
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# Test VM configuration
# Name of a pre-existing, VMware-Tools-enabled guest VM the suite operates on.
TEST_VM = "photon-guest-test"
GUEST_USER = "root"
# WARNING(review): guest password hardcoded in the repository — load it from
# .env like the vCenter credentials instead of committing it.
GUEST_PASS = "wa9ukw!!"
def load_env_file(path: str = ".env") -> dict[str, str]:
    """Load environment variables from a .env file."""
    result: dict[str, str] = {}
    target = Path(path)
    if not target.exists():
        # No .env present: return an empty mapping rather than failing.
        return result
    with open(target) as stream:
        for line in stream:
            text = line.strip()
            if not text:
                continue
            if text.startswith("#") or "=" not in text:
                continue
            var_name, _, var_value = text.partition("=")
            result[var_name.strip()] = var_value.strip()
    return result
class TestResults:
    """Track extended-test outcomes and print a summary."""

    def __init__(self):
        self.passed = 0    # count of successful steps
        self.failed = 0    # count of failed steps
        self.skipped = 0   # count of skipped steps
        self.results = []  # (name, "PASS"|"FAIL"|"SKIP", message) tuples

    def record(self, name: str, success: bool, message: str = ""):
        """Record one test outcome and print a one-line status.

        FIX: the per-result status markers were lost from the print strings
        (a glyph dropped in an encoding round-trip left the bare name);
        restore the ✅/❌ markers this class already uses in summary().
        """
        if success:
            self.passed += 1
            print(f"  ✅ {name}")
            self.results.append((name, "PASS", message))
        else:
            self.failed += 1
            print(f"  ❌ {name}: {message}")
            self.results.append((name, "FAIL", message))

    def skip(self, name: str, reason: str):
        """Record a skipped test with its reason."""
        self.skipped += 1
        print(f" ⏭️ {name}: {reason}")
        self.results.append((name, "SKIP", reason))

    def summary(self):
        """Print totals; return True when nothing failed."""
        total = self.passed + self.failed + self.skipped
        print(f"\n{'=' * 60}")
        print("EXTENDED TEST SUMMARY")
        print(f"{'=' * 60}")
        print(f" ✅ Passed: {self.passed}/{total}")
        print(f" ❌ Failed: {self.failed}/{total}")
        print(f" ⏭️ Skipped: {self.skipped}/{total}")
        return self.failed == 0
async def call_tool(session, name: str, args: dict = None) -> tuple[bool, any]:
    """Call a tool and return (success, result)."""
    payload = args or {}
    try:
        outcome = await session.call_tool(name, payload)
        if not outcome.content:
            # Tool ran but produced no content.
            return True, None
        raw = outcome.content[0].text
        # Prefer decoded JSON; hand back the raw text when it is not JSON.
        try:
            parsed = json.loads(raw)
        except json.JSONDecodeError:
            return True, raw
        return True, parsed
    except Exception as exc:
        # Any failure is reported as (False, message) instead of raising.
        return False, str(exc)
async def main():
print("=" * 60)
print("ESXi MCP Server - Extended Test Suite")
print("=" * 60)
print(f"Test VM: {TEST_VM}")
print(f"Guest credentials: {GUEST_USER}/{'*' * len(GUEST_PASS)}")
print()
results = TestResults()
dotenv = load_env_file()
server_params = StdioServerParameters(
command="uv",
args=["run", "mcvsphere"],
env={
**os.environ,
"VCENTER_HOST": dotenv.get("VCENTER_HOST", ""),
"VCENTER_USER": dotenv.get("VCENTER_USER", ""),
"VCENTER_PASSWORD": dotenv.get("VCENTER_PASSWORD", ""),
"VCENTER_INSECURE": dotenv.get("VCENTER_INSECURE", "true"),
"MCP_TRANSPORT": "stdio",
}
)
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
await session.initialize()
print("✓ Connected to ESXi MCP Server\n")
# Get VM info first to ensure it exists
success, vm_info = await call_tool(session, "get_vm_info", {"name": TEST_VM})
if not success:
print(f"❌ Test VM '{TEST_VM}' not found. Aborting.")
return False
power_state = vm_info.get("power_state", "unknown")
print(f"VM power state: {power_state}")
# Get datastore for file operations
ds_result = await session.read_resource("esxi://datastores")
datastores = json.loads(ds_result.contents[0].text) if ds_result.contents else []
datastore = datastores[0]["name"] if datastores else "datastore1"
# ─────────────────────────────────────────────────────────────
# SECTION 1: Console & VMware Tools (NEW)
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 1: Console & VMware Tools")
print(f"{'=' * 60}")
# get_vm_tools_status
success, data = await call_tool(session, "get_vm_tools_status", {"name": TEST_VM})
results.record("get_vm_tools_status", success, str(data) if not success else "")
tools_ok = success and data.get("tools_status") == "toolsOk"
# vm_screenshot (works on powered-on VMs)
if power_state == "poweredOn":
success, data = await call_tool(session, "vm_screenshot", {
"name": TEST_VM, "width": 640, "height": 480
})
if success and data.get("image_base64"):
results.record("vm_screenshot", True)
else:
results.record("vm_screenshot", False, str(data))
else:
results.skip("vm_screenshot", "VM not powered on")
# wait_for_vm_tools (quick timeout since already running)
if tools_ok:
success, data = await call_tool(session, "wait_for_vm_tools", {
"name": TEST_VM, "timeout": 5, "poll_interval": 1
})
results.record("wait_for_vm_tools", success, str(data) if not success else "")
else:
results.skip("wait_for_vm_tools", "Tools not ready")
# ─────────────────────────────────────────────────────────────
# SECTION 2: Guest Operations
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 2: Guest Operations (requires VMware Tools)")
print(f"{'=' * 60}")
if not tools_ok:
print(" ⚠️ VMware Tools not ready, skipping guest operations")
for tool in ["run_command_in_guest", "list_guest_directory",
"create_guest_directory", "write_guest_file",
"read_guest_file", "delete_guest_file"]:
results.skip(tool, "VMware Tools not ready")
else:
guest_creds = {"name": TEST_VM, "username": GUEST_USER, "password": GUEST_PASS}
# run_command_in_guest
success, data = await call_tool(session, "run_command_in_guest", {
**guest_creds,
"command": "/usr/bin/uname",
"arguments": "-a",
})
results.record("run_command_in_guest", success, str(data) if not success else "")
# list_guest_directory
success, data = await call_tool(session, "list_guest_directory", {
**guest_creds,
"guest_path": "/tmp",
})
results.record("list_guest_directory", success, str(data) if not success else "")
# create_guest_directory
test_dir = f"/tmp/mcp_test_{datetime.now().strftime('%H%M%S')}"
success, data = await call_tool(session, "create_guest_directory", {
**guest_creds,
"guest_path": test_dir,
})
results.record("create_guest_directory", success, str(data) if not success else "")
dir_created = success
# write_guest_file
test_file = f"{test_dir}/test.txt"
test_content = f"MCP test file created at {datetime.now().isoformat()}"
if dir_created:
success, data = await call_tool(session, "write_guest_file", {
**guest_creds,
"guest_path": test_file,
"content": test_content,
})
results.record("write_guest_file", success, str(data) if not success else "")
file_written = success
else:
results.skip("write_guest_file", "Directory not created")
file_written = False
# read_guest_file
if file_written:
success, data = await call_tool(session, "read_guest_file", {
**guest_creds,
"guest_path": test_file,
})
if success:
# Verify content matches
read_content = data.get("content", "") if isinstance(data, dict) else str(data)
results.record("read_guest_file", True)
else:
results.record("read_guest_file", False, str(data))
else:
results.skip("read_guest_file", "File not written")
# delete_guest_file (cleanup)
if dir_created:
# Delete file first
if file_written:
await call_tool(session, "delete_guest_file", {
**guest_creds, "guest_path": test_file
})
# Delete directory
success, data = await call_tool(session, "delete_guest_file", {
**guest_creds,
"guest_path": test_dir,
})
results.record("delete_guest_file", success, str(data) if not success else "")
else:
results.skip("delete_guest_file", "Nothing to clean up")
# ─────────────────────────────────────────────────────────────
# SECTION 3: Serial Port Management (NEW)
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 3: Serial Port Management")
print(f"{'=' * 60}")
# get_serial_port (should work regardless of power state)
success, data = await call_tool(session, "get_serial_port", {"name": TEST_VM})
results.record("get_serial_port", success, str(data) if not success else "")
has_serial = success and data.get("configured", False)
# For setup/remove, VM must be powered off
if power_state == "poweredOff":
# setup_serial_port
success, data = await call_tool(session, "setup_serial_port", {
"name": TEST_VM,
"protocol": "telnet",
})
results.record("setup_serial_port", success, str(data) if not success else "")
serial_configured = success
if serial_configured:
# Power on to test connect operations
await call_tool(session, "power_on", {"name": TEST_VM})
await asyncio.sleep(3)
# connect_serial_port (disconnect)
success, data = await call_tool(session, "connect_serial_port", {
"name": TEST_VM, "connected": False
})
results.record("connect_serial_port (disconnect)", success, str(data) if not success else "")
# clear_serial_port
success, data = await call_tool(session, "clear_serial_port", {"name": TEST_VM})
results.record("clear_serial_port", success, str(data) if not success else "")
# Power off to remove
await call_tool(session, "power_off", {"name": TEST_VM})
await asyncio.sleep(2)
# remove_serial_port
success, data = await call_tool(session, "remove_serial_port", {"name": TEST_VM})
results.record("remove_serial_port", success, str(data) if not success else "")
else:
results.skip("connect_serial_port", "Serial port not configured")
results.skip("clear_serial_port", "Serial port not configured")
results.skip("remove_serial_port", "Serial port not configured")
else:
print(f" ⚠️ VM must be powered off for serial port setup (current: {power_state})")
results.skip("setup_serial_port", "VM must be powered off")
results.skip("connect_serial_port", "VM must be powered off")
results.skip("clear_serial_port", "VM must be powered off")
results.skip("remove_serial_port", "VM must be powered off")
# ─────────────────────────────────────────────────────────────
# SECTION 4: Power & Guest Control
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 4: Power & Guest Control")
print(f"{'=' * 60}")
# Helper to ensure VM is running with tools ready
async def ensure_vm_running():
    """Bring TEST_VM to a powered-on state and wait until VMware Tools respond.

    Reads the VM's current power state via get_vm_info; if it is anything
    other than poweredOn (suspended included), issues power_on and gives the
    guest a few seconds to come up, then blocks on wait_for_vm_tools.
    """
    _, vm_info = await call_tool(session, "get_vm_info", {"name": TEST_VM})
    current = vm_info.get("power_state") if vm_info else "unknown"
    if current != "poweredOn":
        if current == "suspended":
            print(" VM is suspended, powering on...")
        await call_tool(session, "power_on", {"name": TEST_VM})
        await asyncio.sleep(5)
    # Guest tools must be responsive before guest-level operations can run.
    await call_tool(session, "wait_for_vm_tools", {
        "name": TEST_VM, "timeout": 60, "poll_interval": 5
    })
await ensure_vm_running()
# standby_guest (puts guest into standby/sleep - may suspend VM)
# Skip this test as it's disruptive and puts VM in suspended state
results.skip("standby_guest", "Skipped - causes suspended state issues")
# reboot_guest (graceful reboot via VMware Tools)
await ensure_vm_running()
success, data = await call_tool(session, "reboot_guest", {"name": TEST_VM})
results.record("reboot_guest", success, str(data) if not success else "")
if success:
    # Give the guest time to go down and come back before polling tools.
    print(" Waiting for reboot to complete...")
    await asyncio.sleep(20)
    await call_tool(session, "wait_for_vm_tools", {
        "name": TEST_VM, "timeout": 60, "poll_interval": 5
    })
# reset_vm (hard reset - more disruptive)
await ensure_vm_running()
success, data = await call_tool(session, "reset_vm", {"name": TEST_VM})
results.record("reset_vm", success, str(data) if not success else "")
if success:
    print(" Waiting for reset to complete...")
    await asyncio.sleep(15)
# shutdown_guest (graceful shutdown via VMware Tools)
await ensure_vm_running()
success, data = await call_tool(session, "shutdown_guest", {"name": TEST_VM})
results.record("shutdown_guest", success, str(data) if not success else "")
if success:
    print(" Waiting for shutdown...")
    await asyncio.sleep(10)
# ─────────────────────────────────────────────────────────────
# SECTION 5: Snapshot Operations
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 5: Additional Snapshot Operations")
print(f"{'=' * 60}")
# Ensure VM is powered off for clean snapshots
await call_tool(session, "power_off", {"name": TEST_VM})
await asyncio.sleep(3)
# Create a couple snapshots for testing
snap1_success, _ = await call_tool(session, "create_snapshot", {
    "name": TEST_VM, "snapshot_name": "test-snap-1", "description": "Test 1"
})
snap2_success, _ = await call_tool(session, "create_snapshot", {
    "name": TEST_VM, "snapshot_name": "test-snap-2", "description": "Test 2"
})
if snap1_success and snap2_success:
    # revert_to_current_snapshot (reverts to most recent)
    success, data = await call_tool(session, "revert_to_current_snapshot", {"name": TEST_VM})
    results.record("revert_to_current_snapshot", success, str(data) if not success else "")
    # delete_all_snapshots
    success, data = await call_tool(session, "delete_all_snapshots", {"name": TEST_VM})
    results.record("delete_all_snapshots", success, str(data) if not success else "")
else:
    results.skip("revert_to_current_snapshot", "Snapshot creation failed")
    results.skip("delete_all_snapshots", "Snapshot creation failed")
# ─────────────────────────────────────────────────────────────
# SECTION 6: VM Hardware Operations
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 6: VM Hardware Operations")
print(f"{'=' * 60}")
# Ensure VM is off for hardware changes
await call_tool(session, "power_off", {"name": TEST_VM})
await asyncio.sleep(3)
# change_nic_network - get current networks first
net_result = await session.read_resource("esxi://networks")
networks = json.loads(net_result.contents[0].text) if net_result.contents else []
if len(networks) >= 1:
    # Re-point NIC 1 at the first available network.
    net_name = networks[0]["name"]
    success, data = await call_tool(session, "change_nic_network", {
        "vm_name": TEST_VM,
        "nic_label": "Network adapter 1",
        "new_network": net_name,
    })
    results.record("change_nic_network", success, str(data) if not success else "")
else:
    results.skip("change_nic_network", "No networks available")
# set_nic_mac: assign a fixed MAC in the VMware-reserved 00:50:56 OUI range.
success, data = await call_tool(session, "set_nic_mac", {
    "vm_name": TEST_VM,
    "nic_label": "Network adapter 1",
    "mac_address": "00:50:56:00:00:01",
})
results.record("set_nic_mac", success, str(data) if not success else "")
# ─────────────────────────────────────────────────────────────
# SECTION 7: Clone & Template (if time permits)
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("SECTION 7: Clone Operations")
print(f"{'=' * 60}")
# Time-stamped clone name avoids collisions across runs.
clone_name = f"mcp-clone-{datetime.now().strftime('%H%M%S')}"
# NOTE(review): `datastore` is bound earlier in this function, outside this
# excerpt — presumably the datastore selected during setup; confirm.
success, data = await call_tool(session, "clone_vm", {
    "template_name": TEST_VM,
    "new_name": clone_name,
    "datastore": datastore,
})
results.record("clone_vm", success, str(data) if not success else "")
clone_created = success
# Cleanup clone
if clone_created:
    print(f" Cleaning up clone: {clone_name}")
    await call_tool(session, "delete_vm", {"name": clone_name})
# ─────────────────────────────────────────────────────────────
# Restore VM state
# ─────────────────────────────────────────────────────────────
print(f"\n{'=' * 60}")
print("CLEANUP: Restoring VM state")
print(f"{'=' * 60}")
# Power the test VM back on
print(f" Powering on {TEST_VM}...")
await call_tool(session, "power_on", {"name": TEST_VM})
# Print summary
return results.summary()
if __name__ == "__main__":
    # Run the async test suite; exit 0 on overall success, 1 on any failure.
    suite_passed = asyncio.run(main())
    sys.exit(0 if suite_passed else 1)

1722
uv.lock generated

File diff suppressed because it is too large Load Diff