mcesptool/tests/test_idf_integration.py
Ryan Malloy 76ff1ad46a Fix v5.x parsers, clean up CLI, bump to 2026.02.25
- Rewrite _parse_tools_list for ESP-IDF v5.x compact format
  (handles both v5.x and older verbose output)
- Archive detection runs before v5.x version matching to avoid
  false positives on filenames like *.tar.gz
- Remove dead --config and --port CLI parameters
- Add 21 new tests: v5.x parser coverage, Tier 2 tool invocations,
  resource/prompt tests (193 total)
2026-03-02 02:16:40 -07:00

1249 lines
46 KiB
Python

"""
Test ESP-IDF Integration component
Tests parsers, validators, subprocess runners, tools.json caching,
tool/resource/prompt registration, and the TARGET_ARCH constant.
"""
import asyncio
import json
import os
import textwrap
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from mcesptool.components.idf_integration import (
TARGET_ARCH,
IDFIntegration,
_parse_export_vars,
_parse_tools_check,
_parse_tools_list,
_validate_target,
_validate_tool_names,
)
from mcesptool.config import ESPToolServerConfig
# ------------------------------------------------------------------ #
# Fixtures
# ------------------------------------------------------------------ #
@pytest.fixture
def mock_app():
    """Mock FastMCP app that captures registered tools, resources, and prompts."""
    app = MagicMock()
    tools = {}
    resources = {}
    prompts = {}

    def make_registrar(registry):
        # Decorator factory: @app.tool("name") stores func under "name"
        # in the given registry and returns it unchanged.
        def outer(key):
            def inner(func):
                registry[key] = func
                return func
            return inner
        return outer

    app.tool = make_registrar(tools)
    app.resource = make_registrar(resources)
    app.prompt = make_registrar(prompts)
    # Expose the registries so tests can inspect what was registered.
    app._registered_tools = tools
    app._registered_resources = resources
    app._registered_prompts = prompts
    return app
@pytest.fixture
def config(tmp_path):
    """Config with a fake ESP-IDF path rooted in tmp_path.

    Creates the minimal layout IDFIntegration probes for
    (tools/idf_tools.py and tools/idf.py stubs) and bypasses
    ESPToolServerConfig.__post_init__ so construction performs no real
    environment detection.
    """
    idf_dir = tmp_path / "esp-idf"
    tools_dir = idf_dir / "tools"
    tools_dir.mkdir(parents=True)
    (tools_dir / "idf_tools.py").write_text("# stub")
    (tools_dir / "idf.py").write_text("# stub")
    # Suppress __post_init__ side effects during construction.
    with patch.object(ESPToolServerConfig, "__post_init__"):
        cfg = ESPToolServerConfig()
    cfg.esp_idf_path = idf_dir
    return cfg
@pytest.fixture
def integration(mock_app, config):
    """An IDFIntegration instance wired to the mock app and tmp-path config."""
    instance = IDFIntegration(mock_app, config)
    return instance
@pytest.fixture
def mock_context():
    """MCP context stub whose async logging hooks can all be awaited."""
    ctx = MagicMock()
    for hook in ("info", "log", "progress"):
        setattr(ctx, hook, AsyncMock())
    return ctx
# ------------------------------------------------------------------ #
# 1. Parser unit tests (pure functions)
# ------------------------------------------------------------------ #
class TestParseToolsList:
    """Tests for _parse_tools_list parser.

    Covers both the older verbose `idf_tools.py list` output and the
    ESP-IDF v5.x compact format ("name: description" header lines with
    "- version (status)" entries).
    """

    # Older verbose format: "* <tool>" headers, "- Version <v>" lines,
    # then archive filenames; "(installed)" marks installed items.
    SAMPLE_OUTPUT = textwrap.dedent("""\
        * xtensa-esp-elf-gdb
        - Version 14.2_20240403
        - xtensa-esp-elf-gdb-14.2_20240403-x86_64-linux-gnu.tar.gz (installed)
        - Version 12.1_20231023
        - xtensa-esp-elf-gdb-12.1_20231023-x86_64-linux-gnu.tar.gz
        * riscv32-esp-elf
        - Version 14.2.0_20241119
        - riscv32-esp-elf-14.2.0_20241119-x86_64-linux-gnu.tar.gz (installed)
        - riscv32-esp-elf-14.2.0_20241119-aarch64-linux-gnu.tar.gz
        * cmake
        - Version 3.24.0
        - cmake-3.24.0-linux-x86_64.tar.gz
        """)

    def test_extracts_tool_names(self):
        """Each "* name" header becomes one tool entry, in input order."""
        tools = _parse_tools_list(self.SAMPLE_OUTPUT)
        names = [t["name"] for t in tools]
        assert names == ["xtensa-esp-elf-gdb", "riscv32-esp-elf", "cmake"]

    def test_extracts_versions(self):
        """Each "- Version" line yields a version record under its tool."""
        tools = _parse_tools_list(self.SAMPLE_OUTPUT)
        gdb_tool = tools[0]
        assert len(gdb_tool["versions"]) == 2
        assert gdb_tool["versions"][0]["version"] == "14.2_20240403"
        assert gdb_tool["versions"][1]["version"] == "12.1_20231023"

    def test_installed_status(self):
        """An "(installed)" marker sets the version's installed flag."""
        tools = _parse_tools_list(self.SAMPLE_OUTPUT)
        gdb_tool = tools[0]
        assert gdb_tool["versions"][0]["installed"] is True
        assert gdb_tool["versions"][1]["installed"] is False

    def test_archives_extracted(self):
        """Archive filename lines are nested under their version record."""
        tools = _parse_tools_list(self.SAMPLE_OUTPUT)
        riscv_tool = tools[1]
        archives = riscv_tool["versions"][0]["archives"]
        assert len(archives) == 2
        assert archives[0]["installed"] is True
        assert archives[1]["installed"] is False

    def test_archive_filenames(self):
        """Archive records carry the full filename in "file"."""
        tools = _parse_tools_list(self.SAMPLE_OUTPUT)
        riscv_tool = tools[1]
        archive_files = [a["file"] for a in riscv_tool["versions"][0]["archives"]]
        assert "riscv32-esp-elf-14.2.0_20241119-x86_64-linux-gnu.tar.gz" in archive_files
        assert "riscv32-esp-elf-14.2.0_20241119-aarch64-linux-gnu.tar.gz" in archive_files

    def test_empty_output(self):
        """Empty input yields an empty tool list."""
        assert _parse_tools_list("") == []

    def test_output_with_only_blank_lines(self):
        """Whitespace-only input also yields no tools."""
        assert _parse_tools_list("\n\n\n") == []

    def test_single_tool_no_archives(self):
        """A version with no archive lines gets an empty archives list."""
        output = "* lonely-tool\n - Version 1.0\n"
        tools = _parse_tools_list(output)
        assert len(tools) == 1
        assert tools[0]["name"] == "lonely-tool"
        assert tools[0]["versions"][0]["archives"] == []

    # v5.x compact format tests (real ESP-IDF v5.3 output)
    SAMPLE_V5 = textwrap.dedent("""\
        * xtensa-esp-elf-gdb: GDB for Xtensa
        - 14.2_20240403 (recommended, installed)
        * riscv32-esp-elf-gdb: GDB for RISC-V
        - 14.2_20240403 (recommended, installed)
        * xtensa-esp-elf: Toolchain for 32-bit Xtensa based on GCC
        - esp-13.2.0_20240530 (recommended, installed)
        * cmake: CMake build system (optional)
        - 3.24.0 (recommended, installed)
        - 3.16.3 (supported)
        * qemu-riscv32: QEMU for RISC-V (optional)
        - esp_develop_8.2.0_20240122 (recommended)
        """)

    def test_v5_extracts_tool_names(self):
        """v5 "name: description" headers yield the bare tool name."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        names = [t["name"] for t in tools]
        assert "xtensa-esp-elf-gdb" in names
        assert "riscv32-esp-elf-gdb" in names
        assert "cmake" in names
        assert "qemu-riscv32" in names

    def test_v5_extracts_descriptions(self):
        """Text after the colon is kept as the tool description."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        gdb = next(t for t in tools if t["name"] == "xtensa-esp-elf-gdb")
        assert gdb["description"] == "GDB for Xtensa"

    def test_v5_tool_count(self):
        """All five tools in the sample are parsed."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        assert len(tools) == 5

    def test_v5_installed_status(self):
        """"installed" in the status marks the version installed."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        gdb = next(t for t in tools if t["name"] == "xtensa-esp-elf-gdb")
        assert gdb["versions"][0]["installed"] is True
        qemu = next(t for t in tools if t["name"] == "qemu-riscv32")
        assert qemu["versions"][0]["installed"] is False

    def test_v5_version_extracted(self):
        """The version string before the parenthesized status is captured."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        xtensa = next(t for t in tools if t["name"] == "xtensa-esp-elf")
        assert xtensa["versions"][0]["version"] == "esp-13.2.0_20240530"

    def test_v5_multiple_versions(self):
        """Multiple "- version" lines accumulate under one tool."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        cmake = next(t for t in tools if t["name"] == "cmake")
        assert len(cmake["versions"]) == 2
        assert cmake["versions"][0]["version"] == "3.24.0"
        assert cmake["versions"][1]["version"] == "3.16.3"

    def test_v5_status_field(self):
        """The parenthesized text is stored verbatim as "status"."""
        tools = _parse_tools_list(self.SAMPLE_V5)
        cmake = next(t for t in tools if t["name"] == "cmake")
        assert cmake["versions"][0]["status"] == "recommended, installed"
        assert cmake["versions"][1]["status"] == "supported"
class TestParseToolsCheck:
    """Tests for _parse_tools_check parser.

    Covers the legacy single-line "name version: found / not found"
    format and the ESP-IDF v5.x multi-line "Checking tool ..." format.
    """

    # Old single-line format (kept for backwards compat)
    SAMPLE_OUTPUT_LEGACY = textwrap.dedent("""\
        xtensa-esp-elf 14.2.0_20241119: found
        riscv32-esp-elf 14.2.0_20241119: found
        xtensa-esp-elf-gdb 14.2_20240403: found
        esp-rom-elfs 20240305: found in /home/user/.espressif
        cmake 3.24.0: not found
        ninja 1.11.1: not found
        """)

    # Real ESP-IDF v5.3 multi-line format
    SAMPLE_OUTPUT_V5 = textwrap.dedent("""\
        Checking for installed tools...
        Checking tool xtensa-esp-elf-gdb
        no version found in PATH
        version installed in tools directory: 14.2_20240403
        Checking tool riscv32-esp-elf-gdb
        no version found in PATH
        version installed in tools directory: 14.2_20240403
        Checking tool xtensa-esp-elf
        no version found in PATH
        version installed in tools directory: esp-13.2.0_20240530
        Checking tool cmake
        version found in PATH: 4.2.2
        version installed in tools directory: 3.24.0
        Checking tool qemu-riscv32
        no version found in PATH
        """)

    # Legacy single-line tests
    def test_installed_tools(self):
        """Lines reporting "found" land in the installed bucket."""
        result = _parse_tools_check(self.SAMPLE_OUTPUT_LEGACY)
        assert "xtensa-esp-elf 14.2.0_20241119" in result["installed"]
        assert "riscv32-esp-elf 14.2.0_20241119" in result["installed"]

    def test_missing_tools(self):
        """Lines reporting "not found" land in the missing bucket."""
        result = _parse_tools_check(self.SAMPLE_OUTPUT_LEGACY)
        assert "cmake 3.24.0" in result["missing"]
        assert "ninja 1.11.1" in result["missing"]

    def test_installed_count(self):
        """"found in <path>" also counts as installed (4 total in sample)."""
        result = _parse_tools_check(self.SAMPLE_OUTPUT_LEGACY)
        assert len(result["installed"]) == 4

    def test_missing_count(self):
        result = _parse_tools_check(self.SAMPLE_OUTPUT_LEGACY)
        assert len(result["missing"]) == 2

    def test_empty_output(self):
        """Empty input yields empty buckets, not None."""
        result = _parse_tools_check("")
        assert result == {"installed": [], "missing": []}

    def test_all_found(self):
        output = "cmake 3.24.0: found\nninja 1.11.1: found\n"
        result = _parse_tools_check(output)
        assert len(result["installed"]) == 2
        assert len(result["missing"]) == 0

    def test_all_missing(self):
        output = "cmake 3.24.0: not found\nninja 1.11.1: not found\n"
        result = _parse_tools_check(output)
        assert len(result["installed"]) == 0
        assert len(result["missing"]) == 2

    # Multi-line format tests (ESP-IDF v5.x)
    def test_v5_installed_tools(self):
        """Tools with a "version installed in tools directory" line count as installed."""
        result = _parse_tools_check(self.SAMPLE_OUTPUT_V5)
        names = [t.split()[0] for t in result["installed"]]
        assert "xtensa-esp-elf-gdb" in names
        assert "riscv32-esp-elf-gdb" in names
        assert "xtensa-esp-elf" in names
        assert "cmake" in names

    def test_v5_missing_tools(self):
        """A tool with no installed version anywhere is reported missing."""
        result = _parse_tools_check(self.SAMPLE_OUTPUT_V5)
        assert "qemu-riscv32" in result["missing"]

    def test_v5_installed_count(self):
        result = _parse_tools_check(self.SAMPLE_OUTPUT_V5)
        assert len(result["installed"]) == 4

    def test_v5_missing_count(self):
        result = _parse_tools_check(self.SAMPLE_OUTPUT_V5)
        assert len(result["missing"]) == 1

    def test_v5_version_included_in_name(self):
        result = _parse_tools_check(self.SAMPLE_OUTPUT_V5)
        # Installed tools should include version from "version installed" line
        installed_str = " ".join(result["installed"])
        assert "14.2_20240403" in installed_str
        assert "esp-13.2.0_20240530" in installed_str
class TestParseExportVars:
    """Tests for the _parse_export_vars KEY=value parser."""

    def test_basic_key_value(self):
        """Two plain assignments produce two entries."""
        parsed = _parse_export_vars(
            "IDF_PATH=/opt/esp-idf\nIDF_TOOLS_PATH=/home/user/.espressif\n"
        )
        assert parsed["IDF_PATH"] == "/opt/esp-idf"
        assert parsed["IDF_TOOLS_PATH"] == "/home/user/.espressif"

    def test_skips_comments(self):
        """Lines beginning with '#' are ignored."""
        parsed = _parse_export_vars("# This is a comment\nKEY=value\n# Another comment\n")
        assert len(parsed) == 1
        assert parsed["KEY"] == "value"

    def test_skips_blank_lines(self):
        """Blank lines never create entries."""
        parsed = _parse_export_vars("\n\nKEY=value\n\n\nOTHER=thing\n\n")
        assert len(parsed) == 2

    def test_path_entry_with_colons(self):
        """Colons inside a PATH-style value are preserved verbatim."""
        parsed = _parse_export_vars(
            "PATH=/opt/esp-idf/tools/bin:/home/user/.espressif/tools/bin\n"
        )
        assert parsed["PATH"] == "/opt/esp-idf/tools/bin:/home/user/.espressif/tools/bin"

    def test_empty_output(self):
        """Empty input yields an empty mapping."""
        assert _parse_export_vars("") == {}

    def test_value_with_equals_sign(self):
        """Only the first '=' splits key from value."""
        parsed = _parse_export_vars("CMAKE_FLAGS=-DFOO=bar\n")
        assert parsed["CMAKE_FLAGS"] == "-DFOO=bar"

    def test_whitespace_stripping(self):
        """Surrounding whitespace on key and value is stripped."""
        parsed = _parse_export_vars(" KEY = value \n")
        assert parsed["KEY"] == "value"
# ------------------------------------------------------------------ #
# 2. Validation function tests
# ------------------------------------------------------------------ #
class TestValidateTarget:
    """Tests for _validate_target."""

    def test_valid_targets(self):
        """Every key of TARGET_ARCH round-trips through validation unchanged."""
        assert all(_validate_target(chip) == chip for chip in TARGET_ARCH)

    def test_esp32(self):
        assert _validate_target("esp32") == "esp32"

    def test_esp32p4(self):
        assert _validate_target("esp32p4") == "esp32p4"

    def test_esp32c3(self):
        assert _validate_target("esp32c3") == "esp32c3"

    def test_invalid_target_raises(self):
        """Unsupported chips (e.g. ESP8266) are rejected."""
        with pytest.raises(ValueError, match="Unknown target"):
            _validate_target("esp8266")

    def test_empty_string_raises(self):
        with pytest.raises(ValueError, match="Unknown target"):
            _validate_target("")

    def test_similar_name_raises(self):
        """Matching is case-sensitive: upper-case spellings are rejected."""
        with pytest.raises(ValueError, match="Unknown target"):
            _validate_target("ESP32")

    def test_error_lists_valid_targets(self):
        """The raised message enumerates the accepted target names."""
        with pytest.raises(ValueError, match="esp32") as exc_info:
            _validate_target("nope")
        assert "Valid targets" in str(exc_info.value)
class TestValidateToolNames:
    """Tests for _validate_tool_names (command-injection guard)."""

    def test_valid_tool_names(self):
        """Ordinary dashed tool names pass through unchanged."""
        candidates = ["riscv32-esp-elf", "xtensa-esp-elf-gdb", "cmake", "ninja"]
        assert _validate_tool_names(candidates) == candidates

    def test_tool_name_with_dots(self):
        candidates = ["esp-rom-elfs"]
        assert _validate_tool_names(candidates) == candidates

    def test_flag_injection_double_dash(self):
        """Names that look like long flags are rejected."""
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["--dangerous-flag"])

    def test_flag_injection_single_dash(self):
        """Names that look like short flags are rejected."""
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["-rf"])

    def test_special_chars_semicolon(self):
        """Shell metacharacters cannot sneak a second command in."""
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["cmake; rm -rf /"])

    def test_special_chars_pipe(self):
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["cmake|evil"])

    def test_special_chars_backtick(self):
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["`whoami`"])

    def test_empty_list(self):
        """An empty list is valid and returned as-is."""
        assert _validate_tool_names([]) == []

    def test_mixed_valid_and_invalid(self):
        """A single bad entry poisons the whole list."""
        with pytest.raises(ValueError, match="Invalid tool name"):
            _validate_tool_names(["cmake", "--evil", "ninja"])
# ------------------------------------------------------------------ #
# 3. Component tests (mock subprocess)
# ------------------------------------------------------------------ #
class TestToolRegistration:
    """Verify all tools, resources, and prompts are registered."""

    def test_tier1_tools_registered(self, mock_app, config):
        """All four Tier 1 (environment) tools appear in the registry."""
        IDFIntegration(mock_app, config)
        for expected in (
            "idf_tools_list",
            "idf_tools_check",
            "idf_tools_install",
            "idf_env_info",
        ):
            assert expected in mock_app._registered_tools

    def test_tier2_tools_registered(self, mock_app, config):
        """All three Tier 2 (project) tools appear in the registry."""
        IDFIntegration(mock_app, config)
        for expected in ("idf_build_project", "idf_flash_project", "idf_monitor"):
            assert expected in mock_app._registered_tools

    def test_all_seven_tools(self, mock_app, config):
        """Exactly seven tools are registered in total."""
        IDFIntegration(mock_app, config)
        assert len(mock_app._registered_tools) == 7

    def test_resource_registered(self, mock_app, config):
        """The status resource is registered under its esp:// URI."""
        IDFIntegration(mock_app, config)
        assert "esp://idf/status" in mock_app._registered_resources

    def test_prompt_registered(self, mock_app, config):
        """The setup prompt is registered by name."""
        IDFIntegration(mock_app, config)
        assert "idf_setup_target" in mock_app._registered_prompts
class TestRunIdfTools:
    """Tests for _run_idf_tools subprocess runner (subprocess fully mocked)."""

    @pytest.mark.asyncio
    async def test_success_case(self, integration):
        """Return code 0 yields success=True with decoded stdout."""
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"tool output", b""))
        mock_proc.returncode = 0
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            result = await integration._run_idf_tools(["list"])
        assert result["success"] is True
        assert result["output"] == "tool output"

    @pytest.mark.asyncio
    async def test_nonzero_exit_code(self, integration):
        """Non-zero exit yields success=False with stderr in the error."""
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"", b"fatal error"))
        mock_proc.returncode = 1
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            result = await integration._run_idf_tools(["check"])
        assert result["success"] is False
        assert "fatal error" in result["error"]

    @pytest.mark.asyncio
    async def test_timeout_kills_process(self, integration):
        """On timeout the runner kills the child and reports a Timeout error."""
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(side_effect=asyncio.TimeoutError)
        mock_proc.returncode = None
        mock_proc.kill = MagicMock()
        mock_proc.wait = AsyncMock()
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            # Force the wait to time out regardless of the real timeout value.
            with patch("asyncio.wait_for", side_effect=asyncio.TimeoutError):
                result = await integration._run_idf_tools(["list"], timeout=0.1)
        assert result["success"] is False
        assert "Timeout" in result["error"]
        mock_proc.kill.assert_called_once()

    @pytest.mark.asyncio
    async def test_file_not_found(self, integration):
        """A missing interpreter/script is reported, not raised."""
        with patch(
            "asyncio.create_subprocess_exec", side_effect=FileNotFoundError("python3 missing")
        ):
            result = await integration._run_idf_tools(["list"])
        assert result["success"] is False
        assert "not found" in result["error"]

    @pytest.mark.asyncio
    async def test_no_idf_path(self, integration):
        """With no configured ESP-IDF path the runner fails fast."""
        integration.config.esp_idf_path = None
        result = await integration._run_idf_tools(["list"])
        assert result["success"] is False
        assert "not configured" in result["error"]

    @pytest.mark.asyncio
    async def test_stderr_output_captured(self, integration):
        """stderr is preserved separately even on success."""
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"stdout", b"stderr info"))
        mock_proc.returncode = 0
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            result = await integration._run_idf_tools(["list"])
        assert result["success"] is True
        assert result["stderr"] == "stderr info"

    @pytest.mark.asyncio
    async def test_custom_env_passed(self, integration):
        """Caller-supplied env vars are merged and IDF_PATH is always set."""
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"ok", b""))
        mock_proc.returncode = 0
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc) as mock_exec:
            await integration._run_idf_tools(["list"], env={"CUSTOM_VAR": "test"})
        call_kwargs = mock_exec.call_args
        env = call_kwargs.kwargs["env"]
        assert env["CUSTOM_VAR"] == "test"
        assert env["IDF_PATH"] == str(integration.config.esp_idf_path)
class TestRunIdfPy:
    """Tests for _run_idf_py subprocess runner."""

    def _mock_env(self, integration):
        """Minimal fake environment, as _build_idf_env would return it."""
        idf_path = str(integration.config.esp_idf_path)
        return {"PATH": "/usr/bin", "IDF_PATH": idf_path}

    @pytest.mark.asyncio
    async def test_unavailable_returns_error(self, integration):
        """When IDF is not available the runner fails fast with a hint."""
        integration.config.get_idf_available = MagicMock(return_value=False)
        result = await integration._run_idf_py(["build"])
        assert result["success"] is False
        assert "not available" in result["error"]
        assert "hint" in result

    @pytest.mark.asyncio
    async def test_success_case(self, integration):
        """Return code 0 yields success=True with decoded stdout."""
        integration.config.get_idf_available = MagicMock(return_value=True)
        integration._build_idf_env = AsyncMock(return_value=self._mock_env(integration))
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"build ok", b""))
        mock_proc.returncode = 0
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            result = await integration._run_idf_py(["build"])
        assert result["success"] is True
        assert result["output"] == "build ok"

    @pytest.mark.asyncio
    async def test_build_env_failure(self, integration):
        """A None env from _build_idf_env aborts before any subprocess."""
        integration.config.get_idf_available = MagicMock(return_value=True)
        integration._build_idf_env = AsyncMock(return_value=None)
        result = await integration._run_idf_py(["build"])
        assert result["success"] is False
        assert "Failed to export" in result["error"]

    @pytest.mark.asyncio
    async def test_timeout_cleanup(self, integration):
        """On timeout the runner reports a Timeout error."""
        integration.config.get_idf_available = MagicMock(return_value=True)
        integration._build_idf_env = AsyncMock(return_value=self._mock_env(integration))
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(side_effect=asyncio.TimeoutError)
        mock_proc.returncode = None
        mock_proc.kill = MagicMock()
        mock_proc.wait = AsyncMock()
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            # Force the wait to time out regardless of the real timeout value.
            with patch("asyncio.wait_for", side_effect=asyncio.TimeoutError):
                result = await integration._run_idf_py(["build"], timeout=0.1)
        assert result["success"] is False
        assert "Timeout" in result["error"]

    @pytest.mark.asyncio
    async def test_file_not_found(self, integration):
        """A missing idf.py is reported, not raised."""
        integration.config.get_idf_available = MagicMock(return_value=True)
        integration._build_idf_env = AsyncMock(return_value=self._mock_env(integration))
        with patch(
            "asyncio.create_subprocess_exec", side_effect=FileNotFoundError("not found")
        ):
            result = await integration._run_idf_py(["build"])
        assert result["success"] is False
        assert "not found" in result["error"]

    @pytest.mark.asyncio
    async def test_nonzero_exit_code(self, integration):
        """Non-zero exit yields success=False with stderr in the error."""
        integration.config.get_idf_available = MagicMock(return_value=True)
        integration._build_idf_env = AsyncMock(return_value=self._mock_env(integration))
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"output", b"error detail"))
        mock_proc.returncode = 2
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            result = await integration._run_idf_py(["build"])
        assert result["success"] is False
        assert "error detail" in result["error"]
class TestBuildIdfEnv:
    """Tests for _build_idf_env."""

    @pytest.mark.asyncio
    async def test_parses_export_output(self, integration):
        """Exported KEY=value pairs land in the env, with IDF_PATH forced."""
        integration._run_idf_tools = AsyncMock(return_value={
            "success": True,
            "output": "IDF_TOOLS_PATH=/home/user/.espressif\nPATH=/tools/bin\n",
            "stderr": "",
        })
        built = await integration._build_idf_env()
        assert built is not None
        assert built["IDF_TOOLS_PATH"] == "/home/user/.espressif"
        assert built["IDF_PATH"] == str(integration.config.esp_idf_path)

    @pytest.mark.asyncio
    async def test_path_prepended(self, integration):
        """The exported PATH goes in front of the inherited PATH."""
        integration._run_idf_tools = AsyncMock(return_value={
            "success": True,
            "output": "PATH=/new/tools/bin\n",
            "stderr": "",
        })
        built = await integration._build_idf_env()
        assert built is not None
        assert built["PATH"].startswith("/new/tools/bin")

    @pytest.mark.asyncio
    async def test_returns_none_on_failure(self, integration):
        """A failed export run makes the builder return None."""
        integration._run_idf_tools = AsyncMock(
            return_value={"success": False, "error": "script missing"}
        )
        built = await integration._build_idf_env()
        assert built is None
class TestLoadToolsJson:
    """Tests for _load_tools_json with mtime-based caching."""

    @pytest.mark.asyncio
    async def test_loads_on_first_call(self, integration, config):
        """A valid tools.json is read and parsed on the first call."""
        tools_json_path = config.esp_idf_path / "tools" / "tools.json"
        tools_data = {"version": 2, "tools": [{"name": "cmake"}]}
        tools_json_path.write_text(json.dumps(tools_data))
        result = await integration._load_tools_json()
        assert result is not None
        assert result["version"] == 2
        assert len(result["tools"]) == 1

    @pytest.mark.asyncio
    async def test_cache_hit_same_mtime(self, integration, config):
        """An unchanged mtime returns the cached parse, not a re-read."""
        tools_json_path = config.esp_idf_path / "tools" / "tools.json"
        tools_data = {"version": 2, "tools": [{"name": "cmake"}]}
        tools_json_path.write_text(json.dumps(tools_data))
        result1 = await integration._load_tools_json()
        assert result1 is not None
        # Second call should return cached version without re-reading
        result2 = await integration._load_tools_json()
        assert result2 is result1  # Same object reference = cache hit

    @pytest.mark.asyncio
    async def test_cache_invalidation_on_mtime_change(self, integration, config):
        """A newer mtime invalidates the cache and re-reads the file."""
        tools_json_path = config.esp_idf_path / "tools" / "tools.json"
        # Write initial data
        tools_data_v1 = {"version": 1, "tools": []}
        tools_json_path.write_text(json.dumps(tools_data_v1))
        result1 = await integration._load_tools_json()
        assert result1 is not None
        assert result1["version"] == 1
        # Modify file with new content and force a different mtime
        tools_data_v2 = {"version": 2, "tools": [{"name": "new-tool"}]}
        tools_json_path.write_text(json.dumps(tools_data_v2))
        # Bump mtime to ensure it differs (some filesystems have 1s resolution)
        new_mtime = tools_json_path.stat().st_mtime + 2.0
        os.utime(tools_json_path, (new_mtime, new_mtime))
        result2 = await integration._load_tools_json()
        assert result2 is not None
        assert result2["version"] == 2

    @pytest.mark.asyncio
    async def test_returns_none_when_no_idf_path(self, integration):
        """No configured ESP-IDF path means no tools.json to load."""
        integration.config.esp_idf_path = None
        result = await integration._load_tools_json()
        assert result is None

    @pytest.mark.asyncio
    async def test_returns_none_when_file_missing(self, integration, config):
        """A missing tools.json yields None rather than raising."""
        # tools.json does not exist by default in our tmp_path fixture
        tools_json_path = config.esp_idf_path / "tools" / "tools.json"
        if tools_json_path.exists():
            tools_json_path.unlink()
        result = await integration._load_tools_json()
        assert result is None
class TestToolsForTarget:
    """Tests for _tools_for_target."""

    @staticmethod
    def _entry(name, targets, versions, description=None):
        """Build one tools.json tool record; description only when given."""
        record = {"name": name, "supported_targets": targets, "versions": versions}
        if description is not None:
            record["description"] = description
        return record

    def test_returns_matching_tools(self, integration):
        """Only tools listing the requested chip are returned."""
        catalog = {
            "tools": [
                self._entry(
                    "xtensa-esp-elf",
                    ["esp32", "esp32s2", "esp32s3"],
                    [{"name": "14.2.0", "status": "recommended"}],
                    description="Xtensa compiler",
                ),
                self._entry(
                    "riscv32-esp-elf",
                    ["esp32c3", "esp32c6", "esp32h2"],
                    [{"name": "14.2.0", "status": "recommended"}],
                    description="RISC-V compiler",
                ),
            ]
        }
        matched = integration._tools_for_target(catalog, "esp32")
        assert len(matched) == 1
        assert matched[0]["name"] == "xtensa-esp-elf"
        assert matched[0]["version"] == "14.2.0"

    def test_handles_all_supported_targets(self, integration):
        """supported_targets == "all" matches every chip."""
        catalog = {
            "tools": [
                self._entry(
                    "cmake",
                    "all",
                    [{"name": "3.24.0", "status": "recommended"}],
                    description="Build system",
                ),
                self._entry(
                    "xtensa-esp-elf",
                    ["esp32"],
                    [{"name": "14.2.0", "status": "recommended"}],
                    description="Xtensa compiler",
                ),
            ]
        }
        matched = integration._tools_for_target(catalog, "esp32")
        assert len(matched) == 2
        names = [t["name"] for t in matched]
        assert "cmake" in names
        assert "xtensa-esp-elf" in names

    def test_no_matching_target(self, integration):
        """A chip absent from every supported_targets list yields nothing."""
        catalog = {
            "tools": [
                self._entry(
                    "xtensa-esp-elf",
                    ["esp32"],
                    [{"name": "14.2.0", "status": "recommended"}],
                    description="Xtensa compiler",
                ),
            ]
        }
        assert len(integration._tools_for_target(catalog, "esp32c3")) == 0

    def test_supported_targets_converted_to_list(self, integration):
        """A bare "all" string is normalized to the list ["all"]."""
        catalog = {
            "tools": [
                self._entry(
                    "cmake",
                    "all",
                    [{"name": "3.24.0", "status": "recommended"}],
                    description="Build system",
                ),
            ]
        }
        matched = integration._tools_for_target(catalog, "esp32")
        assert matched[0]["supported_targets"] == ["all"]

    def test_empty_tools(self, integration):
        """An empty catalog yields an empty result."""
        assert integration._tools_for_target({"tools": []}, "esp32") == []

    def test_picks_recommended_version(self, integration):
        """Among several versions the "recommended" one is chosen."""
        catalog = {
            "tools": [
                self._entry(
                    "cmake",
                    "all",
                    [
                        {"name": "3.20.0", "status": "deprecated"},
                        {"name": "3.24.0", "status": "recommended"},
                        {"name": "3.28.0", "status": "preview"},
                    ],
                ),
            ]
        }
        matched = integration._tools_for_target(catalog, "esp32")
        assert matched[0]["version"] == "3.24.0"
# ------------------------------------------------------------------ #
# 4. TARGET_ARCH constant tests
# ------------------------------------------------------------------ #
class TestTargetArch:
    """Tests for the TARGET_ARCH constant."""

    def test_all_ten_targets_present(self):
        """The key set matches the ten documented chips exactly."""
        assert set(TARGET_ARCH.keys()) == {
            "esp32", "esp32s2", "esp32s3",
            "esp32c2", "esp32c3", "esp32c5", "esp32c6", "esp32c61",
            "esp32h2", "esp32p4",
        }

    def test_count(self):
        assert len(TARGET_ARCH) == 10

    def test_only_valid_architectures(self):
        """Every mapped value is one of the two known ISAs."""
        valid_archs = {"xtensa", "riscv"}
        for target, arch in TARGET_ARCH.items():
            assert arch in valid_archs, f"{target} has unexpected arch {arch!r}"

    def test_xtensa_targets(self):
        """Exactly the three classic chips map to xtensa."""
        assert {k for k, v in TARGET_ARCH.items() if v == "xtensa"} == {
            "esp32", "esp32s2", "esp32s3",
        }

    def test_riscv_targets(self):
        """All C-, H- and P-series chips map to riscv."""
        assert {k for k, v in TARGET_ARCH.items() if v == "riscv"} == {
            "esp32c2", "esp32c3", "esp32c5", "esp32c6", "esp32c61", "esp32h2", "esp32p4",
        }
# ------------------------------------------------------------------ #
# 5. Mocked invocation tests (tool / resource / prompt functions)
# ------------------------------------------------------------------ #
class TestIdfBuildProject:
    """Test idf_build_project tool function execution (mocked subprocess)."""

    @pytest.fixture
    def project_dir(self, tmp_path):
        """Create a minimal ESP-IDF project directory with CMakeLists.txt."""
        proj = tmp_path / "my_project"
        proj.mkdir()
        (proj / "CMakeLists.txt").write_text("cmake_minimum_required(VERSION 3.16)\n")
        return proj

    @pytest.mark.asyncio
    async def test_build_validates_target(self, mock_app, config, mock_context, project_dir):
        """Invalid target returns error before any subprocess is spawned."""
        IDFIntegration(mock_app, config)
        build_fn = mock_app._registered_tools["idf_build_project"]
        result = await build_fn(mock_context, project_path=str(project_dir), target="esp8266")
        assert result["success"] is False
        assert "Unknown target" in result["error"]

    @pytest.mark.asyncio
    async def test_build_checks_cmakelists_exists(self, mock_app, config, mock_context, tmp_path):
        """Missing CMakeLists.txt returns error."""
        empty_dir = tmp_path / "no_cmake"
        empty_dir.mkdir()
        # Put the directory inside a project root so path validation passes
        config.project_roots = [tmp_path]
        IDFIntegration(mock_app, config)
        build_fn = mock_app._registered_tools["idf_build_project"]
        result = await build_fn(mock_context, project_path=str(empty_dir), target="esp32")
        assert result["success"] is False
        assert "CMakeLists.txt" in result["error"]

    @pytest.mark.asyncio
    async def test_build_calls_set_target_and_build(
        self, mock_app, config, mock_context, project_dir
    ):
        """Mocks _run_idf_py and verifies set-target + build are called."""
        config.project_roots = [project_dir.parent]
        integration = IDFIntegration(mock_app, config)
        build_fn = mock_app._registered_tools["idf_build_project"]
        calls = []

        # Record each idf.py invocation's args instead of spawning anything.
        async def fake_run_idf_py(args, timeout=300.0):
            calls.append(args)
            return {"success": True, "output": "ok", "stderr": ""}

        integration._run_idf_py = fake_run_idf_py
        result = await build_fn(mock_context, project_path=str(project_dir), target="esp32s3")
        assert result["success"] is True
        # Should have called set-target then build
        assert len(calls) == 2
        assert "set-target" in calls[0]
        assert "esp32s3" in calls[0]
        assert "build" in calls[1]

    @pytest.mark.asyncio
    async def test_build_clean_runs_fullclean(
        self, mock_app, config, mock_context, project_dir
    ):
        """When clean=True, verifies fullclean is called before set-target and build."""
        config.project_roots = [project_dir.parent]
        integration = IDFIntegration(mock_app, config)
        build_fn = mock_app._registered_tools["idf_build_project"]
        calls = []

        # Record each idf.py invocation's args instead of spawning anything.
        async def fake_run_idf_py(args, timeout=300.0):
            calls.append(args)
            return {"success": True, "output": "ok", "stderr": ""}

        integration._run_idf_py = fake_run_idf_py
        result = await build_fn(
            mock_context, project_path=str(project_dir), target="esp32", clean=True,
        )
        assert result["success"] is True
        # First call should be fullclean, then set-target, then build
        assert len(calls) == 3
        assert "fullclean" in calls[0]
        assert "set-target" in calls[1]
        assert "build" in calls[2]

    @pytest.mark.asyncio
    async def test_build_project_path_validation(self, mock_app, config, mock_context, tmp_path):
        """Path outside project_roots is rejected."""
        allowed_root = tmp_path / "allowed"
        allowed_root.mkdir()
        outside_dir = tmp_path / "outside"
        outside_dir.mkdir()
        (outside_dir / "CMakeLists.txt").write_text("# stub")
        config.project_roots = [allowed_root]
        # Clear idf_path so it can't be used as fallback
        config.esp_idf_path = None
        IDFIntegration(mock_app, config)
        build_fn = mock_app._registered_tools["idf_build_project"]
        result = await build_fn(mock_context, project_path=str(outside_dir), target="esp32")
        assert result["success"] is False
        assert "outside" in result["error"].lower() or "roots" in result["error"].lower()
class TestIdfFlashProject:
    """Test idf_flash_project tool function execution (mocked subprocess)."""

    @pytest.fixture
    def built_project(self, tmp_path):
        """Create a project directory that looks like it has been built."""
        proj = tmp_path / "built_project"
        proj.mkdir()
        (proj / "CMakeLists.txt").write_text("# stub")
        (proj / "build").mkdir()
        return proj

    @pytest.mark.asyncio
    async def test_flash_validates_baud_rate(self, mock_app, config, mock_context, built_project):
        """Invalid baud rate returns error."""
        config.project_roots = [built_project.parent]
        IDFIntegration(mock_app, config)
        flash_fn = mock_app._registered_tools["idf_flash_project"]
        result = await flash_fn(
            mock_context,
            project_path=str(built_project),
            port="/dev/ttyUSB0",
            baud=12345,
        )
        assert result["success"] is False
        assert "Invalid baud rate" in result["error"]

    @pytest.mark.asyncio
    async def test_flash_calls_idf_py_flash(self, mock_app, config, mock_context, built_project):
        """Mocks _run_idf_py and verifies args include --port and flash."""
        config.project_roots = [built_project.parent]
        integration = IDFIntegration(mock_app, config)
        flash_fn = mock_app._registered_tools["idf_flash_project"]
        calls = []

        # Record each idf.py invocation's args instead of spawning anything.
        async def fake_run_idf_py(args, timeout=300.0):
            calls.append(args)
            return {"success": True, "output": "flash ok", "stderr": ""}

        integration._run_idf_py = fake_run_idf_py
        result = await flash_fn(
            mock_context,
            project_path=str(built_project),
            port="/dev/ttyUSB0",
            baud=460800,
        )
        assert result["success"] is True
        assert len(calls) == 1
        cmd_args = calls[0]
        assert "flash" in cmd_args
        assert "-p" in cmd_args
        assert "/dev/ttyUSB0" in cmd_args
        assert "-b" in cmd_args
        assert "460800" in cmd_args

    @pytest.mark.asyncio
    async def test_flash_project_path_validation(self, mock_app, config, mock_context, tmp_path):
        """Path outside project_roots is rejected."""
        allowed_root = tmp_path / "allowed"
        allowed_root.mkdir()
        outside_dir = tmp_path / "outside"
        outside_dir.mkdir()
        (outside_dir / "build").mkdir()
        config.project_roots = [allowed_root]
        config.esp_idf_path = None
        IDFIntegration(mock_app, config)
        flash_fn = mock_app._registered_tools["idf_flash_project"]
        result = await flash_fn(
            mock_context,
            project_path=str(outside_dir),
            port="/dev/ttyUSB0",
        )
        assert result["success"] is False
        assert "outside" in result["error"].lower() or "roots" in result["error"].lower()
class TestIdfMonitor:
    """Test idf_monitor tool function execution (mocked subprocess)."""

    @pytest.mark.asyncio
    async def test_monitor_calls_idf_py(self, mock_app, config, mock_context):
        """Mocks subprocess and verifies monitor command is issued."""
        config.get_idf_available = MagicMock(return_value=True)
        integration = IDFIntegration(mock_app, config)
        monitor_fn = mock_app._registered_tools["idf_monitor"]
        # Mock _build_idf_env to return a valid env
        integration._build_idf_env = AsyncMock(
            return_value={"PATH": "/usr/bin", "IDF_PATH": str(config.esp_idf_path)}
        )
        # Fake subprocess: returns captured serial output and a clean exit code.
        mock_proc = AsyncMock()
        mock_proc.communicate = AsyncMock(return_value=(b"serial output here", b""))
        mock_proc.returncode = 0
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc) as mock_exec:
            result = await monitor_fn(mock_context, port="/dev/ttyUSB0", duration=5)
        assert result["success"] is True
        assert result["port"] == "/dev/ttyUSB0"
        assert result["duration"] == 5
        # Verify the subprocess was called with monitor args
        # (call_args[0] is the positional-argument tuple passed to create_subprocess_exec)
        exec_args = mock_exec.call_args[0]
        assert "monitor" in exec_args
        assert "--no-reset" in exec_args
        assert "-p" in exec_args

    @pytest.mark.asyncio
    async def test_monitor_timeout_cleanup(self, mock_app, config, mock_context):
        """Process is terminated/killed on timeout."""
        config.get_idf_available = MagicMock(return_value=True)
        integration = IDFIntegration(mock_app, config)
        monitor_fn = mock_app._registered_tools["idf_monitor"]
        integration._build_idf_env = AsyncMock(
            return_value={"PATH": "/usr/bin", "IDF_PATH": str(config.esp_idf_path)}
        )
        mock_proc = AsyncMock()
        # First communicate raises TimeoutError, then terminate+communicate also times out
        mock_proc.communicate = AsyncMock(side_effect=asyncio.TimeoutError)
        mock_proc.returncode = None  # None = process still running when timeout hits
        mock_proc.terminate = MagicMock()  # terminate/kill are sync methods, so plain MagicMock
        mock_proc.kill = MagicMock()
        mock_proc.wait = AsyncMock()
        with patch("asyncio.create_subprocess_exec", return_value=mock_proc):
            # Patching asyncio.wait_for globally forces every awaited wait in the
            # tool to time out, driving it down the cleanup path.
            with patch("asyncio.wait_for", side_effect=asyncio.TimeoutError):
                result = await monitor_fn(mock_context, port="/dev/ttyUSB0", duration=1)
        # A monitor timeout is the expected way to stop; the tool still reports success.
        assert result["success"] is True
        # Process should have been killed (terminate or kill called)
        assert mock_proc.kill.called or mock_proc.terminate.called
class TestIdfStatusResource:
    """Test esp://idf/status resource function."""

    @pytest.mark.asyncio
    async def test_status_resource_returns_valid_json(self, mock_app, config):
        """Call the registered resource function; verify it returns dict with expected keys."""
        integration = IDFIntegration(mock_app, config)
        status_fn = mock_app._registered_resources["esp://idf/status"]

        # Stub out the subprocess call so no real idf_tools.py runs.
        integration._run_idf_tools = AsyncMock(
            return_value={
                "success": True,
                "output": "xtensa-esp-elf 14.2.0: found\ncmake 3.24.0: not found\n",
                "stderr": "",
            }
        )
        # Skip the tools.json disk read entirely.
        integration._load_tools_json = AsyncMock(return_value=None)
        # Provide a version.txt for the resource to read.
        (config.esp_idf_path / "version.txt").write_text("v5.3\n")

        result = await status_fn()

        assert isinstance(result, dict)
        assert result["available"] is True
        assert result["idf_version"] == "v5.3"
        for key in (
            "idf_path",
            "idf_version",
            "installed_tools",
            "missing_tools",
            "missing_tool_names",
        ):
            assert key in result

    @pytest.mark.asyncio
    async def test_status_with_no_idf_path(self, mock_app, config):
        """config.esp_idf_path = None returns graceful response."""
        config.esp_idf_path = None
        IDFIntegration(mock_app, config)
        status_fn = mock_app._registered_resources["esp://idf/status"]

        result = await status_fn()

        assert isinstance(result, dict)
        assert result["available"] is False
        assert "error" in result
class TestIdfSetupTargetPrompt:
    """Test idf_setup_target prompt function."""

    @pytest.mark.asyncio
    async def test_prompt_returns_markdown(self, mock_app, config):
        """Call registered prompt with target='esp32p4'; verify non-empty output."""
        integration = IDFIntegration(mock_app, config)
        prompt_fn = mock_app._registered_prompts["idf_setup_target"]

        # Stub the tool-check subprocess with parseable output.
        integration._run_idf_tools = AsyncMock(
            return_value={
                "success": True,
                "output": "riscv32-esp-elf 14.2.0: found\ncmake 3.24.0: found\n",
                "stderr": "",
            }
        )
        tools_json = {
            "tools": [
                {
                    "name": "riscv32-esp-elf",
                    "description": "RISC-V compiler",
                    "supported_targets": ["esp32c3", "esp32c6", "esp32h2", "esp32p4"],
                    "versions": [{"name": "14.2.0", "status": "recommended"}],
                },
            ]
        }
        integration._load_tools_json = AsyncMock(return_value=tools_json)

        rendered = await prompt_fn(target="esp32p4")

        assert isinstance(rendered, str)
        assert rendered
        assert "esp32p4" in rendered
        assert "riscv" in rendered.lower()

    @pytest.mark.asyncio
    async def test_prompt_invalid_target(self, mock_app, config):
        """Call with bad target; verify architecture shows 'unknown'."""
        integration = IDFIntegration(mock_app, config)
        prompt_fn = mock_app._registered_prompts["idf_setup_target"]

        # Mock _run_idf_tools -- the prompt still runs check even for unknown targets
        integration._run_idf_tools = AsyncMock(
            return_value={"success": True, "output": "", "stderr": ""}
        )
        integration._load_tools_json = AsyncMock(return_value={"tools": []})

        rendered = await prompt_fn(target="badchip")

        assert isinstance(rendered, str)
        assert "badchip" in rendered
        assert "unknown" in rendered.lower()