Add document navigation tools and MCP prompts

New tools for Word document analysis:
- extract_entities: Pattern-based extraction of people, places, organizations
- get_chapter_summaries: Chapter previews with opening sentences and word counts
- save_reading_progress: Bookmark reading position to JSON file
- get_reading_progress: Resume reading from saved position
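
An illustrative call (hypothetical file path; the exact invocation depends on your MCP client):
  extract_entities(file_path="manuscript.docx", entity_types="people,places", min_occurrences=2)
  -> {"entities": {"people": [...], "places": [...]}, "summary": {...}, "extraction_time": ...}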

New MCP prompts (basic to advanced workflows):
- explore-document: Get started with a new document
- find-character: Track character mentions
- chapter-preview: Quick chapter overviews
- resume-reading: Continue where you left off
- document-analysis: Comprehensive multi-tool analysis
- character-journey: Track character arc through narrative
- document-comparison: Compare entities between chapters
- full-reading-session: Guided reading with bookmarking
- manuscript-review: Complete editorial workflow

Updated test counts for 19 total tools (6 universal + 10 word + 3 excel)
Ryan Malloy 2026-01-11 07:23:15 -07:00
parent 1abce7f26d
commit 4b38f6455c
5 changed files with 681 additions and 5 deletions


@@ -999,4 +999,438 @@ class WordMixin(MCPMixin):
"results": results,
"search_time": round(time.time() - start_time, 3),
"truncated": len(results) >= max_results
}
@mcp_tool(
name="extract_entities",
description="Extract named entities (people, places, organizations) from a Word document using pattern-based recognition. Great for identifying key characters, locations, and institutions mentioned in the text."
)
@handle_office_errors("Entity extraction")
async def extract_entities(
self,
file_path: str = Field(description="Path to Word document or URL"),
entity_types: str = Field(default="all", description="Entity types to extract: 'all', 'people', 'places', 'organizations', or comma-separated combination"),
min_occurrences: int = Field(default=1, description="Minimum occurrences for an entity to be included"),
include_context: bool = Field(default=True, description="Include sample context for each entity")
) -> dict[str, Any]:
"""Extract named entities from document using pattern-based recognition."""
from docx import Document
from collections import defaultdict
import re
start_time = time.time()
local_path = await resolve_office_file_path(file_path)
validation = await validate_office_file(local_path)
if not validation["is_valid"]:
raise OfficeFileError(f"Invalid file: {', '.join(validation['errors'])}")
doc = Document(local_path)
# Parse entity types to extract
if entity_types == "all":
extract_types = {"people", "places", "organizations"}
else:
extract_types = set(t.strip().lower() for t in entity_types.split(","))
# Entity containers with context tracking
entities = {
"people": defaultdict(lambda: {"count": 0, "contexts": []}),
"places": defaultdict(lambda: {"count": 0, "contexts": []}),
"organizations": defaultdict(lambda: {"count": 0, "contexts": []})
}
# Patterns for entity detection
# Titles indicating people
title_pattern = re.compile(
r'\b(Dr\.?|Mr\.?|Mrs\.?|Ms\.?|Miss|Professor|Prof\.?|Sister|Father|Rev\.?|'
r'President|Director|Nurse|RN|LPN|MD)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)',
re.IGNORECASE
)
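# Note: re.IGNORECASE applies to the whole pattern, so the [A-Z][a-z]+ name part
# also matches lowercase words following a title (e.g. "dr smith").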
# Organization patterns
org_suffixes = re.compile(
r'\b([A-Z][a-zA-Z\s\'\-]+(?:Hospital|Medical Center|Center|Clinic|University|'
r'College|School|Association|Institute|Foundation|Department|Administration|'
r'Committee|Board|Agency|Service|Company|Inc|Corp|LLC|VA|ANA))\b'
)
# Place patterns (cities, states, geographic locations)
place_patterns = re.compile(
r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*),\s*((?:[A-Z]{2}|[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*))\b|'
r'\b((?:North|South|East|West)\s+[A-Z][a-z]+)\b|'
r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\s+(?:City|County|State|Valley|Mountain|River|Lake|Island)\b'
)
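# Capture groups, read by the extraction loop below:
#   groups 1 + 2 -> "City, State" pairs
#   group 3      -> directional names ("North ...", "South ...", etc.)
#   group 4      -> the name before a feature word (City/County/Valley/River/...);
#                   the feature word itself is matched but not captured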
# Known US states for validation
us_states = {
'Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado',
'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho',
'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana',
'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota',
'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada',
'New Hampshire', 'New Jersey', 'New Mexico', 'New York',
'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon',
'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota',
'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington',
'West Virginia', 'Wisconsin', 'Wyoming', 'DC', 'ID', 'WA', 'NY',
'CA', 'ND', 'MN', 'IA', 'MT', 'OR', 'NV', 'AZ', 'NM', 'CO', 'WY'
}
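# Mixes full state names with a few two-letter codes; the "City, State" branch
# below also accepts any two-letter abbreviation via the len(state) == 2 check.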
# Lowercase title words (defined for people detection; not referenced by the matching logic below)
common_titles = {'dr', 'mr', 'mrs', 'ms', 'miss', 'professor', 'prof',
'sister', 'father', 'rev', 'president', 'director', 'nurse'}
current_chapter = "Document Start"
for para_idx, para in enumerate(doc.paragraphs):
text = para.text
style_name = para.style.name if para.style else ""
# Track chapters for context
if "heading" in style_name.lower() and "1" in style_name:
current_chapter = text.strip()[:60]
# Skip very short paragraphs
if len(text) < 10:
continue
# Extract people
if "people" in extract_types:
for match in title_pattern.finditer(text):
title = match.group(1)
name = match.group(2).strip()
full_name = f"{title} {name}".strip()
# Clean up the name
if len(name) >= 2:
entities["people"][full_name]["count"] += 1
if include_context and len(entities["people"][full_name]["contexts"]) < 3:
# Get surrounding context
start = max(0, match.start() - 30)
end = min(len(text), match.end() + 50)
context = text[start:end].strip()
entities["people"][full_name]["contexts"].append({
"text": f"...{context}...",
"chapter": current_chapter,
"paragraph": para_idx
})
# Also look for standalone capitalized names after verbs
name_after_verb = re.finditer(
r'\b(?:said|told|asked|replied|answered|explained|noted|added|mentioned)\s+'
r'([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)\b',
text
)
for match in name_after_verb:
name = match.group(1).strip()
if len(name) >= 3 and name not in us_states:
entities["people"][name]["count"] += 1
if include_context and len(entities["people"][name]["contexts"]) < 3:
start = max(0, match.start() - 20)
end = min(len(text), match.end() + 40)
context = text[start:end].strip()
entities["people"][name]["contexts"].append({
"text": f"...{context}...",
"chapter": current_chapter,
"paragraph": para_idx
})
# Extract organizations
if "organizations" in extract_types:
for match in org_suffixes.finditer(text):
org_name = match.group(1).strip()
if len(org_name) >= 5:
entities["organizations"][org_name]["count"] += 1
if include_context and len(entities["organizations"][org_name]["contexts"]) < 3:
start = max(0, match.start() - 20)
end = min(len(text), match.end() + 40)
context = text[start:end].strip()
entities["organizations"][org_name]["contexts"].append({
"text": f"...{context}...",
"chapter": current_chapter,
"paragraph": para_idx
})
# Extract places
if "places" in extract_types:
for match in place_patterns.finditer(text):
# Try different capture groups
place = None
if match.group(1) and match.group(2): # City, State pattern
city = match.group(1).strip()
state = match.group(2).strip()
if state in us_states or len(state) == 2:
place = f"{city}, {state}"
elif match.group(3): # Directional places
place = match.group(3).strip()
elif match.group(4): # Geographic features
place = match.group(4).strip()
if place and len(place) >= 3:
entities["places"][place]["count"] += 1
if include_context and len(entities["places"][place]["contexts"]) < 3:
start = max(0, match.start() - 20)
end = min(len(text), match.end() + 40)
context = text[start:end].strip()
entities["places"][place]["contexts"].append({
"text": f"...{context}...",
"chapter": current_chapter,
"paragraph": para_idx
})
# Filter by minimum occurrences and prepare output
def filter_and_sort(entity_dict, min_count):
filtered = []
for name, data in entity_dict.items():
if data["count"] >= min_count:
entry = {
"name": name,
"occurrences": data["count"]
}
if include_context and data["contexts"]:
entry["sample_contexts"] = data["contexts"]
filtered.append(entry)
return sorted(filtered, key=lambda x: x["occurrences"], reverse=True)
result = {
"entities": {},
"summary": {
"total_entities": 0,
"by_type": {}
},
"extraction_time": round(time.time() - start_time, 3)
}
for entity_type in extract_types:
if entity_type in entities:
filtered = filter_and_sort(entities[entity_type], min_occurrences)
result["entities"][entity_type] = filtered
result["summary"]["by_type"][entity_type] = len(filtered)
result["summary"]["total_entities"] += len(filtered)
return result
@mcp_tool(
name="get_chapter_summaries",
description="Get brief summaries/previews of each chapter in a Word document. Extracts the opening sentences of each chapter to give a quick overview of content."
)
@handle_office_errors("Chapter summaries")
async def get_chapter_summaries(
self,
file_path: str = Field(description="Path to Word document or URL"),
sentences_per_chapter: int = Field(default=3, description="Number of opening sentences to include per chapter"),
include_word_counts: bool = Field(default=True, description="Include word count for each chapter")
) -> dict[str, Any]:
"""Extract chapter summaries/previews from document."""
from docx import Document
import re
start_time = time.time()
local_path = await resolve_office_file_path(file_path)
validation = await validate_office_file(local_path)
if not validation["is_valid"]:
raise OfficeFileError(f"Invalid file: {', '.join(validation['errors'])}")
doc = Document(local_path)
chapters = []
current_chapter = None
chapter_text = []
chapter_word_count = 0
chapter_pattern = re.compile(r'^chapter\s*(\d+)', re.IGNORECASE)
def extract_preview(text_paragraphs, num_sentences):
"""Extract first N sentences from collected paragraphs."""
full_text = " ".join(text_paragraphs)
# Simple sentence splitting
sentences = re.split(r'(?<=[.!?])\s+', full_text)
preview_sentences = sentences[:num_sentences]
return " ".join(preview_sentences).strip()
def save_current_chapter():
"""Save the current chapter's data."""
nonlocal current_chapter, chapter_text, chapter_word_count
if current_chapter:
preview = extract_preview(chapter_text, sentences_per_chapter)
chapter_data = {
"chapter_number": current_chapter["number"],
"title": current_chapter["title"],
"paragraph_index": current_chapter["paragraph_index"],
"preview": preview if preview else "(No text content found)",
}
if include_word_counts:
chapter_data["word_count"] = chapter_word_count
chapters.append(chapter_data)
for para_idx, para in enumerate(doc.paragraphs):
text = para.text.strip()
style_name = para.style.name if para.style else ""
# Check if this is a chapter heading
chapter_match = chapter_pattern.match(text)
if chapter_match:
# Save previous chapter first
save_current_chapter()
# Start new chapter
current_chapter = {
"number": int(chapter_match.group(1)),
"title": text[:100],
"paragraph_index": para_idx
}
chapter_text = []
chapter_word_count = 0
elif current_chapter:
# Accumulate text for current chapter
if text:
word_count = len(text.split())
chapter_word_count += word_count
# Only collect first portion of text for preview
if len(" ".join(chapter_text)) < 1000:
chapter_text.append(text)
# Don't forget the last chapter
save_current_chapter()
# Calculate statistics
total_words = sum(c.get("word_count", 0) for c in chapters)
avg_words = total_words // len(chapters) if chapters else 0
return {
"chapters": chapters,
"summary": {
"total_chapters": len(chapters),
"total_words": total_words,
"average_words_per_chapter": avg_words,
"shortest_chapter": min((c for c in chapters), key=lambda x: x.get("word_count", 0), default=None),
"longest_chapter": max((c for c in chapters), key=lambda x: x.get("word_count", 0), default=None)
},
"extraction_time": round(time.time() - start_time, 3)
}
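# Each chapter entry has the shape (illustrative values; word_count only when include_word_counts=True):
#   {"chapter_number": 1, "title": "Chapter 1 ...", "paragraph_index": 12,
#    "preview": "Opening sentences...", "word_count": 2450}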
@mcp_tool(
name="save_reading_progress",
description="Save your reading progress in a Word document. Creates a bookmark file to track which chapter/paragraph you're on, so you can resume reading later."
)
@handle_office_errors("Save reading progress")
async def save_reading_progress(
self,
file_path: str = Field(description="Path to Word document"),
chapter_number: int = Field(default=1, description="Current chapter number"),
paragraph_index: int = Field(default=0, description="Current paragraph index"),
notes: str = Field(default="", description="Optional notes about where you left off")
) -> dict[str, Any]:
"""Save reading progress to a bookmark file."""
import json
from datetime import datetime
local_path = await resolve_office_file_path(file_path)
validation = await validate_office_file(local_path)
if not validation["is_valid"]:
raise OfficeFileError(f"Invalid file: {', '.join(validation['errors'])}")
# Create bookmark file path (same location as document)
doc_dir = os.path.dirname(local_path)
doc_name = os.path.splitext(os.path.basename(local_path))[0]
bookmark_path = os.path.join(doc_dir, f".{doc_name}.reading_progress.json")
# Load existing bookmarks or create new
bookmarks = {"history": []}
if os.path.exists(bookmark_path):
try:
with open(bookmark_path, 'r') as f:
bookmarks = json.load(f)
except (json.JSONDecodeError, IOError):
bookmarks = {"history": []}
# Create new bookmark entry
bookmark = {
"timestamp": datetime.now().isoformat(),
"chapter": chapter_number,
"paragraph_index": paragraph_index,
"notes": notes
}
# Update current position and add to history
bookmarks["current"] = bookmark
bookmarks["document"] = os.path.basename(local_path)
bookmarks["history"].append(bookmark)
# Keep only last 50 history entries
if len(bookmarks["history"]) > 50:
bookmarks["history"] = bookmarks["history"][-50:]
# Save bookmark file
with open(bookmark_path, 'w') as f:
json.dump(bookmarks, f, indent=2)
return {
"saved": True,
"bookmark_file": bookmark_path,
"position": {
"chapter": chapter_number,
"paragraph_index": paragraph_index
},
"notes": notes,
"timestamp": bookmark["timestamp"],
"history_entries": len(bookmarks["history"])
}
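# On-disk bookmark file layout written above (illustrative values; keys match the code):
# {
#   "current": {"timestamp": "<ISO 8601>", "chapter": 3, "paragraph_index": 42, "notes": "..."},
#   "document": "example.docx",
#   "history": [ ...same-shaped entries, oldest first, capped at 50... ]
# }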
@mcp_tool(
name="get_reading_progress",
description="Retrieve your saved reading progress for a Word document. Shows where you left off and your reading history."
)
@handle_office_errors("Get reading progress")
async def get_reading_progress(
self,
file_path: str = Field(description="Path to Word document")
) -> dict[str, Any]:
"""Retrieve saved reading progress from bookmark file."""
import json
local_path = await resolve_office_file_path(file_path)
validation = await validate_office_file(local_path)
if not validation["is_valid"]:
raise OfficeFileError(f"Invalid file: {', '.join(validation['errors'])}")
# Find bookmark file
doc_dir = os.path.dirname(local_path)
doc_name = os.path.splitext(os.path.basename(local_path))[0]
bookmark_path = os.path.join(doc_dir, f".{doc_name}.reading_progress.json")
if not os.path.exists(bookmark_path):
return {
"has_progress": False,
"message": "No reading progress saved for this document. Use save_reading_progress to save your position."
}
# Load bookmarks
try:
with open(bookmark_path, 'r') as f:
bookmarks = json.load(f)
except (json.JSONDecodeError, IOError) as e:
return {
"has_progress": False,
"error": f"Could not read bookmark file: {str(e)}"
}
current = bookmarks.get("current", {})
history = bookmarks.get("history", [])
return {
"has_progress": True,
"document": bookmarks.get("document", os.path.basename(local_path)),
"current_position": {
"chapter": current.get("chapter"),
"paragraph_index": current.get("paragraph_index"),
"notes": current.get("notes", ""),
"last_read": current.get("timestamp")
},
"reading_sessions": len(history),
"recent_history": history[-5:] if history else [],
"bookmark_file": bookmark_path
}


@@ -14,6 +14,7 @@ import os
import tempfile
from fastmcp import FastMCP
from fastmcp.prompts import Prompt
from .mixins import UniversalMixin, WordMixin, ExcelMixin, PowerPointMixin
@@ -39,6 +40,247 @@ powerpoint_mixin.register_all(app, prefix="")
# Note: All helper functions are still available from server_legacy.py for import by mixins
# This allows gradual migration while maintaining backward compatibility
# ==================== MCP Prompts ====================
# Prompts help users understand how to use tools effectively
# Organized from basic to advanced multi-step workflows
@app.prompt(
name="explore-document",
description="Basic: Start exploring a new document - get structure, identify key content"
)
def prompt_explore_document(file_path: str = "") -> list:
"""Guide for exploring a new Word document."""
path_hint = f"the document at `{file_path}`" if file_path else "your document"
return [
{
"role": "user",
"content": f"""I want to explore {path_hint}. Please help me understand it by:
1. First, use `get_document_outline` to show me the document structure (chapters, sections, headings)
2. Then use `check_style_consistency` to identify any formatting issues or problems
3. Finally, give me a summary of what the document contains based on the outline
This will help me understand what I'm working with before diving into the content."""
}
]
@app.prompt(
name="find-character",
description="Basic: Find all mentions of a person/character in a document"
)
def prompt_find_character(file_path: str = "", character_name: str = "") -> list:
"""Guide for finding character mentions."""
path_hint = f"in `{file_path}`" if file_path else "in my document"
name_hint = f'"{character_name}"' if character_name else "a character"
return [
{
"role": "user",
"content": f"""Help me find all mentions of {name_hint} {path_hint}.
Use `search_document` to find occurrences with context. I want to see:
- Each mention with surrounding text
- Which chapter each mention appears in
- A count of total appearances
This will help me track the character's journey through the narrative."""
}
]
@app.prompt(
name="chapter-preview",
description="Basic: Get a quick preview of each chapter without reading the full content"
)
def prompt_chapter_preview(file_path: str = "") -> list:
"""Guide for getting chapter previews."""
path_hint = f"from `{file_path}`" if file_path else ""
return [
{
"role": "user",
"content": f"""I want a quick preview of each chapter {path_hint}.
Use `get_chapter_summaries` with 3-4 sentences per chapter to give me a preview of what each chapter covers. Include word counts so I know which chapters are longest.
This gives me a roadmap before I start reading in depth."""
}
]
@app.prompt(
name="resume-reading",
description="Intermediate: Check where you left off and continue reading"
)
def prompt_resume_reading(file_path: str = "") -> list:
"""Guide for resuming reading."""
path_hint = f"in `{file_path}`" if file_path else ""
return [
{
"role": "user",
"content": f"""I want to continue reading where I left off {path_hint}.
1. First, use `get_reading_progress` to see where I was
2. Then use `convert_to_markdown` with `chapter_name` set to that chapter to show me the content
3. When I tell you where to stop, use `save_reading_progress` to bookmark my position
This is my reading workflow for long documents."""
}
]
@app.prompt(
name="document-analysis",
description="Intermediate: Comprehensive analysis - structure, entities, and key information"
)
def prompt_document_analysis(file_path: str = "") -> list:
"""Guide for comprehensive document analysis."""
path_hint = f"the document `{file_path}`" if file_path else "my document"
return [
{
"role": "user",
"content": f"""Perform a comprehensive analysis of {path_hint}:
1. **Structure Analysis** (`get_document_outline`): Map out all chapters, sections, and headings
2. **Quality Check** (`check_style_consistency`): Identify any formatting issues
3. **Entity Extraction** (`extract_entities`): Find all people, places, and organizations mentioned
4. **Chapter Overview** (`get_chapter_summaries`): Generate previews of each chapter
Summarize the findings in a report format. This gives me a complete picture of the document."""
}
]
@app.prompt(
name="character-journey",
description="Advanced: Track a character's complete journey through a document"
)
def prompt_character_journey(file_path: str = "", character_name: str = "") -> list:
"""Guide for tracking a character's journey."""
path_hint = f"in `{file_path}`" if file_path else ""
name_hint = f'"{character_name}"' if character_name else "the main character"
return [
{
"role": "user",
"content": f"""Help me track {name_hint}'s complete journey {path_hint}:
**Step 1 - Get Context**
Use `get_document_outline` to understand the chapter structure
**Step 2 - Find All Mentions**
Use `search_document` to find every mention of the character with context
**Step 3 - Analyze by Chapter**
For each chapter where the character appears, use `convert_to_markdown` with `chapter_name` to extract the relevant sections
**Step 4 - Summarize the Journey**
Create a timeline or narrative summary of the character's arc through the story
This multi-step workflow helps me understand a character's complete narrative arc."""
}
]
@app.prompt(
name="document-comparison",
description="Advanced: Compare entities and themes between chapters or sections"
)
def prompt_document_comparison(file_path: str = "") -> list:
"""Guide for comparing document sections."""
path_hint = f"from `{file_path}`" if file_path else ""
return [
{
"role": "user",
"content": f"""Help me compare different sections of the document {path_hint}:
**Step 1 - Get Structure**
Use `get_document_outline` to identify all chapters/sections
**Step 2 - Extract Entities by Section**
Use `extract_entities` with different chapters to see which characters/places appear where
**Step 3 - Get Chapter Summaries**
Use `get_chapter_summaries` to understand the focus of each section
**Step 4 - Compare and Contrast**
Based on the data, identify:
- Which characters appear in which chapters
- How locations shift through the narrative
- Patterns in entity distribution
Create a comparison matrix or analysis."""
}
]
@app.prompt(
name="full-reading-session",
description="Advanced: Complete guided reading session with bookmarking"
)
def prompt_full_reading_session(file_path: str = "", start_chapter: int = 1) -> list:
"""Guide for a complete reading session."""
path_hint = f"of `{file_path}`" if file_path else ""
return [
{
"role": "user",
"content": f"""Let's do a guided reading session {path_hint}:
**Setup Phase**
1. Use `get_reading_progress` to check if I have a saved position
2. Use `get_document_outline` to show the chapter list
3. Use `check_style_consistency` to flag any document issues
**Reading Phase**
4. Use `convert_to_markdown` with `chapter_name="Chapter {start_chapter}"` to show that chapter
5. When I'm done, I'll say "stop at paragraph X" and you use `save_reading_progress`
**Analysis Phase (Optional)**
6. Use `extract_entities` with `entity_types="people"` to show who appears in what I've read
7. Use `search_document` if I want to find specific references
This creates an interactive, bookmark-enabled reading experience."""
}
]
@app.prompt(
name="manuscript-review",
description="Advanced: Complete manuscript review workflow for editors"
)
def prompt_manuscript_review(file_path: str = "") -> list:
"""Guide for comprehensive manuscript review."""
path_hint = f"manuscript at `{file_path}`" if file_path else "the manuscript"
return [
{
"role": "user",
"content": f"""Help me conduct a complete editorial review of {path_hint}:
**Phase 1: Structure Assessment**
1. `get_document_outline` - Map the complete structure
2. `check_style_consistency` - Identify formatting issues, missing chapters, style problems
3. Report any structural issues found
**Phase 2: Content Analysis**
4. `get_chapter_summaries` - Get overview of each chapter's content
5. `extract_entities` - Extract all characters, locations, organizations
6. Flag any inconsistencies (characters who appear then disappear, etc.)
**Phase 3: Deep Dive**
7. For each chapter with issues, use `convert_to_markdown` to review
8. Use `search_document` to verify specific details if needed
9. Document findings with chapter numbers and paragraph indices
**Phase 4: Final Report**
Compile all findings into an editorial report with:
- Structure issues and recommendations
- Character/entity tracking
- Suggested fixes with specific locations
This is a complete editorial workflow for manuscript review."""
}
]
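# These prompts are exposed through the standard MCP prompt endpoints
# (prompts/list and prompts/get), so clients can request them by the names
# registered above, e.g. "explore-document", passing file_path as an argument.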
def main():
"""Entry point for the MCP Office Tools server."""
# CRITICAL: show_banner=False is required for stdio transport!


@@ -64,7 +64,7 @@ class TestMixinArchitecture:
word = WordMixin()
word.register_all(app)
word_tools = len(app._tool_manager._tools) - initial_tool_count - universal_tools
-assert word_tools == 6 # convert_to_markdown, extract_word_tables, analyze_word_structure, get_document_outline, check_style_consistency, search_document
+assert word_tools == 10 # convert_to_markdown, extract_word_tables, analyze_word_structure, get_document_outline, check_style_consistency, search_document, extract_entities, get_chapter_summaries, save_reading_progress, get_reading_progress
excel = ExcelMixin()
excel.register_all(app)


@@ -149,8 +149,8 @@ class TestMixinIntegration:
# Verify no duplicates
assert len(tool_names) == len(set(tool_names)), "Tool names should be unique"
-# Verify expected count: 6 universal + 6 word + 3 excel = 15
-assert len(tool_names) == 15, f"Expected 15 tools, got {len(tool_names)}: {list(tool_names.keys())}"
+# Verify expected count: 6 universal + 10 word + 3 excel = 19
+assert len(tool_names) == 19, f"Expected 19 tools, got {len(tool_names)}: {list(tool_names.keys())}"
if __name__ == "__main__":


@@ -28,14 +28,14 @@ class TestWordMixinRegistration:
mixin.register_all(app)
assert mixin is not None
-assert len(app._tool_manager._tools) == 6 # convert_to_markdown, extract_word_tables, analyze_word_structure, get_document_outline, check_style_consistency, search_document
+assert len(app._tool_manager._tools) == 10 # convert_to_markdown, extract_word_tables, analyze_word_structure, get_document_outline, check_style_consistency, search_document, extract_entities, get_chapter_summaries, save_reading_progress, get_reading_progress
def test_tool_names_registered(self):
"""Test that Word-specific tools are registered."""
app = FastMCP("Test Word")
WordMixin().register_all(app)
expected_tools = {"convert_to_markdown", "extract_word_tables", "analyze_word_structure", "get_document_outline", "check_style_consistency", "search_document"}
expected_tools = {"convert_to_markdown", "extract_word_tables", "analyze_word_structure", "get_document_outline", "check_style_consistency", "search_document", "extract_entities", "get_chapter_summaries", "save_reading_progress", "get_reading_progress"}
registered_tools = set(app._tool_manager._tools.keys())
assert expected_tools.issubset(registered_tools)