├── .python-version
├── mcp_code_analyzer
│   ├── server
│   │   ├── __init__.py
│   │   └── handlers.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── logger.py
│   │   ├── dependency_tools.py
│   │   ├── base.py
│   │   ├── project_tools.py
│   │   ├── reference_tools.py
│   │   ├── manager.py
│   │   ├── file_tools.py
│   │   ├── version_manager.py
│   │   └── search_tools.py
│   ├── __init__.py
│   ├── __main__.py
│   └── config.py
├── requirements.txt
├── .gitignore
├── LICENCE
├── pyproject.toml
├── README.md
└── uv.lock
/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 |
--------------------------------------------------------------------------------
/mcp_code_analyzer/server/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | chardet~=5.2.0
2 | astroid~=3.3.5
3 | pydantic~=2.10.2
4 | radon~=6.0.1
5 | networkx~=3.4.2
6 | mcp~=1.0.0
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | build/
8 | develop-eggs/
9 | dist/
10 | downloads/
11 | eggs/
12 | .eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | wheels/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 |
23 | # Virtual Environment
24 | .env
25 | .venv
26 | env/
27 | venv/
28 | ENV/
29 |
30 | # IDE
31 | .idea/
32 | .vscode/
33 | *.swp
34 | *.swo
35 | *.swn
36 | .vscode/
37 | .vs/
38 | *.sublime-project
39 | *.sublime-workspace
40 |
41 | # OS
42 | .DS_Store
43 | Thumbs.db
44 | *.db
45 | desktop.ini
46 | .directory
47 |
48 | # Project specific
49 | .code_analyzer_cache/
50 | test.db
51 | *.log
52 |
53 |
54 | # Test Coverage
55 | .coverage
56 | coverage.xml
57 | htmlcov/
58 |
59 | # Documentation
60 | docs/_build/
61 |
62 | # Temporary files
63 | *.bak
64 | *.tmp
65 | *~
66 |
67 | # Environment variables
68 | .env
69 | .env.local
70 | .env.*.local
71 |
72 | # Debug logs
73 | npm-debug.log*
74 | yarn-debug.log*
75 | yarn-error.log*
76 | debug.log
77 |
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 MCP Code Analyzer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/mcp_code_analyzer/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """Tool implementations for MCP Code Analyzer"""
2 | from .base import BaseTool
3 | from .file_tools import MCPFileOperations
4 | from .pattern_tools import CodePatternAnalyzer, PatternUsageAnalyzer
5 | from .dependency_tools import FileDependencyAnalyzer
6 | from .analysis_tools import (
7 | CodeStructureAnalyzer,
8 | ImportAnalyzer,
9 | ProjectAnalyzer,
10 | CodeValidator,
11 | SyntaxChecker
12 | )
13 | from .reference_tools import FindReferences, PreviewChanges
14 | from .project_tools import ProjectStructure, ProjectStatistics, ProjectTechnology
15 | from .version_manager import VersionManager
16 | from .search_tools import PathFinder, ContentScanner
17 | from .modification_tools import CodeModifier
18 |
19 | __all__ = [
20 | "BaseTool",
21 | "MCPFileOperations",
22 | "ProjectStructure",
23 | "ProjectStatistics",
24 | "ProjectTechnology",
25 | "ProjectAnalyzer",
26 | "CodePatternAnalyzer",
27 | "PatternUsageAnalyzer",
28 | "FileDependencyAnalyzer",
29 | "CodeStructureAnalyzer",
30 | "ImportAnalyzer",
31 | "CodeValidator",
32 | "SyntaxChecker",
33 | "FindReferences",
34 | "PreviewChanges",
35 | "VersionManager",
36 | "PathFinder",
37 | "ContentScanner",
38 | "CodeModifier"
39 | ]
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "mcp-code-analyzer"
3 | version = "0.1.0"
4 | description = "A code analysis tool using Model Context Protocol"
5 | readme = "README.md"
6 | requires-python = ">=3.10"
7 | dependencies = [
8 | "mcp>=1.0.0",
9 | "astroid>=2.14.2",
10 | "radon>=5.1.0",
11 | "networkx>=3.0",
12 | "chardet>=4.0.0"
13 | ]
14 |
15 | [project.scripts]
16 | mcp-code-analyzer = "mcp_code_analyzer:main"
17 |
18 | [build-system]
19 | requires = ["hatchling"]
20 | build-backend = "hatchling.build"
21 |
22 | [tool.hatch.build.targets.wheel]
23 | packages = ["mcp_code_analyzer"]
24 |
25 | [tool.ruff]
26 | select = ["E", "F", "B", "I"]
27 | ignore = ["E501"]
28 | target-version = "py310"
29 |
30 | [tool.ruff.per-file-ignores]
31 | "__init__.py" = ["F401"]
32 |
33 | [tool.black]
34 | line-length = 88
35 | target-version = ["py310"]
36 | include = '\.pyi?$'
37 |
38 | [tool.isort]
39 | profile = "black"
40 | multi_line_output = 3
41 | line_length = 88
42 |
43 | [tool.mypy]
44 | python_version = "3.10"
45 | warn_return_any = true
46 | warn_unused_configs = true
47 | disallow_untyped_defs = true
48 | check_untyped_defs = true
49 |
50 | [project.urls]
51 | Homepage = "https://github.com/yourusername/mcp-code-analyzer"
52 | Issues = "https://github.com/yourusername/mcp-code-analyzer/issues"
--------------------------------------------------------------------------------
/mcp_code_analyzer/__init__.py:
--------------------------------------------------------------------------------
1 | """MCP Code Analyzer
2 | A code analysis tool using Model Context Protocol
3 | """
4 |
5 | from .server.handlers import main
6 | from .tools.project_tools import ProjectStructure, ProjectStatistics, ProjectTechnology
7 | from .tools.pattern_tools import (
8 | PatternUsageAnalyzer,
9 | CodePatternAnalyzer
10 | )
11 | from .tools.analysis_tools import (
12 | CodeStructureAnalyzer,
13 | ImportAnalyzer,
14 | ProjectAnalyzer,
15 | CodeValidator,
16 | SyntaxChecker
17 | )
18 | from .tools.reference_tools import FindReferences, PreviewChanges
19 | from .tools.dependency_tools import FileDependencyAnalyzer
20 | from .tools.file_tools import (
21 | MCPFileOperations,
22 | FileAnalyzer
23 | )
24 | from .tools.modification_tools import CodeModifier
25 | from .tools.search_tools import PathFinder, ContentScanner
26 | from .tools.version_manager import VersionManager
27 |
28 | __version__ = "0.1.0"
29 |
30 | __all__ = [
31 | # Main entrypoint
32 | "main",
33 |
34 | # Project Analysis
35 | "ProjectStructure",
36 | "ProjectStatistics",
37 | "ProjectTechnology",
38 | "ProjectAnalyzer",
39 |
40 | # Code Analysis
41 | "CodeStructureAnalyzer",
42 | "ImportAnalyzer",
43 | "CodeValidator",
44 | "SyntaxChecker",
45 |
46 | # Pattern Analysis
47 | "PatternUsageAnalyzer",
48 | "CodePatternAnalyzer",
49 |
50 | # File Operations
51 | "FileAnalyzer",
52 | "MCPFileOperations",
53 |
54 | # Code Modifications
55 | "CodeModifier",
56 |
57 | # Search and Reference
58 | "PathFinder",
59 | "ContentScanner",
60 | "FindReferences",
61 | "PreviewChanges",
62 |
63 | # Dependencies
64 | "FileDependencyAnalyzer",
65 |
66 | # Version Control
67 | "VersionManager"
68 | ]
--------------------------------------------------------------------------------
/mcp_code_analyzer/__main__.py:
--------------------------------------------------------------------------------
1 | # __main__.py
2 | import sys
3 | import logging
4 | import asyncio
5 | import locale
6 | from pathlib import Path
7 | from .server.handlers import main
8 |
9 | def configure_encoding():
10 | """Configure system encoding settings"""
11 | if sys.platform == 'win32':
12 | import io
13 | import codecs
14 | if isinstance(sys.stdout, io.TextIOWrapper):
15 | sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer)
16 | sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer)
17 | try:
18 | locale.setlocale(locale.LC_ALL, 'Turkish_Turkey.utf8')
19 | except locale.Error:
20 | try:
21 | locale.setlocale(locale.LC_ALL, 'tr_TR.UTF-8')
22 | except locale.Error:
23 | pass
24 |
25 | # Configure logging with encoding support
26 | logging.basicConfig(
27 | level=logging.INFO,
28 | format='%(asctime)s - %(levelname)s - %(message)s',
29 | datefmt='%Y-%m-%d %H:%M:%S',
30 | encoding='utf-8'
31 | )
32 | logger = logging.getLogger(__name__)
33 |
34 | def run():
35 | """Main entry point"""
36 | configure_encoding()
37 |
38 | analyze_paths = []
39 | try:
40 | path_start = sys.argv.index('--analyze-paths') + 1
41 | while path_start < len(sys.argv) and not sys.argv[path_start].startswith('--'):
42 | path = Path(sys.argv[path_start]).resolve()
43 | analyze_paths.append(str(path))
44 | path_start += 1
45 | except ValueError:
46 | analyze_paths = [str(Path.cwd())]
47 | except Exception as e:
48 | logger.error(f"Error parsing arguments: {e}")
49 | sys.exit(1)
50 |
51 | logger.info(f"Starting analysis with paths: {analyze_paths}")
52 |
53 | try:
54 | asyncio.run(main(analyze_paths))
55 | except KeyboardInterrupt:
56 | logger.info("Analysis interrupted by user")
57 | except Exception as e:
58 | logger.error(f"Error during analysis: {e}", exc_info=True)
59 | sys.exit(1)
60 |
61 | if __name__ == '__main__':
62 | run()
--------------------------------------------------------------------------------
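
A minimal sketch of starting the analyzer programmatically, equivalent to the `python -m mcp_code_analyzer --analyze-paths <path>` invocation handled by `run()` above. It assumes `main()` accepts a list of path strings, as it is called in `run()`; `handlers.py` itself is not shown in this dump, and the path below is a placeholder.

```python
import asyncio

from mcp_code_analyzer.server.handlers import main  # signature assumed from run() above

if __name__ == "__main__":
    # Placeholder project path; replace with a real directory to analyze.
    asyncio.run(main(["C:/Projects/path1"]))
```
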
/mcp_code_analyzer/config.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import Set, Dict, Any
3 | import logging
4 |
5 | # Configure logging
6 | logging.basicConfig(
7 | level=logging.INFO,
8 | format='%(asctime)s - %(levelname)s - %(message)s',
9 | datefmt='%Y-%m-%d %H:%M:%S'
10 | )
11 |
12 | @dataclass
13 | class SystemConfig:
14 | """System-wide configuration settings"""
15 |
16 | # Maximum file size to analyze (in bytes)
17 | MAX_FILE_SIZE: int = 1024 * 1024 # 1MB
18 |
19 | # Maximum directory depth for recursive analysis
20 | MAX_DEPTH: int = 10
21 |
22 | # Number of worker threads for parallel processing
23 | THREAD_POOL_SIZE: int = 4
24 |
25 | # Cache settings
26 | ENABLE_CACHE: bool = True
27 | MAX_CACHE_SIZE: int = 100 # Maximum number of cached results
28 | CACHE_TTL: int = 3600 # Cache time-to-live in seconds
29 |
30 | @dataclass
31 | class AnalysisConfig:
32 | """Analysis-specific configuration"""
33 |
34 | # Directories to exclude from analysis
35 | excluded_dirs: Set[str] = field(default_factory=lambda: {
36 | 'node_modules', 'release', 'dist', 'build', '.git', '.aws', '.next',
37 | '__pycache__', 'venv', '.venv', 'env', '.env', 'coverage',
38 | '.coverage', 'tmp', '.tmp', '.idea', '.vscode'
39 | })
40 |
41 | # File types to exclude from analysis
42 | excluded_files: Set[str] = field(default_factory=lambda: {
43 | '.pyc', '.pyo', '.pyd', '.so', '.dll', '.dylib', '.log',
44 | '.DS_Store', '.env', '.coverage', '.pytest_cache'
45 | })
46 |
47 | # File types to analyze
48 | analyzable_extensions: Set[str] = field(default_factory=lambda: {
49 | '.py', '.js', '.ts', '.jsx', '.tsx', '.vue', '.go', '.java', '.rs'
50 | })
51 |
52 | # Technology markers for detection
53 | tech_markers: Dict[str, Any] = field(default_factory=lambda: {
54 | "Python": [".py", "requirements.txt", "setup.py", "pyproject.toml"],
55 | "JavaScript": [".js", "package.json", "package-lock.json"],
56 | "TypeScript": [".ts", "tsconfig.json"],
57 | "React": [".jsx", ".tsx"],
58 | "Vue": [".vue"],
59 | "Docker": ["Dockerfile", "docker-compose.yml"],
60 | "Go": [".go", "go.mod"],
61 | "Java": [".java", "pom.xml", "build.gradle"],
62 | "Rust": [".rs", "Cargo.toml"]
63 | })
64 |
65 | # Global instances
66 | system_config = SystemConfig()
67 | analysis_config = AnalysisConfig()
--------------------------------------------------------------------------------
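
A minimal sketch of how the global configuration objects above can be inspected or tweaked before running an analysis; the extra excluded directory name is only an illustrative value.

```python
from mcp_code_analyzer.config import analysis_config, system_config

# Skip an additional directory during analysis (illustrative value).
analysis_config.excluded_dirs.add("generated")

# Inspect what will be analyzed and the per-file size cap.
print(sorted(analysis_config.analyzable_extensions))
print(system_config.MAX_FILE_SIZE)  # 1 MB by default
```
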
/mcp_code_analyzer/tools/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | from pathlib import Path
4 |
5 | class LogManager:
6 | """Centralized logging manager for MCP"""
7 |
8 | def __init__(self, log_dir: str = None):
9 | self.log_dir = Path(log_dir) if log_dir else Path.cwd() / "logs"
10 | self.log_dir.mkdir(parents=True, exist_ok=True)
11 |
12 | self.main_log = self.log_dir / "mcp_server.log"
13 |
14 | self.tool_log = self.log_dir / "mcp_tools.log"
15 |
16 | self._setup_logging()
17 |
18 | def _setup_logging(self):
19 | main_formatter = logging.Formatter(
20 | '%(asctime)s - %(levelname)s - [%(name)s] - %(message)s',
21 | datefmt='%Y-%m-%d %H:%M:%S'
22 | )
23 |
24 | tool_formatter = logging.Formatter(
25 | '%(asctime)s - %(levelname)s - [%(tool_name)s] - %(operation)s - %(message)s',
26 | datefmt='%Y-%m-%d %H:%M:%S'
27 | )
28 |
29 | main_handler = logging.FileHandler(self.main_log, encoding='utf-8')
30 | main_handler.setFormatter(main_formatter)
31 |
32 | tool_handler = logging.FileHandler(self.tool_log, encoding='utf-8')
33 | tool_handler.setFormatter(tool_formatter)
34 |
35 | console_handler = logging.StreamHandler(sys.stdout)
36 | console_handler.setFormatter(main_formatter)
37 |
38 | # Root logger setup
39 | root_logger = logging.getLogger()
40 | root_logger.setLevel(logging.INFO)
41 | root_logger.addHandler(main_handler)
42 | root_logger.addHandler(console_handler)
43 |
44 | # Tool logger setup
45 | tool_logger = logging.getLogger('mcp.tools')
46 | tool_logger.setLevel(logging.INFO)
47 | tool_logger.addHandler(tool_handler)
48 |
49 | def log_tool_operation(self, tool_name: str, operation: str, message: str,
50 | level: str = 'INFO', **kwargs):
51 | logger = logging.getLogger('mcp.tools')
52 |
53 | extra = {
54 | 'tool_name': tool_name,
55 | 'operation': operation
56 | }
57 |
58 | if kwargs:
59 | message = f"{message} - {kwargs}"
60 |
61 | if level.upper() == 'ERROR':
62 | logger.error(message, extra=extra)
63 | elif level.upper() == 'WARNING':
64 | logger.warning(message, extra=extra)
65 | else:
66 | logger.info(message, extra=extra)
67 |
68 | def log_server_operation(self, message: str, level: str = 'INFO', **kwargs):
69 | logger = logging.getLogger('mcp.server')
70 |
71 | if kwargs:
72 | message = f"{message} - {kwargs}"
73 |
74 | if level.upper() == 'ERROR':
75 | logger.error(message)
76 | elif level.upper() == 'WARNING':
77 | logger.warning(message)
78 | else:
79 | logger.info(message)
--------------------------------------------------------------------------------
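
A minimal usage sketch for `LogManager`, based on the methods defined above; the log directory and the extra keyword argument are placeholders.

```python
from mcp_code_analyzer.tools.logger import LogManager

log_manager = LogManager(log_dir="logs")  # placeholder directory

# Tool-level log entry; extra kwargs are appended to the message.
log_manager.log_tool_operation(
    tool_name="analyze_file",
    operation="analyze",
    message="Started analysis",
    level="INFO",
    file="example.py",
)

# Server-level log entry.
log_manager.log_server_operation("Server started", level="INFO")
```
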
/README.md:
--------------------------------------------------------------------------------
1 | # MCP Code Analyzer
2 | The main purpose of the MCP tool is to apply changes to a project intelligently.
3 | For instance, when a code modification or structural adjustment is needed, it aims to ensure that other related usages are updated accordingly.
4 |
5 | The project currently has a number of bugs, and some tools do not always work as expected. Due to work commitments,
6 | I can’t dedicate much time to it, but I’ll try to fix the issues and bring it to a working state whenever I find the time.
7 |
8 |
9 | ⚠️ **WARNING: BACKUP RECOMMENDED**
10 | This tool performs file operations that could potentially modify or delete files. Always backup your codebase before using any modification features.
11 |
12 | ## Overview
13 | The Model Context Protocol (MCP) Code Analyzer is a comprehensive code analysis and management tool that integrates with Claude Desktop. It analyzes code at both project and file levels, providing insights that support code changes and overall project understanding.
14 |
15 | ## Prerequisites
16 | - Python 3.10 or later
17 | - Windows (Linux/macOS support not yet tested)
18 |
19 | ## Installation
20 | ```bash
21 | # Clone the repository
22 | git clone https://github.com/[your-username]/mcp-code-analyzer.git
23 | cd mcp-code-analyzer
24 |
25 | # Install dependencies and package
26 | pip install -e .
27 | ```
28 |
29 | ## Claude Desktop Integration
30 |
31 | ### Configuration
32 | 1. Access Claude Desktop config:
33 | ```
34 | Win + R → %AppData%\Claude\
35 | ```
36 | 2. Create/edit `claude_desktop_config.json`:
37 | ```json
38 | {
39 | "globalShortcut": "Ctrl+Space",
40 | "mcpServers": {
41 | "code-analyzer": {
42 | "command": "python",
43 | "args": [
44 | "-m",
45 | "mcp_code_analyzer",
46 | "--analyze-paths",
47 | "C:\\Projects\\path1"
48 | ],
49 | "type": "module"
50 | }
51 | }
52 | }
53 | ```
54 |
55 | ### Path Configuration
56 | - Multiple project paths can be specified in configuration
57 | - Additional paths or files can be analyzed via chat messages
58 | - No limit on number of analyzable paths/files
59 |
60 | ## 🛠️ Tools and Status
61 |
62 | ### Working Tools
63 | - ✅ **Project Analysis**
64 | - analyze_project_structure (XML tree format)
65 | - analyze_project_statistics
66 | - analyze_project_technology
67 | - analyze_code_structure
68 |
69 | - ✅ **Code Analysis**
70 | - analyze_imports
71 | - analyze_file
72 | - find_references
73 |
74 | - ⚠️🔄 **File Operations** (Always backup before use)
75 | - file_operations
76 | - version_control (Creates dated backups)
77 |
78 | - ⚠️🔄 **Code Modification** (Always backup before use)
79 |   - code_modifier (modifies code line by line; currently has significant issues)
80 |
81 | ### Testing Phase
82 | - 🔄 check_syntax
83 | - 🔄 find_code_patterns
84 | - 🔄 find_pattern_usages
85 | - 🔄 search_content
86 | - 🔄 search_files
87 | - 🔄 validate_code
88 |
89 | ### Known Issues
90 | 1. Chat context limitations may interrupt large file modifications
91 | 2. AI-generated documentation comments can affect code operation in large files
92 |
93 | ## Demo Video
94 | Watch a demonstration of the MCP tool being used to build a Mario game:
95 | [Watch the demo on YouTube](https://youtu.be/MQtZCKNg13I)
96 |
97 |
98 | Note: This is a demonstration of the tool's capabilities, not a complete game tutorial. While the game isn't fully playable due to Claude's message limits, this video shows how the MCP tool works and what you can do with it.
99 |
100 | ## Contributing
101 | Contributions are welcome! Whether it's bug reports, feature requests, documentation improvements, or code contributions - every contribution helps. Simply fork the repository and submit a pull request.
102 |
103 | ## License
104 | This project is licensed under the MIT License - see the [`LICENCE`](LICENCE) file for details.
105 |
106 | ---
107 | **Note**: This project is under active development. Features and documentation may change frequently.
--------------------------------------------------------------------------------
/mcp_code_analyzer/tools/dependency_tools.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import logging
3 | from pathlib import Path
4 | from typing import Dict, Any, List
5 | import networkx as nx
6 | from .base import BaseTool ,safe_read_file
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 | class FileDependencyAnalyzer(BaseTool):
11 | """Analyze file dependencies"""
12 |
13 | def __init__(self):
14 | super().__init__()
15 | self.dependency_graph = nx.DiGraph()
16 |
17 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
18 | file_path = arguments.get('file_path')
19 | if not file_path:
20 | return {"error": "File path is required"}
21 |
22 | path = self._normalize_path(file_path)
23 | if not self._validate_path(path):
24 | return {"error": "Invalid file path"}
25 |
26 | cache_key = f"file_deps_{path}"
27 | if cached := self._get_cached_result(cache_key):
28 | return cached
29 |
30 | try:
31 | result = {
32 | "direct_dependencies": await self._analyze_direct_dependencies(path),
33 | "indirect_dependencies": await self._analyze_indirect_dependencies(path),
34 | "dependents": await self._find_dependents(path),
35 | "cycles": await self._detect_cycles(path),
36 | "metrics": await self._calculate_metrics(path)
37 | }
38 |
39 | self._cache_result(cache_key, result)
40 | return result
41 |
42 | except Exception as e:
43 | logger.error(f"Error analyzing file dependencies: {e}")
44 | return {"error": str(e)}
45 |
46 | async def _analyze_direct_dependencies(self, path: Path) -> Dict[str, Any]:
47 | """Analyze direct dependencies"""
48 | deps = {
49 | "imports": [],
50 | "from_imports": [],
51 | "total_count": 0
52 | }
53 |
54 | try:
55 | content = safe_read_file(str(path))
56 | if content:
57 | tree = ast.parse(content)
58 |
59 | for node in ast.walk(tree):
60 | if isinstance(node, ast.Import):
61 | for name in node.names:
62 | deps["imports"].append({
63 | "name": name.name,
64 | "alias": name.asname,
65 | "line": node.lineno
66 | })
67 | self.dependency_graph.add_edge(str(path), name.name)
68 |
69 | elif isinstance(node, ast.ImportFrom):
70 | if node.module:
71 | deps["from_imports"].append({
72 | "module": node.module,
73 | "names": [{"name": n.name, "alias": n.asname} for n in node.names],
74 | "line": node.lineno,
75 | "level": node.level
76 | })
77 | self.dependency_graph.add_edge(str(path), node.module)
78 |
79 | deps["total_count"] = len(deps["imports"]) + len(deps["from_imports"])
80 |
81 | except Exception as e:
82 | logger.error(f"Error analyzing direct dependencies: {e}")
83 |
84 | return deps
85 |
86 | async def _analyze_indirect_dependencies(self, path: Path) -> List[Dict[str, Any]]:
87 | """Analyze indirect dependencies"""
88 | indirect_deps = []
89 |
90 | try:
91 | # Get all paths except input path's successors
92 | all_paths = list(nx.dfs_edges(self.dependency_graph, str(path)))
93 | direct_deps = set(self.dependency_graph.successors(str(path)))
94 |
95 | for source, target in all_paths:
96 | if target not in direct_deps and source != str(path):
97 | indirect_deps.append({
98 | "name": target,
99 | "through": source,
100 | "path": self._find_shortest_path(str(path), target)
101 | })
102 |
103 | except Exception as e:
104 | logger.error(f"Error analyzing indirect dependencies: {e}")
105 |
106 | return indirect_deps
107 |
108 | def _should_skip(self, path: Path) -> bool:
109 | """Check if path should be skipped"""
110 | try:
111 | if any(excluded in path.parts for excluded in self.analysis_config.excluded_dirs):
112 | return True
113 | if path.is_file() and any(path.name.endswith(ext) for ext in self.analysis_config.excluded_files):
114 | return True
115 | return False
116 | except:
117 | return True
118 |
119 | async def _find_dependents(self, path: Path) -> List[Dict[str, Any]]:
120 | """Find files that depend on this file"""
121 | dependents = []
122 |
123 | try:
124 | for py_file in Path('.').rglob('*.py'):
125 | if py_file != path and not self._should_skip(py_file):
126 | content = safe_read_file(str(py_file))
127 | if not content:
128 | continue
129 |
130 | tree = ast.parse(content)
131 | found = False
132 |
133 | for node in ast.walk(tree):
134 | if isinstance(node, (ast.Import, ast.ImportFrom)):
135 | module_name = path.stem
136 | if (isinstance(node, ast.Import) and
137 | any(name.name == module_name for name in node.names)):
138 | found = True
139 | break
140 | elif (isinstance(node, ast.ImportFrom) and
141 | node.module and module_name in node.module):
142 | found = True
143 | break
144 |
145 | if found:
146 | dependents.append({
147 | "file": str(py_file),
148 | "type": "direct" if self.dependency_graph.has_edge(str(py_file), str(path)) else "indirect"
149 | })
150 |
151 | except Exception as e:
152 | logger.error(f"Error finding dependents: {e}")
153 |
154 | return dependents
155 |
156 | async def _detect_cycles(self, path: Path) -> List[List[str]]:
157 | """Detect dependency cycles"""
158 | cycles = []
159 | try:
160 | for cycle in nx.simple_cycles(self.dependency_graph):
161 | if str(path) in cycle:
162 | cycles.append(cycle)
163 | except Exception as e:
164 | logger.error(f"Error detecting cycles: {e}")
165 | return cycles
166 |
167 | async def _calculate_metrics(self, path: Path) -> Dict[str, Any]:
168 | """Calculate dependency metrics"""
169 | metrics = {
170 | "fanin": 0, # Number of files that depend on this
171 | "fanout": 0, # Number of files this depends on
172 | "instability": 0.0, # fanout / (fanin + fanout)
173 | "dependency_depth": 0 # Longest dependency chain
174 | }
175 |
176 | try:
177 | metrics["fanin"] = len(list(self.dependency_graph.predecessors(str(path))))
178 | metrics["fanout"] = len(list(self.dependency_graph.successors(str(path))))
179 |
180 | if metrics["fanin"] + metrics["fanout"] > 0:
181 | metrics["instability"] = metrics["fanout"] / (metrics["fanin"] + metrics["fanout"])
182 |
183 | # Calculate dependency depth
184 | depths = []
185 | for node in self.dependency_graph.nodes():
186 | try:
187 | path_length = nx.shortest_path_length(self.dependency_graph, str(path), node)
188 | depths.append(path_length)
189 | except (nx.NetworkXNoPath, nx.NodeNotFound):
190 | continue
191 |
192 | metrics["dependency_depth"] = max(depths) if depths else 0
193 |
194 | except Exception as e:
195 | logger.error(f"Error calculating metrics: {e}")
196 |
197 | return metrics
198 |
199 | def _find_shortest_path(self, source: str, target: str) -> List[str]:
200 | """Find shortest dependency path between two modules"""
201 | try:
202 | return nx.shortest_path(self.dependency_graph, source, target)
203 | except (nx.NetworkXNoPath, nx.NodeNotFound):
204 | return []
205 |
206 |
207 |
--------------------------------------------------------------------------------
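
A minimal sketch of running `FileDependencyAnalyzer` outside the MCP server, assuming the package is importable; the analyzed file path is a placeholder and the printed keys follow the result dict built in `execute()` above.

```python
import asyncio

from mcp_code_analyzer.tools.dependency_tools import FileDependencyAnalyzer

async def demo() -> None:
    analyzer = FileDependencyAnalyzer()
    # Placeholder target file.
    result = await analyzer.execute({"file_path": "mcp_code_analyzer/config.py"})
    direct = result.get("direct_dependencies", {})
    print("direct imports:", direct.get("total_count"))
    print("metrics:", result.get("metrics"))

asyncio.run(demo())
```
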
/mcp_code_analyzer/tools/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from ..config import analysis_config, system_config
3 | import os
4 | import logging
5 | from pathlib import Path
6 | from typing import List, Dict, Any, Union, Optional
7 | import chardet
8 | from functools import lru_cache
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 | @lru_cache(maxsize=1000)
13 | def detect_file_encoding(file_path: str) -> str:
14 | try:
15 | with open(file_path, 'rb') as f:
16 | raw_data = f.read()
17 | result = chardet.detect(raw_data)
18 | return result['encoding'] or 'utf-8'
19 | except Exception as e:
20 | logger.warning(f"Error detecting encoding for {file_path}: {e}")
21 | return 'utf-8'
22 |
23 | def calculate_directory_size(path: Union[str, Path]) -> int:
24 | total_size = 0
25 | try:
26 | for entry in os.scandir(path):
27 | try:
28 | if entry.is_file():
29 | total_size += entry.stat().st_size
30 | elif entry.is_dir():
31 | total_size += calculate_directory_size(entry.path)
32 | except (PermissionError, FileNotFoundError) as e:
33 | continue
34 | except Exception as e:
35 | logger.error(f"Error calculating directory size for {path}: {e}")
36 | return total_size
37 |
38 | def safe_read_file(file_path: Union[str, Path], base_path: Optional[Union[str, Path]] = None) -> Optional[str]:
39 | """Safely read a file with proper encoding detection and error handling"""
40 | try:
41 | # Convert to Path object
42 | path = Path(file_path)
43 |
44 | # Handle base path
45 | if base_path and not path.is_absolute():
46 | path = Path(base_path) / path
47 |
48 | # Ensure path is resolved
49 | path = path.resolve()
50 |
51 | if not path.exists():
52 | logger.error(f"File not found: {path}")
53 | return None
54 |
55 | if not path.is_file():
56 | logger.error(f"Not a file: {path}")
57 | return None
58 |
59 | try:
60 | # First try reading as binary to detect encoding
61 | with open(path, 'rb') as f:
62 | raw_content = f.read()
63 |
64 | # Detect encoding with BOM check
65 | if raw_content.startswith(b'\xef\xbb\xbf'):
66 | encoding = 'utf-8-sig'
67 | else:
68 | # Try different encodings in order of likelihood
69 | encodings = ['utf-8', 'utf-16', 'utf-16le', 'utf-16be', 'cp1252', 'iso-8859-1']
70 | content = None
71 |
72 | for enc in encodings:
73 | try:
74 | content = raw_content.decode(enc)
75 | encoding = enc
76 | break
77 | except UnicodeDecodeError:
78 | continue
79 |
80 | if content is None:
81 | # If all encodings fail, use utf-8 with error handling
82 | content = raw_content.decode('utf-8', errors='replace')
83 | return content
84 |
85 | # Read with detected encoding
86 | with open(path, 'r', encoding=encoding) as f:
87 | return f.read()
88 |
89 | except Exception as e:
90 | logger.error(f"Error reading file {path}: {e}")
91 | # Last resort: try to decode with utf-8 and replace errors
92 | try:
93 | return raw_content.decode('utf-8', errors='replace')
94 | except:
95 | return None
96 |
97 | except Exception as e:
98 | logger.error(f"Error processing file path {file_path}: {e}")
99 | return None
100 |
101 | def get_relative_path(base_path: Union[str, Path], full_path: Union[str, Path]) -> str:
102 | try:
103 | base = Path(base_path).resolve()
104 | full = Path(full_path).resolve()
105 | try:
106 | return str(full.relative_to(base))
107 | except ValueError:
108 | return str(full)
109 | except Exception as e:
110 | logger.error(f"Error getting relative path: {e}")
111 | return str(full_path)
112 |
113 | class BaseTool(ABC):
114 | def __init__(self):
115 | self.analysis_config = analysis_config
116 | self.system_config = system_config
117 | self._cache = {}
118 |
119 | @abstractmethod
120 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
121 | pass
122 |
123 | def _cache_result(self, key: str, result: Any):
124 | if self.system_config.ENABLE_CACHE:
125 | if len(self._cache) >= self.system_config.MAX_CACHE_SIZE:
126 | self._cache.pop(next(iter(self._cache)))
127 | self._cache[key] = result
128 |
129 | def _get_cached_result(self, key: str) -> Optional[Any]:
130 | if self.system_config.ENABLE_CACHE:
131 | return self._cache.get(key)
132 | return None
133 |
134 | def _get_absolute_path(self, path: Union[str, Path], base_path: Optional[Union[str, Path]] = None) -> Path:
135 | try:
136 | path = Path(path)
137 | if base_path:
138 | base = Path(base_path)
139 | return (base / path).resolve()
140 | elif not path.is_absolute():
141 | return (Path.cwd() / path).resolve()
142 | return path.resolve()
143 | except Exception as e:
144 | logger.error(f"Error getting absolute path: {e}")
145 | return Path.cwd()
146 |
147 | def _normalize_path(self, path: Union[str, Path]) -> Path:
148 | try:
149 |             if isinstance(path, str):
150 |                 path = path.replace('\\', '/')
151 |
152 |             path_obj = Path(path)
153 |
154 | if path_obj.is_absolute():
155 | return path_obj.resolve()
156 |
157 | if path_obj.exists():
158 | return path_obj.resolve()
159 | try:
160 | found_paths = list(Path('.').rglob(path_obj.name))
161 | if found_paths:
162 | for found_path in found_paths:
163 | if found_path.exists() and not self._should_skip(found_path):
164 | return found_path.resolve()
165 | except Exception:
166 | pass
167 |
168 | return Path(path).resolve()
169 |
170 | except Exception as e:
171 | logger.error(f"Error normalizing path {path}: {e}")
172 | return Path(path) if isinstance(path, str) else path
173 |
174 |
175 | def _validate_path(self, path: Path) -> bool:
176 | try:
177 | path = self._normalize_path(path)
178 | if not path.exists():
179 | return False
180 | return os.access(path, os.R_OK)
181 | except Exception as e:
182 | logger.error(f"Error validating path {path}: {e}")
183 | return False
184 |
185 | def _should_skip_path(self, path: Path) -> bool:
186 | try:
187 | if any(excluded in path.parts for excluded in self.analysis_config.excluded_dirs):
188 | return True
189 | if path.is_file() and any(path.name.endswith(ext) for ext in self.analysis_config.excluded_files):
190 | return True
191 | return False
192 | except Exception:
193 | return True
194 |
195 | def _is_valid_project_path(self, path: Path) -> bool:
196 | try:
197 | return path.is_dir() and not self._should_skip_path(path)
198 | except Exception:
199 | return False
200 |
201 | @staticmethod
202 | def create_file_tree(files: List[Dict[str, Any]]) -> Dict[str, Any]:
203 | tree = {}
204 | for file_info in files:
205 | path_parts = file_info['path'].split(os.sep)
206 | current = tree
207 | for part in path_parts[:-1]:
208 | if part not in current:
209 | current[part] = {}
210 | current = current[part]
211 | current[path_parts[-1]] = file_info
212 | return tree
213 |
214 | @staticmethod
215 | def group_files_by_type(files: List[Dict[str, Any]]) -> Dict[str, List[str]]:
216 | grouped = {}
217 | for file_info in files:
218 | ext = file_info.get('type', '')
219 | if ext not in grouped:
220 | grouped[ext] = []
221 | grouped[ext].append(file_info['path'])
222 | return grouped
223 |
224 | @staticmethod
225 | def find_similar_files(files: List[Dict[str, Any]], threshold: float = 0.8) -> List[Dict[str, Any]]:
226 | from difflib import SequenceMatcher
227 | similar_groups = []
228 | for i, file1 in enumerate(files):
229 | similar = []
230 | for j, file2 in enumerate(files):
231 | if i != j:
232 | similarity = SequenceMatcher(None, file1['name'], file2['name']).ratio()
233 | if similarity >= threshold:
234 | similar.append({
235 | 'file': file2['path'],
236 | 'similarity': similarity
237 | })
238 | if similar:
239 | similar_groups.append({
240 | 'file': file1['path'],
241 | 'similar_to': similar
242 | })
243 | return similar_groups
244 |
245 | def _should_skip(self, path: Path) -> bool:
246 | try:
247 | if any(excluded in path.parts for excluded in self.analysis_config.excluded_dirs):
248 | return True
249 | if path.is_file() and any(path.name.endswith(ext) for ext in self.analysis_config.excluded_files):
250 | return True
251 | return False
252 | except Exception:
253 | return True
--------------------------------------------------------------------------------
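
A minimal sketch of the `BaseTool` contract that every tool in this package follows: implement an async `execute()` and reuse the path, validation, and caching helpers. `LineCounter` is a hypothetical example tool, not part of the repository.

```python
from typing import Any, Dict

from mcp_code_analyzer.tools.base import BaseTool, safe_read_file

class LineCounter(BaseTool):
    """Hypothetical tool: count the lines of a single file."""

    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        file_path = arguments.get("file_path")
        if not file_path:
            return {"error": "File path is required"}

        path = self._normalize_path(file_path)
        if not self._validate_path(path):
            return {"error": "Invalid file path"}

        cache_key = f"line_count_{path}"
        if cached := self._get_cached_result(cache_key):
            return cached

        content = safe_read_file(str(path)) or ""
        result = {"file": str(path), "lines": len(content.splitlines())}
        self._cache_result(cache_key, result)
        return result
```
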
/mcp_code_analyzer/tools/project_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from pathlib import Path
4 | from typing import Dict, Any, List
5 | from .base import BaseTool
6 | from ..config import analysis_config, system_config
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 | class ProjectStructure(BaseTool):
11 | """Analyzes and creates project structure tree"""
12 |
13 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
14 | path = self._normalize_path(arguments.get('path', '.'))
15 | if not self._validate_path(path):
16 | return {"error": "Path not found"}
17 |
18 | cache_key = f"project_structure_{path}"
19 | if cached := self._get_cached_result(cache_key):
20 | return cached
21 |
22 | try:
23 | result = await self._analyze_structure(path)
24 | self._cache_result(cache_key, result)
25 | return result
26 | except Exception as e:
27 | logger.error(f"Error analyzing project structure: {e}")
28 | return {"error": str(e)}
29 |
30 | async def _analyze_structure(self, path: Path) -> Dict[str, Any]:
31 | try:
32 | if isinstance(path, str):
33 | path = Path(path)
34 | path = path.resolve()
35 |
36 | if not path.exists():
37 | return {"error": f"Path does not exist: {path}"}
38 |
39 | def build_tree(current_path: Path, indent: int = 0) -> List[str]:
40 | if not current_path.exists() or indent > system_config.MAX_DEPTH:
41 | return []
42 |
43 | result = []
44 | items = sorted(current_path.iterdir(), key=lambda x: (not x.is_dir(), x.name.lower()))
45 | indent_str = ' ' * indent
46 |
47 | for item in items:
48 | if self._should_skip(item):
49 | continue
50 |
51 | if item.is_dir():
52 |                     result.append(f'{indent_str}<directory name="{item.name}">')
53 | result.extend(build_tree(item, indent + 1))
54 |                     result.append(f'{indent_str}</directory>')
55 | else:
56 | if item.stat().st_size <= system_config.MAX_FILE_SIZE:
57 | ext = item.suffix or 'no_ext'
58 | result.append(
59 |                             f'{indent_str}<file name="{item.name}" type="{ext}"/>'
60 | )
61 |
62 | return result
63 |
64 | xml_lines = [
65 |             f'<project_structure path="{path}">',
66 | *build_tree(path, indent=1),
67 |             '</project_structure>'
68 | ]
69 |
70 | return {
71 | "structure": {
72 | "xml": '\n'.join(xml_lines),
73 | "project_path": str(path)
74 | }
75 | }
76 |
77 | except Exception as e:
78 | logger.error(f"Error analyzing structure at {path}: {e}")
79 | return {"error": str(e)}
80 |
81 | class ProjectStatistics(BaseTool):
82 | """Collects detailed project statistics"""
83 |
84 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
85 | path = self._normalize_path(arguments.get('path', '.'))
86 | if not self._validate_path(path):
87 | return {"error": "Path not found"}
88 |
89 | cache_key = f"project_stats_{path}"
90 | if cached := self._get_cached_result(cache_key):
91 | return cached
92 |
93 | try:
94 | result = await self._collect_statistics(path)
95 | self._cache_result(cache_key, result)
96 | return result
97 | except Exception as e:
98 | logger.error(f"Error collecting project statistics: {e}")
99 | return {"error": str(e)}
100 |
101 | async def _collect_statistics(self, path: Path) -> Dict[str, Any]:
102 | try:
103 | stats = {
104 | "files": {
105 | "total": 0,
106 | "by_extension": {},
107 | "analyzable": 0
108 | },
109 | "directories": {
110 | "total": 0,
111 | "max_depth": 0,
112 | "by_depth": {}
113 | },
114 | "size": {
115 | "total": 0,
116 | "by_extension": {},
117 | "average_file_size": 0
118 | }
119 | }
120 |
121 | for item in path.rglob("*"):
122 | if not self._should_skip(item):
123 | depth = len(item.relative_to(path).parts)
124 |
125 | if item.is_dir():
126 | stats["directories"]["total"] += 1
127 | stats["directories"]["max_depth"] = max(stats["directories"]["max_depth"], depth)
128 | stats["directories"]["by_depth"][depth] = stats["directories"]["by_depth"].get(depth, 0) + 1
129 |
130 | elif item.is_file() and item.stat().st_size <= system_config.MAX_FILE_SIZE:
131 | size = item.stat().st_size
132 | ext = item.suffix or 'no_ext'
133 |
134 | stats["files"]["total"] += 1
135 | stats["size"]["total"] += size
136 |
137 | if ext not in stats["files"]["by_extension"]:
138 | stats["files"]["by_extension"][ext] = 0
139 | stats["size"]["by_extension"][ext] = 0
140 |
141 | stats["files"]["by_extension"][ext] += 1
142 | stats["size"]["by_extension"][ext] += size
143 |
144 | if ext in analysis_config.analyzable_extensions:
145 | stats["files"]["analyzable"] += 1
146 |
147 | if stats["files"]["total"] > 0:
148 | stats["size"]["average_file_size"] = stats["size"]["total"] / stats["files"]["total"]
149 |
150 | return stats
151 |
152 | except Exception as e:
153 | logger.error(f"Error collecting statistics: {e}")
154 | return {}
155 |
156 | class ProjectTechnology(BaseTool):
157 | """Analyzes technologies used in the project"""
158 |
159 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
160 | path = self._normalize_path(arguments.get('path', '.'))
161 | if not self._validate_path(path):
162 | return {"error": "Path not found"}
163 |
164 | cache_key = f"project_tech_{path}"
165 | if cached := self._get_cached_result(cache_key):
166 | return cached
167 |
168 | try:
169 | result = await self._detect_technologies(path)
170 | self._cache_result(cache_key, result)
171 | return result
172 | except Exception as e:
173 | logger.error(f"Error detecting technologies: {e}")
174 | return {"error": str(e)}
175 |
176 | async def _detect_technologies(self, path: Path) -> Dict[str, Any]:
177 | try:
178 | tech_info = {
179 | "detected_techs": {},
180 | "frameworks": set(),
181 | "languages": set()
182 | }
183 |
184 | # Scan for technology markers
185 | for item in path.rglob("*"):
186 | if not self._should_skip(item):
187 | # Check against technology markers
188 | for tech, markers in analysis_config.tech_markers.items():
189 | for marker in markers:
190 | if marker.lower() in str(item).lower():
191 | if tech not in tech_info["detected_techs"]:
192 | tech_info["detected_techs"][tech] = {
193 | "markers_found": [],
194 | "files_count": 0
195 | }
196 | tech_info["detected_techs"][tech]["markers_found"].append(str(item.name))
197 | tech_info["detected_techs"][tech]["files_count"] += 1
198 |
199 | # Special handling for framework detection
200 | if item.is_file():
201 | if item.suffix in ['.jsx', '.tsx']:
202 | tech_info["frameworks"].add("React")
203 | elif item.name == 'package.json':
204 | try:
205 | with open(item) as f:
206 | data = json.load(f)
207 | deps = {**data.get('dependencies', {}), **data.get('devDependencies', {})}
208 |
209 | framework_indicators = {
210 | 'vue': 'Vue.js',
211 | 'angular': 'Angular',
212 | 'next': 'Next.js',
213 | 'nest': 'NestJS'
214 | }
215 |
216 | for indicator, framework in framework_indicators.items():
217 | if indicator in deps:
218 | tech_info["frameworks"].add(framework)
219 | except:
220 | continue
221 |
222 | # Convert sets to sorted lists for JSON serialization
223 | tech_info["frameworks"] = sorted(list(tech_info["frameworks"]))
224 | tech_info["languages"] = sorted(list(tech_info["languages"]))
225 |
226 | return tech_info
227 |
228 | except Exception as e:
229 | logger.error(f"Error detecting technologies: {e}")
230 | return {}
--------------------------------------------------------------------------------
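
A minimal sketch of running the project tools above directly; the path is a placeholder and the printed keys follow the result structures returned by `_analyze_structure()` and `_collect_statistics()`.

```python
import asyncio

from mcp_code_analyzer.tools.project_tools import ProjectStructure, ProjectStatistics

async def demo() -> None:
    # Assumes "." is a valid, readable project directory.
    structure = await ProjectStructure().execute({"path": "."})
    print(structure["structure"]["xml"])

    stats = await ProjectStatistics().execute({"path": "."})
    print("files:", stats["files"]["total"], "bytes:", stats["size"]["total"])

asyncio.run(demo())
```
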
/mcp_code_analyzer/tools/reference_tools.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import logging
3 | from pathlib import Path
4 | from typing import Dict, Any, List
5 | from .base import BaseTool
6 | from .base import safe_read_file
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 | class PreviewChanges(BaseTool):
11 | """Preview impact of code changes"""
12 |
13 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
14 | pattern = arguments.get('pattern')
15 | replacement = arguments.get('replacement')
16 |
17 | if not pattern or not replacement:
18 | return {"error": "Both pattern and replacement are required"}
19 |
20 | cache_key = f"preview_{pattern}_{replacement}"
21 | if cached := self._get_cached_result(cache_key):
22 | return cached
23 |
24 | try:
25 | result = {
26 | "original": pattern,
27 | "replacement": replacement,
28 | "changes": await self._preview_changes(pattern, replacement),
29 | "impact": await self._analyze_change_impact(pattern, replacement),
30 | "safety_analysis": await self._analyze_safety(pattern, replacement)
31 | }
32 |
33 | self._cache_result(cache_key, result)
34 | return result
35 |
36 | except Exception as e:
37 | logger.error(f"Error previewing changes: {e}")
38 | return {"error": str(e)}
39 |
40 |
41 | def _should_skip(self, path: Path) -> bool:
42 | """Check if path should be skipped"""
43 | try:
44 | if any(excluded in path.parts for excluded in self.analysis_config.excluded_dirs):
45 | return True
46 | if path.is_file() and any(path.name.endswith(ext) for ext in self.analysis_config.excluded_files):
47 | return True
48 | return False
49 | except:
50 | return True
51 |
52 | async def _preview_changes(self, pattern: str, replacement: str) -> List[Dict[str, Any]]:
53 | """Generate preview of changes"""
54 | changes = []
55 |
56 | try:
57 | # Analyze current working directory recursively
58 | for path in Path('.').rglob('*.py'):
59 | if not self._should_skip(path):
60 | content = safe_read_file(str(path))
61 | if content and pattern in content:
62 | # Generate diff for each occurrence
63 | lines = content.splitlines()
64 | for i, line in enumerate(lines, 1):
65 | if pattern in line:
66 | changes.append({
67 | "file": str(path),
68 | "line": i,
69 | "original": line.strip(),
70 | "modified": line.replace(pattern, replacement).strip(),
71 | "context": self._get_context(lines, i)
72 | })
73 |
74 | except Exception as e:
75 | logger.error(f"Error previewing changes: {e}")
76 |
77 | return changes
78 |
79 | async def _analyze_change_impact(self, pattern: str, replacement: str) -> Dict[str, Any]:
80 | """Analyze impact of changes"""
81 | impact = {
82 | "risk_level": "low",
83 | "affected_components": [],
84 | "potential_issues": []
85 | }
86 |
87 | try:
88 | # Check for potential issues
89 | if len(replacement) > len(pattern):
90 | impact["potential_issues"].append("Replacement is longer than original")
91 |
92 | if replacement.count('_') != pattern.count('_'):
93 | impact["potential_issues"].append("Different naming convention")
94 |
95 | if replacement.lower() == pattern.lower() and replacement != pattern:
96 | impact["potential_issues"].append("Case sensitivity might cause issues")
97 |
98 | # Adjust risk level based on issues
99 | if len(impact["potential_issues"]) > 2:
100 | impact["risk_level"] = "high"
101 | elif len(impact["potential_issues"]) > 0:
102 | impact["risk_level"] = "medium"
103 |
104 | except Exception as e:
105 | logger.error(f"Error analyzing change impact: {e}")
106 |
107 | return impact
108 |
109 | async def _analyze_safety(self, pattern: str, replacement: str) -> Dict[str, Any]:
110 | """Analyze safety of the change"""
111 | return {
112 | "safe_to_apply": True, # Default to True
113 | "warnings": [],
114 | "checks_performed": [
115 | "syntax_validation",
116 | "naming_convention",
117 | "scope_analysis"
118 | ]
119 | }
120 |
121 | def _get_context(self, lines: List[str], current_line: int, context_lines: int = 2) -> Dict[str, List[str]]:
122 | """Get context lines around the change"""
123 | start = max(0, current_line - context_lines - 1)
124 | end = min(len(lines), current_line + context_lines)
125 |
126 | return {
127 | "before": lines[start:current_line-1],
128 | "after": lines[current_line:end]
129 | }
130 |
131 | class FindReferences(BaseTool):
132 | """Find code references tool"""
133 |
134 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
135 | target = arguments.get('target')
136 | ref_type = arguments.get('ref_type', 'all')
137 |
138 | if not target:
139 | return {"error": "Target is required"}
140 |
141 | cache_key = f"refs_{target}_{ref_type}"
142 | if cached := self._get_cached_result(cache_key):
143 | return cached
144 |
145 | try:
146 | result = {
147 | "target": target,
148 | "type": ref_type,
149 | "references": await self._find_references(target, ref_type),
150 | "summary": await self._create_summary(target, ref_type)
151 | }
152 |
153 | self._cache_result(cache_key, result)
154 | return result
155 |
156 | except Exception as e:
157 | logger.error(f"Error finding references: {e}")
158 | return {"error": str(e)}
159 |
160 | def _should_skip(self, path: Path) -> bool:
161 | """Check if path should be skipped"""
162 | try:
163 | if any(excluded in path.parts for excluded in self.analysis_config.excluded_dirs):
164 | return True
165 | if path.is_file() and any(path.name.endswith(ext) for ext in self.analysis_config.excluded_files):
166 | return True
167 | return False
168 | except:
169 | return True
170 |
171 | async def _find_references(self, target: str, ref_type: str) -> List[Dict[str, Any]]:
172 | """Find all references to target"""
173 | references = []
174 |
175 | try:
176 | for path in Path('.').rglob('*.py'):
177 | if not self._should_skip(path):
178 | content = safe_read_file(str(path))
179 | if not content:
180 | continue
181 |
182 | try:
183 | tree = ast.parse(content)
184 | refs = self._analyze_node_references(tree, target, ref_type)
185 |
186 | if refs:
187 | for ref in refs:
188 | ref["file"] = str(path)
189 | references.extend(refs)
190 |
191 | except Exception as e:
192 | logger.error(f"Error parsing {path}: {e}")
193 |
194 | except Exception as e:
195 | logger.error(f"Error finding references: {e}")
196 |
197 | return references
198 |
199 | def _analyze_node_references(self, tree: ast.AST, target: str, ref_type: str) -> List[Dict[str, Any]]:
200 | """Analyze AST node for references"""
201 | refs = []
202 |
203 | for node in ast.walk(tree):
204 | # Class references
205 | if ref_type in ['all', 'class'] and isinstance(node, ast.ClassDef):
206 | if target in [node.name, *[b.id for b in node.bases if isinstance(b, ast.Name)]]:
207 | refs.append({
208 | "type": "class",
209 | "name": node.name,
210 | "line": node.lineno,
211 | "col": node.col_offset,
212 | "kind": "definition" if node.name == target else "inheritance"
213 | })
214 |
215 | # Function references
216 | elif ref_type in ['all', 'function'] and isinstance(node, ast.FunctionDef):
217 | if target == node.name:
218 | refs.append({
219 | "type": "function",
220 | "name": node.name,
221 | "line": node.lineno,
222 | "col": node.col_offset,
223 | "kind": "definition"
224 | })
225 |
226 | # Variable references
227 | elif ref_type in ['all', 'variable'] and isinstance(node, ast.Name):
228 | if target == node.id:
229 | refs.append({
230 | "type": "variable",
231 | "name": node.id,
232 | "line": node.lineno,
233 | "col": node.col_offset,
234 | "kind": "assignment" if isinstance(node.ctx, ast.Store) else "usage"
235 | })
236 |
237 | return refs
238 |
239 | async def _create_summary(self, target: str, ref_type: str) -> Dict[str, Any]:
240 | """Create reference summary"""
241 | refs = await self._find_references(target, ref_type)
242 |
243 | summary = {
244 | "total_references": len(refs),
245 | "by_type": {},
246 | "by_file": {},
247 | "unique_locations": set()
248 | }
249 |
250 | for ref in refs:
251 | # Count by type
252 | ref_type = ref["type"]
253 | summary["by_type"][ref_type] = summary["by_type"].get(ref_type, 0) + 1
254 |
255 | # Count by file
256 | file_path = ref["file"]
257 | summary["by_file"][file_path] = summary["by_file"].get(file_path, 0) + 1
258 |
259 | # Track unique locations
260 | summary["unique_locations"].add((file_path, ref["line"]))
261 |
262 | summary["unique_locations"] = len(summary["unique_locations"])
263 |
264 | return summary
--------------------------------------------------------------------------------
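
A minimal sketch of using `FindReferences` to locate a symbol across the current working directory; `BaseTool` is just an example target, and the result keys follow the dict built in `execute()` above.

```python
import asyncio

from mcp_code_analyzer.tools.reference_tools import FindReferences

async def demo() -> None:
    finder = FindReferences()
    result = await finder.execute({"target": "BaseTool", "ref_type": "class"})
    print("total references:", result["summary"]["total_references"])
    for ref in result["references"][:5]:
        print(ref["file"], ref["line"], ref["kind"])

asyncio.run(demo())
```
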
/mcp_code_analyzer/tools/manager.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Dict, Type, Optional, List, Callable
3 | from .base import BaseTool, logger
4 | from .file_tools import MCPFileOperations, FileAnalyzer
5 | from .project_tools import ProjectStructure, ProjectStatistics, ProjectTechnology
6 | from .pattern_tools import CodePatternAnalyzer, PatternUsageAnalyzer
7 | from .analysis_tools import (
8 | CodeStructureAnalyzer,
9 | ImportAnalyzer,
10 | CodeValidator,
11 | SyntaxChecker
12 | )
13 | from .reference_tools import FindReferences, PreviewChanges
14 | from .dependency_tools import FileDependencyAnalyzer
15 | from .version_manager import VersionManager
16 | from .search_tools import PathFinder, ContentScanner
17 | from .modification_tools import CodeModifier
18 | from ..config import analysis_config
19 |
20 | class ToolManager:
21 | """Manages all available tools"""
22 |
23 | def __init__(self):
24 | self._tools: Dict[str, BaseTool] = {}
25 | self._code_modifier = CodeModifier()
26 | self._initialize_tools()
27 |
28 | def _initialize_tools(self):
29 |
30 | # Project Analysis Tools
31 | self._register_tool("analyze_project_structure", ProjectStructure)
32 | self._register_tool("analyze_project_statistics", ProjectStatistics)
33 | self._register_tool("analyze_project_technology", ProjectTechnology)
34 |
35 | # File Operations Group
36 | self._register_tool("file_operations", MCPFileOperations)
37 | self._register_tool("analyze_file", FileAnalyzer)
38 |
39 | # Code Modification Group
40 | self._register_tool("code_modifier", lambda: self._code_modifier)
41 |
42 | # Search and Analysis Group
43 | self._register_tool("path_finder", PathFinder)
44 | self._register_tool("search_content", ContentScanner)
45 | self._register_tool("dependency_analyzer", FileDependencyAnalyzer)
46 |
47 | # Code Analysis Tools
48 | self._register_tool("analyze_code_structure", CodeStructureAnalyzer)
49 | self._register_tool("analyze_imports", ImportAnalyzer)
50 | self._register_tool("validate_code", CodeValidator)
51 | self._register_tool("check_syntax", SyntaxChecker)
52 |
53 | # Pattern Analysis Tools
54 | self._register_tool("find_patterns", CodePatternAnalyzer)
55 | self._register_tool("analyze_pattern_usage", PatternUsageAnalyzer)
56 |
57 | # Reference Tools
58 | self._register_tool("find_references", FindReferences)
59 | self._register_tool("preview_changes", PreviewChanges)
60 |
61 | # Version Control
62 | self._register_tool("version_control", VersionManager)
63 |
64 |     def _register_tool(self, name: str, tool_factory: Type[BaseTool] | Callable[[], BaseTool]):
65 |         """Register a tool with factory pattern"""
66 |         self._tools[name] = tool_factory()
67 |
68 | async def execute_tool(self, name: str, arguments: Dict) -> Dict:
69 | """Execute a tool by name with enhanced error handling"""
70 | if name not in self._tools:
71 | return {"error": f"Tool {name} not found"}
72 |
73 | try:
74 | # Special handling for code modification operations
75 | if name == "code_modifier":
76 | return await self._handle_code_modification(arguments)
77 |
78 | return await self._tools[name].execute(arguments)
79 | except Exception as e:
80 | return {"error": str(e)}
81 |
82 | def get_tool(self, name: str) -> Optional[BaseTool]:
83 | """Get a tool instance by name"""
84 | return self._tools.get(name)
85 |
86 | def list_tools(self) -> List[str]:
87 | """List all available tools"""
88 | return list(self._tools.keys())
89 |
90 | async def execute_workflow(self, workflow_type: str, arguments: Dict) -> Dict:
91 | """Execute a coordinated workflow"""
92 | try:
93 | if workflow_type == "modify_code":
94 | return await self._handle_code_modification(arguments)
95 | elif workflow_type == "modify_file":
96 | return await self._handle_file_modification(arguments)
97 | elif workflow_type == "search_and_modify":
98 | return await self._handle_search_modify(arguments)
99 | else:
100 | return {"error": f"Unknown workflow type: {workflow_type}"}
101 | except Exception as e:
102 | return {"error": str(e), "workflow_type": workflow_type}
103 |
104 | async def _handle_code_modification(self, arguments: Dict) -> Dict:
105 | """Central code modification handler"""
106 | try:
107 | operation = arguments.get('operation', 'modify')
108 | file_path = arguments.get('file_path')
109 |
110 |             # Analyze dependencies if needed (empty when analysis is skipped)
111 |             deps: Dict = {}
112 |             if operation in ['modify', 'delete']:
113 |                 deps = await self._analyze_dependencies(file_path, arguments)
114 |                 if deps.get('error'):
115 |                     return deps
116 | # Execute modification
117 | result = await self._code_modifier.modify_code(
118 | file_path=file_path,
119 | section=arguments.get('section', {}),
120 | new_content=arguments.get('content', ''),
121 | description=arguments.get('description')
122 | )
123 |
124 | if not result.success:
125 | return {"error": result.error}
126 |
127 | # Convert enum to string and prepare safe result
128 | return {
129 | "success": True,
130 | "modification": {
131 | "change_type": result.change.change_type.name if result.change else None,
132 | "backup_path": result.backup_path,
133 | "affected_files": [
134 | {
135 | "file_path": code.file_path,
136 | "reason": code.reason,
137 | "suggested_action": code.suggested_action,
138 | "severity": code.severity
139 | }
140 | for code in (result.affected_code or [])
141 | ] if result.affected_code else [],
142 | "dependencies": deps.get('dependencies', [])
143 | }
144 | }
145 |
146 | except Exception as e:
147 | logger.error(f"Code modification failed: {e}")
148 | return {"error": str(e)}
149 |
150 | async def _handle_file_modification(self, arguments: Dict) -> Dict:
151 | """Handle general file modification workflow"""
152 | try:
153 | file_ops = self.get_tool("file_operations")
154 |
155 | # Check if it's a code file
156 | if self._is_code_file(arguments.get('file_path', '')):
157 | return await self._handle_code_modification(arguments)
158 |
159 | # Regular file modification
160 | return await file_ops.execute({
161 | "operation": "modify",
162 | **arguments
163 | })
164 |
165 | except Exception as e:
166 | return {"error": str(e), "stage": "file_modification"}
167 |
168 |
169 |
170 | async def _analyze_dependencies(self, file_path: str, arguments: Dict) -> Dict:
171 | """Analyze dependencies before modification"""
172 | try:
173 | analyzer = self._tools.get('dependency_analyzer')
174 | if not analyzer:
175 | return {"error": "Dependency analyzer not available"}
176 |
177 | return await analyzer.execute({
178 | "file_path": file_path,
179 | "section": arguments.get('section', {}),
180 | "operation": arguments.get('operation')
181 | })
182 |
183 | except Exception as e:
184 | return {"error": f"Dependency analysis failed: {e}"}
185 |
186 | async def _handle_search_modify(self, arguments: Dict) -> Dict:
187 | """Handle search and modify workflow"""
188 | try:
189 | # 1. Search Phase
190 | content_scanner = self.get_tool("content_scanner")
191 | search_results = await content_scanner.execute({
192 | "operation": "search",
193 | "pattern": arguments.get("search_pattern"),
194 | "scope": arguments.get("scope", "current_file")
195 | })
196 |
197 | if not search_results.get("success"):
198 | return {"error": "Search failed", "details": search_results.get("error")}
199 |
200 | # 2. Analysis Phase
201 | affected_locations = []
202 | for result in search_results.get("results", []):
203 | analyzer = self.get_tool("file_analyzer")
204 | analysis = await analyzer.execute({
205 | "file_path": result["file"],
206 | "line_range": result["line_range"]
207 | })
208 |
209 | if analysis.get("success"):
210 | affected_locations.append({
211 | "file": result["file"],
212 | "location": result["location"],
213 | "analysis": analysis["data"]
214 | })
215 |
216 | # 3. Modification Phase
217 | modifications = []
218 | for location in affected_locations:
219 | if self._is_code_file(location["file"]):
220 |                     mod_result = await self._handle_code_modification({
221 |                         "file_path": location["file"],
222 |                         "section": location["location"],
223 |                         "content": arguments.get("replacement"),
224 |                         "mode": arguments.get("mode", "safe")
225 |                     })
226 |                 else:
227 |                     mod_result = await self._handle_file_modification({
228 |                         "file_path": location["file"],
229 |                         "section": location["location"],
230 |                         "content": arguments.get("replacement")
231 |                     })
232 |
233 | modifications.append({
234 | "location": location,
235 | "result": mod_result
236 | })
237 |
238 | return {
239 | "success": True,
240 | "search_results": search_results.get("results", []),
241 | "affected_locations": affected_locations,
242 | "modifications": modifications
243 | }
244 |
245 | except Exception as e:
246 | return {"error": str(e), "stage": "search_modify"}
247 |
248 | def _is_code_file(self, file_path: str) -> bool:
249 | """Determine if a file is a code file"""
250 | return Path(file_path).suffix.lower() in analysis_config.analyzable_extensions
251 |
252 |
--------------------------------------------------------------------------------
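
A minimal usage sketch for the workflow dispatcher above, assuming these methods belong to the ToolManager class that handlers.py imports and that its constructor wires up the code_modifier backend; the path, section range, and replacement content below are purely illustrative:

    import asyncio

    from mcp_code_analyzer.tools.manager import ToolManager

    async def demo() -> None:
        manager = ToolManager()
        # "modify_code" runs dependency analysis first because the operation is
        # "modify", then delegates the edit to the code modifier.
        result = await manager.execute_workflow("modify_code", {
            "file_path": "example.py",                      # illustrative path
            "operation": "modify",
            "section": {"start": 10, "end": 20},
            "content": "def updated_helper():\n    return 42\n",
            "description": "Replace outdated helper",
        })
        print(result)

    asyncio.run(demo())
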
/mcp_code_analyzer/tools/file_tools.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from .base import BaseTool, safe_read_file
4 | from .logger import LogManager
5 | import logging
6 | import shutil
7 | from datetime import datetime
8 | from pathlib import Path
9 | from typing import Dict, Any, Union, List
10 | import ast
11 | import astroid
12 | import json
13 | from radon.complexity import cc_visit
14 | from radon.metrics import mi_visit
15 | from radon.raw import analyze
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
20 | class MCPFileOperations(BaseTool):
21 | """MCP compatible file operations implementation"""
22 |
23 | def __init__(self):
24 | super().__init__()
25 | self._active_streams = {}
26 | self._file_locks = {}
27 |
28 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
29 | """Execute file operations with MCP protocol support"""
30 | operation = arguments.get('operation')
31 | if not operation:
32 | return {"error": "Operation is required"}
33 |
34 | operations = {
35 | 'analyze': self._analyze_file,
36 | 'create': self._create_file,
37 | 'modify': self._modify_file,
38 | 'stream': self._handle_stream
39 | }
40 |
41 | if operation not in operations:
42 | return {
43 | "error": f"Unknown operation: {operation}",
44 | "available_operations": list(operations.keys())
45 | }
46 |
47 | try:
48 | result = await operations[operation](arguments)
49 | return {
50 | "success": True,
51 | "operation": operation,
52 | "timestamp": datetime.now().isoformat(),
53 | "data": result
54 | }
55 | except Exception as e:
56 | logger.error(f"File operation failed: {e}")
57 | return {
58 | "success": False,
59 | "operation": operation,
60 | "error": str(e)
61 | }
62 |
63 | async def _analyze_file(self, args: Dict[str, Any]) -> Dict[str, Any]:
64 | """Analyze file with enhanced error handling"""
65 | path = args.get('path')
66 | if not path:
67 | raise ValueError("Path is required for analysis")
68 |
69 | path_obj = Path(path)
70 | if not path_obj.exists():
71 | raise FileNotFoundError(f"File not found: {path}")
72 |
73 | if not path_obj.is_file():
74 | raise ValueError(f"Not a file: {path}")
75 |
76 | try:
77 | stat = path_obj.stat()
78 |
79 | # Basic file info
80 | result = {
81 | "path": str(path_obj),
82 | "size": stat.st_size,
83 | "created": datetime.fromtimestamp(stat.st_ctime).isoformat(),
84 | "modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
85 | "type": path_obj.suffix,
86 | "permissions": oct(stat.st_mode)[-3:]
87 | }
88 |
89 | # Add content analysis if requested
90 | if args.get('analyze_content', False):
91 | result["content_analysis"] = await self._analyze_content(path_obj)
92 |
93 | return result
94 |
95 | except Exception as e:
96 | raise RuntimeError(f"Analysis failed: {e}")
97 |
98 | async def _create_file(self, args: Dict[str, Any]) -> Dict[str, Any]:
99 | """Create file with MCP protocol support"""
100 | path = args.get('path')
101 | content = args.get('content', '')
102 | overwrite = args.get('overwrite', False)
103 |
104 | if not path:
105 | raise ValueError("Path is required for file creation")
106 |
107 | path_obj = Path(path)
108 |         backup_path = None  # set when an existing file is backed up before overwrite
109 | try:
110 | # Create parent directories
111 | path_obj.parent.mkdir(parents=True, exist_ok=True)
112 |
113 | # Handle existing file
114 | if path_obj.exists():
115 | if not overwrite:
116 | raise FileExistsError(f"File already exists: {path}")
117 | backup_path = self._create_backup(path_obj)
118 |
119 | # Write file with explicit encoding
120 | with path_obj.open('w', encoding='utf-8') as f:
121 | f.write(content)
122 |
123 | return {
124 | "path": str(path_obj),
125 | "size": len(content.encode('utf-8')),
126 |                 "backup_path": str(backup_path) if backup_path else None
127 | }
128 |
129 | except Exception as e:
130 | raise RuntimeError(f"File creation failed: {e}")
131 |
132 | async def _modify_file(self, args: Dict[str, Any]) -> Dict[str, Any]:
133 | """Modify file with section support"""
134 | path = args.get('path')
135 | content = args.get('content')
136 | section = args.get('section')
137 |
138 | if not path:
139 | raise ValueError("Path is required for modification")
140 |
141 | if content is None:
142 | raise ValueError("Content is required for modification")
143 |
144 | path_obj = Path(path)
145 | if not path_obj.exists():
146 | raise FileNotFoundError(f"File not found: {path}")
147 |
148 | try:
149 | # Create backup
150 | backup_path = self._create_backup(path_obj)
151 |
152 | # Read current content
153 | with path_obj.open('r', encoding='utf-8') as f:
154 | current_content = f.read()
155 |
156 | # Handle section modification if specified
157 | if section:
158 |                 lines = current_content.splitlines()
159 |                 start = section.get('start', 0)
160 |                 end = section.get('end', len(lines))
161 |
162 | if start < 0 or start >= len(lines) or end < 0 or end > len(lines):
163 | raise ValueError("Invalid section range")
164 |
165 | new_lines = lines[:start] + content.splitlines() + lines[end:]
166 | final_content = '\n'.join(new_lines)
167 | else:
168 | final_content = content
169 |
170 | # Write modified content
171 | with path_obj.open('w', encoding='utf-8') as f:
172 | f.write(final_content)
173 |
174 | return {
175 | "path": str(path_obj),
176 | "size": len(final_content.encode('utf-8')),
177 | "backup_path": str(backup_path),
178 | "sections_modified": bool(section)
179 | }
180 |
181 | except Exception as e:
182 | # Restore from backup if exists
183 | if 'backup_path' in locals():
184 | try:
185 | shutil.copy2(backup_path, path_obj)
186 | except Exception as restore_error:
187 | logger.error(f"Failed to restore backup: {restore_error}")
188 |
189 | raise RuntimeError(f"File modification failed: {e}")
190 |
191 | async def _handle_stream(self, args: Dict[str, Any]) -> Dict[str, Any]:
192 | """Handle streaming operations"""
193 | path = args.get('path')
194 | operation = args.get('stream_operation')
195 | content = args.get('content')
196 |
197 | if not path:
198 | raise ValueError("Path is required for streaming")
199 |
200 | if not operation:
201 | raise ValueError("Stream operation is required")
202 |
203 | path_obj = Path(path)
204 | stream_id = str(path_obj)
205 |
206 | try:
207 | if operation == 'start':
208 | return await self._start_stream(path_obj, args)
209 | elif operation == 'write':
210 | if not content:
211 | raise ValueError("Content is required for write operation")
212 | return await self._write_stream(path_obj, content, args)
213 | elif operation == 'finish':
214 | return await self._finish_stream(path_obj)
215 | else:
216 | raise ValueError(f"Unknown stream operation: {operation}")
217 |
218 | except Exception as e:
219 | raise RuntimeError(f"Stream operation failed: {e}")
220 |
221 | async def _analyze_content(self, path: Path) -> Dict[str, Any]:
222 | """Analyze file content"""
223 | try:
224 | with path.open('r', encoding='utf-8') as f:
225 | content = f.read()
226 |
227 | lines = content.splitlines()
228 | return {
229 | "line_count": len(lines),
230 | "empty_lines": len([l for l in lines if not l.strip()]),
231 | "average_line_length": sum(len(l) for l in lines) / len(lines) if lines else 0,
232 | "byte_size": len(content.encode('utf-8'))
233 | }
234 | except Exception as e:
235 | logger.error(f"Content analysis failed: {e}")
236 | return {}
237 |
238 | def _create_backup(self, path: Path) -> Path:
239 | """Create backup of existing file"""
240 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
241 | backup_path = path.parent / f"{path.stem}_backup_{timestamp}{path.suffix}"
242 | shutil.copy2(path, backup_path)
243 | return backup_path
244 |
245 | async def _start_stream(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
246 | """Start a new file stream"""
247 | stream_id = str(path)
248 | if stream_id in self._active_streams:
249 | raise RuntimeError(f"Stream already exists for {path}")
250 |
251 | self._file_locks[stream_id] = asyncio.Lock()
252 |
253 | async with self._file_locks[stream_id]:
254 | backup_path = self._create_backup(path) if path.exists() else None
255 |
256 | self._active_streams[stream_id] = {
257 | 'started_at': datetime.now().isoformat(),
258 | 'buffer': [],
259 | 'total_bytes': 0,
260 | 'backup_path': str(backup_path) if backup_path else None
261 | }
262 |
263 | return {
264 | "stream_id": stream_id,
265 | "started_at": self._active_streams[stream_id]['started_at'],
266 | "backup_created": backup_path is not None
267 | }
268 |
269 | async def _write_stream(self, path: Path, content: str, args: Dict[str, Any]) -> Dict[str, Any]:
270 | """Write content to stream"""
271 | stream_id = str(path)
272 | if stream_id not in self._active_streams:
273 | raise RuntimeError(f"No active stream for {path}")
274 |
275 | async with self._file_locks[stream_id]:
276 | stream = self._active_streams[stream_id]
277 |
278 | try:
279 | with path.open('a', encoding='utf-8') as f:
280 | f.write(content)
281 |
282 | stream['total_bytes'] += len(content.encode('utf-8'))
283 | return {
284 | "bytes_written": len(content.encode('utf-8')),
285 | "total_bytes": stream['total_bytes']
286 | }
287 |
288 | except Exception as e:
289 | raise RuntimeError(f"Stream write failed: {e}")
290 |
291 | async def _finish_stream(self, path: Path) -> Dict[str, Any]:
292 | """Finish and cleanup stream"""
293 | stream_id = str(path)
294 | if stream_id not in self._active_streams:
295 | raise RuntimeError(f"No active stream for {path}")
296 |
297 | async with self._file_locks[stream_id]:
298 | stream = self._active_streams[stream_id]
299 |
300 | try:
301 | # Remove backup if exists
302 | if stream['backup_path']:
303 | backup_path = Path(stream['backup_path'])
304 | if backup_path.exists():
305 | backup_path.unlink()
306 |
307 | # Calculate duration
308 | started = datetime.fromisoformat(stream['started_at'])
309 | duration = (datetime.now() - started).total_seconds()
310 |
311 | result = {
312 | "stream_id": stream_id,
313 | "total_bytes": stream['total_bytes'],
314 | "duration_seconds": duration
315 | }
316 |
317 | # Cleanup
318 | del self._active_streams[stream_id]
319 | del self._file_locks[stream_id]
320 |
321 | return result
322 |
323 | except Exception as e:
324 | raise RuntimeError(f"Failed to finish stream: {e}")
325 |
326 |
327 | class FileAnalyzer(BaseTool):
328 | """File level analysis tool"""
329 |
330 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
331 | """Execute file analysis"""
332 | file_path = arguments.get('file_path')
333 | if not file_path:
334 | return {"error": "No file path provided"}
335 |
336 | try:
337 | # Convert to Path object and resolve
338 | path = Path(file_path).resolve()
339 |
340 | if not path.exists():
341 | found_path = None
342 | for search_path in Path('.').rglob(Path(file_path).name):
343 | if search_path.exists() and not self._should_skip(search_path):
344 | found_path = search_path
345 | path = found_path
346 | break
347 |
348 | if not found_path:
349 | return {"error": f"File not found: {file_path}"}
350 |
351 | # Read file content with proper encoding handling
352 | content = safe_read_file(str(path))
353 | if content is None:
354 | return {"error": f"Could not read file: {path}"}
355 |
356 | # Create result with content encoded as UTF-8
357 | result = {
358 | "path": str(path),
359 | "type": path.suffix,
360 | "size": path.stat().st_size,
361 | "content": content,
362 | "metrics": self._analyze_metrics(content) if path.suffix == '.py' else {},
363 | "encoding": "utf-8" # Add encoding information
364 | }
365 |
366 | return result
367 |
368 | except Exception as e:
369 | logger.error(f"Error analyzing file {file_path}: {e}")
370 | return {"error": str(e)}
371 |
372 | async def analyze_file(self, file_path: Union[str, Path]) -> Dict[str, Any]:
373 | """Analyze a single file"""
374 | try:
375 | path = Path(file_path).resolve()
376 | if not path.exists():
377 | return {"error": f"File not found: {path}"}
378 |
379 | if path.stat().st_size > self.system_config.MAX_FILE_SIZE:
380 | return {"error": f"File too large: {path}"}
381 |
382 | result = {
383 | "path": str(path),
384 | "type": path.suffix,
385 | "size": path.stat().st_size,
386 | "metrics": {},
387 | "content_analysis": {}
388 | }
389 |
390 | try:
391 | with open(path, 'r', encoding='utf-8') as f:
392 | content = f.read()
393 |
394 | if path.suffix == '.py':
395 | result["metrics"] = self._analyze_python_file(content)
396 | elif path.suffix in ['.js', '.jsx']:
397 | result["metrics"] = self._analyze_javascript_file(content)
398 | elif path.suffix == '.json':
399 | result["metrics"] = self._analyze_json_file(content)
400 |
401 | result["content_analysis"] = {
402 | "line_count": len(content.splitlines()),
403 | "size_human": self._format_size(path.stat().st_size),
404 | "last_modified": path.stat().st_mtime,
405 | }
406 |
407 | except UnicodeDecodeError:
408 | result["content_analysis"] = {
409 | "note": "Binary file - content analysis skipped"
410 | }
411 |
412 | return result
413 |
414 | except Exception as e:
415 | logger.error(f"Error analyzing file {file_path}: {e}")
416 | return {"error": str(e)}
417 |
418 | def _analyze_python_file(self, content: str) -> Dict[str, Any]:
419 | try:
420 | tree = ast.parse(content)
421 | return {
422 | "classes": len([n for n in ast.walk(tree) if isinstance(n, ast.ClassDef)]),
423 | "functions": len([n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]),
424 | "imports": len([n for n in ast.walk(tree) if isinstance(n, (ast.Import, ast.ImportFrom))])
425 | }
426 |         except Exception:
427 | return {}
428 |
429 | def _analyze_javascript_file(self, content: str) -> Dict[str, Any]:
430 | metrics = {
431 | "component_count": content.count("export default"),
432 | "hook_usage": content.count("useState") + content.count("useEffect"),
433 | "jsx_elements": content.count("return ("),
434 | }
435 | return metrics
436 |
437 | def _analyze_json_file(self, content: str) -> Dict[str, Any]:
438 | try:
439 | data = json.loads(content)
440 | return {
441 | "keys": len(data) if isinstance(data, dict) else "not-dict",
442 | "is_valid": True
443 | }
444 |         except Exception:
445 | return {"is_valid": False}
446 |
447 | def _format_size(self, size: int) -> str:
448 | for unit in ['B', 'KB', 'MB', 'GB']:
449 | if size < 1024:
450 | return f"{size:.1f} {unit}"
451 | size /= 1024
452 | return f"{size:.1f} TB"
453 |
454 | def _analyze_metrics(self, content: str) -> Dict[str, Any]:
455 | try:
456 |             return {  # flatten radon objects so the result stays JSON-serializable
457 |                 "complexity": [{"name": b.name, "lineno": b.lineno, "complexity": b.complexity} for b in cc_visit(content)],
458 |                 "maintainability": mi_visit(content, multi=True),
459 |                 "raw": analyze(content)._asdict()
460 | }
461 | except Exception as e:
462 | logger.error(f"Error analyzing metrics: {e}")
463 | return {}
464 |
465 | async def _analyze_quality(self, content: str) -> Dict[str, Any]:
466 | """Analyze code quality"""
467 | try:
468 | tree = ast.parse(content)
469 | quality = {
470 | "issues": [],
471 | "suggestions": []
472 | }
473 |
474 | for node in ast.walk(tree):
475 | if isinstance(node, ast.FunctionDef):
476 | if len(node.body) > 50:
477 | quality["issues"].append({
478 | "type": "long_function",
479 | "location": node.lineno,
480 | "message": f"Function {node.name} is too long ({len(node.body)} lines)"
481 | })
482 |                     if not ast.get_docstring(node):
483 |                         quality["suggestions"].append({
484 |                             "type": "function_doc", "location": node.lineno,
485 |                             "message": f"Consider adding docstring to function {node.name}"
486 |                         })
487 |
488 | elif isinstance(node, ast.ClassDef):
489 | methods = len([n for n in node.body if isinstance(n, ast.FunctionDef)])
490 | if methods > 10:
491 | quality["issues"].append({
492 | "type": "complex_class",
493 | "location": node.lineno,
494 | "message": f"Class {node.name} has too many methods ({methods})"
495 | })
496 |
497 | return quality
498 |
499 | except Exception as e:
500 | logger.error(f"Error analyzing quality: {e}")
501 | return {}
502 |
503 | async def _analyze_patterns(self, content: str) -> Dict[str, Any]:
504 | """Analyze code patterns"""
505 | try:
506 | tree = astroid.parse(content)
507 | patterns = {
508 | "design_patterns": [],
509 | "anti_patterns": [],
510 | "code_smells": []
511 | }
512 |
513 | for class_node in tree.nodes_of_class(astroid.ClassDef):
514 | # Design patterns
515 | if any(method.name == 'get_instance' for method in class_node.methods()):
516 | patterns["design_patterns"].append({
517 | "type": "singleton",
518 | "location": class_node.lineno,
519 | "class": class_node.name
520 | })
521 |
522 | # Anti-patterns
523 | method_count = len(list(class_node.methods()))
524 | if method_count > 20:
525 | patterns["anti_patterns"].append({
526 | "type": "god_class",
527 | "location": class_node.lineno,
528 | "class": class_node.name,
529 | "methods": method_count
530 | })
531 |
532 | return patterns
533 |
534 | except Exception as e:
535 | logger.error(f"Error analyzing patterns: {e}")
536 | return {}
537 |
--------------------------------------------------------------------------------
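
MCPFileOperations funnels analyze, create, modify, and stream operations through a single execute() entry point. A short sketch of calling it directly, assuming BaseTool needs no constructor arguments; the paths and contents are illustrative:

    import asyncio

    from mcp_code_analyzer.tools.file_tools import MCPFileOperations

    async def demo() -> None:
        ops = MCPFileOperations()

        # Create the file; with overwrite=True an existing file is backed up first.
        created = await ops.execute({
            "operation": "create",
            "path": "notes/todo.txt",        # illustrative path
            "content": "first line\n",
            "overwrite": True,
        })

        # Replace line 0 (up to, but not including, line 1) via the section option.
        modified = await ops.execute({
            "operation": "modify",
            "path": "notes/todo.txt",
            "content": "replacement line",
            "section": {"start": 0, "end": 1},
        })
        print(created["success"], modified["success"])

    asyncio.run(demo())
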
/mcp_code_analyzer/tools/version_manager.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | from typing import Dict, Any, List, Optional
4 | from datetime import datetime
5 | import json
6 | import shutil
7 | import hashlib
8 | from dataclasses import dataclass
9 | from .base import BaseTool
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 | @dataclass
14 | class Version:
15 | """Version information container"""
16 | id: str
17 | timestamp: str
18 | hash: str
19 | metadata: Dict[str, Any]
20 | backup_path: Path
21 |
22 | @dataclass
23 | class ChangeInfo:
24 | """Change information container"""
25 | type: str # 'modify', 'create', 'delete'
26 | timestamp: str
27 | description: str
28 | metadata: Dict[str, Any]
29 |
30 | class VersionManager(BaseTool):
31 | """Advanced version control and change tracking tool"""
32 |
33 | def __init__(self):
34 | super().__init__()
35 | self._version_store = {}
36 | self._change_history = {}
37 | self._backup_root = Path('backups')
38 | self._metadata_file = self._backup_root / 'version_metadata.json'
39 | self._initialize_storage()
40 |
41 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
42 | operation = arguments.get('operation', 'create_version')
43 | target_path = arguments.get('path')
44 |
45 | if not target_path:
46 | return {"error": "Path is required"}
47 |
48 | operations = {
49 | 'create_version': self._create_version,
50 | 'restore_version': self._restore_version,
51 | 'get_history': self._get_version_history,
52 | 'compare_versions': self._compare_versions,
53 | 'get_changes': self._get_changes,
54 | 'cleanup': self._cleanup_versions
55 | }
56 |
57 | if operation not in operations:
58 | return {"error": f"Unknown operation: {operation}"}
59 |
60 | try:
61 | result = await operations[operation](Path(target_path), arguments)
62 | return {"success": True, "data": result}
63 | except Exception as e:
64 | logger.error(f"VersionManager operation failed: {e}")
65 | return {"success": False, "error": str(e)}
66 |
67 | def _initialize_storage(self) -> None:
68 | """Initialize version storage"""
69 | try:
70 | self._backup_root.mkdir(parents=True, exist_ok=True)
71 | if self._metadata_file.exists():
72 | metadata = json.loads(self._metadata_file.read_text())
73 | self._version_store = metadata.get('versions', {})
74 | self._change_history = metadata.get('changes', {})
75 | else:
76 | self._save_metadata()
77 | except Exception as e:
78 | logger.error(f"Failed to initialize storage: {e}")
79 |
80 | def _save_metadata(self) -> None:
81 | """Save version metadata"""
82 | try:
83 | metadata = {
84 | 'versions': self._version_store,
85 | 'changes': self._change_history,
86 | 'last_updated': datetime.now().isoformat()
87 | }
88 | self._metadata_file.write_text(json.dumps(metadata, indent=2))
89 | except Exception as e:
90 | logger.error(f"Failed to save metadata: {e}")
91 |
92 | async def _create_version(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
93 | """Create new version of a file"""
94 | try:
95 | if not path.exists():
96 | raise FileNotFoundError(f"File not found: {path}")
97 |
98 | description = args.get('description', '')
99 | tags = args.get('tags', [])
100 |
101 | # Calculate file hash
102 | file_hash = self._calculate_file_hash(path)
103 |
104 | # Check if identical version exists
105 | for version in self._get_versions(path):
106 | if version.hash == file_hash:
107 | return {
108 | "message": "Identical version already exists",
109 | "version_id": version.id,
110 | "timestamp": version.timestamp
111 | }
112 |
113 | # Create version ID
114 | version_id = self._generate_version_id(path)
115 | timestamp = datetime.now().isoformat()
116 |
117 | # Create backup
118 | backup_path = self._create_backup(path, version_id)
119 |
120 | # Store version information
121 | version = Version(
122 | id=version_id,
123 | timestamp=timestamp,
124 | hash=file_hash,
125 | metadata={
126 | 'description': description,
127 | 'tags': tags,
128 | 'size': path.stat().st_size,
129 | 'creator': args.get('creator', 'unknown')
130 | },
131 | backup_path=backup_path
132 | )
133 |
134 | self._add_version(path, version)
135 |
136 | # Record change
137 | change = ChangeInfo(
138 | type='create_version',
139 | timestamp=timestamp,
140 | description=description,
141 | metadata={
142 | 'version_id': version_id,
143 | 'tags': tags
144 | }
145 | )
146 |
147 | self._record_change(path, change)
148 | self._save_metadata()
149 |
150 | return {
151 | "version_id": version_id,
152 | "timestamp": timestamp,
153 | "backup_path": str(backup_path),
154 | "hash": file_hash
155 | }
156 |
157 | except Exception as e:
158 | raise RuntimeError(f"Failed to create version: {e}")
159 |
160 | async def _restore_version(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
161 | """Restore file to specific version"""
162 | try:
163 | version_id = args.get('version_id')
164 | if not version_id:
165 | raise ValueError("Version ID is required")
166 |
167 | version = self._find_version(path, version_id)
168 | if not version:
169 | raise ValueError(f"Version not found: {version_id}")
170 |
171 | # Create backup of current state
172 | current_backup = self._create_backup(path, 'pre_restore_backup')
173 |
174 | # Restore from version
175 | shutil.copy2(version.backup_path, path)
176 |
177 | # Record change
178 | change = ChangeInfo(
179 | type='restore',
180 | timestamp=datetime.now().isoformat(),
181 | description=f"Restored to version {version_id}",
182 | metadata={
183 | 'version_id': version_id,
184 | 'previous_backup': str(current_backup)
185 | }
186 | )
187 |
188 | self._record_change(path, change)
189 | self._save_metadata()
190 |
191 | return {
192 | "message": "Version restored successfully",
193 | "version_id": version_id,
194 | "timestamp": version.timestamp,
195 | "previous_backup": str(current_backup)
196 | }
197 |
198 | except Exception as e:
199 | raise RuntimeError(f"Failed to restore version: {e}")
200 |
201 | async def _get_version_history(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
202 | """Get version history of a file"""
203 | try:
204 | versions = self._get_versions(path)
205 | changes = self._get_file_changes(path)
206 |
207 | return {
208 | "file": str(path),
209 | "versions": [
210 | {
211 | "id": v.id,
212 | "timestamp": v.timestamp,
213 | "hash": v.hash,
214 | "metadata": v.metadata
215 | }
216 | for v in versions
217 | ],
218 | "changes": [
219 | {
220 | "type": c.type,
221 | "timestamp": c.timestamp,
222 | "description": c.description,
223 | "metadata": c.metadata
224 | }
225 | for c in changes
226 | ],
227 | "statistics": {
228 | "total_versions": len(versions),
229 | "total_changes": len(changes),
230 | "first_version": versions[0].timestamp if versions else None,
231 | "last_version": versions[-1].timestamp if versions else None
232 | }
233 | }
234 |
235 | except Exception as e:
236 | raise RuntimeError(f"Failed to get version history: {e}")
237 |
238 | async def _compare_versions(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
239 | """Compare two versions of a file"""
240 | try:
241 | version1_id = args.get('version1')
242 | version2_id = args.get('version2')
243 |
244 | if not (version1_id and version2_id):
245 | raise ValueError("Both version IDs are required")
246 |
247 | v1 = self._find_version(path, version1_id)
248 | v2 = self._find_version(path, version2_id)
249 |
250 | if not (v1 and v2):
251 | raise ValueError("One or both versions not found")
252 |
253 | # Compare files
254 | from difflib import unified_diff
255 |
256 | with open(v1.backup_path) as f1, open(v2.backup_path) as f2:
257 | diff = list(unified_diff(
258 | f1.readlines(),
259 | f2.readlines(),
260 | fromfile=f'version_{version1_id}',
261 | tofile=f'version_{version2_id}'
262 | ))
263 |
264 | return {
265 | "version1": {
266 | "id": v1.id,
267 | "timestamp": v1.timestamp,
268 | "metadata": v1.metadata
269 | },
270 | "version2": {
271 | "id": v2.id,
272 | "timestamp": v2.timestamp,
273 | "metadata": v2.metadata
274 | },
275 | "differences": {
276 | "total_changes": len(diff),
277 | "diff": diff
278 | },
279 | "analysis": {
280 | "size_change": v2.metadata['size'] - v1.metadata['size'],
281 | "time_between": (
282 | datetime.fromisoformat(v2.timestamp) -
283 | datetime.fromisoformat(v1.timestamp)
284 | ).total_seconds()
285 | }
286 | }
287 |
288 | except Exception as e:
289 | raise RuntimeError(f"Failed to compare versions: {e}")
290 |
291 | async def _get_changes(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
292 | """Get detailed change history"""
293 | try:
294 | changes = self._get_file_changes(path)
295 | filtered_changes = []
296 |
297 | # Apply filters
298 | change_type = args.get('type')
299 | start_date = args.get('start_date')
300 | end_date = args.get('end_date')
301 |
302 | for change in changes:
303 | if change_type and change.type != change_type:
304 | continue
305 |
306 | change_date = datetime.fromisoformat(change.timestamp)
307 |
308 | if start_date and change_date < datetime.fromisoformat(start_date):
309 | continue
310 |
311 | if end_date and change_date > datetime.fromisoformat(end_date):
312 | continue
313 |
314 | filtered_changes.append(change)
315 |
316 | return {
317 | "file": str(path),
318 | "changes": [
319 | {
320 | "type": c.type,
321 | "timestamp": c.timestamp,
322 | "description": c.description,
323 | "metadata": c.metadata
324 | }
325 | for c in filtered_changes
326 | ],
327 | "statistics": {
328 | "total_changes": len(filtered_changes),
329 | "changes_by_type": self._count_changes_by_type(filtered_changes)
330 | }
331 | }
332 |
333 | except Exception as e:
334 | raise RuntimeError(f"Failed to get changes: {e}")
335 |
336 | async def _cleanup_versions(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
337 | """Clean up old versions"""
338 | try:
339 | keep_latest = args.get('keep_latest', 5)
340 | keep_days = args.get('keep_days', 30)
341 |
342 | versions = self._get_versions(path)
343 | if not versions:
344 | return {"message": "No versions to clean up"}
345 |
346 | cutoff_date = datetime.now().timestamp() - (keep_days * 86400)
347 | versions_to_delete = []
348 | kept_versions = []
349 |
350 |             # Keep the newest versions; everything older becomes a deletion candidate
351 |             kept_versions = list(versions[-keep_latest:])
352 |             if len(versions) > keep_latest:
353 |                 versions_to_delete.extend(versions[:-keep_latest])
354 |
355 | # Process remaining versions
356 | for version in versions_to_delete[:]:
357 | version_date = datetime.fromisoformat(version.timestamp).timestamp()
358 | if version_date > cutoff_date:
359 | versions_to_delete.remove(version)
360 | kept_versions.append(version)
361 |
362 | # Delete versions
363 | for version in versions_to_delete:
364 | if version.backup_path.exists():
365 | version.backup_path.unlink()
366 |
367 | # Update version store
368 | self._version_store[str(path)] = [
369 | {
370 | 'id': v.id,
371 | 'timestamp': v.timestamp,
372 | 'hash': v.hash,
373 | 'metadata': v.metadata,
374 | 'backup_path': str(v.backup_path)
375 | }
376 | for v in kept_versions
377 | ]
378 |
379 | self._save_metadata()
380 |
381 | return {
382 | "deleted_versions": len(versions_to_delete),
383 | "kept_versions": len(kept_versions),
384 | "space_freed": sum(v.metadata['size'] for v in versions_to_delete)
385 | }
386 |
387 | except Exception as e:
388 | raise RuntimeError(f"Failed to clean up versions: {e}")
389 |
390 | def _generate_version_id(self, path: Path) -> str:
391 | """Generate unique version ID"""
392 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
393 | unique_data = f"{path}:{timestamp}:{self._calculate_file_hash(path)}"
394 | return hashlib.md5(unique_data.encode()).hexdigest()[:12]
395 |
396 | def _calculate_file_hash(self, path: Path) -> str:
397 | """Calculate file hash"""
398 | hasher = hashlib.sha256()
399 | with open(path, 'rb') as f:
400 | for chunk in iter(lambda: f.read(4096), b''):
401 | hasher.update(chunk)
402 | return hasher.hexdigest()
403 |
404 | def _create_backup(self, path: Path, version_id: str) -> Path:
405 | """Create backup of file"""
406 | backup_dir = self._backup_root / path.stem
407 | backup_dir.mkdir(parents=True, exist_ok=True)
408 |
409 | backup_path = backup_dir / f"{version_id}{path.suffix}"
410 | shutil.copy2(path, backup_path)
411 |
412 | return backup_path
413 |
414 | def _add_version(self, path: Path, version: Version) -> None:
415 | """Add version to store"""
416 | if str(path) not in self._version_store:
417 | self._version_store[str(path)] = []
418 |
419 | self._version_store[str(path)].append({
420 | 'id': version.id,
421 | 'timestamp': version.timestamp,
422 | 'hash': version.hash,
423 | 'metadata': version.metadata,
424 | 'backup_path': str(version.backup_path)
425 | })
426 |
427 | def _record_change(self, path: Path, change: ChangeInfo) -> None:
428 | """Record a change"""
429 | if str(path) not in self._change_history:
430 | self._change_history[str(path)] = []
431 |
432 | self._change_history[str(path)].append({
433 | 'type': change.type,
434 | 'timestamp': change.timestamp,
435 | 'description': change.description,
436 | 'metadata': change.metadata
437 | })
438 |
439 | def _get_versions(self, path: Path) -> List[Version]:
440 | """Get all versions of a file"""
441 | versions = []
442 | for v in self._version_store.get(str(path), []):
443 | versions.append(Version(
444 | id=v['id'],
445 | timestamp=v['timestamp'],
446 | hash=v['hash'],
447 | metadata=v['metadata'],
448 | backup_path=Path(v['backup_path'])
449 | ))
450 | return sorted(versions, key=lambda v: v.timestamp)
451 |
452 | def _find_version(self, path: Path, version_id: str) -> Optional[Version]:
453 | """Find specific version"""
454 | versions = self._get_versions(path)
455 | for version in versions:
456 | if version.id == version_id:
457 | return version
458 | return None
459 |
460 | def _get_file_changes(self, path: Path) -> List[ChangeInfo]:
461 | """Get all changes for a file"""
462 | changes = []
463 | for c in self._change_history.get(str(path), []):
464 | changes.append(ChangeInfo(
465 | type=c['type'],
466 | timestamp=c['timestamp'],
467 | description=c['description'],
468 | metadata=c['metadata']
469 | ))
470 | return sorted(changes, key=lambda c: c.timestamp)
471 |
472 | def _count_changes_by_type(self, changes: List[ChangeInfo]) -> Dict[str, int]:
473 | """Count changes by type"""
474 | counts = {}
475 | for change in changes:
476 | counts[change.type] = counts.get(change.type, 0) + 1
477 | return counts
478 |
479 | async def _cleanup_backup_directory(self) -> None:
480 | """Clean up backup directory"""
481 | try:
482 | # Find orphaned backups
483 | used_backups = set()
484 | for versions in self._version_store.values():
485 | for version in versions:
486 | used_backups.add(version['backup_path'])
487 |
488 | # Remove unused backup files
489 | for backup_file in self._backup_root.rglob('*'):
490 | if backup_file.is_file() and str(backup_file) not in used_backups:
491 | try:
492 | backup_file.unlink()
493 | except Exception as e:
494 | logger.error(f"Failed to remove orphaned backup {backup_file}: {e}")
495 |
496 | # Remove empty directories
497 | for backup_dir in sorted(self._backup_root.rglob('*'), reverse=True):
498 | if backup_dir.is_dir():
499 | try:
500 | backup_dir.rmdir() # Will only succeed if directory is empty
501 | except Exception:
502 | pass # Directory not empty, skip
503 |
504 | except Exception as e:
505 | logger.error(f"Failed to cleanup backup directory: {e}")
506 |
507 | async def _validate_backups(self) -> Dict[str, Any]:
508 | """Validate backup integrity"""
509 | validation_results = {
510 | "valid_backups": [],
511 | "invalid_backups": [],
512 | "missing_backups": []
513 | }
514 |
515 | try:
516 | for file_path, versions in self._version_store.items():
517 | for version in versions:
518 | backup_path = Path(version['backup_path'])
519 | if not backup_path.exists():
520 | validation_results["missing_backups"].append({
521 | "file": file_path,
522 | "version_id": version['id'],
523 | "backup_path": str(backup_path)
524 | })
525 | continue
526 |
527 | # Verify hash
528 | current_hash = self._calculate_file_hash(backup_path)
529 | if current_hash != version['hash']:
530 | validation_results["invalid_backups"].append({
531 | "file": file_path,
532 | "version_id": version['id'],
533 | "expected_hash": version['hash'],
534 | "actual_hash": current_hash
535 | })
536 | else:
537 | validation_results["valid_backups"].append({
538 | "file": file_path,
539 | "version_id": version['id'],
540 | "backup_path": str(backup_path)
541 | })
542 |
543 | return validation_results
544 |
545 | except Exception as e:
546 | logger.error(f"Failed to validate backups: {e}")
547 | return validation_results
548 |
549 | async def _analyze_storage_usage(self) -> Dict[str, Any]:
550 | """Analyze backup storage usage"""
551 | try:
552 | storage_info = {
553 | "total_size": 0,
554 | "backup_count": 0,
555 | "files": {},
556 | "usage_by_date": {}
557 | }
558 |
559 | for file_path, versions in self._version_store.items():
560 | file_info = {
561 | "versions": len(versions),
562 | "total_size": 0,
563 | "oldest_version": None,
564 | "newest_version": None
565 | }
566 |
567 | for version in versions:
568 | backup_path = Path(version['backup_path'])
569 | if backup_path.exists():
570 | size = backup_path.stat().st_size
571 | date = version['timestamp'][:10] # YYYY-MM-DD
572 |
573 | storage_info["total_size"] += size
574 | file_info["total_size"] += size
575 | storage_info["backup_count"] += 1
576 |
577 | # Track usage by date
578 | storage_info["usage_by_date"][date] = \
579 | storage_info["usage_by_date"].get(date, 0) + size
580 |
581 | # Track version timestamps
582 | if not file_info["oldest_version"] or \
583 | version['timestamp'] < file_info["oldest_version"]:
584 | file_info["oldest_version"] = version['timestamp']
585 | if not file_info["newest_version"] or \
586 | version['timestamp'] > file_info["newest_version"]:
587 | file_info["newest_version"] = version['timestamp']
588 |
589 | storage_info["files"][file_path] = file_info
590 |
591 | # Add summary statistics
592 | storage_info["summary"] = {
593 | "average_size_per_backup": (
594 | storage_info["total_size"] / storage_info["backup_count"]
595 | if storage_info["backup_count"] > 0 else 0
596 | ),
597 | "average_versions_per_file": (
598 | storage_info["backup_count"] / len(storage_info["files"])
599 | if storage_info["files"] else 0
600 | ),
601 | "total_size_human": self._format_size(storage_info["total_size"])
602 | }
603 |
604 | return storage_info
605 |
606 | except Exception as e:
607 | logger.error(f"Failed to analyze storage usage: {e}")
608 | return {"error": str(e)}
609 |
610 | def _format_size(self, size: int) -> str:
611 | """Format size in bytes to human readable format"""
612 | for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
613 | if size < 1024:
614 | return f"{size:.2f} {unit}"
615 | size /= 1024
616 | return f"{size:.2f} PB"
--------------------------------------------------------------------------------
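
VersionManager keeps hash-identified backups under a local backups/ directory and records history in version_metadata.json. A short sketch of the create / inspect / restore cycle, assuming the target file already exists; the path and metadata values are illustrative:

    import asyncio

    from mcp_code_analyzer.tools.version_manager import VersionManager

    async def demo() -> None:
        vm = VersionManager()   # creates ./backups and its metadata file if missing

        created = await vm.execute({
            "operation": "create_version",
            "path": "src/app.py",            # illustrative path
            "description": "before refactor",
            "tags": ["refactor"],
        })
        version_id = created["data"]["version_id"]

        history = await vm.execute({"operation": "get_history", "path": "src/app.py"})
        print(history["data"]["statistics"])

        # Roll back; a pre-restore backup of the current state is kept automatically.
        restored = await vm.execute({
            "operation": "restore_version",
            "path": "src/app.py",
            "version_id": version_id,
        })
        print(restored["success"])

    asyncio.run(demo())
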
/mcp_code_analyzer/server/handlers.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from enum import Enum
3 | from typing import Dict, Any, List, Optional
4 | from pathlib import Path
5 | import json
6 | import sys
7 | import mcp.types as types
8 | from mcp.server import Server, NotificationOptions
9 | from mcp.server.models import InitializationOptions
10 | import mcp.server.stdio
11 | from ..tools.manager import ToolManager
12 |
13 | logger = logging.getLogger(__name__)
14 | __all__ = ["MCPServer", "main"]
15 | class MCPServer:
16 | """MCP Server implementation for code analysis"""
17 |
18 | def __init__(self, analyze_paths: List[str]):
19 | self.analyze_paths = []
20 | self.base_path = Path.cwd()
21 | self._setup_paths(analyze_paths)
22 | self.tool_manager = ToolManager()
23 | self.server = Server("code-analyzer")
24 | self._setup_handlers()
25 |
26 | def _setup_paths(self, analyze_paths: List[str]):
27 | """Setup and validate analysis paths"""
28 | for path in analyze_paths:
29 | try:
30 | path_obj = Path(path)
31 | normalized_path = path_obj.resolve() if path_obj.is_absolute() \
32 | else (self.base_path / path_obj).resolve()
33 |
34 | if normalized_path.exists():
35 | self.analyze_paths.append(normalized_path)
36 | logger.info(f"Added valid path: {normalized_path}")
37 | else:
38 | logger.warning(f"Path does not exist: {path}")
39 | except Exception as e:
40 | logger.error(f"Error processing path {path}: {e}")
41 |
42 | if not self.analyze_paths:
43 | self.analyze_paths = [self.base_path]
44 | logger.warning(f"No valid paths provided, using current directory: {self.base_path}")
45 |
46 |
47 | async def _handle_tool_execution(self, name: str, arguments: Dict[str, Any]) -> Any:
48 | """Execute tool with enhanced error handling and logging"""
49 | operation = arguments.get('operation', 'unknown')
50 | path = arguments.get('path') or arguments.get('file_path')
51 |
52 | logger.info(f"Starting tool execution - Name: {name}, Operation: {operation}, Path: {path}")
53 |
54 | try:
55 | # Input validation
56 | if not path and name in ['create_file', 'stream_edit', 'modify_code']:
57 | logger.error(f"No path provided for {name}")
58 | return {"error": "Path is required for this operation"}
59 |
60 | # Special handling for code modification
61 | if name == "code_modifier":
62 | result = await self.tool_manager.execute_tool(name, arguments)
63 | return await self._format_modification_result(result)
64 |
65 | # Get tool instance
66 | tool = self.tool_manager.get_tool(name)
67 | if not tool:
68 | logger.error(f"Tool not found: {name}")
69 | return {"error": f"Tool {name} not found"}
70 |
71 | # Execute tool operation
72 | logger.info(f"Executing {name} with arguments: {arguments}")
73 | result = await self.tool_manager.execute_tool(name, arguments)
74 |
75 | # Log result summary
76 | if isinstance(result, dict):
77 | success = result.get('success', False)
78 | error = result.get('error')
79 | if error:
80 | logger.error(f"Tool execution failed - {name}: {error}")
81 | elif success:
82 | logger.info(f"Tool execution successful - {name}")
83 |
84 | return await self._handle_tool_result(result)
85 |
86 | except Exception as e:
87 | logger.exception(f"Error executing tool {name}: {e}")
88 | return {"error": str(e), "details": f"Failed to execute {name}"}
89 |
90 | def _ensure_utf8(self, obj: Any) -> Any:
91 | """Ensure all strings in object are UTF-8 encoded"""
92 | if isinstance(obj, str):
93 | return obj.encode('utf-8', errors='replace').decode('utf-8')
94 | elif isinstance(obj, dict):
95 | return {k: self._ensure_utf8(v) for k, v in obj.items()}
96 | elif isinstance(obj, list):
97 | return [self._ensure_utf8(item) for item in obj]
98 | return obj
99 |
119 | async def _format_modification_result(self, result: Dict) -> List[types.TextContent]:
120 | """Format code modification result"""
121 | if "error" in result:
122 | return [types.TextContent(type="text", text=json.dumps({
123 | "success": False,
124 | "error": result["error"]
125 | }))]
126 |
127 | # Format successful result
128 | formatted_result = {
129 | "success": True,
130 | "modification": {
131 | "backup_path": result.get("backup_path"),
132 | "affected_files": len(result.get("affected_code", [])),
133 | "dependencies": len(result.get("dependencies", []))
134 | }
135 | }
136 |
137 | if result.get("affected_code"):
138 | formatted_result["details"] = {
139 | "affected_code": [
140 | {
141 | "file": code["file_path"],
142 | "reason": code["reason"],
143 | "action": code["suggested_action"]
144 | }
145 | for code in result["affected_code"]
146 | ]
147 | }
148 |
149 | return [types.TextContent(type="text", text=json.dumps(formatted_result))]
150 |
151 |
152 | async def _handle_tool_result(self, result: Any) -> List[types.TextContent]:
153 | """Handle tool execution result with proper encoding"""
154 | try:
155 | safe_result = self._convert_to_safe_format(result)
156 | encoded_result = self._ensure_utf8(safe_result)
157 |
158 | try:
159 | if isinstance(encoded_result, (dict, list)):
160 | result_str = json.dumps(encoded_result, ensure_ascii=False)
161 | else:
162 | result_str = str(encoded_result)
163 | return [types.TextContent(type="text", text=result_str)]
164 | except Exception as json_error:
165 | logger.error(f"JSON encoding error: {json_error}")
166 | return [types.TextContent(type="text", text=str(encoded_result))]
167 |
168 | except Exception as e:
169 | logger.error(f"Error handling tool result: {e}", exc_info=True)
170 | return [types.TextContent(type="text", text=f"Error processing result: {str(e)}")]
171 |
172 |
173 | def _convert_to_safe_format(self, obj: Any) -> Any:
174 | """Convert complex objects to JSON-serializable format"""
175 | if isinstance(obj, dict):
176 | return {k: self._convert_to_safe_format(v) for k, v in obj.items()}
177 | elif isinstance(obj, list):
178 | return [self._convert_to_safe_format(item) for item in obj]
179 | elif isinstance(obj, Enum):
180 | return obj.name
181 | elif hasattr(obj, '__dict__'):
182 | return self._convert_to_safe_format(obj.__dict__)
183 | return obj
184 |
185 | def _setup_handlers(self):
186 | """Setup all server handlers"""
187 |
188 | @self.server.list_resources()
189 | async def handle_list_resources() -> List[types.Resource]:
190 | resources = []
191 | for path in self.analyze_paths:
192 | resources.append(
193 | types.Resource(
194 | uri=types.AnyUrl(f"memo://insights/{Path(path).name}"),
195 | name=f"Analysis for {Path(path).name}",
196 | description=f"Analysis results for {path}",
197 | mimeType="text/plain"
198 | )
199 | )
200 | return resources
201 |
202 | @self.server.list_prompts()
203 | async def handle_list_prompts() -> List[types.Prompt]:
204 | prompts = []
205 | for path in self.analyze_paths:
206 | prompts.append(
207 | types.Prompt(
208 | name=f"analyze-{Path(path).name}",
209 | description=f"Analyze code in {path}",
210 | arguments=[
211 | types.PromptArgument(
212 | name="tool",
213 | description="Analysis tool to use",
214 | required=True
215 | )
216 | ]
217 | )
218 | )
219 | return prompts
220 |
221 | @self.server.list_tools()
222 | async def handle_list_tools() -> List[types.Tool]:
223 | """List available analysis tools"""
224 | return [
225 | types.Tool(
226 | name="analyze_project_structure",
227 | description="Directory structure and organization analysis with tree view",
228 | inputSchema={
229 | "type": "object",
230 | "properties": {
231 | "path": {"type": "string"}
232 | },
233 | "required": ["path"]
234 | }
235 | ),
236 | types.Tool(
237 | name="analyze_project_statistics",
238 | description="Project-wide statistics and metrics",
239 | inputSchema={
240 | "type": "object",
241 | "properties": {
242 | "path": {"type": "string"}
243 | },
244 | "required": ["path"]
245 | }
246 | ),
247 |
248 | types.Tool(
249 | name="analyze_project_technology",
250 | description="Detect and analyze used technologies and frameworks",
251 | inputSchema={
252 | "type": "object",
253 | "properties": {
254 | "path": {"type": "string"}
255 | },
256 | "required": ["path"]
257 | }
258 | ),
259 | # File Operations
260 | types.Tool(
261 | name="file_operations",
262 | description="File operations with MCP support",
263 | inputSchema={
264 | "type": "object",
265 | "properties": {
266 | "operation": {"type": "string", "enum": ["analyze", "create", "modify", "stream"]},
267 | "path": {"type": "string"},
268 | "content": {"type": "string"},
269 | "section": {
270 | "type": "object",
271 | "properties": {
272 | "start": {"type": "number"},
273 | "end": {"type": "number"}
274 | }
275 | },
276 | "stream_operation": {"type": "string", "enum": ["start", "write", "finish"]}
277 | },
278 | "required": ["operation", "path"]
279 | }
280 | ),
281 | types.Tool(
282 | name="code_modifier",
283 | description="Safe code modification with impact analysis",
284 | inputSchema={
285 | "type": "object",
286 | "properties": {
287 | "file_path": {"type": "string"},
288 | "operation": {
289 | "type": "string",
290 | "enum": ["modify", "insert", "delete"]
291 | },
292 | "section": {
293 | "type": "object",
294 | "properties": {
295 | "start": {"type": "number"},
296 | "end": {"type": "number"}
297 | }
298 | },
299 | "content": {"type": "string"},
300 | "description": {"type": "string"}
301 | },
302 | "required": ["file_path", "operation"]
303 | }
304 | ),
305 | types.Tool(
306 | name="manage_changes",
307 | description="Manage code changes and their application",
308 | inputSchema={
309 | "type": "object",
310 | "properties": {
311 | "file_path": {"type": "string"},
312 | "operation": {
313 | "type": "string",
314 | "enum": ["apply", "revert", "status", "history"]
315 | },
316 | "change_ids": {
317 | "type": "array",
318 | "items": {"type": "string"}
319 | },
320 | "limit": {"type": "number"}
321 | },
322 | "required": ["file_path", "operation"]
323 | }
324 | ),
325 | types.Tool(
326 | name="search_code",
327 | description="Search code with pattern matching",
328 | inputSchema={
329 | "type": "object",
330 | "properties": {
331 | "pattern": {"type": "string"},
332 | "search_type": {
333 | "type": "string",
334 | "enum": ["text", "regex", "ast"]
335 | },
336 | "scope": {"type": "string"}
337 | },
338 | "required": ["pattern"]
339 | }
340 | ),
341 | # Code Analysis Tools
342 | types.Tool(
343 | name="analyze_code_structure",
344 | description="Analyze code structure and architecture",
345 | inputSchema={
346 | "type": "object",
347 | "properties": {
348 | "path": {"type": "string"}
349 | },
350 | "required": ["path"]
351 | }
352 | ),
353 | types.Tool(
354 | name="validate_code",
355 | description="Validate code quality and standards",
356 | inputSchema={
357 | "type": "object",
358 | "properties": {
359 | "path": {"type": "string"},
360 | "validation_type": {
361 | "type": "string",
362 | "enum": ["syntax", "style", "security", "all"]
363 | }
364 | },
365 | "required": ["path"]
366 | }
367 | ),
368 | types.Tool(
369 | name="check_syntax",
370 | description="Advanced syntax checking and analysis",
371 | inputSchema={
372 | "type": "object",
373 | "properties": {
374 | "path": {"type": "string"},
375 | "check_type": {
376 | "type": "string",
377 | "enum": ["all", "tokens", "ast", "imports", "naming"]
378 | }
379 | },
380 | "required": ["path"]
381 | }
382 | ),
383 | # Search Tools
384 | types.Tool(
385 | name="search_files",
386 | description="Advanced file search capabilities",
387 | inputSchema={
388 | "type": "object",
389 | "properties": {
390 | "path": {"type": "string"},
391 | "operation": {
392 | "type": "string",
393 | "enum": ["find", "glob", "pattern", "recent"]
394 | },
395 | "pattern": {"type": "string"},
396 | "recursive": {"type": "boolean"}
397 | },
398 | "required": ["path", "operation"]
399 | }
400 | ),
401 | types.Tool(
402 | name="search_content",
403 | description="Search within file contents",
404 | inputSchema={
405 | "type": "object",
406 | "properties": {
407 | "path": {"type": "string"},
408 | "operation": {
409 | "type": "string",
410 | "enum": ["search", "analyze", "regex", "similar"]
411 | },
412 | "text": {"type": "string"},
413 | "pattern": {"type": "string"}
414 | },
415 | "required": ["path", "operation"]
416 | }
417 | ),
418 |
419 | # Version Control
420 | types.Tool(
421 | name="version_control",
422 | description="Advanced version control and history management",
423 | inputSchema={
424 | "type": "object",
425 | "properties": {
426 | "path": {"type": "string"},
427 | "operation": {
428 | "type": "string",
429 | "enum": [
430 | "create_version",
431 | "restore_version",
432 | "get_history",
433 | "compare_versions",
434 | "get_changes",
435 | "cleanup"
436 | ]
437 | },
438 | "version_id": {"type": "string"},
439 | "description": {"type": "string"}
440 | },
441 | "required": ["path", "operation"]
442 | }
443 | ),
444 | types.Tool(
445 | name="analyze_imports",
446 | description="Analyze import statements and dependencies",
447 | inputSchema={
448 | "type": "object",
449 | "properties": {
450 | "path": {"type": "string"}
451 | },
452 | "required": ["path"]
453 | }
454 | ),
455 |
456 | types.Tool(
457 | name="find_pattern_usages",
458 | description="Find pattern occurrences and analyze usage",
459 | inputSchema={
460 | "type": "object",
461 | "properties": {
462 | "pattern": {"type": "string"},
463 | "pattern_type": {
464 | "type": "string",
465 | "enum": ["all", "code", "variable", "function", "class"]
466 | }
467 | },
468 | "required": ["pattern"]
469 | }
470 | ),
471 | types.Tool(
472 | name="find_code_patterns",
473 | description="Detect code patterns and anti-patterns",
474 | inputSchema={
475 | "type": "object",
476 | "properties": {
477 | "path": {"type": "string"}
478 | },
479 | "required": ["path"]
480 | }
481 | ),
482 | types.Tool(
483 | name="find_references",
484 | description="Find code references",
485 | inputSchema={
486 | "type": "object",
487 | "properties": {
488 | "target": {"type": "string"},
489 | "ref_type": {
490 | "type": "string",
491 | "enum": ["all", "class", "function", "variable"]
492 | }
493 | },
494 | "required": ["target"]
495 | }
496 | ),
497 | types.Tool(
498 | name="preview_changes",
499 | description="Preview code changes",
500 | inputSchema={
501 | "type": "object",
502 | "properties": {
503 | "pattern": {"type": "string"},
504 | "replacement": {"type": "string"}
505 | },
506 | "required": ["pattern", "replacement"]
507 | }
508 | )
509 | ]
510 |
511 | @self.server.call_tool()
512 | async def handle_call_tool(name: str, arguments: Dict[str, Any] | None) -> List[types.TextContent]:
513 | """Handle tool execution with improved error handling"""
514 | if not arguments:
515 | return [types.TextContent(type="text", text="Missing arguments")]
516 |
517 | try:
518 | # Special handling for file operations
519 | if name == "file_operations":
520 | tool = self.tool_manager.get_tool(name)
521 | if not tool:
522 | return [types.TextContent(type="text", text=f"Tool {name} not found")]
523 |
524 | result = await tool.execute(arguments)
525 | return [types.TextContent(type="text", text=json.dumps(result, ensure_ascii=False))]
526 |
527 | # Handle paths for other tools
528 | if "file_path" in arguments:
529 | arguments["file_path"] = self._resolve_path(arguments["file_path"])
530 | if "path" in arguments:
531 | arguments["path"] = self._resolve_path(arguments["path"])
532 |
533 | logger.info(f"Executing tool {name} with arguments: {arguments}")
534 | result = await self.tool_manager.execute_tool(name, arguments)
535 |
536 | if isinstance(result, dict) and "error" in result:
537 | return [types.TextContent(type="text", text=str(result["error"]))]
538 |
539 | return [types.TextContent(type="text", text=json.dumps(result, ensure_ascii=False))]
540 |
541 | except Exception as e:
542 | logger.error(f"Error executing tool {name}: {e}", exc_info=True)
543 | return [types.TextContent(type="text", text=f"Error: {str(e)}")]
544 |
545 | def _resolve_path(self, path_str: str) -> str:
546 | """Resolve path string to absolute path"""
547 | if not path_str or path_str == ".":
548 | return str(self.analyze_paths[0])
549 | try:
550 | path_obj = Path(path_str)
551 | if not path_obj.is_absolute():
552 | path = str((self.analyze_paths[0] / path_obj))
553 | logger.info(f"Resolved path: {path}")
554 | return path
555 | return str(path_obj)
556 | except Exception as e:
557 | logger.error(f"Error resolving path: {e}")
558 | return path_str
559 |
560 | async def run(self):
561 | """Run the MCP server"""
562 | logger.info(f"Starting server with paths: {self.analyze_paths}")
563 | async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
564 | try:
565 | await self.server.run(
566 | read_stream,
567 | write_stream,
568 | InitializationOptions(
569 | server_name="code-analyzer",
570 | server_version="0.1.0",
571 | capabilities=self.server.get_capabilities(
572 | notification_options=NotificationOptions(),
573 | experimental_capabilities={},
574 | ),
575 | ),
576 | )
577 | except Exception as e:
578 | logger.error(f"Server error: {e}", exc_info=True)
579 | raise
580 |
581 | async def main(analyze_paths: List[str]):
582 | """Main entry point for the MCP server"""
583 | logger.info(f"Starting Code Analyzer with paths: {analyze_paths}")
584 | server = MCPServer(analyze_paths)
585 | await server.run()
586 |
--------------------------------------------------------------------------------
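
The server above speaks MCP over stdio, so it is normally launched by an MCP client rather than run by hand. A minimal sketch for starting it directly during local testing, assuming the analyzed path exists; the path is illustrative:

    import asyncio
    import logging

    from mcp_code_analyzer.server.handlers import main

    logging.basicConfig(level=logging.INFO)

    # Serves MCP requests over stdin/stdout until the client disconnects.
    asyncio.run(main(["./my_project"]))
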
/mcp_code_analyzer/tools/search_tools.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
2 | from pathlib import Path
3 | from typing import Dict, Any, List, Optional
4 | from datetime import datetime
5 | import re
6 | from dataclasses import dataclass
7 | import fnmatch
8 | from .base import BaseTool
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 | @dataclass
13 | class SearchResult:
14 | """Container for search results"""
15 | path: str
16 | match_type: str # file, content, pattern
17 | line_number: Optional[int] = None
18 | content: Optional[str] = None
19 | context: Optional[Dict[str, Any]] = None
20 |
21 | class PathFinder(BaseTool):
22 | """Advanced file and directory search tool"""
23 |
24 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
25 | search_path = arguments.get('path', '.')
26 | operation = arguments.get('operation', 'find')
27 |
28 | operations = {
29 | 'find': self._find_files,
30 | 'glob': self._glob_search,
31 | 'pattern': self._pattern_search,
32 | 'recent': self._find_recent
33 | }
34 |
35 | if operation not in operations:
36 | return {"error": f"Unknown operation: {operation}"}
37 |
38 | try:
39 | result = await operations[operation](Path(search_path), arguments)
40 | return {"success": True, "data": result}
41 | except Exception as e:
42 | logger.error(f"PathFinder operation failed: {e}")
43 | return {"success": False, "error": str(e)}
44 |
45 | async def _find_files(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
46 | """Find files based on criteria"""
47 | filters = args.get('filters', {})
48 | max_depth = args.get('max_depth', None)
49 | exclude_patterns = set(args.get('exclude', []))
50 |
51 | results = []
52 | total_scanned = 0
53 |
54 | try:
55 | for root, dirs, files in self._walk_with_depth(path, max_depth):
56 | # Apply directory exclusions
57 | dirs[:] = [d for d in dirs if not any(
58 | fnmatch.fnmatch(d, pattern) for pattern in exclude_patterns
59 | )]
60 |
61 | for file in files:
62 | total_scanned += 1
63 | file_path = Path(root) / file
64 |
65 | if self._should_skip(file_path):
66 | continue
67 |
68 | if self._matches_filters(file_path, filters):
69 | stat = file_path.stat()
70 | results.append({
71 | "path": str(file_path),
72 | "name": file_path.name,
73 | "extension": file_path.suffix,
74 | "size": stat.st_size,
75 | "modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
76 | "created": datetime.fromtimestamp(stat.st_ctime).isoformat()
77 | })
78 |
79 | return {
80 | "results": results,
81 | "summary": {
82 | "total_found": len(results),
83 | "total_scanned": total_scanned,
84 | "search_path": str(path)
85 | }
86 | }
87 |
88 | except Exception as e:
89 | raise RuntimeError(f"File search failed: {e}")
90 |
91 | async def _glob_search(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
92 | """Search using glob patterns"""
93 | patterns = args.get('patterns', ['*'])
94 | recursive = args.get('recursive', True)
95 |
96 | results = []
97 | total_matches = 0
98 |
99 | try:
100 | for pattern in patterns:
101 | if recursive:
102 | matches = path.rglob(pattern)
103 | else:
104 | matches = path.glob(pattern)
105 |
106 | for match in matches:
107 | if self._should_skip(match):
108 | continue
109 |
110 | stat = match.stat()
111 | results.append({
112 | "path": str(match),
113 | "pattern": pattern,
114 | "type": "directory" if match.is_dir() else "file",
115 | "size": stat.st_size if match.is_file() else None,
116 | "modified": datetime.fromtimestamp(stat.st_mtime).isoformat()
117 | })
118 | total_matches += 1
119 |
120 | return {
121 | "results": results,
122 | "summary": {
123 | "patterns": patterns,
124 | "total_matches": total_matches,
125 | "search_path": str(path)
126 | }
127 | }
128 |
129 | except Exception as e:
130 | raise RuntimeError(f"Glob search failed: {e}")
131 |
132 | async def _pattern_search(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
133 | """Search for files matching complex patterns"""
134 | pattern_rules = args.get('rules', {})
135 | max_results = args.get('max_results', None)
136 |
137 | results = []
138 |
139 | try:
140 | for file_path in self._recursive_search(path):
141 | if self._should_skip(file_path):
142 | continue
143 |
144 | if self._matches_pattern_rules(file_path, pattern_rules):
145 | stat = file_path.stat()
146 | results.append({
147 | "path": str(file_path),
148 | "name": file_path.name,
149 | "matches": self._get_matching_rules(file_path, pattern_rules),
150 | "size": stat.st_size,
151 | "modified": datetime.fromtimestamp(stat.st_mtime).isoformat()
152 | })
153 |
154 | if max_results and len(results) >= max_results:
155 | break
156 |
157 | return {
158 | "results": results,
159 | "summary": {
160 | "total_matches": len(results),
161 | "rules_applied": list(pattern_rules.keys()),
162 | "search_path": str(path)
163 | }
164 | }
165 |
166 | except Exception as e:
167 | raise RuntimeError(f"Pattern search failed: {e}")
168 |
169 | async def _find_recent(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
170 | """Find recently modified files"""
171 | hours = args.get('hours', 24)
172 | file_types = set(args.get('file_types', []))
173 | min_size = args.get('min_size', 0)
174 | max_size = args.get('max_size', float('inf'))
175 |
176 | results = []
177 | cutoff_time = datetime.now().timestamp() - (hours * 3600)
178 |
179 | try:
180 | for file_path in self._recursive_search(path):
181 |                 if self._should_skip(file_path) or not file_path.is_file():
182 | continue
183 |
184 | stat = file_path.stat()
185 | if stat.st_mtime >= cutoff_time:
186 | if not file_types or file_path.suffix in file_types:
187 | if min_size <= stat.st_size <= max_size:
188 | results.append({
189 | "path": str(file_path),
190 | "name": file_path.name,
191 | "size": stat.st_size,
192 | "modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
193 | "hours_ago": (datetime.now().timestamp() - stat.st_mtime) / 3600
194 | })
195 |
196 | # Sort by modification time
197 | results.sort(key=lambda x: x["modified"], reverse=True)
198 |
199 | return {
200 | "results": results,
201 | "summary": {
202 | "total_found": len(results),
203 | "time_range_hours": hours,
204 | "search_path": str(path)
205 | }
206 | }
207 |
208 | except Exception as e:
209 | raise RuntimeError(f"Recent files search failed: {e}")
210 |
211 |     def _walk_with_depth(self, path: Path, max_depth: Optional[int] = None):
212 |         """Walk directory tree with optional depth limit"""
213 |         base_depth = len(path.parents)
214 |         for root, dirs, files in os.walk(path):  # Path.walk() requires Python 3.12+
215 |             current_depth = len(Path(root).parents) - base_depth
216 |             if max_depth is not None and current_depth > max_depth:
217 |                 dirs.clear()
218 |             else:
219 |                 yield root, dirs, files
220 |
221 | def _matches_filters(self, path: Path, filters: Dict[str, Any]) -> bool:
222 | """Check if file matches all filters"""
223 | try:
224 | stat = path.stat()
225 |
226 | for key, value in filters.items():
227 | if key == 'extension' and path.suffix != value:
228 | return False
229 | elif key == 'name' and path.name != value:
230 | return False
231 | elif key == 'min_size' and stat.st_size < value:
232 | return False
233 | elif key == 'max_size' and stat.st_size > value:
234 | return False
235 | elif key == 'modified_after' and stat.st_mtime < value:
236 | return False
237 | elif key == 'modified_before' and stat.st_mtime > value:
238 | return False
239 |
240 | return True
241 |
242 | except Exception:
243 | return False
244 |
245 | def _matches_pattern_rules(self, path: Path, rules: Dict[str, Any]) -> bool:
246 | """Check if file matches pattern rules"""
247 | try:
248 | for rule_type, pattern in rules.items():
249 | if rule_type == 'name_pattern':
250 | if not fnmatch.fnmatch(path.name, pattern):
251 | return False
252 | elif rule_type == 'path_pattern':
253 | if not fnmatch.fnmatch(str(path), pattern):
254 | return False
255 | elif rule_type == 'regex':
256 | if not re.search(pattern, str(path)):
257 | return False
258 |
259 | return True
260 |
261 | except Exception:
262 | return False
263 |
264 | def _get_matching_rules(self, path: Path, rules: Dict[str, Any]) -> List[str]:
265 | """Get list of matching rules for a file"""
266 | matches = []
267 | for rule_type, pattern in rules.items():
268 | if rule_type == 'name_pattern' and fnmatch.fnmatch(path.name, pattern):
269 | matches.append(rule_type)
270 | elif rule_type == 'path_pattern' and fnmatch.fnmatch(str(path), pattern):
271 | matches.append(rule_type)
272 | elif rule_type == 'regex' and re.search(pattern, str(path)):
273 | matches.append(rule_type)
274 | return matches
275 |
276 | def _recursive_search(self, path: Path) -> List[Path]:
277 | """Recursively search directory"""
278 | try:
279 | return list(path.rglob('*'))
280 | except Exception:
281 | return []
282 |
283 | class ContentScanner(BaseTool):
284 | """Advanced content search and analysis tool"""
285 |
286 | def __init__(self):
287 | super().__init__()
288 | self._file_cache = {}
289 | self.max_workers = 4
290 |
291 | async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
292 | operation = arguments.get('operation', 'search')
293 | target_path = arguments.get('path', '.')
294 |
295 | operations = {
296 | 'search': self._search_content,
297 | 'analyze': self._analyze_content,
298 | 'regex': self._regex_search,
299 | 'similar': self._find_similar
300 | }
301 |
302 | if operation not in operations:
303 | return {"error": f"Unknown operation: {operation}"}
304 |
305 | try:
306 | result = await operations[operation](Path(target_path), arguments)
307 | return {"success": True, "data": result}
308 | except Exception as e:
309 | logger.error(f"ContentScanner operation failed: {e}")
310 | return {"success": False, "error": str(e)}
311 |
312 | async def _search_content(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
313 | """Search file contents for text"""
314 | search_text = args.get('text')
315 | case_sensitive = args.get('case_sensitive', False)
316 | whole_word = args.get('whole_word', False)
317 | file_pattern = args.get('file_pattern', '*')
318 |
319 | if not search_text:
320 | return {"error": "Search text is required"}
321 |
322 | results = []
323 | total_files = 0
324 | matches_found = 0
325 |
326 | try:
327 | # Prepare search pattern
328 | if whole_word:
329 | pattern = r'\b' + re.escape(search_text) + r'\b'
330 | else:
331 | pattern = re.escape(search_text)
332 |
333 | flags = 0 if case_sensitive else re.IGNORECASE
334 | regex = re.compile(pattern, flags)
335 |
336 | # Search files
337 | for file_path in path.rglob(file_pattern):
338 | if self._should_skip(file_path) or not file_path.is_file():
339 | continue
340 |
341 | total_files += 1
342 |
343 | try:
344 | matches = await self._find_matches(file_path, regex)
345 | if matches:
346 | matches_found += len(matches)
347 | results.append({
348 | "file": str(file_path),
349 | "matches": matches
350 | })
351 | except Exception as e:
352 | logger.error(f"Error searching {file_path}: {e}")
353 |
354 | return {
355 | "results": results,
356 | "summary": {
357 | "total_files_searched": total_files,
358 | "files_with_matches": len(results),
359 | "total_matches": matches_found,
360 | "search_pattern": {
361 | "text": search_text,
362 | "case_sensitive": case_sensitive,
363 | "whole_word": whole_word
364 | }
365 | }
366 | }
367 |
368 | except Exception as e:
369 | raise RuntimeError(f"Content search failed: {e}")
370 |
371 | async def _analyze_content(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
372 | """Analyze file contents"""
373 | file_pattern = args.get('file_pattern', '*')
374 | analysis_types = set(args.get('types', ['duplicates', 'statistics', 'patterns']))
375 |
376 | try:
377 | analysis_results = {
378 | "files_analyzed": 0,
379 | "total_size": 0,
380 | "analysis": {}
381 | }
382 |
383 | if 'duplicates' in analysis_types:
384 | analysis_results["analysis"]["duplicates"] = await self._find_duplicate_content(path, file_pattern)
385 |
386 | if 'statistics' in analysis_types:
387 | analysis_results["analysis"]["statistics"] = await self._generate_content_statistics(path, file_pattern)
388 |
389 | if 'patterns' in analysis_types:
390 | analysis_results["analysis"]["patterns"] = await self._analyze_content_patterns(path, file_pattern)
391 |
392 | return analysis_results
393 |
394 | except Exception as e:
395 | raise RuntimeError(f"Content analysis failed: {e}")
396 |
397 | async def _regex_search(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
398 | """Search using regular expressions"""
399 | pattern = args.get('pattern')
400 | file_pattern = args.get('file_pattern', '*')
401 | multiline = args.get('multiline', False)
402 |
403 | if not pattern:
404 | return {"error": "Regex pattern is required"}
405 |
406 | try:
407 | flags = re.MULTILINE if multiline else 0
408 | regex = re.compile(pattern, flags)
409 |
410 |             results, total_files = [], 0
411 |             for file_path in path.rglob(file_pattern):
412 |                 if self._should_skip(file_path) or not file_path.is_file():
413 |                     continue
414 |                 total_files += 1
415 |                 matches = await self._find_matches(file_path, regex)
416 |                 if matches:
417 |                     results.append({
418 |                         "file": str(file_path),
419 |                         "matches": matches
420 |                     })
421 |
422 |             return {
423 |                 "results": results,
424 |                 "summary": {
425 |                     "total_files_searched": total_files,
426 | "files_with_matches": len(results),
427 | "pattern": pattern,
428 | "multiline": multiline
429 | }
430 | }
431 |
432 | except Exception as e:
433 | raise RuntimeError(f"Regex search failed: {e}")
434 |
435 | async def _find_similar(self, path: Path, args: Dict[str, Any]) -> Dict[str, Any]:
436 | """Find files with similar content"""
437 | threshold = args.get('similarity_threshold', 0.8)
438 | file_pattern = args.get('file_pattern', '*')
439 | min_size = args.get('min_size', 0)
440 |
441 | try:
442 | file_groups = []
443 | content_hashes = {}
444 |
445 | # First pass: collect file contents
446 | for file_path in path.rglob(file_pattern):
447 | if self._should_skip(file_path) or not file_path.is_file():
448 | continue
449 |
450 | if file_path.stat().st_size < min_size:
451 | continue
452 |
453 | try:
454 | content = await self._read_file_content(file_path)
455 | if content:
456 | content_hashes[str(file_path)] = self._calculate_similarity_hash(content)
457 | except Exception as e:
458 | logger.error(f"Error reading {file_path}: {e}")
459 |
460 | # Second pass: compare files
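460 |             # Greedy grouping: a file added to one similarity group is not compared again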
461 | analyzed_files = set()
462 | for file1, hash1 in content_hashes.items():
463 | if file1 in analyzed_files:
464 | continue
465 |
466 | similar_files = []
467 | for file2, hash2 in content_hashes.items():
468 | if file1 != file2 and file2 not in analyzed_files:
469 | similarity = self._calculate_hash_similarity(hash1, hash2)
470 | if similarity >= threshold:
471 | similar_files.append({
472 | "path": file2,
473 | "similarity": similarity
474 | })
475 | analyzed_files.add(file2)
476 |
477 | if similar_files:
478 | analyzed_files.add(file1)
479 | file_groups.append({
480 | "base_file": file1,
481 | "similar_files": similar_files
482 | })
483 |
484 | return {
485 | "groups": file_groups,
486 | "summary": {
487 | "total_files": len(content_hashes),
488 | "similarity_groups": len(file_groups),
489 | "threshold": threshold
490 | }
491 | }
492 |
493 | except Exception as e:
494 | raise RuntimeError(f"Similarity analysis failed: {e}")
495 |
496 | async def _find_matches(self, file_path: Path, pattern: re.Pattern) -> List[Dict[str, Any]]:
497 | """Find pattern matches in file"""
498 | matches = []
499 | try:
500 | content = await self._read_file_content(file_path)
501 | if not content:
502 | return matches
503 |
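503 |             # Report 1-based line numbers with surrounding context lines for each match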
504 | for i, line in enumerate(content.splitlines(), 1):
505 | for match in pattern.finditer(line):
506 | matches.append({
507 | "line": i,
508 | "start": match.start(),
509 | "end": match.end(),
510 | "text": match.group(),
511 | "context": self._get_line_context(content.splitlines(), i)
512 | })
513 |
514 | except Exception as e:
515 | logger.error(f"Error finding matches in {file_path}: {e}")
516 |
517 | return matches
518 |
519 | async def _find_duplicate_content(self, path: Path, pattern: str) -> Dict[str, Any]:
520 | """Find duplicate content across files"""
521 | content_map = {}
522 | duplicates = []
523 |
524 | try:
525 | for file_path in path.rglob(pattern):
526 | if self._should_skip(file_path) or not file_path.is_file():
527 | continue
528 |
529 | content = await self._read_file_content(file_path)
530 | if not content:
531 | continue
532 |
533 |                 content_hash = self._calculate_content_hash(content)
534 |                 if content_hash in content_map:
535 |                     # Found a duplicate: add it to the group for this original,
536 |                     # creating the group on the first duplicate
537 |                     original = content_map[content_hash]
538 |                     group = next(
539 |                         (g for g in duplicates if g["original"] == original), None
540 |                     )
541 |                     if group is None:
542 |                         group = {"original": original, "duplicates": []}
543 |                         duplicates.append(group)
544 |                     group["duplicates"].append(str(file_path))
545 |                 else:
546 |                     content_map[content_hash] = str(file_path)
547 |
548 | return {
549 | "duplicate_groups": duplicates,
550 | "total_duplicates": sum(len(group["duplicates"]) for group in duplicates)
551 | }
552 |
553 | except Exception as e:
554 | logger.error(f"Error finding duplicates: {e}")
555 | return {"error": str(e)}
556 |
557 | async def _generate_content_statistics(self, path: Path, pattern: str) -> Dict[str, Any]:
558 | """Generate statistics about file contents"""
559 | stats = {
560 | "total_files": 0,
561 | "total_lines": 0,
562 | "total_size": 0,
563 | "average_line_length": 0,
564 | "file_types": {},
565 | "encoding_types": {},
566 | "line_endings": {
567 | "unix": 0,
568 | "windows": 0,
569 | "mixed": 0
570 | }
571 | }
572 |
573 | try:
574 | line_lengths = []
575 |
576 | for file_path in path.rglob(pattern):
577 | if self._should_skip(file_path) or not file_path.is_file():
578 | continue
579 |
580 | stats["total_files"] += 1
581 | stats["total_size"] += file_path.stat().st_size
582 |
583 | # Track file types
584 | ext = file_path.suffix
585 | stats["file_types"][ext] = stats["file_types"].get(ext, 0) + 1
586 |
587 | content = await self._read_file_content(file_path)
588 | if not content:
589 | continue
590 |
591 | lines = content.splitlines()
592 | stats["total_lines"] += len(lines)
593 | line_lengths.extend(len(line) for line in lines)
594 |
595 |                 # Detect line endings from raw bytes (read_text() normalizes newlines)
596 |                 raw_bytes = file_path.read_bytes()
597 |                 if b'\r\n' in raw_bytes and b'\n' in raw_bytes.replace(b'\r\n', b''):
598 |                     stats["line_endings"]["mixed"] += 1
599 |                 elif b'\r\n' in raw_bytes:
600 |                     stats["line_endings"]["windows"] += 1
601 |                 else:
602 |                     stats["line_endings"]["unix"] += 1
602 |
603 | # Track encoding
604 | encoding = self._detect_encoding(file_path)
605 | stats["encoding_types"][encoding] = stats["encoding_types"].get(encoding, 0) + 1
606 |
607 | if line_lengths:
608 | stats["average_line_length"] = sum(line_lengths) / len(line_lengths)
609 |
610 | return stats
611 |
612 | except Exception as e:
613 | logger.error(f"Error generating statistics: {e}")
614 | return {"error": str(e)}
615 |
616 | async def _analyze_content_patterns(self, path: Path, pattern: str) -> Dict[str, Any]:
617 | """Analyze content for common patterns"""
618 | patterns = {
619 | "common_words": {},
620 | "line_patterns": [],
621 | "structure_patterns": []
622 | }
623 |
624 | try:
625 | word_freq = {}
626 | line_patterns = set()
627 |
628 | for file_path in path.rglob(pattern):
629 | if self._should_skip(file_path) or not file_path.is_file():
630 | continue
631 |
632 | content = await self._read_file_content(file_path)
633 | if not content:
634 | continue
635 |
636 | # Analyze words
637 | words = re.findall(r'\w+', content.lower())
638 | for word in words:
639 | word_freq[word] = word_freq.get(word, 0) + 1
640 |
641 | # Analyze line patterns
642 | lines = content.splitlines()
643 | for line in lines:
644 | # Find repeating patterns
645 | pattern_match = re.match(r'^(\s*)(.+?)(\s*)$', line)
646 | if pattern_match:
647 |                         indent = pattern_match.group(1)  # avoid shadowing `content`, used below
648 | if len(indent) > 0:
649 | line_patterns.add(f"indent:{len(indent)}")
650 |
651 | # Analyze structure patterns
652 | if file_path.suffix == '.py':
653 | await self._analyze_python_patterns(content, patterns)
654 |
655 | # Process word frequencies
656 | patterns["common_words"] = dict(sorted(
657 | word_freq.items(),
658 | key=lambda x: x[1],
659 | reverse=True
660 | )[:100])
661 |
662 | patterns["line_patterns"] = list(line_patterns)
663 |
664 | return patterns
665 |
666 | except Exception as e:
667 | logger.error(f"Error analyzing patterns: {e}")
668 | return {"error": str(e)}
669 |
670 | async def _read_file_content(self, path: Path) -> Optional[str]:
671 | """Read file content with caching"""
672 | if str(path) in self._file_cache:
673 | return self._file_cache[str(path)]
674 |
675 | try:
676 | content = path.read_text(encoding=self._detect_encoding(path))
677 | self._file_cache[str(path)] = content
678 | return content
679 | except Exception as e:
680 | logger.error(f"Error reading {path}: {e}")
681 | return None
682 |
683 | def _detect_encoding(self, path: Path) -> str:
684 | """Detect file encoding"""
685 | try:
686 | import chardet
687 | with open(path, 'rb') as f:
688 | raw = f.read()
689 | result = chardet.detect(raw)
690 | return result['encoding'] or 'utf-8'
691 | except Exception:
692 | return 'utf-8'
693 |
694 | def _calculate_content_hash(self, content: str) -> str:
695 | """Calculate hash of content"""
696 | import hashlib
697 | return hashlib.md5(content.encode()).hexdigest()
698 |
699 | def _calculate_similarity_hash(self, content: str) -> List[int]:
700 | """Calculate similarity hash for content"""
701 | # Simplified implementation of similarity hashing
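701 |         # Each whitespace-separated token is hashed; the overlap of these hashes is used as a similarity score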
702 | words = content.split()
703 | return [hash(word) for word in words]
704 |
705 | def _calculate_hash_similarity(self, hash1: List[int], hash2: List[int]) -> float:
706 | """Calculate similarity between two hashes"""
707 | common = set(hash1) & set(hash2)
708 |         return len(common) / max(len(hash1), len(hash2), 1)  # guard against empty hash lists
709 |
710 | def _get_line_context(self, lines: List[str], line_number: int, context_lines: int = 2) -> Dict[str, List[str]]:
711 | """Get context lines around a match"""
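711 |         # line_number is 1-based; convert to 0-based slice bounds around the match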
712 | start = max(0, line_number - context_lines - 1)
713 | end = min(len(lines), line_number + context_lines)
714 | return {
715 | "before": lines[start:line_number-1],
716 | "after": lines[line_number:end]
717 | }
718 |
719 | async def _analyze_python_patterns(self, content: str, patterns: Dict[str, Any]) -> None:
720 | """Analyze Python-specific patterns"""
721 | import ast
722 | try:
723 | tree = ast.parse(content)
724 |
725 | # Analyze structure patterns
726 | class_patterns = []
727 | function_patterns = []
728 |
729 | for node in ast.walk(tree):
730 | if isinstance(node, ast.ClassDef):
731 | methods = len([n for n in node.body if isinstance(n, ast.FunctionDef)])
732 | class_patterns.append(f"class_with_{methods}_methods")
733 |
734 | elif isinstance(node, ast.FunctionDef):
735 | args = len(node.args.args)
736 | function_patterns.append(f"function_with_{args}_args")
737 |
738 | if class_patterns:
739 | patterns["structure_patterns"].extend(class_patterns)
740 | if function_patterns:
741 | patterns["structure_patterns"].extend(function_patterns)
742 |
743 | except Exception as e:
744 | logger.error(f"Error analyzing Python patterns: {e}")
--------------------------------------------------------------------------------
/uv.lock:
--------------------------------------------------------------------------------
1 | version = 1
2 | requires-python = ">=3.10"
3 |
4 | [[package]]
5 | name = "annotated-types"
6 | version = "0.7.0"
7 | source = { registry = "https://pypi.org/simple" }
8 | sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
9 | wheels = [
10 | { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
11 | ]
12 |
13 | [[package]]
14 | name = "anyio"
15 | version = "4.6.2.post1"
16 | source = { registry = "https://pypi.org/simple" }
17 | dependencies = [
18 | { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
19 | { name = "idna" },
20 | { name = "sniffio" },
21 | { name = "typing-extensions", marker = "python_full_version < '3.11'" },
22 | ]
23 | sdist = { url = "https://files.pythonhosted.org/packages/9f/09/45b9b7a6d4e45c6bcb5bf61d19e3ab87df68e0601fa8c5293de3542546cc/anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c", size = 173422 }
24 | wheels = [
25 | { url = "https://files.pythonhosted.org/packages/e4/f5/f2b75d2fc6f1a260f340f0e7c6a060f4dd2961cc16884ed851b0d18da06a/anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d", size = 90377 },
26 | ]
27 |
28 | [[package]]
29 | name = "certifi"
30 | version = "2024.8.30"
31 | source = { registry = "https://pypi.org/simple" }
32 | sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 }
33 | wheels = [
34 | { url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 },
35 | ]
36 |
37 | [[package]]
38 | name = "click"
39 | version = "8.1.7"
40 | source = { registry = "https://pypi.org/simple" }
41 | dependencies = [
42 | { name = "colorama", marker = "platform_system == 'Windows'" },
43 | ]
44 | sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 }
45 | wheels = [
46 | { url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 },
47 | ]
48 |
49 | [[package]]
50 | name = "colorama"
51 | version = "0.4.6"
52 | source = { registry = "https://pypi.org/simple" }
53 | sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
54 | wheels = [
55 | { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
56 | ]
57 |
58 | [[package]]
59 | name = "exceptiongroup"
60 | version = "1.2.2"
61 | source = { registry = "https://pypi.org/simple" }
62 | sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 }
63 | wheels = [
64 | { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 },
65 | ]
66 |
67 | [[package]]
68 | name = "h11"
69 | version = "0.14.0"
70 | source = { registry = "https://pypi.org/simple" }
71 | sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 }
72 | wheels = [
73 | { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 },
74 | ]
75 |
76 | [[package]]
77 | name = "httpcore"
78 | version = "1.0.7"
79 | source = { registry = "https://pypi.org/simple" }
80 | dependencies = [
81 | { name = "certifi" },
82 | { name = "h11" },
83 | ]
84 | sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 }
85 | wheels = [
86 | { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 },
87 | ]
88 |
89 | [[package]]
90 | name = "httpx"
91 | version = "0.28.0"
92 | source = { registry = "https://pypi.org/simple" }
93 | dependencies = [
94 | { name = "anyio" },
95 | { name = "certifi" },
96 | { name = "httpcore" },
97 | { name = "idna" },
98 | ]
99 | sdist = { url = "https://files.pythonhosted.org/packages/10/df/676b7cf674dd1bdc71a64ad393c89879f75e4a0ab8395165b498262ae106/httpx-0.28.0.tar.gz", hash = "sha256:0858d3bab51ba7e386637f22a61d8ccddaeec5f3fe4209da3a6168dbb91573e0", size = 141307 }
100 | wheels = [
101 | { url = "https://files.pythonhosted.org/packages/8f/fb/a19866137577ba60c6d8b69498dc36be479b13ba454f691348ddf428f185/httpx-0.28.0-py3-none-any.whl", hash = "sha256:dc0b419a0cfeb6e8b34e85167c0da2671206f5095f1baa9663d23bcfd6b535fc", size = 73551 },
102 | ]
103 |
104 | [[package]]
105 | name = "httpx-sse"
106 | version = "0.4.0"
107 | source = { registry = "https://pypi.org/simple" }
108 | sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624 }
109 | wheels = [
110 | { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 },
111 | ]
112 |
113 | [[package]]
114 | name = "idna"
115 | version = "3.10"
116 | source = { registry = "https://pypi.org/simple" }
117 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
118 | wheels = [
119 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
120 | ]
121 |
122 | [[package]]
123 | name = "mcp"
124 | version = "1.0.0"
125 | source = { registry = "https://pypi.org/simple" }
126 | dependencies = [
127 | { name = "anyio" },
128 | { name = "httpx" },
129 | { name = "httpx-sse" },
130 | { name = "pydantic" },
131 | { name = "sse-starlette" },
132 | { name = "starlette" },
133 | ]
134 | sdist = { url = "https://files.pythonhosted.org/packages/97/de/a9ec0a1b6439f90ea59f89004bb2e7ec6890dfaeef809751d9e6577dca7e/mcp-1.0.0.tar.gz", hash = "sha256:dba51ce0b5c6a80e25576f606760c49a91ee90210fed805b530ca165d3bbc9b7", size = 82891 }
135 | wheels = [
136 | { url = "https://files.pythonhosted.org/packages/56/89/900c0c8445ec001d3725e475fc553b0feb2e8a51be018f3bb7de51e683db/mcp-1.0.0-py3-none-any.whl", hash = "sha256:bbe70ffa3341cd4da78b5eb504958355c68381fb29971471cea1e642a2af5b8a", size = 36361 },
137 | ]
138 |
139 | [[package]]
140 | name = "mcp-server-sqlite"
141 | version = "0.6.0"
142 | source = { editable = "." }
143 | dependencies = [
144 | { name = "mcp" },
145 | ]
146 |
147 | [package.dev-dependencies]
148 | dev = [
149 | { name = "pyright" },
150 | ]
151 |
152 | [package.metadata]
153 | requires-dist = [{ name = "mcp", specifier = ">=1.0.0" }]
154 |
155 | [package.metadata.requires-dev]
156 | dev = [{ name = "pyright", specifier = ">=1.1.389" }]
157 |
158 | [[package]]
159 | name = "nodeenv"
160 | version = "1.9.1"
161 | source = { registry = "https://pypi.org/simple" }
162 | sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 }
163 | wheels = [
164 | { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 },
165 | ]
166 |
167 | [[package]]
168 | name = "pydantic"
169 | version = "2.10.2"
170 | source = { registry = "https://pypi.org/simple" }
171 | dependencies = [
172 | { name = "annotated-types" },
173 | { name = "pydantic-core" },
174 | { name = "typing-extensions" },
175 | ]
176 | sdist = { url = "https://files.pythonhosted.org/packages/41/86/a03390cb12cf64e2a8df07c267f3eb8d5035e0f9a04bb20fb79403d2a00e/pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa", size = 785401 }
177 | wheels = [
178 | { url = "https://files.pythonhosted.org/packages/d5/74/da832196702d0c56eb86b75bfa346db9238617e29b0b7ee3b8b4eccfe654/pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e", size = 456364 },
179 | ]
180 |
181 | [[package]]
182 | name = "pydantic-core"
183 | version = "2.27.1"
184 | source = { registry = "https://pypi.org/simple" }
185 | dependencies = [
186 | { name = "typing-extensions" },
187 | ]
188 | sdist = { url = "https://files.pythonhosted.org/packages/a6/9f/7de1f19b6aea45aeb441838782d68352e71bfa98ee6fa048d5041991b33e/pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235", size = 412785 }
189 | wheels = [
190 | { url = "https://files.pythonhosted.org/packages/6e/ce/60fd96895c09738648c83f3f00f595c807cb6735c70d3306b548cc96dd49/pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a", size = 1897984 },
191 | { url = "https://files.pythonhosted.org/packages/fd/b9/84623d6b6be98cc209b06687d9bca5a7b966ffed008d15225dd0d20cce2e/pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b", size = 1807491 },
192 | { url = "https://files.pythonhosted.org/packages/01/72/59a70165eabbc93b1111d42df9ca016a4aa109409db04304829377947028/pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278", size = 1831953 },
193 | { url = "https://files.pythonhosted.org/packages/7c/0c/24841136476adafd26f94b45bb718a78cb0500bd7b4f8d667b67c29d7b0d/pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05", size = 1856071 },
194 | { url = "https://files.pythonhosted.org/packages/53/5e/c32957a09cceb2af10d7642df45d1e3dbd8596061f700eac93b801de53c0/pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4", size = 2038439 },
195 | { url = "https://files.pythonhosted.org/packages/e4/8f/979ab3eccd118b638cd6d8f980fea8794f45018255a36044dea40fe579d4/pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f", size = 2787416 },
196 | { url = "https://files.pythonhosted.org/packages/02/1d/00f2e4626565b3b6d3690dab4d4fe1a26edd6a20e53749eb21ca892ef2df/pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08", size = 2134548 },
197 | { url = "https://files.pythonhosted.org/packages/9d/46/3112621204128b90898adc2e721a3cd6cf5626504178d6f32c33b5a43b79/pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6", size = 1989882 },
198 | { url = "https://files.pythonhosted.org/packages/49/ec/557dd4ff5287ffffdf16a31d08d723de6762bb1b691879dc4423392309bc/pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807", size = 1995829 },
199 | { url = "https://files.pythonhosted.org/packages/6e/b2/610dbeb74d8d43921a7234555e4c091cb050a2bdb8cfea86d07791ce01c5/pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c", size = 2091257 },
200 | { url = "https://files.pythonhosted.org/packages/8c/7f/4bf8e9d26a9118521c80b229291fa9558a07cdd9a968ec2d5c1026f14fbc/pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206", size = 2143894 },
201 | { url = "https://files.pythonhosted.org/packages/1f/1c/875ac7139c958f4390f23656fe696d1acc8edf45fb81e4831960f12cd6e4/pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c", size = 1816081 },
202 | { url = "https://files.pythonhosted.org/packages/d7/41/55a117acaeda25ceae51030b518032934f251b1dac3704a53781383e3491/pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17", size = 1981109 },
203 | { url = "https://files.pythonhosted.org/packages/27/39/46fe47f2ad4746b478ba89c561cafe4428e02b3573df882334bd2964f9cb/pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8", size = 1895553 },
204 | { url = "https://files.pythonhosted.org/packages/1c/00/0804e84a78b7fdb394fff4c4f429815a10e5e0993e6ae0e0b27dd20379ee/pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330", size = 1807220 },
205 | { url = "https://files.pythonhosted.org/packages/01/de/df51b3bac9820d38371f5a261020f505025df732ce566c2a2e7970b84c8c/pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52", size = 1829727 },
206 | { url = "https://files.pythonhosted.org/packages/5f/d9/c01d19da8f9e9fbdb2bf99f8358d145a312590374d0dc9dd8dbe484a9cde/pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4", size = 1854282 },
207 | { url = "https://files.pythonhosted.org/packages/5f/84/7db66eb12a0dc88c006abd6f3cbbf4232d26adfd827a28638c540d8f871d/pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c", size = 2037437 },
208 | { url = "https://files.pythonhosted.org/packages/34/ac/a2537958db8299fbabed81167d58cc1506049dba4163433524e06a7d9f4c/pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de", size = 2780899 },
209 | { url = "https://files.pythonhosted.org/packages/4a/c1/3e38cd777ef832c4fdce11d204592e135ddeedb6c6f525478a53d1c7d3e5/pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025", size = 2135022 },
210 | { url = "https://files.pythonhosted.org/packages/7a/69/b9952829f80fd555fe04340539d90e000a146f2a003d3fcd1e7077c06c71/pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e", size = 1987969 },
211 | { url = "https://files.pythonhosted.org/packages/05/72/257b5824d7988af43460c4e22b63932ed651fe98804cc2793068de7ec554/pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919", size = 1994625 },
212 | { url = "https://files.pythonhosted.org/packages/73/c3/78ed6b7f3278a36589bcdd01243189ade7fc9b26852844938b4d7693895b/pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c", size = 2090089 },
213 | { url = "https://files.pythonhosted.org/packages/8d/c8/b4139b2f78579960353c4cd987e035108c93a78371bb19ba0dc1ac3b3220/pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc", size = 2142496 },
214 | { url = "https://files.pythonhosted.org/packages/3e/f8/171a03e97eb36c0b51981efe0f78460554a1d8311773d3d30e20c005164e/pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9", size = 1811758 },
215 | { url = "https://files.pythonhosted.org/packages/6a/fe/4e0e63c418c1c76e33974a05266e5633e879d4061f9533b1706a86f77d5b/pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5", size = 1980864 },
216 | { url = "https://files.pythonhosted.org/packages/50/fc/93f7238a514c155a8ec02fc7ac6376177d449848115e4519b853820436c5/pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89", size = 1864327 },
217 | { url = "https://files.pythonhosted.org/packages/be/51/2e9b3788feb2aebff2aa9dfbf060ec739b38c05c46847601134cc1fed2ea/pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f", size = 1895239 },
218 | { url = "https://files.pythonhosted.org/packages/7b/9e/f8063952e4a7d0127f5d1181addef9377505dcce3be224263b25c4f0bfd9/pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02", size = 1805070 },
219 | { url = "https://files.pythonhosted.org/packages/2c/9d/e1d6c4561d262b52e41b17a7ef8301e2ba80b61e32e94520271029feb5d8/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c", size = 1828096 },
220 | { url = "https://files.pythonhosted.org/packages/be/65/80ff46de4266560baa4332ae3181fffc4488ea7d37282da1a62d10ab89a4/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac", size = 1857708 },
221 | { url = "https://files.pythonhosted.org/packages/d5/ca/3370074ad758b04d9562b12ecdb088597f4d9d13893a48a583fb47682cdf/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb", size = 2037751 },
222 | { url = "https://files.pythonhosted.org/packages/b1/e2/4ab72d93367194317b99d051947c071aef6e3eb95f7553eaa4208ecf9ba4/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529", size = 2733863 },
223 | { url = "https://files.pythonhosted.org/packages/8a/c6/8ae0831bf77f356bb73127ce5a95fe115b10f820ea480abbd72d3cc7ccf3/pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35", size = 2161161 },
224 | { url = "https://files.pythonhosted.org/packages/f1/f4/b2fe73241da2429400fc27ddeaa43e35562f96cf5b67499b2de52b528cad/pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089", size = 1993294 },
225 | { url = "https://files.pythonhosted.org/packages/77/29/4bb008823a7f4cc05828198153f9753b3bd4c104d93b8e0b1bfe4e187540/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381", size = 2001468 },
226 | { url = "https://files.pythonhosted.org/packages/f2/a9/0eaceeba41b9fad851a4107e0cf999a34ae8f0d0d1f829e2574f3d8897b0/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb", size = 2091413 },
227 | { url = "https://files.pythonhosted.org/packages/d8/36/eb8697729725bc610fd73940f0d860d791dc2ad557faaefcbb3edbd2b349/pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae", size = 2154735 },
228 | { url = "https://files.pythonhosted.org/packages/52/e5/4f0fbd5c5995cc70d3afed1b5c754055bb67908f55b5cb8000f7112749bf/pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c", size = 1833633 },
229 | { url = "https://files.pythonhosted.org/packages/ee/f2/c61486eee27cae5ac781305658779b4a6b45f9cc9d02c90cb21b940e82cc/pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16", size = 1986973 },
230 | { url = "https://files.pythonhosted.org/packages/df/a6/e3f12ff25f250b02f7c51be89a294689d175ac76e1096c32bf278f29ca1e/pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e", size = 1883215 },
231 | { url = "https://files.pythonhosted.org/packages/0f/d6/91cb99a3c59d7b072bded9959fbeab0a9613d5a4935773c0801f1764c156/pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073", size = 1895033 },
232 | { url = "https://files.pythonhosted.org/packages/07/42/d35033f81a28b27dedcade9e967e8a40981a765795c9ebae2045bcef05d3/pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08", size = 1807542 },
233 | { url = "https://files.pythonhosted.org/packages/41/c2/491b59e222ec7e72236e512108ecad532c7f4391a14e971c963f624f7569/pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf", size = 1827854 },
234 | { url = "https://files.pythonhosted.org/packages/e3/f3/363652651779113189cefdbbb619b7b07b7a67ebb6840325117cc8cc3460/pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737", size = 1857389 },
235 | { url = "https://files.pythonhosted.org/packages/5f/97/be804aed6b479af5a945daec7538d8bf358d668bdadde4c7888a2506bdfb/pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2", size = 2037934 },
236 | { url = "https://files.pythonhosted.org/packages/42/01/295f0bd4abf58902917e342ddfe5f76cf66ffabfc57c2e23c7681a1a1197/pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107", size = 2735176 },
237 | { url = "https://files.pythonhosted.org/packages/9d/a0/cd8e9c940ead89cc37812a1a9f310fef59ba2f0b22b4e417d84ab09fa970/pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51", size = 2160720 },
238 | { url = "https://files.pythonhosted.org/packages/73/ae/9d0980e286627e0aeca4c352a60bd760331622c12d576e5ea4441ac7e15e/pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a", size = 1992972 },
239 | { url = "https://files.pythonhosted.org/packages/bf/ba/ae4480bc0292d54b85cfb954e9d6bd226982949f8316338677d56541b85f/pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc", size = 2001477 },
240 | { url = "https://files.pythonhosted.org/packages/55/b7/e26adf48c2f943092ce54ae14c3c08d0d221ad34ce80b18a50de8ed2cba8/pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960", size = 2091186 },
241 | { url = "https://files.pythonhosted.org/packages/ba/cc/8491fff5b608b3862eb36e7d29d36a1af1c945463ca4c5040bf46cc73f40/pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23", size = 2154429 },
242 | { url = "https://files.pythonhosted.org/packages/78/d8/c080592d80edd3441ab7f88f865f51dae94a157fc64283c680e9f32cf6da/pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05", size = 1833713 },
243 | { url = "https://files.pythonhosted.org/packages/83/84/5ab82a9ee2538ac95a66e51f6838d6aba6e0a03a42aa185ad2fe404a4e8f/pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337", size = 1987897 },
244 | { url = "https://files.pythonhosted.org/packages/df/c3/b15fb833926d91d982fde29c0624c9f225da743c7af801dace0d4e187e71/pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5", size = 1882983 },
245 | { url = "https://files.pythonhosted.org/packages/7c/60/e5eb2d462595ba1f622edbe7b1d19531e510c05c405f0b87c80c1e89d5b1/pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6", size = 1894016 },
246 | { url = "https://files.pythonhosted.org/packages/61/20/da7059855225038c1c4326a840908cc7ca72c7198cb6addb8b92ec81c1d6/pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676", size = 1771648 },
247 | { url = "https://files.pythonhosted.org/packages/8f/fc/5485cf0b0bb38da31d1d292160a4d123b5977841ddc1122c671a30b76cfd/pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d", size = 1826929 },
248 | { url = "https://files.pythonhosted.org/packages/a1/ff/fb1284a210e13a5f34c639efc54d51da136074ffbe25ec0c279cf9fbb1c4/pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c", size = 1980591 },
249 | { url = "https://files.pythonhosted.org/packages/f1/14/77c1887a182d05af74f6aeac7b740da3a74155d3093ccc7ee10b900cc6b5/pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27", size = 1981326 },
250 | { url = "https://files.pythonhosted.org/packages/06/aa/6f1b2747f811a9c66b5ef39d7f02fbb200479784c75e98290d70004b1253/pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f", size = 1989205 },
251 | { url = "https://files.pythonhosted.org/packages/7a/d2/8ce2b074d6835f3c88d85f6d8a399790043e9fdb3d0e43455e72d19df8cc/pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed", size = 2079616 },
252 | { url = "https://files.pythonhosted.org/packages/65/71/af01033d4e58484c3db1e5d13e751ba5e3d6b87cc3368533df4c50932c8b/pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f", size = 2133265 },
253 | { url = "https://files.pythonhosted.org/packages/33/72/f881b5e18fbb67cf2fb4ab253660de3c6899dbb2dba409d0b757e3559e3d/pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c", size = 2001864 },
254 | ]
255 |
256 | [[package]]
257 | name = "pyright"
258 | version = "1.1.389"
259 | source = { registry = "https://pypi.org/simple" }
260 | dependencies = [
261 | { name = "nodeenv" },
262 | { name = "typing-extensions" },
263 | ]
264 | sdist = { url = "https://files.pythonhosted.org/packages/72/4e/9a5ab8745e7606b88c2c7ca223449ac9d82a71fd5e31df47b453f2cb39a1/pyright-1.1.389.tar.gz", hash = "sha256:716bf8cc174ab8b4dcf6828c3298cac05c5ed775dda9910106a5dcfe4c7fe220", size = 21940 }
265 | wheels = [
266 | { url = "https://files.pythonhosted.org/packages/1b/26/c288cabf8cfc5a27e1aa9e5029b7682c0f920b8074f45d22bf844314d66a/pyright-1.1.389-py3-none-any.whl", hash = "sha256:41e9620bba9254406dc1f621a88ceab5a88af4c826feb4f614d95691ed243a60", size = 18581 },
267 | ]
268 |
269 | [[package]]
270 | name = "sniffio"
271 | version = "1.3.1"
272 | source = { registry = "https://pypi.org/simple" }
273 | sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
274 | wheels = [
275 | { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
276 | ]
277 |
278 | [[package]]
279 | name = "sse-starlette"
280 | version = "2.1.3"
281 | source = { registry = "https://pypi.org/simple" }
282 | dependencies = [
283 | { name = "anyio" },
284 | { name = "starlette" },
285 | { name = "uvicorn" },
286 | ]
287 | sdist = { url = "https://files.pythonhosted.org/packages/72/fc/56ab9f116b2133521f532fce8d03194cf04dcac25f583cf3d839be4c0496/sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169", size = 19678 }
288 | wheels = [
289 | { url = "https://files.pythonhosted.org/packages/52/aa/36b271bc4fa1d2796311ee7c7283a3a1c348bad426d37293609ca4300eef/sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772", size = 9383 },
290 | ]
291 |
292 | [[package]]
293 | name = "starlette"
294 | version = "0.41.3"
295 | source = { registry = "https://pypi.org/simple" }
296 | dependencies = [
297 | { name = "anyio" },
298 | ]
299 | sdist = { url = "https://files.pythonhosted.org/packages/1a/4c/9b5764bd22eec91c4039ef4c55334e9187085da2d8a2df7bd570869aae18/starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835", size = 2574159 }
300 | wheels = [
301 | { url = "https://files.pythonhosted.org/packages/96/00/2b325970b3060c7cecebab6d295afe763365822b1306a12eeab198f74323/starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7", size = 73225 },
302 | ]
303 |
304 | [[package]]
305 | name = "typing-extensions"
306 | version = "4.12.2"
307 | source = { registry = "https://pypi.org/simple" }
308 | sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 }
309 | wheels = [
310 | { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 },
311 | ]
312 |
313 | [[package]]
314 | name = "uvicorn"
315 | version = "0.32.1"
316 | source = { registry = "https://pypi.org/simple" }
317 | dependencies = [
318 | { name = "click" },
319 | { name = "h11" },
320 | { name = "typing-extensions", marker = "python_full_version < '3.11'" },
321 | ]
322 | sdist = { url = "https://files.pythonhosted.org/packages/6a/3c/21dba3e7d76138725ef307e3d7ddd29b763119b3aa459d02cc05fefcff75/uvicorn-0.32.1.tar.gz", hash = "sha256:ee9519c246a72b1c084cea8d3b44ed6026e78a4a309cbedae9c37e4cb9fbb175", size = 77630 }
323 | wheels = [
324 | { url = "https://files.pythonhosted.org/packages/50/c1/2d27b0a15826c2b71dcf6e2f5402181ef85acf439617bb2f1453125ce1f3/uvicorn-0.32.1-py3-none-any.whl", hash = "sha256:82ad92fd58da0d12af7482ecdb5f2470a04c9c9a53ced65b9bbb4a205377602e", size = 63828 },
325 | ]
326 |
--------------------------------------------------------------------------------