├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── pyproject.toml
├── screenshots
└── skydeck_ai_helper.png
├── smithery.yaml
├── src
├── __init__.py
└── aidd
│ ├── __init__.py
│ ├── cli.py
│ ├── server.py
│ └── tools
│ ├── __init__.py
│ ├── base.py
│ ├── code_analysis.py
│ ├── code_execution.py
│ ├── code_tools.py
│ ├── directory_tools.py
│ ├── file_tools.py
│ ├── get_active_apps_tool.py
│ ├── get_available_windows_tool.py
│ ├── image_tools.py
│ ├── other_tools.py
│ ├── path_tools.py
│ ├── screenshot_tool.py
│ ├── state.py
│ ├── system_tools.py
│ └── web_tools.py
└── uv.lock
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | develop-eggs/
8 | dist/
9 | downloads/
10 | eggs/
11 | .eggs/
12 | lib/
13 | lib64/
14 | parts/
15 | sdist/
16 | var/
17 | wheels/
18 | *.egg-info/
19 | .installed.cfg
20 | *.egg
21 | MANIFEST
22 | *.manifest
23 | *.spec
24 | pip-log.txt
25 | pip-delete-this-directory.txt
26 |
27 | # Unit test / coverage reports
28 | htmlcov/
29 | .tox/
30 | .nox/
31 | .coverage
32 | .coverage.*
33 | .cache
34 | nosetests.xml
35 | coverage.xml
36 | *.cover
37 | .hypothesis/
38 | .pytest_cache/
39 | cover/
40 |
41 | # Environments
42 | .env
43 | .venv
44 | env/
45 | venv/
46 | ENV/
47 | env.bak/
48 | venv.bak/
49 | .python-version
50 |
51 | # IDEs and editors
52 | .idea/
53 | .vscode/
54 | *.swp
55 | *.swo
56 | *~
57 | .project
58 | .classpath
59 | .settings/
60 | *.sublime-workspace
61 | *.sublime-project
62 |
63 | # OS generated files
64 | .DS_Store
65 | .DS_Store?
66 | ._*
67 | .Spotlight-V100
68 | .Trashes
69 | ehthumbs.db
70 | Thumbs.db
71 |
72 | # Project specific
73 | .skydeckai-code/
74 | .uv/
75 | node_modules/
76 | *.log
77 | CLAUDE.md
78 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
2 | # Start with the official Python image with a compatible version
3 | FROM python:3.11-slim
4 |
5 | # Set the working directory
6 | WORKDIR /app
7 |
8 | # Copy the necessary files for installing the package
9 | COPY pyproject.toml /app/
10 | COPY src /app/src
11 |
12 | # Install build tools and dependencies
13 | RUN pip install --no-cache-dir hatchling
14 |
15 | # Install the package in the Docker container
16 | RUN pip install .
17 |
18 | # Expose any necessary ports (optional, depending on the server requirements)
19 | # EXPOSE 8000
20 |
21 | # Define the entry point for the container
22 | CMD ["skydeckai-code-cli", "--tool", "get_system_info"]
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are entirely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2025 SkyDeck.ai
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://mseep.ai/app/skydeckai-code)
2 |
3 | # SkyDeckAI Code
4 |
5 | An MCP server that provides a comprehensive set of tools for AI-driven development workflows. Features include file system operations, code analysis using tree-sitter for multiple programming languages, code execution, web content fetching with HTML-to-markdown conversion, multi-engine web search, code content searching, and system information retrieval. Designed to enhance AI's capability to assist in software development tasks by providing direct access to both local and remote resources.
6 |
7 | # Formerly Known As MCP-Server-AIDD
8 |
9 | This MCP server was formerly known as `mcp-server-aidd`. It was renamed to `skydeckai-code` to credit the team at [SkyDeck.ai](https://skydeck.ai) with creating this application along with [East Agile](https://eastagile.com). But more importantly, we realized that the term AI Driven Development (AIDD) was just not catching on. People did not understand at a glance what it was about — and neither did LLMs. "Code" was far more intuitive, and being linguistically intuitive is important in the world of agentic AI.
10 |
11 |
12 |
13 | ## Installation
14 |
15 | ```bash
16 | # Using pip
17 | pip install skydeckai-code
18 | ```
19 |
20 | ## Claude Desktop Setup
21 |
22 | Add to your `claude_desktop_config.json`:
23 |
24 | ```json
25 | {
26 | "mcpServers": {
27 | "skydeckai-code": {
28 | "command": "uvx",
29 | "args": ["skydeckai-code"]
30 | }
31 | }
32 | }
33 | ```
34 |
35 | ## SkyDeck AI Helper App
36 |
37 | If you're using the SkyDeck AI Helper app, you can search for "SkyDeckAI Code" and install it.
38 |
39 | 
40 |
41 | ## Key Features
42 |
43 | - File system operations (read, write, edit, move, copy, delete)
44 | - Directory management and traversal
45 | - Multi-language code analysis using tree-sitter
46 | - Code content searching with regex pattern matching
47 | - Multi-language code execution with safety measures
48 | - Web content fetching from APIs and websites with HTML-to-markdown conversion
49 | - Multi-engine web search with reliable fallback mechanisms
50 | - Batch operations for parallel and serial tool execution
51 | - Security controls with configurable workspace boundaries
52 | - Screenshot and screen context tools
53 | - Image handling tools
54 |
55 | ## Available Tools (26)
56 |
57 | | Category | Tool Name | Description |
58 | | ---------------- | -------------------------- | -------------------------------------------- |
59 | | **File System** | `get_allowed_directory` | Get the current working directory path |
60 | | | `update_allowed_directory` | Change the working directory |
61 | | | `create_directory` | Create a new directory or nested directories |
62 | | | `write_file` | Create or overwrite a file with new content |
63 | | | `edit_file` | Make line-based edits to a text file |
64 | | | `read_file` | Read the contents of one or more files |
65 | | | `list_directory` | Get listing of files and directories |
66 | | | `move_file` | Move or rename a file or directory |
67 | | | `copy_file` | Copy a file or directory to a new location |
68 | | | `search_files` | Search for files matching a name pattern |
69 | | | `delete_file` | Delete a file or empty directory |
70 | | | `get_file_info` | Get detailed file metadata |
71 | | | `directory_tree` | Get a recursive tree view of directories |
72 | | | `read_image_file` | Read an image file as base64 data |
73 | | **Code Tools** | `codebase_mapper` | Analyze code structure across files |
74 | | | `search_code` | Find text patterns in code files |
75 | | | `execute_code` | Run code in various languages |
76 | | | `execute_shell_script` | Run shell/bash scripts |
77 | | **Web Tools** | `web_fetch` | Get content from a URL |
78 | | | `web_search` | Perform a web search |
79 | | **Screen Tools** | `capture_screenshot` | Take a screenshot of screen or window |
80 | | | `get_active_apps` | List running applications |
81 | | | `get_available_windows` | List all open windows |
82 | | **System** | `get_system_info` | Get detailed system information |
83 | | **Utility** | `batch_tools` | Run multiple tool operations together |
84 | | | `think` | Document reasoning without making changes |
85 |
86 | ## Detailed Tool Documentation
87 |
88 | ### Basic File Operations
89 |
90 | | Tool | Parameters | Returns |
91 | | ------------- | ---------------------------------------------------------- | --------------------------------------------- |
92 | | read_file | files: [{path: string, offset?: integer, limit?: integer}] | File content (single or multiple files) |
93 | | write_file | path: string, content: string | Success confirmation |
94 | | move_file | source: string, destination: string | Success confirmation |
95 | | copy_file | source: string, destination: string, recursive?: boolean | Success confirmation |
96 | | delete_file | path: string | Success confirmation |
97 | | get_file_info | path: string | File metadata (size, timestamps, permissions) |
98 |
99 | **CLI Usage:**
100 |
101 | ```bash
102 | # Read entire file
103 | skydeckai-code-cli --tool read_file --args '{"files": [{"path": "src/main.py"}]}'
104 |
105 | # Read 10 lines starting from line 20
106 | skydeckai-code-cli --tool read_file --args '{"files": [{"path": "src/main.py", "offset": 20, "limit": 10}]}'
107 |
108 | # Read from line 50 to the end of the file
109 | skydeckai-code-cli --tool read_file --args '{"files": [{"path": "src/main.py", "offset": 50}]}'
110 |
111 | # Read multiple files with different line ranges
112 | skydeckai-code-cli --tool read_file --args '{"files": [
113 | {"path": "src/main.py", "offset": 1, "limit": 10},
114 | {"path": "README.md"}
115 | ]}'
116 |
117 | # Write file
118 | skydeckai-code-cli --tool write_file --args '{"path": "output.txt", "content": "Hello World"}'
119 |
120 | # Copy file or directory
121 | skydeckai-code-cli --tool copy_file --args '{"source": "config.json", "destination": "config.backup.json"}'
122 |
123 | # Get file info
124 | skydeckai-code-cli --tool get_file_info --args '{"path": "src/main.py"}'
125 | ```
126 |
127 | ### Complex File Operations
128 |
129 | #### edit_file
130 |
131 | Pattern-based file editing with preview support:
132 |
133 | ```json
134 | {
135 | "path": "src/main.py",
136 | "edits": [
137 | {
138 | "oldText": "def old_function():",
139 | "newText": "def new_function():"
140 | }
141 | ],
142 | "dryRun": false,
143 | "options": {
144 | "partialMatch": true
145 | }
146 | }
147 | ```
148 |
149 | Returns: Diff of changes or preview in dry run mode.
150 |
151 | ### Directory Operations
152 |
153 | | Tool | Parameters | Returns |
154 | | ------------------------ | -------------------------------------------------------- | ------------------------------ |
155 | | get_allowed_directory | none | Current allowed directory path |
156 | | update_allowed_directory | directory: string (absolute path) | Success confirmation |
157 | | list_directory | path: string | Directory contents list |
158 | | create_directory | path: string | Success confirmation |
159 | | search_files | pattern: string, path?: string, include_hidden?: boolean | Matching files list |
160 |
161 | The `search_files` tool searches for files by name pattern, while the `search_code` tool searches within file contents using regex. Use `search_files` when looking for files with specific names or extensions, and `search_code` when searching for specific text patterns inside files.
162 |
163 | #### directory_tree
164 |
165 | Generates complete directory structure:
166 |
167 | ```json
168 | {
169 | "path": "src",
170 | "include_hidden": false
171 | }
172 | ```
173 |
174 | Returns: JSON tree structure of directory contents.
175 |
176 | **CLI Usage:**
177 |
178 | ```bash
179 | # List directory
180 | skydeckai-code-cli --tool list_directory --args '{"path": "."}'
181 |
182 | # Search for Python files
183 | skydeckai-code-cli --tool search_files --args '{"pattern": ".py", "path": "src"}'
184 | ```
185 |
186 | ### Code Analysis
187 |
188 | #### codebase_mapper
189 |
190 | Analyzes source code structure:
191 |
192 | ```json
193 | {
194 | "path": "src"
195 | }
196 | ```
197 |
198 | Returns:
199 |
200 | - Classes and their methods
201 | - Functions and parameters
202 | - Module structure
203 | - Code organization statistics
204 | - Inheritance relationships
205 |
206 | Supported Languages:
207 |
208 | - Python (.py)
209 | - JavaScript (.js/.jsx, .mjs, .cjs)
210 | - TypeScript (.ts/.tsx)
211 | - Java (.java)
212 | - C++ (.cpp, .hpp, .cc)
213 | - Ruby (.rb, .rake)
214 | - Go (.go)
215 | - Rust (.rs)
216 | - PHP (.php)
217 | - C# (.cs)
218 | - Kotlin (.kt, .kts)
219 |
220 | **CLI Usage:**
221 |
222 | ```bash
223 | # Map the entire codebase structure
224 | skydeckai-code-cli --tool codebase_mapper --args '{"path": "."}'
225 |
226 | # Map only the source directory
227 | skydeckai-code-cli --tool codebase_mapper --args '{"path": "src"}'
228 |
229 | # Map a specific component or module
230 | skydeckai-code-cli --tool codebase_mapper --args '{"path": "src/components"}'
231 | ```
232 |
233 | #### search_code
234 |
235 | Fast content search tool using regular expressions:
236 |
237 | ```json
238 | {
239 | "patterns": ["function\\s+\\w+", "class\\s+\\w+"],
240 | "include": "*.js",
241 | "exclude": "node_modules/**",
242 | "max_results": 50,
243 | "case_sensitive": false,
244 | "path": "src"
245 | }
246 | ```
247 |
248 | **Parameters:**
249 | | Parameter | Type | Required | Description |
250 | |-----------|------|----------|-------------|
251 | | patterns | array of strings | Yes | List of regular expression patterns to search for in file contents |
252 | | include | string | No | File pattern to include (glob syntax, default: "\*") |
253 | | exclude | string | No | File pattern to exclude (glob syntax, default: "") |
254 | | max_results | integer | No | Maximum results to return per pattern (default: 100) |
255 | | case_sensitive | boolean | No | Whether search is case-sensitive (default: false) |
256 | | path | string | No | Base directory to search from (default: ".") |
257 |
258 | **Returns:**
259 | Matching lines grouped by file with line numbers, sorted by file modification time with newest files first.
260 |
261 | This tool uses ripgrep when available for optimal performance, with a Python fallback implementation. It's ideal for finding specific code patterns like function declarations, imports, variable usages, or error handling.
262 |
263 | **CLI Usage:**
264 |
265 | ```bash
266 | # Find function and class declarations in JavaScript files
267 | skydeckai-code-cli --tool search_code --args '{
268 | "patterns": ["function\\s+\\w+", "class\\s+\\w+"],
269 | "include": "*.js"
270 | }'
271 |
272 | # Find all console.log statements with errors or warnings
273 | skydeckai-code-cli --tool search_code --args '{
274 | "patterns": ["console\\.log.*[eE]rror", "console\\.log.*[wW]arning"],
275 | "path": "src"
276 | }'
277 |
278 | # Find import and export statements in TypeScript files
279 | skydeckai-code-cli --tool search_code --args '{
280 | "patterns": ["import.*from", "export.*"],
281 | "include": "*.{ts,tsx}",
282 | "exclude": "node_modules/**"
283 | }'
284 | ```
285 |
286 | ### System Information
287 |
288 | | Tool | Parameters | Returns |
289 | | --------------- | ---------- | ---------------------------- |
290 | | get_system_info | none | Comprehensive system details |
291 |
292 | Returns:
293 |
294 | ```json
295 | {
296 | "working_directory": "/path/to/project",
297 | "system": {
298 | "os", "os_version", "architecture", "python_version"
299 | },
300 | "wifi_network": "MyWiFi",
301 | "cpu": {
302 | "physical_cores", "logical_cores", "total_cpu_usage"
303 | },
304 | "memory": { "total", "available", "used_percentage" },
305 | "disk": { "total", "free", "used_percentage" },
306 | "mac_details": { // Only present on macOS
307 | "model": "Mac mini",
308 | "chip": "Apple M2",
309 | "serial_number": "XXX"
310 | }
311 | }
312 | ```
313 |
314 | Provides essential system information in a clean, readable format.
315 |
316 | **CLI Usage:**
317 |
318 | ```bash
319 | # Get system information
320 | skydeckai-code-cli --tool get_system_info
321 | ```
322 |
323 | ### Screen Context and Image Tools
324 |
325 | #### get_active_apps
326 |
327 | Returns a list of currently active applications on the user's system.
328 |
329 | ```json
330 | {
331 | "with_details": true
332 | }
333 | ```
334 |
335 | **Parameters:**
336 | | Parameter | Type | Required | Description |
337 | |-----------|---------|----------|---------------------------------------|
338 | | with_details | boolean | No | Whether to include additional details about each application (default: false) |
339 |
340 | **Returns:**
341 |
342 | ```json
343 | {
344 | "success": true,
345 | "platform": "macos",
346 | "app_count": 12,
347 | "apps": [
348 | {
349 | "name": "Firefox",
350 | "has_windows": true,
351 | "window_count": 3,
352 | "visible_windows": [
353 | { "name": "GitHub - Mozilla Firefox", "width": 1200, "height": 800 }
354 | ]
355 | },
356 | {
357 | "name": "VSCode",
358 | "has_windows": true
359 | }
360 | ]
361 | }
362 | ```
363 |
364 | This tool provides valuable context about applications currently running on the user's system, which can help with providing more relevant assistance.
365 |
366 | #### get_available_windows
367 |
368 | Returns detailed information about all available windows currently displayed on the user's screen.
369 |
370 | ```json
371 | {}
372 | ```
373 |
374 | **Returns:**
375 |
376 | ```json
377 | {
378 | "success": true,
379 | "platform": "macos",
380 | "count": 8,
381 | "windows": [
382 | {
383 | "id": 42,
384 | "title": "Document.txt - Notepad",
385 | "app": "Notepad",
386 | "visible": true
387 | },
388 | {
389 | "title": "Terminal",
390 | "app": "Terminal",
391 | "visible": true,
392 | "active": true
393 | }
394 | ]
395 | }
396 | ```
397 |
398 | This tool helps you understand what's visible on the user's screen and can be used for context-aware assistance.
399 |
400 | #### capture_screenshot
401 |
402 | Captures a screenshot of the user's screen or a specific window.
403 |
404 | ```json
405 | {
406 | "output_path": "screenshots/capture.png",
407 | "capture_mode": {
408 | "type": "named_window",
409 | "window_name": "Visual Studio Code"
410 | }
411 | }
412 | ```
413 |
414 | **Parameters:**
415 | | Parameter | Type | Required | Description |
416 | |-----------|---------|----------|---------------------------------------|
417 | | output_path | string | No | Path where the screenshot should be saved (default: generated path) |
418 | | capture_mode | object | No | Specifies what to capture |
419 | | capture_mode.type | string | No | Type of screenshot: 'full', 'active_window', or 'named_window' (default: 'full') |
420 | | capture_mode.window_name | string | No | Name of window to capture (required when type is 'named_window') |
421 |
422 | **Returns:**
423 |
424 | ```json
425 | {
426 | "success": true,
427 | "path": "/path/to/screenshots/capture.png"
428 | }
429 | ```
430 |
431 | This tool captures screenshots for visualization, debugging, or context-aware assistance.
432 |
433 | #### read_image_file
434 |
435 | Reads an image file from the file system and returns its contents as a base64-encoded string.
436 |
437 | ```json
438 | {
439 | "path": "images/logo.png"
440 | }
441 | ```
442 |
443 | **Parameters:**
444 | | Parameter | Type | Required | Description |
445 | |-----------|---------|----------|---------------------------------------|
446 | | path | string | Yes | Path to the image file to read |
447 | | max_size | integer | No | Maximum file size in bytes (default: 100MB) |
448 |
449 | **Returns:**
450 | Base64-encoded image data that can be displayed or processed.
451 |
452 | This tool supports common image formats like PNG, JPEG, GIF, and WebP, and automatically resizes images for optimal viewing.
453 |
454 | ### Web Tools
455 |
456 | #### web_fetch
457 |
458 | Fetches content from a URL and optionally saves it to a file.
459 |
460 | ```json
461 | {
462 | "url": "https://api.github.com/users/octocat",
463 | "headers": {
464 | "Accept": "application/json"
465 | },
466 | "timeout": 15,
467 | "save_to_file": "downloads/octocat.json",
468 | "convert_html_to_markdown": true
469 | }
470 | ```
471 |
472 | **Parameters:**
473 | | Parameter | Type | Required | Description |
474 | |-----------|---------|----------|---------------------------------------|
475 | | url | string | Yes | URL to fetch content from (http/https only) |
476 | | headers | object | No | Optional HTTP headers to include in the request |
477 | | timeout | integer | No | Maximum time to wait for response (default: 10s) |
478 | | save_to_file | string | No | Path to save response content (within allowed directory) |
479 | | convert_html_to_markdown | boolean | No | When true, converts HTML content to markdown for better readability (default: true) |
480 |
481 | **Returns:**
482 | Response content as text with HTTP status code and size information. For binary content, returns metadata and saves to file if requested. When convert_html_to_markdown is enabled, HTML content is automatically converted to markdown format for better readability.
483 |
484 | This tool can be used to access web APIs, fetch documentation, or download content from the web while respecting size limits (10MB max) and security constraints.
485 |
486 | **CLI Usage:**
487 |
488 | ```bash
489 | # Fetch JSON from an API
490 | skydeckai-code-cli --tool web_fetch --args '{
491 | "url": "https://api.github.com/users/octocat",
492 | "headers": {"Accept": "application/json"}
493 | }'
494 |
495 | # Download content to a file
496 | skydeckai-code-cli --tool web_fetch --args '{
497 | "url": "https://github.com/github/github-mcp-server/blob/main/README.md",
498 | "save_to_file": "downloads/readme.md"
499 | }'
500 |
501 | # Fetch a webpage and convert to markdown for better readability
502 | skydeckai-code-cli --tool web_fetch --args '{
503 | "url": "https://example.com",
504 | "convert_html_to_markdown": true
505 | }'
506 | ```
507 |
508 | #### web_search
509 |
510 | Performs a robust web search using multiple search engines and returns concise, relevant results.
511 |
512 | ```json
513 | {
514 | "query": "latest python release features",
515 | "num_results": 8,
516 | "convert_html_to_markdown": true,
517 | "search_engine": "bing"
518 | }
519 | ```
520 |
521 | **Parameters:**
522 | | Parameter | Type | Required | Description |
523 | |-----------|---------|----------|---------------------------------------|
524 | | query | string | Yes | The search query to process. Be specific for better results. |
525 | | num_results | integer | No | Maximum number of search results to return (default: 10, max: 20) |
526 | | convert_html_to_markdown | boolean | No | When true, content will be converted from HTML to markdown for better readability (default: true) |
527 | | search_engine | string | No | Specifies which search engine to use: "auto" (default), "bing", or "duckduckgo" |
528 |
529 | **Returns:**
530 | A list of search results formatted in markdown, including titles, URLs, and snippets for each result. Results are deduplicated and organized hierarchically for easy reading.
531 |
532 | This tool uses a multi-engine approach that tries different search engines with various parsing strategies to ensure reliable results. You can specify a preferred engine, but some engines may block automated access, in which case the tool will fall back to alternative engines when "auto" is selected.
533 |
534 | **CLI Usage:**
535 |
536 | ```bash
537 | # Search with default settings (auto engine selection)
538 | skydeckai-code-cli --tool web_search --args '{
539 | "query": "latest python release features"
540 | }'
541 |
542 | # Try DuckDuckGo if you want alternative results
543 | skydeckai-code-cli --tool web_search --args '{
544 | "query": "machine learning frameworks comparison",
545 | "search_engine": "duckduckgo"
546 | }'
547 |
548 | # Use Bing for reliable results
549 | skydeckai-code-cli --tool web_search --args '{
550 | "query": "best programming practices 2023",
551 | "search_engine": "bing"
552 | }'
553 | ```
554 |
555 | ### Utility Tools
556 |
557 | #### batch_tools
558 |
559 | Execute multiple tool invocations in a single request with parallel execution when possible.
560 |
561 | ```json
562 | {
563 | "description": "Setup new project",
564 | "sequential": true,
565 | "invocations": [
566 | {
567 | "tool": "create_directory",
568 | "arguments": {
569 | "path": "src"
570 | }
571 | },
572 | {
573 | "tool": "write_file",
574 | "arguments": {
575 | "path": "README.md",
576 | "content": "# New Project\n\nThis is a new project."
577 | }
578 | },
579 | {
580 | "tool": "execute_shell_script",
581 | "arguments": {
582 | "script": "git init"
583 | }
584 | }
585 | ]
586 | }
587 | ```
588 |
589 | **Parameters:**
590 | | Parameter | Type | Required | Description |
591 | |-----------|---------|----------|---------------------------------------|
592 | | description | string | Yes | Short description of the batch operation |
593 | | sequential | boolean | No | Whether to run tools in sequence (default: false) |
594 | | invocations | array | Yes | List of tool invocations to execute |
595 | | invocations[].tool | string | Yes | Name of the tool to invoke |
596 | | invocations[].arguments | object | Yes | Arguments for the specified tool |
597 |
598 | **Returns:**
599 | Combined results from all tool invocations, grouped by tool with success/error status for each. Results are presented in the original invocation order with clear section headers.
600 |
601 | This tool provides efficient execution of multiple operations in a single request. When `sequential` is false (default), tools are executed in parallel for better performance. When `sequential` is true, tools are executed in order, and if any tool fails, execution stops.
602 |
603 | **IMPORTANT**: All tools in the batch execute in the same working directory context. If a tool creates a directory and a subsequent tool needs to work inside that directory, you must either:
604 |
605 | 1. Use paths relative to the current working directory (e.g., "project/src" rather than just "src"), or
606 | 2. Include an explicit tool invocation to change directories using `update_allowed_directory`
607 |
608 | **CLI Usage:**
609 |
610 | ```bash
611 | # Setup a new project with multiple steps in sequential order (using proper paths)
612 | skydeckai-code-cli --tool batch_tools --args '{
613 | "description": "Setup new project",
614 | "sequential": true,
615 | "invocations": [
616 | {"tool": "create_directory", "arguments": {"path": "project"}},
617 | {"tool": "create_directory", "arguments": {"path": "project/src"}},
618 | {"tool": "write_file", "arguments": {"path": "project/README.md", "content": "# Project\n\nA new project."}}
619 | ]
620 | }'
621 |
622 | # Create nested structure using relative paths (without changing directory)
623 | skydeckai-code-cli --tool batch_tools --args '{
624 | "description": "Create project structure",
625 | "sequential": true,
626 | "invocations": [
627 | {"tool": "create_directory", "arguments": {"path": "project/src"}},
628 | {"tool": "create_directory", "arguments": {"path": "project/docs"}},
629 | {"tool": "write_file", "arguments": {"path": "project/README.md", "content": "# Project"}}
630 | ]
631 | }'
632 |
633 | # Gather system information and take a screenshot (tasks can run in parallel)
634 | skydeckai-code-cli --tool batch_tools --args '{
635 | "description": "System diagnostics",
636 | "sequential": false,
637 | "invocations": [
638 | {"tool": "get_system_info", "arguments": {}},
639 | {"tool": "capture_screenshot", "arguments": {
640 | "output_path": "diagnostics/screen.png",
641 | "capture_mode": {
642 | "type": "full"
643 | }
644 | }}
645 | ]
646 | }'
647 | ```
648 |
649 | #### think
650 |
651 | A tool for complex reasoning and brainstorming without making changes to the repository.
652 |
653 | ```json
654 | {
655 | "thought": "Let me analyze the performance issue in the codebase:\n\n## Root Cause Analysis\n\n1. The database query is inefficient because:\n - It doesn't use proper indexing\n - It fetches more columns than needed\n - The JOIN operation is unnecessarily complex\n\n## Potential Solutions\n\n1. **Add database indexes**:\n - Create an index on the user_id column\n - Create a composite index on (created_at, status)\n\n2. **Optimize the query**:\n - Select only necessary columns\n - Rewrite the JOIN using a subquery\n - Add LIMIT clause for pagination\n\n3. **Add caching layer**:\n - Cache frequent queries using Redis\n - Implement cache invalidation strategy\n\nAfter weighing the options, solution #2 seems to be the simplest to implement with the highest impact."
656 | }
657 | ```
658 |
659 | **Parameters:**
660 | | Parameter | Type | Required | Description |
661 | |-----------|---------|----------|---------------------------------------|
662 | | thought | string | Yes | Your detailed thoughts, analysis or reasoning process |
663 |
664 | **Returns:**
665 | Your thoughts formatted as markdown, with a note indicating this was a thinking exercise.
666 |
667 | This tool is useful for thinking through complex problems, brainstorming solutions, or laying out implementation plans without making any actual changes. It's a great way to document your reasoning process, evaluate different approaches, or plan out a multi-step strategy before taking action.
668 |
669 | **CLI Usage:**
670 |
671 | ```bash
672 | # Analyze a bug and plan a fix
673 | skydeckai-code-cli --tool think --args '{
674 | "thought": "# Bug Analysis\n\n## Observed Behavior\nThe login endpoint returns a 500 error when email contains Unicode characters.\n\n## Root Cause\nThe database adapter is not properly encoding Unicode strings before constructing the SQL query.\n\n## Potential Fixes\n1. Update the database adapter to use parameterized queries\n2. Add input validation to reject Unicode in emails\n3. Encode email input manually before database operations\n\nFix #1 is the best approach as it solves the core issue and improves security."
675 | }'
676 |
677 | # Evaluate design alternatives
678 | skydeckai-code-cli --tool think --args '{
679 | "thought": "# API Design Options\n\n## REST vs GraphQL\nFor this use case, GraphQL would provide more flexible data fetching but adds complexity. REST is simpler and sufficient for our current needs.\n\n## Authentication Methods\nJWT-based authentication offers stateless operation and better scalability compared to session-based auth.\n\nRecommendation: Use REST with JWT authentication for the initial implementation."
680 | }'
681 | ```
682 |
683 | ### Code Execution
684 |
685 | #### execute_code
686 |
687 | Executes code in various programming languages with safety measures and restrictions.
688 |
689 | ```json
690 | {
691 | "language": "python",
692 | "code": "print('Hello, World!')",
693 | "timeout": 5
694 | }
695 | ```
696 |
697 | **Supported Languages:**
698 |
699 | - Python (python3)
700 | - JavaScript (Node.js)
701 | - Ruby
702 | - PHP
703 | - Go
704 | - Rust
705 |
706 | **Parameters:**
707 | | Parameter | Type | Required | Description |
708 | |-----------|---------|----------|---------------------------------------|
709 | | language | string | Yes | Programming language to use |
710 | | code | string | Yes | Code to execute |
711 | | timeout | integer | No | Maximum execution time (default: 5s) |
712 |
713 | **CLI Usage:**
714 |
715 | ```bash
716 | # Python example
717 | skydeckai-code-cli --tool execute_code --args '{
718 | "language": "python",
719 | "code": "print(sum(range(10)))"
720 | }'
721 |
722 | # JavaScript example
723 | skydeckai-code-cli --tool execute_code --args '{
724 | "language": "javascript",
725 | "code": "console.log(Array.from({length: 5}, (_, i) => i*2))"
726 | }'
727 |
728 | # Ruby example
729 | skydeckai-code-cli --tool execute_code --args '{
730 | "language": "ruby",
731 | "code": "puts (1..5).reduce(:+)"
732 | }'
733 |
734 | # Go example
735 | skydeckai-code-cli --tool execute_code --args '{
736 | "language": "go",
737 | "code": "fmt.Println(\"Hello, Go!\")"
738 | }'
739 | ```
740 |
741 | **Requirements:**
742 |
743 | - Respective language runtimes must be installed
744 | - Commands must be available in system PATH
745 | - Proper permissions for temporary file creation
746 |
747 | ⚠️ **Security Warning:**
748 | This tool executes arbitrary code on your system. Always:
749 |
750 | 1. Review code thoroughly before execution
751 | 2. Understand the code's purpose and expected outcome
752 | 3. Never execute untrusted code
753 | 4. Be aware of potential system impacts
754 | 5. Monitor execution output
755 |
756 | #### execute_shell_script
757 |
758 | Executes shell scripts (bash/sh) with safety measures and restrictions.
759 |
760 | ```json
761 | {
762 | "script": "echo \"Current directory:\" && pwd",
763 | "timeout": 300
764 | }
765 | ```
766 |
767 | **Parameters:**
768 | | Parameter | Type | Required | Description |
769 | |-----------|---------|----------|---------------------------------------|
770 | | script | string | Yes | Shell script to execute |
771 | | timeout | integer | No | Maximum execution time (default: 300s, max: 600s) |
772 |
773 | **CLI Usage:**
774 |
775 | ```bash
776 | # List directory contents with details
777 | skydeckai-code-cli --tool execute_shell_script --args '{
778 | "script": "ls -la"
779 | }'
780 |
781 | # Find all Python files recursively
782 | skydeckai-code-cli --tool execute_shell_script --args '{
783 | "script": "find . -name \"*.py\" -type f"
784 | }'
785 |
786 | # Complex script with multiple commands
787 | skydeckai-code-cli --tool execute_shell_script --args '{
788 | "script": "echo \"System Info:\" && uname -a && echo \"\nDisk Usage:\" && df -h"
789 | }'
790 | ```
791 |
792 | **Features:**
793 |
794 | - Uses /bin/sh for maximum compatibility across systems
795 | - Executes within the allowed directory
796 | - Separate stdout and stderr output
797 | - Proper error handling and timeout controls
798 |
799 | ⚠️ **Security Warning:**
800 | This tool executes arbitrary shell commands on your system. Always:
801 |
802 | 1. Review the script thoroughly before execution
803 | 2. Understand the script's purpose and expected outcome
804 | 3. Never execute untrusted scripts
805 | 4. Be aware of potential system impacts
806 | 5. Monitor execution output
807 |
808 | ## Configuration
809 |
810 | Configuration file: `~/.skydeckai_code/config.json`
811 |
812 | ```json
813 | {
814 | "allowed_directory": "/path/to/workspace"
815 | }
816 | ```
817 |
818 | ## CLI Usage
819 |
820 | Basic command structure:
821 |
822 | ```bash
skydeckai-code-cli --tool <tool_name> --args '<json_arguments>'
824 |
825 | # List available tools
826 | skydeckai-code-cli --list-tools
827 |
828 | # Enable debug output
skydeckai-code-cli --debug --tool <tool_name> --args '<json_arguments>'
830 | ```
831 |
832 | ## Debugging
833 |
834 | Use MCP Inspector for debugging:
835 |
836 | ```bash
npx @modelcontextprotocol/inspector skydeckai-code
838 | ```
839 |
840 | ## Security
841 |
842 | - Operations restricted to configured allowed directory
843 | - Path traversal prevention
844 | - File permission preservation
845 | - Safe operation handling
846 |
847 | ## Upcoming Features
848 |
849 | - GitHub tools:
850 | - PR Description Generator
851 | - Code Review
852 | - Actions Manager
853 | - Pivotal Tracker tools:
854 | - Story Generator
855 | - Story Manager
856 |
857 | ## Development Status
858 |
859 | Currently in active development. Features and API may change.
860 |
861 | ## License
862 |
863 | Apache License 2.0 - see [LICENSE](LICENSE)
864 |
865 | [](https://www.star-history.com/#skydeckai/skydeckai-code&Date)
866 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "skydeckai-code"
3 | version = "0.1.37"
4 | description = "This MCP server provides a comprehensive set of tools for AI-driven Development workflows including file operations, code analysis, multi-language execution, web content fetching with HTML-to-markdown conversion, multi-engine web search, code content searching, and system information retrieval."
5 | readme = "README.md"
6 | requires-python = ">=3.11"
7 | authors = [{name = "SkyDeck.ai", email = "support@skydeck.ai"}]
license = {text = "Apache-2.0"}
9 | keywords = ["mcp", "development", "ai", "aidd", "code-analysis", "code"]
10 |
11 | dependencies = [
12 | "mcp>=1.6.0",
13 | "tree-sitter>=0.24.0",
14 | "tree-sitter-c-sharp>=0.23.1",
15 | "tree-sitter-cpp>=0.23.4",
16 | "tree-sitter-go>=0.23.4",
17 | "tree-sitter-java>=0.23.5",
18 | "tree-sitter-javascript>=0.23.1",
19 | "tree-sitter-kotlin>=1.1.0",
20 | "tree-sitter-php>=0.23.11",
21 | "tree-sitter-python>=0.23.6",
22 | "tree-sitter-ruby>=0.23.1",
23 | "tree-sitter-rust==0.23.2",
24 | "tree-sitter-typescript>=0.23.2",
25 | "psutil>=7.0.0",
26 | "mss>=10.0.0",
27 | "pillow>=11.1.0",
28 | "requests>=2.32.3",
29 | "html2text>=2025.4.15",
30 | "beautifulsoup4>=4.13.3",
31 | ]
32 |
33 | [project.optional-dependencies]
34 | macos = ["pyobjc-framework-Quartz>=11.0"]
35 | windows = ["pygetwindow>=0.0.9"]
36 |
37 | [tool.hatch.build.targets.wheel]
38 | packages = ["src"]
39 |
40 | [tool.hatch.envs.default]
41 | dependencies = [
42 | "build",
43 | "ruff>=0.9.1",
44 | ]
45 |
46 | [tool.ruff]
47 | line-length = 250
48 | target-version = "py311"
49 |
50 | # Enable rules
51 | lint.select = [
52 | "E", # pycodestyle errors
53 | "F", # pyflakes
54 | "I", # isort
55 | ]
56 |
57 | [project.urls]
58 | Homepage = "https://github.com/skydeckai/skydeckai-code"
59 | Repository = "https://github.com/skydeckai/skydeckai-code"
60 | Documentation = "https://github.com/skydeckai/skydeckai-code/blob/main/README.md"
61 |
62 | [build-system]
63 | requires = [ "hatchling",]
64 | build-backend = "hatchling.build"
65 |
66 | [project.scripts]
67 | skydeckai-code = "src.aidd:main"
68 | skydeckai-code-cli = "src.aidd.cli:main"
69 |
--------------------------------------------------------------------------------
/screenshots/skydeck_ai_helper.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skydeckai/skydeckai-code/2e1eed2aeca180bb97349da516f8511208296e3c/screenshots/skydeck_ai_helper.png
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | properties:
9 | allowedDirectory:
10 | type: string
11 | description: The directory that the MCP server is allowed to operate in.
  # A function that produces the CLI command to start the MCP on stdio.
  # Note: the block-scalar indicator must follow the key on the same line.
  commandFunction: |-
    (config) => ({ command: 'skydeckai-code-cli', args: ['--tool', 'get_system_info'], env: config ? { ALLOWED_DIRECTORY: config.allowedDirectory } : undefined })
16 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skydeckai/skydeckai-code/2e1eed2aeca180bb97349da516f8511208296e3c/src/__init__.py
--------------------------------------------------------------------------------
/src/aidd/__init__.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from . import server
4 |
5 |
def main():
    """Package entry point: drive the MCP server's async main loop to completion."""
    asyncio.run(server.main())
9 |
# Optionally expose other important items at package level.
# NOTE: 'cli' is a submodule that is not imported above; listing a submodule
# name in a package's __all__ makes "from aidd import *" import it on demand.
__all__ = ['main', 'server', 'cli']
12 |
--------------------------------------------------------------------------------
/src/aidd/cli.py:
--------------------------------------------------------------------------------
1 | """Command-line interface for the SkyDeckAI Code MCP server."""
2 |
3 | import argparse
4 | import asyncio
5 | import json
6 | import sys
7 | import traceback
8 | from contextlib import AsyncExitStack
9 | from typing import Optional
10 |
11 | from mcp.client.session import ClientSession
12 | from mcp.client.stdio import StdioServerParameters, stdio_client
13 | from mcp.types import CallToolResult, TextContent
14 |
15 |
class MCPClient:
    """Client for interacting with the SkyDeckAI Code MCP server.

    Owns a stdio transport to a spawned "skydeckai-code" server process and
    an MCP ClientSession on top of it. Call connect() before any other
    operation and cleanup() when done.
    """

    def __init__(self):
        """Initialize the client with no active session."""
        self.session: Optional[ClientSession] = None
        # Collects the transport/session async context managers so cleanup()
        # can tear them down in reverse order.
        self.exit_stack = AsyncExitStack()
        self.debug = False

    async def connect(self):
        """Connect to the SkyDeckAI Code server.

        Spawns the "skydeckai-code" executable over stdio and performs the
        MCP initialization handshake.
        """
        server_params = StdioServerParameters(command="skydeckai-code", args=[], env=None)
        transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.session = await self.exit_stack.enter_async_context(ClientSession(*transport))
        await self.session.initialize()

    async def list_tools(self):
        """List all available tools with their argument schemas on stdout.

        Raises:
            RuntimeError: If connect() has not been called yet.
        """
        # Guard against use before connect(), consistent with call_tool().
        if not self.session:
            raise RuntimeError("Not connected to server")

        response = await self.session.list_tools()
        print("\nAvailable tools:")
        for tool in sorted(response.tools, key=lambda x: x.name):
            print(f"\n{tool.name}:")
            print(f"  Description: {tool.description}")
            print("  Arguments:")
            if tool.inputSchema and "properties" in tool.inputSchema:
                for prop_name, prop_info in tool.inputSchema["properties"].items():
                    required = (
                        "required" in tool.inputSchema
                        and prop_name in tool.inputSchema["required"]
                    )
                    req_str = "(required)" if required else "(optional)"
                    desc = prop_info.get("description", "No description available")
                    print(f"    {prop_name} {req_str}: {desc}")
            else:
                print("    No arguments required")

    async def call_tool(self, tool_name: str, args_str: Optional[str] = None) -> None:
        """Call a specific tool with arguments.

        Args:
            tool_name: Name of the tool to call
            args_str: JSON string of tool arguments
        """
        if not self.session:
            raise RuntimeError("Not connected to server")

        args = {}
        if args_str:
            try:
                args = json.loads(args_str)
            except json.JSONDecodeError as e:
                print(f"Error parsing arguments: {e}")
                return

        # BUG FIX: previously the except block unconditionally printed
        # type(result); when session.call_tool() itself raised, "result" was
        # unbound and the handler crashed with a NameError that masked the
        # real error. Initialize it so the diagnostic is only best-effort.
        result = None
        try:
            result = await self.session.call_tool(tool_name, args or {})
            if isinstance(result, CallToolResult):
                for content in result.content:
                    if isinstance(content, TextContent):
                        print(content.text)
        except Exception as e:
            if result is not None:
                print(f"Result type: {type(result)}")
            print(f"Error calling tool: {e}")
            if self.debug:
                traceback.print_exc()

    async def cleanup(self):
        """Clean up resources (closes the session and server transport)."""
        await self.exit_stack.aclose()
86 |
87 |
def main():
    """Main entry point for the CLI.

    Parses command-line arguments, connects to the server, and dispatches to
    either tool listing, a single tool call, or help output.

    Returns:
        0 on success. Argument-parsing errors exit via argparse's usual
        SystemExit(2).
    """
    parser = argparse.ArgumentParser(
        description="CLI for the SkyDeckAI Code MCP server",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List all available tools
  skydeckai-code-cli --list-tools

  # List directory contents
  skydeckai-code-cli --tool list_directory --args '{"path": "."}'

  # Update allowed directory
  skydeckai-code-cli --tool update_allowed_directory --args '{"directory": "~/Code/project"}'

  # Read a file
  skydeckai-code-cli --tool read_file --args '{"path": "README.md"}'

  # Enable debug output
  skydeckai-code-cli --debug --tool read_file --args '{"path": "README.md"}'""")
    parser.add_argument("--list-tools", action="store_true", help="List available tools")
    parser.add_argument("--tool", help="Tool to call")
    parser.add_argument("--args", help='Tool arguments in JSON format (e.g. \'{"directory":"/path/to/dir"}\')')
    parser.add_argument("--debug", action="store_true", help="Enable debug mode")

    async def run(args):
        # One client per invocation; always release the transport, even when
        # the requested operation fails.
        client = MCPClient()
        client.debug = args.debug
        try:
            await client.connect()
            if args.list_tools:
                await client.list_tools()
            elif args.tool:
                if args.debug and args.args:
                    print(f"Parsing JSON arguments: {args.args}")
                await client.call_tool(args.tool, args.args)
            else:
                parser.print_help()
        finally:
            await client.cleanup()

    # BUG FIX: the previous try/except around parse_args() swallowed the
    # exception and then used the unbound "args" variable, turning any parse
    # failure into a confusing NameError. argparse already reports errors and
    # exits via SystemExit, so no wrapper is needed. The no-op
    # "async with AsyncExitStack()" wrapper inside run() was removed as well.
    args = parser.parse_args()
    asyncio.run(run(args))
    return 0
138 |
139 |
# Allow running this module directly (e.g. "python -m src.aidd.cli") in
# addition to the installed "skydeckai-code-cli" console script.
if __name__ == "__main__":
    sys.exit(main())
142 |
--------------------------------------------------------------------------------
/src/aidd/server.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import mcp.server.stdio
4 | import mcp.types as types
5 | from mcp.server import NotificationOptions, Server
6 | from mcp.server.models import InitializationOptions
7 |
8 | from .tools import TOOL_DEFINITIONS, TOOL_HANDLERS
9 |
10 | server = Server("skydeckai-code")
11 |
@server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
    """Return the catalog of tools this server exposes.

    Each entry is built from a plain-dict definition in TOOL_DEFINITIONS;
    the dict's keys match the fields of types.Tool (name, description,
    inputSchema with JSON Schema validation).
    """
    catalog = []
    for definition in TOOL_DEFINITIONS:
        catalog.append(types.Tool(**definition))
    return catalog
19 |
@server.call_tool()
async def handle_call_tool(
    name: str, arguments: dict | None
) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
    """Dispatch a tool-execution request to its registered handler.

    Raises:
        ValueError: If no handler is registered under the given tool name.
    """
    if name not in TOOL_HANDLERS:
        raise ValueError(f"Unknown tool: {name}")

    # Handlers expect a dict; normalize a missing/None payload to empty args.
    return await TOOL_HANDLERS[name](arguments or {})
35 |
async def main():
    """Run the MCP server over stdin/stdout streams until the client disconnects."""
    # FIX: the advertised version was hard-coded to "0.1.0" while the package
    # version (pyproject.toml) had moved on. Report the installed package's
    # version instead, falling back to the old constant for uninstalled/dev runs.
    try:
        from importlib.metadata import PackageNotFoundError, version
        server_version = version("skydeckai-code")
    except PackageNotFoundError:
        server_version = "0.1.0"

    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="skydeckai-code",
                server_version=server_version,
                capabilities=server.get_capabilities(
                    notification_options=NotificationOptions(),
                    experimental_capabilities={},
                ),
            ),
        )
51 |
# This is needed if you'd like to connect to a custom client, e.g. by running
# this module directly instead of via the packaged entry point.
if __name__ == "__main__":
    asyncio.run(main())
55 |
--------------------------------------------------------------------------------
/src/aidd/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .code_analysis import handle_codebase_mapper, codebase_mapper_tool
2 | from .code_execution import (
3 | execute_code_tool,
4 | execute_shell_script_tool,
5 | handle_execute_code,
6 | handle_execute_shell_script,
7 | )
8 | from .code_tools import search_code_tool, handle_search_code
9 | from .directory_tools import (
10 | create_directory_tool,
11 | directory_tree_tool,
12 | handle_create_directory,
13 | handle_directory_tree,
14 | handle_list_directory,
15 | list_directory_tool,
16 | )
17 | from .file_tools import (
18 | copy_file_tool,
19 | delete_file_tool,
20 | edit_file_tool,
21 | get_file_info_tool,
22 | handle_copy_file,
23 | handle_delete_file,
24 | handle_edit_file,
25 | handle_get_file_info,
26 | handle_move_file,
27 | handle_read_file,
28 | handle_search_files,
29 | handle_write_file,
30 | move_file_tool,
31 | read_file_tool,
32 | search_files_tool,
33 | write_file_tool,
34 | )
35 | from .get_active_apps_tool import get_active_apps_tool, handle_get_active_apps
36 | from .get_available_windows_tool import get_available_windows_tool, handle_get_available_windows
37 | from .image_tools import read_image_file_tool, handle_read_image_file
38 | from .other_tools import batch_tools_tool, handle_batch_tools, think_tool, handle_think
39 | from .path_tools import (
40 | get_allowed_directory_tool,
41 | handle_get_allowed_directory,
42 | handle_update_allowed_directory,
43 | update_allowed_directory_tool,
44 | )
45 | from .screenshot_tool import (
46 | capture_screenshot_tool,
47 | handle_capture_screenshot,
48 | )
49 | from .system_tools import get_system_info_tool, handle_get_system_info
50 | from .web_tools import web_fetch_tool, handle_web_fetch, web_search_tool, handle_web_search
51 |
# Export all tool definitions.
# INVARIANT: every definition's "name" field must appear as a key in
# TOOL_HANDLERS below, otherwise server.handle_call_tool() raises
# "Unknown tool" for a tool that handle_list_tools() advertised.
TOOL_DEFINITIONS = [
    get_allowed_directory_tool(),
    write_file_tool(),
    update_allowed_directory_tool(),
    create_directory_tool(),
    edit_file_tool(),
    list_directory_tool(),
    read_file_tool(),
    move_file_tool(),
    copy_file_tool(),
    search_files_tool(),
    delete_file_tool(),
    get_file_info_tool(),
    directory_tree_tool(),
    execute_code_tool(),
    execute_shell_script_tool(),
    codebase_mapper_tool(),
    search_code_tool(),
    batch_tools_tool(),
    think_tool(),
    # Screenshot tools
    capture_screenshot_tool(),
    # System context tools
    get_active_apps_tool(),
    get_available_windows_tool(),
    # Image tools
    read_image_file_tool(),
    # Web tools
    web_fetch_tool(),
    web_search_tool(),
    # System tools
    get_system_info_tool(),
]

# Export all handlers, keyed by the tool name advertised in TOOL_DEFINITIONS.
# Each handler is an async callable taking the arguments dict for its tool.
TOOL_HANDLERS = {
    "get_allowed_directory": handle_get_allowed_directory,
    "update_allowed_directory": handle_update_allowed_directory,
    "list_directory": handle_list_directory,
    "create_directory": handle_create_directory,
    "read_file": handle_read_file,
    "write_file": handle_write_file,
    "edit_file": handle_edit_file,
    "move_file": handle_move_file,
    "copy_file": handle_copy_file,
    "search_files": handle_search_files,
    "search_code": handle_search_code,
    "delete_file": handle_delete_file,
    "get_file_info": handle_get_file_info,
    "directory_tree": handle_directory_tree,
    "execute_code": handle_execute_code,
    "execute_shell_script": handle_execute_shell_script,
    "codebase_mapper": handle_codebase_mapper,
    "batch_tools": handle_batch_tools,
    "think": handle_think,
    "get_system_info": handle_get_system_info,
    # Screenshot handlers
    "capture_screenshot": handle_capture_screenshot,
    # System context handlers
    "get_active_apps": handle_get_active_apps,
    "get_available_windows": handle_get_available_windows,
    # Image handlers
    "read_image_file": handle_read_image_file,
    # Web handlers
    "web_fetch": handle_web_fetch,
    "web_search": handle_web_search,
}
120 |
--------------------------------------------------------------------------------
/src/aidd/tools/base.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import mcp.types as types
4 |
5 |
class Tool:
    """Base class for all tools.

    Subclasses supply the three class attributes below; get_definition()
    packages them into the MCP wire format.
    """
    name: str                     # tool identifier exposed to MCP clients
    description: str              # human-readable summary of what the tool does
    input_schema: Dict[str, Any]  # JSON Schema describing the tool's arguments

    @classmethod
    def get_definition(cls) -> types.Tool:
        """Build the MCP tool definition from this class's attributes."""
        return types.Tool(name=cls.name, description=cls.description, inputSchema=cls.input_schema)
19 |
--------------------------------------------------------------------------------
/src/aidd/tools/code_analysis.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import subprocess
4 | from typing import Any, Dict, List
5 |
6 | import tree_sitter_c_sharp
7 | import tree_sitter_cpp
8 | import tree_sitter_go
9 | import tree_sitter_java
10 | import tree_sitter_javascript
11 | import tree_sitter_kotlin
12 | import tree_sitter_python
13 | import tree_sitter_ruby
14 | import tree_sitter_rust
15 | from tree_sitter import Language, Parser
16 | from tree_sitter_php._binding import language_php
17 | from tree_sitter_typescript._binding import language_tsx, language_typescript
18 |
19 | from .state import state
20 |
# Map of file extensions to tree-sitter language names.
# Keys must be lowercase (_detect_language lowercases the extension before the
# lookup); values must match the keys of _parser_cache defined below.
LANGUAGE_MAP = {
    '.py': 'python',
    '.js': 'javascript', '.jsx': 'javascript', '.mjs': 'javascript', '.cjs': 'javascript',
    '.ts': 'typescript',
    '.tsx': 'tsx',
    '.java': 'java',
    '.cpp': 'cpp', '.hpp': 'cpp', '.cc': 'cpp', '.hh': 'cpp', '.cxx': 'cpp', '.hxx': 'cpp',
    '.rb': 'ruby', '.rake': 'ruby',
    '.go': 'go',
    '.rs': 'rust',
    '.php': 'php',
    '.cs': 'c-sharp',
    '.kt': 'kotlin', '.kts': 'kotlin'
    # Add more languages as needed
}
37 |
# Initialize tree-sitter languages and build one reusable Parser per language.
# Done once at import time so per-file analysis never pays the setup cost.
try:
    _parser_cache = {
        'python': Parser(Language(tree_sitter_python.language())),
        'javascript': Parser(Language(tree_sitter_javascript.language())),
        'typescript': Parser(Language(language_typescript())),
        'tsx': Parser(Language(language_tsx())),
        'java': Parser(Language(tree_sitter_java.language())),
        'cpp': Parser(Language(tree_sitter_cpp.language())),
        'ruby': Parser(Language(tree_sitter_ruby.language())),
        'go': Parser(Language(tree_sitter_go.language())),
        'rust': Parser(Language(tree_sitter_rust.language())),
        'php': Parser(Language(language_php())),
        'c-sharp': Parser(Language(tree_sitter_c_sharp.language())),
        'kotlin': Parser(Language(tree_sitter_kotlin.language())),
    }
except Exception as e:
    # Chain the cause (raise ... from e) so the underlying tree-sitter error
    # stays visible in the traceback instead of only its string form.
    raise RuntimeError(f"Failed to initialize languages: {e}") from e
56 |
def codebase_mapper_tool():
    """Return the MCP tool definition dict for the codebase_mapper tool."""
    # Long-form guidance for the model; assembled via implicit literal
    # concatenation into a single description string.
    description = (
        "Build a structural map of source code files in a directory. "
        "This tool analyzes code structure to identify classes, functions, and methods. "
        "WHEN TO USE: When you need to understand the structure of a codebase, discover classes and "
        "functions across multiple files, identify inheritance relationships, or get a high-level overview of code organization without "
        "reading every file individually. "
        "WHEN NOT TO USE: When you need to search for specific text patterns (use search_files instead), when you "
        "need to analyze a single known file (use read_file instead), or when you're working with non-code files. "
        "SUPPORTED LANGUAGES: Python (.py), JavaScript (.js/.jsx), TypeScript (.ts/.tsx), Java (.java), C++ (.cpp), Ruby (.rb), Go (.go), Rust (.rs), PHP (.php), "
        "C# (.cs), Kotlin (.kt). "
        "RETURNS: A text-based tree structure showing classes and functions in the codebase, along with statistics "
        "about found elements. Only analyzes files within the allowed directory. "
        "Example: Enter '.' to analyze all source files in current directory, or 'src' to analyze all files in the src directory."
    )
    path_property = {
        "type": "string",
        "description": "Root directory to analyze. Examples: '.' for current directory, 'src' for src directory, 'lib/components' for a specific subdirectory. The path must point to a directory within the allowed workspace."
    }
    return {
        "name": "codebase_mapper",
        "description": description,
        "inputSchema": {
            "type": "object",
            "properties": {"path": path_property},
            "required": ["path"]
        },
    }
83 |
def _detect_language(file_path: str) -> str:
    """Return the language name for a file based on its extension.

    Falls back to 'unknown' for extensions not present in LANGUAGE_MAP.
    """
    _, extension = os.path.splitext(file_path)
    return LANGUAGE_MAP.get(extension.lower(), 'unknown')
88 |
def _get_language_parser(language: str):
    """Return the cached tree-sitter parser for *language*.

    Follows the module's error convention: instead of raising, returns a
    dict with an 'error' key when the language has no cached parser, which
    _analyze_file forwards to the caller.
    """
    # The previous version wrapped a membership test plus lookup in a
    # try/except that could never fire; a single .get() is equivalent and
    # avoids the double dict lookup.
    parser = _parser_cache.get(language)
    if parser is None:
        return {'error': f'Unsupported language: {language}'}
    return parser
99 |
100 | def _extract_node_text(node, source_code: bytes) -> str:
101 | """Extract text from a node."""
102 | return source_code[node.start_byte:node.end_byte].decode('utf-8')
103 |
def _analyze_file(file_path: str) -> Dict[str, Any]:
    """Analyze a single source file with tree-sitter.

    Returns a nested dict describing the file's structure (node type,
    1-based start/end lines, name and parameters where extractable, and
    'children' for significant nested nodes), or a dict with a single
    'error' key on failure.
    """
    try:
        with open(file_path, 'rb') as f:
            source_code = f.read()

        language = _detect_language(file_path)
        if language == 'unknown':
            return {'error': f'Unsupported file type: {os.path.splitext(file_path)[1]}'}

        parser = _get_language_parser(language)
        if isinstance(parser, dict) and 'error' in parser:
            return parser

        tree = parser.parse(source_code)
        root_node = tree.root_node

        # Check if we got a valid root node
        if not root_node:
            return {'error': 'Failed to parse file - no root node'}

        def process_node(node) -> Dict[str, Any]:
            if not node:
                return None

            result = {
                'type': node.type,
                'start_line': node.start_point[0] + 1,
                'end_line': node.end_point[0] + 1,
            }

            # Process child nodes based on language-specific patterns
            if language == 'python':
                if node.type in ['class_definition', 'function_definition']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                        elif child.type == 'parameters':
                            params = []
                            for param in child.children:
                                if param.type == 'identifier':
                                    params.append(_extract_node_text(param, source_code))
                            if params:
                                result['parameters'] = params
                elif node.type == 'assignment':
                    # Handle global variable assignments
                    for child in node.children:
                        if child.type == 'identifier':
                            result['type'] = 'variable_declaration'
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                        # Break after first identifier to avoid capturing right-hand side
                        break

            elif language == 'javascript':
                if node.type in ['class_declaration', 'method_definition', 'function_declaration']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                        elif child.type == 'formal_parameters':
                            params = []
                            for param in child.children:
                                if param.type == 'identifier':
                                    params.append(_extract_node_text(param, source_code))
                            if params:
                                result['parameters'] = params
                elif node.type in ['variable_declaration', 'lexical_declaration']:
                    # Handle var/let/const declarations
                    for child in node.children:
                        if child.type == 'variable_declarator':
                            for subchild in child.children:
                                if subchild.type == 'identifier':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result

            # BUGFIX: '.tsx' files are detected as language 'tsx' (see
            # LANGUAGE_MAP), but this branch previously matched only
            # 'typescript', so TSX files never had names extracted. The tsx
            # grammar shares these node types with typescript.
            elif language in ('typescript', 'tsx'):
                if node.type in ['class_declaration', 'method_declaration', 'function_declaration', 'interface_declaration']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['variable_statement', 'property_declaration']:
                    # Handle variable declarations and property declarations
                    for child in node.children:
                        if child.type == 'identifier':
                            result['type'] = 'variable_declaration'
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result

            elif language == 'java':
                if node.type in ['class_declaration', 'method_declaration', 'constructor_declaration', 'interface_declaration']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['field_declaration', 'variable_declaration']:
                    # Handle Java global fields and variables
                    for child in node.children:
                        if child.type == 'variable_declarator':
                            for subchild in child.children:
                                if subchild.type == 'identifier':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result
                    return result

            elif language == 'cpp':
                if node.type in ['class_specifier', 'function_definition', 'struct_specifier']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['declaration', 'variable_declaration']:
                    # Handle C++ global variables and declarations
                    for child in node.children:
                        if child.type == 'init_declarator' or child.type == 'declarator':
                            for subchild in child.children:
                                if subchild.type == 'identifier':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result
                    return result

            elif language == 'ruby':
                if node.type in ['class', 'method', 'singleton_method', 'module']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type == 'assignment' or node.type == 'global_variable':
                    # Handle Ruby global variables and assignments
                    for child in node.children:
                        if child.type == 'identifier' or child.type == 'global_variable':
                            result['type'] = 'variable_declaration'
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result

            elif language == 'go':
                if node.type in ['type_declaration', 'function_declaration', 'method_declaration', 'interface_declaration']:
                    for child in node.children:
                        if child.type == 'identifier' or child.type == 'field_identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type == 'var_declaration' or node.type == 'const_declaration':
                    # Handle Go variable and constant declarations
                    for child in node.children:
                        if child.type == 'var_spec' or child.type == 'const_spec':
                            for subchild in child.children:
                                if subchild.type == 'identifier':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result
                    return result

            elif language == 'rust':
                if node.type in ['struct_item', 'impl_item', 'fn_item', 'trait_item']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['static_item', 'const_item', 'let_declaration']:
                    # Handle Rust static items, constants, and let declarations
                    for child in node.children:
                        if child.type == 'identifier':
                            result['type'] = 'variable_declaration'
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                        elif child.type == 'pattern' and child.children:
                            result['name'] = _extract_node_text(child.children[0], source_code)
                            return result

            elif language == 'php':
                if node.type in ['class_declaration', 'method_declaration', 'function_definition', 'interface_declaration', 'trait_declaration']:
                    for child in node.children:
                        if child.type == 'name':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type == 'property_declaration' or node.type == 'const_declaration':
                    # Handle PHP class properties and constants
                    for child in node.children:
                        if child.type == 'property_element' or child.type == 'const_element':
                            for subchild in child.children:
                                if subchild.type == 'variable_name' or subchild.type == 'name':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result

            # BUGFIX: _detect_language returns 'c-sharp' (LANGUAGE_MAP['.cs']),
            # but this branch previously compared against 'csharp', so C#
            # files never had names extracted.
            elif language == 'c-sharp':
                if node.type in ['class_declaration', 'interface_declaration', 'method_declaration']:
                    for child in node.children:
                        if child.type == 'identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['field_declaration', 'property_declaration']:
                    # Handle C# fields and properties
                    for child in node.children:
                        if child.type == 'variable_declaration':
                            for subchild in child.children:
                                if subchild.type == 'identifier':
                                    result['type'] = 'variable_declaration'
                                    result['name'] = _extract_node_text(subchild, source_code)
                                    return result
                    return result

            elif language == 'kotlin':
                if node.type in ['class_declaration', 'function_declaration']:
                    for child in node.children:
                        if child.type == 'simple_identifier':
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                    return result
                elif node.type in ['property_declaration', 'variable_declaration']:
                    # Handle Kotlin properties and variables
                    for child in node.children:
                        if child.type == 'simple_identifier':
                            result['type'] = 'variable_declaration'
                            result['name'] = _extract_node_text(child, source_code)
                            return result
                        break  # Only capture the first identifier
                    return result

            # Recursively process children; keep only structurally significant
            # nodes (or nodes that themselves contain significant children).
            children = []
            for child in node.children:
                child_result = process_node(child)
                if child_result and (
                    child_result.get('type') in [
                        'class_definition', 'function_definition',
                        'class_declaration', 'method_definition',
                        'function_declaration', 'interface_declaration',
                        'method_declaration', 'constructor_declaration',
                        'class_specifier', 'struct_specifier',
                        'class', 'method', 'singleton_method', 'module',
                        'type_declaration', 'method_declaration',
                        'interface_declaration', 'struct_item', 'impl_item',
                        'fn_item', 'trait_item', 'trait_declaration',
                        'property_declaration', 'object_definition',
                        'trait_definition', 'def_definition',
                        'function_definition', 'class_definition',
                        'variable_declaration'] or 'children' in child_result
                ):
                    children.append(child_result)

            if children:
                result['children'] = children
            return result

        return process_node(root_node)

    except Exception as e:
        return {
            'error': f'Error analyzing file: {str(e)}'
        }
368 |
async def handle_codebase_mapper(arguments: dict):
    """Handle building a structural map of source code.

    Expects arguments={'path': <directory relative to the allowed workspace>}
    (defaults to '.'). Collects candidate source files (preferring git
    ls-files, falling back to a filtered os.walk), analyzes each with
    _analyze_file, and returns a single TextContent with statistics, any
    per-file errors, and a tree-style repository structure. All error cases
    are reported as JSON text rather than raised.
    """
    from mcp.types import TextContent

    path = arguments.get("path", ".")

    # Validate and get full path
    # TODO(review): a plain prefix check means a sibling like
    # '/workspace-evil' passes when allowed_directory is '/workspace';
    # consider os.path.commonpath or appending os.sep before comparing.
    full_path = os.path.abspath(os.path.join(state.allowed_directory, path))
    if not full_path.startswith(state.allowed_directory):
        return [TextContent(
            type="text",
            text=json.dumps({'error': 'Access denied: Path must be within allowed directory'})
        )]
    if not os.path.exists(full_path):
        return [TextContent(
            type="text",
            text=json.dumps({'error': f'Path does not exist: {path}'})
        )]
    if not os.path.isdir(full_path):
        return [TextContent(type="text", text=json.dumps({'error': f'Path is not a directory: {path}'}))]

    analyzed_files = []

    # First try using git ls-files
    try:
        result = subprocess.run(
            ['git', 'ls-files'],
            cwd=full_path,
            capture_output=True,
            text=True,
            check=True,
        )
        # NOTE(review): with check=True a non-zero exit already raised
        # CalledProcessError above, so this condition is always true here.
        if result.returncode == 0:
            files = [
                os.path.join(full_path, f.strip())
                for f in result.stdout.splitlines()
                if f.strip()
            ]
            analyzed_files.extend(files)

    except (subprocess.SubprocessError, FileNotFoundError):
        pass

    # If git didn't work or found no files, use regular directory walk
    if not analyzed_files:
        skip_dirs = {'.git', '.svn', 'node_modules', '__pycache__', 'build', 'dist'}
        for root, _, filenames in os.walk(full_path):

            # Get the directory name
            dir_name = os.path.basename(root)

            # Skip hidden and build directories
            if dir_name.startswith('.') or dir_name in skip_dirs:
                continue

            for filename in filenames:
                # Skip hidden files
                if filename.startswith('.'):
                    continue

                file_path = os.path.join(root, filename)
                language = _detect_language(file_path)
                if language != 'unknown':
                    analyzed_files.append(file_path)

    if not analyzed_files:
        return [TextContent(
            type="text",
            text=json.dumps({
                'error': 'No source code files found to analyze',
                'path': full_path
            }, indent=2)
        )]

    # Analyze each file, collecting successes and per-file errors separately.
    analysis_results = []
    errors = []
    for file_path in sorted(analyzed_files):
        rel_path = os.path.relpath(file_path, full_path)
        try:
            result = _analyze_file(file_path)

            if result and isinstance(result, dict) and 'error' not in result:
                # Successfully analyzed file
                analysis_results.append({
                    'path': rel_path,
                    'language': _detect_language(rel_path),
                    'structure': result
                })
            elif result and isinstance(result, dict) and 'error' in result:
                errors.append({
                    'path': rel_path,
                    'error': result['error']
                })
        except Exception as e:
            errors.append({
                'path': rel_path,
                'error': str(e)
            })

    if not analysis_results:
        return [TextContent(
            type="text",
            text=json.dumps({
                'error': 'Analysis completed but no valid results',
                'path': full_path,
                'attempted': len(analyzed_files),
                'files_found': len(analyzed_files),
                'errors': errors
            }, indent=2)
        )]

    def count_nodes(structure: Dict[str, Any], node_types: set[str]) -> int:
        """Recursively count nodes of specific types in the tree structure."""
        count = 0

        # Count current node if it matches
        if structure.get('type') in node_types:
            count += 1

        # Recursively count in children
        for child in structure.get('children', []):
            count += count_nodes(child, node_types)

        return count

    # Define node types for different categories
    class_types = {
        'class_definition', 'class_declaration', 'class_specifier',
        'struct_specifier', 'struct_item', 'interface_declaration',
        'object_declaration'  # Kotlin object declarations
    }

    function_types = {
        'function_definition', 'function_declaration', 'method_definition',
        'method_declaration', 'constructor_declaration', 'fn_item',
        'method', 'singleton_method',
        'primary_constructor'  # Kotlin primary constructors
    }

    def generate_text_map(analysis_results: List[Dict[str, Any]]) -> str:
        """Generate a compact text representation of the code structure analysis."""

        # Renders one structure node (and its children) as tree-drawing lines.
        def format_node(node: Dict[str, Any], prefix: str = "", is_last: bool = True) -> List[str]:
            lines = []

            node_type = node.get('type', '')
            node_name = node.get('name', '')

            # Handle decorated functions - extract the actual function definition
            if node_type == 'decorated_definition' and 'children' in node:
                for child in node.get('children', []):
                    if child.get('type') in {
                        'function_definition', 'method_definition', 'member_function_definition'
                    }:
                        return format_node(child, prefix, is_last)

            # Handle class body, block nodes, and wrapper functions
            if not node_name and node_type in {'class_body', 'block', 'declaration_list', 'body'}:
                return process_children(node.get('children', []), prefix, is_last)
            elif not node_name:
                return lines

            branch = "└── " if is_last else "├── "

            # Format node information based on type
            if node_type in {
                'class_definition', 'class_declaration', 'class_specifier',
                'class', 'interface_declaration', 'struct_specifier',
                'struct_item', 'trait_item', 'trait_declaration',
                'module', 'type_declaration'
            }:
                node_info = f"class {node_name}"
            elif node_type in {
                'function_definition', 'function_declaration', 'method_definition',
                'method_declaration', 'fn_item', 'method', 'singleton_method',
                'constructor_declaration', 'member_function_definition',
                'constructor', 'destructor', 'public_method_definition',
                'private_method_definition', 'protected_method_definition'
            }:
                # Handle parameters
                params = []
                if 'parameters' in node and node['parameters']:
                    params = node['parameters']
                elif 'children' in node:
                    # Try to extract parameters from children for languages that structure them differently
                    for child in node['children']:
                        if child.get('type') in {'parameter_list', 'parameters', 'formal_parameters', 'argument_list'}:
                            for param in child.get('children', []):
                                if param.get('type') in {'identifier', 'parameter'}:
                                    param_name = param.get('name', '')
                                    if param_name:
                                        params.append(param_name)

                params_str = ', '.join(params) if params else ''
                node_info = f"{node_name}({params_str})"
            else:
                node_info = node_name

            lines.append(f"{prefix}{branch}{node_info}")

            # Process children
            if 'children' in node:
                new_prefix = prefix + ("    " if is_last else "│   ")
                child_lines = process_children(node['children'], new_prefix, is_last)
                if child_lines:  # Only add child lines if there are any
                    lines.extend(child_lines)

            return lines

        # Filters children down to significant node types, then formats each.
        def process_children(children: List[Dict], prefix: str, is_last: bool) -> List[str]:
            if not children:
                return []

            lines = []
            significant_children = [
                child for child in children
                if child.get('type') in {
                    'decorated_definition',
                    # Class-related nodes
                    'class_definition', 'class_declaration', 'class_specifier',
                    'class', 'interface_declaration', 'struct_specifier',
                    'struct_item', 'trait_item', 'trait_declaration',
                    'module', 'type_declaration',
                    'impl_item',  # Rust implementations
                    # Method-related nodes
                    'function_definition', 'function_declaration', 'method_definition',
                    'method_declaration', 'fn_item', 'method', 'singleton_method',
                    'constructor_declaration', 'member_function_definition',
                    'constructor', 'destructor', 'public_method_definition',
                    'private_method_definition', 'protected_method_definition',
                    # Container nodes that might have methods
                    'class_body', 'block', 'declaration_list', 'body',
                    'impl_block',  # Rust implementation blocks
                    # Property and field nodes
                    'property_declaration', 'field_declaration',
                    'variable_declaration', 'const_declaration'
                }
            ]

            for i, child in enumerate(significant_children):
                is_last_child = (i == len(significant_children) - 1)
                child_lines = format_node(child, prefix, is_last_child)
                if child_lines:  # Only add child lines if there are any
                    lines.extend(child_lines)

            return lines

        # Process each file
        output_lines = []

        # Sort analysis results by path
        sorted_results = sorted(analysis_results, key=lambda x: x['path'])

        for result in sorted_results:
            # Skip files with no significant structure
            if not result.get('structure') or not result.get('structure', {}).get('children'):
                continue

            # Add file header
            output_lines.append(f"\n{result['path']}")

            # Format the structure
            structure = result['structure']
            if 'children' in structure:
                significant_nodes = [
                    child for child in structure['children']
                    if child.get('type') in {
                        'decorated_definition',
                        # Class-related nodes
                        'class_definition', 'class_declaration', 'class_specifier',
                        'class', 'interface_declaration', 'struct_specifier',
                        'struct_item', 'trait_item', 'trait_declaration',
                        'module', 'type_declaration',
                        'impl_item',  # Rust implementations
                        # Method-related nodes
                        'function_definition', 'function_declaration', 'method_definition',
                        'method_declaration', 'fn_item', 'method', 'singleton_method',
                        'constructor_declaration', 'member_function_definition',
                        'constructor', 'destructor', 'public_method_definition',
                        'private_method_definition', 'protected_method_definition',
                        # Property and field nodes
                        'property_declaration', 'field_declaration',
                        'variable_declaration', 'const_declaration'
                    }
                ]

                for i, node in enumerate(significant_nodes):
                    is_last = (i == len(significant_nodes) - 1)
                    node_lines = format_node(node, "", is_last)
                    if node_lines:  # Only add node lines if there are any
                        output_lines.extend(node_lines)

        # Return the formatted text
        return '\n'.join(output_lines) if output_lines else "No significant code structure found."

    def format_analysis_results(analysis_results: List[Dict[str, Any]], analyzed_files: List[str], errors: List[Dict[str, str]]) -> str:
        """Format the analysis results into a clear text format."""

        # Count statistics
        total_files = len(analyzed_files)
        classes = sum(count_nodes(f['structure'], class_types) for f in analysis_results)
        functions = sum(count_nodes(f['structure'], function_types) for f in analysis_results)
        decorated_functions = sum(count_nodes(f['structure'], {'decorated_definition'}) for f in analysis_results)
        error_count = len(errors)

        # Build output sections
        sections = []

        # Add statistics section
        sections.append("\n===ANALYSIS STATISTICS===\n")
        sections.append(f"Total files analyzed: {total_files}")
        sections.append(f"Total errors: {error_count}")
        sections.append(f"Total classes found: {classes}")
        sections.append(f"Total functions found: {functions}")
        sections.append(f"Total decorated functions: {decorated_functions}")

        # Add errors section if any
        if errors:
            sections.append("\n===ERRORS===")
            for error in errors:
                error_first_line = error['error'].split('\n')[0]
                sections.append(f"{error['path']}: {error_first_line}")

        # Add repository map
        sections.append("\n===REPOSITORY STRUCTURE===")
        sections.append(generate_text_map(analysis_results))

        # Join all sections with newlines
        return "\n".join(sections)

    return [TextContent(
        type="text",
        text=format_analysis_results(analysis_results, analyzed_files, errors)
    )]
704 |
--------------------------------------------------------------------------------
/src/aidd/tools/code_execution.py:
--------------------------------------------------------------------------------
import os
import shutil
import stat
import subprocess
from typing import Any, Dict, List

import mcp.types as types

from .state import state
9 |
# Language configurations keyed by language name.
# Per language:
#   file_extension - suffix for the temporary script file
#   command        - argv prefix used to run the script (rustc is special-cased
#                    in execute_code_in_temp_file: compile first, then run)
#   comment_prefix - single-line comment marker for that language
#   wrapper_start/wrapper_end - optional main-function scaffolding added by
#                    prepare_code when the snippet lacks its own entry point
LANGUAGE_CONFIGS = {
    'python': {
        'file_extension': '.py',
        'command': ['python3'],
        'comment_prefix': '#'
    },
    'javascript': {
        'file_extension': '.js',
        'command': ['node'],
        'comment_prefix': '//'
    },
    'ruby': {
        'file_extension': '.rb',
        'command': ['ruby'],
        'comment_prefix': '#'
    },
    'php': {
        'file_extension': '.php',
        'command': ['php'],
        'comment_prefix': '//'
    },
    'go': {
        'file_extension': '.go',
        'command': ['go', 'run'],
        'comment_prefix': '//',
        'wrapper_start': 'package main\nfunc main() {',
        'wrapper_end': '}'
    },
    'rust': {
        'file_extension': '.rs',
        'command': ['rustc', '-o'],  # Special handling needed
        'comment_prefix': '//',
        'wrapper_start': 'fn main() {',
        'wrapper_end': '}'
    }
}
47 |
def execute_code_tool() -> Dict[str, Any]:
    """Return the MCP tool specification for the execute_code tool."""
    return {
        "name": "execute_code",
        "description": (
            "Execute arbitrary code in various programming languages on the user's local machine within the current working directory. "
            "WHEN TO USE: When you need to run small code snippets to test functionality, compute values, process data, or "
            "demonstrate how code works. Useful for quick prototyping, data transformations, or explaining programming concepts with running "
            "examples. "
            "WHEN NOT TO USE: When you need to modify files (use write_file or edit_file instead), when running potentially harmful "
            "operations, or when you need to install external dependencies. "
            "RETURNS: Text output including stdout, stderr, and exit code of the "
            "execution. The output sections are clearly labeled with '=== stdout ===' and '=== stderr ==='. "
            "Supported languages: " + ", ".join(LANGUAGE_CONFIGS.keys()) + ". "
            "Always review the code carefully before execution to prevent unintended consequences. "
            "Examples: "
            "- Python: code='print(sum(range(10)))'. "
            "- JavaScript: code='console.log(Array.from({length: 5}, (_, i) => i*2))'. "
            "- Ruby: code='puts (1..5).reduce(:+)'. "
        ),
        "inputSchema": {
            "type": "object",
            "properties": {
                "language": {
                    "type": "string",
                    "enum": list(LANGUAGE_CONFIGS.keys()),
                    "description": "Programming language to use. Must be one of the supported languages: " + ", ".join(LANGUAGE_CONFIGS.keys()) + ". " +
                    "Each language requires the appropriate runtime to be installed on the user's machine. The code will be executed using: python3 for " +
                    "Python, node for JavaScript, ruby for Ruby, php for PHP, go run for Go, and rustc for Rust."
                },
                "code": {
                    "type": "string",
                    "description": "Code to execute on the user's local machine in the current working directory. The code will be saved to a " +
                    "temporary file and executed within the allowed workspace. For Go and Rust, main function wrappers will be added automatically if " +
                    # NOTE(review): the source below is garbled — it appears the text
                    # from here through the start of execute_shell_script_tool
                    # (presumably mentioning '<?php' tags) was stripped, likely by an
                    # HTML-like sanitizer eating everything between '<?' and the next
                    # '>'. This leaves execute_code_tool's schema and the
                    # 'def execute_shell_script_tool() -> Dict[str, Any]:' header
                    # missing. Restore from version control before editing.
                    "not present. For PHP, Dict[str, Any]:
    return {
        "name": "execute_shell_script",
        "description": (
            "Execute a shell script (bash/sh) on the user's local machine within the current working directory. "
            "WHEN TO USE: When you need to automate system tasks, run shell commands, interact with the operating system, or perform operations "
            "that are best expressed as shell commands. Useful for file system operations, system configuration, or running system utilities. "
            "Also ideal when you need to run code linters to check for style issues or potential bugs in the codebase, "
            "or when you need to perform version control operations such as initializing git repositories, checking status, "
            "committing changes, cloning repositories, and other git commands without dedicated tools. "
            "WHEN NOT TO USE: When you need more structured programming (use execute_code instead), when you need to execute potentially "
            "dangerous system operations, or when you want to run commands outside the allowed directory. "
            "RETURNS: Text output including stdout, stderr, and exit code of the execution. The output sections are clearly labeled with "
            "'=== stdout ===' and '=== stderr ==='. "
            "This tool can execute shell commands and scripts for system automation and management tasks. "
            "It is designed to perform tasks on the user's local environment, such as opening applications, installing packages and more. "
            "Always review the script carefully before execution to prevent unintended consequences. "
            "Examples: "
            "- script='echo \"Current directory:\" && pwd'. "
            "- script='for i in {1..5}; do echo $i; done'. "
            "- script='eslint src/ --format stylish' (for linting). "
            "- script='git init && git add . && git commit -m \"Initial commit\"' (for git operations)."
        ),
        "inputSchema": {
            "type": "object",
            "properties": {
                "script": {
                    "type": "string",
                    "description": "Shell script to execute on the user's local machine. Can include any valid shell commands or scripts that would "
                    "run in a standard shell environment. The script is executed using /bin/sh for maximum compatibility across systems."
                },
                "timeout": {
                    "type": "integer",
                    "description": "Maximum execution time in seconds. The execution will be terminated if it exceeds this time limit. "
                    "Default is 300 seconds (5 minutes), with a maximum allowed value of 600 seconds (10 minutes).",
                    "default": 300,
                    "maximum": 600
                }
            },
            "required": ["script"]
        }
    }
138 |
def is_command_available(command: str) -> bool:
    """Return True if *command* is resolvable on the system PATH.

    Uses shutil.which instead of shelling out to the `which` binary: it is
    portable (works on Windows and minimal containers that ship no `which`),
    avoids spawning a subprocess, and cannot raise FileNotFoundError the way
    the previous subprocess-based probe could when `which` itself was absent.
    """
    return shutil.which(command) is not None
149 |
def prepare_code(code: str, language: str) -> str:
    """Prepare code for execution based on language requirements.

    Wraps Go/Rust snippets in a main-function scaffold from LANGUAGE_CONFIGS
    when the snippet does not already define one.
    """
    config = LANGUAGE_CONFIGS[language]

    if language == 'go':
        if 'package main' not in code and 'func main()' not in code:
            return f"{config['wrapper_start']}\n{code}\n{config['wrapper_end']}"
    elif language == 'rust':
        if 'fn main()' not in code:
            return f"{config['wrapper_start']}\n{code}\n{config['wrapper_end']}"
    elif language == 'php':
        # NOTE(review): the source below is garbled — the PHP branch (which
        # presumably tested for a '<?php' opening tag) and the signature line
        # 'def execute_code_in_temp_file(code, language, timeout) ->' appear
        # to have been stripped, likely by a sanitizer eating everything
        # between '<?' and the next '>'. Restore from version control before
        # editing; the body below belongs to execute_code_in_temp_file.
        if ' tuple[str, str, int]:
    """Execute code in a temporary file and return stdout, stderr, and return code."""
    config = LANGUAGE_CONFIGS[language]
    # NOTE(review): fixed temp-file name — concurrent executions in the same
    # workspace would clobber each other's scripts; consider tempfile.
    temp_file = f"temp_script{config['file_extension']}"

    try:
        # Change to allowed directory first
        # NOTE(review): os.chdir changes the cwd process-wide, which affects
        # any other handler running in this process — confirm this is intended.
        os.chdir(state.allowed_directory)

        # Write code to temp file
        with open(temp_file, 'w') as f:
            # Prepare and write code
            prepared_code = prepare_code(code, language)
            f.write(prepared_code)
            f.flush()

        # Prepare command
        if language == 'rust':
            # Special handling for Rust: compile to a binary first, then run it
            output_path = 'temp_script.exe'
            compile_cmd = ['rustc', temp_file, '-o', output_path]
            try:
                subprocess.run(compile_cmd,
                               check=True,
                               capture_output=True,
                               timeout=timeout)
                cmd = [output_path]
            except subprocess.CalledProcessError as e:
                # Compilation failed: surface rustc's stderr and exit code
                return '', e.stderr.decode(), e.returncode
        else:
            cmd = config['command'] + [temp_file]

        # Execute code
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                timeout=timeout,
                text=True,
            )
            return result.stdout, result.stderr, result.returncode
        except subprocess.TimeoutExpired:
            # 124 mirrors the conventional timeout exit status of coreutils' timeout
            return '', f'Execution timed out after {timeout} seconds', 124

    finally:
        # Cleanup
        # Note: We stay in the allowed directory as all operations should happen there
        try:
            os.unlink(temp_file)
            if language == 'rust' and os.path.exists(output_path):
                os.unlink(output_path)
        except Exception:
            # Best-effort cleanup; also swallows the NameError that occurs if
            # an early failure meant output_path was never assigned.
            pass
219 |
async def handle_execute_code(arguments: dict) -> List[types.TextContent]:
    """Handle code execution in various programming languages.

    Validates the requested language, checks the interpreter/compiler is
    installed, runs the code via execute_code_in_temp_file, and formats
    stdout/stderr/exit status into a single text response.
    """
    language = arguments.get("language")
    code = arguments.get("code")
    timeout = arguments.get("timeout", 5)

    if not language or not code:
        raise ValueError("Both language and code must be provided")
    if language not in LANGUAGE_CONFIGS:
        raise ValueError(f"Unsupported language: {language}")

    # The interpreter/compiler must exist before attempting execution.
    interpreter = LANGUAGE_CONFIGS[language]['command'][0]
    if not is_command_available(interpreter):
        return [types.TextContent(
            type="text",
            text=f"Error: {interpreter} is not installed on the system"
        )]

    try:
        stdout, stderr, returncode = await execute_code_in_temp_file(language, code, timeout)
    except Exception as e:
        return [types.TextContent(
            type="text",
            text=f"Error executing code:\n{str(e)}"
        )]

    sections = []
    if stdout:
        sections.append(f"=== stdout ===\n{stdout.rstrip()}")
    if stderr:
        sections.append(f"=== stderr ===\n{stderr.rstrip()}")
    if not sections:
        sections.append("Code executed successfully with no output")
    if returncode != 0:
        sections.append(f"\nProcess exited with code {returncode}")

    return [types.TextContent(type="text", text="\n\n".join(sections))]
263 |
async def execute_shell_script_in_temp_file(script: str, timeout: int) -> tuple[str, str, int]:
    """Run a shell script from a temporary file inside the allowed directory.

    Returns a (stdout, stderr, returncode) tuple. A timeout is reported as
    return code 124, mirroring the coreutils `timeout` convention.
    """
    script_path = "temp_script.sh"

    try:
        # All execution happens inside the sandboxed workspace.
        os.chdir(state.allowed_directory)

        # Materialize the script with an sh shebang for maximum compatibility.
        with open(script_path, 'w') as handle:
            handle.write("#!/bin/sh\n")
            handle.write(script)
            handle.flush()

        # Set the owner-execute bit on the script file.
        os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IEXEC)

        try:
            completed = subprocess.run(
                ["/bin/sh", script_path],  # invoke sh explicitly for consistent behavior
                capture_output=True,
                timeout=timeout,
                text=True,
            )
        except subprocess.TimeoutExpired:
            return '', f'Execution timed out after {timeout} seconds', 124
        return completed.stdout, completed.stderr, completed.returncode

    finally:
        # Best-effort cleanup of the temp script.
        try:
            os.unlink(script_path)
        except Exception:
            pass
299 |
async def handle_execute_shell_script(arguments: dict) -> List[types.TextContent]:
    """Handle shell script execution.

    Validates input, runs the script via execute_shell_script_in_temp_file,
    and formats stdout/stderr/exit status into a single text response.

    Raises:
        ValueError: if no script is provided.
    """
    script = arguments.get("script")
    if not script:
        # Explicit validation, consistent with handle_execute_code; previously
        # a missing script surfaced as an opaque TypeError from file writing.
        raise ValueError("script must be provided")
    timeout = min(arguments.get("timeout", 300), 600)  # Default 5 minutes, cap at 10 minutes

    try:
        stdout, stderr, returncode = await execute_shell_script_in_temp_file(script, timeout)
        result = []
        if stdout:
            result.append(f"=== stdout ===\n{stdout.rstrip()}")
        if stderr:
            result.append(f"=== stderr ===\n{stderr.rstrip()}")
        if not stdout and not stderr:
            result.append("Script executed successfully with no output")
        if returncode != 0:
            result.append(f"\nScript exited with code {returncode}")

        return [types.TextContent(
            type="text",
            text="\n\n".join(result)
        )]

    except Exception as e:
        return [types.TextContent(
            type="text",
            text=f"Error executing shell script:\n{str(e)}"
        )]
327 |
--------------------------------------------------------------------------------
/src/aidd/tools/code_tools.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import fnmatch
4 | import subprocess
5 | import json
6 | from datetime import datetime
7 | from typing import List, Dict, Any, Optional, Union, Tuple
8 |
9 | from mcp.types import TextContent
10 | from .state import state
11 |
12 |
def search_code_tool() -> dict:
    """Return the MCP tool definition for the search_code tool.

    The mapping follows the MCP tool schema: a tool name, a human-readable
    description, and a JSON Schema describing the arguments accepted by
    handle_search_code.
    """
    return {
        "name": "search_code",
        "description": "Fast content search tool using regular expressions. "
                       "WHEN TO USE: When you need to search for specific patterns within file contents across a codebase. "
                       "Useful for finding function definitions, variable usages, import statements, or any text pattern "
                       "in source code files. "
                       "WHEN NOT TO USE: When you need to find files by name (use search_files instead), when you need "
                       "semantic code understanding (use codebase_mapper instead), or when analyzing individual file "
                       "structure. "
                       "RETURNS: Lines of code matching the specified patterns, grouped by file with line numbers. "
                       "Results are sorted by file modification time with newest files first. Respects file filtering "
                       "and ignores binary files. Search is restricted to the allowed directory.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "patterns": {
                    "type": "array",
                    "items": {
                        "type": "string"
                    },
                    "description": "List of regular expression patterns to search for in file contents. Supports full regex syntax. "
                                   "Examples: ['function\\s+\\w+', 'class\\s+\\w+'] to find both function and class declarations."
                },
                "include": {
                    "type": "string",
                    "description": "File pattern to include in the search. Supports glob patterns including wildcards and braces. "
                                   "Examples: '*.js' for all JavaScript files, '*.{ts,tsx}' for TypeScript files, "
                                   "'src/**/*.py' for Python files in the src directory and subdirectories.",
                    "default": "*"
                },
                "exclude": {
                    "type": "string",
                    "description": "File pattern to exclude from the search. Supports glob patterns including wildcards and braces. "
                                   "Examples: 'node_modules/**' to exclude node_modules directory, '*.min.js' to exclude minified JS.",
                    "default": ""
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of matching results to return per pattern. Use to limit output size for common patterns. "
                                   "Default is 100, which is sufficient for most searches while preventing excessive output.",
                    "default": 100
                },
                "case_sensitive": {
                    "type": "boolean",
                    "description": "Whether to perform a case-sensitive search. When true, 'Error' will not match 'error'. "
                                   "Default is false, which makes searches case-insensitive.",
                    "default": False
                },
                "path": {
                    "type": "string",
                    "description": "Base directory to search from. This is the starting point for the search. "
                                   "Examples: '.' for current directory, 'src' to search only within src directory. "
                                   "Default is the root of the allowed directory.",
                    "default": "."
                }
            },
            "required": ["patterns"]
        }
    }
73 |
74 |
async def handle_search_code(arguments: dict) -> List[TextContent]:
    """Handle searching for patterns in code files.

    Resolves and validates the search root, then runs each pattern through
    ripgrep when available, falling back to a pure-Python scan otherwise.
    Returns one TextContent per pattern (errors for an individual pattern
    are reported inline rather than aborting the other patterns).

    Raises:
        ValueError: for empty pattern lists or invalid/out-of-bounds paths.
    """
    patterns = arguments.get("patterns", [])
    include = arguments.get("include", "*")
    exclude = arguments.get("exclude", "")
    max_results = arguments.get("max_results", 100)
    case_sensitive = arguments.get("case_sensitive", False)
    path = arguments.get("path", ".")

    if not patterns:
        raise ValueError("At least one pattern must be provided")

    # Determine full path for search start
    if os.path.isabs(path):
        full_path = os.path.abspath(path)
    else:
        full_path = os.path.abspath(os.path.join(state.allowed_directory, path))

    # Security check. Compare against the directory boundary (trailing
    # separator) so that a sibling such as "/workspace2" is not accepted
    # as being inside "/workspace", which a bare startswith() would allow.
    allowed_root = state.allowed_directory
    if full_path != allowed_root and not full_path.startswith(allowed_root.rstrip(os.sep) + os.sep):
        raise ValueError(f"Access denied: Path ({full_path}) must be within allowed directory")

    if not os.path.exists(full_path):
        raise ValueError(f"Path does not exist: {path}")
    if not os.path.isdir(full_path):
        raise ValueError(f"Path is not a directory: {path}")

    # Results from all patterns
    all_results = []

    try:
        for i, pattern in enumerate(patterns):
            # Separator header is only needed when several patterns are searched.
            pattern_header = f"\n{'='*30}\nPattern {i+1}: {pattern}\n{'='*30}\n" if len(patterns) > 1 else ""
            try:
                # Use ripgrep if available for faster results
                try:
                    result = await _search_with_ripgrep(
                        pattern, include, exclude, max_results, case_sensitive, full_path
                    )
                except (subprocess.SubprocessError, FileNotFoundError):
                    # Fallback to Python implementation if ripgrep not available
                    result = await _search_with_python(
                        pattern, include, exclude, max_results, case_sensitive, full_path
                    )

                # Add pattern header for multiple patterns
                if len(patterns) > 1 and result and result[0].text != f"No matches found for pattern '{pattern}'.":
                    result[0].text = pattern_header + result[0].text

                all_results.extend(result)
            except Exception as e:
                all_results.append(TextContent(
                    type="text",
                    text=f"{pattern_header}Error searching for pattern '{pattern}': {str(e)}"
                ))

        return all_results
    except Exception as e:
        raise ValueError(f"Error searching code: {str(e)}")
134 |
135 |
async def _search_with_ripgrep(
    pattern: str,
    include: str,
    exclude: str,
    max_results: int,
    case_sensitive: bool,
    full_path: str
) -> List[TextContent]:
    """Search using ripgrep for better performance.

    Builds an `rg` command line, groups its `file:line:content` output by
    file, sorts files by modification time (newest first), and prepends a
    summary line. Propagates subprocess.SubprocessError/FileNotFoundError
    so the caller can fall back to the pure-Python implementation.
    """
    cmd = ["rg", "--line-number"]

    # Add case sensitivity flag
    if not case_sensitive:
        cmd.append("--ignore-case")

    # Add include patterns if provided
    if include and include != "*":
        # Convert glob pattern to ripgrep glob
        cmd.extend(["--glob", include])

    # Add exclude patterns if provided
    if exclude:
        # A leading "!" negates a ripgrep glob
        cmd.extend(["--glob", f"!{exclude}"])

    # NOTE: rg's --max-count caps matches per *file*, not the overall total,
    # so the aggregate result may exceed max_results lines.
    cmd.extend(["--max-count", str(max_results)])

    # Add pattern and path
    cmd.extend([pattern, full_path])

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True
        )

        output = result.stdout.strip()
        if not output:
            return [TextContent(
                type="text",
                text="No matches found."
            )]

        # Group matches by file and record modification times for sorting.
        files_with_matches = {}

        for line in output.split('\n'):
            if not line.strip():
                continue

            # ripgrep output format: file:line:content
            parts = line.split(':', 2)
            if len(parts) >= 3:
                file_path, line_num, content = parts[0], parts[1], parts[2]

                # Get relative path for display
                rel_path = os.path.relpath(file_path, state.allowed_directory)

                if rel_path not in files_with_matches:
                    # Get file modification time
                    mod_time = os.path.getmtime(file_path)
                    files_with_matches[rel_path] = {
                        "mod_time": mod_time,
                        "matches": []
                    }

                files_with_matches[rel_path]["matches"].append(f"{line_num}: {content}")

        # Sort files by modification time (newest first)
        sorted_files = sorted(
            files_with_matches.items(),
            key=lambda x: x[1]["mod_time"],
            reverse=True
        )

        # Format output
        formatted_output = []
        match_count = 0
        for file_path, data in sorted_files:
            formatted_output.append(f"\n{file_path} (modified: {datetime.fromtimestamp(data['mod_time']).strftime('%Y-%m-%d %H:%M:%S')})")
            formatted_output.extend(data["matches"])
            match_count += len(data["matches"])

        summary = f"Found {match_count} matches in {len(sorted_files)} files for pattern '{pattern}'"
        if match_count > 0:
            formatted_output.insert(0, summary)
        else:
            formatted_output = [summary]

        return [TextContent(
            type="text",
            text="\n".join(formatted_output)
        )]

    except subprocess.CalledProcessError as e:
        if e.returncode == 1 and not e.stderr:
            # ripgrep returns 1 when no matches are found
            return [TextContent(
                type="text",
                text=f"No matches found for pattern '{pattern}'."
            )]
        raise
242 |
243 |
async def _search_with_python(
    pattern: str,
    include: str,
    exclude: str,
    max_results: int,
    case_sensitive: bool,
    full_path: str
) -> List[TextContent]:
    """Fallback search implementation using Python's regex and file operations.

    Walks the tree under full_path, applies include/exclude globs, skips
    binary files, and collects up to max_results matching lines grouped by
    file (newest modification time first).

    Raises:
        ValueError: if the search pattern is not a valid regex.
    """
    # Compile the regex pattern
    try:
        if case_sensitive:
            regex = re.compile(pattern)
        else:
            regex = re.compile(pattern, re.IGNORECASE)
    except re.error as e:
        raise ValueError(f"Invalid regular expression: {str(e)}")

    # Convert glob patterns to regex patterns for matching
    include_pattern = fnmatch.translate(include)
    include_regex = re.compile(include_pattern)

    exclude_regex = None
    if exclude:
        exclude_pattern = fnmatch.translate(exclude)
        exclude_regex = re.compile(exclude_pattern)

    # Dictionary to store files with matches and their modification times
    files_with_matches = {}
    match_count = 0

    # Walk the directory tree
    for root, _, files in os.walk(full_path):
        if match_count >= max_results:
            break

        for filename in files:
            if match_count >= max_results:
                break

            file_path = os.path.join(root, filename)

            # Get path relative to the search root for pattern matching
            rel_path = os.path.relpath(file_path, full_path)

            # Check if file matches include pattern
            if not include_regex.match(filename) and not include_regex.match(rel_path):
                continue

            # Check if file matches exclude pattern
            if exclude_regex and (exclude_regex.match(filename) or exclude_regex.match(rel_path)):
                continue

            # Get file modification time
            try:
                mod_time = os.path.getmtime(file_path)
            except (OSError, IOError):
                continue

            # Skip binary files
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    try:
                        # Try to read the first few bytes to check if it's a text file
                        f.read(4096)
                        # Rewind to beginning of file
                        f.seek(0)
                    except UnicodeDecodeError:
                        # Skip binary files
                        continue

                    # Get relative path for display
                    display_path = os.path.relpath(file_path, state.allowed_directory)

                    # Initialize entry for this file. Entries that end up with
                    # no matches are filtered out during formatting below.
                    if display_path not in files_with_matches:
                        files_with_matches[display_path] = {
                            "mod_time": mod_time,
                            "matches": []
                        }

                    # Search for pattern in each line
                    for line_num, line in enumerate(f, 1):
                        if regex.search(line):
                            files_with_matches[display_path]["matches"].append(f"{line_num}: {line.rstrip()}")
                            match_count += 1

                            if match_count >= max_results:
                                break

            except (OSError, IOError, UnicodeDecodeError):
                # Skip files that can't be read. UnicodeDecodeError is included
                # because decoding happens lazily: a file whose first 4096 chars
                # are valid text can still fail during the line loop above, and
                # previously that aborted the entire search.
                continue

    # No matches found
    if not files_with_matches:
        return [TextContent(
            type="text",
            text=f"No matches found for pattern '{pattern}'."
        )]

    # Sort files by modification time (newest first)
    sorted_files = sorted(
        files_with_matches.items(),
        key=lambda x: x[1]["mod_time"],
        reverse=True
    )

    # Format output
    formatted_output = []
    total_matches = 0
    files_with_actual_matches = 0

    for file_path, data in sorted_files:
        if data["matches"]:  # Only include files that actually have matches
            formatted_output.append(f"\n{file_path} (modified: {datetime.fromtimestamp(data['mod_time']).strftime('%Y-%m-%d %H:%M:%S')})")
            formatted_output.extend(data["matches"])
            total_matches += len(data["matches"])
            files_with_actual_matches += 1

    summary = f"Found {total_matches} matches in {files_with_actual_matches} files for pattern '{pattern}'"
    if total_matches > 0:
        formatted_output.insert(0, summary)
    else:
        formatted_output = [summary]

    return [TextContent(
        type="text",
        text="\n".join(formatted_output)
    )]
374 |
--------------------------------------------------------------------------------
/src/aidd/tools/directory_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import subprocess
4 | from datetime import datetime
5 |
6 | from mcp.types import TextContent
7 |
8 | from .state import state
9 |
10 |
def list_directory_tool() -> dict:
    """Return the MCP tool definition for the list_directory tool."""
    return {
        "name": "list_directory",
        "description": "Get a detailed listing of files and directories in the specified path, including type, size, and modification "
        "date. WHEN TO USE: When you need to explore the contents of a directory, understand what files are available, check file sizes or "
        "modification dates, or locate specific files by name. WHEN NOT TO USE: When you need to read the contents of files (use read_file "
        "instead), when you need a recursive listing of all subdirectories (use directory_tree instead), or when searching for files by name pattern "
        "(use search_files instead). RETURNS: Text with each line containing file type ([DIR]/[FILE]), name, size (in B/KB/MB), and "
        "modification date. Only works within the allowed directory. Example: Enter 'src' to list contents of the src directory, or '.' for "
        "current directory.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Path of the directory to list. Examples: '.' for current directory, 'src' for src directory, 'docs/images' for a nested directory. The path must be within the allowed workspace.",
                }
            },
            "required": ["path"]
        },
    }
32 |
async def handle_list_directory(arguments: dict):
    """Handle listing the contents of a directory.

    Returns one TextContent whose text has a line per entry: a
    [DIR]/[FILE] marker, the name, a human-readable size, and the
    modification time. Entries that cannot be stat'd are skipped.

    Raises:
        ValueError: for out-of-bounds, missing, or non-directory paths,
            or when the directory itself cannot be read.
    """
    path = arguments.get("path", ".")

    # Determine full path based on whether input is absolute or relative
    if os.path.isabs(path):
        full_path = os.path.abspath(path)  # Just normalize the absolute path
    else:
        # For relative paths, join with allowed_directory
        full_path = os.path.abspath(os.path.join(state.allowed_directory, path))

    # Boundary-aware containment check: a bare startswith() would accept
    # sibling directories such as "/workspace2" for root "/workspace".
    allowed_root = state.allowed_directory
    if full_path != allowed_root and not full_path.startswith(allowed_root.rstrip(os.sep) + os.sep):
        raise ValueError(f"Access denied: Path ({full_path}) must be within allowed directory ({state.allowed_directory})")
    if not os.path.exists(full_path):
        raise ValueError(f"Path does not exist: {full_path}")
    if not os.path.isdir(full_path):
        raise ValueError(f"Path is not a directory: {full_path}")

    # List directory contents
    entries = []
    try:
        with os.scandir(full_path) as it:
            for entry in it:
                try:
                    entry_stat = entry.stat()
                    # Format size to be human readable
                    size = entry_stat.st_size
                    if size >= 1024 * 1024:  # MB
                        size_str = f"{size / (1024 * 1024):.1f}MB"
                    elif size >= 1024:  # KB
                        size_str = f"{size / 1024:.1f}KB"
                    else:  # bytes
                        size_str = f"{size}B"

                    entry_type = "[DIR]" if entry.is_dir() else "[FILE]"
                    mod_time = datetime.fromtimestamp(entry_stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S")
                    entries.append(f"{entry_type} {entry.name:<30} {size_str:>8} {mod_time}")
                except (OSError, PermissionError):
                    continue

        entries.sort()  # Sort entries alphabetically
        return [TextContent(type="text", text="\n".join(entries))]

    except PermissionError:
        raise ValueError(f"Permission denied accessing: {full_path}")
79 |
def create_directory_tool() -> dict:
    """Return the MCP tool definition for the create_directory tool."""
    return {
        "name": "create_directory",
        "description": "Create a new directory or ensure a directory exists. "
        "Can create multiple nested directories in one operation. "
        "WHEN TO USE: When you need to set up project structure, organize files, create output directories before saving files, or establish a directory hierarchy. "
        "WHEN NOT TO USE: When you only want to check if a directory exists (use get_file_info instead), or when trying to create directories outside the allowed workspace. "
        "RETURNS: Text message confirming either that the directory was successfully created or that it already exists. "
        "The operation succeeds silently if the directory already exists. "
        "Only works within the allowed directory. "
        "Example: Enter 'src/components' to create nested directories.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Path of the directory to create. Can include nested directories which will all be created. Examples: 'logs' for a simple directory, 'src/components/buttons' for nested directories. Both absolute and relative paths are supported, but must be within the allowed workspace."
                }
            },
            "required": ["path"]
        },
    }
102 |
103 | async def handle_create_directory(arguments: dict):
104 | """Handle creating a new directory."""
105 | from mcp.types import TextContent
106 |
107 | path = arguments.get("path")
108 | if not path:
109 | raise ValueError("path must be provided")
110 |
111 | # Determine full path based on whether input is absolute or relative
112 | if os.path.isabs(path):
113 | full_path = os.path.abspath(path) # Just normalize the absolute path
114 | else:
115 | # For relative paths, join with allowed_directory
116 | full_path = os.path.abspath(os.path.join(state.allowed_directory, path))
117 |
118 | # Security check: ensure path is within allowed directory
119 | if not full_path.startswith(state.allowed_directory):
120 | raise ValueError(
121 | f"Access denied: Path ({full_path}) must be within allowed directory ({state.allowed_directory})"
122 | )
123 |
124 | already_exists = os.path.exists(full_path)
125 |
126 | try:
127 | # Create directory and any necessary parent directories
128 | os.makedirs(full_path, exist_ok=True)
129 |
130 | if already_exists:
131 | return [TextContent(type="text", text=f"Directory already exists: {path}")]
132 | return [TextContent(
133 | type="text",
134 | text=f"Successfully created directory: {path}"
135 | )]
136 | except PermissionError:
137 | raise ValueError(f"Permission denied creating directory: {path}")
138 | except Exception as e:
139 | raise ValueError(f"Error creating directory: {str(e)}")
140 |
def directory_tree_tool() -> dict:
    """Return the MCP tool definition for the directory_tree tool."""
    return {
        "name": "directory_tree",
        "description": "Get a recursive tree view of files and directories in the specified path as a JSON structure. "
        "WHEN TO USE: When you need to understand the complete structure of a directory tree, visualize the hierarchy of files and directories, or get a comprehensive overview of a project's organization. "
        "Particularly useful for large projects where you need to see nested relationships. "
        "WHEN NOT TO USE: When you only need a flat list of files in a single directory (use directory_listing instead), or when you're only interested in specific file types (use search_files instead). "
        "RETURNS: JSON structure where each entry includes 'name', 'type' (file/directory), and 'children' for directories. "
        "Files have no children array, while directories always have a children array (which may be empty). "
        "The output is formatted with 2-space indentation for readability. For Git repositories, shows tracked files only. "
        "Only works within the allowed directory. "
        "Example: Enter '.' for current directory, or 'src' for a specific directory.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Root directory to analyze. This is the starting point for the recursive tree generation. Examples: '.' for current directory, 'src' for the src directory. Both absolute and relative paths are supported, but must be within the allowed workspace."
                }
            },
            "required": ["path"]
        },
    }
164 |
async def build_directory_tree(dir_path: str) -> dict:
    """Build directory tree as a JSON-serializable structure.

    Returns a dict with 'name', 'type' ("directory"), and a 'children'
    list; file children carry only 'name' and 'type'. Children are sorted
    case-insensitively by name, and subdirectories are descended into
    recursively.

    Raises:
        ValueError: on permission failures or any other error while
            scanning a directory.
    """
    try:
        children = []
        # Case-insensitive name order for stable, human-friendly output.
        for item in sorted(os.scandir(dir_path), key=lambda e: e.name.lower()):
            if item.is_dir():
                children.append(await build_directory_tree(item.path))
            else:
                children.append({
                    "name": item.name,
                    "type": "file"
                })
        return {
            "name": os.path.basename(dir_path) or dir_path,
            "type": "directory",
            "children": children
        }
    except PermissionError:
        raise ValueError(f"Access denied: {dir_path}")
    except Exception as e:
        raise ValueError(f"Error processing directory {dir_path}: {str(e)}")
194 |
async def handle_directory_tree(arguments: dict):
    """Handle building a directory tree.

    Prefers `git ls-files` (tracked files only) when the target is a git
    repository; otherwise falls back to a full filesystem walk via
    build_directory_tree. Returns the tree as pretty-printed JSON.

    Raises:
        ValueError: for out-of-bounds/missing/non-directory paths, or when
            the fallback traversal fails.
    """
    path = arguments.get("path", ".")

    # Validate and get full path. os.path.join discards the left operand
    # when `path` is absolute, so absolute inputs are handled too.
    full_path = os.path.abspath(os.path.join(state.allowed_directory, path))
    # Boundary-aware containment check: a bare startswith() would accept
    # siblings such as "/workspace2" for root "/workspace".
    allowed_root = state.allowed_directory
    if full_path != allowed_root and not full_path.startswith(allowed_root.rstrip(os.sep) + os.sep):
        raise ValueError(f"Access denied: Path ({full_path}) must be within allowed directory ({state.allowed_directory})")
    if not os.path.exists(full_path):
        raise ValueError(f"Path does not exist: {full_path}")
    if not os.path.isdir(full_path):
        raise ValueError(f"Path is not a directory: {full_path}")

    # Try git ls-files first
    try:
        # Get list of all files tracked by git
        result = subprocess.run(
            ['git', 'ls-files'],
            cwd=full_path,
            capture_output=True,
            text=True,
            check=True,
        )

        # If git command was successful
        files = [f for f in result.stdout.split('\n') if f.strip()]
        files.sort()

        # Build tree from git files
        directory_map = {}
        root_name = os.path.basename(full_path) or full_path

        # First pass: register every path component under its parent directory.
        # NOTE(review): git ls-files emits '/'-separated paths; splitting on
        # os.sep may misbehave on Windows — confirm.
        for file in files:
            parts = file.split(os.sep)
            for i in range(len(parts)):
                parent = os.sep.join(parts[:i])
                if i < len(parts) - 1:  # It's a directory
                    directory_map.setdefault(parent, {"dirs": set(), "files": set()})["dirs"].add(parts[i])
                else:  # It's a file
                    directory_map.setdefault(parent, {"dirs": set(), "files": set()})["files"].add(parts[i])

        async def build_git_tree(current_path: str) -> dict:
            """Recursively assemble the subtree for one directory_map entry."""
            dir_name = current_path.split(os.sep)[-1] if current_path else ''
            result = {
                "name": dir_name or root_name,
                "type": "directory",
                "children": [],
            }

            if current_path not in directory_map:
                return result

            entry = directory_map[current_path]

            # Add directories first
            for dir_name in sorted(entry["dirs"]):
                child_path = os.path.join(current_path, dir_name) if current_path else dir_name
                child_tree = await build_git_tree(child_path)
                result["children"].append(child_tree)

            # Then add files
            for file_name in sorted(entry["files"]):
                result["children"].append({
                    "name": file_name,
                    "type": "file",
                })

            return result

        # Build the tree structure starting from root
        tree = await build_git_tree('')
        return [TextContent(type="text", text=json.dumps(tree, indent=2))]

    except (subprocess.CalledProcessError, FileNotFoundError):
        # Git not available or not a git repository, use fallback implementation
        pass
    except Exception as e:
        # Log the error but continue with fallback
        print(f"Error using git ls-files: {e}")

    # Fallback to regular directory traversal
    try:
        # Build the directory tree structure
        tree = await build_directory_tree(full_path)

        # Convert to JSON with pretty printing
        json_tree = json.dumps(tree, indent=2)

        return [TextContent(type="text", text=json_tree)]
    except Exception as e:
        raise ValueError(f"Error building directory tree: {str(e)}")
290 |
--------------------------------------------------------------------------------
/src/aidd/tools/get_active_apps_tool.py:
--------------------------------------------------------------------------------
1 | import json
2 | import platform
3 | import subprocess
4 | from typing import Any, Dict, List
5 |
6 | from mcp import types
7 |
8 | # Import platform-specific libraries if available
9 | try:
10 | import Quartz
11 | QUARTZ_AVAILABLE = True
12 | except ImportError:
13 | QUARTZ_AVAILABLE = False
14 |
15 | try:
16 | import pygetwindow as gw
17 | PYGETWINDOW_AVAILABLE = True
18 | except ImportError:
19 | PYGETWINDOW_AVAILABLE = False
20 |
21 |
def get_active_apps_tool() -> dict:
    """Return the MCP tool definition for the get_active_apps tool.

    The mapping follows the MCP tool schema: a tool name, a human-readable
    description, and a JSON Schema describing the single optional
    `with_details` boolean argument.
    """
    return {
        "name": "get_active_apps",
        "description": "Get a list of currently active applications running on the user's system. "
        "WHEN TO USE: When you need to understand what software the user is currently working with, "
        "gain context about their active applications, provide application-specific assistance, or "
        "troubleshoot issues related to running programs. Especially useful for providing targeted "
        "help based on what the user is actively using. "
        "WHEN NOT TO USE: When you need information about specific windows rather than applications "
        "(use get_available_windows instead), when you need a screenshot of what's on screen "
        "(use capture_screenshot instead), or when application context isn't relevant to the task at hand. "
        "RETURNS: JSON object containing platform information, success status, count of applications, "
        "and an array of application objects. Each application object includes name, has_windows flag, "
        "and when details are requested, information about visible windows. Works on macOS, Windows, "
        "and Linux, with platform-specific implementation details.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "with_details": {
                    "type": "boolean",
                    "description": "Whether to include additional details about each application. When true, returns extra "
                    "information like window_count, visible_windows with their names and dimensions. When false, "
                    "returns a simpler list with just application names and whether they have windows. Default is False."
                }
            },
            "required": []
        },
    }
51 |
52 |
def _get_active_apps_macos(with_details: bool = False) -> List[Dict[str, Any]]:
    """Get a list of currently active applications on macOS.

    Strategy: query the Quartz CGWindowList API first (more reliable); if
    Quartz is unavailable or yields nothing, fall back to an AppleScript
    query via ``osascript``, and optionally enrich those results with
    window details from Quartz afterwards.

    Args:
        with_details: When True, each app entry also carries per-window
            details (window name and id) collected from Quartz.

    Returns:
        List of dicts, each with at least ``name`` and ``has_windows``;
        Quartz-sourced entries also carry ``window_count`` and
        ``visible_windows`` (a list, or None when details weren't requested).
        Errors are printed and an empty list may be returned.
    """
    active_apps = []

    # Attempt to use Quartz directly first, as it's more reliable
    if QUARTZ_AVAILABLE:
        try:
            window_list = Quartz.CGWindowListCopyWindowInfo(
                Quartz.kCGWindowListOptionOnScreenOnly,
                Quartz.kCGNullWindowID
            )

            # Create a map of app names to their details
            app_map = {}
            for window in window_list:
                owner = window.get('kCGWindowOwnerName', '')

                # Skip empty app names or system components we don't want to include
                if not owner or owner in ["SystemUIServer", "osascript"]:
                    continue

                # Create new entry for this app if we haven't seen it before
                if owner not in app_map:
                    app_map[owner] = {
                        "name": owner,
                        "has_windows": False,
                        "window_count": 0,
                        # None (rather than []) when details weren't requested,
                        # so JSON output distinguishes "not collected" from "none".
                        "visible_windows": [] if with_details else None
                    }

                # Count this window
                app_map[owner]["window_count"] += 1

                # Check if this is a visible application window
                # (missing layer defaults to 999, i.e. treated as non-standard)
                layer = window.get('kCGWindowLayer', 999)
                name = window.get('kCGWindowName', '')

                # Layer 0 typically indicates a standard application window
                if layer <= 0:
                    app_map[owner]["has_windows"] = True

                    # Add details about this window if detailed info was requested
                    if with_details and name:
                        app_map[owner]["visible_windows"].append({
                            "name": name,
                            "id": window.get('kCGWindowNumber', 0)
                        })

            # Convert the map to a list
            active_apps = list(app_map.values())

            # If we got results from Quartz, we're done
            if active_apps:
                return active_apps

        except Exception as e:
            print(f"Error getting applications with Quartz: {str(e)}")

    # Fall back to AppleScript if Quartz failed or isn't available
    if not active_apps:
        try:
            # Modified AppleScript that tries to avoid including itself
            script = '''
            tell application "System Events"
                set appList to {}
                set allProcesses to application processes whose background only is false

                repeat with proc in allProcesses
                    set procName to name of proc
                    set procVisible to (windows of proc is not {})

                    # Skip the scripting process itself
                    if procName is not "osascript" and procName is not "System Events" then
                        set end of appList to {name:procName, has_windows:procVisible}
                    end if
                end repeat

                return appList
            end tell
            '''

            result = subprocess.run(["osascript", "-e", script], capture_output=True, text=True)
            if result.returncode == 0:
                # Parse the output from AppleScript
                output = result.stdout.strip()
                if output:
                    # AppleScript returns a list of records, we need to parse it
                    # by splitting on the record separator ", {" and stripping braces.
                    lines = output.split(", {")
                    for i, line in enumerate(lines):
                        if i == 0:
                            line = line.replace("{", "")
                        if i == len(lines) - 1:
                            line = line.replace("}", "")

                        if "name:" in line and "has_windows:" in line:
                            parts = line.split(", ")
                            app_info = {}
                            for part in parts:
                                if "name:" in part:
                                    app_info["name"] = part.replace("name:", "").strip()
                                elif "has_windows:" in part:
                                    app_info["has_windows"] = part.replace("has_windows:", "").strip().lower() == "true"

                            if app_info:
                                active_apps.append(app_info)
        except Exception as e:
            print(f"Error getting apps with AppleScript: {str(e)}")

    # Add window details if requested and if we got results from AppleScript
    if active_apps and with_details and QUARTZ_AVAILABLE:
        try:
            window_list = Quartz.CGWindowListCopyWindowInfo(
                Quartz.kCGWindowListOptionOnScreenOnly,
                Quartz.kCGNullWindowID
            )

            # Create a map of app names to window details
            app_details = {}
            for window in window_list:
                owner = window.get('kCGWindowOwnerName', '')
                if not owner:
                    continue

                if owner not in app_details:
                    app_details[owner] = {
                        "window_count": 0,
                        "windows": []
                    }

                app_details[owner]["window_count"] += 1

                # Add window details if this is a visible window
                layer = window.get('kCGWindowLayer', 999)
                name = window.get('kCGWindowName', '')

                if layer <= 0 and name:
                    app_details[owner]["windows"].append({
                        "name": name,
                        "id": window.get('kCGWindowNumber', 0)
                    })

            # Enhance the active_apps list with these details
            for app in active_apps:
                app_name = app["name"]
                if app_name in app_details:
                    app["window_count"] = app_details[app_name]["window_count"]
                    app["visible_windows"] = app_details[app_name]["windows"]
        except Exception as e:
            print(f"Error getting window details with Quartz: {str(e)}")

    return active_apps
204 |
205 |
def _get_active_apps_windows(with_details: bool = False) -> List[Dict[str, Any]]:
    """Get a list of currently active applications on Windows.

    Strategy: a PowerShell query for the plain list; when details are
    requested and PyGetWindow is installed, windows are grouped by an
    app name approximated from the window title. A simpler PowerShell
    query is the final fallback when both paths produced nothing.

    Args:
        with_details: When True, include ``window_count`` and
            ``visible_windows`` per application (requires PyGetWindow).

    Returns:
        List of dicts, each with at least ``name`` and ``has_windows``.
        Errors are printed and an empty list may be returned.
    """
    active_apps = []

    # Basic list without details
    if not with_details:
        try:
            # Use a PowerShell command to get running applications
            script = '''
            Get-Process | Where-Object {$_.MainWindowTitle -ne ""} |
            Select-Object ProcessName, MainWindowTitle |
            ConvertTo-Json
            '''

            cmd = ["powershell", "-Command", script]
            process = subprocess.run(cmd, capture_output=True, text=True)

            if process.returncode == 0:
                try:
                    apps_data = json.loads(process.stdout)
                    # Handle single item (not in a list)
                    if isinstance(apps_data, dict):
                        apps_data = [apps_data]

                    for app in apps_data:
                        active_apps.append({
                            "name": app.get("ProcessName", ""),
                            "has_windows": True,
                            "window_title": app.get("MainWindowTitle", "")
                        })
                except json.JSONDecodeError:
                    print("Failed to parse JSON from PowerShell output")
        except Exception as e:
            print(f"Error getting basic app list on Windows: {str(e)}")

    # More detailed list with PyGetWindow if available
    elif PYGETWINDOW_AVAILABLE:
        try:
            # Get the list of windows
            all_windows = gw.getAllWindows()

            # Group by application (approximate, since we only have window titles)
            app_windows = {}

            for window in all_windows:
                if not window.title:
                    continue

                # Try to extract application name from window title
                # This is an approximation and might not be accurate for all applications
                title_parts = window.title.split(' - ')
                app_name = title_parts[-1] if len(title_parts) > 1 else window.title

                if app_name not in app_windows:
                    app_windows[app_name] = {
                        "name": app_name,
                        "has_windows": True,
                        "window_count": 0,
                        "visible_windows": []
                    }

                app_windows[app_name]["window_count"] += 1
                app_windows[app_name]["visible_windows"].append({
                    "name": window.title,
                    "width": window.width,
                    "height": window.height
                })

            active_apps = list(app_windows.values())
        except Exception as e:
            print(f"Error getting detailed app list with PyGetWindow: {str(e)}")

    # Fallback to a basic PowerShell approach
    # (reached when either branch above produced nothing)
    if not active_apps:
        try:
            script = '''
            Get-Process | Where-Object {$_.MainWindowHandle -ne 0} |
            Select-Object ProcessName | ConvertTo-Json
            '''

            cmd = ["powershell", "-Command", script]
            process = subprocess.run(cmd, capture_output=True, text=True)

            if process.returncode == 0:
                try:
                    apps_data = json.loads(process.stdout)
                    # Handle single item (not in a list)
                    if isinstance(apps_data, dict):
                        apps_data = [apps_data]

                    for app in apps_data:
                        active_apps.append({
                            "name": app.get("ProcessName", ""),
                            "has_windows": True
                        })
                except json.JSONDecodeError:
                    print("Failed to parse JSON from PowerShell output")
        except Exception as e:
            print(f"Error getting fallback app list on Windows: {str(e)}")

    return active_apps
307 |
308 |
def _get_active_apps_linux(with_details: bool = False) -> List[Dict[str, Any]]:
    """Get a list of currently active applications on Linux.

    Strategy: prefer ``wmctrl -l`` (lists only windowed apps); when wmctrl
    is missing or yields nothing, fall back to filtering ``ps`` output with
    a rough GUI-process heuristic.

    Args:
        with_details: When True, include per-window details (title, id,
            desktop) for each application found via wmctrl.

    Returns:
        List of dicts, each with at least ``name`` and ``has_windows``.
        Errors are printed and an empty list may be returned.
    """
    active_apps = []

    # Try using wmctrl if available
    try:
        # Check if wmctrl is installed
        check_process = subprocess.run(["which", "wmctrl"], capture_output=True)
        if check_process.returncode == 0:
            # Get window list with wmctrl
            wmctrl_process = subprocess.run(["wmctrl", "-l"], capture_output=True, text=True)

            if wmctrl_process.returncode == 0:
                window_data = wmctrl_process.stdout.strip().split('\n')

                # Process each window line
                app_windows = {}
                for line in window_data:
                    if not line:
                        continue

                    parts = line.split(None, 3)  # Split by whitespace, max 3 splits
                    if len(parts) >= 4:
                        # wmctrl -l columns: window id, desktop, client host, window title
                        window_id, desktop, host, title = parts

                        # Try to determine app name from window title
                        app_name = title.split(' - ')[-1] if ' - ' in title else title

                        if app_name not in app_windows:
                            app_windows[app_name] = {
                                "name": app_name,
                                "has_windows": True,
                                "window_count": 0,
                                "visible_windows": []
                            }

                        app_windows[app_name]["window_count"] += 1

                        if with_details:
                            app_windows[app_name]["visible_windows"].append({
                                "name": title,
                                "id": window_id,
                                "desktop": desktop
                            })

                active_apps = list(app_windows.values())
    except Exception as e:
        print(f"Error getting apps with wmctrl: {str(e)}")

    # If wmctrl failed or isn't available, try using ps
    if not active_apps:
        try:
            # List GUI applications
            cmd = ["ps", "-e", "-o", "comm="]
            process = subprocess.run(cmd, capture_output=True, text=True)

            if process.returncode == 0:
                all_processes = process.stdout.strip().split('\n')

                # Filter for likely GUI applications (very basic heuristic)
                gui_indicators = ["-bin", "x11", "gtk", "qt", "wayland", "gnome", "kde"]

                for proc in all_processes:
                    proc = proc.strip()
                    if not proc:
                        continue

                    # Skip system processes that typically don't have UIs
                    if proc.startswith(("ps", "bash", "sh", "zsh", "systemd", "login", "dbus")):
                        continue

                    # Include if it looks like a GUI app
                    if any(indicator in proc.lower() for indicator in gui_indicators) or "/" not in proc:
                        active_apps.append({
                            "name": proc,
                            "has_windows": True  # Assuming these have windows, though we can't be sure
                        })
        except Exception as e:
            print(f"Error getting apps with ps: {str(e)}")

    return active_apps
390 |
391 |
def get_active_apps(with_details: bool = False) -> Dict[str, Any]:
    """
    Get a list of currently active applications on the user's system.

    Args:
        with_details: Whether to include additional details about each application

    Returns:
        Dictionary with platform, success status, and list of active applications
    """
    system_name = platform.system().lower()

    # Dispatch to the platform-specific collector.
    collectors = {
        "darwin": _get_active_apps_macos,
        "macos": _get_active_apps_macos,
        "windows": _get_active_apps_windows,
        "linux": _get_active_apps_linux,
    }
    collector = collectors.get(system_name)
    if collector is None:
        return {
            "success": False,
            "platform": system_name,
            "error": f"Unsupported platform: {system_name}. This tool currently supports macOS, Windows, and Linux.",
            "apps": []
        }

    active_apps = collector(with_details)

    # If no apps were found, provide a descriptive error message
    if not active_apps:
        hints = {
            "darwin": ("This is most likely due to missing screen recording permissions. "
                       "Please go to System Settings > Privacy & Security > Screen Recording "
                       "and ensure that your terminal or IDE application has permission to record the screen."),
            "windows": "This might be due to insufficient permissions or no applications with visible windows.",
            "linux": "This might be due to wmctrl not being installed or no applications with visible windows.",
        }
        return {
            "success": False,
            "platform": system_name,
            "error": "No active applications could be detected. " + hints.get(system_name, ""),
            "apps": []
        }

    # Sort by name
    active_apps.sort(key=lambda entry: entry.get("name", "").lower())

    return {
        "success": True,
        "platform": system_name,
        "app_count": len(active_apps),
        "apps": active_apps
    }
447 |
448 |
async def handle_get_active_apps(arguments: dict) -> List[types.TextContent]:
    """Handle the get_active_apps tool invocation."""
    result = get_active_apps(arguments.get("with_details", False))
    payload = json.dumps(result, indent=2)
    return [types.TextContent(type="text", text=payload)]
456 |
--------------------------------------------------------------------------------
/src/aidd/tools/get_available_windows_tool.py:
--------------------------------------------------------------------------------
1 | import json
2 | import platform
3 | import subprocess
4 | from typing import Any, Dict, List
5 | import importlib.util
6 |
7 | from mcp import types
8 |
9 | # Use importlib.util.find_spec to check for availability of optional packages
def is_package_available(package_name):
    """Return True when *package_name* is importable, without importing it."""
    spec = importlib.util.find_spec(package_name)
    return spec is not None
13 |
14 | # Check for PyGetWindow
15 | PYGETWINDOW_AVAILABLE = is_package_available("pygetwindow")
16 |
17 | # Check for macOS-specific Quartz framework
18 | QUARTZ_AVAILABLE = False
19 | if platform.system().lower() == "darwin":
20 | QUARTZ_AVAILABLE = is_package_available("Quartz")
21 | if QUARTZ_AVAILABLE:
22 | from Quartz import (
23 | CGWindowListCopyWindowInfo,
24 | kCGNullWindowID,
25 | kCGWindowListOptionOnScreenOnly,
26 | )
27 |
28 |
def get_available_windows_tool():
    """Build the MCP tool definition for the get_available_windows tool."""
    description = (
        "Get detailed information about all available windows currently displayed on the user's screen. "
        "WHEN TO USE: When you need to know exactly what windows are visible to the user, find a specific "
        "window by title, provide guidance related to something the user is viewing, or need window-level "
        "context that's more detailed than application-level information. Useful for referencing specific "
        "content the user can see on their screen. "
        "WHEN NOT TO USE: When application-level information is sufficient (use get_active_apps instead), "
        "when you need to capture what's on screen (use capture_screenshot instead), or when window "
        "context isn't relevant to the task at hand. "
        "RETURNS: JSON object containing platform information, success status, count of windows, and an "
        "array of window objects. Each window object includes title, application owner, visibility status, "
        "and platform-specific details like window IDs. Works on macOS, Windows, and Linux, with "
        "platform-specific implementation details."
    )
    return {
        "name": "get_available_windows",
        "description": description,
        # No parameters: the tool always lists every visible window.
        "inputSchema": {
            "type": "object",
            "properties": {},
            "required": []
        },
    }
51 |
52 |
def _get_windows_macos() -> List[Dict[str, Any]]:
    """
    Get information about all available windows on macOS.

    Uses the Quartz CGWindowList API; returns an empty list when the
    Quartz bindings are not installed or the query fails.

    Returns:
        List of dictionaries containing window information
        (id, title, app, visible), sorted by app then title.
    """
    windows = []

    if not QUARTZ_AVAILABLE:
        print("Quartz framework not available. Unable to list windows on macOS.")
        return windows

    try:
        # Get the list of windows from Quartz
        window_list = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly, kCGNullWindowID)

        for window in window_list:
            window_id = window.get('kCGWindowNumber', 0)
            owner = window.get('kCGWindowOwnerName', '')
            name = window.get('kCGWindowName', '')
            # NOTE(review): a missing layer defaults to 0 (counted as visible)
            # here, while get_active_apps_tool.py uses 999 (not visible) for
            # the same key — confirm which default is intended.
            layer = window.get('kCGWindowLayer', 0)
            alpha = window.get('kCGWindowAlpha', 1.0)

            # Create the window info dictionary
            # (layer <= 0 is a standard app window; near-zero alpha means hidden)
            window_info = {
                "id": window_id,
                "title": name,
                "app": owner,
                "visible": layer <= 0 and alpha > 0.1,
            }

            windows.append(window_info)

        # Sort windows by application name and then by window title
        windows.sort(key=lambda w: (w.get("app", "").lower(), w.get("title", "").lower()))

    except Exception as e:
        print(f"Error getting windows on macOS: {str(e)}")

    return windows
94 |
95 |
def _get_windows_windows() -> List[Dict[str, Any]]:
    """
    Get information about all available windows on Windows.

    Tries PyGetWindow first; when it is unavailable or returns nothing,
    falls back to a PowerShell script that P/Invokes user32 EnumWindows.

    Returns:
        List of dictionaries containing window information
        (title, visibility, and app name when it can be determined).
    """
    windows = []

    # Try using PyGetWindow if available
    if PYGETWINDOW_AVAILABLE:
        try:
            import pygetwindow as gw
            all_windows = gw.getAllWindows()

            for window in all_windows:
                # Skip windows with empty titles
                if not window.title:
                    continue

                # Try to determine the application name from the window title
                # This is an approximation and may not be accurate for all applications
                app_name = ""
                title_parts = window.title.split(' - ')
                if len(title_parts) > 1:
                    app_name = title_parts[-1]

                # Create the window info dictionary
                window_info = {
                    "title": window.title,
                    "visible": window.visible,
                    "active": window.isActive
                }

                # Add app name if we were able to determine it
                if app_name:
                    window_info["app"] = app_name

                windows.append(window_info)

            # Sort windows by application name and then by window title
            windows.sort(key=lambda w: (w.get("app", "").lower() if "app" in w else "", w.get("title", "").lower()))

        except Exception as e:
            print(f"Error getting windows with PyGetWindow: {str(e)}")

    # If PyGetWindow failed or isn't available, try using PowerShell
    if not windows:
        try:
            # NOTE(review): inside the callback below, `$windows += $window`
            # may rebind a scriptblock-local copy rather than append to the
            # outer $windows array — verify this fallback actually emits data.
            script = '''
Add-Type @"
using System;
using System.Runtime.InteropServices;
using System.Text;

public class Window {
    [DllImport("user32.dll")]
    [return: MarshalAs(UnmanagedType.Bool)]
    public static extern bool EnumWindows(EnumWindowsProc enumProc, IntPtr lParam);

    [DllImport("user32.dll")]
    public static extern int GetWindowText(IntPtr hWnd, StringBuilder lpString, int nMaxCount);

    [DllImport("user32.dll")]
    public static extern bool IsWindowVisible(IntPtr hWnd);

    [DllImport("user32.dll", SetLastError=true)]
    public static extern uint GetWindowThreadProcessId(IntPtr hWnd, out uint lpdwProcessId);

    public delegate bool EnumWindowsProc(IntPtr hWnd, IntPtr lParam);
}
"@

$windows = @()

$enumWindowsCallback = {
    param($hwnd, $lParam)

    # Get the window title
    $sb = New-Object System.Text.StringBuilder(256)
    [void][Window]::GetWindowText($hwnd, $sb, $sb.Capacity)
    $title = $sb.ToString()

    # Only process windows with titles
    if($title -and $title -ne "") {
        # Check if the window is visible
        $visible = [Window]::IsWindowVisible($hwnd)

        # Get process ID and name
        $processId = 0
        [void][Window]::GetWindowThreadProcessId($hwnd, [ref]$processId)
        $process = Get-Process -Id $processId -ErrorAction SilentlyContinue
        $processName = if($process) { $process.ProcessName } else { "Unknown" }

        # Create the window object
        $window = @{
            title = $title
            app = $processName
            visible = $visible
        }

        $windows += $window
    }

    # Continue enumeration
    return $true
}

# Enumerate all windows
[void][Window]::EnumWindows($enumWindowsCallback, [IntPtr]::Zero)

# Sort the windows
$windows = $windows | Sort-Object -Property @{Expression="app"}, @{Expression="title"}

# Convert to JSON
$windows | ConvertTo-Json -Depth 3
'''

            cmd = ["powershell", "-Command", script]
            process = subprocess.run(cmd, capture_output=True, text=True)

            if process.returncode == 0 and process.stdout.strip():
                try:
                    windows_data = json.loads(process.stdout)
                    # Handle single item (not in a list)
                    if isinstance(windows_data, dict):
                        windows_data = [windows_data]

                    windows = windows_data
                except json.JSONDecodeError:
                    print("Failed to parse JSON from PowerShell output")

        except Exception as e:
            print(f"Error getting windows with PowerShell: {str(e)}")

    return windows
232 |
233 |
def _get_windows_linux() -> List[Dict[str, Any]]:
    """
    Get information about all available windows on Linux.

    Prefers ``wmctrl -l``; falls back to walking the X11 window tree with
    ``xwininfo``/``xprop`` when wmctrl is not installed or returns nothing.

    Returns:
        List of dictionaries containing window information (id, title, app,
        visibility, and — for wmctrl results — the desktop number), sorted
        by app then title. Errors are printed and may yield an empty list.
    """
    windows = []

    # Try using wmctrl if available
    try:
        # Check if wmctrl is installed
        check_process = subprocess.run(["which", "wmctrl"], capture_output=True)
        if check_process.returncode == 0:
            # Get the list of windows
            wmctrl_process = subprocess.run(["wmctrl", "-l"], capture_output=True, text=True)

            if wmctrl_process.returncode == 0:
                window_data = wmctrl_process.stdout.strip().split('\n')

                for line in window_data:
                    if not line:
                        continue

                    parts = line.split(None, 3)
                    if len(parts) < 4:
                        continue

                    # wmctrl -l columns are: window id, desktop number,
                    # client host name, window title. The third column is the
                    # HOST, not the owning application, so it must not be
                    # reported as "app" (previous code did exactly that).
                    window_id, desktop, _host, title = parts

                    # Derive the app name from the window title, using the
                    # same heuristic as _get_active_apps_linux in
                    # get_active_apps_tool.py for consistency.
                    app_name = title.split(' - ')[-1] if ' - ' in title else title

                    # Create the window info dictionary
                    window_info = {
                        "id": window_id,
                        "title": title,
                        "app": app_name,
                        "desktop": desktop,
                        "visible": True  # wmctrl -l only shows visible windows
                    }

                    windows.append(window_info)

                # Sort windows by application name and then by window title
                windows.sort(key=lambda w: (w.get("app", "").lower(), w.get("title", "").lower()))
    except Exception as e:
        print(f"Error getting windows with wmctrl: {str(e)}")

    # If wmctrl failed, try using xwininfo and xprop
    if not windows:
        try:
            # Get the list of window IDs
            xwininfo_process = subprocess.run(["xwininfo", "-root", "-children"], capture_output=True, text=True)

            if xwininfo_process.returncode == 0:
                lines = xwininfo_process.stdout.strip().split('\n')

                # Parse the output to find window IDs
                window_ids = []
                for line in lines:
                    # Look for lines with window IDs in hexadecimal format
                    if "0x" in line and "child" in line.lower():
                        parts = line.split()
                        for part in parts:
                            if part.startswith("0x"):
                                window_ids.append(part)
                                break

                # Get information for each window
                for window_id in window_ids:
                    # Get window name
                    xprop_name_process = subprocess.run(["xprop", "-id", window_id, "WM_NAME"], capture_output=True, text=True)

                    # Get window class (application)
                    xprop_class_process = subprocess.run(["xprop", "-id", window_id, "WM_CLASS"], capture_output=True, text=True)

                    # Extract the window title
                    title = ""
                    if xprop_name_process.returncode == 0:
                        output = xprop_name_process.stdout.strip()
                        if "=" in output:
                            title = output.split("=", 1)[1].strip().strip('"')

                    # Extract the application name (WM_CLASS: instance, class)
                    app_name = ""
                    if xprop_class_process.returncode == 0:
                        output = xprop_class_process.stdout.strip()
                        if "=" in output:
                            classes = output.split("=", 1)[1].strip().strip('"').split('", "')
                            app_name = classes[-1] if classes else ""

                    # Create the window info dictionary
                    window_info = {
                        "id": window_id,
                        "title": title,
                        "app": app_name,
                        "visible": True  # Assuming all retrieved windows are visible
                    }

                    windows.append(window_info)

                # Sort windows by application name and then by window title
                windows.sort(key=lambda w: (w.get("app", "").lower(), w.get("title", "").lower()))
        except Exception as e:
            print(f"Error getting windows with xwininfo/xprop: {str(e)}")

    return windows
340 |
341 |
def get_available_windows() -> Dict[str, Any]:
    """
    Get detailed information about all available windows currently displayed on screen.

    Returns:
        Dictionary with platform, success status, and list of windows
    """
    system_name = platform.system().lower()

    # Dispatch to the platform-specific collector.
    collectors = {
        "darwin": _get_windows_macos,
        "macos": _get_windows_macos,
        "windows": _get_windows_windows,
        "linux": _get_windows_linux,
    }
    collector = collectors.get(system_name)
    if collector is None:
        return {
            "success": False,
            "platform": system_name,
            "error": f"Unsupported platform: {system_name}. This tool currently supports macOS, Windows, and Linux.",
            "windows": []
        }

    windows = collector()

    # If no windows were found, provide a descriptive error message
    if not windows:
        hints = {
            "darwin": "This might be due to missing screen recording permissions. Please check System Settings > Privacy & Security > Screen Recording.",
            "windows": "This might be due to insufficient permissions or no windows being displayed.",
            "linux": "This might be due to wmctrl or xwininfo not being installed or no windows being displayed.",
        }
        return {
            "success": False,
            "platform": system_name,
            "error": "No windows could be detected on your screen. " + hints.get(system_name, ""),
            "windows": []
        }

    return {
        "success": True,
        "platform": system_name,
        "count": len(windows),
        "windows": windows
    }
389 |
390 |
async def handle_get_available_windows(arguments: dict) -> List[types.TextContent]:
    """Handle the get_available_windows tool invocation."""
    payload = json.dumps(get_available_windows(), indent=2)
    return [types.TextContent(type="text", text=payload)]
396 |
--------------------------------------------------------------------------------
/src/aidd/tools/image_tools.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import io
3 | import os
4 |
5 | from mcp.types import TextContent
6 | from PIL import Image
7 |
8 | from .state import state
9 |
10 | # Maximum file size (100MB)
11 | MAX_FILE_SIZE = 100 * 1024 * 1024
12 |
13 | # Image size constraints
14 | MIN_WIDTH = 20
15 | MAX_WIDTH = 800
16 |
def read_image_file_tool():
    """Build the MCP tool definition for the read_image_file tool."""
    description = (
        "Read an image file from the file system and return its contents as a base64-encoded string. "
        "WHEN TO USE: When you need to view or process image files, include images in responses, analyze "
        "image content, or convert images to a format that can be transmitted as text. Useful for examining "
        "screenshots, diagrams, photos, or any visual content stored in the file system. "
        "WHEN NOT TO USE: When you only need information about the image file without its contents "
        "(use get_file_info instead), when working with extremely large images (over 100MB), or when you "
        "need to read text files (use read_file instead). "
        "RETURNS: A base64-encoded data URI string prefixed with the appropriate MIME type "
        "(e.g., 'data:image/png;base64,...'). Images that are very small or very large will be automatically "
        "resized to between 20-800 pixels wide while maintaining aspect ratio. This tool supports common image "
        "formats like PNG, JPEG, GIF, and WebP. Only works within the allowed directory."
    )
    path_description = (
        "Path to the image file to read. This must be a valid image file in a supported format "
        "(PNG, JPEG, GIF, WebP). Examples: 'screenshots/screen.png', 'images/logo.jpg', "
        "'diagrams/flowchart.gif'. Both absolute and relative paths are supported, but must be "
        "within the allowed workspace."
    )
    max_size_description = (
        "Maximum file size in bytes to allow. Files larger than this size will be rejected to "
        "prevent memory issues. Default is 100MB (104,857,600 bytes). For most use cases, the "
        "default value is sufficient, but you can lower this when working with limited memory."
    )
    # Only "path" is required; "max_size" is an optional safety limit.
    input_schema = {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": path_description
            },
            "max_size": {
                "type": "integer",
                "description": max_size_description,
                "optional": True
            }
        },
        "required": ["path"]
    }
    return {
        "name": "read_image_file",
        "description": description,
        "inputSchema": input_schema,
    }
52 |
async def handle_read_image_file(arguments: dict):
    """Read an image file, optionally resize it, and return it base64-encoded.

    Args:
        arguments: Tool arguments; requires "path" (str) and accepts
            "max_size" (int, bytes; defaults to MAX_FILE_SIZE).

    Returns:
        A single-element list with a TextContent whose text is a data URI:
        ``data:image/<format>;base64,<payload>``.

    Raises:
        ValueError: On a missing path, a path outside the allowed directory,
            a missing/oversized file, an unsupported format, or decode errors.
    """
    path = arguments.get("path")
    max_size = arguments.get("max_size", MAX_FILE_SIZE)

    if not path:
        raise ValueError("path must be provided")

    # Determine full path based on whether input is absolute or relative
    if os.path.isabs(path):
        full_path = os.path.abspath(path)  # Just normalize the absolute path
    else:
        # For relative paths, join with allowed_directory
        full_path = os.path.abspath(os.path.join(state.allowed_directory, path))

    # Containment check: the path must BE the allowed directory or live under
    # it. A bare prefix test would wrongly accept sibling paths such as
    # "/allowed_dir_evil" when the allowed directory is "/allowed_dir".
    allowed_prefix = state.allowed_directory.rstrip(os.sep) + os.sep
    if full_path != state.allowed_directory and not full_path.startswith(allowed_prefix):
        raise ValueError(f"Access denied: Path ({full_path}) must be within allowed directory ({state.allowed_directory})")

    if not os.path.exists(full_path):
        raise ValueError(f"File does not exist: {full_path}")
    if not os.path.isfile(full_path):
        raise ValueError(f"Path is not a file: {full_path}")

    # Check file size before attempting to read
    file_size = os.path.getsize(full_path)
    if file_size > max_size:
        raise ValueError(f"File size ({file_size} bytes) exceeds maximum allowed size ({max_size} bytes)")

    try:
        # Try to open the image with PIL to validate it's a valid image
        with Image.open(full_path) as img:
            # Image.format can be None; guard before lower() so we fall
            # through to the extension-based detection instead of raising
            # AttributeError (which made the fallback below unreachable).
            image_format = (img.format or "").lower()
            if not image_format:
                # Try to determine format from file extension
                ext = os.path.splitext(full_path)[1].lower().lstrip('.')
                if ext in ['jpg', 'jpeg']:
                    image_format = 'jpeg'
                elif ext in ['png', 'gif', 'webp']:
                    image_format = ext
                else:
                    raise ValueError(f"Unsupported image format: {ext}")

            # Clamp the width into [MIN_WIDTH, MAX_WIDTH], keeping aspect ratio
            if img.width > MAX_WIDTH or img.width < MIN_WIDTH:
                target_width = MAX_WIDTH if img.width > MAX_WIDTH else MIN_WIDTH
                ratio = target_width / img.width
                new_height = int(img.height * ratio)
                img = img.resize((target_width, new_height), Image.Resampling.LANCZOS)

            # Serialize the (possibly resized) image back to bytes
            img_byte_arr = io.BytesIO()
            if image_format == 'jpeg':
                img.save(img_byte_arr, format=image_format, quality=85)  # Specify quality for JPEG
            else:
                img.save(img_byte_arr, format=image_format)
            img_byte_arr = img_byte_arr.getvalue()

            # Convert to base64
            base64_data = base64.b64encode(img_byte_arr).decode('utf-8')

            # Return the image data with its type
            return [TextContent(
                type="text",
                text=f"data:image/{image_format};base64,{base64_data}"
            )]
    except Image.UnidentifiedImageError:
        raise ValueError(f"File is not a valid image: {path}")
    except Exception as e:
        raise ValueError(f"Error reading image file: {str(e)}")
128 |
--------------------------------------------------------------------------------
/src/aidd/tools/other_tools.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import List, Dict, Any, Callable
3 |
4 | from mcp.types import TextContent
5 |
6 | from .state import state
7 |
8 |
def batch_tools_tool():
    """Return the MCP tool specification for the batch_tools tool."""
    invocation_item_schema = {
        "type": "object",
        "properties": {
            "tool": {
                "type": "string",
                "description": "Name of the tool to invoke. Must be a valid tool name registered in the system."
            },
            "arguments": {
                "type": "object",
                "description": "Arguments to pass to the tool. These should match the required arguments "
                               "for the specified tool."
            }
        },
        "required": ["tool", "arguments"]
    }

    input_schema = {
        "type": "object",
        "properties": {
            "description": {
                "type": "string",
                "description": "A short (3-5 word) description of the batch operation. This helps identify the purpose "
                               "of the batch and provides context for the results. Examples: 'Setup new project', "
                               "'Analyze codebase', 'Gather system info'."
            },
            "sequential": {
                "type": "boolean",
                "description": "Whether to run tools in sequential order (true) or parallel when possible (false). "
                               "Use sequential mode when tools need to build on the results of previous tools. "
                               "Default is false (parallel execution).",
                "default": False
            },
            "invocations": {
                "type": "array",
                "items": invocation_item_schema,
                "description": "List of tool invocations to execute. Each invocation specifies a tool name and its arguments. "
                               "These will be executed in parallel when possible, or serially when necessary."
            }
        },
        "required": ["description", "invocations"]
    }

    return {
        "name": "batch_tools",
        "description": "Execute multiple tool invocations in parallel or serially. "
                       "WHEN TO USE: When you need to run multiple operations efficiently in a single request, "
                       "combine related operations, or gather results from different tools. Useful for bulk operations, "
                       "coordinated tasks, or performing multiple queries simultaneously. "
                       "WHEN NOT TO USE: When operations need to be performed strictly in sequence where each step depends "
                       "on the previous step's result, when performing simple operations that don't benefit from batching, "
                       "or when you need fine-grained error handling. "
                       "RETURNS: Results from all tool invocations grouped together. Each result includes the tool name "
                       "and its output. If any individual tool fails, its error is included but other tools continue execution. "
                       "Parallelizable tools are executed concurrently for performance. Each tool's output is presented in "
                       "a structured format along with the description you provided. "
                       "IMPORTANT NOTE: All tools in the batch execute in the same working directory context. If a tool creates a directory "
                       "and a subsequent tool needs to work inside that directory, you must either use paths relative to the current working directory "
                       "or include an explicit tool invocation to change directories (e.g., update_allowed_directory).",
        "inputSchema": input_schema,
    }
66 |
67 |
async def handle_batch_tools(arguments: dict) -> List[TextContent]:
    """Run a batch of tool invocations, serially or concurrently.

    Validates every requested tool up front, then executes each invocation.
    In sequential mode execution stops at the first failure; in parallel
    mode all invocations run to completion and failures are reported inline.
    Returns a header block followed by one section per invocation.
    """
    # Imported lazily to avoid a circular import at module load time.
    from . import TOOL_HANDLERS

    description = arguments.get("description")
    invocations = arguments.get("invocations", [])
    sequential = arguments.get("sequential", False)

    if not description:
        raise ValueError("Description must be provided")
    if not invocations:
        raise ValueError("Invocations list must not be empty")

    # Fail fast: reject the whole batch if any tool name is missing/unknown.
    for position, invocation in enumerate(invocations):
        requested = invocation.get("tool")
        if not requested:
            raise ValueError(f"Tool name missing in invocation #{position+1}")
        if requested not in TOOL_HANDLERS:
            raise ValueError(f"Unknown tool '{requested}' in invocation #{position+1}")

    mode_label = "Sequential" if sequential else "Parallel"
    summary = f"Batch Operation: {description}\nExecution Mode: {mode_label}\n"
    output: List[TextContent] = [TextContent(type="text", text=summary)]

    def render(position: int, name: str, outcome: dict) -> None:
        # Append one tool's section: a banner line plus either the tool's
        # own content or its error message.
        label = "SUCCESS" if outcome["success"] else "ERROR"
        banner = f"[{position+1}] {name} - {label}\n"
        output.append(TextContent(type="text", text=f"\n{banner}{'=' * len(banner)}\n"))
        if outcome["success"]:
            output.extend(outcome["content"])
        else:
            output.append(TextContent(type="text", text=f"Error: {outcome['error']}"))

    if sequential:
        for position, invocation in enumerate(invocations):
            name = invocation.get("tool")
            outcome = await _execute_tool_with_error_handling(
                TOOL_HANDLERS[name], invocation.get("arguments", {}), name, position
            )
            render(position, name, outcome)
            if not outcome["success"]:
                # Sequential mode aborts on the first failure; note any
                # invocations that were skipped as a result.
                remaining = len(invocations) - position - 1
                if remaining:
                    output.append(TextContent(
                        type="text",
                        text=f"\nExecution stopped after failure. Remaining {remaining} tools were not executed."
                    ))
                break
    else:
        # Launch every invocation concurrently; gather preserves order.
        pending = [
            asyncio.create_task(_execute_tool_with_error_handling(
                TOOL_HANDLERS[invocation.get("tool")],
                invocation.get("arguments", {}),
                invocation.get("tool"),
                position,
            ))
            for position, invocation in enumerate(invocations)
        ]
        for outcome in await asyncio.gather(*pending):
            render(outcome["index"], outcome["tool_name"], outcome)

    return output
169 |
170 |
171 | async def _execute_tool_with_error_handling(handler, arguments, tool_name, index):
172 | """Execute a single tool with error handling."""
173 | try:
174 | content = await handler(arguments)
175 | return {
176 | "tool_name": tool_name,
177 | "index": index,
178 | "success": True,
179 | "content": content
180 | }
181 | except Exception as e:
182 | return {
183 | "tool_name": tool_name,
184 | "index": index,
185 | "success": False,
186 | "error": str(e)
187 | }
188 |
189 |
def think_tool():
    """Return the MCP tool specification for the think scratch-pad tool."""
    thought_schema = {
        "type": "string",
        "description": "Your step-by-step thinking process, including: breaking down problems, exploring alternatives, "
                       "considering pros/cons, examining assumptions, listing requirements, or working through edge cases. "
                       "Structure your thinking using markdown elements like bullet points, numbered lists, headings, or code blocks. "
                       "The more systematic your thinking, the better the outcome."
    }
    return {
        "name": "think",
        "description": "Use the tool to methodically think through a complex problem step-by-step. "
                       "WHEN TO USE: When tackling complex reasoning tasks that benefit from breaking down problems, exploring multiple perspectives, "
                       "or reasoning through chains of consequences. Ideal for planning system architecture, debugging complex issues, "
                       "anticipating edge cases, weighing tradeoffs, or making implementation decisions. "
                       "WHEN NOT TO USE: For simple explanations, direct code writing, retrieving information, or when immediate action is needed. "
                       "RETURNS: Your structured thinking process formatted as markdown. This tool helps you methodically document your reasoning "
                       "without making repository changes. Structuring your thoughts with this tool can lead to more reliable reasoning "
                       "and better decision-making, especially for complex problems where it's easy to overlook important considerations.",
        "inputSchema": {
            "type": "object",
            "properties": {"thought": thought_schema},
            "required": ["thought"],
        },
    }
215 |
216 |
async def handle_think(arguments: dict) -> List[TextContent]:
    """Echo a reasoning step back as markdown without touching the repository."""
    thought = arguments.get("thought")

    if not thought:
        raise ValueError("Thought must be provided")

    # Wrap the thought in a markdown document with a trailing disclaimer.
    rendered = (
        f"# Thought Process\n\n"
        f"{thought}\n\n"
        "---\n"
        "*Note: This is a thinking tool used for reasoning and brainstorming. No changes were made to the repository.*\n"
    )

    return [TextContent(type="text", text=rendered)]
237 |
--------------------------------------------------------------------------------
/src/aidd/tools/path_tools.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from mcp.types import TextContent
4 | from .state import state
5 |
6 |
def get_allowed_directory_tool():
    """Return the MCP tool specification for get_allowed_directory."""
    # No inputs: the tool simply reports the current workspace root.
    empty_schema = {
        "type": "object",
        "properties": {},
        "required": [],
    }
    return {
        "name": "get_allowed_directory",
        "description": "Get the current working directory that this server is allowed to access. "
                       "WHEN TO USE: When you need to understand the current workspace boundaries, determine "
                       "the root directory for relative paths, or verify where file operations are permitted. "
                       "Useful for commands that need to know the allowed workspace root. "
                       "WHEN NOT TO USE: When you already know the current working directory or when you need "
                       "to actually list files in the directory (use directory_listing instead). "
                       "RETURNS: A string containing the absolute path to the current allowed working directory. "
                       "This is the root directory within which all file operations must occur.",
        "inputSchema": empty_schema,
    }
25 |
def update_allowed_directory_tool():
    """Return the MCP tool specification for update_allowed_directory."""
    directory_schema = {
        "type": "string",
        "description": "Directory to allow access to. Must be an absolute path that exists on the system. "
                       "Use ~ to refer to the user's home directory. Examples: '/Users/username/projects', "
                       "'~/Documents/code', '/home/user/repositories'. The directory must exist and be "
                       "accessible to the user running the application."
    }
    return {
        "name": "update_allowed_directory",
        "description": "Change the working directory that this server is allowed to access. "
                       "WHEN TO USE: When you need to switch between different projects, change the workspace "
                       "root to a different directory, or expand/modify the boundaries of allowed file operations. "
                       "Useful when working with multiple projects or repositories in different locations. "
                       "WHEN NOT TO USE: When you only need to create a subdirectory within the current workspace "
                       "(use create_directory instead), or when you just want to list files in a different directory "
                       "(use directory_listing instead). "
                       "RETURNS: A confirmation message indicating that the allowed directory has been successfully "
                       "updated to the new path.",
        "inputSchema": {
            "type": "object",
            "properties": {"directory": directory_schema},
            "required": ["directory"],
        },
    }
53 |
async def handle_get_allowed_directory(arguments: dict):
    """Report the directory this server is currently permitted to access."""
    current = state.allowed_directory
    return [TextContent(type="text", text=f"Allowed directory: {current}")]
60 |
async def handle_update_allowed_directory(arguments: dict):
    """Validate a directory path and persist it as the new allowed workspace.

    Accepts ~-prefixed paths (expanded to the user's home), requires an
    absolute path to an existing directory, and stores the normalized path.
    Raises ValueError for a missing, relative, or non-directory path.
    """
    target = arguments.get("directory")
    if not target:
        raise ValueError("directory must be provided")

    # ~ and ~user prefixes refer to home directories; expand them first.
    if target.startswith("~"):
        target = os.path.expanduser(target)

    if not os.path.isabs(target):
        raise ValueError("Directory must be an absolute path")

    # Collapse any ".."/"." components into a canonical absolute path.
    target = os.path.abspath(target)

    if not os.path.isdir(target):
        raise ValueError(f"Path is not a directory: {target}")

    # Persist via the shared state object so the setting survives restarts.
    state.allowed_directory = target
    return [TextContent(
        type="text",
        text=f"Successfully updated allowed directory to: {state.allowed_directory}"
    )]
87 |
--------------------------------------------------------------------------------
/src/aidd/tools/state.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 |
5 |
6 | # Global state management
# Global state management
class GlobalState:
    """Process-wide persistent configuration for the server.

    Settings (currently just the allowed working directory) are stored in
    ~/.skydeckai-code/config.json so they survive restarts. All reads go
    through _load_config and all writes through _save_config, both of which
    are deliberately best-effort: a missing or corrupt config file behaves
    like an empty one, and write failures are ignored rather than crashing
    the server.
    """

    def __init__(self):
        # config_dir: directory holding the JSON config file.
        # config_file: the JSON file itself.
        self.config_dir = Path.home() / '.skydeckai-code'
        self.config_file = self.config_dir / 'config.json'
        self._ensure_config_dir()

    def _ensure_config_dir(self):
        """Ensure the config directory exists."""
        # parents=True keeps this robust if the path ever gains intermediate
        # components; exist_ok makes the call idempotent.
        self.config_dir.mkdir(parents=True, exist_ok=True)

    def _load_config(self) -> dict:
        """Load the config file, returning {} when missing or unreadable."""
        if not self.config_file.exists():
            return {}
        try:
            with open(self.config_file, 'r') as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            # Treat a corrupt/unreadable config as empty rather than failing.
            return {}

    def _save_config(self, config: dict):
        """Save the config file."""
        try:
            with open(self.config_file, 'w') as f:
                json.dump(config, f, indent=2, sort_keys=True)
        except OSError:
            pass  # Silently fail if we can't write the config

    @property
    def allowed_directory(self) -> str:
        """Get the allowed directory, falling back to Desktop if not set."""
        config = self._load_config()
        return config.get('allowed_directory', str(Path.home() / "Desktop"))

    @allowed_directory.setter
    def allowed_directory(self, value: str):
        """Set the allowed directory and persist it.

        Merges the new value into the existing config instead of replacing
        the whole file, so any other persisted settings are preserved.
        """
        config = self._load_config()
        config['allowed_directory'] = value
        self._save_config(config)

# Single instance to be shared across modules
state = GlobalState()
48 |
--------------------------------------------------------------------------------
/src/aidd/tools/system_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import platform
3 | import re
4 | import subprocess
5 | from typing import Any, Dict, List
6 |
7 | import mcp.types as types
8 | import psutil
9 |
10 | from .state import state
11 |
12 |
def get_system_info_tool():
    """Return the MCP tool specification for get_system_info."""
    # The tool takes no arguments; everything is gathered from the host.
    return {
        "name": "get_system_info",
        "description": "Get detailed system information about the host computer. "
                       "WHEN TO USE: When you need to understand the system environment, diagnose performance issues, "
                       "verify hardware specifications, check resource availability, or determine the operating environment "
                       "for compatibility reasons. Useful for system analysis, troubleshooting, environment verification, "
                       "and providing context-aware assistance. "
                       "WHEN NOT TO USE: When you only need the current working directory (use get_allowed_directory instead), "
                       "when specific file information is needed (use get_file_info instead), or when you need to interact "
                       "with applications rather than system information (use get_active_apps instead). "
                       "RETURNS: A JSON object containing comprehensive system details including: working directory path, "
                       "OS details (name, version, architecture), Python version, WiFi network name, CPU information "
                       "(cores, usage), memory statistics (total, available, usage percentage), disk information "
                       "(total, free, usage percentage), and on macOS, additional hardware details (model, chip, serial number).",
        "inputSchema": {
            "type": "object",
            "properties": {},
            "required": [],
        },
    }
34 |
def get_size(bytes: int, suffix: str = "B") -> str:
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'

    Note: the parameter is named `bytes` (shadowing the builtin) to keep
    backward compatibility with existing keyword callers.
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor
    # Values >= 1024^6 previously fell off the loop and returned None;
    # report them in exbibytes instead.
    return f"{bytes:.2f}E{suffix}"
47 |
def get_wifi_info() -> str:
    """Get current WiFi network name across different platforms.

    Shells out to the platform's native networking tool (system_profiler on
    macOS, nmcli on Linux, netsh on Windows) and scrapes the SSID from its
    text output. Returns "Not available" when the command fails, the
    platform is unrecognized, or no connected network is found.
    """
    try:
        if platform.system() == "Darwin": # macOS
            # system_profiler prints a "Current Network Information:" marker;
            # the SSID is expected on the following non-empty line as a
            # heading ending in ':' (stripped before returning).
            cmd = ["system_profiler", "SPAirPortDataType"]
            process = subprocess.run(cmd, capture_output=True, text=True)
            if process.returncode == 0:
                for line in process.stdout.split('\n'):
                    if "Current Network Information:" in line:
                        # NOTE(review): .index(line) finds the FIRST line with this
                        # exact text, so this assumes the marker line is unique in
                        # the output — confirm on machines with multiple adapters.
                        next_line = next((line_text.strip() for line_text in process.stdout.split('\n')[process.stdout.split('\n').index(line)+1:] if line_text.strip()), "")
                        return next_line.rstrip(':')
        elif platform.system() == "Linux":
            # Terse nmcli output: one "active:ssid" pair per line.
            cmd = ["nmcli", "-t", "-f", "active,ssid", "dev", "wifi"]
            process = subprocess.run(cmd, capture_output=True, text=True)
            if process.returncode == 0:
                for line in process.stdout.split('\n'):
                    if line.startswith('yes:'):
                        # NOTE(review): split(':')[1] truncates SSIDs that contain
                        # ':' themselves — verify this is acceptable.
                        return line.split(':')[1]
        elif platform.system() == "Windows":
            cmd = ["netsh", "wlan", "show", "interfaces"]
            process = subprocess.run(cmd, capture_output=True, text=True)
            if process.returncode == 0:
                for line in process.stdout.split('\n'):
                    # Match the "SSID" field while skipping the "BSSID" line.
                    if "SSID" in line and "BSSID" not in line:
                        return line.split(":")[1].strip()
    except Exception:
        pass # Silently handle any errors and return "Not available"
    return "Not available"
76 |
def get_mac_details() -> Dict[str, str]:
    """Collect Mac-specific hardware details via system_profiler.

    Returns a dict that may contain 'model', 'chip', and 'serial_number';
    empty on non-macOS platforms or when probing fails.
    """
    if platform.system() != "Darwin":
        return {}

    details: Dict[str, str] = {}
    try:
        result = subprocess.run(
            ["system_profiler", "SPHardwareDataType", "SPSoftwareDataType"],
            capture_output=True,
            text=True,
        )
        if result.returncode == 0:
            # Each regex captures the value portion of a "Label: value" line
            # in the system_profiler text output.
            field_patterns = {
                "model": r"Model Name: (.*?)\n",
                "chip": r"Chip: (.*?)\n",
                "serial_number": r"Serial Number \(system\): (.*?)\n",
            }
            for key, pattern in field_patterns.items():
                match = re.search(pattern, result.stdout)
                if match:
                    details[key] = match.group(1).strip()
    except Exception:
        pass  # Best-effort: missing details are simply omitted.

    return details
110 |
def get_system_details() -> Dict[str, Any]:
    """Gather detailed system information.

    Returns a dict with these top-level keys:
        working_directory: current allowed workspace path
        system: os, os_version, architecture, python_version
        wifi_network: current SSID or "Not available"
        cpu: physical_cores, logical_cores, total_cpu_usage
        memory: total, available, used_percentage (human-readable via get_size)
        disk: total, free, used_percentage for the root filesystem
        mac_details: model/chip/serial_number (present only on macOS)
    """
    # Sample each psutil source once so the reported numbers come from a
    # single consistent snapshot (the original queried virtual_memory and
    # disk_usage three times each).
    memory = psutil.virtual_memory()
    disk = psutil.disk_usage('/')

    system_info: Dict[str, Any] = {
        "working_directory": state.allowed_directory,
        "system": {
            "os": platform.system(),
            "os_version": platform.release(),
            "architecture": platform.machine(),
            "python_version": platform.python_version(),
        },
        "wifi_network": get_wifi_info(),

        # CPU Information
        "cpu": {
            "physical_cores": psutil.cpu_count(logical=False),
            "logical_cores": psutil.cpu_count(logical=True),
            "total_cpu_usage": f"{psutil.cpu_percent()}%"
        },

        # Memory Information
        "memory": {
            "total": get_size(memory.total),
            "available": get_size(memory.available),
            "used_percentage": f"{memory.percent}%"
        },

        # Disk Information
        "disk": {
            "total": get_size(disk.total),
            "free": get_size(disk.free),
            "used_percentage": f"{disk.percent}%"
        }
    }

    # Only macOS exposes the extra hardware details block.
    if platform.system() == "Darwin":
        system_info["mac_details"] = get_mac_details()

    return system_info
186 |
async def handle_get_system_info(arguments: dict) -> List[types.TextContent]:
    """Return the collected system details as a pretty-printed JSON payload."""
    payload = json.dumps(get_system_details(), indent=2)
    return [types.TextContent(type="text", text=payload)]
191 |
--------------------------------------------------------------------------------