├── servers
│   ├── __init__.py
│   ├── .python-version
│   ├── pyproject.toml
│   ├── .env
│   ├── __main__.py
│   └── shttp_modules
│       └── filesystem.py
├── .gitattributes
├── .gitignore
├── images
│   ├── alphaevolve.png
│   ├── cli-20251016.png
│   ├── tools-20251016.png
│   └── framework-20251016.png
├── .cursor
│   └── mcp.json
├── .devcontainer
│   ├── compose.yaml
│   ├── devcontainer.json
│   └── Dockerfile
├── clients
│   ├── .env
│   ├── agent.py
│   └── openai_client.py
├── AGENTS.md
├── README.md
├── LICENSE.txt
└── toolkami.rb
/servers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/servers/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.gif filter=lfs diff=lfs merge=lfs -text
2 | *.png filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | content_history.json
3 | .devcontainer
4 | .cursor/rules
5 | .vscode
6 | .beads
--------------------------------------------------------------------------------
/images/alphaevolve.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ecfabcf4cdd992ca71236a93393035c7a90d93d7ea210155e3ab031b3b6fb86f
3 | size 631300
4 |
--------------------------------------------------------------------------------
/images/cli-20251016.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:2af008bed83ffe6e76d2bd11920ba3fcdaeaf713c71ee9343c9c7152253a379e
3 | size 78951
4 |
--------------------------------------------------------------------------------
/images/tools-20251016.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:e2107f9426ca22b6f853369152a4cd7d6130a8de5e59b90cc8ee5fc0458cf0a3
3 | size 51216
4 |
--------------------------------------------------------------------------------
/images/framework-20251016.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:14e13923c92f7525f0af2549a4741b152c65b63ed446b79093374dcf3bd6090c
3 | size 145334
4 |
--------------------------------------------------------------------------------
/.cursor/mcp.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "toolkami-fs": {
4 | "url": "http://127.0.0.1:8002/filesystem/mcp",
5 | "headers": {
6 | "API_KEY": "value"
7 | }
8 | }
9 | }
10 | }
--------------------------------------------------------------------------------
/.devcontainer/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | toolkami-agent:
3 | # platform: linux/amd64
4 | build:
5 | context: ..
6 | dockerfile: .devcontainer/Dockerfile
7 |
8 | volumes:
9 | - ../..:/workspaces:cached
10 |
11 | command: sleep infinity
--------------------------------------------------------------------------------
/servers/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "servers"
3 | version = "0.1.0"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "uvicorn",
9 | "starlette",
10 | "mcp[cli]",
11 | "argparse",
12 | "langchain_google_vertexai",
13 | "google-genai",
14 | "google-cloud-aiplatform",
15 | "browser_use",
16 | ]
17 |
18 |
--------------------------------------------------------------------------------
/clients/.env:
--------------------------------------------------------------------------------
1 | # Gemini
2 | # GEMINI_API_KEY=
3 | # MAIN_MODEL=gemini-2.5-pro-preview-03-25
4 |
5 | # Vertex
6 | ## NOTE: gcloud auth application-default login && gcloud config set project
7 | # GOOGLE_VERTEX_PROJECT=
8 | # GOOGLE_VERTEX_LOCATION=
9 | # MAIN_MODEL=google/gemini-2.5-pro-preview-03-25
10 |
11 | # OpenAI
12 | ## for Vertex OpenAI-compat API: `gcloud auth application-default print-access-token`
13 | # OPENAI_API_KEY=
14 | # MAIN_MODEL=gpt-4.1
15 |
16 | # Anthropic
17 | # ANTHROPIC_API_KEY=
18 | # MAIN_MODEL=claude-3-7-sonnet-20250219
--------------------------------------------------------------------------------
/servers/.env:
--------------------------------------------------------------------------------
1 | # Gemini
2 | # GEMINI_API_KEY=
3 | # BROWSE_MODEL=gemini-2.5-flash-preview-04-17
4 |
5 | # Vertex
6 | ## NOTE: gcloud auth application-default login && gcloud config set project
7 | # GOOGLE_VERTEX_PROJECT=
8 | # GOOGLE_VERTEX_LOCATION=
9 | # BROWSE_MODEL=gemini-2.5-flash-preview-04-17
10 |
11 | # OpenAI
12 | ## for Vertex OpenAI-compat API: `gcloud auth application-default print-access-token`
13 | # OPENAI_API_KEY=
14 | # BROWSE_MODEL=gpt-4o
15 |
16 | # Anthropic
17 | # ANTHROPIC_API_KEY=
18 | # BROWSE_MODEL=claude-3-5-sonnet-20240620
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "toolkami",
3 | "dockerComposeFile": "compose.yaml",
4 | "service": "toolkami-agent",
5 | "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
6 | "features": {
7 | "ghcr.io/devcontainers/features/github-cli:1": {},
8 | "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {}
9 | },
10 | "customizations": {
11 | "vscode": {
12 | "extensions": [
13 | "ms-python.python"
14 | ]
15 | }
16 | },
17 | "postCreateCommand": "git config --global --add safe.directory /workspaces/toolkami"
18 | }
--------------------------------------------------------------------------------
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:24.04
2 |
3 | RUN apt-get update && apt-get install -y \
4 | # Dev dependencies
5 | git git-lfs curl libssl-dev libreadline-dev zlib1g-dev autoconf bison build-essential libyaml-dev libreadline-dev libncurses5-dev libffi-dev libgdbm-dev \
6 | # Utils
7 | nfs-common iputils-ping sudo wget unzip \
8 | # GCloud
9 | apt-transport-https ca-certificates gnupg curl \
10 | && rm -rf /var/lib/apt/lists/*
11 |
12 | RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg && apt-get update -y && apt-get install google-cloud-cli -y
13 |
14 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
--------------------------------------------------------------------------------
/servers/__main__.py:
--------------------------------------------------------------------------------
1 | # /// script
2 | # requires-python = ">=3.10"
3 | # dependencies = [
4 | # "requests>=2,<3",
5 | # "mcp>=1.2.0,<2",
6 | # ]
7 | # ///
8 |
9 | from contextlib import AsyncExitStack, asynccontextmanager
10 |
11 | import uvicorn
12 | from starlette.applications import Starlette
13 | from starlette.routing import Mount
14 |
15 | from shttp_modules.filesystem import mcp as filesystem_mcp
16 |
17 | def combine_lifespans(*lifespans):
18 | @asynccontextmanager
19 | async def combined_lifespan(app):
20 | async with AsyncExitStack() as stack:
21 | for l in lifespans:
22 | ctx = l(app)
23 | await stack.enter_async_context(ctx)
24 | yield
25 | return combined_lifespan
26 |
27 |
28 | main_app = Starlette(
29 | routes=[
30 | Mount("/filesystem/", app=filesystem_mcp.streamable_http_app()),
31 | ],
32 | lifespan=combine_lifespans(
33 | lambda _: filesystem_mcp.session_manager.run(),
34 | ),
35 | )
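# Illustrative only: to expose another tool module (say a hypothetical
# shttp_modules/shell.py that exports `mcp as shell_mcp`), mount it next to the
# filesystem app and register its lifespan the same way:
#   Mount("/shell/", app=shell_mcp.streamable_http_app()),
#   lambda _: shell_mcp.session_manager.run(),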
36 |
37 | if __name__ == "__main__":
38 | uvicorn.run(
39 | "__main__:main_app",
40 | host="127.0.0.1",
41 | port=8002,
42 | reload=True,
43 | reload_dirs=["."]
44 | )
--------------------------------------------------------------------------------
/clients/agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import jsonpickle
3 | from google.genai import types
4 |
5 | class Agent:
6 | def __init__(self):
7 |
8 | self.system_instruction=""
9 | self._initialize_system_instruction()
10 |
11 | if os.path.exists("content_history.json"):
12 | try:
13 | with open("content_history.json", "r") as f: # Changed to 'r' as decode doesn't need 'r+'
14 | content = f.read()
15 | if content: # Check if file is not empty
16 | self.content_history = jsonpickle.decode(content)
17 | else:
18 | self._initialize_history() # Initialize if empty
19 | except Exception as e: # Catch potential errors during read/decode
20 | print(f"Warning: Error reading or decoding content_history.json: {e}. Initializing fresh history.")
21 | self._initialize_history()
22 | else:
23 | self._initialize_history()
24 |
25 | def _initialize_system_instruction(self):
26 | """Initializes the system instruction with the default prompt."""
27 | self.system_instruction = """You are a pro-active AI assistant that is confident and proceeds to carry out next action required to complete the user's request.
28 | Always use the tool 'ask' to ask the user for clarification if user input is required e.g. what to do next.
29 | """
30 |
31 | def _initialize_history(self):
32 | """Initializes the content history with the default prompt."""
33 | self.content_history = []
34 |
35 | def add_content(self, content: types.Content):
36 | """Add a content object to the content history."""
37 | self.content_history.append(content)
38 |
39 | def save_history(self):
40 | """Save the content history to a file."""
41 | with open("content_history.json", "w") as f:
42 | f.write(jsonpickle.encode(self.content_history, indent=2))
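# Typical usage, as wired up in clients/openai_client.py (a sketch, not an
# additional API of this module):
#   agent = Agent()
#   agent.add_content({"role": "user", "content": "Hello"})
#   agent.save_history()  # persists the history to content_history.json via jsonpickle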
--------------------------------------------------------------------------------
/servers/shttp_modules/filesystem.py:
--------------------------------------------------------------------------------
1 | # server.py
2 | from mcp.server.fastmcp import FastMCP
3 |
4 | import re
5 | import uuid
6 | import shutil
7 | import pathlib
8 | from typing import Dict, Any, List, Optional
9 |
10 | # Create an MCP server
11 | mcp = FastMCP("Filesystem", stateless_http=True)
12 |
13 | @mcp.tool()
14 | async def read_file(path: str) -> str:
15 | """Read the contents of a file."""
16 | return pathlib.Path(path).read_text()
17 |
18 | @mcp.tool()
19 | async def diff_fenced_edit_file(diff_text: str) -> Dict[str, Any]:
20 | """Edit files using a diff-fenced format and return the status.
21 |
22 | Basic Format Structure:
23 | ```diff
24 | /filename.py
25 | <<<<<<< SEARCH
26 | // original text that should be found and replaced
27 | =======
28 | // new text that will replace the original content
29 | >>>>>>> REPLACE
30 | ```
31 | """
32 | pattern = r'```diff\n(.*?)\n<<<<<<< SEARCH\n(.*?)=======\n(.*?)>>>>>>> REPLACE\n```'
33 | edit_blocks = re.findall(pattern, diff_text, re.DOTALL)
34 |
35 | blocks_edited = 0
36 | for block in edit_blocks:
37 | if len(block) == 3:
38 | file_path, search_text, replace_text = block
39 |
40 | search_text = search_text.strip()
41 | replace_text = replace_text.strip()
42 |
43 | content = pathlib.Path(file_path).read_text()
44 | original_content = content
45 |
46 | # Replace the search text with the replace text
47 | if search_text in content:
48 | content = content.replace(search_text, replace_text)
49 | else:
50 | # If search text not found and it's empty, append the replace text
51 | if not search_text.strip():
52 | content += '\n' + replace_text
53 |
54 | # Only increment blocks_edited if content actually changed
55 | if content != original_content:
56 | pathlib.Path(file_path).write_text(content)
57 | blocks_edited += 1
58 |
59 | if blocks_edited == len(edit_blocks):
60 | return {"success": True, "blocks_edited": blocks_edited}
61 |
62 | return {"success": False, "blocks_edited": blocks_edited}
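# Illustrative payload for diff_fenced_edit_file (the file path below is
# hypothetical); it replaces the matched text in /tmp/example.py in place:
#
# ```diff
# /tmp/example.py
# <<<<<<< SEARCH
# print("hello")
# =======
# print("hello, world")
# >>>>>>> REPLACE
# ```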
--------------------------------------------------------------------------------
/AGENTS.md:
--------------------------------------------------------------------------------
1 | # Trinket
2 |
3 |
4 | ## Build & Test
5 |
6 | - Run program: `uv run main.py`
7 |
8 | ## Architecture Overview
9 |
10 | - Basic UV Python project
11 |
12 | ## Conventions & Patterns
13 |
14 | ### Organize things
15 | Organizing code well makes it easy to navigate, maintain, and extend.
16 |
17 | - Organize code logically
18 | - Group related functions and classes together
19 | - Place high-level abstraction before low-level details
20 | - Function
21 |   - Keep function interfaces simple by limiting parameters and prefer returning simple types
22 |   - If a function is only called from a single place, consider in-lining it
23 |   - If a function is called from multiple places, see if it is possible to arrange for the work to be done in a single place, perhaps with flags, and in-line that
24 |   - If there are multiple versions of a function, consider making a single function with more, possibly defaulted, parameters
25 |   - If the work is close to purely functional, with few references to global state, try to make it completely functional
26 | - Object
27 |   - Initialize large structures or objects directly where they are declared. In-place construction avoids unnecessary copying or moving of data.
28 | - Variable
29 |   - Declare variables close to their usage and within the smallest necessary scope
30 |
31 | ### Control and limits
32 | Predictable control flow and bounded system resources are essential for safe execution.
33 |
34 | - Centralize control flow
35 |   - Parent function controls flow by containing all switch and if statements, while maintaining state
36 | - Practice single assignment. Avoid reassigning or updating a variable outside of true iterative calculations in loops.
37 | - Leaf functions should be purely functional with non-branching logic
38 | - In C/C++, making almost every variable `const` at initialization is good practice.
39 | - Don't exceed 3 levels of indentation. It can be reduced with (see the sketch after this list):
40 |   - Extraction: pull out part of the function into its own function
41 |   - Inversion: flipping conditions and switching to an early return
42 | - Set fixed limits: Set explicit upper bounds on loops, queues, and other data structures. This prevents infinite loops and uncontrolled resource use, following the **fail-fast** principle. This approach helps catch issues early and keeps the system stable.
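
A minimal Python sketch of the extraction and inversion techniques above (names are illustrative, not code from this repository):

```python
def summarize_titles(titles: list[str]) -> list[str]:
    # Inversion: flip the condition and return early instead of nesting the happy path.
    if not titles:
        return []
    # Extraction: the per-item work lives in its own small, purely functional helper.
    return [normalize_title(title) for title in titles]

def normalize_title(title: str) -> str:
    return title.strip().lower()
```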
43 |
44 | ### Naming things
45 | Get the nouns and verbs right. Great names capture what something is or does and create a clear, intuitive model.
46 |
47 | - Use descriptive and meaningful names for variables, functions, and files.
48 | - Avoid abbreviations unless they are widely accepted and clear (e.g., ID, URL)
49 | - Append units or qualifiers to variable names, placing them in descending order of significance (e.g., "latency_ms_max" instead of "max_latency_ms")
50 | - Use comments to explain why decisions were made, not just what the code does.
51 | - Write comments as complete sentences with correct punctuation and grammar.
52 |
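A small illustration of the unit-and-qualifier ordering above (hypothetical names):

```python
# Descending order of significance: quantity, then unit, then qualifier.
request_latency_ms_max = 250
payload_size_bytes_limit = 1_048_576
```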
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ToolKami - Simple Agents Made Easy
2 |
3 | ToolKami is an open-source ["simple" framework with conceptually clear, independent parts](https://www.youtube.com/watch?v=SxdOUGdseq4) that allows you to build and work seamlessly with AI agents. It comes with a **Command Line Interface** and curated **Tools**.
4 |
5 | [](https://toolkami.com)
6 |
7 | ## Command Line Interface (CLI)
8 |
9 | The `toolkami` CLI is a modified version of Shopify CEO Tobias's [try implementation](https://github.com/tobi/try). It extends that implementation with sandboxing capabilities and is designed with [functional core, imperative shell](https://www.destroyallsoftware.com/talks/boundaries) in mind.
10 |
11 | ### Usage
12 |
13 | NOTE: `tk`, an alias of `toolkami`, is also available.
14 |
15 | Commands:
16 |
17 | * `toolkami init [PATH]`: Generate shell function
18 | * `toolkami cd [QUERY]`: Interactive selector
19 | * `toolkami wt [NAME]`: Create worktree from current repo
20 |   * `merge`: Merge worktree changes back to parent repo
21 |   * `drop`: Delete worktree and branch
22 | * `toolkami sb`: Run Docker sandbox from .toolkami/docker-compose.yml
23 |   * `build [--no-cache]`: Build service image (pass Docker Compose flags like `--no-cache`)
24 |   * `exec [CMD...]`: Exec into the sandbox container (defaults to interactive `bash`)
25 |
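For example, a typical worktree-plus-sandbox flow might look like this (the worktree name is illustrative):

```bash
tk cd                  # pick or create a project directory interactively
toolkami wt feature-x  # create a worktree for the feature branch
toolkami sb build      # build the sandbox image from .toolkami/docker-compose.yml
toolkami sb            # start the Docker sandbox
toolkami sb exec       # drop into an interactive bash shell inside it
```
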
26 | It is designed to support multiple, concurrent agent workflows:
27 |
28 | ![ToolKami CLI](images/cli-20251016.png)
29 |
30 | ### Installation
31 | ```bash
32 | curl -sL https://raw.githubusercontent.com/aperoc/toolkami/refs/heads/main/toolkami.rb > ~/.local/toolkami.rb
33 |
34 | # Make the script executable so it can be run directly
35 | chmod +x ~/.local/toolkami.rb
36 |
37 | # Add to your shell (bash/zsh)
38 | echo >> ~/.zshrc # add new line
39 | echo 'eval "$(ruby ~/.local/toolkami.rb init)"' >> ~/.zshrc
40 | ```
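
After reloading your shell, the `toolkami` and `tk` functions are available (assuming zsh as configured above):

```bash
source ~/.zshrc   # pick up the newly added eval line
tk                # with no arguments, opens the interactive selector
```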
41 |
42 | ## Framework
43 |
44 | ToolKami's framework lets **deterministic tools** and **dynamic agents** work together seamlessly. It is designed around simplicity, composability and extensibility, which scale nicely with LLMs' increasing capabilities.
45 |
46 | All the MCP servers can be [distributed as a single file binary, thanks to UV script](https://blog.toolkami.com/mcp-server-in-a-file/).
47 |
48 | I have elaborated on the [default File and Shell tools in this blog post, along with what can be improved](https://blog.toolkami.com/openai-codex-tools/).
49 |
50 | ![ToolKami framework](images/framework-20251016.png)
51 |
52 | ### Installation
53 | ```bash
54 | # Install UV
55 |
56 | ## OSX/Linux
57 | curl -LsSf https://astral.sh/uv/install.sh | sh
58 | ## Windows
59 | powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
60 |
61 | # Start the MCP server
62 | ./servers/__main__.py
63 | ```
64 |
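Once a server is running, the bundled client can be pointed at an SSE MCP endpoint. This is a sketch: the URL is the placeholder from the client's own `--help` text, and it assumes you run the script from `clients/` so that `agent.py` is importable:

```bash
cd clients
PYTHONPATH=. uv run --script openai_client.py http://localhost:8080/sse --debug
```
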
65 | ## Use Cases
66 |
67 | ### Google's AlphaEvolve: ToolKami style
68 |
69 | A minimal implementation of AlphaEvolve using this framework with [detailed writeup](https://toolkami.com/alphaevolve-toolkami-style/) and [code](https://github.com/aperoc/toolkami/pull/5).
70 |
71 | ![AlphaEvolve](images/alphaevolve.png)
72 | (Credits to https://deepmind.google/discover/blog/alphaevolve-a-gemini-powered-coding-agent-for-designing-advanced-algorithms/)
73 |
74 | ## Social
75 | - [Website](https://toolkami.com)
76 | - [Blog](https://blog.toolkami.com/blog/)
77 | - [Twitter](https://x.com/tool_kami)
78 | - [toolkami@aperoc.com](mailto:toolkami@aperoc.com)
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/clients/openai_client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S PYTHONPATH=. uv run --script
2 | # /// script
3 | # dependencies = [ "mcp[cli]", "openai", "httpx", "anyio", "prompt_toolkit", "jsonpickle"]
4 | # ///
5 |
6 | import asyncio
7 | import logging
8 | import os
9 | import sys
10 | import functools
11 | from typing import Optional, Callable, Awaitable, TypeVar, List, Dict, Any, Tuple
12 | from contextlib import AsyncExitStack
13 | import argparse
14 |
15 | import httpx
16 | import anyio
17 |
18 | # Import prompt_toolkit
19 | from prompt_toolkit import PromptSession, print_formatted_text
20 | from prompt_toolkit.patch_stdout import patch_stdout
21 | from prompt_toolkit.formatted_text import FormattedText
22 | from prompt_toolkit.styles import Style
23 |
24 | import json
25 | from openai import OpenAI
26 |
27 | from mcp import ClientSession
28 | from mcp.client.sse import sse_client
29 |
30 | from agent import Agent
31 | from dotenv import load_dotenv
32 |
33 | load_dotenv()
34 |
35 | # Define prompt_toolkit styles dictionary
36 | PROMPT_STYLE_DICT = {
37 | "prompt": "fg:yellow",
38 | "output.model": "fg:green",
39 | "output.tool": "fg:blue",
40 | "output.error": "fg:red",
41 | "output.warning": "fg:yellow",
42 | "output.debug": "fg:gray",
43 | }
44 |
45 | # Create Style object from the dictionary
46 | PROMPT_STYLE_OBJ = Style.from_dict(PROMPT_STYLE_DICT)
47 |
48 | # Custom logging handler integrating with prompt_toolkit
49 | class PromptToolkitLogHandler(logging.Handler):
50 | def emit(self, record: logging.LogRecord):
51 | try:
52 | log_entry = self.format(record)
53 | style_class = "output.debug" # Default
54 |
55 | # Check if it's a captured warning and matches the specific uv cache path
56 | if record.name == 'py.warnings' and record.levelno == logging.WARNING and '/root/.cache/uv/' in record.getMessage():
57 | style_class = "output.warning"
58 | elif record.levelno >= logging.ERROR:
59 | style_class = "output.error"
60 | elif record.levelno >= logging.WARNING:
61 | # Use the standard warning style (yellow) for other warnings
62 | style_class = "output.warning"
63 | elif record.levelno >= logging.INFO:
64 | # Use a less prominent style for INFO
65 | style_class = "output.debug"
66 |
67 | # Ensure we only print if there's actual content
68 | if log_entry.strip():
69 | print_pt(log_entry.strip(), style_class=style_class)
70 | except Exception:
71 | self.handleError(record)
72 |
73 | # Helper function to set up logging
74 | def setup_logging(debug: bool = False):
75 | # Capture warnings issued by the warnings module
76 | logging.captureWarnings(True)
77 |
78 | root_logger = logging.getLogger()
79 | # Remove default handlers like StreamHandler to avoid duplicate output
80 | # or output going to the original stderr
81 | for handler in root_logger.handlers[:]:
82 | root_logger.removeHandler(handler)
83 |
84 | # Add our custom handler
85 | pt_handler = PromptToolkitLogHandler()
86 | # Basic formatter, showing level, logger name, and message
87 | formatter = logging.Formatter('[%(levelname)s] %(name)s: %(message)s')
88 | pt_handler.setFormatter(formatter)
89 | root_logger.addHandler(pt_handler)
90 | # Set logging level based on debug flag
91 | root_logger.setLevel(logging.DEBUG if debug else logging.INFO)
92 | # Specifically set httpx/anyio levels if they are too noisy later
93 | # logging.getLogger("httpx").setLevel(logging.WARNING)
94 | # logging.getLogger("anyio").setLevel(logging.WARNING)
95 |
96 | # Helper to print formatted text using prompt_toolkit
97 | def print_pt(text: str, style_class: str = ""):
98 | if style_class:
99 | print_formatted_text(FormattedText([(f"class:{style_class}", text)]), style=PROMPT_STYLE_OBJ)
100 | else:
101 | # Print with default style if no class specified
102 | print_formatted_text(text)
103 |
104 | # Decorator for retryable async functions
105 | def retryable(max_retries=5, delay=1, connection_errors=(httpx.ReadError, httpx.WriteError,
106 | httpx.RemoteProtocolError, httpx.ConnectError,
107 | anyio.ClosedResourceError, ConnectionError)):
108 | """
109 | Decorator for making async functions automatically retry on connection errors.
110 |
111 | Args:
112 | max_retries: Maximum number of retry attempts
113 | delay: Delay between retries in seconds
114 | connection_errors: Tuple of exception types to catch and retry
115 |
116 | Returns:
117 | Decorated function that will retry on connection errors
118 | """
119 | def decorator(func):
120 | @functools.wraps(func)
121 | async def wrapper(self, *args, **kwargs):
122 | operation_name = func.__name__
123 | retries = 0
124 |
125 | while True:
126 | try:
127 | return await func(self, *args, **kwargs)
128 | except connection_errors as e:
129 | retries += 1
130 | if retries >= max_retries:
131 | error_message = f"{operation_name} failed after {retries} attempts: {e}"
132 | print_pt(error_message, "output.error")
133 | return error_message
134 |
135 | print_pt(f"Connection error during {operation_name}: {e}. Attempting reconnect... ({retries}/{max_retries})", "output.error")
136 | if hasattr(self, 'connect') and await self.connect():
137 | print_pt(f"Reconnected. Retrying {operation_name}...", "output.debug")
138 | else:
139 | error_message = f"Reconnect failed for {operation_name}"
140 | print_pt(error_message, "output.error")
141 | return error_message
142 |
143 | await asyncio.sleep(delay)
144 | except Exception as error:
145 | error_message = f"Error processing {operation_name}: {error}"
146 | print_pt(error_message, "output.error")
147 | return error_message
148 |
149 | return wrapper
150 | return decorator
151 |
152 | def truncate_text_both_ends(text: str, max_length: int = 250):
153 | if len(text) <= max_length:
154 | return text
155 | else:
156 | return text[:max_length//2] + "..." + text[-max_length//2:]
157 |
158 | class MCPClient:
159 | def __init__(self, server_url: str):
160 | self.exit_stack = AsyncExitStack() # Use one stack for the lifetime
161 | self._stop_event = asyncio.Event() # Event to signal shutdown
162 |
163 | self.server_url = server_url # Store the server URL for reconnection
164 | self._sse_stream_context = None
165 | self.sse_stream = None
166 |
167 | self._mcp_session_context = None
168 | self.mcp_session: Optional[ClientSession] = None
169 |
170 | self.prompt_session = PromptSession(history=None)
171 |
172 | if os.getenv("GOOGLE_VERTEX_PROJECT") and os.getenv("GOOGLE_VERTEX_LOCATION"):
173 | base_url = f"https://{os.getenv('GOOGLE_VERTEX_LOCATION')}-aiplatform.googleapis.com/v1beta1/projects/{os.getenv('GOOGLE_VERTEX_PROJECT')}/locations/{os.getenv('GOOGLE_VERTEX_LOCATION')}/endpoints/openapi"
174 | self.provider = OpenAI(
175 | base_url=base_url
176 | )
177 | elif os.getenv("GEMINI_API_KEY"):
178 | self.provider = OpenAI(
179 | api_key=os.getenv("GEMINI_API_KEY"),
180 | base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
181 | )
182 | elif os.getenv("ANTHROPIC_API_KEY"):
183 | self.provider = OpenAI(
184 | api_key=os.getenv("ANTHROPIC_API_KEY"),
185 | base_url="https://api.anthropic.com/v1/"
186 | )
187 | else:
188 | self.provider = OpenAI()
189 |
190 | self.agent = Agent()
191 | if len(self.agent.content_history) == 0:
192 | print_pt("[WARNING] Adding system instruction to content history...", "output.warning")
193 | self.agent.content_history.append({
194 | "role": "system",
195 | "content": self.agent.system_instruction
196 | })
197 |
198 | async def _connect_internal(self):
199 | """Internal logic to establish a connection."""
200 | await self.cleanup()
201 | self.exit_stack = AsyncExitStack()
202 |
203 | self._sse_stream_context = sse_client(url=self.server_url)
204 | self.sse_stream = await self.exit_stack.enter_async_context(self._sse_stream_context)
205 |
206 | self._mcp_session_context = ClientSession(*self.sse_stream)
207 | self.mcp_session: ClientSession = await self.exit_stack.enter_async_context(self._mcp_session_context)
208 |
209 | await self.mcp_session.initialize()
210 | print_pt(f"[DEBUG] Initialized SSE and MCP sessions...", "output.debug")
211 |
212 | @retryable(max_retries=5, delay=1)
213 | async def connect(self):
214 | """Attempts to connect to the server with retries."""
215 | try:
216 | await self._connect_internal()
217 | print_pt(f"[DEBUG] Successfully connected to server.", "output.debug")
218 | return True
219 | except Exception as e:
220 | print_pt(f"Connection error: {e}", "output.error")
221 | raise
222 |
223 | async def cleanup(self):
224 | """Properly clean up the session, streams, and background task."""
225 | print_pt(f"[DEBUG] Initiating client cleanup...", "output.debug")
226 | self._stop_event.set()
227 | await self.exit_stack.aclose()
228 | self._mcp_session_context = None
229 | self.mcp_session = None
230 | self._sse_stream_context = None
231 | self.sse_stream = None
232 | print_pt(f"[DEBUG] Client cleanup complete.", "output.debug")
233 |
234 | async def inlined_process_query_recursive(self, query: str):
235 | if query == "":
236 | print("No query provided.")
237 | return
238 |
239 | mcp_tools = await self.mcp_session.list_tools()
240 |
241 | tools = [
242 | {
243 | "type": "function",
244 | "function": {
245 | "name": tool.name,
246 | "description": tool.description,
247 | "parameters": {
248 | k: v
249 | for k, v in tool.inputSchema.items()
250 | if k not in ["additionalProperties", "$schema", "title"]
251 | }
252 | }
253 | }
254 | for tool in mcp_tools.tools
255 | ]
256 |
257 | self.agent.add_content(
258 | {
259 | "role": "user",
260 | "content": query
261 | }
262 | )
263 |
264 | while True:
265 | if logging.getLogger().isEnabledFor(logging.DEBUG):
266 | print_pt(f"[DEBUG] Content History: {self.agent.content_history}", "output.debug")
267 |
268 | try:
269 | response = self.provider.chat.completions.create(
270 | model=os.getenv("MAIN_MODEL"),
271 | messages=self.agent.content_history,
272 | temperature=0.1,
273 | tools=tools,
274 | )
275 | except Exception as e:
276 | print_pt(f"[ERROR] Error generating content: {e}", "output.error")
277 | print_pt(str(self.agent.content_history), "output.error")
278 |
279 | if e.error.code == 400:
280 | raise Exception("Token limit exceeded. Forgetting history?")
281 |
282 | raise
283 |
284 | if logging.getLogger().isEnabledFor(logging.DEBUG):
285 | print_pt(f"[DEBUG] Gemini response: {response}", "output.debug")
286 |
287 | print_pt(f"[WARNING] Token usage: {response.usage.total_tokens} / 1,047,576", "output.warning")
288 |
289 | candidate = response.choices[0]
290 |
291 | if not candidate.message:
292 | print_pt("[ERROR] No content received from OpenAI API", "output.error")
293 | print_pt(str(response), "output.error")
294 |
295 | self.agent.add_content(candidate.message)
296 | self.agent.save_history()
297 |
298 | if candidate.message.tool_calls:
299 | for tool_call in candidate.message.tool_calls:
300 | function_call = tool_call.function
301 |
302 | print_pt(f"[TOOL] Function call: {function_call.name}, args: {truncate_text_both_ends(str(function_call.arguments))}", "output.tool")
303 |
304 | tool_result = await self.mcp_session.call_tool(
305 | function_call.name,
306 | arguments=json.loads(function_call.arguments),
307 | )
308 | print_pt(f"[TOOL] Tool result: {truncate_text_both_ends(str(tool_result))}", "output.tool")
309 |
310 | self.agent.add_content(
311 | {
312 | "role": "tool",
313 | "tool_call_id": tool_call.id,
314 | "name": function_call.name,
315 | "content": tool_result.content
316 | }
317 | )
318 |
319 | if function_call.name == "ask":
320 | # get user input
321 | print(f"Model (clarification): {tool_result.content[0].text}")
322 | answer = await self.prompt_session.prompt_async(
323 | FormattedText([("class:prompt", "User (clarification): ")]),
324 | style=PROMPT_STYLE_OBJ
325 | )
326 |
327 | self.agent.add_content(
328 | {
329 | "role": "user",
330 | "content": answer
331 | }
332 | )
333 |
334 | else:
335 |
336 | # TODO: hack for pro-active tool calling
337 | self.agent.add_content(
338 | {
339 | "role": "user",
340 | "content": "Continue with the next needful action or if it starts to get repetitive, use the 'think' tool to figure out next action or how to make it better, or finally use the 'ask' tool to ask the user for input."
341 | }
342 | )
343 |
344 |
345 | async def chat_loop(self):
346 | """Run an interactive chat loop using prompt_toolkit"""
347 | prompt_session = PromptSession(history=None)
348 | print_pt(f"MCP Client Started! (Using prompt_toolkit)")
349 | print_pt(f"Type your queries or 'quit' to exit.")
350 |
351 | # await self.inlined_process_query_recursive("Re-confirm allowed directories with me and do nothing else.")
352 |
353 | while True:
354 | try:
355 | # Use prompt_async with the Style object
356 | query = await self.prompt_session.prompt_async(
357 | FormattedText([("class:prompt", "User: ")]),
358 | style=PROMPT_STYLE_OBJ
359 | )
360 | query = query.strip()
361 |
362 | if query.lower() == 'quit':
363 | break
364 |
365 | await self.inlined_process_query_recursive(query)
366 |
367 | except anyio.ClosedResourceError:
368 | print_pt(f"Connection closed. Attempting to reconnect...", "output.debug")
369 | await self.connect()
370 |
371 | await self.inlined_process_query_recursive(query)
372 |
373 | except (EOFError, KeyboardInterrupt):
374 | print_pt(f"Exiting client...", "output.debug")
375 | break
376 |
377 | async def main():
378 | # Parse command line arguments
379 | parser = argparse.ArgumentParser(description='MCP Client')
380 | parser.add_argument('server_url', help='URL of SSE MCP server (i.e. http://localhost:8080/sse)')
381 | parser.add_argument('--debug', action='store_true', help='Enable debug logging')
382 | args = parser.parse_args()
383 |
384 | # Setup logging as the very first thing
385 | setup_logging(debug=args.debug)
386 |
387 | server_url = args.server_url
388 | logging.info(f"MCP Client attempting to connect to: {server_url}")
389 | client = MCPClient(server_url=server_url)
390 | try:
391 | if not await client.connect():
392 | logging.error(f"Initial connection failed. Exiting.")
393 | # print_pt already happens within connect on failure
394 | sys.exit(1)
395 |
396 | # Use patch_stdout context manager
397 | with patch_stdout():
398 | await client.chat_loop()
399 |
400 | finally:
401 | logging.info("Initiating final client cleanup.")
402 | await client.cleanup()
403 |
404 |
405 | if __name__ == "__main__":
406 | try:
407 | asyncio.run(main())
408 | except Exception as e:
409 | # Catch any other unexpected exceptions escaping main
410 | print_pt(f"\nUnhandled exception occurred: {e}", "output.error")
411 | import traceback
412 | print_pt(traceback.format_exc(), "output.error") # Print stack trace too
413 |
--------------------------------------------------------------------------------
/toolkami.rb:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 |
3 | require 'io/console'
4 | require 'fileutils'
5 | require 'yaml'
6 | require 'shellwords'
7 |
8 | # Double-buffered UI module - adapted from try.rb
9 | module UI
10 | TOKEN_MAP = {
11 | '{text}' => "\e[39m",
12 | '{dim}' => "\e[90m",
13 | '{highlight}' => "\e[1;33m",
14 | '{reset}' => "\e[0m",
15 | }.freeze
16 |
17 | @@buffer = []
18 | @@last_buffer = []
19 | @@current_line = ""
20 |
21 | def self.print(text, io: STDERR)
22 | return if text.nil?
23 | @@current_line += text
24 | end
25 |
26 | def self.puts(text = "", io: STDERR)
27 | @@current_line += text
28 | @@buffer << @@current_line
29 | @@current_line = ""
30 | end
31 |
32 | def self.flush(io: STDERR)
33 | # Finalize current line into buffer
34 | unless @@current_line.empty?
35 | @@buffer << @@current_line
36 | @@current_line = ""
37 | end
38 |
39 | # In non-TTY contexts, print plain text without control codes
40 | unless io.tty?
41 | plain = @@buffer.join("\n").gsub(/\{.*?\}/, '')
42 | io.print(plain)
43 | io.print("\n") unless plain.end_with?("\n")
44 | @@last_buffer = []
45 | @@buffer.clear
46 | @@current_line = ""
47 | io.flush
48 | return
49 | end
50 |
51 | # Position cursor at home for TTY
52 | io.print("\e[H")
53 |
54 | max_lines = [@@buffer.length, @@last_buffer.length].max
55 | reset = TOKEN_MAP['{reset}']
56 |
57 | # Double buffering: only redraw changed lines
58 | (0...max_lines).each do |i|
59 | current_line = @@buffer[i] || ""
60 | last_line = @@last_buffer[i] || ""
61 |
62 | if current_line != last_line
63 | # Move to line and clear it, then write new content
64 | io.print("\e[#{i + 1};1H\e[2K")
65 | if !current_line.empty?
66 | processed_line = expand_tokens(current_line)
67 | io.print(processed_line)
68 | io.print(reset)
69 | end
70 | end
71 | end
72 |
73 | # Store current buffer as last buffer for next comparison
74 | @@last_buffer = @@buffer.dup
75 | @@buffer.clear
76 | @@current_line = ""
77 |
78 | io.flush
79 | end
80 |
81 | def self.cls(io: STDERR)
82 | @@current_line = ""
83 | @@buffer.clear
84 | @@last_buffer.clear
85 | io.print("\e[2J\e[H") # Clear screen and go home
86 | end
87 |
88 | def self.expand_tokens(str)
89 | str.gsub(/\{.*?\}/) { |match| TOKEN_MAP.fetch(match, '') }
90 | end
91 |
92 | def self.read_key
93 | input = STDIN.getc
94 | if input == "\e"
95 | input << STDIN.read_nonblock(3) rescue ""
96 | input << STDIN.read_nonblock(2) rescue ""
97 | end
98 | input
99 | end
100 | end
101 |
102 | class ToolkamiSelector
103 | DEFAULT_PATH = ENV['TOOLKAMI_PATH'] || File.expand_path("~/kamis")
104 |
105 | def initialize(search_term = "", base_path: DEFAULT_PATH)
106 | @search_term = search_term.strip
107 | @base_path = base_path
108 | @cursor = 0
109 |
110 | FileUtils.mkdir_p(@base_path) unless Dir.exist?(@base_path)
111 | end
112 |
113 | def self.is_git_uri?(arg)
114 | return false unless arg
115 | arg.match?(%r{^(https?://|git@)}) || arg.include?('github.com') || arg.include?('gitlab.com') || arg.end_with?('.git')
116 | end
117 |
118 | def self.parse_git_uri(uri)
119 | # Remove .git suffix if present
120 | uri = uri.sub(/\.git$/, '')
121 |
122 | # Handle different git URI formats
123 | if uri.match(%r{^https?://github\.com/([^/]+)/([^/]+)})
124 | # https://github.com/user/repo
125 | user, repo = $1, $2
126 | return { user: user, repo: repo, host: 'github.com' }
127 | elsif uri.match(%r{^git@github\.com:([^/]+)/([^/]+)})
128 | # git@github.com:user/repo
129 | user, repo = $1, $2
130 | return { user: user, repo: repo, host: 'github.com' }
131 | elsif uri.match(%r{^https?://([^/]+)/([^/]+)/([^/]+)})
132 | # https://gitlab.com/user/repo or other git hosts
133 | host, user, repo = $1, $2, $3
134 | return { user: user, repo: repo, host: host }
135 | elsif uri.match(%r{^git@([^:]+):([^/]+)/([^/]+)})
136 | # git@host:user/repo
137 | host, user, repo = $1, $2, $3
138 | return { user: user, repo: repo, host: host }
139 | else
140 | return nil
141 | end
142 | end
143 |
144 | def self.generate_clone_directory_name(git_uri, custom_name = nil)
145 | return custom_name if custom_name && !custom_name.empty?
146 |
147 | parsed = parse_git_uri(git_uri)
148 | return nil unless parsed
149 |
150 | date_prefix = Time.now.strftime("%Y-%m-%d")
151 | "#{date_prefix}-#{parsed[:user]}-#{parsed[:repo]}"
152 | end
153 |
154 | def run
155 | return nil unless STDIN.tty? && STDERR.tty?
156 |
157 | UI.cls # Clear screen once at start
158 | STDERR.raw do
159 | STDERR.print("\e[?25l") # Hide cursor
160 | loop do
161 | items = get_items
162 | render(items)
163 |
164 | key = UI.read_key
165 | case key
166 | when "\r" # Enter
167 | result = items[@cursor]
168 | STDERR.print("\e[?25h") # Show cursor
169 | UI.cls
170 | return result ? result[:path] : create_new
171 | when "\e[A", "\x10" # Up, Ctrl-P
172 | @cursor = [@cursor - 1, 0].max
173 | when "\e[B", "\x0E" # Down, Ctrl-N
174 | @cursor = [@cursor + 1, items.length].min
175 | when "\x7F" # Backspace
176 | @search_term = @search_term[0...-1]
177 | @cursor = 0
178 | when "\e", "\x03" # ESC, Ctrl-C
179 | STDERR.print("\e[?25h")
180 | UI.cls
181 | return nil
182 | when /^[a-zA-Z0-9\-_. ]$/
183 | @search_term += key
184 | @cursor = 0
185 | end
186 | end
187 | end
188 | ensure
189 | STDERR.print("\e[?25h")
190 | UI.cls
191 | end
192 |
193 | private
194 |
195 | def get_items
196 | dirs = Dir.entries(@base_path)
197 | .reject { |e| e.start_with?('.') }
198 | .select { |e| File.directory?(File.join(@base_path, e)) }
199 | .map { |name| { name: name, path: File.join(@base_path, name) } }
200 |
201 | return dirs if @search_term.empty?
202 |
203 | # Simple fuzzy filter
204 | query = @search_term.downcase
205 | dirs.select { |d| fuzzy_match?(d[:name].downcase, query) }
206 | end
207 |
208 | def fuzzy_match?(text, query)
209 | qi = 0
210 | text.each_char do |c|
211 | qi += 1 if qi < query.length && c == query[qi]
212 | end
213 | qi == query.length
214 | end
215 |
216 | def render(items)
217 | UI.puts "{highlight}Toolkami Selector{reset}"
218 | UI.puts "{dim}#{'─' * 40}{reset}"
219 | UI.puts "Search: #{@search_term}"
220 | UI.puts "{dim}#{'─' * 40}{reset}"
221 |
222 | items.each_with_index do |item, idx|
223 | prefix = idx == @cursor ? "→ " : " "
224 | UI.puts "#{prefix}#{item[:name]}"
225 | end
226 |
227 | # "Create new" option
228 | prefix = @cursor == items.length ? "→ " : " "
229 | UI.puts ""
230 | UI.puts "#{prefix}+ Create new: #{@search_term.empty? ? '(enter name)' : @search_term}"
231 |
232 | UI.flush # Flush double buffer
233 | end
234 |
235 | def create_new
236 | name = @search_term.empty? ? nil : @search_term
237 |
238 | unless name
239 | STDERR.print("\e[2J\e[H\e[?25h")
240 | UI.puts "Enter name:"
241 | input = nil
242 | STDERR.cooked { input = STDIN.gets }
243 | return nil if input.nil?
244 |
245 | name = input.chomp
246 | return nil if name.empty?
247 | end
248 |
249 | # Check if it's a git URI
250 | if ToolkamiSelector.is_git_uri?(name)
251 | dir_name = ToolkamiSelector.generate_clone_directory_name(name)
252 | return { type: :clone, uri: name, path: File.join(@base_path, dir_name) } if dir_name
253 | end
254 |
255 | # Add date prefix for new directories
256 | date_prefix = Time.now.strftime("%Y-%m-%d")
257 | sanitized_name = name.strip
258 | .gsub(/[\/\\]/, '-') # prevent path traversal
259 | .gsub(/[^a-zA-Z0-9_-]/, '-')
260 | .gsub(/-+/, '-')
261 | .gsub(/\A-+|-+\z/, '')
262 | sanitized_name = 'project' if sanitized_name.empty?
263 | File.join(@base_path, "#{date_prefix}-#{sanitized_name}")
264 | end
265 | end
266 |
267 | class ConfigSelector
268 | def initialize(base_path:)
269 | @base_path = base_path
270 | @config_dir = File.join(base_path, '.configs')
271 | @cursor = 0
272 | end
273 |
274 | def run
275 | return nil unless STDIN.tty? && STDERR.tty?
276 |
277 | # Return nil early if no configs directory exists
278 | return nil unless Dir.exist?(@config_dir)
279 |
280 | configs = get_configs
281 | return nil if configs.empty?
282 |
283 | UI.cls # Clear screen once at start
284 | STDERR.raw do
285 | STDERR.print("\e[?25l") # Hide cursor
286 | loop do
287 | render(configs)
288 |
289 | key = UI.read_key
290 | case key
291 | when "\r" # Enter
292 | STDERR.print("\e[?25h")
293 | UI.cls
294 | return @cursor == 0 ? nil : configs[@cursor - 1]
295 | when "\e[A", "\x10" # Up, Ctrl-P
296 | @cursor = [@cursor - 1, 0].max
297 | when "\e[B", "\x0E" # Down, Ctrl-N
298 | @cursor = [@cursor + 1, configs.length].min
299 | when "\e", "\x03" # ESC, Ctrl-C
300 | STDERR.print("\e[?25h")
301 | UI.cls
302 | return nil
303 | end
304 | end
305 | end
306 | ensure
307 | STDERR.print("\e[?25h")
308 | UI.cls
309 | end
310 |
311 | private
312 |
313 | def get_configs
314 | return [] unless Dir.exist?(@config_dir)
315 |
316 | Dir.entries(@config_dir)
317 | .reject { |e| e.start_with?('.') }
318 | .select { |name|
319 | path = File.join(@config_dir, name)
320 | File.directory?(path) &&
321 | File.exist?(File.join(path, 'Dockerfile')) &&
322 | File.exist?(File.join(path, 'docker-compose.yml'))
323 | }
324 | .sort
325 | end
326 |
327 | def render(configs)
328 | UI.puts "{highlight}Select Config{reset}"
329 | UI.puts "{dim}#{'─' * 40}{reset}"
330 |
331 | # "Skip" option at index 0
332 | prefix = @cursor == 0 ? "→ " : " "
333 | UI.puts "#{prefix}{dim}Skip (no config){reset}"
334 | UI.puts ""
335 |
336 | # Config options start at index 1
337 | configs.each_with_index do |name, idx|
338 | prefix = @cursor == (idx + 1) ? "→ " : " "
339 | UI.puts "#{prefix}#{name}"
340 | end
341 |
342 | UI.puts "{dim}#{'─' * 40}{reset}"
343 | UI.puts "{dim}↑↓: Navigate Enter: Select ESC: Cancel{reset}"
344 |
345 | UI.flush # Flush double buffer
346 | end
347 | end
348 |
349 | # CLI Entry Point
350 | if __FILE__ == $0
351 | def print_help
352 | puts <<~HELP
353 | Toolkami - Minimal directory selector
354 |
355 | Usage:
356 | toolkami.rb init [PATH] # Generate shell function
357 | toolkami.rb cd [QUERY] # Interactive selector
358 | toolkami.rb wt [NAME] # Create worktree from current repo
359 | toolkami.rb wt merge # Merge worktree changes back to parent repo
360 | toolkami.rb wt drop # Delete worktree and branch
361 | toolkami.rb sb # Run Docker sandbox from .toolkami/docker-compose.yml
362 | toolkami.rb sb exec [CMD...] # Exec into running sandbox container (default: bash)
363 | toolkami.rb sb build [--no-cache] # Build sandbox image (with optional compose flags)
364 |
365 | Config Selection:
366 | Place configs in $TOOLKAMI_PATH/.configs/
367 | Each config dir should contain:
368 | - Dockerfile (required)
369 | - docker-compose.yml (required)
370 | - config.toml, settings.json, etc. (optional)
371 | - Other files (copied to .toolkami/ in worktree)
372 |
373 | Environment:
374 | TOOLKAMI_PATH - Root directory (default: ~/kamis)
375 | HELP
376 | end
377 |
378 | def sanitize_service_name(name)
379 | name.downcase.gsub(/[^a-z0-9_.-]/, '-').gsub(/-+/, '-').sub(/^-+/, '').sub(/-+$/, '')
380 | end
381 |
382 | def determine_service_name(compose_path)
383 | data = YAML.safe_load(File.read(compose_path))
384 | if data.is_a?(Hash) && data['services'].is_a?(Hash) && !data['services'].empty?
385 | data['services'].keys.first.to_s
386 | else
387 | sanitize_service_name(File.basename(Dir.pwd))
388 | end
389 | rescue
390 | sanitize_service_name(File.basename(Dir.pwd))
391 | end
392 |
393 | def ensure_docker_available!
394 | return if system('command -v docker >/dev/null 2>&1')
395 |
396 | STDERR.puts "Error: Docker is not installed or not in PATH"
397 | STDERR.puts "Install Docker from https://docs.docker.com/get-docker/"
398 | exit 1
399 | end
400 |
401 | command = ARGV.shift
402 |
403 | case command
404 | when 'init'
405 | path = ARGV.shift || ToolkamiSelector::DEFAULT_PATH
406 | script_path = File.expand_path($0)
407 |
408 | # Create directory structure
409 | FileUtils.mkdir_p(path)
410 | configs_dir = File.join(path, '.configs')
411 | FileUtils.mkdir_p(configs_dir)
412 |
413 | # Create example codex config if it doesn't exist
414 | codex_config_dir = File.join(configs_dir, 'codex')
415 | unless Dir.exist?(codex_config_dir)
416 | FileUtils.mkdir_p(codex_config_dir)
417 |
418 | # Create example Dockerfile
419 | dockerfile_path = File.join(codex_config_dir, 'Dockerfile')
420 | File.write(dockerfile_path, <<~DOCKERFILE)
421 | FROM node:22
422 | WORKDIR /workspace
423 |
424 | RUN npm install -g @openai/codex
425 |
426 | # Install UV
427 | RUN curl -fsSL https://astral.sh/uv/install.sh | sh
428 | ENV PATH="/root/.local/bin:$PATH"
429 |
430 | # Install Go
431 | RUN wget \
432 | https://go.dev/dl/go1.25.3.linux-arm64.tar.gz && \
433 | tar -C /usr/local -xzf go1.25.3.linux-arm64.tar.gz && \
434 | rm -rf go1.25.3.linux-arm64.tar.gz
435 | ENV PATH="/usr/local/go/bin:$PATH"
436 | ENV PATH="/root/go/bin:$PATH"
437 |
438 | RUN curl -fsSL https://raw.githubusercontent.com/steveyegge/beads/main/install.sh | bash
439 | RUN uv tool install beads-mcp
440 |
441 | CMD ["/bin/bash"]
442 | DOCKERFILE
443 |
444 | # Create example config.toml
445 | config_toml_path = File.join(codex_config_dir, 'config.toml')
446 | File.write(config_toml_path, <<~TOML)
447 | model = "gpt-5-codex"
448 | approval_policy = "never"
449 | sandbox_mode = "danger-full-access"
450 | sandbox_workspace_write.network_access = true
451 |
452 | [shell_environment_policy]
453 | ignore_default_excludes = true
454 |
455 | [mcp_servers.beads]
456 | command = "beads-mcp"
457 |
458 | [tools]
459 | web_search = true
460 |
461 | [projects."/workspace"]
462 | trust_level = "trusted"
463 | TOML
464 |
465 | # Create example docker-compose.yml
466 | compose_path = File.join(codex_config_dir, 'docker-compose.yml')
467 | File.write(compose_path, <<~COMPOSE)
468 | services:
469 | toolkami:
470 | build:
471 | context: .
472 | dockerfile: Dockerfile
473 | volumes:
474 | - ..:/workspace
475 | - ./config.toml:/root/.codex/config.toml:ro
476 | network_mode: host
477 | working_dir: /workspace
478 | stdin_open: true
479 | tty: true
480 | command: ["/bin/bash"]
481 | COMPOSE
482 |
483 | STDERR.puts "✓ Created example config at #{codex_config_dir}"
484 | end
485 |
486 | # Simple bash/zsh wrapper
487 | puts <<~SHELL
488 | toolkami() {
489 | script_path='#{script_path}'
490 | case "$1" in
491 | wt|init|sb|worktree|merge|drop)
492 | cmd=$(/usr/bin/env ruby "$script_path" "$@" 2>/dev/tty)
493 | ;;
494 | *)
495 | cmd=$(/usr/bin/env ruby "$script_path" cd "$@" 2>/dev/tty)
496 | ;;
497 | esac
498 | [ $? -eq 0 ] && [ -n "$cmd" ] && eval "$cmd"
499 | }
500 |
501 | tk() {
502 | toolkami "$@"
503 | }
504 | SHELL
505 |
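  # Every branch below prints a shell snippet to stdout rather than acting directly:
  # a child process cannot change its parent shell's working directory, so the wrapper
  # emitted by `init` captures stdout (interactive UI goes to stderr/tty) and evals it.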
506 | when 'cd', nil
507 | search = ARGV.join(' ')
508 | selector = ToolkamiSelector.new(search)
509 | result = selector.run
510 |
511 | if result
512 | # Handle different result types
513 | if result.is_a?(Hash) && result[:type] == :clone
514 | # Git clone workflow
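      # Single quotes inside paths are escaped with the '"'"' idiom (close the quote,
      # emit a double-quoted literal ', reopen the quote) so the emitted command
      # remains shell-safe when eval'd by the wrapper.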
515 | quoted_path = "'" + result[:path].gsub("'", %q('"'"')) + "'"
516 | quoted_uri = "'" + result[:uri].gsub("'", %q('"'"')) + "'"
517 | puts "mkdir -p #{quoted_path} && git clone #{quoted_uri} #{quoted_path} && cd #{quoted_path}"
518 | else
519 | # Regular directory creation
520 | quoted = "'" + result.gsub("'", %q('"'"')) + "'"
521 | puts "mkdir -p #{quoted} && cd #{quoted}"
522 | end
523 | end
524 |
525 | when 'wt'
526 | subcommand = ARGV.first
527 |
528 | case subcommand
529 | when 'merge'
530 | ARGV.shift
531 |
532 | # Check if we're in a git worktree
533 | git_common_dir = `git rev-parse --git-common-dir 2>/dev/null`.strip
534 | if $?.exitstatus != 0
535 | STDERR.puts "Error: Not in a git repository"
536 | exit 1
537 | end
538 |
539 | git_dir = `git rev-parse --git-dir 2>/dev/null`.strip
540 | if git_common_dir == git_dir || git_common_dir == '.git'
541 | STDERR.puts "Error: Not in a worktree (you're in the main repository)"
542 | exit 1
543 | end
544 |
545 | # Check for uncommitted changes
546 | status_output = `git status --porcelain 2>/dev/null`.strip
547 | if !status_output.empty?
548 | STDERR.puts "Error: You have uncommitted changes. Please commit or stash them first."
549 | STDERR.puts ""
550 | STDERR.puts "Uncommitted changes:"
551 | STDERR.puts status_output
552 | exit 1
553 | end
554 |
555 | # Get current commit SHA
556 | commit_sha = `git rev-parse HEAD 2>/dev/null`.strip
557 | if commit_sha.empty?
558 | STDERR.puts "Error: Could not determine current commit"
559 | exit 1
560 | end
561 |
562 | # Get parent repo location
563 | parent_repo = File.dirname(git_common_dir)
564 |
565 | # Get current worktree path for cleanup
566 | worktree_path = Dir.pwd
567 | quoted_worktree = "'" + worktree_path.gsub("'", %q('"'"')) + "'"
568 | quoted_parent = "'" + parent_repo.gsub("'", %q('"'"')) + "'"
569 |
570 | # Get branch name for merge
571 | branch_name = `git rev-parse --abbrev-ref HEAD 2>/dev/null`.strip
572 |
573 | # Emit shell commands: cd to parent and squash-merge the branch, then return to worktree
574 | if branch_name.empty? || branch_name == "HEAD"
575 | # Fallback to cherry-pick for detached HEAD
576 | puts "cd #{quoted_parent} && git cherry-pick #{commit_sha} && cd #{quoted_worktree}"
577 | else
578 | puts "cd #{quoted_parent} && git merge --squash #{branch_name} && git commit -m 'squash: merge #{branch_name}' && cd #{quoted_worktree}"
579 | end
580 |
581 | when 'drop'
582 | ARGV.shift
583 |
584 | # Check if we're in a git worktree
585 | git_common_dir = `git rev-parse --git-common-dir 2>/dev/null`.strip
586 | if $?.exitstatus != 0
587 | STDERR.puts "Error: Not in a git repository"
588 | exit 1
589 | end
590 |
591 | git_dir = `git rev-parse --git-dir 2>/dev/null`.strip
592 | if git_common_dir == git_dir || git_common_dir == '.git'
593 | STDERR.puts "Error: Not in a worktree (you're in the main repository)"
594 | exit 1
595 | end
596 |
597 | # Get parent repo location
598 | parent_repo = File.dirname(git_common_dir)
599 |
600 | # Get current worktree path for cleanup
601 | worktree_path = Dir.pwd
602 | quoted_worktree = "'" + worktree_path.gsub("'", %q('"'"')) + "'"
603 | quoted_parent = "'" + parent_repo.gsub("'", %q('"'"')) + "'"
604 |
605 | # Emit shell commands: cd to parent, remove worktree and delete branch
606 | branch_name = `git rev-parse --abbrev-ref HEAD 2>/dev/null`.strip
607 | if branch_name.empty? || branch_name == "HEAD"
608 | # Just remove worktree for detached HEAD
609 | puts "cd #{quoted_parent} && git worktree remove --force #{quoted_worktree}"
610 | else
611 | # Remove worktree and delete branch
612 | puts "cd #{quoted_parent} && git worktree remove --force #{quoted_worktree} && git branch -D #{branch_name}"
613 | end
614 |
615 | else
616 | # Get custom name from args or use repo name
617 | custom_name = ARGV.join(' ')
618 |
619 | # Get current directory base name
620 | base = if custom_name && !custom_name.strip.empty?
621 | custom_name.gsub(/\s+/, '-')
622 | else
623 | File.basename(Dir.pwd)
624 | end
625 |
626 | # Add date prefix
627 | date_prefix = Time.now.strftime("%Y-%m-%d")
628 | dir_name = "#{date_prefix}-#{base}"
629 | full_path = File.join(ToolkamiSelector::DEFAULT_PATH, dir_name)
630 |
631 | # Check if we're in a git repo
632 | if File.directory?(File.join(Dir.pwd, '.git'))
633 | # Check for config selection
634 | config_selector = ConfigSelector.new(base_path: ToolkamiSelector::DEFAULT_PATH)
635 | selected_config = config_selector.run
636 |
637 | # Build shell command parts
638 | quoted_path = "'" + full_path.gsub("'", %q('"'"')) + "'"
639 | branch_name = "feature/#{dir_name}"
640 | commands = [
641 | "mkdir -p #{quoted_path}",
642 | "git worktree add -b #{branch_name} #{quoted_path}"
643 | ]
644 |
645 | # Add config copy if selected
646 | if selected_config
647 | # Add confirmation message to shell output
648 | commands << "echo '✓ Config: #{selected_config} → .toolkami/' >&2"
649 |
650 | config_src = File.join(ToolkamiSelector::DEFAULT_PATH, '.configs', selected_config)
651 | quoted_src = "'" + config_src.gsub("'", %q('"'"')) + "'"
652 | toolkami_dir = File.join(full_path, '.toolkami')
653 | quoted_toolkami = "'" + toolkami_dir.gsub("'", %q('"'"')) + "'"
654 |
655 | commands << "mkdir -p #{quoted_toolkami}"
656 | commands << "cp -r #{quoted_src}/. #{quoted_toolkami}/"
657 |
658 | # After copying, rename the first docker compose service to the directory name (sanitized)
659 | compose_dest = File.join(full_path, '.toolkami', 'docker-compose.yml')
660 | quoted_compose = "'" + compose_dest.gsub("'", %q('"'"')) + "'"
661 |           service_name = sanitize_service_name(dir_name)
662 | quoted_service = "'" + service_name.gsub("'", %q('"'"')) + "'"
663 |           ruby_edit = %q{p=ARGV[0]; s=ARGV[1]; l=File.readlines(p); i=l.index{|x| x =~ /^\s*services\s*:/}; if i; j=i+1; while j<l.length && l[j] !~ /^\s+\S+:\s*$/; j+=1; end; if j<l.length; l[j]=l[j].sub(/^(\s*)\S+:/){ $1 + s + ":" }; File.write(p, l.join); end; end}
664 |           commands << "ruby -e #{Shellwords.escape(ruby_edit)} #{quoted_compose} #{quoted_service}"
665 |         end
666 |
667 |         commands << "cd #{quoted_path}"
668 |         puts commands.join(' && ')
669 |       else
670 |         STDERR.puts "Error: Not in a git repository"
671 |         exit 1
672 |       end
673 |     end
674 |
675 |   when 'worktree', 'merge', 'drop'
676 |     suggestions = {
677 |       'worktree' => "toolkami wt [NAME]",
678 | 'merge' => "toolkami wt merge",
679 | 'drop' => "toolkami wt drop"
680 | }
681 | STDERR.puts "Error: Command '#{command}' has been renamed. Use `#{suggestions[command]}` instead."
682 | exit 1
683 |
684 | when 'sb'
685 | # Check for .toolkami/docker-compose.yml
686 | compose_path = File.join(Dir.pwd, '.toolkami', 'docker-compose.yml')
687 | unless File.exist?(compose_path)
688 | STDERR.puts "Error: No .toolkami/docker-compose.yml found in current directory"
689 | STDERR.puts "Run 'toolkami wt' from a git repo and select a config to create a sandbox."
690 | exit 1
691 | end
692 |
693 | # Check if Docker is available
694 | ensure_docker_available!
695 |
696 | # Quote current directory for shell command
697 | quoted_pwd = "'" + Dir.pwd.gsub("'", %q('"'"')) + "'"
698 |
699 | # Optional subcommand: build, exec
700 | subcommand = ARGV.shift
701 |
702 | # Determine service name from compose file, fallback to sanitized directory name
703 | service_name = determine_service_name(compose_path)
704 |
705 | case subcommand
706 | when 'build'
707 | build_options = ARGV.map { |arg| Shellwords.escape(arg) }
708 | options_segment = build_options.empty? ? "" : "#{build_options.join(' ')} "
709 | puts "cd #{quoted_pwd} && docker compose -f .toolkami/docker-compose.yml build #{options_segment}#{service_name}"
710 | when 'exec'
711 | exec_args = ARGV.empty? ? ['bash'] : ARGV
712 | exec_command = exec_args.map { |arg| Shellwords.escape(arg) }.join(' ')
713 | puts "cd #{quoted_pwd} && docker compose -f .toolkami/docker-compose.yml exec -it #{service_name} #{exec_command}".strip
714 | else
715 | puts "cd #{quoted_pwd} && docker compose -f .toolkami/docker-compose.yml run --rm #{service_name}"
716 | end
717 |
718 | when '--help', '-h'
719 | print_help
720 |
721 | else
722 | STDERR.puts "Unknown command: #{command}"
723 | print_help
724 | exit 1
725 | end
726 | end
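
# Example session (hypothetical shell input, assuming the wrapper emitted by `init` is installed):
#   eval "$(ruby toolkami.rb init)"   # defines the `toolkami` / `tk` shell functions
#   tk wt my-feature                  # dated worktree under TOOLKAMI_PATH, optional config copy
#   tk sb build                       # build the sandbox image from .toolkami/docker-compose.yml
#   tk sb                             # run the sandbox container
#   tk wt merge                       # squash-merge the worktree branch back into the parent repo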
727 |
--------------------------------------------------------------------------------