├── .cursorrule
├── .github
│   └── workflows
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── .repomixignore
├── CONFIG.md
├── LICENSE
├── README.md
├── c4_diagram.png
├── c4_diagram.puml
├── mcp-server-config-example.json
├── pyproject.toml
├── repomix.config.json
├── src
│   └── mcp_client_cli
│       ├── __init__.py
│       ├── cli.py
│       ├── config.py
│       ├── const.py
│       ├── input.py
│       ├── memory.py
│       ├── output.py
│       ├── prompt.py
│       ├── storage.py
│       └── tool.py
└── uv.lock

/.cursorrule:
--------------------------------------------------------------------------------
 1 | You are an expert in Python development, with strong experience in LangChain, FastAPI, and LLM integration. This project is a Python-based LLM client that implements the Model Context Protocol (MCP).
 2 | 
 3 | Core Technologies
 4 | - Python 3.12+
 5 | - LangChain for LLM orchestration
 6 | - Model Context Protocol (MCP) for tool integration
 7 | - Async/await patterns for I/O operations
 8 | - Environment-based configuration management
 9 | 
10 | Development Principles
11 | - Write clean, type-annotated Python code using modern Python features
12 | - Implement proper error handling and logging for LLM interactions
13 | - Use async/await patterns for efficient I/O operations
14 | - Maintain backward compatibility with existing MCP tools
15 | - Follow proper caching strategies for improved performance
16 | 
17 | Code Organization
18 | - Separate concerns between tool management, LLM interaction, and CLI interface
19 | - Use proper type hints and Pydantic models for data validation
20 | - Implement efficient caching mechanisms for tool configurations
21 | - Follow modular design patterns for easy testing and maintenance
22 | 
23 | Best Practices
24 | 1. Always use type hints and validate input/output with Pydantic models
25 | 2. Implement proper error handling for LLM and tool interactions
26 | 3. Cache expensive operations appropriately
27 | 4. Write clear docstrings and maintain API documentation
28 | 5. Follow async best practices for I/O operations
29 | 
30 | Testing and Quality
31 | - Write unit tests for core functionality
32 | - Test edge cases in tool interactions
33 | - Validate LLM responses and error handling
34 | - Ensure proper cache invalidation
35 | - Monitor performance and resource usage
36 | 
37 | Documentation
38 | - Maintain clear API documentation
39 | - Document configuration requirements
40 | - Provide examples for common use cases
41 | - Keep README up-to-date with setup instructions
42 | - When proposing an edit to a markdown file, first decide whether the file will contain code snippets.
43 | - If there are no code snippets, wrap your entire answer in a code fence with markdown as the language.
44 | - If there are code snippets, indent each snippet by two spaces and tag it with the correct language for proper rendering. Indentation levels 0 and 4 are not allowed.
45 | - If a markdown code block is indented with any value other than 2 spaces, automatically fix it.
46 | 
47 | Environment and Dependencies
48 | - Use virtual environments for development
49 | - Pin dependency versions in pyproject.toml
50 | - Document required API keys and environment variables
51 | - Handle sensitive data through proper environment configuration
52 | 
53 | Refer to LangChain, MCP, and Python documentation for best practices and implementation patterns.
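
For example, a snippet formatted according to the rules above (two-space indentation, tagged with its language) would look like:

  ```python
  print("hello world")
  ```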
54 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release to PyPI 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths-ignore: 8 | - '**.md' 9 | - '.gitignore' 10 | 11 | jobs: 12 | release: 13 | if: contains(github.event.head_commit.message, 'release') 14 | runs-on: ubuntu-latest 15 | permissions: 16 | id-token: write 17 | contents: write # Required for creating GitHub releases 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - name: Set up Python 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: "3.12" 26 | 27 | - name: Install uv 28 | run: | 29 | curl -LsSf https://astral.sh/uv/install.sh | sh 30 | uv venv 31 | source .venv/bin/activate 32 | 33 | - name: Install build dependencies 34 | run: | 35 | source .venv/bin/activate 36 | uv pip install build twine 37 | 38 | - name: Build package 39 | run: | 40 | source .venv/bin/activate 41 | python -m build 42 | 43 | - name: Get version 44 | id: get_version 45 | run: | 46 | source .venv/bin/activate 47 | uv pip install tomli 48 | echo "VERSION=$(python -c 'import tomli; print(tomli.load(open("pyproject.toml", "rb"))["project"]["version"])')" >> $GITHUB_OUTPUT 49 | 50 | - name: Publish package to PyPI 51 | uses: pypa/gh-action-pypi-publish@release/v1 52 | 53 | - name: Create GitHub Release 54 | uses: softprops/action-gh-release@v1 55 | with: 56 | tag_name: v${{ steps.get_version.outputs.VERSION }} 57 | name: Release v${{ steps.get_version.outputs.VERSION }} 58 | generate_release_notes: true -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test Installation 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | pull_request: 9 | branches: 10 | - main 11 | - master 12 | 13 | jobs: 14 | test-installation: 15 | name: Test on ${{ matrix.os }} 16 | runs-on: ${{ matrix.os }} 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | os: [ubuntu-latest, windows-latest, macos-latest] 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: "3.12" 29 | 30 | - name: Install uv (Linux) 31 | if: runner.os == 'Linux' 32 | run: | 33 | curl -LsSf https://astral.sh/uv/install.sh | sh 34 | echo "$HOME/.cargo/bin" >> $GITHUB_PATH 35 | 36 | - name: Install uv (Windows) 37 | if: runner.os == 'Windows' 38 | run: | 39 | iwr -useb https://astral.sh/uv/install.ps1 | iex 40 | echo "$HOME\.cargo\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 41 | 42 | - name: Install uv (macOS) 43 | if: runner.os == 'macOS' 44 | run: | 45 | curl -LsSf https://astral.sh/uv/install.sh | sh 46 | echo "$HOME/.cargo/bin" >> $GITHUB_PATH 47 | 48 | - name: Create virtual environment (Linux) 49 | if: runner.os == 'Linux' 50 | run: | 51 | uv venv 52 | echo "$PWD/.venv/bin" >> $GITHUB_PATH 53 | 54 | - name: Create virtual environment (Windows) 55 | if: runner.os == 'Windows' 56 | run: | 57 | uv venv 58 | echo "$PWD\.venv\Scripts" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 59 | 60 | - name: Create virtual environment (macOS) 61 | if: runner.os == 'macOS' 62 | run: | 63 | uv venv 64 | echo "$PWD/.venv/bin" >> $GITHUB_PATH 65 | 66 | - name: Install package 67 | run: | 68 | uv pip install -e . 
69 | 70 | - name: Verify CLI works (Linux) 71 | if: runner.os == 'Linux' 72 | run: | 73 | # Run the help command and capture output 74 | output=$(llm --help) 75 | 76 | # Check if the output contains expected help text 77 | if [[ "$output" == *"Run LangChain agent with MCP tools"* ]]; then 78 | echo "CLI help command works correctly" 79 | else 80 | echo "CLI help command failed to produce expected output" 81 | echo "Actual output:" 82 | echo "$output" 83 | exit 1 84 | fi 85 | 86 | - name: Verify CLI works (Windows) 87 | if: runner.os == 'Windows' 88 | shell: pwsh 89 | run: | 90 | # Run the help command and capture output 91 | $output = llm --help 92 | 93 | # Check if the output contains expected help text 94 | if ($output -match "Run LangChain agent with MCP tools") { 95 | Write-Host "CLI help command works correctly" 96 | } else { 97 | Write-Host "CLI help command failed to produce expected output" 98 | Write-Host "Actual output:" 99 | Write-Host $output 100 | exit 1 101 | } 102 | 103 | - name: Verify CLI works (macOS) 104 | if: runner.os == 'macOS' 105 | run: | 106 | # Run the help command and capture output 107 | output=$(llm --help) 108 | 109 | # Check if the output contains expected help text 110 | if [[ "$output" == *"Run LangChain agent with MCP tools"* ]]; then 111 | echo "CLI help command works correctly" 112 | else 113 | echo "CLI help command failed to produce expected output" 114 | echo "Actual output:" 115 | echo "$output" 116 | exit 1 117 | fi 118 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python-generated files 2 | __pycache__/ 3 | *.py[oc] 4 | build/ 5 | dist/ 6 | wheels/ 7 | *.egg-info 8 | 9 | # Virtual environments 10 | .venv 11 | .env 12 | 13 | mcp-server-config.json 14 | 15 | .DS_Store 16 | repomix-output.md 17 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /.repomixignore: -------------------------------------------------------------------------------- 1 | # Add patterns to ignore here, one per line 2 | # Example: 3 | # *.log 4 | # tmp/ 5 | uv.lock 6 | LICENSE 7 | .cursorrule -------------------------------------------------------------------------------- /CONFIG.md: -------------------------------------------------------------------------------- 1 | # MCP Client CLI Configuration 2 | 3 | This document describes the configuration format for the MCP Client CLI. The configuration file uses JSON format (with support for comments via the `commentjson` library). 
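
Since comments are stripped at parse time, the file can be loaded like ordinary JSON. A minimal sketch of how the CLI reads it, mirroring `AppConfig.load` in `src/mcp_client_cli/config.py` (the path below is just an example):

```python
import commentjson

# commentjson accepts // comments that the standard json module would reject.
with open("mcp-server-config.json") as f:
    config = commentjson.load(f)

print(config["systemPrompt"])
print(list(config.get("mcpServers", {})))
```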
4 | 5 | ## Configuration File Location 6 | 7 | The configuration file can be placed in either: 8 | - `~/.llm/config.json` (user's home directory) 9 | - `mcp-server-config.json` (in the current working directory) 10 | 11 | ## Configuration Structure 12 | 13 | ```json 14 | { 15 | "systemPrompt": "string", 16 | "llm": { 17 | "provider": "string", 18 | "model": "string", 19 | "api_key": "string", 20 | "temperature": float, 21 | "base_url": "string" 22 | }, 23 | "mcpServers": { 24 | "server_name": { 25 | "command": "string", 26 | "args": ["string"], 27 | "env": { 28 | "ENV_VAR_NAME": "value" 29 | }, 30 | "enabled": boolean, 31 | "exclude_tools": ["string"], 32 | "requires_confirmation": ["string"] 33 | } 34 | } 35 | } 36 | ``` 37 | 38 | ## Field Specifications 39 | 40 | ### Top-Level Fields 41 | 42 | | Field | Type | Required | Description | 43 | |-------|------|----------|-------------| 44 | | `systemPrompt` | string | Yes | System prompt for the LLM | 45 | | `llm` | object | No | LLM configuration | 46 | | `mcpServers` | object | Yes | Dictionary of MCP server configurations | 47 | 48 | ### LLM Configuration 49 | 50 | | Field | Type | Required | Default | Description | 51 | |-------|------|----------|---------|-------------| 52 | | `provider` | string | No | `"openai"` | LLM provider | 53 | | `model` | string | No | `"gpt-4o"` | LLM model name | 54 | | `api_key` | string | No | Environment vars | API key for the LLM service | 55 | | `temperature` | float | No | `0` | Temperature for LLM responses | 56 | | `base_url` | string | No | `null` | Custom API endpoint URL | 57 | 58 | **Notes:** 59 | - The `api_key` can be omitted if it's set via environment variables `LLM_API_KEY` or `OPENAI_API_KEY` 60 | 61 | ### MCP Server Configuration 62 | 63 | | Field | Type | Required | Default | Description | 64 | |-------|------|----------|---------|-------------| 65 | | `command` | string | Yes | - | Command to run the server | 66 | | `args` | array | No | `[]` | Command-line arguments | 67 | | `env` | object | No | `{}` | Environment variables | 68 | | `enabled` | boolean | No | `true` | Whether the server is enabled | 69 | | `exclude_tools` | array | No | `[]` | Tool names to exclude | 70 | | `requires_confirmation` | array | No | `[]` | Tools requiring user confirmation | 71 | 72 | ## Example Configuration 73 | 74 | ```json 75 | { 76 | "systemPrompt": "You are an AI assistant helping a software engineer...", 77 | "llm": { 78 | "provider": "openai", 79 | "model": "gpt-4o-mini", 80 | "api_key": "your-api-key-here", 81 | "temperature": 0 82 | }, 83 | "mcpServers": { 84 | "fetch": { 85 | "command": "uvx", 86 | "args": ["mcp-server-fetch"] 87 | }, 88 | "brave-search": { 89 | "command": "npx", 90 | "args": ["-y", "@modelcontextprotocol/server-brave-search"], 91 | "env": { 92 | "BRAVE_API_KEY": "your-brave-api-key-here" 93 | } 94 | }, 95 | "mcp-server-commands": { 96 | "command": "npx", 97 | "args": ["mcp-server-commands"], 98 | "requires_confirmation": [ 99 | "run_command", 100 | "run_script" 101 | ] 102 | } 103 | } 104 | } 105 | ``` 106 | 107 | ## Comments in Configuration 108 | 109 | The configuration file supports comments with `//` syntax: 110 | 111 | ```json 112 | { 113 | "systemPrompt": "You are an AI assistant helping a software engineer...", 114 | // Uncomment this section to use Anthropic Claude 115 | // "llm": { 116 | // "provider": "anthropic", 117 | // "model": "claude-3-opus-20240229", 118 | // "api_key": "your-anthropic-api-key" 119 | // }, 120 | "llm": { 121 | "provider": "openai", 122 | "model": 
"gpt-4o", 123 | "api_key": "your-openai-api-key" 124 | } 125 | } 126 | ``` -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Adhika Setya Pramudita 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP CLI client 2 | 3 | A simple CLI program to run LLM prompt and implement [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) client. 4 | 5 | You can use any [MCP-compatible servers](https://github.com/punkpeye/awesome-mcp-servers) from the convenience of your terminal. 6 | 7 | This act as alternative client beside Claude Desktop. Additionally you can use any LLM provider like OpenAI, Groq, or local LLM model via [llama](https://github.com/ggerganov/llama.cpp). 8 | 9 | ![C4 Diagram](https://raw.githubusercontent.com/adhikasp/mcp-client-cli/refs/heads/master/c4_diagram.png) 10 | 11 | ## Setup 12 | 13 | 1. Install via pip: 14 | ```bash 15 | pip install mcp-client-cli 16 | ``` 17 | 18 | 2. 
64 | 
65 | ## Usage
66 | 
67 | ### Basic Usage
68 | 
69 | ```bash
70 | $ llm What is the capital city of North Sumatra?
71 | The capital city of North Sumatra is Medan.
72 | ```
73 | 
74 | You can omit the quotes, but be careful with bash special characters like `&`, `|`, `;` that might be interpreted by your shell.
75 | 
76 | You can also pipe input from other commands or files:
77 | 
78 | ```bash
79 | $ echo "What is the capital city of North Sumatra?" | llm
80 | The capital city of North Sumatra is Medan.
81 | 
82 | $ echo "Given a location, tell me its capital city." > instructions.txt
83 | $ cat instructions.txt | llm "West Java"
84 | The capital city of West Java is Bandung.
85 | ```
86 | 
87 | ### Image Input
88 | 
89 | You can pipe image files to analyze them with multimodal LLMs:
90 | 
91 | ```bash
92 | $ cat image.jpg | llm "What do you see in this image?"
93 | [LLM will analyze and describe the image]
94 | 
95 | $ cat screenshot.png | llm "Is there any error in this screenshot?"
96 | [LLM will analyze the screenshot and point out any errors]
97 | ```
98 | 
99 | ### Using Prompt Templates
100 | 
101 | You can use predefined prompt templates by using the `p` prefix followed by the template name and its arguments:
102 | 
103 | ```bash
104 | # List available prompt templates
105 | $ llm --list-prompts
106 | 
107 | # Use a template
108 | $ llm p review                          # Review git changes
109 | $ llm p commit                          # Generate commit message
110 | $ llm p yt url=https://youtube.com/...  # Summarize YouTube video
111 | ```
112 | 
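Under the hood, templates are plain strings with `{placeholder}` variables: `parse_query` in `cli.py` extracts the placeholder names with a regex and fills them positionally from your arguments. A small sketch of that mechanism (the template text here is made up for illustration; the real templates live in `prompt.py`):

```python
import re

template = "Summarize this video: {url}"       # hypothetical template
args = ["https://youtube.com/watch?v=..."]     # arguments after `llm p <name>`

var_names = re.findall(r"\{(\w+)\}", template)  # -> ["url"]
query = template.format(**dict(zip(var_names, args)))
print(query)  # Summarize this video: https://youtube.com/watch?v=...
```
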
113 | ### Triggering a tool
114 | 
115 | ```bash
116 | $ llm What is the top article on hackernews today?
117 | 
118 | ================================== Ai Message ==================================
119 | Tool Calls:
120 |   brave_web_search (call_eXmFQizLUp8TKBgPtgFo71et)
121 |  Call ID: call_eXmFQizLUp8TKBgPtgFo71et
122 |   Args:
123 |     query: site:news.ycombinator.com
124 |     count: 1
125 | Brave Search MCP Server running on stdio
126 | 
127 | # If the tool requires confirmation, you'll be prompted:
128 | Confirm tool call? [y/n]: y
129 | 
130 | ================================== Ai Message ==================================
131 | Tool Calls:
132 |   fetch (call_xH32S0QKqMfudgN1ZGV6vH1P)
133 |  Call ID: call_xH32S0QKqMfudgN1ZGV6vH1P
134 |   Args:
135 |     url: https://news.ycombinator.com/
136 | ================================= Tool Message =================================
137 | Name: fetch
138 | 
139 | [TextContent(type='text', text='Contents [REDACTED]')]
140 | ================================== Ai Message ==================================
141 | 
142 | The top article on Hacker News today is:
143 | 
144 | ### [Why pipes sometimes get "stuck": buffering](https://jvns.ca)
145 | - **Points:** 31
146 | - **Posted by:** tanelpoder
147 | - **Posted:** 1 hour ago
148 | 
149 | You can view the full list of articles on [Hacker News](https://news.ycombinator.com/)
150 | ```
151 | 
152 | To bypass tool confirmation requirements, use the `--no-confirmations` flag:
153 | 
154 | ```bash
155 | $ llm --no-confirmations "What is the top article on hackernews today?"
156 | ```
157 | 
158 | To use it in bash scripts, add the `--no-intermediates` flag so it prints only the final, concluding message instead of the intermediate ones.
159 | ```bash
160 | $ llm --no-intermediates "What is the time in Tokyo right now?"
161 | ```
162 | 
163 | ### Continuation
164 | 
165 | Add a `c ` prefix to your message to continue the last conversation.
166 | 
167 | ```bash
168 | $ llm asldkfjasdfkl
169 | It seems like your message might have been a typo or an error. Could you please clarify or provide more details about what you need help with?
170 | $ llm c what did i say previously?
171 | You previously typed "asldkfjasdfkl," which appears to be a random string of characters. If you meant to ask something specific or if you have a question, please let me know!
172 | ```
173 | 
174 | ### Clipboard Support
175 | 
176 | You can use content from your clipboard using the `cb` command:
177 | 
178 | ```bash
179 | # After copying text to clipboard
180 | $ llm cb
181 | [LLM will process the clipboard text]
182 | 
183 | $ llm cb "What language is this code written in?"
184 | [LLM will analyze the clipboard text with your question]
185 | 
186 | # After copying an image to clipboard
187 | $ llm cb "What do you see in this image?"
188 | [LLM will analyze the clipboard image] 189 | 190 | # You can combine it with continuation 191 | $ llm cb c "Tell me more about what you see" 192 | [LLM will continue the conversation about the clipboard content] 193 | ``` 194 | 195 | The clipboard feature works in: 196 | - Native Windows/macOS/Linux environments 197 | - Windows: Uses PowerShell 198 | - macOS: Uses `pbpaste` for text, `pngpaste` for images (optional) 199 | - Linux: Uses `xclip` (required for clipboard support) 200 | - Windows Subsystem for Linux (WSL) 201 | - Accesses the Windows clipboard through PowerShell 202 | - Works with both text and images 203 | - Make sure you have access to `powershell.exe` from WSL 204 | 205 | Required tools for clipboard support: 206 | - Windows: PowerShell (built-in) 207 | - macOS: 208 | - `pbpaste` (built-in) for text 209 | - `pngpaste` (optional) for images: `brew install pngpaste` 210 | - Linux: 211 | - `xclip`: `sudo apt install xclip` or equivalent 212 | 213 | The CLI automatically detects if the clipboard content is text or image and handles it appropriately. 214 | 215 | ### Additional Options 216 | 217 | ```bash 218 | $ llm --list-tools # List all available tools 219 | $ llm --list-prompts # List available prompt templates 220 | $ llm --no-tools # Run without any tools 221 | $ llm --force-refresh # Force refresh tool capabilities cache 222 | $ llm --text-only # Output raw text without markdown formatting 223 | $ llm --show-memories # Show user memories 224 | $ llm --model gpt-4 # Override the model specified in config 225 | ``` 226 | 227 | ## Contributing 228 | 229 | Feel free to submit issues and pull requests for improvements or bug fixes. 230 | -------------------------------------------------------------------------------- /c4_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adhikasp/mcp-client-cli/e9eb8bcd29eadc0f1281d695af6f182249f7f94c/c4_diagram.png -------------------------------------------------------------------------------- /c4_diagram.puml: -------------------------------------------------------------------------------- 1 | @startuml MCP Client CLI Architecture 2 | 3 | !include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Component.puml 4 | 5 | LAYOUT_WITH_LEGEND() 6 | 7 | title Component diagram for MCP Client CLI 8 | 9 | Person(user, "User", "Software engineer using the CLI") 10 | 11 | note right of user 12 | Common Usage: 13 | $ llm "What is the capital of France?" 
14 | $ llm c "tell me more" 15 | $ llm p review 16 | $ cat file.txt | llm 17 | $ llm --list-tools 18 | $ llm --list-prompts 19 | end note 20 | 21 | System_Boundary(mcp_client_cli, "MCP Client CLI") { 22 | Container(cli_component, "CLI Component", "Python", "Handles command-line interface and user interaction", $tags="cli") 23 | 24 | 25 | Component(config_manager, "Config Manager", "Python", "Manages configuration loading and validation") 26 | Component(output_handler, "Output Handler", "Python", "Handles output formatting and display") 27 | Component(conversation_manager, "Conversation Manager", "Python", "Manages conversation persistence") 28 | Component(memory_manager, "Memory Manager", "Python", "Manages user memories and vector storage") 29 | Component(tool_manager, "Tool Manager", "Python", "Manages MCP tools and their conversion") 30 | 31 | ContainerDb(sqlite_db, "SQLite Database", "SQLite", "Stores conversations and memories") 32 | ContainerDb(config_file, "Config File", "JSON", "Stores application configuration") 33 | } 34 | 35 | System_Ext(mcp_servers, "MCP Servers", "External MCP-compatible servers providing various tools") 36 | System_Ext(llm_providers, "LLM Providers", "External LLM services (OpenAI, Google, etc.)") 37 | 38 | Rel(user, cli_component, "Uses", "CLI commands") 39 | Rel(cli_component, config_manager, "Uses", "Loads configuration") 40 | Rel(cli_component, output_handler, "Uses", "Displays output") 41 | Rel(cli_component, conversation_manager, "Uses", "Manages conversations") 42 | Rel(cli_component, memory_manager, "Uses", "Accesses memories") 43 | Rel(cli_component, tool_manager, "Uses", "Manages tools") 44 | 45 | Rel(conversation_manager, sqlite_db, "Reads/Writes", "SQL") 46 | Rel(memory_manager, sqlite_db, "Reads/Writes", "SQL") 47 | Rel(config_manager, config_file, "Reads", "JSON") 48 | 49 | Rel(tool_manager, mcp_servers, "Connects", "MCP Protocol") 50 | Rel(cli_component, llm_providers, "Uses", "API calls") 51 | 52 | @enduml -------------------------------------------------------------------------------- /mcp-server-config-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "systemPrompt": "You are an AI assistant helping a software engineer. Your user is a professional software engineer who works on various programming projects. Today's date is {today_datetime}. I aim to provide clear, accurate, and helpful responses with a focus on software development best practices. I should be direct, technical, and practical in my communication style. 
When doing git diff operation, do check the README.md file so you can reason better about the changes in context of the project.", 3 | "llm": { 4 | "provider": "openai", 5 | "model": "gpt-4o-mini", 6 | "api_key": "your-api-key-here", 7 | "temperature": 0 8 | }, 9 | "mcpServers": { 10 | "fetch": { 11 | "command": "uvx", 12 | "args": ["mcp-server-fetch"] 13 | }, 14 | "brave-search": { 15 | "command": "npx", 16 | "args": ["-y", "@modelcontextprotocol/server-brave-search"], 17 | "env": { 18 | "BRAVE_API_KEY": "your-brave-api-key-here" 19 | } 20 | }, 21 | "youtube": { 22 | "command": "npx", 23 | "args": ["-y", "github:anaisbetts/mcp-youtube"] 24 | }, 25 | "mcp-server-commands": { 26 | "command": "npx", 27 | "args": ["mcp-server-commands"], 28 | "requires_confirmation": [ 29 | "run_command", 30 | "run_script" 31 | ] 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "mcp_client_cli" 7 | version = "1.0.4" 8 | description = "Command line interface for MCP client" 9 | readme = "README.md" 10 | requires-python = ">=3.12" 11 | authors = [ 12 | { name = "Adhika Setya Pramudita", email = "adhika.setya.p@gmail.com" } 13 | ] 14 | license = { text = "MIT" } 15 | dependencies = [ 16 | "langchain-anthropic>=0.3.0", 17 | "langchain>=0.3.8", 18 | "mcp>=1.6.0", 19 | "python-dotenv>=1.0.1", 20 | "langgraph>=0.4.1", 21 | "langchain-openai>=0.2.10", 22 | "langchain-google-genai>=2.0.7", 23 | "aiosqlite>=0.20.0", 24 | "langgraph-checkpoint-sqlite>=2.0.1", 25 | "rich>=13.9.0", 26 | "commentjson>=0.9.0", 27 | "jsonschema-pydantic>=0.6", 28 | "pywin32>=306; sys_platform == 'win32' or platform_system == 'Windows'", 29 | "langgraph-prebuilt>=0.1.0", 30 | "standard-imghdr>=3.13.0", 31 | ] 32 | classifiers = [ 33 | "Programming Language :: Python :: 3", 34 | "License :: OSI Approved :: MIT License", 35 | "Operating System :: OS Independent", 36 | ] 37 | 38 | [project.optional-dependencies] 39 | clipboard = [ 40 | "pyperclip>=1.8.2", 41 | "pngpaste; sys_platform == 'darwin' and python_version < '3.12'" 42 | ] 43 | 44 | [project.urls] 45 | Homepage = "https://github.com/adhikasp/mcp_client_cli" 46 | Issues = "https://github.com/adhikasp/mcp_client_cli/issues" 47 | 48 | [project.scripts] 49 | llm = "mcp_client_cli.cli:main" 50 | -------------------------------------------------------------------------------- /repomix.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "output": { 3 | "filePath": "repomix-output.md", 4 | "style": "markdown", 5 | "removeComments": false, 6 | "removeEmptyLines": false, 7 | "topFilesLength": 5, 8 | "showLineNumbers": false, 9 | "copyToClipboard": false 10 | }, 11 | "include": [], 12 | "ignore": { 13 | "useGitignore": true, 14 | "useDefaultPatterns": true, 15 | "customPatterns": [] 16 | }, 17 | "security": { 18 | "enableSecurityCheck": true 19 | } 20 | } -------------------------------------------------------------------------------- /src/mcp_client_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adhikasp/mcp-client-cli/e9eb8bcd29eadc0f1281d695af6f182249f7f94c/src/mcp_client_cli/__init__.py -------------------------------------------------------------------------------- /src/mcp_client_cli/cli.py: 
--------------------------------------------------------------------------------
  1 | #!/usr/bin/env python3
  2 | 
  3 | """
  4 | Simple llm CLI that acts as MCP client.
  5 | """
  6 | 
  7 | from datetime import datetime
  8 | import argparse
  9 | import asyncio
 10 | import os
 11 | from typing import Annotated, TypedDict
 12 | import uuid
 13 | import sys
 14 | import re
 15 | import anyio
 16 | from langchain_core.messages import BaseMessage, HumanMessage
 17 | from langchain_core.prompts import ChatPromptTemplate
 18 | from langchain_core.language_models.chat_models import BaseChatModel
 19 | from langgraph.prebuilt import create_react_agent
 20 | from langgraph.managed import IsLastStep
 21 | from langgraph.graph.message import add_messages
 22 | from langchain.chat_models import init_chat_model
 23 | from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
 24 | from rich.console import Console
 25 | from rich.table import Table
 26 | import base64
 27 | import imghdr
 28 | import mimetypes
 29 | 
 30 | from .input import *
 31 | from .const import *
 32 | from .output import *
 33 | from .storage import *
 34 | from .tool import *
 35 | from .prompt import *
 36 | from .memory import *
 37 | from .config import AppConfig
 38 | 
 39 | # The AgentState class is used to maintain the state of the agent during a conversation.
 40 | class AgentState(TypedDict):
 41 |     # A list of messages exchanged in the conversation.
 42 |     messages: Annotated[list[BaseMessage], add_messages]
 43 |     # A flag indicating whether the current step is the last step in the conversation.
 44 |     is_last_step: IsLastStep
 45 |     # The current date and time, used for context in the conversation.
 46 |     today_datetime: str
 47 |     # The user's memories.
 48 |     memories: str = "no memories"  # note: TypedDict ignores these defaults at runtime; they serve as documentation
 49 |     remaining_steps: int = 5
 50 | 
 51 | async def run() -> None:
 52 |     """Run the LLM agent."""
 53 |     args = setup_argument_parser()
 54 |     query, is_conversation_continuation = parse_query(args)
 55 |     app_config = AppConfig.load()
 56 | 
 57 |     if args.list_tools:
 58 |         await handle_list_tools(app_config, args)
 59 |         return
 60 | 
 61 |     if args.show_memories:
 62 |         await handle_show_memories()
 63 |         return
 64 | 
 65 |     if args.list_prompts:
 66 |         handle_list_prompts()
 67 |         return
 68 | 
 69 |     await handle_conversation(args, query, is_conversation_continuation, app_config)
 70 | 
 71 | def setup_argument_parser() -> argparse.Namespace:
 72 |     """Setup and return the argument parser."""
 73 |     parser = argparse.ArgumentParser(
 74 |         description='Run LangChain agent with MCP tools',
 75 |         formatter_class=argparse.RawTextHelpFormatter,  # raw formatter so the multi-line query help below renders as written
 76 |         epilog="""
 77 | Examples:
 78 |   llm "What is the capital of France?"     Ask a simple question
 79 |   llm c "tell me more"                     Continue previous conversation
 80 |   llm p review                             Use a prompt template
 81 |   cat file.txt | llm                       Process input from a file
 82 |   llm --list-tools                         Show available tools
 83 |   llm --list-prompts                       Show available prompt templates
 84 |   llm --no-confirmations "search web"      Run tools without confirmation
 85 |         """
 86 |     )
 87 |     parser.add_argument('query', nargs='*', default=[],
 88 |                        help='The query to process (default: read from stdin). 
' 89 | 'Special prefixes:\n' 90 | ' c: Continue previous conversation\n' 91 | ' p: Use prompt template') 92 | parser.add_argument('--list-tools', action='store_true', 93 | help='List all available LLM tools') 94 | parser.add_argument('--list-prompts', action='store_true', 95 | help='List all available prompts') 96 | parser.add_argument('--no-confirmations', action='store_true', 97 | help='Bypass tool confirmation requirements') 98 | parser.add_argument('--force-refresh', action='store_true', 99 | help='Force refresh of tools capabilities') 100 | parser.add_argument('--text-only', action='store_true', 101 | help='Print output as raw text instead of parsing markdown') 102 | parser.add_argument('--no-tools', action='store_true', 103 | help='Do not add any tools') 104 | parser.add_argument('--no-intermediates', action='store_true', 105 | help='Only print the final message') 106 | parser.add_argument('--show-memories', action='store_true', 107 | help='Show user memories') 108 | parser.add_argument('--model', 109 | help='Override the model specified in config') 110 | return parser.parse_args() 111 | 112 | async def handle_list_tools(app_config: AppConfig, args: argparse.Namespace) -> None: 113 | """Handle the --list-tools command.""" 114 | server_configs = [ 115 | McpServerConfig( 116 | server_name=name, 117 | server_param=StdioServerParameters( 118 | command=config.command, 119 | args=config.args or [], 120 | env={**(config.env or {}), **os.environ} 121 | ), 122 | exclude_tools=config.exclude_tools or [] 123 | ) 124 | for name, config in app_config.get_enabled_servers().items() 125 | ] 126 | toolkits, tools = await load_tools(server_configs, args.no_tools, args.force_refresh) 127 | 128 | console = Console() 129 | table = Table(title="Available LLM Tools") 130 | table.add_column("Toolkit", style="cyan") 131 | table.add_column("Tool Name", style="cyan") 132 | table.add_column("Description", style="green") 133 | 134 | for tool in tools: 135 | if isinstance(tool, McpTool): 136 | table.add_row(tool.toolkit_name, tool.name, tool.description) 137 | 138 | console.print(table) 139 | 140 | for toolkit in toolkits: 141 | await toolkit.close() 142 | 143 | async def handle_show_memories() -> None: 144 | """Handle the --show-memories command.""" 145 | store = SqliteStore(SQLITE_DB) 146 | memories = await get_memories(store) 147 | console = Console() 148 | table = Table(title="My LLM Memories") 149 | for memory in memories: 150 | table.add_row(memory) 151 | console.print(table) 152 | 153 | def handle_list_prompts() -> None: 154 | """Handle the --list-prompts command.""" 155 | console = Console() 156 | table = Table(title="Available Prompt Templates") 157 | table.add_column("Name", style="cyan") 158 | table.add_column("Template") 159 | table.add_column("Arguments") 160 | 161 | for name, template in prompt_templates.items(): 162 | table.add_row(name, template, ", ".join(re.findall(r'\{(\w+)\}', template))) 163 | 164 | console.print(table) 165 | 166 | async def load_tools(server_configs: list[McpServerConfig], no_tools: bool, force_refresh: bool) -> tuple[list, list]: 167 | """Load and convert MCP tools to LangChain tools.""" 168 | if no_tools: 169 | return [], [] 170 | 171 | toolkits = [] 172 | langchain_tools = [] 173 | 174 | async def convert_toolkit(server_config: McpServerConfig): 175 | toolkit = await convert_mcp_to_langchain_tools(server_config, force_refresh) 176 | toolkits.append(toolkit) 177 | langchain_tools.extend(toolkit.get_tools()) 178 | 179 | async with anyio.create_task_group() as tg: 180 | for 
server_config in server_configs:
181 |             tg.start_soon(convert_toolkit, server_config)
182 | 
183 |     langchain_tools.append(save_memory)
184 |     return toolkits, langchain_tools
185 | 
186 | async def handle_conversation(args: argparse.Namespace, query: HumanMessage,
187 |                               is_conversation_continuation: bool, app_config: AppConfig) -> None:
188 |     """Handle the main conversation flow."""
189 |     server_configs = [
190 |         McpServerConfig(
191 |             server_name=name,
192 |             server_param=StdioServerParameters(
193 |                 command=config.command,
194 |                 args=config.args or [],
195 |                 env={**(config.env or {}), **os.environ}
196 |             ),
197 |             exclude_tools=config.exclude_tools or []
198 |         )
199 |         for name, config in app_config.get_enabled_servers().items()
200 |     ]
201 |     toolkits, tools = await load_tools(server_configs, args.no_tools, args.force_refresh)
202 | 
203 |     extra_body = {}
204 |     if app_config.llm.base_url and "openrouter" in app_config.llm.base_url:
205 |         extra_body = {"transforms": ["middle-out"]}
206 |     # Override model if specified on the command line
207 |     if args.model:
208 |         app_config.llm.model = args.model
209 | 
210 |     model: BaseChatModel = init_chat_model(
211 |         model=app_config.llm.model,
212 |         model_provider=app_config.llm.provider,
213 |         api_key=app_config.llm.api_key,
214 |         temperature=app_config.llm.temperature,
215 |         base_url=app_config.llm.base_url,
216 |         default_headers={
217 |             "X-Title": "mcp-client-cli",
218 |             "HTTP-Referer": "https://github.com/adhikasp/mcp-client-cli",
219 |         },
220 |         extra_body=extra_body
221 |     )
222 | 
223 |     prompt = ChatPromptTemplate.from_messages([
224 |         ("system", app_config.system_prompt),
225 |         ("placeholder", "{messages}")
226 |     ])
227 | 
228 |     conversation_manager = ConversationManager(SQLITE_DB)
229 | 
230 |     async with AsyncSqliteSaver.from_conn_string(SQLITE_DB) as checkpointer:
231 |         store = SqliteStore(SQLITE_DB)
232 |         memories = await get_memories(store)
233 |         formatted_memories = "\n".join(f"- {memory}" for memory in memories)
234 |         agent_executor = create_react_agent(
235 |             model, tools, state_schema=AgentState,
236 |             state_modifier=prompt, checkpointer=checkpointer, store=store
237 |         )
238 | 
239 |         thread_id = (await conversation_manager.get_last_id() if is_conversation_continuation
240 |                     else uuid.uuid4().hex)
241 | 
242 |         input_messages = AgentState(
243 |             messages=[query],
244 |             today_datetime=datetime.now().isoformat(),
245 |             memories=formatted_memories,
246 |             remaining_steps=3
247 |         )
248 | 
249 |         output = OutputHandler(text_only=args.text_only, only_last_message=args.no_intermediates)
250 |         output.start()
251 |         try:
252 |             async for chunk in agent_executor.astream(
253 |                 input_messages,
254 |                 stream_mode=["messages", "values"],
255 |                 config={"configurable": {"thread_id": thread_id, "user_id": "myself"},
256 |                         "recursion_limit": 100}
257 |             ):
258 |                 output.update(chunk)
259 |                 if not args.no_confirmations:
260 |                     if not output.confirm_tool_call(app_config.__dict__, chunk):
261 |                         break
262 |         except Exception as e:
263 |             output.update_error(e)
264 |         finally:
265 |             output.finish()
266 | 
267 |         await conversation_manager.save_id(thread_id, checkpointer.conn)
268 | 
269 |     for toolkit in toolkits:
270 |         await toolkit.close()
271 | 
272 | def parse_query(args: argparse.Namespace) -> tuple[HumanMessage, bool]:
273 |     """
274 |     Parse the query from command line arguments.
275 |     Returns a tuple of (HumanMessage, is_conversation_continuation). 
276 | """ 277 | query_parts = ' '.join(args.query).split() 278 | stdin_content = "" 279 | stdin_image = None 280 | is_continuation = False 281 | 282 | # Handle clipboard content if requested 283 | if query_parts and query_parts[0] == 'cb': 284 | # Remove 'cb' from query parts 285 | query_parts = query_parts[1:] 286 | # Try to get content from clipboard 287 | clipboard_result = get_clipboard_content() 288 | if clipboard_result: 289 | content, mime_type = clipboard_result 290 | if mime_type: # It's an image 291 | stdin_image = base64.b64encode(content).decode('utf-8') 292 | else: # It's text 293 | stdin_content = content 294 | else: 295 | print("No content found in clipboard") 296 | raise Exception("Clipboard is empty") 297 | # Check if there's input from pipe 298 | elif not sys.stdin.isatty(): 299 | stdin_data = sys.stdin.buffer.read() 300 | # Try to detect if it's an image 301 | image_type = imghdr.what(None, h=stdin_data) 302 | if image_type: 303 | # It's an image, encode it as base64 304 | stdin_image = base64.b64encode(stdin_data).decode('utf-8') 305 | mime_type = mimetypes.guess_type(f"dummy.{image_type}")[0] or f"image/{image_type}" 306 | else: 307 | # It's text 308 | stdin_content = stdin_data.decode('utf-8').strip() 309 | 310 | # Process the query text 311 | query_text = "" 312 | if query_parts: 313 | if query_parts[0] == 'c': 314 | is_continuation = True 315 | query_text = ' '.join(query_parts[1:]) 316 | elif query_parts[0] == 'p' and len(query_parts) >= 2: 317 | template_name = query_parts[1] 318 | if template_name not in prompt_templates: 319 | print(f"Error: Prompt template '{template_name}' not found.") 320 | print("Available templates:", ", ".join(prompt_templates.keys())) 321 | return HumanMessage(content=""), False 322 | 323 | template = prompt_templates[template_name] 324 | template_args = query_parts[2:] 325 | try: 326 | # Extract variable names from the template 327 | var_names = re.findall(r'\{(\w+)\}', template) 328 | # Create dict mapping parameter names to arguments 329 | template_vars = dict(zip(var_names, template_args)) 330 | query_text = template.format(**template_vars) 331 | except KeyError as e: 332 | print(f"Error: Missing argument {e}") 333 | return HumanMessage(content=""), False 334 | else: 335 | query_text = ' '.join(query_parts) 336 | 337 | # Combine stdin content with query text if both exist 338 | if stdin_content and query_text: 339 | query_text = f"{stdin_content}\n\n{query_text}" 340 | elif stdin_content: 341 | query_text = stdin_content 342 | elif not query_text and not stdin_image: 343 | return HumanMessage(content=""), False 344 | 345 | # Create the message content 346 | if stdin_image: 347 | content = [ 348 | {"type": "text", "text": query_text or "What do you see in this image?"}, 349 | {"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{stdin_image}"}} 350 | ] 351 | else: 352 | content = query_text 353 | 354 | return HumanMessage(content=content), is_continuation 355 | 356 | def main() -> None: 357 | """Entry point of the script.""" 358 | asyncio.run(run()) 359 | 360 | 361 | if __name__ == "__main__": 362 | main() 363 | -------------------------------------------------------------------------------- /src/mcp_client_cli/config.py: -------------------------------------------------------------------------------- 1 | """Configuration management for the MCP client CLI.""" 2 | 3 | from dataclasses import dataclass 4 | from pathlib import Path 5 | import os 6 | import commentjson 7 | from typing import Dict, List, Optional 8 | 9 | 
from .const import CONFIG_FILE, CONFIG_DIR 10 | 11 | @dataclass 12 | class LLMConfig: 13 | """Configuration for the LLM model.""" 14 | model: str = "gpt-4o" 15 | provider: str = "openai" 16 | api_key: Optional[str] = None 17 | temperature: float = 0 18 | base_url: Optional[str] = None 19 | 20 | @classmethod 21 | def from_dict(cls, config: dict) -> "LLMConfig": 22 | """Create LLMConfig from dictionary.""" 23 | return cls( 24 | model=config.get("model", cls.model), 25 | provider=config.get("provider", cls.provider), 26 | api_key=config.get("api_key", os.getenv("LLM_API_KEY", os.getenv("OPENAI_API_KEY", ""))), 27 | temperature=config.get("temperature", cls.temperature), 28 | base_url=config.get("base_url"), 29 | ) 30 | 31 | @dataclass 32 | class ServerConfig: 33 | """Configuration for an MCP server.""" 34 | command: str 35 | args: List[str] = None 36 | env: Dict[str, str] = None 37 | enabled: bool = True 38 | exclude_tools: List[str] = None 39 | requires_confirmation: List[str] = None 40 | 41 | @classmethod 42 | def from_dict(cls, config: dict) -> "ServerConfig": 43 | """Create ServerConfig from dictionary.""" 44 | return cls( 45 | command=config["command"], 46 | args=config.get("args", []), 47 | env=config.get("env", {}), 48 | enabled=config.get("enabled", True), 49 | exclude_tools=config.get("exclude_tools", []), 50 | requires_confirmation=config.get("requires_confirmation", []) 51 | ) 52 | 53 | @dataclass 54 | class AppConfig: 55 | """Main application configuration.""" 56 | llm: LLMConfig 57 | system_prompt: str 58 | mcp_servers: Dict[str, ServerConfig] 59 | tools_requires_confirmation: List[str] 60 | 61 | @classmethod 62 | def load(cls) -> "AppConfig": 63 | """Load configuration from file.""" 64 | config_paths = [CONFIG_FILE, CONFIG_DIR / "config.json"] 65 | chosen_path = next((path for path in config_paths if os.path.exists(path)), None) 66 | 67 | if chosen_path is None: 68 | raise FileNotFoundError(f"Could not find config file in any of: {', '.join(map(str, config_paths))}") 69 | 70 | with open(chosen_path, 'r') as f: 71 | config = commentjson.load(f) 72 | 73 | # Extract tools requiring confirmation 74 | tools_requires_confirmation = [] 75 | for server_config in config["mcpServers"].values(): 76 | tools_requires_confirmation.extend(server_config.get("requires_confirmation", [])) 77 | 78 | return cls( 79 | llm=LLMConfig.from_dict(config.get("llm", {})), 80 | system_prompt=config["systemPrompt"], 81 | mcp_servers={ 82 | name: ServerConfig.from_dict(server_config) 83 | for name, server_config in config["mcpServers"].items() 84 | }, 85 | tools_requires_confirmation=tools_requires_confirmation 86 | ) 87 | 88 | def get_enabled_servers(self) -> Dict[str, ServerConfig]: 89 | """Get only enabled server configurations.""" 90 | return { 91 | name: config 92 | for name, config in self.mcp_servers.items() 93 | if config.enabled 94 | } -------------------------------------------------------------------------------- /src/mcp_client_cli/const.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | CACHE_EXPIRY_HOURS = 24 4 | DEFAULT_QUERY = "Summarize https://www.youtube.com/watch?v=NExtKbS1Ljc" 5 | CONFIG_FILE = 'mcp-server-config.json' 6 | CONFIG_DIR = Path.home() / ".llm" 7 | SQLITE_DB = CONFIG_DIR / "conversations.db" 8 | CACHE_DIR = CONFIG_DIR / "mcp-tools" -------------------------------------------------------------------------------- /src/mcp_client_cli/input.py: 
--------------------------------------------------------------------------------
 1 | import io
 2 | import platform
 3 | import subprocess
 4 | import base64
 5 | 
 6 | def get_clipboard_content() -> tuple[str | bytes, str | None] | None:
 7 |     """Get content from clipboard, handling both text and images in native and WSL environments.
 8 |     Returns:
 9 |         tuple: (content, mime_type) where:
10 |             - content is either string (for text) or bytes (for image)
11 |             - mime_type is None for text or mime type string for images
12 |         None: if the clipboard is empty or an error occurs (note: the macOS and Linux branches re-raise unexpected errors)
13 |     """
14 |     system = platform.system()
15 |     is_wsl = 'microsoft' in platform.uname().release.lower()  # matches both WSL1 ("Microsoft") and WSL2 ("microsoft-standard") kernels
16 | 
17 |     if is_wsl or system == 'Windows':
18 |         try:
19 |             # Try to get image or text from Windows clipboard
20 |             ps_script = '''
21 |             Add-Type -AssemblyName System.Windows.Forms
22 |             if ([Windows.Forms.Clipboard]::ContainsImage()) {
23 |                 $image = [Windows.Forms.Clipboard]::GetImage()
24 |                 $ms = New-Object System.IO.MemoryStream
25 |                 $image.Save($ms, [System.Drawing.Imaging.ImageFormat]::Png)
26 |                 Write-Output "IMAGE:"
27 |                 [Convert]::ToBase64String($ms.ToArray())
28 |             } elseif ([Windows.Forms.Clipboard]::ContainsText()) {
29 |                 Write-Output "TEXT:"
30 |                 [Windows.Forms.Clipboard]::GetText()
31 |             }
32 |             '''
33 |             # Execute PowerShell script
34 |             powershell_cmd = 'powershell.exe' if is_wsl else 'powershell'
35 |             result = subprocess.run(
36 |                 [powershell_cmd, '-Command', ps_script],
37 |                 capture_output=True,
38 |                 text=True
39 |             )
40 |             if result.stdout.strip():
41 |                 lines = result.stdout.strip().split('\n', 1)
42 |                 if len(lines) == 2:
43 |                     content_type, content = lines
44 |                     if content_type.strip() == "IMAGE:":
45 |                         # Convert base64 back to bytes
46 |                         img_bytes = base64.b64decode(content.strip())
47 |                         return img_bytes, 'image/png'
48 |                     elif content_type.strip() == "TEXT:":
49 |                         return content.strip(), None
50 |         except Exception as e:
51 |             print(f"Error accessing Windows clipboard: {e}")
52 |             return None
53 |     elif system == 'Darwin':  # macOS
54 |         try:
55 |             # Try image first (using pngpaste if available)
56 |             result = subprocess.run(['pngpaste', '-'], capture_output=True)
57 |             if result.returncode == 0 and result.stdout:
58 |                 return result.stdout, 'image/png'
59 | 
60 |             # If no image, try text
61 |             result = subprocess.run(['pbpaste'], capture_output=True, text=True)
62 |             if result.stdout:
63 |                 return result.stdout.strip(), None
64 |         except FileNotFoundError:
65 |             # pngpaste not installed
66 |             print("Error: pngpaste not installed. Install it with 'brew install pngpaste' for image clipboard support")
67 |             try:
68 |                 result = subprocess.run(['pbpaste'], capture_output=True, text=True)
69 |                 if result.stdout:
70 |                     return result.stdout.strip(), None
71 |             except Exception:
72 |                 pass
73 |         except Exception as e:
74 |             print(f"Error accessing macOS clipboard: {e}")
75 |             raise e
76 |     elif system == 'Linux':  # Linux
77 |         try:
78 |             # Try image first
79 |             result = subprocess.run(
80 |                 ['xclip', '-selection', 'clipboard', '-t', 'image/png', '-o'],
81 |                 capture_output=True
82 |             )
83 |             if result.returncode == 0 and result.stdout:
84 |                 return result.stdout, 'image/png'
85 | 
86 |             # If no image, try text
87 |             result = subprocess.run(
88 |                 ['xclip', '-selection', 'clipboard', '-o'],
89 |                 capture_output=True,
90 |                 text=True
91 |             )
92 |             if result.stdout:
93 |                 return result.stdout.strip(), None
94 |         except Exception as e:
95 |             print(f"Error accessing Linux clipboard: {e}")
96 |             raise e
97 | 
98 |     raise Exception("Clipboard is empty")
--------------------------------------------------------------------------------
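
A quick sketch of consuming `get_clipboard_content`'s return contract, mirroring how `parse_query` in `cli.py` uses it (text comes back as `str` with `mime_type=None`; images come back as raw `bytes` plus a mime type):

```python
import base64
from mcp_client_cli.input import get_clipboard_content

result = get_clipboard_content()  # may raise on empty or unsupported clipboards
if result:
    content, mime_type = result
    if mime_type:  # image branch: encode bytes for a data: URL, as cli.py does
        encoded = base64.b64encode(content).decode("utf-8")
        print(f"image ({mime_type}), {len(encoded)} base64 chars")
    else:          # text branch
        print(f"text: {content[:50]}")
```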
/src/mcp_client_cli/memory.py:
--------------------------------------------------------------------------------
 1 | """SQLite-based store implementation with vector search capabilities.
 2 | 
 3 | This store provides persistent storage using SQLite with optional vector search functionality.
 4 | It implements the BaseStore interface from langgraph.
 5 | """
 6 | 
 7 | from datetime import datetime, timezone
 8 | import json
 9 | import logging
10 | from pathlib import Path
11 | from typing import Any, Dict, List, Optional, Tuple, Union
12 | from typing_extensions import Annotated
13 | import uuid
14 | 
15 | import aiosqlite
16 | from langchain_core.embeddings import Embeddings
17 | from langgraph.prebuilt import InjectedStore
18 | from langgraph.store.base import BaseStore
19 | from langchain_core.runnables import RunnableConfig
20 | from langchain_core.tools import tool
21 | 
22 | from langgraph.store.base import (
23 |     BaseStore,
24 |     GetOp,
25 |     IndexConfig,
26 |     Item,
27 |     ListNamespacesOp,
28 |     MatchCondition,
29 |     Op,
30 |     PutOp,
31 |     Result,
32 |     SearchItem,
33 |     SearchOp,
34 |     ensure_embeddings,
35 |     get_text_at_path,
36 |     tokenize_path,
37 | )
38 | 
39 | logger = logging.getLogger(__name__)
40 | 
41 | 
42 | @tool
43 | async def save_memory(memories: List[str], *, config: RunnableConfig, store: Annotated[BaseStore, InjectedStore()]) -> str:
44 |     '''Save the given memory for the current user. Do not save duplicate memories.'''
45 |     user_id = config.get("configurable", {}).get("user_id")
46 |     namespace = ("memories", user_id)
47 |     for memory in memories:
48 |         memory_id = uuid.uuid4().hex  # avoid shadowing the built-in id()
49 |         await store.aput(namespace, f"memory_{memory_id}", {"data": memory})
50 |     return f"Saved memories: {memories}"
51 | 
52 | async def get_memories(store: BaseStore, user_id: str = "myself", query: Optional[str] = None) -> List[str]:
53 |     namespace = ("memories", user_id)
54 |     memories = [m.value["data"] for m in await store.asearch(namespace, query=query)]
55 |     return memories
56 | 
57 | class SqliteStore(BaseStore):
58 |     """SQLite-based store with optional vector search.
59 | 
60 |     This store provides persistent storage using SQLite with optional vector search functionality.
61 | Data is stored in two tables: 62 | - items: Stores the actual key-value pairs with their metadata 63 | - vectors: Stores vector embeddings for semantic search 64 | 65 | Args: 66 | db_path (Union[str, Path]): Path to the SQLite database file 67 | index (Optional[IndexConfig]): Configuration for vector search functionality 68 | """ 69 | 70 | def __init__( 71 | self, db_path: Union[str, Path], *, index: Optional[IndexConfig] = None 72 | ) -> None: 73 | self.db_path = Path(db_path) 74 | self.db_path.parent.mkdir(parents=True, exist_ok=True) 75 | self.index_config = index 76 | if self.index_config: 77 | self.index_config = self.index_config.copy() 78 | self.embeddings: Optional[Embeddings] = ensure_embeddings( 79 | self.index_config.get("embed"), 80 | ) 81 | self.index_config["__tokenized_fields"] = [ 82 | (p, tokenize_path(p)) if p != "$" else (p, p) 83 | for p in (self.index_config.get("fields") or ["$"]) 84 | ] 85 | else: 86 | self.index_config = None 87 | self.embeddings = None 88 | 89 | async def _init_db(self, db: aiosqlite.Connection) -> None: 90 | """Initialize database schema. 91 | 92 | Args: 93 | db (aiosqlite.Connection): Database connection 94 | """ 95 | await db.execute(""" 96 | CREATE TABLE IF NOT EXISTS items ( 97 | namespace TEXT, 98 | key TEXT, 99 | value TEXT, 100 | created_at TEXT, 101 | updated_at TEXT, 102 | PRIMARY KEY (namespace, key) 103 | ) 104 | """) 105 | if self.index_config: 106 | await db.execute(""" 107 | CREATE TABLE IF NOT EXISTS vectors ( 108 | namespace TEXT, 109 | key TEXT, 110 | path TEXT, 111 | vector BLOB, 112 | PRIMARY KEY (namespace, key, path), 113 | FOREIGN KEY (namespace, key) REFERENCES items (namespace, key) 114 | ON DELETE CASCADE 115 | ) 116 | """) 117 | await db.commit() 118 | 119 | def batch(self, ops: List[Op]) -> List[Result]: 120 | """Execute a batch of operations synchronously. 121 | 122 | Args: 123 | ops (List[Op]): List of operations to execute 124 | 125 | Returns: 126 | List[Result]: Results of the operations 127 | """ 128 | raise NotImplementedError 129 | 130 | async def abatch(self, ops: List[Op]) -> List[Result]: 131 | """Execute a batch of operations asynchronously. 
132 | 133 | Args: 134 | ops (List[Op]): List of operations to execute 135 | 136 | Returns: 137 | List[Result]: Results of the operations 138 | """ 139 | async with aiosqlite.connect(self.db_path) as db: 140 | await self._init_db(db) 141 | results: List[Result] = [] 142 | put_ops: Dict[Tuple[Tuple[str, ...], str], PutOp] = {} 143 | search_ops: Dict[int, Tuple[SearchOp, List[Tuple[Item, List[List[float]]]]]] = {} 144 | 145 | for i, op in enumerate(ops): 146 | if isinstance(op, GetOp): 147 | item = await self._get_item(db, op.namespace, op.key) 148 | results.append(item) 149 | elif isinstance(op, SearchOp): 150 | candidates = await self._filter_items(db, op) 151 | search_ops[i] = (op, candidates) 152 | results.append(None) 153 | elif isinstance(op, ListNamespacesOp): 154 | namespaces = await self._list_namespaces(db, op) 155 | results.append(namespaces) 156 | elif isinstance(op, PutOp): 157 | put_ops[(op.namespace, op.key)] = op 158 | results.append(None) 159 | else: 160 | raise ValueError(f"Unknown operation type: {type(op)}") 161 | 162 | if search_ops: 163 | query_vectors = await self._embed_search_queries(search_ops) 164 | await self._batch_search(db, search_ops, query_vectors, results) 165 | 166 | to_embed = self._extract_texts(put_ops) 167 | if to_embed and self.index_config and self.embeddings: 168 | embeddings = await self.embeddings.aembed_documents(list(to_embed)) 169 | await self._insert_vectors(db, to_embed, embeddings) 170 | 171 | await self._apply_put_ops(db, put_ops) 172 | await db.commit() 173 | 174 | return results 175 | 176 | async def _get_item( 177 | self, db: aiosqlite.Connection, namespace: Tuple[str, ...], key: str 178 | ) -> Optional[Item]: 179 | """Get an item from the database. 180 | 181 | Args: 182 | db (aiosqlite.Connection): Database connection 183 | namespace (Tuple[str, ...]): Item namespace 184 | key (str): Item key 185 | 186 | Returns: 187 | Optional[Item]: The item if found, None otherwise 188 | """ 189 | async with db.execute( 190 | "SELECT value, created_at, updated_at FROM items WHERE namespace = ? AND key = ?", 191 | ("/".join(namespace), key) 192 | ) as cursor: 193 | row = await cursor.fetchone() 194 | if row: 195 | return Item( 196 | namespace=namespace, 197 | key=key, 198 | value=json.loads(row[0]), 199 | created_at=datetime.fromisoformat(row[1]), 200 | updated_at=datetime.fromisoformat(row[2]) 201 | ) 202 | return None 203 | 204 | async def _filter_items( 205 | self, db: aiosqlite.Connection, op: SearchOp 206 | ) -> List[Tuple[Item, List[List[float]]]]: 207 | """Filter items by namespace and filter function. 208 | 209 | Args: 210 | db (aiosqlite.Connection): Database connection 211 | op (SearchOp): Search operation 212 | 213 | Returns: 214 | List[Tuple[Item, List[List[float]]]]: Filtered items with their vectors 215 | """ 216 | namespace_prefix = "/".join(op.namespace_prefix) 217 | query = """ 218 | SELECT namespace, key, value, created_at, updated_at 219 | FROM items 220 | WHERE namespace LIKE ? 
221 | """ 222 | params = [f"{namespace_prefix}%"] 223 | 224 | async with db.execute(query, params) as cursor: 225 | rows = await cursor.fetchall() 226 | filtered = [] 227 | for row in rows: 228 | item = Item( 229 | namespace=tuple(row[0].split("/")), 230 | key=row[1], 231 | value=json.loads(row[2]), 232 | created_at=datetime.fromisoformat(row[3]), 233 | updated_at=datetime.fromisoformat(row[4]) 234 | ) 235 | if not op.filter or all( 236 | self._compare_values(item.value.get(key), filter_value) 237 | for key, filter_value in op.filter.items() 238 | ): 239 | if op.query and self.index_config: 240 | vectors = await self._get_vectors(db, item.namespace, item.key) 241 | filtered.append((item, vectors)) 242 | else: 243 | filtered.append((item, [])) 244 | return filtered 245 | 246 | async def _get_vectors( 247 | self, db: aiosqlite.Connection, namespace: Tuple[str, ...], key: str 248 | ) -> List[List[float]]: 249 | """Get vectors for an item. 250 | 251 | Args: 252 | db (aiosqlite.Connection): Database connection 253 | namespace (Tuple[str, ...]): Item namespace 254 | key (str): Item key 255 | 256 | Returns: 257 | List[List[float]]: List of vectors 258 | """ 259 | async with db.execute( 260 | "SELECT vector FROM vectors WHERE namespace = ? AND key = ?", 261 | ("/".join(namespace), key) 262 | ) as cursor: 263 | rows = await cursor.fetchall() 264 | return [json.loads(row[0]) for row in rows] 265 | 266 | async def _list_namespaces( 267 | self, db: aiosqlite.Connection, op: ListNamespacesOp 268 | ) -> List[Tuple[str, ...]]: 269 | """List namespaces matching the conditions. 270 | 271 | Args: 272 | db (aiosqlite.Connection): Database connection 273 | op (ListNamespacesOp): List namespaces operation 274 | 275 | Returns: 276 | List[Tuple[str, ...]]: List of matching namespaces 277 | """ 278 | async with db.execute("SELECT DISTINCT namespace FROM items") as cursor: 279 | rows = await cursor.fetchall() 280 | namespaces = [tuple(ns.split("/")) for (ns,) in rows] 281 | 282 | if op.match_conditions: 283 | namespaces = [ 284 | ns for ns in namespaces 285 | if all(self._does_match(condition, ns) for condition in op.match_conditions) 286 | ] 287 | 288 | if op.max_depth is not None: 289 | namespaces = sorted({ns[:op.max_depth] for ns in namespaces}) 290 | else: 291 | namespaces = sorted(namespaces) 292 | 293 | return namespaces[op.offset:op.offset + op.limit] 294 | 295 | async def _embed_search_queries( 296 | self, 297 | search_ops: Dict[int, Tuple[SearchOp, List[Tuple[Item, List[List[float]]]]]], 298 | ) -> Dict[str, List[float]]: 299 | """Embed search queries. 300 | 301 | Args: 302 | search_ops (Dict[int, Tuple[SearchOp, List[Tuple[Item, List[List[float]]]]]]): Search operations 303 | 304 | Returns: 305 | Dict[str, List[float]]: Query embeddings 306 | """ 307 | query_vectors = {} 308 | if self.index_config and self.embeddings and search_ops: 309 | queries = {op.query for (op, _) in search_ops.values() if op.query} 310 | if queries: 311 | embeddings = await self.embeddings.aembed_documents(list(queries)) 312 | query_vectors = dict(zip(queries, embeddings)) 313 | return query_vectors 314 | 315 | async def _batch_search( 316 | self, 317 | db: aiosqlite.Connection, 318 | ops: Dict[int, Tuple[SearchOp, List[Tuple[Item, List[List[float]]]]]], 319 | query_vectors: Dict[str, List[float]], 320 | results: List[Result], 321 | ) -> None: 322 | """Perform batch similarity search. 
323 | 324 | Args: 325 | db (aiosqlite.Connection): Database connection 326 | ops (Dict[int, Tuple[SearchOp, List[Tuple[Item, List[List[float]]]]]]): Search operations 327 | query_vectors (Dict[str, List[float]]): Query embeddings 328 | results (List[Result]): Results list to update 329 | """ 330 | for i, (op, candidates) in ops.items(): 331 | if not candidates: 332 | results[i] = [] 333 | continue 334 | 335 | if op.query and query_vectors: 336 | query_vector = query_vectors[op.query] 337 | flat_items, flat_vectors = [], [] 338 | scoreless = [] 339 | 340 | for item, vectors in candidates: 341 | for vector in vectors: 342 | flat_items.append(item) 343 | flat_vectors.append(vector) 344 | if not vectors: 345 | scoreless.append(item) 346 | 347 | scores = self._cosine_similarity(query_vector, flat_vectors) 348 | sorted_results = sorted( 349 | zip(scores, flat_items), key=lambda x: x[0], reverse=True 350 | ) 351 | 352 | seen = set() 353 | kept = [] 354 | for score, item in sorted_results: 355 | key = (item.namespace, item.key) 356 | if key in seen: 357 | continue 358 | ix = len(seen) 359 | seen.add(key) 360 | if ix >= op.offset + op.limit: 361 | break 362 | if ix < op.offset: 363 | continue 364 | kept.append((score, item)) 365 | 366 | if scoreless and len(kept) < op.limit: 367 | kept.extend( 368 | (None, item) for item in scoreless[:op.limit - len(kept)] 369 | ) 370 | 371 | results[i] = [ 372 | SearchItem( 373 | namespace=item.namespace, 374 | key=item.key, 375 | value=item.value, 376 | created_at=item.created_at, 377 | updated_at=item.updated_at, 378 | score=float(score) if score is not None else None, 379 | ) 380 | for score, item in kept 381 | ] 382 | else: 383 | results[i] = [ 384 | SearchItem( 385 | namespace=item.namespace, 386 | key=item.key, 387 | value=item.value, 388 | created_at=item.created_at, 389 | updated_at=item.updated_at, 390 | ) 391 | for (item, _) in candidates[op.offset:op.offset + op.limit] 392 | ] 393 | 394 | async def _apply_put_ops( 395 | self, db: aiosqlite.Connection, put_ops: Dict[Tuple[Tuple[str, ...], str], PutOp] 396 | ) -> None: 397 | """Apply put operations to the database. 398 | 399 | Args: 400 | db (aiosqlite.Connection): Database connection 401 | put_ops (Dict[Tuple[Tuple[str, ...], str], PutOp]): Put operations 402 | """ 403 | for (namespace, key), op in put_ops.items(): 404 | if op.value is None: 405 | await db.execute( 406 | "DELETE FROM items WHERE namespace = ? AND key = ?", 407 | ("/".join(namespace), key) 408 | ) 409 | else: 410 | now = datetime.now(timezone.utc) 411 | await db.execute( 412 | """ 413 | INSERT INTO items (namespace, key, value, created_at, updated_at) 414 | VALUES (?, ?, ?, ?, ?) 415 | ON CONFLICT (namespace, key) DO UPDATE SET 416 | value = excluded.value, 417 | updated_at = excluded.updated_at 418 | """, 419 | ( 420 | "/".join(namespace), 421 | key, 422 | json.dumps(op.value), 423 | now.isoformat(), 424 | now.isoformat(), 425 | ) 426 | ) 427 | 428 | async def _insert_vectors( 429 | self, 430 | db: aiosqlite.Connection, 431 | to_embed: Dict[str, List[Tuple[Tuple[str, ...], str, str]]], 432 | embeddings: List[List[float]], 433 | ) -> None: 434 | """Insert vector embeddings into the database. 
435 | 436 | Args: 437 | db (aiosqlite.Connection): Database connection 438 | to_embed (Dict[str, List[Tuple[Tuple[str, ...], str, str]]]): Texts to embed 439 | embeddings (List[List[float]]): Vector embeddings 440 | """ 441 | indices = [index for indices in to_embed.values() for index in indices] 442 | if len(indices) != len(embeddings): 443 | raise ValueError( 444 | f"Number of embeddings ({len(embeddings)}) does not" 445 | f" match number of indices ({len(indices)})" 446 | ) 447 | 448 | for embedding, (ns, key, path) in zip(embeddings, indices): 449 | await db.execute( 450 | """ 451 | INSERT INTO vectors (namespace, key, path, vector) 452 | VALUES (?, ?, ?, ?) 453 | ON CONFLICT (namespace, key, path) DO UPDATE SET 454 | vector = excluded.vector 455 | """, 456 | ("/".join(ns), key, path, json.dumps(embedding)) 457 | ) 458 | 459 | def _extract_texts( 460 | self, put_ops: Dict[Tuple[Tuple[str, ...], str], PutOp] 461 | ) -> Dict[str, List[Tuple[Tuple[str, ...], str, str]]]: 462 | """Extract texts for embedding from put operations. 463 | 464 | Args: 465 | put_ops (Dict[Tuple[Tuple[str, ...], str], PutOp]): Put operations 466 | 467 | Returns: 468 | Dict[str, List[Tuple[Tuple[str, ...], str, str]]]: Texts to embed 469 | """ 470 | if put_ops and self.index_config and self.embeddings: 471 | to_embed = {} 472 | for op in put_ops.values(): 473 | if op.value is not None and op.index is not False: 474 | if op.index is None: 475 | paths = self.index_config["__tokenized_fields"] 476 | else: 477 | paths = [(ix, tokenize_path(ix)) for ix in op.index] 478 | 479 | for path, field in paths: 480 | texts = get_text_at_path(op.value, field) 481 | if texts: 482 | if len(texts) > 1: 483 | for i, text in enumerate(texts): 484 | to_embed.setdefault(text, []).append( 485 | (op.namespace, op.key, f"{path}.{i}") 486 | ) 487 | else: 488 | to_embed.setdefault(texts[0], []).append( 489 | (op.namespace, op.key, path) 490 | ) 491 | return to_embed 492 | return {} 493 | 494 | def _compare_values(self, item_value: Any, filter_value: Any) -> bool: 495 | """Compare values in a JSONB-like way. 496 | 497 | Args: 498 | item_value (Any): Value from the item 499 | filter_value (Any): Value from the filter 500 | 501 | Returns: 502 | bool: Whether the values match 503 | """ 504 | if isinstance(filter_value, dict): 505 | if any(k.startswith("$") for k in filter_value): 506 | return all( 507 | self._apply_operator(item_value, op_key, op_value) 508 | for op_key, op_value in filter_value.items() 509 | ) 510 | if not isinstance(item_value, dict): 511 | return False 512 | return all( 513 | self._compare_values(item_value.get(k), v) 514 | for k, v in filter_value.items() 515 | ) 516 | elif isinstance(filter_value, (list, tuple)): 517 | return ( 518 | isinstance(item_value, (list, tuple)) 519 | and len(item_value) == len(filter_value) 520 | and all( 521 | self._compare_values(iv, fv) 522 | for iv, fv in zip(item_value, filter_value) 523 | ) 524 | ) 525 | else: 526 | return item_value == filter_value 527 | 528 | def _apply_operator(self, value: Any, operator: str, op_value: Any) -> bool: 529 | """Apply a comparison operator. 
530 | 
531 |         Args:
532 |             value (Any): Value to compare
533 |             operator (str): Operator to apply
534 |             op_value (Any): Value to compare against
535 | 
536 |         Returns:
537 |             bool: Result of the comparison
538 |         """
539 |         if operator == "$eq":
540 |             return value == op_value
541 |         elif operator == "$gt":
542 |             return float(value) > float(op_value)
543 |         elif operator == "$gte":
544 |             return float(value) >= float(op_value)
545 |         elif operator == "$lt":
546 |             return float(value) < float(op_value)
547 |         elif operator == "$lte":
548 |             return float(value) <= float(op_value)
549 |         elif operator == "$ne":
550 |             return value != op_value
551 |         else:
552 |             raise ValueError(f"Unsupported operator: {operator}")
553 | 
554 |     def _does_match(self, match_condition: MatchCondition, key: Tuple[str, ...]) -> bool:
555 |         """Check if a namespace key matches a match condition.
556 | 
557 |         Args:
558 |             match_condition (MatchCondition): Match condition to check
559 |             key (Tuple[str, ...]): Namespace key to check
560 | 
561 |         Returns:
562 |             bool: Whether the key matches the condition
563 |         """
564 |         match_type = match_condition.match_type
565 |         path = match_condition.path
566 | 
567 |         if len(key) < len(path):
568 |             return False
569 | 
570 |         if match_type == "prefix":
571 |             for k_elem, p_elem in zip(key, path):
572 |                 if p_elem == "*":
573 |                     continue
574 |                 if k_elem != p_elem:
575 |                     return False
576 |             return True
577 |         elif match_type == "suffix":
578 |             for k_elem, p_elem in zip(reversed(key), reversed(path)):
579 |                 if p_elem == "*":
580 |                     continue
581 |                 if k_elem != p_elem:
582 |                     return False
583 |             return True
584 |         else:
585 |             raise ValueError(f"Unsupported match type: {match_type}")
586 | 
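To make the operator semantics above concrete: a filter such as {"score": {"$gte": 0.5, "$lt": 1.0}} matches items whose "score" falls in the half-open interval [0.5, 1.0). The standalone sketch below mirrors the logic of _compare_values and _apply_operator; the helper name and sample values are illustrative only, not part of the module.

# Illustrative sketch, not part of the module.
def check(value, operator, bound):
    ops = {
        "$eq": lambda: value == bound,
        "$ne": lambda: value != bound,
        "$gt": lambda: float(value) > float(bound),
        "$gte": lambda: float(value) >= float(bound),
        "$lt": lambda: float(value) < float(bound),
        "$lte": lambda: float(value) <= float(bound),
    }
    return ops[operator]()

item = {"score": 0.72}
flt = {"$gte": 0.5, "$lt": 1.0}
print(all(check(item["score"], op, bound) for op, bound in flt.items()))  # True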
587 |     def _cosine_similarity(self, X: List[float], Y: List[List[float]]) -> List[float]:
588 |         """Compute cosine similarity between a vector X and a matrix Y.
589 | 
590 |         Args:
591 |             X (List[float]): Query vector
592 |             Y (List[List[float]]): Matrix of vectors to compare against
593 | 
594 |         Returns:
595 |             List[float]: Cosine similarities
596 |         """
597 |         if not Y:
598 |             return []
599 | 
600 |         try:
601 |             import numpy as np
602 |             X_arr = np.array(X) if not isinstance(X, np.ndarray) else X
603 |             Y_arr = np.array(Y) if not isinstance(Y, np.ndarray) else Y
604 |             X_norm = np.linalg.norm(X_arr)
605 |             Y_norm = np.linalg.norm(Y_arr, axis=1)
606 | 
607 |             mask = (Y_norm != 0) & (X_norm != 0)  # guard against zero-norm vectors on either side
608 |             similarities = np.zeros_like(Y_norm)
609 |             similarities[mask] = np.dot(Y_arr[mask], X_arr) / (Y_norm[mask] * X_norm)
610 |             return similarities.tolist()
611 |         except ImportError:
612 |             logger.warning(
613 |                 "NumPy not found. Using pure Python implementation for vector operations. "
614 |                 "This may significantly impact performance. Consider installing NumPy: "
615 |                 "pip install numpy"
616 |             )
617 |             similarities = []
618 |             for y in Y:
619 |                 dot_product = sum(a * b for a, b in zip(X, y))
620 |                 norm1 = sum(a * a for a in X) ** 0.5
621 |                 norm2 = sum(a * a for a in y) ** 0.5
622 |                 similarity = (
623 |                     dot_product / (norm1 * norm2) if norm1 > 0 and norm2 > 0 else 0.0
624 |                 )
625 |                 similarities.append(similarity)
626 |             return similarities
--------------------------------------------------------------------------------
/src/mcp_client_cli/output.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any
3 | from langchain_core.messages import BaseMessage, AIMessage, AIMessageChunk, ToolMessage
4 | from rich.console import Console, ConsoleDimensions
5 | from rich.live import Live
6 | from rich.markdown import Markdown
7 | from rich.prompt import Confirm
8 | 
9 | class OutputHandler:
10 |     def __init__(self, text_only: bool = False, only_last_message: bool = False):
11 |         self.console = Console()
12 |         self.text_only = text_only
13 |         self.only_last_message = only_last_message
14 |         self.last_message = ""
15 |         if self.text_only:
16 |             self.md = ""
17 |         else:
18 |             self.md = "Thinking...\n"
19 |         self._live = None
20 | 
21 |     def start(self):
22 |         if not self.text_only:
23 |             self._live = Live(
24 |                 Markdown(self.md),
25 |                 vertical_overflow="visible",
26 |                 screen=True,
27 |                 console=self.console
28 |             )
29 |             self._live.start()
30 | 
31 |     def update(self, chunk: Any):
32 |         previous_md = self.md
33 |         self.md = self._parse_chunk(chunk, previous_md)
34 |         if self.only_last_message and self.text_only:
35 |             # when only_last_message, we print everything at once in finish()
36 |             return
37 |         if self.text_only:
38 |             # Print only what this chunk appended, so nothing is emitted twice.
39 |             self.console.print(self.md[len(previous_md):], end="")
40 |         else:
41 |             if self.md.startswith("Thinking...") and not self.md.removeprefix("Thinking...").isspace():
42 |                 self.md = self.md.removeprefix("Thinking...").strip()
43 |             partial_md = self._truncate_md_to_fit(self.md, self.console.size)
44 |             self._live.update(Markdown(partial_md), refresh=True)
45 | 
46 |     def update_error(self, error: Exception):
47 |         import traceback
48 |         error_md = f"Error: {error}\n\nStack trace:\n```\n{traceback.format_exc()}```"
49 |         self.md += error_md
50 |         if self.only_last_message:
51 |             self.console.print(error_md)
52 |             return
53 |         if self.text_only:
54 |             self.console.print_exception()
55 |         else:
56 |             partial_md = self._truncate_md_to_fit(self.md, self.console.size)
57 |             self._live.update(Markdown(partial_md), refresh=True)
58 | 
59 |     def stop(self):
60 |         if not self.text_only and self._live:
61 |             self._live.stop()
62 | 
63 |     def confirm_tool_call(self, config: dict, chunk: Any) -> bool:
64 |         if not self._is_tool_call_requested(chunk, config):
65 |             return True
66 | 
67 |         self.stop()
68 |         is_confirmed = self._ask_tool_call_confirmation()
69 |         if not is_confirmed:
70 |             self.md += "# Tool call denied"
71 |             return False
72 | 
73 |         if not self.text_only:
74 |             self.start()
75 |         return True
76 | 
77 |     def finish(self):
78 |         self.stop()
79 |         if self.only_last_message:
80 |             self.console.print(self.last_message)
81 |             return
82 |         if not self.text_only:
83 |             self.console.clear()
84 |             self.console.print(Markdown(self.md))
85 | 
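The handler above is driven by a simple loop: start the live display, feed it chunks, ask for confirmation before sensitive tool calls, and always finish. A minimal sketch, assuming a hypothetical async chunk source named agent_stream and a config dict carrying a tools_requires_confirmation list (neither is defined in this module):

# Hypothetical driver loop; `agent_stream` and `config` are stand-ins.
async def run_agent(agent_stream, config: dict):
    output = OutputHandler(text_only=False, only_last_message=False)
    output.start()
    try:
        async for chunk in agent_stream:
            if not output.confirm_tool_call(config, chunk):
                break  # user denied the tool call
            output.update(chunk)
    except Exception as e:
        output.update_error(e)
    finally:
        output.finish()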
86 |     def _parse_chunk(self, chunk: Any, md: str = "") -> str:
87 |         """
88 |         Parse a chunk of the agent response and append it to the markdown.
89 |         The response is streamed as it is received.
90 |         """
91 |         # If this is a message chunk
92 |         if isinstance(chunk, tuple) and chunk[0] == "messages":
93 |             message_chunk = chunk[1][0]  # Get the message content
94 |             if isinstance(message_chunk, AIMessageChunk):
95 |                 content = message_chunk.content
96 |                 # content may be a plain string or a list of content blocks
97 |                 if isinstance(content, str):
98 |                     text = content
99 |                 elif isinstance(content, list) and len(content) > 0 and isinstance(content[0], dict) and "text" in content[0]:
100 |                     text = content[0]["text"]
101 |                 else:
102 |                     text = ""
103 |                 self.last_message += text
104 |                 md += text
105 |         # If this is a final value
106 |         elif isinstance(chunk, dict) and "messages" in chunk:
107 |             # Print a newline after the complete message
108 |             md += "\n"
109 |             self.last_message = ""
110 |         elif isinstance(chunk, tuple) and chunk[0] == "values":
111 |             message: BaseMessage = chunk[1]['messages'][-1]
112 |             if isinstance(message, AIMessage) and message.tool_calls:
113 |                 md += "\n\n### Tool Calls:"
114 |                 for tc in message.tool_calls:
115 |                     lines = [
116 |                         f" {tc.get('name', 'Tool')}",
117 |                     ]
118 |                     if tc.get("error"):
119 |                         lines.append("```")
120 |                         lines.append(f"Error: {tc.get('error')}")
121 |                         lines.append("```")
122 | 
123 |                     lines.append("Args:")
124 |                     lines.append("```")
125 |                     args = tc.get("args")
126 |                     if isinstance(args, str):
127 |                         lines.append(args)
128 |                     elif isinstance(args, dict):
129 |                         for arg, value in args.items():
130 |                             lines.append(f"{arg}: {value}")
131 |                     lines.append("```\n")
132 |                     md += "\n".join(lines)
133 |                 self.last_message = ""
134 |             elif isinstance(message, ToolMessage) and message.status != "success":
135 |                 md += "Failed call with error:"
136 |                 md += f"\n\n{message.content}"
137 |                 md += "\n"
138 |         return md
139 | 
140 |     def _truncate_md_to_fit(self, md: str, dimensions: ConsoleDimensions) -> str:
141 |         """
142 |         Truncate the markdown to fit the console size, with a few lines of safety margin.
143 |         """
144 |         lines = md.splitlines()
145 |         max_lines = dimensions.height - 3  # Safety margin
146 |         fitted_lines = []
147 |         current_height = 0
148 |         code_block_count = 0
149 | 
150 |         for line in reversed(lines):
151 |             # Calculate wrapped line height, rounding up for safety
152 |             line_height = 1 + len(line) // dimensions.width
153 | 
154 |             if current_height + line_height > max_lines:
155 |                 # If we're breaking in the middle of code blocks, add closing ```
156 |                 if code_block_count % 2 == 1:
157 |                     fitted_lines.insert(0, "```")
158 |                 break
159 | 
160 |             fitted_lines.insert(0, line)
161 |             current_height += line_height
162 | 
163 |             # Track code block markers
164 |             if line.strip() == "```":
165 |                 code_block_count += 1
166 | 
167 |         return '\n'.join(fitted_lines) if fitted_lines else ''
168 | 
169 |     def _is_tool_call_requested(self, chunk: Any, config: dict) -> bool:
170 |         """
171 |         Check if the chunk contains a tool call request that requires confirmation.
172 |         """
173 |         if isinstance(chunk, tuple) and chunk[0] == "values":
174 |             if len(chunk) > 1 and isinstance(chunk[1], dict) and "messages" in chunk[1]:
175 |                 message = chunk[1]['messages'][-1]
176 |                 if isinstance(message, AIMessage) and message.tool_calls:
177 |                     for tc in message.tool_calls:
178 |                         if tc.get("name") in config["tools_requires_confirmation"]:
179 |                             return True
180 |         return False
181 | 
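The truncation helper above keeps only as many trailing lines as fit the console, re-closing any code fence it cuts through. A rough doctest-style sketch of that behavior on a small fake console (the sizes and sample text are arbitrary; this pokes a private method purely for illustration):

from rich.console import ConsoleDimensions

handler = OutputHandler(text_only=True)
md = "\n".join(f"line {i}" for i in range(20))
print(handler._truncate_md_to_fit(md, ConsoleDimensions(width=80, height=8)))
# prints at most height - 3 = 5 of the trailing lines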
182 |     def _ask_tool_call_confirmation(self) -> bool:
183 |         """
184 |         Ask the user for confirmation to run a tool call.
185 |         """
186 |         self.console.set_alt_screen(True)
187 |         self.console.print(Markdown(self.md))
188 |         self.console.print("\n\n")
189 |         is_tool_call_confirmed = Confirm.ask("Confirm tool call?", console=self.console)
190 |         self.console.set_alt_screen(False)
191 |         return is_tool_call_confirmed
--------------------------------------------------------------------------------
/src/mcp_client_cli/prompt.py:
--------------------------------------------------------------------------------
1 | prompt_templates = {
2 |     "review": "You are an expert software engineer with good taste in how code should be written. Assume the current working directory is a git repository. Get the current git status and diff. Review the change and provide feedback.",
3 |     "commit": "You are an expert software engineer. Assume the current working directory is a git repository. Get the current git status and diff. Reason about why the change was made. Then commit with a concise, descriptive message that follows the Conventional Commits specification.",
4 |     "yt": "Retell and summarize the video in a concise and descriptive manner. Use bullet points and markdown formatting. The url is {url}",
5 | }
--------------------------------------------------------------------------------
/src/mcp_client_cli/storage.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from pathlib import Path
3 | from typing import Optional, List
4 | from mcp import StdioServerParameters, types
5 | import json
6 | import aiosqlite
7 | import uuid
8 | 
9 | from .const import *
10 | 
11 | def get_cached_tools(server_param: StdioServerParameters) -> Optional[List[types.Tool]]:
12 |     """Retrieve cached tools if available and not expired.
13 | 
14 |     Args:
15 |         server_param (StdioServerParameters): The server parameters to identify the cache.
16 | 
17 |     Returns:
18 |         Optional[List[types.Tool]]: A list of tools if the cache is available and not expired, otherwise None.
19 |     """
20 |     CACHE_DIR.mkdir(parents=True, exist_ok=True)
21 |     cache_key = f"{server_param.command}-{'-'.join(server_param.args)}".replace("/", "-")
22 |     cache_file = CACHE_DIR / f"{cache_key}.json"
23 | 
24 |     if not cache_file.exists():
25 |         return None
26 | 
27 |     cache_data = json.loads(cache_file.read_text())
28 |     cached_time = datetime.fromisoformat(cache_data["cached_at"])
29 | 
30 |     if datetime.now() - cached_time > timedelta(hours=CACHE_EXPIRY_HOURS):
31 |         return None
32 | 
33 |     return [types.Tool(**tool) for tool in cache_data["tools"]]
34 | 
35 | 
36 | def save_tools_cache(server_param: StdioServerParameters, tools: List[types.Tool]) -> None:
37 |     """Save tools to the cache.
38 | 
39 |     Args:
40 |         server_param (StdioServerParameters): The server parameters to identify the cache.
41 |         tools (List[types.Tool]): The list of tools to be cached.
42 |     """
43 |     cache_key = f"{server_param.command}-{'-'.join(server_param.args)}".replace("/", "-")
44 |     cache_file = CACHE_DIR / f"{cache_key}.json"
45 | 
46 |     cache_data = {
47 |         "cached_at": datetime.now().isoformat(),
48 |         "tools": [tool.model_dump() for tool in tools]
49 |     }
50 |     cache_file.write_text(json.dumps(cache_data))
51 | 
52 | 
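Each server's tool list is cached in its own JSON file whose name is derived from the launch command and arguments, with path separators flattened to dashes. A small sketch of where a given server's cache would land (the command and arguments below are placeholders, not a server this project ships):

from mcp import StdioServerParameters

param = StdioServerParameters(command="uvx", args=["mcp-server-fetch"])
cache_key = f"{param.command}-{'-'.join(param.args)}".replace("/", "-")
print(CACHE_DIR / f"{cache_key}.json")  # e.g. <CACHE_DIR>/uvx-mcp-server-fetch.json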
53 | class ConversationManager:
54 |     """Manages conversation persistence in a SQLite database."""
55 | 
56 |     def __init__(self, db_path: Path):
57 |         self.db_path = db_path
58 |         self.db_path.parent.mkdir(parents=True, exist_ok=True)
59 | 
60 |     async def _init_db(self, db) -> None:
61 |         """Initialize the database schema.
62 | 
63 |         Args:
64 |             db: The database connection object.
65 |         """
66 |         await db.execute("""
67 |             CREATE TABLE IF NOT EXISTS last_conversation (
68 |                 id INTEGER PRIMARY KEY,
69 |                 thread_id TEXT NOT NULL
70 |             )
71 |         """)
72 |         await db.commit()
73 | 
74 |     async def get_last_id(self) -> str:
75 |         """Get the thread ID of the last conversation.
76 | 
77 |         Returns:
78 |             str: The thread ID of the last conversation, or a new UUID if no conversation exists.
79 |         """
80 |         async with aiosqlite.connect(self.db_path) as db:
81 |             await self._init_db(db)
82 |             async with db.execute("SELECT thread_id FROM last_conversation LIMIT 1") as cursor:
83 |                 row = await cursor.fetchone()
84 |                 return row[0] if row else uuid.uuid4().hex
85 | 
86 |     async def save_id(self, thread_id: str, db=None) -> None:
87 |         """Save a thread ID as the last conversation.
88 | 
89 |         Args:
90 |             thread_id (str): The thread ID to save.
91 |             db: The database connection object (optional).
92 |         """
93 |         if db is None:
94 |             async with aiosqlite.connect(self.db_path) as db:
95 |                 await self._save_id(db, thread_id)
96 |         else:
97 |             await self._save_id(db, thread_id)
98 | 
99 |     async def _save_id(self, db, thread_id: str) -> None:
100 |         """Internal method to save a thread ID.
101 | 
102 |         Args:
103 |             db: The database connection object.
104 |             thread_id (str): The thread ID to save.
105 |         """
106 |         async with db.cursor() as cursor:
107 |             await self._init_db(db)
108 |             await cursor.execute("DELETE FROM last_conversation")
109 |             await cursor.execute(
110 |                 "INSERT INTO last_conversation (thread_id) VALUES (?)",
111 |                 (thread_id,)
112 |             )
113 |             await db.commit()
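ConversationManager is meant to be used around a chat session: read the last thread ID (or get a fresh one) before the conversation, and persist it afterwards. A minimal usage sketch with a hypothetical database path:

import asyncio
from pathlib import Path

async def demo():
    manager = ConversationManager(Path.home() / ".cache" / "mcp-client-cli" / "conversations.db")
    thread_id = await manager.get_last_id()  # previous thread, or a fresh UUID hex
    # ... run the conversation under thread_id ...
    await manager.save_id(thread_id)

asyncio.run(demo())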
--------------------------------------------------------------------------------
/src/mcp_client_cli/tool.py:
--------------------------------------------------------------------------------
1 | from typing import List, Type, Optional
2 | from pydantic import BaseModel
3 | from langchain_core.tools import BaseTool, BaseToolkit, ToolException
4 | from mcp import StdioServerParameters, types, ClientSession
5 | from mcp.client.stdio import stdio_client
6 | import pydantic
7 | from pydantic_core import to_json
8 | from jsonschema_pydantic import jsonschema_to_pydantic
9 | import asyncio
10 | 
11 | from .storage import *
12 | 
13 | class McpServerConfig(BaseModel):
14 |     """Configuration for an MCP server.
15 | 
16 |     This class represents the configuration needed to connect to and identify an MCP server,
17 |     containing both the server's name and its connection parameters.
18 | 
19 |     Attributes:
20 |         server_name (str): The name identifier for this MCP server
21 |         server_param (StdioServerParameters): Connection parameters for the server, including
22 |             command, arguments and environment variables
23 |         exclude_tools (list[str]): List of tool names to exclude from this server
24 |     """
25 | 
26 |     server_name: str
27 |     server_param: StdioServerParameters
28 |     exclude_tools: list[str] = []
29 | 
30 | class McpToolkit(BaseToolkit):
31 |     name: str
32 |     server_param: StdioServerParameters
33 |     exclude_tools: list[str] = []
34 |     _session: Optional[ClientSession] = None
35 |     _tools: List[BaseTool] = []
36 |     _client = None
37 |     _init_lock: Optional[asyncio.Lock] = None
38 | 
39 |     model_config = pydantic.ConfigDict(arbitrary_types_allowed=True)
40 | 
41 |     def __init__(self, **data):
42 |         super().__init__(**data)
43 |         self._init_lock = asyncio.Lock()
44 | 
45 |     async def _start_session(self):
46 |         async with self._init_lock:
47 |             if self._session:
48 |                 return self._session
49 | 
50 |             self._client = stdio_client(self.server_param)
51 |             read, write = await self._client.__aenter__()
52 |             self._session = ClientSession(read, write)
53 |             await self._session.__aenter__()
54 |             await self._session.initialize()
55 |             return self._session
56 | 
57 |     async def initialize(self, force_refresh: bool = False):
58 |         if self._tools and not force_refresh:
59 |             return
60 | 
61 |         cached_tools = get_cached_tools(self.server_param)
62 |         if cached_tools and not force_refresh:
63 |             for tool in cached_tools:
64 |                 if tool.name in self.exclude_tools:
65 |                     continue
66 |                 self._tools.append(create_langchain_tool(tool, self._session, self))
67 |             return
68 | 
69 |         try:
70 |             await self._start_session()
71 |             tools: types.ListToolsResult = await self._session.list_tools()
72 |             save_tools_cache(self.server_param, tools.tools)
73 |             for tool in tools.tools:
74 |                 if tool.name in self.exclude_tools:
75 |                     continue
76 |                 self._tools.append(create_langchain_tool(tool, self._session, self))
77 |         except Exception as e:
78 |             print(f"Error gathering tools for {self.server_param.command} {' '.join(self.server_param.args)}: {e}")
79 |             raise
80 | 
81 |     async def close(self):
82 |         try:
83 |             if self._session:
84 |                 try:
85 |                     # Add timeout to prevent hanging
86 |                     async with asyncio.timeout(2.0):
87 |                         # Create a new task to handle cleanup in the correct context
88 |                         await asyncio.create_task(self._session.__aexit__(None, None, None))
89 |                 except asyncio.TimeoutError:
90 |                     pass
91 |                 except Exception:
92 |                     pass
93 |         finally:
94 |             try:
95 |                 if self._client:
96 |                     try:
97 |                         # Add timeout to prevent hanging
98 |                         async with asyncio.timeout(2.0):
99 |                             # Create a new task to handle cleanup in the correct context
100 |                             await asyncio.create_task(self._client.__aexit__(None, None, None))
101 |                     except asyncio.TimeoutError:
102 |                         pass
103 |                     except Exception:
104 |                         pass
105 |             except Exception:
106 |                 pass
107 | 
108 |     def get_tools(self) -> List[BaseTool]:
109 |         return self._tools
110 | 
111 | 
112 | class McpTool(BaseTool):
113 |     toolkit_name: str
114 |     name: str
115 |     description: str
116 |     args_schema: Type[BaseModel]
117 |     session: Optional[ClientSession]
118 |     toolkit: McpToolkit
119 | 
120 |     handle_tool_error: bool = True
121 | 
122 |     def _run(self, **kwargs):
123 |         raise NotImplementedError("Only async operations are supported")
124 | 
125 |     async def _arun(self, **kwargs):
126 |         if not self.session:
127 |             self.session = await self.toolkit._start_session()
128 | 
129 |         result = await self.session.call_tool(self.name, arguments=kwargs)
130 |         content = to_json(result.content).decode()
131 |         if result.isError:
132 |             raise ToolException(content)
133 |         return content
134 | 
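Wiring a toolkit starts from a McpServerConfig. A hedged example of defining one by hand follows; the command, arguments, environment, and excluded tool name are placeholders for illustration, not servers or tools this project ships:

# Hypothetical server definition.
fetch_server = McpServerConfig(
    server_name="fetch",
    server_param=StdioServerParameters(
        command="uvx",
        args=["mcp-server-fetch"],
        env={},
    ),
    exclude_tools=["fetch_raw"],  # hypothetical tool name to skip
)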
135 | def create_langchain_tool(
136 |     tool_schema: types.Tool,
137 |     session: ClientSession,
138 |     toolkit: McpToolkit,
139 | ) -> BaseTool:
140 |     """Create a LangChain tool from an MCP tool schema.
141 | 
142 |     Args:
143 |         tool_schema (types.Tool): The MCP tool schema.
144 |         session (ClientSession): The session for the tool.
145 |         toolkit (McpToolkit): The toolkit that owns this tool.
146 | 
147 |     Returns:
148 |         BaseTool: The created LangChain tool.
149 |     """
150 |     return McpTool(
151 |         name=tool_schema.name,
152 |         description=tool_schema.description,
153 |         args_schema=jsonschema_to_pydantic(tool_schema.inputSchema),
154 |         session=session,
155 |         toolkit=toolkit,
156 |         toolkit_name=toolkit.name,
157 |     )
158 | 
159 | 
160 | async def convert_mcp_to_langchain_tools(server_config: McpServerConfig, force_refresh: bool = False) -> McpToolkit:
161 |     """Convert MCP tools to LangChain tools and create a toolkit.
162 | 
163 |     Args:
164 |         server_config (McpServerConfig): Configuration for the MCP server including name and parameters.
165 |         force_refresh (bool, optional): Whether to force refresh the tools cache. Defaults to False.
166 | 
167 |     Returns:
168 |         McpToolkit: A toolkit containing the converted LangChain tools.
169 |     """
170 |     toolkit = McpToolkit(
171 |         name=server_config.server_name,
172 |         server_param=server_config.server_param,
173 |         exclude_tools=server_config.exclude_tools
174 |     )
175 |     await toolkit.initialize(force_refresh=force_refresh)
176 |     return toolkit
177 | 
--------------------------------------------------------------------------------
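Putting the module together, the typical flow is: build a config, convert it into a toolkit, hand the tools to an agent, and close the toolkit when done. A minimal end-to-end sketch (the server details remain placeholders):

import asyncio

async def main():
    config = McpServerConfig(
        server_name="fetch",
        server_param=StdioServerParameters(command="uvx", args=["mcp-server-fetch"]),
    )
    toolkit = await convert_mcp_to_langchain_tools(config, force_refresh=False)
    try:
        for tool in toolkit.get_tools():
            print(tool.name, "-", tool.description)
    finally:
        await toolkit.close()

asyncio.run(main())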