├── src
│   └── llm_tools_mcp
│       ├── __init__.py
│       ├── defaults.py
│       ├── register_tools.py
│       ├── mcp_config.py
│       └── mcp_client.py
├── mypy.ini
├── test.sh
├── .gitignore
├── pytest.ini
├── e2e
│   ├── Dockerfile
│   ├── e2e-docker.sh
│   └── e2e.sh
├── example-mcp.json
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── lint.yml
│       ├── test.yml
│       └── publish.yml
├── pyproject.toml
├── check.sh
├── tests
│   ├── test_integration.py
│   ├── test_config_validation.py
│   └── test_llm_tools_mcp.py
├── README.md
└── LICENSE
/src/llm_tools_mcp/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | exclude = ^(.venv|build)/
3 |
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -uo pipefail
4 |
5 | echo "--------------------------------"
6 | echo "Running tests..."
7 | uv run --extra test pytest
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | venv
6 | .eggs
7 | .pytest_cache
8 | *.egg-info
9 | .DS_Store
10 | .vscode
11 | dist
12 | build
13 |
--------------------------------------------------------------------------------
/src/llm_tools_mcp/defaults.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
4 | DEFAULT_CONFIG_DIR = os.environ.get("LLM_TOOLS_MCP_CONFIG_DIR", "~/.llm-tools-mcp")
5 | DEFAULT_MCP_JSON_PATH = os.path.join(DEFAULT_CONFIG_DIR, "mcp.json")
6 |
--------------------------------------------------------------------------------
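
A minimal usage sketch for the defaults above: `LLM_TOOLS_MCP_CONFIG_DIR` is read at import time, so it must be set before the plugin is imported (the e2e script exports it before invoking `llm`). The `/tmp` path here is hypothetical.

```python
import os

# must be set before llm_tools_mcp.defaults is imported
os.environ["LLM_TOOLS_MCP_CONFIG_DIR"] = "/tmp/llm-mcp-demo"  # hypothetical directory

from llm_tools_mcp import defaults

print(defaults.DEFAULT_MCP_JSON_PATH)  # -> /tmp/llm-mcp-demo/mcp.json
```
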
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | asyncio_default_fixture_loop_scope = session
3 | asyncio_default_test_loop_scope = session
4 | asyncio_mode = auto
5 |
6 | markers =
7 | online: mark test as online (requires internet)
8 |
 9 | # don't run online tests by default
10 | addopts = -m "not online"
11 |
--------------------------------------------------------------------------------
/e2e/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1.5
2 |
 3 | FROM ghcr.io/astral-sh/uv:0.7.13 AS uv-base
4 |
5 | FROM node:24-slim
6 |
7 | RUN apt-get update && apt-get install -y python3
8 |
9 | COPY --from=uv-base /uv /usr/local/bin/uv
10 |
11 | # - uv tools are installed in /root/.local/bin
12 | ENV PATH="/usr/local/bin:/root/.local/bin:$PATH"
13 |
14 | WORKDIR /workspace
15 |
16 | CMD ["bash"]
17 |
--------------------------------------------------------------------------------
/example-mcp.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "filesystem": {
4 | "command": "npx",
5 | "args": [
6 | "-y",
7 | "@modelcontextprotocol/server-filesystem",
8 | "~/llm-tools-mcp-demo"
9 | ]
10 | },
11 | "fetch": {
12 | "type": "http",
13 | "url": "https://remote.mcpservers.org/fetch/mcp"
14 | },
15 | "deepwiki": {
16 | "type": "sse",
17 | "url": "https://mcp.deepwiki.com/sse"
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "uv"
9 | directory: "/"
10 | schedule:
11 | interval: "daily"
12 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | merge_group:
9 |
10 | permissions:
11 | contents: read
12 |
13 | jobs:
14 | lint:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: actions/checkout@v4
18 | - name: Set up Python 3.13
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: "3.13"
22 | - name: Install uv
23 | uses: astral-sh/setup-uv@v4
24 | - name: Run lint
25 | run: |
26 | uv sync --all-extras
27 | ./check.sh
28 |
--------------------------------------------------------------------------------
/e2e/e2e-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
5 |
6 | docker build -t llm-e2e:latest "$SCRIPT_DIR"
7 |
8 | if [[ "${1-}" == "bash" || "${1-}" == "shell" ]]; then
9 | echo "Running interactive bash shell..."
10 | docker run --rm -it \
11 | -v "$SCRIPT_DIR/..:/workspace" \
12 | -w /workspace \
13 | llm-e2e:latest bash
14 | else
15 | echo "Running e2e script..."
16 | docker run --rm \
17 | -v "$SCRIPT_DIR/..:/workspace" \
18 | -w /workspace \
19 | llm-e2e:latest bash /workspace/e2e/e2e.sh
20 | fi
21 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "llm-tools-mcp"
3 | version = "0.4"
4 | description = "MCP support for LLM CLI"
5 | readme = "README.md"
6 | authors = [{name = "Michal Fudala"}]
7 | license = "Apache-2.0"
8 | classifiers = []
9 | requires-python = ">=3.10"
10 | dependencies = [
11 | "llm>=0.27.1",
12 | "mcp[cli]>=1.9.1",
13 | "requests>=2.32.3",
14 | ]
15 |
16 | [build-system]
17 | requires = ["setuptools"]
18 | build-backend = "setuptools.build_meta"
19 |
20 | [dependency-groups]
21 | dev = [
22 | "mypy>=1.15.0",
23 | "ruff>=0.11.11",
24 | ]
25 |
26 | [project.urls]
27 | Homepage = "https://github.com/VirtusLab/llm-tools-mcp"
28 | Changelog = "https://github.com/VirtusLab/llm-tools-mcp/releases"
29 | Issues = "https://github.com/VirtusLab/llm-tools-mcp/issues"
30 | CI = "https://github.com/VirtusLab/llm-tools-mcp/actions"
31 |
32 | [project.entry-points.llm]
33 | mcp = "llm_tools_mcp.register_tools"
34 |
35 | [project.optional-dependencies]
36 | test = [
37 | "pytest",
38 | "pytest-asyncio>=1.0.0",
39 | "pytest-watch>=4.2.0",
40 | ]
41 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | merge_group:
9 |
10 | permissions:
11 | contents: read
12 |
13 | jobs:
14 | test:
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | # python 3.9 is not supported by MCP Python SDK
19 | python-version: ["3.10", "3.11", "3.12", "3.13"]
20 | steps:
21 | - uses: actions/checkout@v4
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 | cache: pip
27 | cache-dependency-path: pyproject.toml
28 | - name: Install uv
29 | uses: astral-sh/setup-uv@v4
30 | - name: Install dependencies
31 | run: |
32 | pip install -e '.[test]'
33 | - name: Sync dependencies
34 | run: |
35 | uv sync --all-extras
36 | - name: Check (lint)
37 | run: |
38 | ./check.sh
39 | - name: Run tests
40 | run: |
41 | python -m pytest
42 | - name: Run online tests
43 | run: |
44 | python -m pytest -m "online"
45 | - name: Run e2e tests
46 | run: |
47 | ./e2e/e2e.sh
48 |
--------------------------------------------------------------------------------
/check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -uo pipefail
4 |
5 | # Parse command line arguments
6 | ALSO_TEST=false
7 | while [[ $# -gt 0 ]]; do
8 | case $1 in
9 | --also-test)
10 | ALSO_TEST=true
11 | shift
12 | ;;
13 | *)
14 | echo "Unknown option: $1"
15 | echo "Usage: $0 [--also-test]"
16 | exit 1
17 | ;;
18 | esac
19 | done
20 |
21 | echo "--------------------------------"
22 | echo "Running mypy..."
23 | uv run mypy .
24 | mypy_exit=$?
25 |
26 | echo "--------------------------------"
27 | echo "Running ruff check..."
28 | uv run ruff check .
29 | ruff_check_exit=$?
30 |
31 | echo "--------------------------------"
32 | echo "Running ruff format..."
33 | uv run ruff format --check .
34 | ruff_format_exit=$?
35 |
36 | # Initialize exit code with current checks
37 | exit_code=$((mypy_exit + ruff_check_exit + ruff_format_exit))
38 |
39 | # Run tests if --also-test flag is provided
40 | if [ "$ALSO_TEST" = true ]; then
41 | echo "--------------------------------"
42 | echo "Running tests..."
43 | uv run pytest
44 | test_exit=$?
45 | exit_code=$((exit_code + test_exit))
46 | fi
47 |
48 | # Exit with non-zero if any command failed
49 | if [ $exit_code -ne 0 ]; then
50 | echo "One or more checks failed!"
51 | else
52 | echo "All checks passed!"
53 | fi
54 |
55 | echo "Done!"
56 |
57 | exit $exit_code
58 |
--------------------------------------------------------------------------------
/src/llm_tools_mcp/register_tools.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import llm
3 | import mcp
4 |
5 |
6 | from llm_tools_mcp.defaults import DEFAULT_MCP_JSON_PATH
7 | from llm_tools_mcp.mcp_config import McpConfig
8 | from llm_tools_mcp.mcp_client import McpClient
9 |
10 |
11 | def _create_tool_for_mcp(
12 | server_name: str, mcp_client: McpClient, mcp_tool: mcp.Tool
13 | ) -> llm.Tool:
14 | def impl(**kwargs):
15 | return asyncio.run(mcp_client.call_tool(server_name, mcp_tool.name, **kwargs))
16 |
17 | enriched_description = mcp_tool.description or ""
18 | enriched_description += f"\n[from MCP server: {server_name}]"
19 |
20 | return llm.Tool(
21 | name=mcp_tool.name,
22 | description=enriched_description,
23 | input_schema=mcp_tool.inputSchema,
24 | plugin="llm-tools-mcp",
25 | implementation=impl,
26 | )
27 |
28 |
29 | def _get_tools_for_llm(mcp_client: McpClient) -> list[llm.Tool]:
30 | tools = asyncio.run(mcp_client.get_all_tools())
31 | mapped_tools: list[llm.Tool] = []
32 | for server_name, server_tools in tools.items():
33 | for tool in server_tools:
34 | mapped_tools.append(_create_tool_for_mcp(server_name, mcp_client, tool))
35 | return mapped_tools
36 |
37 |
38 | class MCP(llm.Toolbox):
39 | def __init__(self, config_path: str = DEFAULT_MCP_JSON_PATH):
40 | mcp_config = McpConfig.for_file_path(config_path)
41 | mcp_client = McpClient(mcp_config)
42 | computed_tools = _get_tools_for_llm(mcp_client)
43 |
44 | for tool in computed_tools:
45 | self.add_tool(tool, pass_self=True)
46 |
47 |
48 | @llm.hookimpl
49 | def register_tools(register):
50 | register(MCP)
51 |
--------------------------------------------------------------------------------
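
An illustrative sketch of how the pieces above compose, assuming a valid `mcp.json` at the default location and reachable servers: `_get_tools_for_llm` (an internal helper) connects to every configured server and maps each MCP tool to an `llm.Tool`.

```python
from llm_tools_mcp.defaults import DEFAULT_MCP_JSON_PATH
from llm_tools_mcp.mcp_client import McpClient
from llm_tools_mcp.mcp_config import McpConfig
from llm_tools_mcp.register_tools import _get_tools_for_llm

# load ~/.llm-tools-mcp/mcp.json (or the LLM_TOOLS_MCP_CONFIG_DIR override)
config = McpConfig.for_file_path(DEFAULT_MCP_JSON_PATH)
client = McpClient(config)

# one llm.Tool per MCP tool, across all configured servers
for tool in _get_tools_for_llm(client):
    print(tool.name)
```

The `MCP` toolbox constructor performs the same steps and then registers each tool on itself, which is what `llm -T MCP` (or `-T 'MCP("/path/to/custom/mcp.json")'`) picks up.
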
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python Package
2 |
3 | on:
4 | release:
5 | types: [created]
6 |
7 | permissions:
8 | contents: read
9 |
10 | jobs:
11 | test:
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | # python 3.9 is not supported by MCP Python SDK
16 | python-version: ["3.10", "3.11", "3.12", "3.13"]
17 | steps:
18 | - uses: actions/checkout@v4
19 | - name: Set up Python ${{ matrix.python-version }}
20 | uses: actions/setup-python@v5
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 | cache: pip
24 | cache-dependency-path: pyproject.toml
25 | - name: Install uv
26 | uses: astral-sh/setup-uv@v4
27 | - name: Sync dependencies
28 | run: |
29 | uv sync --all-extras
30 | - name: Check (lint)
31 | run: |
32 | ./check.sh
33 | - name: Run tests
34 | run: |
35 | uv run pytest
36 | - name: Run online tests
37 | run: |
38 | uv run pytest -m "online"
39 | - name: Run e2e tests
40 | run: |
41 | ./e2e/e2e.sh
42 | deploy:
43 | runs-on: ubuntu-latest
44 | needs: [test]
45 | environment: release
46 | permissions:
47 | id-token: write
48 | steps:
49 | - uses: actions/checkout@v4
50 | - name: Set up Python
51 | uses: actions/setup-python@v5
52 | with:
53 | python-version: "3.13"
54 | cache: pip
55 | cache-dependency-path: pyproject.toml
56 | - name: Install dependencies
57 | run: |
58 | pip install setuptools wheel build
59 | - name: Build
60 | run: |
61 | python -m build
62 | - name: Publish
63 | uses: pypa/gh-action-pypi-publish@release/v1
64 |
--------------------------------------------------------------------------------
/tests/test_integration.py:
--------------------------------------------------------------------------------
1 | import json
2 | import pytest
3 | from llm_tools_mcp.mcp_client import McpClient
4 | from llm_tools_mcp.mcp_config import McpConfig
5 |
6 |
7 | @pytest.mark.asyncio
8 | @pytest.mark.online
9 | async def test_sse_deepwiki_mcp():
10 | """Test SSE connection to deepwiki-mcp service with GitHub-like schema."""
11 | mcp_config_content = json.dumps(
12 | {
13 | "mcpServers": {
14 | "deepwiki": {"type": "sse", "url": "https://mcp.deepwiki.com/sse"}
15 | }
16 | }
17 | )
18 |
19 | mcp_config_obj = McpConfig.for_json_content(mcp_config_content)
20 | mcp_client = McpClient(mcp_config_obj)
21 |
22 | tools = await mcp_client.get_all_tools()
23 |
24 | assert "deepwiki" in tools, "Should have deepwiki server"
25 |
26 | tools = tools.get("deepwiki", [])
27 |
28 | result = await mcp_client.call_tool(
29 | "deepwiki", tools[0].name, repoName="facebook/react"
30 | )
31 | assert result is not None, "Tool call should return a result"
32 |     assert "react" in str(result).lower(), "Result should mention facebook/react"
33 |
34 |
35 | @pytest.mark.asyncio
36 | @pytest.mark.online
37 | async def test_remote_fetch_mcp():
38 | """Test remote MCP connection to fetch-mcp service for web content fetching."""
39 | mcp_config_content = json.dumps(
40 | {
41 | "mcpServers": {
42 | "fetch": {
43 | "type": "http",
44 | "url": "https://remote.mcpservers.org/fetch/mcp",
45 | }
46 | }
47 | }
48 | )
49 |
50 | mcp_config_obj = McpConfig.for_json_content(mcp_config_content)
51 | mcp_client = McpClient(mcp_config_obj)
52 |
53 | tools = await mcp_client.get_all_tools()
54 |
55 | assert "fetch" in tools, "Should have fetch server"
56 |
57 | tools = tools.get("fetch", [])
58 | tool_names = [tool.name for tool in tools]
59 |
60 | assert "fetch" in tool_names, (
61 | f"Should have a fetching tool. Found tools: {tool_names}"
62 | )
63 |
--------------------------------------------------------------------------------
/e2e/e2e.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
6 |
7 | cleanup() {
8 | if [ -n "${TEMP_DIR:-}" ] && [ -d "$TEMP_DIR" ]; then
9 | rm -rf "$TEMP_DIR"
10 | fi
11 | if [ -n "${MCP_CONFIG_DIR:-}" ] && [ -d "$MCP_CONFIG_DIR" ]; then
12 | rm -rf "$MCP_CONFIG_DIR"
13 | fi
14 | }
15 |
16 | trap cleanup EXIT
17 |
18 | TEMP_DIR=$(mktemp -d ./e2e-testtmp.XXXXXX)
19 | TEST_DIR="$TEMP_DIR/demo_files"
20 | MCP_CONFIG_DIR="$TEMP_DIR/.llm-tools-mcp"
21 |
22 | mkdir -p "$TEST_DIR"
23 | echo "Hello, world!" > "$TEST_DIR/hello.txt"
24 |
25 | mkdir -p "$MCP_CONFIG_DIR"
26 |
27 | cat > "$MCP_CONFIG_DIR/mcp.json" << EOF
28 | {
29 | "mcpServers": {
30 | "filesystem": {
31 | "command": "npx",
32 | "args": [
33 | "-y",
34 | "@modelcontextprotocol/server-filesystem@2025.3.28",
35 | "$TEST_DIR"
36 | ]
37 | }
38 | }
39 | }
40 | EOF
41 |
42 | export LLM_TOOLS_MCP_CONFIG_DIR="$MCP_CONFIG_DIR"
43 | echo "Using config in $LLM_TOOLS_MCP_CONFIG_DIR"
44 |
45 | echo "Installing LLM and plugins..."
46 | if ! command -v uv &> /dev/null; then
47 | echo "⚠️ ERROR: uv is not installed. Please install uv first."
48 | exit 1
49 | fi
50 |
51 | uv tool install llm
52 | llm install "$SCRIPT_DIR/.."
53 | llm install llm-echo==0.3a3
54 |
55 |
56 | TOOLS_OUTPUT=$(llm tools list 2>&1 || true)
57 | if echo "$TOOLS_OUTPUT" | grep -q "read_file"; then
58 | echo "MCP tools are available"
59 | else
60 | echo "🛑 ERROR: MCP tools are not available"
61 | fi
62 |
63 | SIMPLE_TEST='{"tool_calls": [{"name": "list_directory", "arguments": {"path": "'$TEST_DIR'"}}], "prompt": "List files in current directory"}'
64 | # check the assignment inside `if` so `set -e` cannot exit before the error message prints
65 | if ! SIMPLE_OUTPUT=$(llm -m echo -T MCP "$SIMPLE_TEST" 2>&1); then
66 |   echo "🛑 ERROR: Simple MCP test failed: $SIMPLE_OUTPUT"
67 |   exit 1
68 | fi
69 |
70 |
71 | if ! echo "$SIMPLE_OUTPUT" | grep -q "hello.txt"; then
72 | echo "🛑 ERROR: Simple test output doesn't contain expected tool_calls: $SIMPLE_OUTPUT"
73 | exit 1
74 | fi
75 |
76 | TEST_PROMPT='{"tool_calls": [{"name": "read_file", "arguments": {"path": "'$TEST_DIR'/hello.txt"}}], "prompt": "Read the hello.txt file"}'
77 | # same pattern as above: guard the assignment itself rather than checking $? afterwards
78 | if ! MCP_ECHO_OUTPUT=$(llm -m echo -T MCP "$TEST_PROMPT" 2>&1); then
79 |   echo "🛑 ERROR: MCP echo test failed: $MCP_ECHO_OUTPUT"
80 |   exit 1
81 | fi
82 | if ! echo "$MCP_ECHO_OUTPUT" | grep -q "Hello, world!"; then
83 | echo "🛑 ERROR: Echo output doesn't contain expected tool_calls: $MCP_ECHO_OUTPUT"
84 | exit 1
85 | fi
86 |
87 |
88 | echo "✅ End-to-end test completed successfully!"
89 |
--------------------------------------------------------------------------------
/src/llm_tools_mcp/mcp_config.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 | from pydantic import BaseModel, Discriminator, Field, Tag
3 | from llm_tools_mcp.defaults import DEFAULT_CONFIG_DIR
4 | from llm_tools_mcp.defaults import DEFAULT_MCP_JSON_PATH
5 |
6 |
7 | import json
8 | from pathlib import Path
9 |
10 |
11 | def _get_discriminator_value(v: dict) -> str:
12 | if "type" in v:
13 | type_value = v["type"]
14 | if isinstance(type_value, str):
15 | allowed_types = ["stdio", "sse", "http"]
16 | if type_value in allowed_types:
17 | return type_value
18 | else:
19 | raise ValueError(
20 | f"Unknown server 'type'. Provided 'type': {type_value}. Allowed types: {allowed_types}"
21 | )
22 | else:
23 | raise ValueError(
24 | f"Server 'type' should be string. Provided 'type': {type_value}"
25 | )
26 |
27 | else:
28 | if "url" in v and "command" in v:
29 | raise ValueError(
30 | f"Only 'url' or 'command' is allowed, not both. Provided 'url': {v['url']}, provided 'command': {v['command']}"
31 | )
32 | elif "url" in v:
33 | # inference rules kinda like in FastMCP 2.x
34 | # https://gofastmcp.com/clients/transports#overview
35 | if "/sse" in v["url"]:
36 | return "sse"
37 | else:
38 | return "http"
39 | elif "command" in v:
40 | return "stdio"
41 | else:
42 | raise ValueError(
43 | "Could not deduce MCP server type. Provide 'url' or 'command'. You can explicitly specify the type with 'type' field."
44 | )
45 |
46 |
47 | class StdioServerConfig(BaseModel):
48 | command: str = Field()
49 | args: list[str] | None = Field(default=None)
50 | env: dict[str, str] | None = Field(default=None)
51 |
52 |
53 | class SseServerConfig(BaseModel):
54 | url: str = Field()
55 |
56 |
57 | class HttpServerConfig(BaseModel):
58 | url: str = Field()
59 |
60 |
61 | StdioOrSseServerConfig = Annotated[
62 | Annotated[StdioServerConfig, Tag("stdio")]
63 | | Annotated[HttpServerConfig, Tag("http")]
64 | | Annotated[SseServerConfig, Tag("sse")],
65 | Discriminator(_get_discriminator_value),
66 | ]
67 |
68 |
69 | class McpConfigType(BaseModel):
70 | mcpServers: dict[str, StdioOrSseServerConfig]
71 |
72 |
73 | class McpConfig:
74 | def __init__(
75 | self,
76 | config: McpConfigType,
77 | log_path: Path = Path(DEFAULT_CONFIG_DIR) / Path("logs"),
78 | ):
79 | self.config = config
80 | self.log_path = log_path.expanduser()
81 |
82 | @classmethod
83 | def for_file_path(cls, path: str = DEFAULT_MCP_JSON_PATH):
84 | config_file_path = Path(path).expanduser()
85 | with open(config_file_path) as config_file:
86 | return cls.for_json_content(config_file.read())
87 |
88 | @classmethod
89 | def for_json_content(cls, content: str):
90 | McpConfigType.model_validate_json(content)
91 | config = json.loads(content)
92 | config_validated: McpConfigType = McpConfigType(**config)
93 | return cls(config_validated)
94 |
95 | def with_log_path(self, log_path: Path):
96 | return McpConfig(self.config, log_path)
97 |
98 | def get(self) -> McpConfigType:
99 | return self.config
100 |
--------------------------------------------------------------------------------
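
A small sketch of the transport inference implemented by `_get_discriminator_value`: an explicit `type` always wins; otherwise `command` implies stdio, a URL containing `/sse` implies SSE, and any other URL implies streamable HTTP. Server names and the filesystem arguments here are arbitrary; the URLs match `example-mcp.json`.

```python
import json

from llm_tools_mcp.mcp_config import (
    HttpServerConfig,
    McpConfig,
    SseServerConfig,
    StdioServerConfig,
)

config = McpConfig.for_json_content(
    json.dumps(
        {
            "mcpServers": {
                "files": {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "."]},
                "deepwiki": {"url": "https://mcp.deepwiki.com/sse"},
                "fetch": {"url": "https://remote.mcpservers.org/fetch/mcp"},
            }
        }
    )
)

servers = config.get().mcpServers
assert isinstance(servers["files"], StdioServerConfig)  # "command" -> stdio
assert isinstance(servers["deepwiki"], SseServerConfig)  # URL containing "/sse" -> sse
assert isinstance(servers["fetch"], HttpServerConfig)  # any other URL -> streamable HTTP
```
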
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # `llm-tools-mcp`
4 |
5 | 
6 |
7 | **Connect to MCP servers right from your shell. Plugin for [llm](https://github.com/simonw/llm) (by [@simonw](https://github.com/simonw)).**
8 |
9 |
10 | [](https://pypi.org/project/llm-tools-mcp/)
11 | [](https://github.com/VirtusLab/llm-tools-mcp/releases)
12 | [](https://github.com/VirtusLab/llm-tools-mcp/actions/workflows/test.yml)
13 | [](https://github.com/VirtusLab/llm-tools-mcp/blob/main/LICENSE)
14 |
15 |
16 |
17 |
18 |
19 |
20 | > [!NOTE]
21 | > Current focus: [Authorization #4](https://github.com/VirtusLab/llm-tools-mcp/issues/4)
22 |
23 |
24 | ## Installation
25 |
26 | Install this plugin in the same environment as [LLM](https://llm.datasette.io/):
27 |
28 | ```bash
29 | llm install llm-tools-mcp
30 | ```
31 | ## Usage
32 |
33 | > [!WARNING]
34 | > It's recommended to use the `--ta` flag and approve each tool execution.
35 |
36 | 1. Create an `mcp.json` file in `~/.llm-tools-mcp`.
37 |
38 | Example file:
39 |
40 | ```json
41 | {
42 | "mcpServers": {
43 | "filesystem": {
44 | "command": "npx",
45 | "args": [
46 | "-y",
47 | "@modelcontextprotocol/server-filesystem",
48 | "~/demo"
49 | ]
50 | }
51 | }
52 | }
53 | ```
54 |
55 | 2. List available tools.
56 | > [!WARNING]
57 | > `llm tools` is not implemented for dynamic toolboxes ([see this](https://github.com/simonw/llm/issues/1111#issuecomment-2992280193)),
58 | > so tools loaded as part of MCP won't be visible.
59 |
60 | ```sh
61 | llm tools list
62 | ```
63 |
64 | 3. Run `llm` with tools.
65 |
66 | ```sh
67 | llm --ta -T MCP "what files are in the demo directory? show me contents of one of the files (any)"
68 | ```
69 |
70 | ### Other examples
71 |
72 | **Dynamically change your MCP config:**
73 |
74 | ```sh
75 | llm --ta -T 'MCP("/path/to/custom/mcp.json")' "your prompt here"
76 | ```
77 |
78 | ## Development
79 |
80 | ### Current workflow (to be verified)
81 |
82 | - Sync dependencies: `uv sync --all-extras`
83 | - Run linters / type checker: `./check.sh`
84 | - Run tests: `./test.sh`
85 | - Run end-to-end tests: `./e2e/e2e-docker.sh`
86 |
87 | ## To Do
88 |
89 | - [x] Release alpha version
90 | - [x] support all transports
91 | - [x] streamable http
92 | - [x] sse
93 | - [x] stdio
94 | - [ ] Build a solid test suite
95 | - [x] test config file validation
96 | - [x] test sse with dummy server
97 | - [x] test stdio with dummy server
98 | - [x] test http streamable with dummy server ([see #1](https://github.com/Virtuslab/llm-tools-mcp/issues/1))
99 | - [x] manual test for sse with real server
100 | - [x] manual test for stdio with real server
101 | - [x] manual test for http streamable with real server
102 | - [x] Redirect `stdout`/`stderr` from the MCP SDK to a file or designated location
103 | - [ ] Reuse stdio connections
104 | - [x] **Support non-stdio MCP servers**
105 | - [ ] Handle tool name conflicts (prefix with mcp server name?)
106 | - [ ] Gather feedback on the `~/.llm-tools-mcp` directory naming
107 | - [x] Improve failure handling:
108 | - [x] When connecting to an MCP server fails
109 | - [x] When `mcp.json` is malformed
110 | - [ ] Improve this README:
111 | - [ ] Add more detail in the [Development](#development) section (mention `uv`?)
112 |
--------------------------------------------------------------------------------
/tests/test_config_validation.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 | import asyncio
3 | from typing import Dict, List, TypedDict
4 | from mcp import Tool
5 | import pytest
6 | import uvicorn
7 | from starlette.applications import Starlette
8 |
9 | from llm_tools_mcp.mcp_config import McpConfig
10 |
11 | from mcp.server.fastmcp import FastMCP
12 |
13 |
14 | class JsonValidationTestCase(TypedDict):
15 | input_json: str
16 | expected_error_contains: List[str]
17 |
18 |
19 | @asynccontextmanager
20 | async def server_context(mcp: FastMCP, starlette_app: Starlette):
21 | config = uvicorn.Config(
22 | starlette_app,
23 | host=mcp.settings.host,
24 | port=mcp.settings.port,
25 | log_level=mcp.settings.log_level.lower(),
26 | )
27 | server = uvicorn.Server(config)
28 | server_task = asyncio.create_task(server.serve())
29 | await asyncio.sleep(2)
30 |
31 | try:
32 | yield
33 | finally:
34 | await server.shutdown()
35 | server_task.cancel()
36 |
37 |
38 | json_validation_test_data: List[JsonValidationTestCase] = [
39 | {
40 | "input_json": """
41 | {
42 | "invalid": "asd"
43 | }
44 | """,
45 | "expected_error_contains": ["mcpServers", "Field required"],
46 | },
47 | {
48 | "input_json": """
49 | []
50 | """,
51 | "expected_error_contains": ["Input should be an object"],
52 | },
53 | {
54 | "input_json": """
55 | {
56 | "mcpServers": []
57 | }
58 | """,
59 | "expected_error_contains": [
60 | "mcpServers",
61 | "Input should be an object",
62 | ],
63 | },
64 | {
65 | "input_json": """
66 | {
67 | "mcpServers": {
68 | "name": {
69 | "unknown": "a"
70 | }
71 | }
72 | }
73 | """,
74 | "expected_error_contains": ["Could not deduce MCP server type"],
75 | },
76 | {
77 | "input_json": """
78 | {
79 | "mcpServers": {
80 | "name": {
81 | "command": "whatever",
82 | "url": "https://url"
83 | }
84 | }
85 | }
86 | """,
87 | "expected_error_contains": [
88 | "Only 'url' or 'command' is allowed",
89 | "whatever",
90 | "https://url",
91 | ],
92 | },
93 | {
94 | "input_json": """
95 | {
96 | "mcpServers": {
97 | "name": {
98 | "command": "whatever",
99 | "type": "sse"
100 | }
101 | }
102 | }
103 | """,
104 | "expected_error_contains": ["Field required"],
105 | },
106 | {
107 | "input_json": """
108 | {
109 | "mcpServers": {
110 | "name": {
111 | "command": "whatever",
112 | "type": "invalid"
113 | }
114 | }
115 | }
116 | """,
117 | "expected_error_contains": ["Unknown server 'type'", "invalid"],
118 | },
119 | {
120 | "input_json": """
121 | {
122 | "mcpServers": {
123 | "name": {
124 | "command": "whatever",
125 | "url": "https://url"
126 | }
127 | }
128 | }
129 | """,
130 | "expected_error_contains": [
131 | "Only 'url' or 'command' is allowed",
132 | "whatever",
133 | "https://url",
134 | ],
135 | },
136 | ]
137 |
138 |
139 | def tool_names(all_tools: Dict[str, List[Tool]]) -> List[str]:
140 | return [tool.name for tools in all_tools.values() for tool in tools]
141 |
142 |
143 | @pytest.mark.parametrize("test_case", json_validation_test_data)
144 | def test_mcp_json_validation(test_case):
145 | with pytest.raises(ValueError) as excinfo:
146 | McpConfig.for_json_content(test_case["input_json"])
147 | for expected_error in test_case["expected_error_contains"]:
148 | assert expected_error in str(excinfo.value)
149 |
--------------------------------------------------------------------------------
/src/llm_tools_mcp/mcp_client.py:
--------------------------------------------------------------------------------
1 | from llm_tools_mcp.mcp_config import McpConfig, SseServerConfig, StdioServerConfig
2 | from llm_tools_mcp.mcp_config import HttpServerConfig
3 | import sys
4 |
5 | from mcp import (
6 | ClientSession,
7 | ListToolsResult,
8 | StdioServerParameters,
9 | Tool,
10 | stdio_client,
11 | )
12 | from mcp.client.sse import sse_client
13 | from mcp.client.streamable_http import streamablehttp_client
14 |
15 |
16 | import datetime
17 | import os
18 | import traceback
19 | import uuid
20 | from contextlib import asynccontextmanager
21 | from typing import TextIO
22 |
23 |
24 | class McpClient:
25 | def __init__(self, config: McpConfig):
26 | self.config = config
27 |
28 | @asynccontextmanager
29 | async def _client_session_with_logging(self, name, read, write):
30 |         async with ClientSession(read, write) as session:
31 |             try:
32 |                 await session.initialize()
33 |             except Exception as e:
34 |                 print(
35 |                     f"Warning: Failed to connect to the '{name}' MCP server: {e}",
36 |                     file=sys.stderr,
37 |                 )
38 |                 print(
39 |                     f"Tools from '{name}' will be unavailable. Run with LLM_TOOLS_MCP_FULL_ERRORS=1 for the full traceback, or see logs: {self.config.log_path}",
40 |                     file=sys.stderr,
41 |                 )
42 |                 if os.environ.get("LLM_TOOLS_MCP_FULL_ERRORS", None):
43 |                     print(traceback.format_exc(), file=sys.stderr)
44 |                 yield None
45 |             else:  # errors raised while the session is in use propagate to the caller
46 |                 yield session
46 |
47 | @asynccontextmanager
48 | async def _client_session(self, name: str):
49 | server_config = self.config.get().mcpServers.get(name)
50 | if not server_config:
51 | raise ValueError(f"There is no such MCP server: {name}")
52 | if isinstance(server_config, SseServerConfig):
53 | async with sse_client(server_config.url) as (read, write):
54 | async with self._client_session_with_logging(
55 | name, read, write
56 | ) as session:
57 | yield session
58 | elif isinstance(server_config, HttpServerConfig):
59 | async with streamablehttp_client(server_config.url) as (read, write, _):
60 | async with self._client_session_with_logging(
61 | name, read, write
62 | ) as session:
63 | yield session
64 | elif isinstance(server_config, StdioServerConfig):
65 | params = StdioServerParameters(
66 | command=server_config.command,
67 | args=server_config.args or [],
68 | env=server_config.env,
69 | )
70 | log_file = self._log_file_for_session(name)
71 | async with stdio_client(params, errlog=log_file) as (read, write):
72 | async with self._client_session_with_logging(
73 | name, read, write
74 | ) as session:
75 | yield session
76 | else:
77 | raise ValueError(f"Unknown server config type: {type(server_config)}")
78 |
79 | def _log_file_for_session(self, name: str) -> TextIO:
80 | log_file = (
81 |             # use the configured log directory (matches the warning above)
82 |             self.config.log_path
83 | / f"{name}-{uuid.uuid4()}-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.log"
84 | )
85 | log_file.parent.mkdir(parents=True, exist_ok=True)
86 | return open(log_file, "w")
87 |
88 | async def get_tools_for(self, name: str) -> ListToolsResult:
89 | async with self._client_session(name) as session:
90 | if session is None:
91 | return ListToolsResult(tools=[])
92 | return await session.list_tools()
93 |
94 | async def get_all_tools(self) -> dict[str, list[Tool]]:
95 | tools_for_server: dict[str, list[Tool]] = dict()
96 | for server_name in self.config.get().mcpServers.keys():
97 | tools = await self.get_tools_for(server_name)
98 | tools_for_server[server_name] = tools.tools
99 | return tools_for_server
100 |
101 | async def call_tool(self, server_name: str, name: str, /, **kwargs):
102 | async with self._client_session(server_name) as session:
103 | if session is None:
104 | return (
105 | f"Error: Failed to call tool {name} from MCP server {server_name}"
106 | )
107 | tool_result = await session.call_tool(name, kwargs)
108 | return str(tool_result.content)
109 |
--------------------------------------------------------------------------------
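
A hedged usage sketch for `McpClient`, mirroring `tests/test_llm_tools_mcp.py`: it assumes `npx` is available so the filesystem MCP server can be started over stdio, and the served directory `"."` is arbitrary.

```python
import asyncio
import json

from llm_tools_mcp.mcp_client import McpClient
from llm_tools_mcp.mcp_config import McpConfig


async def main() -> None:
    config = McpConfig.for_json_content(
        json.dumps(
            {
                "mcpServers": {
                    "filesystem": {
                        "command": "npx",
                        "args": ["-y", "@modelcontextprotocol/server-filesystem", "."],
                    }
                }
            }
        )
    )
    client = McpClient(config)

    # {server name -> [mcp.Tool, ...]}; servers that fail to connect contribute an empty list
    tools = await client.get_all_tools()
    print([tool.name for tool in tools["filesystem"]])

    # each call opens a fresh session (stdio connections are not reused yet)
    print(await client.call_tool("filesystem", "list_directory", path="."))


asyncio.run(main())
```
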
/tests/test_llm_tools_mcp.py:
--------------------------------------------------------------------------------
1 | from contextlib import asynccontextmanager
2 | import socket
3 | import json
4 | import asyncio
5 | from pathlib import Path
6 | from typing import Dict, List
7 | from mcp import Tool
8 | import pytest
9 | import uvicorn
10 | from starlette.applications import Starlette
11 |
12 | from llm_tools_mcp.mcp_client import McpClient
13 | from llm_tools_mcp.mcp_config import McpConfig
14 |
15 | from mcp.server.fastmcp import FastMCP, Context
16 |
17 |
18 | def get_free_port():
19 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
20 | s.bind(("", 0))
21 | return s.getsockname()[1]
22 |
23 |
24 | @asynccontextmanager
25 | async def server_context(mcp: FastMCP, starlette_app: Starlette):
26 | config = uvicorn.Config(
27 | starlette_app,
28 | host=mcp.settings.host,
29 | port=mcp.settings.port,
30 | log_level=mcp.settings.log_level.lower(),
31 | )
32 | server = uvicorn.Server(config)
33 | server_task = asyncio.create_task(server.serve())
34 | await asyncio.sleep(2)
35 |
36 | try:
37 | yield
38 | finally:
39 | await server.shutdown()
40 | server_task.cancel()
41 |
42 |
43 | def tool_names(all_tools: Dict[str, List[Tool]]) -> List[str]:
44 | return [tool.name for tools in all_tools.values() for tool in tools]
45 |
46 |
47 | @pytest.mark.asyncio
48 | async def test_sse():
49 | port = get_free_port()
50 | print(f"Using port {port}")
51 | mcp = FastMCP(
52 | "My App SSE",
53 | port=port,
54 | )
55 |
56 | @mcp.tool()
57 | async def long_task(files: list[str], ctx: Context) -> str:
58 | """Process multiple files with progress tracking"""
59 | for i, file in enumerate(files):
60 | await ctx.info(f"Processing {file}")
61 | await ctx.report_progress(i, len(files))
62 | return "Tool output"
63 |
64 | async with server_context(mcp, mcp.sse_app()):
65 | mcp_config_content = json.dumps(
66 | {"mcpServers": {"test_sse_server": {"url": f"http://localhost:{port}/sse"}}}
67 | )
68 |
69 | mcp_config_obj = McpConfig.for_json_content(mcp_config_content)
70 | mcp_client = McpClient(mcp_config_obj)
71 |
72 | server_to_tools = await mcp_client.get_all_tools()
73 |
74 | assert "test_sse_server" in server_to_tools
75 | assert "long_task" in tool_names(server_to_tools)
76 |
77 | result = await mcp_client.call_tool(
78 | "test_sse_server", "long_task", files=["file1.txt", "file2.txt"]
79 | )
80 |
81 | assert result is not None, "Tool call should return a result"
82 | result_str = str(result)
83 | assert "Tool output" in result_str, "Should find completion message"
84 |
85 |
86 | @pytest.mark.asyncio
87 | async def test_stdio():
88 | test_dir = Path.cwd() / "test_mcp_temp"
89 | test_dir.mkdir(exist_ok=True)
90 |
91 | try:
92 | test_file1 = test_dir / "test1.txt"
93 | test_file2 = test_dir / "test2.txt"
94 |
95 | test_file1.write_text("This is test file 1")
96 | test_file2.write_text("This is test file 2")
97 |
98 | mcp_config_content = json.dumps(
99 | {
100 | "mcpServers": {
101 | "filesystem": {
102 | "command": "npx",
103 | "args": [
104 | "-y",
105 | "@modelcontextprotocol/server-filesystem@2025.3.28", # version on purpose for determinism
106 | str(test_dir),
107 | ],
108 | }
109 | }
110 | }
111 | )
112 |
113 | mcp_config_obj = McpConfig.for_json_content(mcp_config_content)
114 | mcp_client = McpClient(mcp_config_obj)
115 |
116 | server_to_tools = await mcp_client.get_all_tools()
117 |
118 | assert "filesystem" in server_to_tools, "Should have filesystem server"
119 |
120 | assert "read_file" in tool_names(server_to_tools)
121 | assert "write_file" in tool_names(server_to_tools)
122 | assert "list_directory" in tool_names(server_to_tools)
123 |
124 | result = await mcp_client.call_tool(
125 | "filesystem", "read_file", path=str(test_file1)
126 | )
127 |
128 | assert result is not None, "Tool call should return a result"
129 | result_str = str(result)
130 | assert "This is test file 1" in result_str, "Should find test file content"
131 |
132 | finally:
133 | import shutil
134 |
135 | if test_dir.exists():
136 | shutil.rmtree(test_dir)
137 |
138 |
139 | @pytest.mark.asyncio
140 | async def test_http():
141 | port = get_free_port()
142 | mcp_http = FastMCP("My App Http", port=port, debug=True)
143 |
144 | @mcp_http.tool()
145 | async def long_task_http(files: list[str]) -> str:
146 | """Process multiple files with progress tracking"""
147 | return "Tool output"
148 |
149 | async with server_context(mcp_http, mcp_http.streamable_http_app()):
150 | mcp_config_content = json.dumps(
151 | {
152 | "mcpServers": {
153 | "test_http_server": {
154 | "type": "http",
155 | "url": f"http://localhost:{port}/mcp",
156 | }
157 | }
158 | }
159 | )
160 |
161 | mcp_config_obj = McpConfig.for_json_content(mcp_config_content)
162 | mcp_client = McpClient(mcp_config_obj)
163 |
164 | server_to_tools = await mcp_client.get_all_tools()
165 |
166 | assert "test_http_server" in server_to_tools, "Should have test_http_server"
167 | assert "long_task_http" in tool_names(server_to_tools)
168 |
169 | result = await mcp_client.call_tool(
170 | "test_http_server", "long_task_http", files=["file1.txt", "file2.txt"]
171 | )
172 |
173 | assert result is not None, "Tool call should return a result"
174 | result_str = str(result)
175 | assert "Tool output" in result_str, "Should find completion message"
176 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------