├── .gitattributes ├── .github ├── pull_request_template.md └── workflows │ ├── python.yml │ ├── release.yml │ └── typescript.yml ├── .gitignore ├── .npmrc ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── package-lock.json ├── package.json ├── scripts └── release.py ├── src ├── everything │ ├── CLAUDE.md │ ├── Dockerfile │ ├── README.md │ ├── everything.ts │ ├── index.ts │ ├── package.json │ ├── sse.ts │ ├── stdio.ts │ ├── streamableHttp.ts │ └── tsconfig.json ├── fetch │ ├── .python-version │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ └── mcp_server_fetch │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ └── server.py │ └── uv.lock ├── filesystem │ ├── Dockerfile │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── git │ ├── .gitignore │ ├── .python-version │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ └── mcp_server_git │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ └── server.py │ ├── tests │ │ └── test_server.py │ └── uv.lock ├── memory │ ├── Dockerfile │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── sequentialthinking │ ├── Dockerfile │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json └── time │ ├── .python-version │ ├── Dockerfile │ ├── README.md │ ├── pyproject.toml │ ├── src │ └── mcp_server_time │ │ ├── __init__.py │ │ ├── __main__.py │ │ └── server.py │ ├── test │ └── time_server_test.py │ └── uv.lock └── tsconfig.json /.gitattributes: -------------------------------------------------------------------------------- 1 | package-lock.json linguist-generated=true 2 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Description 4 | 5 | ## Server Details 6 | 7 | - Server: 8 | - Changes to: 9 | 10 | ## Motivation and 
Context 11 | 12 | 13 | ## How Has This Been Tested? 14 | 15 | 16 | ## Breaking Changes 17 | 18 | 19 | ## Types of changes 20 | 21 | - [ ] Bug fix (non-breaking change which fixes an issue) 22 | - [ ] New feature (non-breaking change which adds functionality) 23 | - [ ] Breaking change (fix or feature that would cause existing functionality to change) 24 | - [ ] Documentation update 25 | 26 | ## Checklist 27 | 28 | - [ ] I have read the [MCP Protocol Documentation](https://modelcontextprotocol.io) 29 | - [ ] My changes follow MCP security best practices 30 | - [ ] I have updated the server's README accordingly 31 | - [ ] I have tested this with an LLM client 32 | - [ ] My code follows the repository's style guidelines 33 | - [ ] New and existing tests pass locally 34 | - [ ] I have added appropriate error handling 35 | - [ ] I have documented all environment variables and configuration options 36 | 37 | ## Additional context 38 | 39 | -------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Python 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | release: 9 | types: [published] 10 | 11 | jobs: 12 | detect-packages: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | packages: ${{ steps.find-packages.outputs.packages }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Find Python packages 20 | id: find-packages 21 | working-directory: src 22 | run: | 23 | PACKAGES=$(find . 
-name pyproject.toml -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]') 24 | echo "packages=$PACKAGES" >> $GITHUB_OUTPUT 25 | 26 | build: 27 | needs: [detect-packages] 28 | strategy: 29 | matrix: 30 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 31 | name: Build ${{ matrix.package }} 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - name: Install uv 37 | uses: astral-sh/setup-uv@v3 38 | 39 | - name: Set up Python 40 | uses: actions/setup-python@v5 41 | with: 42 | python-version-file: "src/${{ matrix.package }}/.python-version" 43 | 44 | - name: Install dependencies 45 | working-directory: src/${{ matrix.package }} 46 | run: uv sync --frozen --all-extras --dev 47 | 48 | - name: Run pyright 49 | working-directory: src/${{ matrix.package }} 50 | run: uv run --frozen pyright 51 | 52 | - name: Build package 53 | working-directory: src/${{ matrix.package }} 54 | run: uv build 55 | 56 | - name: Upload artifacts 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: dist-${{ matrix.package }} 60 | path: src/${{ matrix.package }}/dist/ 61 | 62 | publish: 63 | runs-on: ubuntu-latest 64 | needs: [build, detect-packages] 65 | if: github.event_name == 'release' 66 | 67 | strategy: 68 | matrix: 69 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 70 | name: Publish ${{ matrix.package }} 71 | 72 | environment: release 73 | permissions: 74 | id-token: write # Required for trusted publishing 75 | 76 | steps: 77 | - name: Download artifacts 78 | uses: actions/download-artifact@v4 79 | with: 80 | name: dist-${{ matrix.package }} 81 | path: dist/ 82 | 83 | - name: Publish package to PyPI 84 | uses: pypa/gh-action-pypi-publish@release/v1 85 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Automatic Release Creation 2 | 3 | on: 4 | 
workflow_dispatch: 5 | schedule: 6 | - cron: '0 10 * * *' 7 | 8 | jobs: 9 | create-metadata: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | hash: ${{ steps.last-release.outputs.hash }} 13 | version: ${{ steps.create-version.outputs.version}} 14 | npm_packages: ${{ steps.create-npm-packages.outputs.npm_packages}} 15 | pypi_packages: ${{ steps.create-pypi-packages.outputs.pypi_packages}} 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Get last release hash 22 | id: last-release 23 | run: | 24 | HASH=$(git rev-list --tags --max-count=1 || echo "HEAD~1") 25 | echo "hash=${HASH}" >> $GITHUB_OUTPUT 26 | echo "Using last release hash: ${HASH}" 27 | 28 | - name: Install uv 29 | uses: astral-sh/setup-uv@v5 30 | 31 | - name: Create version name 32 | id: create-version 33 | run: | 34 | VERSION=$(uv run --script scripts/release.py generate-version) 35 | echo "version $VERSION" 36 | echo "version=$VERSION" >> $GITHUB_OUTPUT 37 | 38 | - name: Create notes 39 | run: | 40 | HASH="${{ steps.last-release.outputs.hash }}" 41 | uv run --script scripts/release.py generate-notes --directory src/ $HASH > RELEASE_NOTES.md 42 | cat RELEASE_NOTES.md 43 | 44 | - name: Release notes 45 | uses: actions/upload-artifact@v4 46 | with: 47 | name: release-notes 48 | path: RELEASE_NOTES.md 49 | 50 | - name: Create python matrix 51 | id: create-pypi-packages 52 | run: | 53 | HASH="${{ steps.last-release.outputs.hash }}" 54 | PYPI=$(uv run --script scripts/release.py generate-matrix --pypi --directory src $HASH) 55 | echo "pypi_packages $PYPI" 56 | echo "pypi_packages=$PYPI" >> $GITHUB_OUTPUT 57 | 58 | - name: Create npm matrix 59 | id: create-npm-packages 60 | run: | 61 | HASH="${{ steps.last-release.outputs.hash }}" 62 | NPM=$(uv run --script scripts/release.py generate-matrix --npm --directory src $HASH) 63 | echo "npm_packages $NPM" 64 | echo "npm_packages=$NPM" >> $GITHUB_OUTPUT 65 | 66 | update-packages: 67 | needs: [create-metadata] 68 | if: ${{ 
needs.create-metadata.outputs.npm_packages != '[]' || needs.create-metadata.outputs.pypi_packages != '[]' }} 69 | runs-on: ubuntu-latest 70 | environment: release 71 | outputs: 72 | changes_made: ${{ steps.commit.outputs.changes_made }} 73 | steps: 74 | - uses: actions/checkout@v4 75 | with: 76 | fetch-depth: 0 77 | 78 | - name: Install uv 79 | uses: astral-sh/setup-uv@v5 80 | 81 | - name: Update packages 82 | run: | 83 | HASH="${{ needs.create-metadata.outputs.hash }}" 84 | uv run --script scripts/release.py update-packages --directory src/ $HASH 85 | 86 | - name: Configure git 87 | run: | 88 | git config --global user.name "GitHub Actions" 89 | git config --global user.email "actions@github.com" 90 | 91 | - name: Commit changes 92 | id: commit 93 | run: | 94 | VERSION="${{ needs.create-metadata.outputs.version }}" 95 | git add -u 96 | if git diff-index --quiet HEAD; then 97 | echo "changes_made=false" >> $GITHUB_OUTPUT 98 | else 99 | git commit -m 'Automatic update of packages' 100 | git tag -a "$VERSION" -m "Release $VERSION" 101 | git push origin "$VERSION" 102 | echo "changes_made=true" >> $GITHUB_OUTPUT 103 | fi 104 | 105 | publish-pypi: 106 | needs: [update-packages, create-metadata] 107 | strategy: 108 | fail-fast: false 109 | matrix: 110 | package: ${{ fromJson(needs.create-metadata.outputs.pypi_packages) }} 111 | name: Build ${{ matrix.package }} 112 | environment: release 113 | permissions: 114 | id-token: write # Required for trusted publishing 115 | runs-on: ubuntu-latest 116 | steps: 117 | - uses: actions/checkout@v4 118 | with: 119 | ref: ${{ needs.create-metadata.outputs.version }} 120 | 121 | - name: Install uv 122 | uses: astral-sh/setup-uv@v5 123 | 124 | - name: Set up Python 125 | uses: actions/setup-python@v5 126 | with: 127 | python-version-file: "src/${{ matrix.package }}/.python-version" 128 | 129 | - name: Install dependencies 130 | working-directory: src/${{ matrix.package }} 131 | run: uv sync --frozen --all-extras --dev 132 | 133 | - 
name: Run pyright 134 | working-directory: src/${{ matrix.package }} 135 | run: uv run --frozen pyright 136 | 137 | - name: Build package 138 | working-directory: src/${{ matrix.package }} 139 | run: uv build 140 | 141 | - name: Publish package to PyPI 142 | uses: pypa/gh-action-pypi-publish@release/v1 143 | with: 144 | packages-dir: src/${{ matrix.package }}/dist 145 | 146 | publish-npm: 147 | needs: [update-packages, create-metadata] 148 | strategy: 149 | fail-fast: false 150 | matrix: 151 | package: ${{ fromJson(needs.create-metadata.outputs.npm_packages) }} 152 | name: Build ${{ matrix.package }} 153 | environment: release 154 | runs-on: ubuntu-latest 155 | steps: 156 | - uses: actions/checkout@v4 157 | with: 158 | ref: ${{ needs.create-metadata.outputs.version }} 159 | 160 | - uses: actions/setup-node@v4 161 | with: 162 | node-version: 22 163 | cache: npm 164 | registry-url: 'https://registry.npmjs.org' 165 | 166 | - name: Install dependencies 167 | working-directory: src/${{ matrix.package }} 168 | run: npm ci 169 | 170 | - name: Check if version exists on npm 171 | working-directory: src/${{ matrix.package }} 172 | run: | 173 | VERSION=$(jq -r .version package.json) 174 | if npm view --json | jq -e --arg version "$VERSION" '[.[]][0].versions | contains([$version])'; then 175 | echo "Version $VERSION already exists on npm" 176 | exit 1 177 | fi 178 | echo "Version $VERSION is new, proceeding with publish" 179 | 180 | - name: Build package 181 | working-directory: src/${{ matrix.package }} 182 | run: npm run build 183 | 184 | - name: Publish package 185 | working-directory: src/${{ matrix.package }} 186 | run: | 187 | npm publish --access public 188 | env: 189 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 190 | 191 | create-release: 192 | needs: [update-packages, create-metadata, publish-pypi, publish-npm] 193 | if: needs.update-packages.outputs.changes_made == 'true' 194 | runs-on: ubuntu-latest 195 | environment: release 196 | permissions: 197 | contents: 
write 198 | steps: 199 | - uses: actions/checkout@v4 200 | 201 | - name: Download release notes 202 | uses: actions/download-artifact@v4 203 | with: 204 | name: release-notes 205 | 206 | - name: Create release 207 | env: 208 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN}} 209 | run: | 210 | VERSION="${{ needs.create-metadata.outputs.version }}" 211 | gh release create "$VERSION" \ 212 | --title "Release $VERSION" \ 213 | --notes-file RELEASE_NOTES.md 214 | 215 | - name: Docker MCP images 216 | uses: peter-evans/repository-dispatch@v3 217 | with: 218 | token: ${{ secrets.DOCKER_TOKEN }} 219 | repository: docker/labs-ai-tools-for-devs 220 | event-type: build-mcp-images 221 | client-payload: '{"ref": "${{ needs.create-metadata.outputs.version }}"}' 222 | -------------------------------------------------------------------------------- /.github/workflows/typescript.yml: -------------------------------------------------------------------------------- 1 | name: TypeScript 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | release: 9 | types: [published] 10 | 11 | jobs: 12 | detect-packages: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | packages: ${{ steps.find-packages.outputs.packages }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Find JS packages 19 | id: find-packages 20 | working-directory: src 21 | run: | 22 | PACKAGES=$(find . 
-name package.json -not -path "*/node_modules/*" -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]') 23 | echo "packages=$PACKAGES" >> $GITHUB_OUTPUT 24 | 25 | build: 26 | needs: [detect-packages] 27 | strategy: 28 | matrix: 29 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 30 | name: Build ${{ matrix.package }} 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: actions/checkout@v4 34 | 35 | - uses: actions/setup-node@v4 36 | with: 37 | node-version: 22 38 | cache: npm 39 | 40 | - name: Install dependencies 41 | working-directory: src/${{ matrix.package }} 42 | run: npm ci 43 | 44 | - name: Build package 45 | working-directory: src/${{ matrix.package }} 46 | run: npm run build 47 | 48 | publish: 49 | runs-on: ubuntu-latest 50 | needs: [build, detect-packages] 51 | if: github.event_name == 'release' 52 | environment: release 53 | 54 | strategy: 55 | matrix: 56 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 57 | name: Publish ${{ matrix.package }} 58 | 59 | permissions: 60 | contents: read 61 | id-token: write 62 | 63 | steps: 64 | - uses: actions/checkout@v4 65 | - uses: actions/setup-node@v4 66 | with: 67 | node-version: 22 68 | cache: npm 69 | registry-url: "https://registry.npmjs.org" 70 | 71 | - name: Install dependencies 72 | working-directory: src/${{ matrix.package }} 73 | run: npm ci 74 | 75 | - name: Publish package 76 | working-directory: src/${{ matrix.package }} 77 | run: npm publish --access public 78 | env: 79 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 80 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime 
data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 
109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | 132 | build/ 133 | 134 | gcp-oauth.keys.json 135 | .*-server-credentials.json 136 | 137 | # Byte-compiled / optimized / DLL files 138 | __pycache__/ 139 | *.py[cod] 140 | *$py.class 141 | 142 | # C extensions 143 | *.so 144 | 145 | # Distribution / packaging 146 | .Python 147 | build/ 148 | develop-eggs/ 149 | dist/ 150 | downloads/ 151 | eggs/ 152 | .eggs/ 153 | lib/ 154 | lib64/ 155 | parts/ 156 | sdist/ 157 | var/ 158 | wheels/ 159 | share/python-wheels/ 160 | *.egg-info/ 161 | .installed.cfg 162 | *.egg 163 | MANIFEST 164 | 165 | # PyInstaller 166 | # Usually these files are written by a python script from a template 167 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
168 | *.manifest 169 | *.spec 170 | 171 | # Installer logs 172 | pip-log.txt 173 | pip-delete-this-directory.txt 174 | 175 | # Unit test / coverage reports 176 | htmlcov/ 177 | .tox/ 178 | .nox/ 179 | .coverage 180 | .coverage.* 181 | .cache 182 | nosetests.xml 183 | coverage.xml 184 | *.cover 185 | *.py,cover 186 | .hypothesis/ 187 | .pytest_cache/ 188 | cover/ 189 | 190 | # Translations 191 | *.mo 192 | *.pot 193 | 194 | # Django stuff: 195 | *.log 196 | local_settings.py 197 | db.sqlite3 198 | db.sqlite3-journal 199 | 200 | # Flask stuff: 201 | instance/ 202 | .webassets-cache 203 | 204 | # Scrapy stuff: 205 | .scrapy 206 | 207 | # Sphinx documentation 208 | docs/_build/ 209 | 210 | # PyBuilder 211 | .pybuilder/ 212 | target/ 213 | 214 | # Jupyter Notebook 215 | .ipynb_checkpoints 216 | 217 | # IPython 218 | profile_default/ 219 | ipython_config.py 220 | 221 | # pyenv 222 | # For a library or package, you might want to ignore these files since the code is 223 | # intended to run in multiple environments; otherwise, check them in: 224 | # .python-version 225 | 226 | # pipenv 227 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 228 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 229 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 230 | # install all needed dependencies. 231 | #Pipfile.lock 232 | 233 | # poetry 234 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 235 | # This is especially recommended for binary packages to ensure reproducibility, and is more 236 | # commonly ignored for libraries. 237 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 238 | #poetry.lock 239 | 240 | # pdm 241 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
242 | #pdm.lock 243 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 244 | # in version control. 245 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 246 | .pdm.toml 247 | .pdm-python 248 | .pdm-build/ 249 | 250 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 251 | __pypackages__/ 252 | 253 | # Celery stuff 254 | celerybeat-schedule 255 | celerybeat.pid 256 | 257 | # SageMath parsed files 258 | *.sage.py 259 | 260 | # Environments 261 | .env 262 | .venv 263 | env/ 264 | venv/ 265 | ENV/ 266 | env.bak/ 267 | venv.bak/ 268 | 269 | # Spyder project settings 270 | .spyderproject 271 | .spyproject 272 | 273 | # Rope project settings 274 | .ropeproject 275 | 276 | # mkdocs documentation 277 | /site 278 | 279 | # mypy 280 | .mypy_cache/ 281 | .dmypy.json 282 | dmypy.json 283 | 284 | # Pyre type checker 285 | .pyre/ 286 | 287 | # pytype static type analyzer 288 | .pytype/ 289 | 290 | # Cython debug symbols 291 | cython_debug/ 292 | 293 | .DS_Store 294 | 295 | # PyCharm 296 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 297 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 298 | # and can be added to the global gitignore or merged into this file. For a more nuclear 299 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
300 | #.idea/ 301 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | registry="https://registry.npmjs.org/" 2 | @modelcontextprotocol:registry="https://registry.npmjs.org/" 3 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | mcp-coc@anthropic.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to MCP Servers 2 | 3 | Thank you for your interest in contributing to the Model Context Protocol (MCP) servers! This document provides guidelines and instructions for contributing. 4 | 5 | ## Types of Contributions 6 | 7 | ### 1. New Servers 8 | 9 | The repository contains reference implementations, as well as a list of community servers. 
10 | We generally don't accept new servers into the repository. We do accept pull requests to the [README.md](./README.md) 11 | adding a reference to your servers. 12 | 13 | Please keep lists in alphabetical order to minimize merge conflicts when adding new items. 14 | 15 | - Check the [modelcontextprotocol.io](https://modelcontextprotocol.io) documentation 16 | - Ensure your server doesn't duplicate existing functionality 17 | - Consider whether your server would be generally useful to others 18 | - Follow [security best practices](https://modelcontextprotocol.io/docs/concepts/transports#security-considerations) from the MCP documentation 19 | - Create a PR adding a link to your server to the [README.md](./README.md). 20 | 21 | ### 2. Improvements to Existing Servers 22 | Enhancements to existing servers are welcome! This includes: 23 | 24 | - Bug fixes 25 | - Performance improvements 26 | - New features 27 | - Security enhancements 28 | 29 | ### 3. Documentation 30 | Documentation improvements are always welcome: 31 | 32 | - Fixing typos or unclear instructions 33 | - Adding examples 34 | - Improving setup instructions 35 | - Adding troubleshooting guides 36 | 37 | ## Getting Started 38 | 39 | 1. Fork the repository 40 | 2. Clone your fork: 41 | ```bash 42 | git clone https://github.com/your-username/servers.git 43 | ``` 44 | 3. Add the upstream remote: 45 | ```bash 46 | git remote add upstream https://github.com/modelcontextprotocol/servers.git 47 | ``` 48 | 4. 
Create a branch: 49 | ```bash 50 | git checkout -b my-feature 51 | ``` 52 | 53 | ## Development Guidelines 54 | 55 | ### Code Style 56 | - Follow the existing code style in the repository 57 | - Include appropriate type definitions 58 | - Add comments for complex logic 59 | 60 | ### Documentation 61 | - Include a detailed README.md in your server directory 62 | - Document all configuration options 63 | - Provide setup instructions 64 | - Include usage examples 65 | 66 | ### Security 67 | - Follow security best practices 68 | - Implement proper input validation 69 | - Handle errors appropriately 70 | - Document security considerations 71 | 72 | ## Submitting Changes 73 | 74 | 1. Commit your changes: 75 | ```bash 76 | git add . 77 | git commit -m "Description of changes" 78 | ``` 79 | 2. Push to your fork: 80 | ```bash 81 | git push origin my-feature 82 | ``` 83 | 3. Create a Pull Request through GitHub 84 | 85 | ### Pull Request Guidelines 86 | 87 | - Thoroughly test your changes 88 | - Fill out the pull request template completely 89 | - Link any related issues 90 | - Provide clear description of changes 91 | - Include any necessary documentation updates 92 | - Add screenshots for UI changes 93 | - List any breaking changes 94 | 95 | ## Community 96 | 97 | - Participate in [GitHub Discussions](https://github.com/orgs/modelcontextprotocol/discussions) 98 | - Follow the [Code of Conduct](CODE_OF_CONDUCT.md) 99 | 100 | ## Questions? 101 | 102 | - Check the [documentation](https://modelcontextprotocol.io) 103 | - Ask in GitHub Discussions 104 | 105 | Thank you for contributing to MCP Servers! 
106 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Anthropic, PBC 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | Thank you for helping us keep our MCP servers secure. 3 | 4 | These servers are maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project. 5 | 6 | The security of our systems and user data is Anthropic’s top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities. 
7 | 8 | ## Vulnerability Disclosure Program 9 | 10 | Our Vulnerability Program guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp). We ask that any validated vulnerability in this functionality be reported through the [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability). 11 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/servers", 3 | "private": true, 4 | "version": "0.6.2", 5 | "description": "Model Context Protocol servers", 6 | "license": "MIT", 7 | "author": "Anthropic, PBC (https://anthropic.com)", 8 | "homepage": "https://modelcontextprotocol.io", 9 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 10 | "type": "module", 11 | "workspaces": [ 12 | "src/*" 13 | ], 14 | "files": [], 15 | "scripts": { 16 | "build": "npm run build --workspaces", 17 | "watch": "npm run watch --workspaces", 18 | "publish-all": "npm publish --workspaces --access public", 19 | "link-all": "npm link --workspaces" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/server-everything": "*", 23 | "@modelcontextprotocol/server-memory": "*", 24 | "@modelcontextprotocol/server-filesystem": "*", 25 | "@modelcontextprotocol/server-sequential-thinking": "*" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /scripts/release.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env uv run --script 2 | # /// script 3 | # requires-python = ">=3.12" 4 | # dependencies = [ 5 | # "click>=8.1.8", 6 | # "tomlkit>=0.13.2" 7 | # ] 8 | # /// 9 | import sys 10 | import re 11 | import click 12 | from pathlib import Path 13 | import json 14 | import tomlkit 15 | import datetime 16 | import subprocess 17 | from dataclasses import 
dataclass 18 | from typing import Any, Iterator, NewType, Protocol 19 | 20 | 21 | Version = NewType("Version", str) 22 | GitHash = NewType("GitHash", str) 23 | 24 | 25 | class GitHashParamType(click.ParamType): 26 | name = "git_hash" 27 | 28 | def convert( 29 | self, value: Any, param: click.Parameter | None, ctx: click.Context | None 30 | ) -> GitHash | None: 31 | if value is None: 32 | return None 33 | 34 | if not (8 <= len(value) <= 40): 35 | self.fail(f"Git hash must be between 8 and 40 characters, got {len(value)}") 36 | 37 | if not re.match(r"^[0-9a-fA-F]+$", value): 38 | self.fail("Git hash must contain only hex digits (0-9, a-f)") 39 | 40 | try: 41 | # Verify hash exists in repo 42 | subprocess.run( 43 | ["git", "rev-parse", "--verify", value], check=True, capture_output=True 44 | ) 45 | except subprocess.CalledProcessError: 46 | self.fail(f"Git hash {value} not found in repository") 47 | 48 | return GitHash(value.lower()) 49 | 50 | 51 | GIT_HASH = GitHashParamType() 52 | 53 | 54 | class Package(Protocol): 55 | path: Path 56 | 57 | def package_name(self) -> str: ... 58 | 59 | def update_version(self, version: Version) -> None: ... 
60 | 61 | 62 | @dataclass 63 | class NpmPackage: 64 | path: Path 65 | 66 | def package_name(self) -> str: 67 | with open(self.path / "package.json", "r") as f: 68 | return json.load(f)["name"] 69 | 70 | def update_version(self, version: Version): 71 | with open(self.path / "package.json", "r+") as f: 72 | data = json.load(f) 73 | data["version"] = version 74 | f.seek(0) 75 | json.dump(data, f, indent=2) 76 | f.truncate() 77 | 78 | 79 | @dataclass 80 | class PyPiPackage: 81 | path: Path 82 | 83 | def package_name(self) -> str: 84 | with open(self.path / "pyproject.toml") as f: 85 | toml_data = tomlkit.parse(f.read()) 86 | name = toml_data.get("project", {}).get("name") 87 | if not name: 88 | raise Exception("No name in pyproject.toml project section") 89 | return str(name) 90 | 91 | def update_version(self, version: Version): 92 | # Update version in pyproject.toml 93 | with open(self.path / "pyproject.toml") as f: 94 | data = tomlkit.parse(f.read()) 95 | data["project"]["version"] = version 96 | 97 | with open(self.path / "pyproject.toml", "w") as f: 98 | f.write(tomlkit.dumps(data)) 99 | 100 | 101 | def has_changes(path: Path, git_hash: GitHash) -> bool: 102 | """Check if any files changed between current state and git hash""" 103 | try: 104 | output = subprocess.run( 105 | ["git", "diff", "--name-only", git_hash, "--", "."], 106 | cwd=path, 107 | check=True, 108 | capture_output=True, 109 | text=True, 110 | ) 111 | 112 | changed_files = [Path(f) for f in output.stdout.splitlines()] 113 | relevant_files = [f for f in changed_files if f.suffix in [".py", ".ts"]] 114 | return len(relevant_files) >= 1 115 | except subprocess.CalledProcessError: 116 | return False 117 | 118 | 119 | def gen_version() -> Version: 120 | """Generate version based on current date""" 121 | now = datetime.datetime.now() 122 | return Version(f"{now.year}.{now.month}.{now.day}") 123 | 124 | 125 | def find_changed_packages(directory: Path, git_hash: GitHash) -> Iterator[Package]: 126 | for path 
in directory.glob("*/package.json"): 127 | if has_changes(path.parent, git_hash): 128 | yield NpmPackage(path.parent) 129 | for path in directory.glob("*/pyproject.toml"): 130 | if has_changes(path.parent, git_hash): 131 | yield PyPiPackage(path.parent) 132 | 133 | 134 | @click.group() 135 | def cli(): 136 | pass 137 | 138 | 139 | @cli.command("update-packages") 140 | @click.option( 141 | "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd() 142 | ) 143 | @click.argument("git_hash", type=GIT_HASH) 144 | def update_packages(directory: Path, git_hash: GitHash) -> int: 145 | # Detect package type 146 | path = directory.resolve(strict=True) 147 | version = gen_version() 148 | 149 | for package in find_changed_packages(path, git_hash): 150 | name = package.package_name() 151 | package.update_version(version) 152 | 153 | click.echo(f"{name}@{version}") 154 | 155 | return 0 156 | 157 | 158 | @cli.command("generate-notes") 159 | @click.option( 160 | "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd() 161 | ) 162 | @click.argument("git_hash", type=GIT_HASH) 163 | def generate_notes(directory: Path, git_hash: GitHash) -> int: 164 | # Detect package type 165 | path = directory.resolve(strict=True) 166 | version = gen_version() 167 | 168 | click.echo(f"# Release : v{version}") 169 | click.echo("") 170 | click.echo("## Updated packages") 171 | for package in find_changed_packages(path, git_hash): 172 | name = package.package_name() 173 | click.echo(f"- {name}@{version}") 174 | 175 | return 0 176 | 177 | 178 | @cli.command("generate-version") 179 | def generate_version() -> int: 180 | # Detect package type 181 | click.echo(gen_version()) 182 | return 0 183 | 184 | 185 | @cli.command("generate-matrix") 186 | @click.option( 187 | "--directory", type=click.Path(exists=True, path_type=Path), default=Path.cwd() 188 | ) 189 | @click.option("--npm", is_flag=True, default=False) 190 | @click.option("--pypi", is_flag=True, 
default=False) 191 | @click.argument("git_hash", type=GIT_HASH) 192 | def generate_matrix(directory: Path, git_hash: GitHash, pypi: bool, npm: bool) -> int: 193 | # Detect package type 194 | path = directory.resolve(strict=True) 195 | version = gen_version() 196 | 197 | changes = [] 198 | for package in find_changed_packages(path, git_hash): 199 | pkg = package.path.relative_to(path) 200 | if npm and isinstance(package, NpmPackage): 201 | changes.append(str(pkg)) 202 | if pypi and isinstance(package, PyPiPackage): 203 | changes.append(str(pkg)) 204 | 205 | click.echo(json.dumps(changes)) 206 | return 0 207 | 208 | 209 | if __name__ == "__main__": 210 | sys.exit(cli()) 211 | -------------------------------------------------------------------------------- /src/everything/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # MCP "Everything" Server - Development Guidelines 2 | 3 | ## Build, Test & Run Commands 4 | - Build: `npm run build` - Compiles TypeScript to JavaScript 5 | - Watch mode: `npm run watch` - Watches for changes and rebuilds automatically 6 | - Run server: `npm run start` - Starts the MCP server using stdio transport 7 | - Run SSE server: `npm run start:sse` - Starts the MCP server with SSE transport 8 | - Prepare release: `npm run prepare` - Builds the project for publishing 9 | 10 | ## Code Style Guidelines 11 | - Use ES modules with `.js` extension in import paths 12 | - Strictly type all functions and variables with TypeScript 13 | - Follow zod schema patterns for tool input validation 14 | - Prefer async/await over callbacks and Promise chains 15 | - Place all imports at top of file, grouped by external then internal 16 | - Use descriptive variable names that clearly indicate purpose 17 | - Implement proper cleanup for timers and resources in server shutdown 18 | - Follow camelCase for variables/functions, PascalCase for types/classes, UPPER_CASE for constants 19 | - Handle errors with try/catch blocks 
and provide clear error messages 20 | - Use consistent indentation (2 spaces) and trailing commas in multi-line objects -------------------------------------------------------------------------------- /src/everything/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22.12-alpine AS builder 2 | 3 | COPY src/everything /app 4 | COPY tsconfig.json /tsconfig.json 5 | 6 | WORKDIR /app 7 | 8 | RUN --mount=type=cache,target=/root/.npm npm install 9 | 10 | FROM node:22-alpine AS release 11 | 12 | WORKDIR /app 13 | 14 | COPY --from=builder /app/dist /app/dist 15 | COPY --from=builder /app/package.json /app/package.json 16 | COPY --from=builder /app/package-lock.json /app/package-lock.json 17 | 18 | ENV NODE_ENV=production 19 | 20 | RUN npm ci --ignore-scripts --omit-dev 21 | 22 | CMD ["node", "dist/index.js"] -------------------------------------------------------------------------------- /src/everything/README.md: -------------------------------------------------------------------------------- 1 | # Everything MCP Server 2 | 3 | This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities. 4 | 5 | ## Components 6 | 7 | ### Tools 8 | 9 | 1. `echo` 10 | - Simple tool to echo back input messages 11 | - Input: 12 | - `message` (string): Message to echo back 13 | - Returns: Text content with echoed message 14 | 15 | 2. `add` 16 | - Adds two numbers together 17 | - Inputs: 18 | - `a` (number): First number 19 | - `b` (number): Second number 20 | - Returns: Text result of the addition 21 | 22 | 3. 
`longRunningOperation` 23 | - Demonstrates progress notifications for long operations 24 | - Inputs: 25 | - `duration` (number, default: 10): Duration in seconds 26 | - `steps` (number, default: 5): Number of progress steps 27 | - Returns: Completion message with duration and steps 28 | - Sends progress notifications during execution 29 | 30 | 4. `sampleLLM` 31 | - Demonstrates LLM sampling capability using MCP sampling feature 32 | - Inputs: 33 | - `prompt` (string): The prompt to send to the LLM 34 | - `maxTokens` (number, default: 100): Maximum tokens to generate 35 | - Returns: Generated LLM response 36 | 37 | 5. `getTinyImage` 38 | - Returns a small test image 39 | - No inputs required 40 | - Returns: Base64 encoded PNG image data 41 | 42 | 6. `printEnv` 43 | - Prints all environment variables 44 | - Useful for debugging MCP server configuration 45 | - No inputs required 46 | - Returns: JSON string of all environment variables 47 | 48 | 7. `annotatedMessage` 49 | - Demonstrates how annotations can be used to provide metadata about content 50 | - Inputs: 51 | - `messageType` (enum: "error" | "success" | "debug"): Type of message to demonstrate different annotation patterns 52 | - `includeImage` (boolean, default: false): Whether to include an example image 53 | - Returns: Content with varying annotations: 54 | - Error messages: High priority (1.0), visible to both user and assistant 55 | - Success messages: Medium priority (0.7), user-focused 56 | - Debug messages: Low priority (0.3), assistant-focused 57 | - Optional image: Medium priority (0.5), user-focused 58 | - Example annotations: 59 | ```json 60 | { 61 | "priority": 1.0, 62 | "audience": ["user", "assistant"] 63 | } 64 | ``` 65 | 66 | 8. 
`getResourceReference` 67 | - Returns a resource reference that can be used by MCP clients 68 | - Inputs: 69 | - `resourceId` (number, 1-100): ID of the resource to reference 70 | - Returns: A resource reference with: 71 | - Text introduction 72 | - Embedded resource with `type: "resource"` 73 | - Text instruction for using the resource URI 74 | 75 | ### Resources 76 | 77 | The server provides 100 test resources in two formats: 78 | - Even numbered resources: 79 | - Plaintext format 80 | - URI pattern: `test://static/resource/{even_number}` 81 | - Content: Simple text description 82 | 83 | - Odd numbered resources: 84 | - Binary blob format 85 | - URI pattern: `test://static/resource/{odd_number}` 86 | - Content: Base64 encoded binary data 87 | 88 | Resource features: 89 | - Supports pagination (10 items per page) 90 | - Allows subscribing to resource updates 91 | - Demonstrates resource templates 92 | - Auto-updates subscribed resources every 5 seconds 93 | 94 | ### Prompts 95 | 96 | 1. `simple_prompt` 97 | - Basic prompt without arguments 98 | - Returns: Single message exchange 99 | 100 | 2. `complex_prompt` 101 | - Advanced prompt demonstrating argument handling 102 | - Required arguments: 103 | - `temperature` (number): Temperature setting 104 | - Optional arguments: 105 | - `style` (string): Output style preference 106 | - Returns: Multi-turn conversation with images 107 | 108 | 3. 
`resource_prompt` 109 | - Demonstrates embedding resource references in prompts 110 | - Required arguments: 111 | - `resourceId` (number): ID of the resource to embed (1-100) 112 | - Returns: Multi-turn conversation with an embedded resource reference 113 | - Shows how to include resources directly in prompt messages 114 | 115 | ### Logging 116 | 117 | The server sends random-leveled log messages every 15 seconds, e.g.: 118 | 119 | ```json 120 | { 121 | "method": "notifications/message", 122 | "params": { 123 | "level": "info", 124 | "data": "Info-level message" 125 | } 126 | } 127 | ``` 128 | 129 | ## Usage with Claude Desktop (uses [stdio Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#stdio)) 130 | 131 | Add to your `claude_desktop_config.json`: 132 | 133 | ```json 134 | { 135 | "mcpServers": { 136 | "everything": { 137 | "command": "npx", 138 | "args": [ 139 | "-y", 140 | "@modelcontextprotocol/server-everything" 141 | ] 142 | } 143 | } 144 | } 145 | ``` 146 | 147 | ## Usage with VS Code 148 | 149 | For quick installation, use one of the one-click install buttons below... 
150 | 151 | [![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-everything%22%5D%7D&quality=insiders) 152 | 153 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=everything&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Feverything%22%5D%7D&quality=insiders) 154 | 155 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`. 156 | 157 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 158 | 159 | > Note that the `mcp` key is not needed in the `.vscode/mcp.json` file. 
160 | 161 | #### NPX 162 | 163 | ```json 164 | { 165 | "mcp": { 166 | "servers": { 167 | "everything": { 168 | "command": "npx", 169 | "args": ["-y", "@modelcontextprotocol/server-everything"] 170 | } 171 | } 172 | } 173 | } 174 | ``` 175 | 176 | ## Running from source with [HTTP+SSE Transport](https://modelcontextprotocol.io/specification/2024-11-05/basic/transports#http-with-sse) (deprecated as of [2025-03-26](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports)) 177 | 178 | ```shell 179 | cd src/everything 180 | npm install 181 | npm run start:sse 182 | ``` 183 | 184 | ## Run from source with [Streamable HTTP Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http) 185 | 186 | ```shell 187 | cd src/everything 188 | npm install 189 | npm run start:streamableHttp 190 | ``` 191 | 192 | ## Running as an installed package 193 | ### Install 194 | ```shell 195 | npm install -g @modelcontextprotocol/server-everything@latest 196 | ``` 197 | 198 | ### Run the default (stdio) server 199 | ```shell 200 | npx @modelcontextprotocol/server-everything 201 | ``` 202 | 203 | ### Or specify stdio explicitly 204 | ```shell 205 | npx @modelcontextprotocol/server-everything stdio 206 | ``` 207 | 208 | ### Run the SSE server 209 | ```shell 210 | npx @modelcontextprotocol/server-everything sse 211 | ``` 212 | 213 | ### Run the streamable HTTP server 214 | ```shell 215 | npx @modelcontextprotocol/server-everything streamableHttp 216 | ``` 217 | 218 | -------------------------------------------------------------------------------- /src/everything/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Parse command line arguments first 4 | const args = process.argv.slice(2); 5 | const scriptName = args[0] || 'stdio'; 6 | 7 | async function run() { 8 | try { 9 | // Dynamically import only the requested module to prevent all modules from initializing 10 
| switch (scriptName) { 11 | case 'stdio': 12 | // Import and run the default server 13 | await import('./stdio.js'); 14 | break; 15 | case 'sse': 16 | // Import and run the SSE server 17 | await import('./sse.js'); 18 | break; 19 | case 'streamableHttp': 20 | // Import and run the streamable HTTP server 21 | await import('./streamableHttp.js'); 22 | break; 23 | default: 24 | console.error(`Unknown script: ${scriptName}`); 25 | console.log('Available scripts:'); 26 | console.log('- stdio'); 27 | console.log('- sse'); 28 | console.log('- streamableHttp'); 29 | process.exit(1); 30 | } 31 | } catch (error) { 32 | console.error('Error running script:', error); 33 | process.exit(1); 34 | } 35 | } 36 | 37 | run(); 38 | -------------------------------------------------------------------------------- /src/everything/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-everything", 3 | "version": "0.6.2", 4 | "description": "MCP server that exercises all the features of the MCP protocol", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-everything": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch", 20 | "start": "node dist/index.js", 21 | "start:sse": "node dist/sse.js", 22 | "start:streamableHttp": "node dist/streamableHttp.js" 23 | }, 24 | "dependencies": { 25 | "@modelcontextprotocol/sdk": "^1.12.0", 26 | "express": "^4.21.1", 27 | "zod": "^3.23.8", 28 | "zod-to-json-schema": "^3.23.5" 29 | }, 30 | "devDependencies": { 31 | "@types/express": "^5.0.0", 32 | "shx": "^0.3.4", 33 | "typescript": "^5.6.2" 34 | } 35 | } 36 | 
-------------------------------------------------------------------------------- /src/everything/sse.ts: -------------------------------------------------------------------------------- 1 | import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js"; 2 | import express from "express"; 3 | import { createServer } from "./everything.js"; 4 | 5 | console.error('Starting SSE server...'); 6 | 7 | const app = express(); 8 | 9 | const transports: Map = new Map(); 10 | 11 | app.get("/sse", async (req, res) => { 12 | let transport: SSEServerTransport; 13 | const { server, cleanup } = createServer(); 14 | 15 | if (req?.query?.sessionId) { 16 | const sessionId = (req?.query?.sessionId as string); 17 | transport = transports.get(sessionId) as SSEServerTransport; 18 | console.error("Client Reconnecting? This shouldn't happen; when client has a sessionId, GET /sse should not be called again.", transport.sessionId); 19 | } else { 20 | // Create and store transport for new session 21 | transport = new SSEServerTransport("/message", res); 22 | transports.set(transport.sessionId, transport); 23 | 24 | // Connect server to transport 25 | await server.connect(transport); 26 | console.error("Client Connected: ", transport.sessionId); 27 | 28 | // Handle close of connection 29 | server.onclose = async () => { 30 | console.error("Client Disconnected: ", transport.sessionId); 31 | transports.delete(transport.sessionId); 32 | await cleanup(); 33 | }; 34 | 35 | } 36 | 37 | }); 38 | 39 | app.post("/message", async (req, res) => { 40 | const sessionId = (req?.query?.sessionId as string); 41 | const transport = transports.get(sessionId); 42 | if (transport) { 43 | console.error("Client Message from", sessionId); 44 | await transport.handlePostMessage(req, res); 45 | } else { 46 | console.error(`No transport found for sessionId ${sessionId}`) 47 | } 48 | }); 49 | 50 | const PORT = process.env.PORT || 3001; 51 | app.listen(PORT, () => { 52 | console.error(`Server is running on 
port ${PORT}`); 53 | }); 54 | -------------------------------------------------------------------------------- /src/everything/stdio.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 4 | import { createServer } from "./everything.js"; 5 | 6 | console.error('Starting default (STDIO) server...'); 7 | 8 | async function main() { 9 | const transport = new StdioServerTransport(); 10 | const {server, cleanup} = createServer(); 11 | 12 | await server.connect(transport); 13 | 14 | // Cleanup on exit 15 | process.on("SIGINT", async () => { 16 | await cleanup(); 17 | await server.close(); 18 | process.exit(0); 19 | }); 20 | } 21 | 22 | main().catch((error) => { 23 | console.error("Server error:", error); 24 | process.exit(1); 25 | }); 26 | 27 | -------------------------------------------------------------------------------- /src/everything/streamableHttp.ts: -------------------------------------------------------------------------------- 1 | import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; 2 | import { InMemoryEventStore } from '@modelcontextprotocol/sdk/examples/shared/inMemoryEventStore.js'; 3 | import express, { Request, Response } from "express"; 4 | import { createServer } from "./everything.js"; 5 | import { randomUUID } from 'node:crypto'; 6 | 7 | console.error('Starting Streamable HTTP server...'); 8 | 9 | const app = express(); 10 | 11 | const transports: Map = new Map(); 12 | 13 | app.post('/mcp', async (req: Request, res: Response) => { 14 | console.error('Received MCP POST request'); 15 | try { 16 | // Check for existing session ID 17 | const sessionId = req.headers['mcp-session-id'] as string | undefined; 18 | let transport: StreamableHTTPServerTransport; 19 | 20 | if (sessionId && transports.has(sessionId)) { 21 | // Reuse existing transport 22 | transport = 
transports.get(sessionId)!; 23 | } else if (!sessionId) { 24 | 25 | const { server, cleanup } = createServer(); 26 | 27 | // New initialization request 28 | const eventStore = new InMemoryEventStore(); 29 | transport = new StreamableHTTPServerTransport({ 30 | sessionIdGenerator: () => randomUUID(), 31 | eventStore, // Enable resumability 32 | onsessioninitialized: (sessionId: string) => { 33 | // Store the transport by session ID when session is initialized 34 | // This avoids race conditions where requests might come in before the session is stored 35 | console.error(`Session initialized with ID: ${sessionId}`); 36 | transports.set(sessionId, transport); 37 | } 38 | }); 39 | 40 | 41 | // Set up onclose handler to clean up transport when closed 42 | server.onclose = async () => { 43 | const sid = transport.sessionId; 44 | if (sid && transports.has(sid)) { 45 | console.error(`Transport closed for session ${sid}, removing from transports map`); 46 | transports.delete(sid); 47 | await cleanup(); 48 | } 49 | }; 50 | 51 | // Connect the transport to the MCP server BEFORE handling the request 52 | // so responses can flow back through the same transport 53 | await server.connect(transport); 54 | 55 | await transport.handleRequest(req, res); 56 | return; // Already handled 57 | } else { 58 | // Invalid request - no session ID or not initialization request 59 | res.status(400).json({ 60 | jsonrpc: '2.0', 61 | error: { 62 | code: -32000, 63 | message: 'Bad Request: No valid session ID provided', 64 | }, 65 | id: req?.body?.id, 66 | }); 67 | return; 68 | } 69 | 70 | // Handle the request with existing transport - no need to reconnect 71 | // The existing transport is already connected to the server 72 | await transport.handleRequest(req, res); 73 | } catch (error) { 74 | console.error('Error handling MCP request:', error); 75 | if (!res.headersSent) { 76 | res.status(500).json({ 77 | jsonrpc: '2.0', 78 | error: { 79 | code: -32603, 80 | message: 'Internal server error', 81 | 
}, 82 | id: req?.body?.id, 83 | }); 84 | return; 85 | } 86 | } 87 | }); 88 | 89 | // Handle GET requests for SSE streams (using built-in support from StreamableHTTP) 90 | app.get('/mcp', async (req: Request, res: Response) => { 91 | console.error('Received MCP GET request'); 92 | const sessionId = req.headers['mcp-session-id'] as string | undefined; 93 | if (!sessionId || !transports.has(sessionId)) { 94 | res.status(400).json({ 95 | jsonrpc: '2.0', 96 | error: { 97 | code: -32000, 98 | message: 'Bad Request: No valid session ID provided', 99 | }, 100 | id: req?.body?.id, 101 | }); 102 | return; 103 | } 104 | 105 | // Check for Last-Event-ID header for resumability 106 | const lastEventId = req.headers['last-event-id'] as string | undefined; 107 | if (lastEventId) { 108 | console.error(`Client reconnecting with Last-Event-ID: ${lastEventId}`); 109 | } else { 110 | console.error(`Establishing new SSE stream for session ${sessionId}`); 111 | } 112 | 113 | const transport = transports.get(sessionId); 114 | await transport!.handleRequest(req, res); 115 | }); 116 | 117 | // Handle DELETE requests for session termination (according to MCP spec) 118 | app.delete('/mcp', async (req: Request, res: Response) => { 119 | const sessionId = req.headers['mcp-session-id'] as string | undefined; 120 | if (!sessionId || !transports.has(sessionId)) { 121 | res.status(400).json({ 122 | jsonrpc: '2.0', 123 | error: { 124 | code: -32000, 125 | message: 'Bad Request: No valid session ID provided', 126 | }, 127 | id: req?.body?.id, 128 | }); 129 | return; 130 | } 131 | 132 | console.error(`Received session termination request for session ${sessionId}`); 133 | 134 | try { 135 | const transport = transports.get(sessionId); 136 | await transport!.handleRequest(req, res); 137 | } catch (error) { 138 | console.error('Error handling session termination:', error); 139 | if (!res.headersSent) { 140 | res.status(500).json({ 141 | jsonrpc: '2.0', 142 | error: { 143 | code: -32603, 144 | message: 
'Error handling session termination', 145 | }, 146 | id: req?.body?.id, 147 | }); 148 | return; 149 | } 150 | } 151 | }); 152 | 153 | // Start the server 154 | const PORT = process.env.PORT || 3001; 155 | app.listen(PORT, () => { 156 | console.error(`MCP Streamable HTTP Server listening on port ${PORT}`); 157 | }); 158 | 159 | // Handle server shutdown 160 | process.on('SIGINT', async () => { 161 | console.error('Shutting down server...'); 162 | 163 | // Close all active transports to properly clean up resources 164 | for (const sessionId in transports) { 165 | try { 166 | console.error(`Closing transport for session ${sessionId}`); 167 | await transports.get(sessionId)!.close(); 168 | transports.delete(sessionId); 169 | } catch (error) { 170 | console.error(`Error closing transport for session ${sessionId}:`, error); 171 | } 172 | } 173 | 174 | console.error('Server shutdown complete'); 175 | process.exit(0); 176 | }); 177 | -------------------------------------------------------------------------------- /src/everything/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 
6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/fetch/.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /src/fetch/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use a Python image with uv pre-installed 2 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv 3 | 4 | # Install the project into `/app` 5 | WORKDIR /app 6 | 7 | # Enable bytecode compilation 8 | ENV UV_COMPILE_BYTECODE=1 9 | 10 | # Copy from the cache instead of linking since it's a mounted volume 11 | ENV UV_LINK_MODE=copy 12 | 13 | # Install the project's dependencies using the lockfile and settings 14 | RUN --mount=type=cache,target=/root/.cache/uv \ 15 | --mount=type=bind,source=uv.lock,target=uv.lock \ 16 | --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ 17 | uv sync --frozen --no-install-project --no-dev --no-editable 18 | 19 | # Then, add the rest of the project source code and install it 20 | # Installing separately from its dependencies allows optimal layer caching 21 | ADD . 
/app 22 | RUN --mount=type=cache,target=/root/.cache/uv \ 23 | uv sync --frozen --no-dev --no-editable 24 | 25 | FROM python:3.12-slim-bookworm 26 | 27 | WORKDIR /app 28 | 29 | COPY --from=uv /root/.local /root/.local 30 | COPY --from=uv --chown=app:app /app/.venv /app/.venv 31 | 32 | # Place executables in the environment at the front of the path 33 | ENV PATH="/app/.venv/bin:$PATH" 34 | 35 | # when running the container, add --db-path and a bind mount to the host's db file 36 | ENTRYPOINT ["mcp-server-fetch"] 37 | -------------------------------------------------------------------------------- /src/fetch/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Anthropic, PBC. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
8 | -------------------------------------------------------------------------------- /src/fetch/README.md: -------------------------------------------------------------------------------- 1 | # Fetch MCP Server 2 | 3 | A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption. 4 | 5 | > [!CAUTION] 6 | > This server can access local/internal IP addresses and may represent a security risk. Exercise caution when using this MCP server to ensure this does not expose any sensitive data. 7 | 8 | The fetch tool will truncate the response, but by using the `start_index` argument, you can specify where to start the content extraction. This lets models read a webpage in chunks, until they find the information they need. 9 | 10 | ### Available Tools 11 | 12 | - `fetch` - Fetches a URL from the internet and extracts its contents as markdown. 13 | - `url` (string, required): URL to fetch 14 | - `max_length` (integer, optional): Maximum number of characters to return (default: 5000) 15 | - `start_index` (integer, optional): Start content from this character index (default: 0) 16 | - `raw` (boolean, optional): Get raw content without markdown conversion (default: false) 17 | 18 | ### Prompts 19 | 20 | - **fetch** 21 | - Fetch a URL and extract its contents as markdown 22 | - Arguments: 23 | - `url` (string, required): URL to fetch 24 | 25 | ## Installation 26 | 27 | Optionally: Install node.js, this will cause the fetch server to use a different HTML simplifier that is more robust. 28 | 29 | ### Using uv (recommended) 30 | 31 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 32 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-fetch*. 
33 | 34 | ### Using PIP 35 | 36 | Alternatively you can install `mcp-server-fetch` via pip: 37 | 38 | ``` 39 | pip install mcp-server-fetch 40 | ``` 41 | 42 | After installation, you can run it as a script using: 43 | 44 | ``` 45 | python -m mcp_server_fetch 46 | ``` 47 | 48 | ## Configuration 49 | 50 | ### Configure for Claude.app 51 | 52 | Add to your Claude settings: 53 | 54 |
55 | Using uvx 56 | 57 | ```json 58 | { 59 | "mcpServers": { 60 | "fetch": { 61 | "command": "uvx", 62 | "args": ["mcp-server-fetch"] 63 | } 64 | } 65 | } 66 | ``` 67 |
68 | 69 |
Using Docker
83 | 84 |
85 | Using pip installation 86 | 87 | ```json 88 | { 89 | "mcpServers": { 90 | "fetch": { 91 | "command": "python", 92 | "args": ["-m", "mcp_server_fetch"] 93 | } 94 | } 95 | } 96 | ``` 97 |
98 | 99 | ### Configure for VS Code 100 | 101 | For quick installation, use one of the one-click install buttons below... 102 | 103 | [![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-fetch%22%5D%7D&quality=insiders) 104 | 105 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=fetch&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ffetch%22%5D%7D&quality=insiders) 106 | 107 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`. 108 | 109 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 110 | 111 | > Note that the `mcp` key is needed when using the `mcp.json` file. 112 | 113 |
114 | Using uvx 115 | 116 | ```json 117 | { 118 | "mcp": { 119 | "servers": { 120 | "fetch": { 121 | "command": "uvx", 122 | "args": ["mcp-server-fetch"] 123 | } 124 | } 125 | } 126 | } 127 | ``` 128 |
129 | 130 |
131 | Using Docker 132 | 133 | ```json 134 | { 135 | "mcp": { 136 | "servers": { 137 | "fetch": { 138 | "command": "docker", 139 | "args": ["run", "-i", "--rm", "mcp/fetch"] 140 | } 141 | } 142 | } 143 | } 144 | ``` 145 |
146 | 147 | ### Customization - robots.txt 148 | 149 | By default, the server will obey a websites robots.txt file if the request came from the model (via a tool), but not if 150 | the request was user initiated (via a prompt). This can be disabled by adding the argument `--ignore-robots-txt` to the 151 | `args` list in the configuration. 152 | 153 | ### Customization - User-agent 154 | 155 | By default, depending on if the request came from the model (via a tool), or was user initiated (via a prompt), the 156 | server will use either the user-agent 157 | ``` 158 | ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers) 159 | ``` 160 | or 161 | ``` 162 | ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers) 163 | ``` 164 | 165 | This can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration. 166 | 167 | ### Customization - Proxy 168 | 169 | The server can be configured to use a proxy by using the `--proxy-url` argument. 170 | 171 | ## Debugging 172 | 173 | You can use the MCP inspector to debug the server. For uvx installations: 174 | 175 | ``` 176 | npx @modelcontextprotocol/inspector uvx mcp-server-fetch 177 | ``` 178 | 179 | Or if you've installed the package in a specific directory or are developing on it: 180 | 181 | ``` 182 | cd path/to/servers/src/fetch 183 | npx @modelcontextprotocol/inspector uv run mcp-server-fetch 184 | ``` 185 | 186 | ## Contributing 187 | 188 | We encourage contributions to help expand and improve mcp-server-fetch. Whether you want to add new tools, enhance existing functionality, or improve documentation, your input is valuable. 189 | 190 | For examples of other MCP servers and implementation patterns, see: 191 | https://github.com/modelcontextprotocol/servers 192 | 193 | Pull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-fetch even more powerful and useful. 
194 | 195 | ## License 196 | 197 | mcp-server-fetch is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 198 | -------------------------------------------------------------------------------- /src/fetch/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-fetch" 3 | version = "0.6.3" 4 | description = "A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [{ name = "Anthropic, PBC." }] 8 | maintainers = [{ name = "Jack Adamson", email = "jadamson@anthropic.com" }] 9 | keywords = ["http", "mcp", "llm", "automation"] 10 | license = { text = "MIT" } 11 | classifiers = [ 12 | "Development Status :: 4 - Beta", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Programming Language :: Python :: 3", 16 | "Programming Language :: Python :: 3.10", 17 | ] 18 | dependencies = [ 19 | "httpx<0.28", 20 | "markdownify>=0.13.1", 21 | "mcp>=1.1.3", 22 | "protego>=0.3.1", 23 | "pydantic>=2.0.0", 24 | "readabilipy>=0.2.0", 25 | "requests>=2.32.3", 26 | ] 27 | 28 | [project.scripts] 29 | mcp-server-fetch = "mcp_server_fetch:main" 30 | 31 | [build-system] 32 | requires = ["hatchling"] 33 | build-backend = "hatchling.build" 34 | 35 | [tool.uv] 36 | dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3"] 37 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import serve 2 | 3 | 4 | def main(): 5 | """MCP Fetch Server - HTTP fetching functionality for MCP""" 6 | import argparse 7 | import asyncio 8 | 9 | 
parser = argparse.ArgumentParser( 10 | description="give a model the ability to make web requests" 11 | ) 12 | parser.add_argument("--user-agent", type=str, help="Custom User-Agent string") 13 | parser.add_argument( 14 | "--ignore-robots-txt", 15 | action="store_true", 16 | help="Ignore robots.txt restrictions", 17 | ) 18 | parser.add_argument("--proxy-url", type=str, help="Proxy URL to use for requests") 19 | 20 | args = parser.parse_args() 21 | asyncio.run(serve(args.user_agent, args.ignore_robots_txt, args.proxy_url)) 22 | 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/__main__.py: -------------------------------------------------------------------------------- 1 | # __main__.py 2 | 3 | from mcp_server_fetch import main 4 | 5 | main() 6 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/server.py: -------------------------------------------------------------------------------- 1 | from typing import Annotated, Tuple 2 | from urllib.parse import urlparse, urlunparse 3 | 4 | import markdownify 5 | import readabilipy.simple_json 6 | from mcp.shared.exceptions import McpError 7 | from mcp.server import Server 8 | from mcp.server.stdio import stdio_server 9 | from mcp.types import ( 10 | ErrorData, 11 | GetPromptResult, 12 | Prompt, 13 | PromptArgument, 14 | PromptMessage, 15 | TextContent, 16 | Tool, 17 | INVALID_PARAMS, 18 | INTERNAL_ERROR, 19 | ) 20 | from protego import Protego 21 | from pydantic import BaseModel, Field, AnyUrl 22 | 23 | DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)" 24 | DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)" 25 | 26 | 27 | def extract_content_from_html(html: str) -> str: 28 | """Extract and convert HTML 
content to Markdown format. 29 | 30 | Args: 31 | html: Raw HTML content to process 32 | 33 | Returns: 34 | Simplified markdown version of the content 35 | """ 36 | ret = readabilipy.simple_json.simple_json_from_html_string( 37 | html, use_readability=True 38 | ) 39 | if not ret["content"]: 40 | return "Page failed to be simplified from HTML" 41 | content = markdownify.markdownify( 42 | ret["content"], 43 | heading_style=markdownify.ATX, 44 | ) 45 | return content 46 | 47 | 48 | def get_robots_txt_url(url: str) -> str: 49 | """Get the robots.txt URL for a given website URL. 50 | 51 | Args: 52 | url: Website URL to get robots.txt for 53 | 54 | Returns: 55 | URL of the robots.txt file 56 | """ 57 | # Parse the URL into components 58 | parsed = urlparse(url) 59 | 60 | # Reconstruct the base URL with just scheme, netloc, and /robots.txt path 61 | robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", "")) 62 | 63 | return robots_url 64 | 65 | 66 | async def check_may_autonomously_fetch_url(url: str, user_agent: str, proxy_url: str | None = None) -> None: 67 | """ 68 | Check if the URL can be fetched by the user agent according to the robots.txt file. 69 | Raises a McpError if not. 
70 | """ 71 | from httpx import AsyncClient, HTTPError 72 | 73 | robot_txt_url = get_robots_txt_url(url) 74 | 75 | async with AsyncClient(proxies=proxy_url) as client: 76 | try: 77 | response = await client.get( 78 | robot_txt_url, 79 | follow_redirects=True, 80 | headers={"User-Agent": user_agent}, 81 | ) 82 | except HTTPError: 83 | raise McpError(ErrorData( 84 | code=INTERNAL_ERROR, 85 | message=f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue", 86 | )) 87 | if response.status_code in (401, 403): 88 | raise McpError(ErrorData( 89 | code=INTERNAL_ERROR, 90 | message=f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt", 91 | )) 92 | elif 400 <= response.status_code < 500: 93 | return 94 | robot_txt = response.text 95 | processed_robot_txt = "\n".join( 96 | line for line in robot_txt.splitlines() if not line.strip().startswith("#") 97 | ) 98 | robot_parser = Protego.parse(processed_robot_txt) 99 | if not robot_parser.can_fetch(str(url), user_agent): 100 | raise McpError(ErrorData( 101 | code=INTERNAL_ERROR, 102 | message=f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, " 103 | f"{user_agent}\n" 104 | f"{url}" 105 | f"\n{robot_txt}\n\n" 106 | f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n" 107 | f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.", 108 | )) 109 | 110 | 111 | async def fetch_url( 112 | url: str, user_agent: str, force_raw: bool = False, proxy_url: str | None = None 113 | ) -> Tuple[str, str]: 114 | """ 115 | Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information. 
116 | """ 117 | from httpx import AsyncClient, HTTPError 118 | 119 | async with AsyncClient(proxies=proxy_url) as client: 120 | try: 121 | response = await client.get( 122 | url, 123 | follow_redirects=True, 124 | headers={"User-Agent": user_agent}, 125 | timeout=30, 126 | ) 127 | except HTTPError as e: 128 | raise McpError(ErrorData(code=INTERNAL_ERROR, message=f"Failed to fetch {url}: {e!r}")) 129 | if response.status_code >= 400: 130 | raise McpError(ErrorData( 131 | code=INTERNAL_ERROR, 132 | message=f"Failed to fetch {url} - status code {response.status_code}", 133 | )) 134 | 135 | page_raw = response.text 136 | 137 | content_type = response.headers.get("content-type", "") 138 | is_page_html = ( 139 | " None: 186 | """Run the fetch MCP server. 187 | 188 | Args: 189 | custom_user_agent: Optional custom User-Agent string to use for requests 190 | ignore_robots_txt: Whether to ignore robots.txt restrictions 191 | proxy_url: Optional proxy URL to use for requests 192 | """ 193 | server = Server("mcp-fetch") 194 | user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS 195 | user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL 196 | 197 | @server.list_tools() 198 | async def list_tools() -> list[Tool]: 199 | return [ 200 | Tool( 201 | name="fetch", 202 | description="""Fetches a URL from the internet and optionally extracts its contents as markdown. 203 | 204 | Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. 
Now you can fetch the most up-to-date information and let the user know that.""", 205 | inputSchema=Fetch.model_json_schema(), 206 | ) 207 | ] 208 | 209 | @server.list_prompts() 210 | async def list_prompts() -> list[Prompt]: 211 | return [ 212 | Prompt( 213 | name="fetch", 214 | description="Fetch a URL and extract its contents as markdown", 215 | arguments=[ 216 | PromptArgument( 217 | name="url", description="URL to fetch", required=True 218 | ) 219 | ], 220 | ) 221 | ] 222 | 223 | @server.call_tool() 224 | async def call_tool(name, arguments: dict) -> list[TextContent]: 225 | try: 226 | args = Fetch(**arguments) 227 | except ValueError as e: 228 | raise McpError(ErrorData(code=INVALID_PARAMS, message=str(e))) 229 | 230 | url = str(args.url) 231 | if not url: 232 | raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required")) 233 | 234 | if not ignore_robots_txt: 235 | await check_may_autonomously_fetch_url(url, user_agent_autonomous, proxy_url) 236 | 237 | content, prefix = await fetch_url( 238 | url, user_agent_autonomous, force_raw=args.raw, proxy_url=proxy_url 239 | ) 240 | original_length = len(content) 241 | if args.start_index >= original_length: 242 | content = "No more content available." 243 | else: 244 | truncated_content = content[args.start_index : args.start_index + args.max_length] 245 | if not truncated_content: 246 | content = "No more content available." 247 | else: 248 | content = truncated_content 249 | actual_content_length = len(truncated_content) 250 | remaining_content = original_length - (args.start_index + actual_content_length) 251 | # Only add the prompt to continue fetching if there is still remaining content 252 | if actual_content_length == args.max_length and remaining_content > 0: 253 | next_start = args.start_index + actual_content_length 254 | content += f"\n\nContent truncated. Call the fetch tool with a start_index of {next_start} to get more content." 
255 | return [TextContent(type="text", text=f"{prefix}Contents of {url}:\n{content}")] 256 | 257 | @server.get_prompt() 258 | async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult: 259 | if not arguments or "url" not in arguments: 260 | raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required")) 261 | 262 | url = arguments["url"] 263 | 264 | try: 265 | content, prefix = await fetch_url(url, user_agent_manual, proxy_url=proxy_url) 266 | # TODO: after SDK bug is addressed, don't catch the exception 267 | except McpError as e: 268 | return GetPromptResult( 269 | description=f"Failed to fetch {url}", 270 | messages=[ 271 | PromptMessage( 272 | role="user", 273 | content=TextContent(type="text", text=str(e)), 274 | ) 275 | ], 276 | ) 277 | return GetPromptResult( 278 | description=f"Contents of {url}", 279 | messages=[ 280 | PromptMessage( 281 | role="user", content=TextContent(type="text", text=prefix + content) 282 | ) 283 | ], 284 | ) 285 | 286 | options = server.create_initialization_options() 287 | async with stdio_server() as (read_stream, write_stream): 288 | await server.run(read_stream, write_stream, options, raise_exceptions=True) 289 | -------------------------------------------------------------------------------- /src/filesystem/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22.12-alpine AS builder 2 | 3 | WORKDIR /app 4 | 5 | COPY src/filesystem /app 6 | COPY tsconfig.json /tsconfig.json 7 | 8 | RUN --mount=type=cache,target=/root/.npm npm install 9 | 10 | RUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev 11 | 12 | 13 | FROM node:22-alpine AS release 14 | 15 | WORKDIR /app 16 | 17 | COPY --from=builder /app/dist /app/dist 18 | COPY --from=builder /app/package.json /app/package.json 19 | COPY --from=builder /app/package-lock.json /app/package-lock.json 20 | 21 | ENV NODE_ENV=production 22 | 23 | RUN npm ci --ignore-scripts 
--omit-dev 24 | 25 | ENTRYPOINT ["node", "/app/dist/index.js"] -------------------------------------------------------------------------------- /src/filesystem/README.md: -------------------------------------------------------------------------------- 1 | # Filesystem MCP Server 2 | 3 | Node.js server implementing Model Context Protocol (MCP) for filesystem operations. 4 | 5 | ## Features 6 | 7 | - Read/write files 8 | - Create/list/delete directories 9 | - Move files/directories 10 | - Search files 11 | - Get file metadata 12 | 13 | **Note**: The server will only allow operations within directories specified via `args`. 14 | 15 | ## API 16 | 17 | ### Resources 18 | 19 | - `file://system`: File system operations interface 20 | 21 | ### Tools 22 | 23 | - **read_file** 24 | - Read complete contents of a file 25 | - Input: `path` (string) 26 | - Reads complete file contents with UTF-8 encoding 27 | 28 | - **read_multiple_files** 29 | - Read multiple files simultaneously 30 | - Input: `paths` (string[]) 31 | - Failed reads won't stop the entire operation 32 | 33 | - **write_file** 34 | - Create new file or overwrite existing (exercise caution with this) 35 | - Inputs: 36 | - `path` (string): File location 37 | - `content` (string): File content 38 | 39 | - **edit_file** 40 | - Make selective edits using advanced pattern matching and formatting 41 | - Features: 42 | - Line-based and multi-line content matching 43 | - Whitespace normalization with indentation preservation 44 | - Multiple simultaneous edits with correct positioning 45 | - Indentation style detection and preservation 46 | - Git-style diff output with context 47 | - Preview changes with dry run mode 48 | - Inputs: 49 | - `path` (string): File to edit 50 | - `edits` (array): List of edit operations 51 | - `oldText` (string): Text to search for (can be substring) 52 | - `newText` (string): Text to replace with 53 | - `dryRun` (boolean): Preview changes without applying (default: false) 54 | - Returns detailed 
diff and match information for dry runs, otherwise applies changes 55 | - Best Practice: Always use dryRun first to preview changes before applying them 56 | 57 | - **create_directory** 58 | - Create new directory or ensure it exists 59 | - Input: `path` (string) 60 | - Creates parent directories if needed 61 | - Succeeds silently if directory exists 62 | 63 | - **list_directory** 64 | - List directory contents with [FILE] or [DIR] prefixes 65 | - Input: `path` (string) 66 | 67 | - **move_file** 68 | - Move or rename files and directories 69 | - Inputs: 70 | - `source` (string) 71 | - `destination` (string) 72 | - Fails if destination exists 73 | 74 | - **search_files** 75 | - Recursively search for files/directories 76 | - Inputs: 77 | - `path` (string): Starting directory 78 | - `pattern` (string): Search pattern 79 | - `excludePatterns` (string[]): Exclude any patterns. Glob formats are supported. 80 | - Case-insensitive matching 81 | - Returns full paths to matches 82 | 83 | - **get_file_info** 84 | - Get detailed file/directory metadata 85 | - Input: `path` (string) 86 | - Returns: 87 | - Size 88 | - Creation time 89 | - Modified time 90 | - Access time 91 | - Type (file/directory) 92 | - Permissions 93 | 94 | - **list_allowed_directories** 95 | - List all directories the server is allowed to access 96 | - No input required 97 | - Returns: 98 | - Directories that this server can read/write from 99 | 100 | ## Usage with Claude Desktop 101 | Add this to your `claude_desktop_config.json`: 102 | 103 | Note: you can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory readonly by the server. 104 | 105 | ### Docker 106 | Note: all directories must be mounted to `/projects` by default. 
107 | 108 | ```json 109 | { 110 | "mcpServers": { 111 | "filesystem": { 112 | "command": "docker", 113 | "args": [ 114 | "run", 115 | "-i", 116 | "--rm", 117 | "--mount", "type=bind,src=/Users/username/Desktop,dst=/projects/Desktop", 118 | "--mount", "type=bind,src=/path/to/other/allowed/dir,dst=/projects/other/allowed/dir,ro", 119 | "--mount", "type=bind,src=/path/to/file.txt,dst=/projects/path/to/file.txt", 120 | "mcp/filesystem", 121 | "/projects" 122 | ] 123 | } 124 | } 125 | } 126 | ``` 127 | 128 | ### NPX 129 | 130 | ```json 131 | { 132 | "mcpServers": { 133 | "filesystem": { 134 | "command": "npx", 135 | "args": [ 136 | "-y", 137 | "@modelcontextprotocol/server-filesystem", 138 | "/Users/username/Desktop", 139 | "/path/to/other/allowed/dir" 140 | ] 141 | } 142 | } 143 | } 144 | ``` 145 | 146 | ## Usage with VS Code 147 | 148 | For quick installation, click the installation buttons below... 149 | 150 | [![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-filesystem%22%2C%22%24%7BworkspaceFolder%7D%22%5D%7D&quality=insiders) 151 | 152 | [![Install with Docker in VS 
Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=filesystem&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fprojects%2Fworkspace%22%2C%22mcp%2Ffilesystem%22%2C%22%2Fprojects%22%5D%7D&quality=insiders) 153 | 154 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open Settings (JSON)`. 155 | 156 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 157 | 158 | > Note that the `mcp` key is not needed in the `.vscode/mcp.json` file. 159 | 160 | You can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory readonly by the server. 161 | 162 | ### Docker 163 | Note: all directories must be mounted to `/projects` by default. 
164 | 165 | ```json 166 | { 167 | "mcp": { 168 | "servers": { 169 | "filesystem": { 170 | "command": "docker", 171 | "args": [ 172 | "run", 173 | "-i", 174 | "--rm", 175 | "--mount", "type=bind,src=${workspaceFolder},dst=/projects/workspace", 176 | "mcp/filesystem", 177 | "/projects" 178 | ] 179 | } 180 | } 181 | } 182 | } 183 | ``` 184 | 185 | ### NPX 186 | 187 | ```json 188 | { 189 | "mcp": { 190 | "servers": { 191 | "filesystem": { 192 | "command": "npx", 193 | "args": [ 194 | "-y", 195 | "@modelcontextprotocol/server-filesystem", 196 | "${workspaceFolder}" 197 | ] 198 | } 199 | } 200 | } 201 | } 202 | ``` 203 | 204 | ## Build 205 | 206 | Docker build: 207 | 208 | ```bash 209 | docker build -t mcp/filesystem -f src/filesystem/Dockerfile . 210 | ``` 211 | 212 | ## License 213 | 214 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
215 | -------------------------------------------------------------------------------- /src/filesystem/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-filesystem", 3 | "version": "0.6.2", 4 | "description": "MCP server for filesystem access", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-filesystem": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0", 23 | "diff": "^5.1.0", 24 | "glob": "^10.3.10", 25 | "minimatch": "^10.0.1", 26 | "zod-to-json-schema": "^3.23.5" 27 | }, 28 | "devDependencies": { 29 | "@types/diff": "^5.0.9", 30 | "@types/minimatch": "^5.1.2", 31 | "@types/node": "^22", 32 | "shx": "^0.3.4", 33 | "typescript": "^5.3.3" 34 | } 35 | } -------------------------------------------------------------------------------- /src/filesystem/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": ".", 6 | "moduleResolution": "NodeNext", 7 | "module": "NodeNext" 8 | }, 9 | "include": [ 10 | "./**/*.ts" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /src/git/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .venv 3 | -------------------------------------------------------------------------------- /src/git/.python-version: -------------------------------------------------------------------------------- 1 | 
3.10 2 | -------------------------------------------------------------------------------- /src/git/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use a Python image with uv pre-installed 2 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv 3 | 4 | # Install the project into `/app` 5 | WORKDIR /app 6 | 7 | # Enable bytecode compilation 8 | ENV UV_COMPILE_BYTECODE=1 9 | 10 | # Copy from the cache instead of linking since it's a mounted volume 11 | ENV UV_LINK_MODE=copy 12 | 13 | # Install the project's dependencies using the lockfile and settings 14 | RUN --mount=type=cache,target=/root/.cache/uv \ 15 | --mount=type=bind,source=uv.lock,target=uv.lock \ 16 | --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ 17 | uv sync --frozen --no-install-project --no-dev --no-editable 18 | 19 | # Then, add the rest of the project source code and install it 20 | # Installing separately from its dependencies allows optimal layer caching 21 | ADD . /app 22 | RUN --mount=type=cache,target=/root/.cache/uv \ 23 | uv sync --frozen --no-dev --no-editable 24 | 25 | FROM python:3.12-slim-bookworm 26 | 27 | RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/* 28 | 29 | WORKDIR /app 30 | 31 | COPY --from=uv /root/.local /root/.local 32 | COPY --from=uv --chown=app:app /app/.venv /app/.venv 33 | 34 | # Place executables in the environment at the front of the path 35 | ENV PATH="/app/.venv/bin:$PATH" 36 | 37 | # when running the container, add --db-path and a bind mount to the host's db file 38 | ENTRYPOINT ["mcp-server-git"] 39 | -------------------------------------------------------------------------------- /src/git/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Anthropic, PBC. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /src/git/README.md: -------------------------------------------------------------------------------- 1 | # mcp-server-git: A git MCP server 2 | 3 | ## Overview 4 | 5 | A Model Context Protocol server for Git repository interaction and automation. This server provides tools to read, search, and manipulate Git repositories via Large Language Models. 6 | 7 | Please note that mcp-server-git is currently in early development. The functionality and available tools are subject to change and expansion as we continue to develop and improve the server. 8 | 9 | ### Tools 10 | 11 | 1. `git_status` 12 | - Shows the working tree status 13 | - Input: 14 | - `repo_path` (string): Path to Git repository 15 | - Returns: Current status of working directory as text output 16 | 17 | 2. 
`git_diff_unstaged` 18 | - Shows changes in working directory not yet staged 19 | - Input: 20 | - `repo_path` (string): Path to Git repository 21 | - Returns: Diff output of unstaged changes 22 | 23 | 3. `git_diff_staged` 24 | - Shows changes that are staged for commit 25 | - Input: 26 | - `repo_path` (string): Path to Git repository 27 | - Returns: Diff output of staged changes 28 | 29 | 4. `git_diff` 30 | - Shows differences between branches or commits 31 | - Inputs: 32 | - `repo_path` (string): Path to Git repository 33 | - `target` (string): Target branch or commit to compare with 34 | - Returns: Diff output comparing current state with target 35 | 36 | 5. `git_commit` 37 | - Records changes to the repository 38 | - Inputs: 39 | - `repo_path` (string): Path to Git repository 40 | - `message` (string): Commit message 41 | - Returns: Confirmation with new commit hash 42 | 43 | 6. `git_add` 44 | - Adds file contents to the staging area 45 | - Inputs: 46 | - `repo_path` (string): Path to Git repository 47 | - `files` (string[]): Array of file paths to stage 48 | - Returns: Confirmation of staged files 49 | 50 | 7. `git_reset` 51 | - Unstages all staged changes 52 | - Input: 53 | - `repo_path` (string): Path to Git repository 54 | - Returns: Confirmation of reset operation 55 | 56 | 8. `git_log` 57 | - Shows the commit logs 58 | - Inputs: 59 | - `repo_path` (string): Path to Git repository 60 | - `max_count` (number, optional): Maximum number of commits to show (default: 10) 61 | - Returns: Array of commit entries with hash, author, date, and message 62 | 63 | 9. `git_create_branch` 64 | - Creates a new branch 65 | - Inputs: 66 | - `repo_path` (string): Path to Git repository 67 | - `branch_name` (string): Name of the new branch 68 | - `start_point` (string, optional): Starting point for the new branch 69 | - Returns: Confirmation of branch creation 70 | 10. 
`git_checkout` 71 | - Switches branches 72 | - Inputs: 73 | - `repo_path` (string): Path to Git repository 74 | - `branch_name` (string): Name of branch to checkout 75 | - Returns: Confirmation of branch switch 76 | 11. `git_show` 77 | - Shows the contents of a commit 78 | - Inputs: 79 | - `repo_path` (string): Path to Git repository 80 | - `revision` (string): The revision (commit hash, branch name, tag) to show 81 | - Returns: Contents of the specified commit 82 | 12. `git_init` 83 | - Initializes a Git repository 84 | - Inputs: 85 | - `repo_path` (string): Path to directory to initialize git repo 86 | - Returns: Confirmation of repository initialization 87 | 88 | ## Installation 89 | 90 | ### Using uv (recommended) 91 | 92 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 93 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-git*. 94 | 95 | ### Using PIP 96 | 97 | Alternatively you can install `mcp-server-git` via pip: 98 | 99 | ``` 100 | pip install mcp-server-git 101 | ``` 102 | 103 | After installation, you can run it as a script using: 104 | 105 | ``` 106 | python -m mcp_server_git 107 | ``` 108 | 109 | ## Configuration 110 | 111 | ### Usage with Claude Desktop 112 | 113 | Add this to your `claude_desktop_config.json`: 114 | 115 |
116 | Using uvx 117 | 118 | ```json 119 | "mcpServers": { 120 | "git": { 121 | "command": "uvx", 122 | "args": ["mcp-server-git", "--repository", "path/to/git/repo"] 123 | } 124 | } 125 | ``` 126 |
127 | 128 |
129 | Using docker 130 | 131 | * Note: replace '/Users/username' with a path that you want to be accessible by this tool 132 | 133 | ```json 134 | "mcpServers": { 135 | "git": { 136 | "command": "docker", 137 | "args": ["run", "--rm", "-i", "--mount", "type=bind,src=/Users/username,dst=/Users/username", "mcp/git"] 138 | } 139 | } 140 | ``` 141 |
142 | 143 |
144 | Using pip installation 145 | 146 | ```json 147 | "mcpServers": { 148 | "git": { 149 | "command": "python", 150 | "args": ["-m", "mcp_server_git", "--repository", "path/to/git/repo"] 151 | } 152 | } 153 | ``` 154 |
155 | 156 | ### Usage with VS Code 157 | 158 | For quick installation, use one of the one-click install buttons below... 159 | 160 | [![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-git%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-git%22%5D%7D&quality=insiders) 161 | 162 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fworkspace%22%2C%22mcp%2Fgit%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=git&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22--mount%22%2C%22type%3Dbind%2Csrc%3D%24%7BworkspaceFolder%7D%2Cdst%3D%2Fworkspace%22%2C%22mcp%2Fgit%22%5D%7D&quality=insiders) 163 | 164 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open Settings (JSON)`. 165 | 166 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 
167 | 168 | > Note that the `mcp` key is not needed in the `.vscode/mcp.json` file. 169 | 170 | ```json 171 | { 172 | "mcp": { 173 | "servers": { 174 | "git": { 175 | "command": "uvx", 176 | "args": ["mcp-server-git"] 177 | } 178 | } 179 | } 180 | } 181 | ``` 182 | 183 | For Docker installation: 184 | 185 | ```json 186 | { 187 | "mcp": { 188 | "servers": { 189 | "git": { 190 | "command": "docker", 191 | "args": [ 192 | "run", 193 | "--rm", 194 | "-i", 195 | "--mount", "type=bind,src=${workspaceFolder},dst=/workspace", 196 | "mcp/git" 197 | ] 198 | } 199 | } 200 | } 201 | } 202 | ``` 203 | 204 | ### Usage with [Zed](https://github.com/zed-industries/zed) 205 | 206 | Add to your Zed settings.json: 207 | 208 |
209 | Using uvx 210 | 211 | ```json 212 | "context_servers": { 213 | "mcp-server-git": { 214 | "command": { 215 | "path": "uvx", 216 | "args": ["mcp-server-git"] 217 | } 218 | } 219 | }, 220 | ``` 221 |
222 | 223 |
224 | Using pip installation 225 | 226 | ```json 227 | "context_servers": { 228 | "mcp-server-git": { 229 | "command": { 230 | "path": "python", 231 | "args": ["-m", "mcp_server_git"] 232 | } 233 | } 234 | }, 235 | ``` 236 |
237 | 238 | ## Debugging 239 | 240 | You can use the MCP inspector to debug the server. For uvx installations: 241 | 242 | ``` 243 | npx @modelcontextprotocol/inspector uvx mcp-server-git 244 | ``` 245 | 246 | Or if you've installed the package in a specific directory or are developing on it: 247 | 248 | ``` 249 | cd path/to/servers/src/git 250 | npx @modelcontextprotocol/inspector uv run mcp-server-git 251 | ``` 252 | 253 | Running `tail -n 20 -f ~/Library/Logs/Claude/mcp*.log` will show the logs from the server and may 254 | help you debug any issues. 255 | 256 | ## Development 257 | 258 | If you are doing local development, there are two ways to test your changes: 259 | 260 | 1. Run the MCP inspector to test your changes. See [Debugging](#debugging) for run instructions. 261 | 262 | 2. Test using the Claude desktop app. Add the following to your `claude_desktop_config.json`: 263 | 264 | ### Docker 265 | 266 | ```json 267 | { 268 | "mcpServers": { 269 | "git": { 270 | "command": "docker", 271 | "args": [ 272 | "run", 273 | "--rm", 274 | "-i", 275 | "--mount", "type=bind,src=/Users/username/Desktop,dst=/projects/Desktop", 276 | "--mount", "type=bind,src=/path/to/other/allowed/dir,dst=/projects/other/allowed/dir,ro", 277 | "--mount", "type=bind,src=/path/to/file.txt,dst=/projects/path/to/file.txt", 278 | "mcp/git" 279 | ] 280 | } 281 | } 282 | } 283 | ``` 284 | 285 | ### UVX 286 | ```json 287 | { 288 | "mcpServers": { 289 | "git": { 290 | "command": "uv", 291 | "args": [ 292 | "--directory", 293 | "//mcp-servers/src/git", 294 | "run", 295 | "mcp-server-git" 296 | ] 297 | } 298 | } 299 | ``` 300 | 301 | ## Build 302 | 303 | Docker build: 304 | 305 | ```bash 306 | cd src/git 307 | docker build -t mcp/git . 308 | ``` 309 | 310 | ## License 311 | 312 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. 
For more details, please see the LICENSE file in the project repository. 313 | -------------------------------------------------------------------------------- /src/git/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-git" 3 | version = "0.6.2" 4 | description = "A Model Context Protocol server providing tools to read, search, and manipulate Git repositories programmatically via LLMs" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [{ name = "Anthropic, PBC." }] 8 | maintainers = [{ name = "David Soria Parra", email = "davidsp@anthropic.com" }] 9 | keywords = ["git", "mcp", "llm", "automation"] 10 | license = { text = "MIT" } 11 | classifiers = [ 12 | "Development Status :: 4 - Beta", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Programming Language :: Python :: 3", 16 | "Programming Language :: Python :: 3.10", 17 | ] 18 | dependencies = [ 19 | "click>=8.1.7", 20 | "gitpython>=3.1.43", 21 | "mcp>=1.0.0", 22 | "pydantic>=2.0.0", 23 | ] 24 | 25 | [project.scripts] 26 | mcp-server-git = "mcp_server_git:main" 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | 32 | [tool.uv] 33 | dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3", "pytest>=8.0.0"] 34 | 35 | [tool.pytest.ini_options] 36 | testpaths = ["tests"] 37 | python_files = "test_*.py" 38 | python_classes = "Test*" 39 | python_functions = "test_*" -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/__init__.py: -------------------------------------------------------------------------------- 1 | import click 2 | from pathlib import Path 3 | import logging 4 | import sys 5 | from .server import serve 6 | 7 | @click.command() 8 | @click.option("--repository", "-r", type=Path, help="Git repository path") 9 | @click.option("-v", "--verbose", count=True) 10 | 
def main(repository: Path | None, verbose: bool) -> None: 11 | """MCP Git Server - Git functionality for MCP""" 12 | import asyncio 13 | 14 | logging_level = logging.WARN 15 | if verbose == 1: 16 | logging_level = logging.INFO 17 | elif verbose >= 2: 18 | logging_level = logging.DEBUG 19 | 20 | logging.basicConfig(level=logging_level, stream=sys.stderr) 21 | asyncio.run(serve(repository)) 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/__main__.py: -------------------------------------------------------------------------------- 1 | # __main__.py 2 | 3 | from mcp_server_git import main 4 | 5 | main() 6 | -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/server.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from typing import Sequence 4 | from mcp.server import Server 5 | from mcp.server.session import ServerSession 6 | from mcp.server.stdio import stdio_server 7 | from mcp.types import ( 8 | ClientCapabilities, 9 | TextContent, 10 | Tool, 11 | ListRootsResult, 12 | RootsCapability, 13 | ) 14 | from enum import Enum 15 | import git 16 | from pydantic import BaseModel 17 | 18 | class GitStatus(BaseModel): 19 | repo_path: str 20 | 21 | class GitDiffUnstaged(BaseModel): 22 | repo_path: str 23 | 24 | class GitDiffStaged(BaseModel): 25 | repo_path: str 26 | 27 | class GitDiff(BaseModel): 28 | repo_path: str 29 | target: str 30 | 31 | class GitCommit(BaseModel): 32 | repo_path: str 33 | message: str 34 | 35 | class GitAdd(BaseModel): 36 | repo_path: str 37 | files: list[str] 38 | 39 | class GitReset(BaseModel): 40 | repo_path: str 41 | 42 | class GitLog(BaseModel): 43 | repo_path: str 44 | max_count: int = 10 45 | 46 | class GitCreateBranch(BaseModel): 47 | repo_path: str 48 | branch_name: str 49 | base_branch: 
str | None = None 50 | 51 | class GitCheckout(BaseModel): 52 | repo_path: str 53 | branch_name: str 54 | 55 | class GitShow(BaseModel): 56 | repo_path: str 57 | revision: str 58 | 59 | class GitInit(BaseModel): 60 | repo_path: str 61 | 62 | class GitTools(str, Enum): 63 | STATUS = "git_status" 64 | DIFF_UNSTAGED = "git_diff_unstaged" 65 | DIFF_STAGED = "git_diff_staged" 66 | DIFF = "git_diff" 67 | COMMIT = "git_commit" 68 | ADD = "git_add" 69 | RESET = "git_reset" 70 | LOG = "git_log" 71 | CREATE_BRANCH = "git_create_branch" 72 | CHECKOUT = "git_checkout" 73 | SHOW = "git_show" 74 | INIT = "git_init" 75 | 76 | def git_status(repo: git.Repo) -> str: 77 | return repo.git.status() 78 | 79 | def git_diff_unstaged(repo: git.Repo) -> str: 80 | return repo.git.diff() 81 | 82 | def git_diff_staged(repo: git.Repo) -> str: 83 | return repo.git.diff("--cached") 84 | 85 | def git_diff(repo: git.Repo, target: str) -> str: 86 | return repo.git.diff(target) 87 | 88 | def git_commit(repo: git.Repo, message: str) -> str: 89 | commit = repo.index.commit(message) 90 | return f"Changes committed successfully with hash {commit.hexsha}" 91 | 92 | def git_add(repo: git.Repo, files: list[str]) -> str: 93 | repo.index.add(files) 94 | return "Files staged successfully" 95 | 96 | def git_reset(repo: git.Repo) -> str: 97 | repo.index.reset() 98 | return "All staged changes reset" 99 | 100 | def git_log(repo: git.Repo, max_count: int = 10) -> list[str]: 101 | commits = list(repo.iter_commits(max_count=max_count)) 102 | log = [] 103 | for commit in commits: 104 | log.append( 105 | f"Commit: {commit.hexsha}\n" 106 | f"Author: {commit.author}\n" 107 | f"Date: {commit.authored_datetime}\n" 108 | f"Message: {commit.message}\n" 109 | ) 110 | return log 111 | 112 | def git_create_branch(repo: git.Repo, branch_name: str, base_branch: str | None = None) -> str: 113 | if base_branch: 114 | base = repo.refs[base_branch] 115 | else: 116 | base = repo.active_branch 117 | 118 | repo.create_head(branch_name, 
base) 119 | return f"Created branch '{branch_name}' from '{base.name}'" 120 | 121 | def git_checkout(repo: git.Repo, branch_name: str) -> str: 122 | repo.git.checkout(branch_name) 123 | return f"Switched to branch '{branch_name}'" 124 | 125 | def git_init(repo_path: str) -> str: 126 | try: 127 | repo = git.Repo.init(path=repo_path, mkdir=True) 128 | return f"Initialized empty Git repository in {repo.git_dir}" 129 | except Exception as e: 130 | return f"Error initializing repository: {str(e)}" 131 | 132 | def git_show(repo: git.Repo, revision: str) -> str: 133 | commit = repo.commit(revision) 134 | output = [ 135 | f"Commit: {commit.hexsha}\n" 136 | f"Author: {commit.author}\n" 137 | f"Date: {commit.authored_datetime}\n" 138 | f"Message: {commit.message}\n" 139 | ] 140 | if commit.parents: 141 | parent = commit.parents[0] 142 | diff = parent.diff(commit, create_patch=True) 143 | else: 144 | diff = commit.diff(git.NULL_TREE, create_patch=True) 145 | for d in diff: 146 | output.append(f"\n--- {d.a_path}\n+++ {d.b_path}\n") 147 | output.append(d.diff.decode('utf-8')) 148 | return "".join(output) 149 | 150 | async def serve(repository: Path | None) -> None: 151 | logger = logging.getLogger(__name__) 152 | 153 | if repository is not None: 154 | try: 155 | git.Repo(repository) 156 | logger.info(f"Using repository at {repository}") 157 | except git.InvalidGitRepositoryError: 158 | logger.error(f"{repository} is not a valid Git repository") 159 | return 160 | 161 | server = Server("mcp-git") 162 | 163 | @server.list_tools() 164 | async def list_tools() -> list[Tool]: 165 | return [ 166 | Tool( 167 | name=GitTools.STATUS, 168 | description="Shows the working tree status", 169 | inputSchema=GitStatus.schema(), 170 | ), 171 | Tool( 172 | name=GitTools.DIFF_UNSTAGED, 173 | description="Shows changes in the working directory that are not yet staged", 174 | inputSchema=GitDiffUnstaged.schema(), 175 | ), 176 | Tool( 177 | name=GitTools.DIFF_STAGED, 178 | description="Shows changes 
that are staged for commit", 179 | inputSchema=GitDiffStaged.schema(), 180 | ), 181 | Tool( 182 | name=GitTools.DIFF, 183 | description="Shows differences between branches or commits", 184 | inputSchema=GitDiff.schema(), 185 | ), 186 | Tool( 187 | name=GitTools.COMMIT, 188 | description="Records changes to the repository", 189 | inputSchema=GitCommit.schema(), 190 | ), 191 | Tool( 192 | name=GitTools.ADD, 193 | description="Adds file contents to the staging area", 194 | inputSchema=GitAdd.schema(), 195 | ), 196 | Tool( 197 | name=GitTools.RESET, 198 | description="Unstages all staged changes", 199 | inputSchema=GitReset.schema(), 200 | ), 201 | Tool( 202 | name=GitTools.LOG, 203 | description="Shows the commit logs", 204 | inputSchema=GitLog.schema(), 205 | ), 206 | Tool( 207 | name=GitTools.CREATE_BRANCH, 208 | description="Creates a new branch from an optional base branch", 209 | inputSchema=GitCreateBranch.schema(), 210 | ), 211 | Tool( 212 | name=GitTools.CHECKOUT, 213 | description="Switches branches", 214 | inputSchema=GitCheckout.schema(), 215 | ), 216 | Tool( 217 | name=GitTools.SHOW, 218 | description="Shows the contents of a commit", 219 | inputSchema=GitShow.schema(), 220 | ), 221 | Tool( 222 | name=GitTools.INIT, 223 | description="Initialize a new Git repository", 224 | inputSchema=GitInit.schema(), 225 | ) 226 | ] 227 | 228 | async def list_repos() -> Sequence[str]: 229 | async def by_roots() -> Sequence[str]: 230 | if not isinstance(server.request_context.session, ServerSession): 231 | raise TypeError("server.request_context.session must be a ServerSession") 232 | 233 | if not server.request_context.session.check_client_capability( 234 | ClientCapabilities(roots=RootsCapability()) 235 | ): 236 | return [] 237 | 238 | roots_result: ListRootsResult = await server.request_context.session.list_roots() 239 | logger.debug(f"Roots result: {roots_result}") 240 | repo_paths = [] 241 | for root in roots_result.roots: 242 | path = root.uri.path 243 | try: 244 | 
git.Repo(path) 245 | repo_paths.append(str(path)) 246 | except git.InvalidGitRepositoryError: 247 | pass 248 | return repo_paths 249 | 250 | def by_commandline() -> Sequence[str]: 251 | return [str(repository)] if repository is not None else [] 252 | 253 | cmd_repos = by_commandline() 254 | root_repos = await by_roots() 255 | return [*root_repos, *cmd_repos] 256 | 257 | @server.call_tool() 258 | async def call_tool(name: str, arguments: dict) -> list[TextContent]: 259 | repo_path = Path(arguments["repo_path"]) 260 | 261 | # Handle git init separately since it doesn't require an existing repo 262 | if name == GitTools.INIT: 263 | result = git_init(str(repo_path)) 264 | return [TextContent( 265 | type="text", 266 | text=result 267 | )] 268 | 269 | # For all other commands, we need an existing repo 270 | repo = git.Repo(repo_path) 271 | 272 | match name: 273 | case GitTools.STATUS: 274 | status = git_status(repo) 275 | return [TextContent( 276 | type="text", 277 | text=f"Repository status:\n{status}" 278 | )] 279 | 280 | case GitTools.DIFF_UNSTAGED: 281 | diff = git_diff_unstaged(repo) 282 | return [TextContent( 283 | type="text", 284 | text=f"Unstaged changes:\n{diff}" 285 | )] 286 | 287 | case GitTools.DIFF_STAGED: 288 | diff = git_diff_staged(repo) 289 | return [TextContent( 290 | type="text", 291 | text=f"Staged changes:\n{diff}" 292 | )] 293 | 294 | case GitTools.DIFF: 295 | diff = git_diff(repo, arguments["target"]) 296 | return [TextContent( 297 | type="text", 298 | text=f"Diff with {arguments['target']}:\n{diff}" 299 | )] 300 | 301 | case GitTools.COMMIT: 302 | result = git_commit(repo, arguments["message"]) 303 | return [TextContent( 304 | type="text", 305 | text=result 306 | )] 307 | 308 | case GitTools.ADD: 309 | result = git_add(repo, arguments["files"]) 310 | return [TextContent( 311 | type="text", 312 | text=result 313 | )] 314 | 315 | case GitTools.RESET: 316 | result = git_reset(repo) 317 | return [TextContent( 318 | type="text", 319 | text=result 320 
| )] 321 | 322 | case GitTools.LOG: 323 | log = git_log(repo, arguments.get("max_count", 10)) 324 | return [TextContent( 325 | type="text", 326 | text="Commit history:\n" + "\n".join(log) 327 | )] 328 | 329 | case GitTools.CREATE_BRANCH: 330 | result = git_create_branch( 331 | repo, 332 | arguments["branch_name"], 333 | arguments.get("base_branch") 334 | ) 335 | return [TextContent( 336 | type="text", 337 | text=result 338 | )] 339 | 340 | case GitTools.CHECKOUT: 341 | result = git_checkout(repo, arguments["branch_name"]) 342 | return [TextContent( 343 | type="text", 344 | text=result 345 | )] 346 | 347 | case GitTools.SHOW: 348 | result = git_show(repo, arguments["revision"]) 349 | return [TextContent( 350 | type="text", 351 | text=result 352 | )] 353 | 354 | case _: 355 | raise ValueError(f"Unknown tool: {name}") 356 | 357 | options = server.create_initialization_options() 358 | async with stdio_server() as (read_stream, write_stream): 359 | await server.run(read_stream, write_stream, options, raise_exceptions=True) 360 | -------------------------------------------------------------------------------- /src/git/tests/test_server.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pathlib import Path 3 | import git 4 | from mcp_server_git.server import git_checkout 5 | import shutil 6 | 7 | @pytest.fixture 8 | def test_repository(tmp_path: Path): 9 | repo_path = tmp_path / "temp_test_repo" 10 | test_repo = git.Repo.init(repo_path) 11 | 12 | Path(repo_path / "test.txt").write_text("test") 13 | test_repo.index.add(["test.txt"]) 14 | test_repo.index.commit("initial commit") 15 | 16 | yield test_repo 17 | 18 | shutil.rmtree(repo_path) 19 | 20 | def test_git_checkout_existing_branch(test_repository): 21 | test_repository.git.branch("test-branch") 22 | result = git_checkout(test_repository, "test-branch") 23 | 24 | assert "Switched to branch 'test-branch'" in result 25 | assert test_repository.active_branch.name 
== "test-branch" 26 | 27 | def test_git_checkout_nonexistent_branch(test_repository): 28 | 29 | with pytest.raises(git.GitCommandError): 30 | git_checkout(test_repository, "nonexistent-branch") -------------------------------------------------------------------------------- /src/memory/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22.12-alpine AS builder 2 | 3 | COPY src/memory /app 4 | COPY tsconfig.json /tsconfig.json 5 | 6 | WORKDIR /app 7 | 8 | RUN --mount=type=cache,target=/root/.npm npm install 9 | 10 | RUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev 11 | 12 | FROM node:22-alpine AS release 13 | 14 | COPY --from=builder /app/dist /app/dist 15 | COPY --from=builder /app/package.json /app/package.json 16 | COPY --from=builder /app/package-lock.json /app/package-lock.json 17 | 18 | ENV NODE_ENV=production 19 | 20 | WORKDIR /app 21 | 22 | RUN npm ci --ignore-scripts --omit-dev 23 | 24 | ENTRYPOINT ["node", "dist/index.js"] -------------------------------------------------------------------------------- /src/memory/README.md: -------------------------------------------------------------------------------- 1 | # Knowledge Graph Memory Server 2 | 3 | A basic implementation of persistent memory using a local knowledge graph. This lets Claude remember information about the user across chats. 4 | 5 | ## Core Concepts 6 | 7 | ### Entities 8 | Entities are the primary nodes in the knowledge graph. Each entity has: 9 | - A unique name (identifier) 10 | - An entity type (e.g., "person", "organization", "event") 11 | - A list of observations 12 | 13 | Example: 14 | ```json 15 | { 16 | "name": "John_Smith", 17 | "entityType": "person", 18 | "observations": ["Speaks fluent Spanish"] 19 | } 20 | ``` 21 | 22 | ### Relations 23 | Relations define directed connections between entities. 
They are always stored in active voice and describe how entities interact or relate to each other. 24 | 25 | Example: 26 | ```json 27 | { 28 | "from": "John_Smith", 29 | "to": "Anthropic", 30 | "relationType": "works_at" 31 | } 32 | ``` 33 | ### Observations 34 | Observations are discrete pieces of information about an entity. They are: 35 | 36 | - Stored as strings 37 | - Attached to specific entities 38 | - Can be added or removed independently 39 | - Should be atomic (one fact per observation) 40 | 41 | Example: 42 | ```json 43 | { 44 | "entityName": "John_Smith", 45 | "observations": [ 46 | "Speaks fluent Spanish", 47 | "Graduated in 2019", 48 | "Prefers morning meetings" 49 | ] 50 | } 51 | ``` 52 | 53 | ## API 54 | 55 | ### Tools 56 | - **create_entities** 57 | - Create multiple new entities in the knowledge graph 58 | - Input: `entities` (array of objects) 59 | - Each object contains: 60 | - `name` (string): Entity identifier 61 | - `entityType` (string): Type classification 62 | - `observations` (string[]): Associated observations 63 | - Ignores entities with existing names 64 | 65 | - **create_relations** 66 | - Create multiple new relations between entities 67 | - Input: `relations` (array of objects) 68 | - Each object contains: 69 | - `from` (string): Source entity name 70 | - `to` (string): Target entity name 71 | - `relationType` (string): Relationship type in active voice 72 | - Skips duplicate relations 73 | 74 | - **add_observations** 75 | - Add new observations to existing entities 76 | - Input: `observations` (array of objects) 77 | - Each object contains: 78 | - `entityName` (string): Target entity 79 | - `contents` (string[]): New observations to add 80 | - Returns added observations per entity 81 | - Fails if entity doesn't exist 82 | 83 | - **delete_entities** 84 | - Remove entities and their relations 85 | - Input: `entityNames` (string[]) 86 | - Cascading deletion of associated relations 87 | - Silent operation if entity doesn't exist 88 | 
89 | - **delete_observations** 90 | - Remove specific observations from entities 91 | - Input: `deletions` (array of objects) 92 | - Each object contains: 93 | - `entityName` (string): Target entity 94 | - `observations` (string[]): Observations to remove 95 | - Silent operation if observation doesn't exist 96 | 97 | - **delete_relations** 98 | - Remove specific relations from the graph 99 | - Input: `relations` (array of objects) 100 | - Each object contains: 101 | - `from` (string): Source entity name 102 | - `to` (string): Target entity name 103 | - `relationType` (string): Relationship type 104 | - Silent operation if relation doesn't exist 105 | 106 | - **read_graph** 107 | - Read the entire knowledge graph 108 | - No input required 109 | - Returns complete graph structure with all entities and relations 110 | 111 | - **search_nodes** 112 | - Search for nodes based on query 113 | - Input: `query` (string) 114 | - Searches across: 115 | - Entity names 116 | - Entity types 117 | - Observation content 118 | - Returns matching entities and their relations 119 | 120 | - **open_nodes** 121 | - Retrieve specific nodes by name 122 | - Input: `names` (string[]) 123 | - Returns: 124 | - Requested entities 125 | - Relations between requested entities 126 | - Silently skips non-existent nodes 127 | 128 | # Usage with Claude Desktop 129 | 130 | ### Setup 131 | 132 | Add this to your claude_desktop_config.json: 133 | 134 | #### Docker 135 | 136 | ```json 137 | { 138 | "mcpServers": { 139 | "memory": { 140 | "command": "docker", 141 | "args": ["run", "-i", "-v", "claude-memory:/app/dist", "--rm", "mcp/memory"] 142 | } 143 | } 144 | } 145 | ``` 146 | 147 | #### NPX 148 | ```json 149 | { 150 | "mcpServers": { 151 | "memory": { 152 | "command": "npx", 153 | "args": [ 154 | "-y", 155 | "@modelcontextprotocol/server-memory" 156 | ] 157 | } 158 | } 159 | } 160 | ``` 161 | 162 | #### NPX with custom setting 163 | 164 | The server can be configured using the following environment 
variables: 165 | 166 | ```json 167 | { 168 | "mcpServers": { 169 | "memory": { 170 | "command": "npx", 171 | "args": [ 172 | "-y", 173 | "@modelcontextprotocol/server-memory" 174 | ], 175 | "env": { 176 | "MEMORY_FILE_PATH": "/path/to/custom/memory.json" 177 | } 178 | } 179 | } 180 | } 181 | ``` 182 | 183 | - `MEMORY_FILE_PATH`: Path to the memory storage JSON file (default: `memory.json` in the server directory) 184 | 185 | # VS Code Installation Instructions 186 | 187 | For quick installation, use one of the one-click installation buttons below: 188 | 189 | [![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-memory%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-memory%22%5D%7D&quality=insiders) 190 | 191 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22-v%22%2C%22claude-memory%3A%2Fapp%2Fdist%22%2C%22--rm%22%2C%22mcp%2Fmemory%22%5D%7D) [![Install with Docker in VS Code 
Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=memory&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22-v%22%2C%22claude-memory%3A%2Fapp%2Fdist%22%2C%22--rm%22%2C%22mcp%2Fmemory%22%5D%7D&quality=insiders) 192 | 193 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open Settings (JSON)`. 194 | 195 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 196 | 197 | > Note that the `mcp` key is not needed in the `.vscode/mcp.json` file. 198 | 199 | #### NPX 200 | 201 | ```json 202 | { 203 | "mcp": { 204 | "servers": { 205 | "memory": { 206 | "command": "npx", 207 | "args": [ 208 | "-y", 209 | "@modelcontextprotocol/server-memory" 210 | ] 211 | } 212 | } 213 | } 214 | } 215 | ``` 216 | 217 | #### Docker 218 | 219 | ```json 220 | { 221 | "mcp": { 222 | "servers": { 223 | "memory": { 224 | "command": "docker", 225 | "args": [ 226 | "run", 227 | "-i", 228 | "-v", 229 | "claude-memory:/app/dist", 230 | "--rm", 231 | "mcp/memory" 232 | ] 233 | } 234 | } 235 | } 236 | } 237 | ``` 238 | 239 | ### System Prompt 240 | 241 | The prompt for utilizing memory depends on the use case. Changing the prompt will help the model determine the frequency and types of memories created. 242 | 243 | Here is an example prompt for chat personalization. You could use this prompt in the "Custom Instructions" field of a [Claude.ai Project](https://www.anthropic.com/news/projects). 244 | 245 | ``` 246 | Follow these steps for each interaction: 247 | 248 | 1. User Identification: 249 | - You should assume that you are interacting with default_user 250 | - If you have not identified default_user, proactively try to do so. 
251 | 252 | 2. Memory Retrieval: 253 | - Always begin your chat by saying only "Remembering..." and retrieve all relevant information from your knowledge graph 254 | - Always refer to your knowledge graph as your "memory" 255 | 256 | 3. Memory 257 | - While conversing with the user, be attentive to any new information that falls into these categories: 258 | a) Basic Identity (age, gender, location, job title, education level, etc.) 259 | b) Behaviors (interests, habits, etc.) 260 | c) Preferences (communication style, preferred language, etc.) 261 | d) Goals (goals, targets, aspirations, etc.) 262 | e) Relationships (personal and professional relationships up to 3 degrees of separation) 263 | 264 | 4. Memory Update: 265 | - If any new information was gathered during the interaction, update your memory as follows: 266 | a) Create entities for recurring organizations, people, and significant events 267 | b) Connect them to the current entities using relations 268 | c) Store facts about them as observations 269 | ``` 270 | 271 | ## Building 272 | 273 | Docker: 274 | 275 | ```sh 276 | docker build -t mcp/memory -f src/memory/Dockerfile . 277 | ``` 278 | 279 | ## License 280 | 281 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
282 | -------------------------------------------------------------------------------- /src/memory/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | } from "@modelcontextprotocol/sdk/types.js"; 9 | import { promises as fs } from 'fs'; 10 | import path from 'path'; 11 | import { fileURLToPath } from 'url'; 12 | 13 | // Define memory file path using environment variable with fallback 14 | const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json'); 15 | 16 | // If MEMORY_FILE_PATH is just a filename, put it in the same directory as the script 17 | const MEMORY_FILE_PATH = process.env.MEMORY_FILE_PATH 18 | ? path.isAbsolute(process.env.MEMORY_FILE_PATH) 19 | ? 
process.env.MEMORY_FILE_PATH 20 | : path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH) 21 | : defaultMemoryPath; 22 | 23 | // We are storing our memory using entities, relations, and observations in a graph structure 24 | interface Entity { 25 | name: string; 26 | entityType: string; 27 | observations: string[]; 28 | } 29 | 30 | interface Relation { 31 | from: string; 32 | to: string; 33 | relationType: string; 34 | } 35 | 36 | interface KnowledgeGraph { 37 | entities: Entity[]; 38 | relations: Relation[]; 39 | } 40 | 41 | // The KnowledgeGraphManager class contains all operations to interact with the knowledge graph 42 | class KnowledgeGraphManager { 43 | private async loadGraph(): Promise { 44 | try { 45 | const data = await fs.readFile(MEMORY_FILE_PATH, "utf-8"); 46 | const lines = data.split("\n").filter(line => line.trim() !== ""); 47 | return lines.reduce((graph: KnowledgeGraph, line) => { 48 | const item = JSON.parse(line); 49 | if (item.type === "entity") graph.entities.push(item as Entity); 50 | if (item.type === "relation") graph.relations.push(item as Relation); 51 | return graph; 52 | }, { entities: [], relations: [] }); 53 | } catch (error) { 54 | if (error instanceof Error && 'code' in error && (error as any).code === "ENOENT") { 55 | return { entities: [], relations: [] }; 56 | } 57 | throw error; 58 | } 59 | } 60 | 61 | private async saveGraph(graph: KnowledgeGraph): Promise { 62 | const lines = [ 63 | ...graph.entities.map(e => JSON.stringify({ type: "entity", ...e })), 64 | ...graph.relations.map(r => JSON.stringify({ type: "relation", ...r })), 65 | ]; 66 | await fs.writeFile(MEMORY_FILE_PATH, lines.join("\n")); 67 | } 68 | 69 | async createEntities(entities: Entity[]): Promise { 70 | const graph = await this.loadGraph(); 71 | const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name)); 72 | graph.entities.push(...newEntities); 73 | await 
this.saveGraph(graph); 74 | return newEntities; 75 | } 76 | 77 | async createRelations(relations: Relation[]): Promise { 78 | const graph = await this.loadGraph(); 79 | const newRelations = relations.filter(r => !graph.relations.some(existingRelation => 80 | existingRelation.from === r.from && 81 | existingRelation.to === r.to && 82 | existingRelation.relationType === r.relationType 83 | )); 84 | graph.relations.push(...newRelations); 85 | await this.saveGraph(graph); 86 | return newRelations; 87 | } 88 | 89 | async addObservations(observations: { entityName: string; contents: string[] }[]): Promise<{ entityName: string; addedObservations: string[] }[]> { 90 | const graph = await this.loadGraph(); 91 | const results = observations.map(o => { 92 | const entity = graph.entities.find(e => e.name === o.entityName); 93 | if (!entity) { 94 | throw new Error(`Entity with name ${o.entityName} not found`); 95 | } 96 | const newObservations = o.contents.filter(content => !entity.observations.includes(content)); 97 | entity.observations.push(...newObservations); 98 | return { entityName: o.entityName, addedObservations: newObservations }; 99 | }); 100 | await this.saveGraph(graph); 101 | return results; 102 | } 103 | 104 | async deleteEntities(entityNames: string[]): Promise { 105 | const graph = await this.loadGraph(); 106 | graph.entities = graph.entities.filter(e => !entityNames.includes(e.name)); 107 | graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to)); 108 | await this.saveGraph(graph); 109 | } 110 | 111 | async deleteObservations(deletions: { entityName: string; observations: string[] }[]): Promise { 112 | const graph = await this.loadGraph(); 113 | deletions.forEach(d => { 114 | const entity = graph.entities.find(e => e.name === d.entityName); 115 | if (entity) { 116 | entity.observations = entity.observations.filter(o => !d.observations.includes(o)); 117 | } 118 | }); 119 | await this.saveGraph(graph); 120 | } 
121 | 122 | async deleteRelations(relations: Relation[]): Promise { 123 | const graph = await this.loadGraph(); 124 | graph.relations = graph.relations.filter(r => !relations.some(delRelation => 125 | r.from === delRelation.from && 126 | r.to === delRelation.to && 127 | r.relationType === delRelation.relationType 128 | )); 129 | await this.saveGraph(graph); 130 | } 131 | 132 | async readGraph(): Promise { 133 | return this.loadGraph(); 134 | } 135 | 136 | // Very basic search function 137 | async searchNodes(query: string): Promise { 138 | const graph = await this.loadGraph(); 139 | 140 | // Filter entities 141 | const filteredEntities = graph.entities.filter(e => 142 | e.name.toLowerCase().includes(query.toLowerCase()) || 143 | e.entityType.toLowerCase().includes(query.toLowerCase()) || 144 | e.observations.some(o => o.toLowerCase().includes(query.toLowerCase())) 145 | ); 146 | 147 | // Create a Set of filtered entity names for quick lookup 148 | const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); 149 | 150 | // Filter relations to only include those between filtered entities 151 | const filteredRelations = graph.relations.filter(r => 152 | filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) 153 | ); 154 | 155 | const filteredGraph: KnowledgeGraph = { 156 | entities: filteredEntities, 157 | relations: filteredRelations, 158 | }; 159 | 160 | return filteredGraph; 161 | } 162 | 163 | async openNodes(names: string[]): Promise { 164 | const graph = await this.loadGraph(); 165 | 166 | // Filter entities 167 | const filteredEntities = graph.entities.filter(e => names.includes(e.name)); 168 | 169 | // Create a Set of filtered entity names for quick lookup 170 | const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); 171 | 172 | // Filter relations to only include those between filtered entities 173 | const filteredRelations = graph.relations.filter(r => 174 | filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) 
175 | ); 176 | 177 | const filteredGraph: KnowledgeGraph = { 178 | entities: filteredEntities, 179 | relations: filteredRelations, 180 | }; 181 | 182 | return filteredGraph; 183 | } 184 | } 185 | 186 | const knowledgeGraphManager = new KnowledgeGraphManager(); 187 | 188 | 189 | // The server instance and tools exposed to Claude 190 | const server = new Server({ 191 | name: "memory-server", 192 | version: "0.6.3", 193 | }, { 194 | capabilities: { 195 | tools: {}, 196 | }, 197 | },); 198 | 199 | server.setRequestHandler(ListToolsRequestSchema, async () => { 200 | return { 201 | tools: [ 202 | { 203 | name: "create_entities", 204 | description: "Create multiple new entities in the knowledge graph", 205 | inputSchema: { 206 | type: "object", 207 | properties: { 208 | entities: { 209 | type: "array", 210 | items: { 211 | type: "object", 212 | properties: { 213 | name: { type: "string", description: "The name of the entity" }, 214 | entityType: { type: "string", description: "The type of the entity" }, 215 | observations: { 216 | type: "array", 217 | items: { type: "string" }, 218 | description: "An array of observation contents associated with the entity" 219 | }, 220 | }, 221 | required: ["name", "entityType", "observations"], 222 | }, 223 | }, 224 | }, 225 | required: ["entities"], 226 | }, 227 | }, 228 | { 229 | name: "create_relations", 230 | description: "Create multiple new relations between entities in the knowledge graph. 
Relations should be in active voice", 231 | inputSchema: { 232 | type: "object", 233 | properties: { 234 | relations: { 235 | type: "array", 236 | items: { 237 | type: "object", 238 | properties: { 239 | from: { type: "string", description: "The name of the entity where the relation starts" }, 240 | to: { type: "string", description: "The name of the entity where the relation ends" }, 241 | relationType: { type: "string", description: "The type of the relation" }, 242 | }, 243 | required: ["from", "to", "relationType"], 244 | }, 245 | }, 246 | }, 247 | required: ["relations"], 248 | }, 249 | }, 250 | { 251 | name: "add_observations", 252 | description: "Add new observations to existing entities in the knowledge graph", 253 | inputSchema: { 254 | type: "object", 255 | properties: { 256 | observations: { 257 | type: "array", 258 | items: { 259 | type: "object", 260 | properties: { 261 | entityName: { type: "string", description: "The name of the entity to add the observations to" }, 262 | contents: { 263 | type: "array", 264 | items: { type: "string" }, 265 | description: "An array of observation contents to add" 266 | }, 267 | }, 268 | required: ["entityName", "contents"], 269 | }, 270 | }, 271 | }, 272 | required: ["observations"], 273 | }, 274 | }, 275 | { 276 | name: "delete_entities", 277 | description: "Delete multiple entities and their associated relations from the knowledge graph", 278 | inputSchema: { 279 | type: "object", 280 | properties: { 281 | entityNames: { 282 | type: "array", 283 | items: { type: "string" }, 284 | description: "An array of entity names to delete" 285 | }, 286 | }, 287 | required: ["entityNames"], 288 | }, 289 | }, 290 | { 291 | name: "delete_observations", 292 | description: "Delete specific observations from entities in the knowledge graph", 293 | inputSchema: { 294 | type: "object", 295 | properties: { 296 | deletions: { 297 | type: "array", 298 | items: { 299 | type: "object", 300 | properties: { 301 | entityName: { type: 
"string", description: "The name of the entity containing the observations" }, 302 | observations: { 303 | type: "array", 304 | items: { type: "string" }, 305 | description: "An array of observations to delete" 306 | }, 307 | }, 308 | required: ["entityName", "observations"], 309 | }, 310 | }, 311 | }, 312 | required: ["deletions"], 313 | }, 314 | }, 315 | { 316 | name: "delete_relations", 317 | description: "Delete multiple relations from the knowledge graph", 318 | inputSchema: { 319 | type: "object", 320 | properties: { 321 | relations: { 322 | type: "array", 323 | items: { 324 | type: "object", 325 | properties: { 326 | from: { type: "string", description: "The name of the entity where the relation starts" }, 327 | to: { type: "string", description: "The name of the entity where the relation ends" }, 328 | relationType: { type: "string", description: "The type of the relation" }, 329 | }, 330 | required: ["from", "to", "relationType"], 331 | }, 332 | description: "An array of relations to delete" 333 | }, 334 | }, 335 | required: ["relations"], 336 | }, 337 | }, 338 | { 339 | name: "read_graph", 340 | description: "Read the entire knowledge graph", 341 | inputSchema: { 342 | type: "object", 343 | properties: {}, 344 | }, 345 | }, 346 | { 347 | name: "search_nodes", 348 | description: "Search for nodes in the knowledge graph based on a query", 349 | inputSchema: { 350 | type: "object", 351 | properties: { 352 | query: { type: "string", description: "The search query to match against entity names, types, and observation content" }, 353 | }, 354 | required: ["query"], 355 | }, 356 | }, 357 | { 358 | name: "open_nodes", 359 | description: "Open specific nodes in the knowledge graph by their names", 360 | inputSchema: { 361 | type: "object", 362 | properties: { 363 | names: { 364 | type: "array", 365 | items: { type: "string" }, 366 | description: "An array of entity names to retrieve", 367 | }, 368 | }, 369 | required: ["names"], 370 | }, 371 | }, 372 | ], 373 | }; 
374 | }); 375 | 376 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 377 | const { name, arguments: args } = request.params; 378 | 379 | if (!args) { 380 | throw new Error(`No arguments provided for tool: ${name}`); 381 | } 382 | 383 | switch (name) { 384 | case "create_entities": 385 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createEntities(args.entities as Entity[]), null, 2) }] }; 386 | case "create_relations": 387 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createRelations(args.relations as Relation[]), null, 2) }] }; 388 | case "add_observations": 389 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.addObservations(args.observations as { entityName: string; contents: string[] }[]), null, 2) }] }; 390 | case "delete_entities": 391 | await knowledgeGraphManager.deleteEntities(args.entityNames as string[]); 392 | return { content: [{ type: "text", text: "Entities deleted successfully" }] }; 393 | case "delete_observations": 394 | await knowledgeGraphManager.deleteObservations(args.deletions as { entityName: string; observations: string[] }[]); 395 | return { content: [{ type: "text", text: "Observations deleted successfully" }] }; 396 | case "delete_relations": 397 | await knowledgeGraphManager.deleteRelations(args.relations as Relation[]); 398 | return { content: [{ type: "text", text: "Relations deleted successfully" }] }; 399 | case "read_graph": 400 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.readGraph(), null, 2) }] }; 401 | case "search_nodes": 402 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.searchNodes(args.query as string), null, 2) }] }; 403 | case "open_nodes": 404 | return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.openNodes(args.names as string[]), null, 2) }] }; 405 | default: 406 | throw new 
Error(`Unknown tool: ${name}`); 407 | } 408 | }); 409 | 410 | async function main() { 411 | const transport = new StdioServerTransport(); 412 | await server.connect(transport); 413 | console.error("Knowledge Graph MCP Server running on stdio"); 414 | } 415 | 416 | main().catch((error) => { 417 | console.error("Fatal error in main():", error); 418 | process.exit(1); 419 | }); 420 | -------------------------------------------------------------------------------- /src/memory/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-memory", 3 | "version": "0.6.3", 4 | "description": "MCP server for enabling memory for Claude through a knowledge graph", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-memory": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "1.0.1" 23 | }, 24 | "devDependencies": { 25 | "@types/node": "^22", 26 | "shx": "^0.3.4", 27 | "typescript": "^5.6.2" 28 | } 29 | } -------------------------------------------------------------------------------- /src/memory/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 
6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/sequentialthinking/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22.12-alpine AS builder 2 | 3 | COPY src/sequentialthinking /app 4 | COPY tsconfig.json /tsconfig.json 5 | 6 | WORKDIR /app 7 | 8 | RUN --mount=type=cache,target=/root/.npm npm install 9 | 10 | RUN --mount=type=cache,target=/root/.npm-production npm ci --ignore-scripts --omit-dev 11 | 12 | FROM node:22-alpine AS release 13 | 14 | COPY --from=builder /app/dist /app/dist 15 | COPY --from=builder /app/package.json /app/package.json 16 | COPY --from=builder /app/package-lock.json /app/package-lock.json 17 | 18 | ENV NODE_ENV=production 19 | 20 | WORKDIR /app 21 | 22 | RUN npm ci --ignore-scripts --omit-dev 23 | 24 | ENTRYPOINT ["node", "dist/index.js"] 25 | -------------------------------------------------------------------------------- /src/sequentialthinking/README.md: -------------------------------------------------------------------------------- 1 | # Sequential Thinking MCP Server 2 | 3 | An MCP server implementation that provides a tool for dynamic and reflective problem-solving through a structured thinking process. 4 | 5 | ## Features 6 | 7 | - Break down complex problems into manageable steps 8 | - Revise and refine thoughts as understanding deepens 9 | - Branch into alternative paths of reasoning 10 | - Adjust the total number of thoughts dynamically 11 | - Generate and verify solution hypotheses 12 | 13 | ## Tool 14 | 15 | ### sequential_thinking 16 | 17 | Facilitates a detailed, step-by-step thinking process for problem-solving and analysis. 
18 | 19 | **Inputs:** 20 | - `thought` (string): The current thinking step 21 | - `nextThoughtNeeded` (boolean): Whether another thought step is needed 22 | - `thoughtNumber` (integer): Current thought number 23 | - `totalThoughts` (integer): Estimated total thoughts needed 24 | - `isRevision` (boolean, optional): Whether this revises previous thinking 25 | - `revisesThought` (integer, optional): Which thought is being reconsidered 26 | - `branchFromThought` (integer, optional): Branching point thought number 27 | - `branchId` (string, optional): Branch identifier 28 | - `needsMoreThoughts` (boolean, optional): If more thoughts are needed 29 | 30 | ## Usage 31 | 32 | The Sequential Thinking tool is designed for: 33 | - Breaking down complex problems into steps 34 | - Planning and design with room for revision 35 | - Analysis that might need course correction 36 | - Problems where the full scope might not be clear initially 37 | - Tasks that need to maintain context over multiple steps 38 | - Situations where irrelevant information needs to be filtered out 39 | 40 | ## Configuration 41 | 42 | ### Usage with Claude Desktop 43 | 44 | Add this to your `claude_desktop_config.json`: 45 | 46 | #### npx 47 | 48 | ```json 49 | { 50 | "mcpServers": { 51 | "sequential-thinking": { 52 | "command": "npx", 53 | "args": [ 54 | "-y", 55 | "@modelcontextprotocol/server-sequential-thinking" 56 | ] 57 | } 58 | } 59 | } 60 | ``` 61 | 62 | #### docker 63 | 64 | ```json 65 | { 66 | "mcpServers": { 67 | "sequentialthinking": { 68 | "command": "docker", 69 | "args": [ 70 | "run", 71 | "--rm", 72 | "-i", 73 | "mcp/sequentialthinking" 74 | ] 75 | } 76 | } 77 | } 78 | ``` 79 | 80 | ### Usage with VS Code 81 | 82 | For quick installation, click one of the installation buttons below... 
83 | 84 | [![Install with NPX in VS Code](https://img.shields.io/badge/VS_Code-NPM-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-sequential-thinking%22%5D%7D) [![Install with NPX in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-NPM-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40modelcontextprotocol%2Fserver-sequential-thinking%22%5D%7D&quality=insiders) 85 | 86 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22mcp%2Fsequentialthinking%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=sequentialthinking&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22--rm%22%2C%22-i%22%2C%22mcp%2Fsequentialthinking%22%5D%7D&quality=insiders) 87 | 88 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open Settings (JSON)`. 89 | 90 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 91 | 92 | > Note that the `mcp` key is not needed in the `.vscode/mcp.json` file. 
93 | 94 | For NPX installation: 95 | 96 | ```json 97 | { 98 | "mcp": { 99 | "servers": { 100 | "sequential-thinking": { 101 | "command": "npx", 102 | "args": [ 103 | "-y", 104 | "@modelcontextprotocol/server-sequential-thinking" 105 | ] 106 | } 107 | } 108 | } 109 | } 110 | ``` 111 | 112 | For Docker installation: 113 | 114 | ```json 115 | { 116 | "mcp": { 117 | "servers": { 118 | "sequential-thinking": { 119 | "command": "docker", 120 | "args": [ 121 | "run", 122 | "--rm", 123 | "-i", 124 | "mcp/sequentialthinking" 125 | ] 126 | } 127 | } 128 | } 129 | } 130 | ``` 131 | 132 | ## Building 133 | 134 | Docker: 135 | 136 | ```bash 137 | docker build -t mcp/sequentialthinking -f src/sequentialthinking/Dockerfile . 138 | ``` 139 | 140 | ## License 141 | 142 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
143 | -------------------------------------------------------------------------------- /src/sequentialthinking/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | Tool, 9 | } from "@modelcontextprotocol/sdk/types.js"; 10 | // Fixed chalk import for ESM 11 | import chalk from 'chalk'; 12 | 13 | interface ThoughtData { 14 | thought: string; 15 | thoughtNumber: number; 16 | totalThoughts: number; 17 | isRevision?: boolean; 18 | revisesThought?: number; 19 | branchFromThought?: number; 20 | branchId?: string; 21 | needsMoreThoughts?: boolean; 22 | nextThoughtNeeded: boolean; 23 | } 24 | 25 | class SequentialThinkingServer { 26 | private thoughtHistory: ThoughtData[] = []; 27 | private branches: Record = {}; 28 | 29 | private validateThoughtData(input: unknown): ThoughtData { 30 | const data = input as Record; 31 | 32 | if (!data.thought || typeof data.thought !== 'string') { 33 | throw new Error('Invalid thought: must be a string'); 34 | } 35 | if (!data.thoughtNumber || typeof data.thoughtNumber !== 'number') { 36 | throw new Error('Invalid thoughtNumber: must be a number'); 37 | } 38 | if (!data.totalThoughts || typeof data.totalThoughts !== 'number') { 39 | throw new Error('Invalid totalThoughts: must be a number'); 40 | } 41 | if (typeof data.nextThoughtNeeded !== 'boolean') { 42 | throw new Error('Invalid nextThoughtNeeded: must be a boolean'); 43 | } 44 | 45 | return { 46 | thought: data.thought, 47 | thoughtNumber: data.thoughtNumber, 48 | totalThoughts: data.totalThoughts, 49 | nextThoughtNeeded: data.nextThoughtNeeded, 50 | isRevision: data.isRevision as boolean | undefined, 51 | revisesThought: data.revisesThought as number | undefined, 52 | branchFromThought: 
data.branchFromThought as number | undefined, 53 | branchId: data.branchId as string | undefined, 54 | needsMoreThoughts: data.needsMoreThoughts as boolean | undefined, 55 | }; 56 | } 57 | 58 | private formatThought(thoughtData: ThoughtData): string { 59 | const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData; 60 | 61 | let prefix = ''; 62 | let context = ''; 63 | 64 | if (isRevision) { 65 | prefix = chalk.yellow('🔄 Revision'); 66 | context = ` (revising thought ${revisesThought})`; 67 | } else if (branchFromThought) { 68 | prefix = chalk.green('🌿 Branch'); 69 | context = ` (from thought ${branchFromThought}, ID: ${branchId})`; 70 | } else { 71 | prefix = chalk.blue('💭 Thought'); 72 | context = ''; 73 | } 74 | 75 | const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`; 76 | const border = '─'.repeat(Math.max(header.length, thought.length) + 4); 77 | 78 | return ` 79 | ┌${border}┐ 80 | │ ${header} │ 81 | ├${border}┤ 82 | │ ${thought.padEnd(border.length - 2)} │ 83 | └${border}┘`; 84 | } 85 | 86 | public processThought(input: unknown): { content: Array<{ type: string; text: string }>; isError?: boolean } { 87 | try { 88 | const validatedInput = this.validateThoughtData(input); 89 | 90 | if (validatedInput.thoughtNumber > validatedInput.totalThoughts) { 91 | validatedInput.totalThoughts = validatedInput.thoughtNumber; 92 | } 93 | 94 | this.thoughtHistory.push(validatedInput); 95 | 96 | if (validatedInput.branchFromThought && validatedInput.branchId) { 97 | if (!this.branches[validatedInput.branchId]) { 98 | this.branches[validatedInput.branchId] = []; 99 | } 100 | this.branches[validatedInput.branchId].push(validatedInput); 101 | } 102 | 103 | const formattedThought = this.formatThought(validatedInput); 104 | console.error(formattedThought); 105 | 106 | return { 107 | content: [{ 108 | type: "text", 109 | text: JSON.stringify({ 110 | thoughtNumber: validatedInput.thoughtNumber, 111 | 
totalThoughts: validatedInput.totalThoughts, 112 | nextThoughtNeeded: validatedInput.nextThoughtNeeded, 113 | branches: Object.keys(this.branches), 114 | thoughtHistoryLength: this.thoughtHistory.length 115 | }, null, 2) 116 | }] 117 | }; 118 | } catch (error) { 119 | return { 120 | content: [{ 121 | type: "text", 122 | text: JSON.stringify({ 123 | error: error instanceof Error ? error.message : String(error), 124 | status: 'failed' 125 | }, null, 2) 126 | }], 127 | isError: true 128 | }; 129 | } 130 | } 131 | } 132 | 133 | const SEQUENTIAL_THINKING_TOOL: Tool = { 134 | name: "sequentialthinking", 135 | description: `A detailed tool for dynamic and reflective problem-solving through thoughts. 136 | This tool helps analyze problems through a flexible thinking process that can adapt and evolve. 137 | Each thought can build on, question, or revise previous insights as understanding deepens. 138 | 139 | When to use this tool: 140 | - Breaking down complex problems into steps 141 | - Planning and design with room for revision 142 | - Analysis that might need course correction 143 | - Problems where the full scope might not be clear initially 144 | - Problems that require a multi-step solution 145 | - Tasks that need to maintain context over multiple steps 146 | - Situations where irrelevant information needs to be filtered out 147 | 148 | Key features: 149 | - You can adjust total_thoughts up or down as you progress 150 | - You can question or revise previous thoughts 151 | - You can add more thoughts even after reaching what seemed like the end 152 | - You can express uncertainty and explore alternative approaches 153 | - Not every thought needs to build linearly - you can branch or backtrack 154 | - Generates a solution hypothesis 155 | - Verifies the hypothesis based on the Chain of Thought steps 156 | - Repeats the process until satisfied 157 | - Provides a correct answer 158 | 159 | Parameters explained: 160 | - thought: Your current thinking step, which can 
include: 161 | * Regular analytical steps 162 | * Revisions of previous thoughts 163 | * Questions about previous decisions 164 | * Realizations about needing more analysis 165 | * Changes in approach 166 | * Hypothesis generation 167 | * Hypothesis verification 168 | - next_thought_needed: True if you need more thinking, even if at what seemed like the end 169 | - thought_number: Current number in sequence (can go beyond initial total if needed) 170 | - total_thoughts: Current estimate of thoughts needed (can be adjusted up/down) 171 | - is_revision: A boolean indicating if this thought revises previous thinking 172 | - revises_thought: If is_revision is true, which thought number is being reconsidered 173 | - branch_from_thought: If branching, which thought number is the branching point 174 | - branch_id: Identifier for the current branch (if any) 175 | - needs_more_thoughts: If reaching end but realizing more thoughts needed 176 | 177 | You should: 178 | 1. Start with an initial estimate of needed thoughts, but be ready to adjust 179 | 2. Feel free to question or revise previous thoughts 180 | 3. Don't hesitate to add more thoughts if needed, even at the "end" 181 | 4. Express uncertainty when present 182 | 5. Mark thoughts that revise previous thinking or branch into new paths 183 | 6. Ignore information that is irrelevant to the current step 184 | 7. Generate a solution hypothesis when appropriate 185 | 8. Verify the hypothesis based on the Chain of Thought steps 186 | 9. Repeat the process until satisfied with the solution 187 | 10. Provide a single, ideally correct answer as the final output 188 | 11. 
Only set next_thought_needed to false when truly done and a satisfactory answer is reached`, 189 | inputSchema: { 190 | type: "object", 191 | properties: { 192 | thought: { 193 | type: "string", 194 | description: "Your current thinking step" 195 | }, 196 | nextThoughtNeeded: { 197 | type: "boolean", 198 | description: "Whether another thought step is needed" 199 | }, 200 | thoughtNumber: { 201 | type: "integer", 202 | description: "Current thought number", 203 | minimum: 1 204 | }, 205 | totalThoughts: { 206 | type: "integer", 207 | description: "Estimated total thoughts needed", 208 | minimum: 1 209 | }, 210 | isRevision: { 211 | type: "boolean", 212 | description: "Whether this revises previous thinking" 213 | }, 214 | revisesThought: { 215 | type: "integer", 216 | description: "Which thought is being reconsidered", 217 | minimum: 1 218 | }, 219 | branchFromThought: { 220 | type: "integer", 221 | description: "Branching point thought number", 222 | minimum: 1 223 | }, 224 | branchId: { 225 | type: "string", 226 | description: "Branch identifier" 227 | }, 228 | needsMoreThoughts: { 229 | type: "boolean", 230 | description: "If more thoughts are needed" 231 | } 232 | }, 233 | required: ["thought", "nextThoughtNeeded", "thoughtNumber", "totalThoughts"] 234 | } 235 | }; 236 | 237 | const server = new Server( 238 | { 239 | name: "sequential-thinking-server", 240 | version: "0.2.0", 241 | }, 242 | { 243 | capabilities: { 244 | tools: {}, 245 | }, 246 | } 247 | ); 248 | 249 | const thinkingServer = new SequentialThinkingServer(); 250 | 251 | server.setRequestHandler(ListToolsRequestSchema, async () => ({ 252 | tools: [SEQUENTIAL_THINKING_TOOL], 253 | })); 254 | 255 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 256 | if (request.params.name === "sequentialthinking") { 257 | return thinkingServer.processThought(request.params.arguments); 258 | } 259 | 260 | return { 261 | content: [{ 262 | type: "text", 263 | text: `Unknown tool: 
${request.params.name}` 264 | }], 265 | isError: true 266 | }; 267 | }); 268 | 269 | async function runServer() { 270 | const transport = new StdioServerTransport(); 271 | await server.connect(transport); 272 | console.error("Sequential Thinking MCP Server running on stdio"); 273 | } 274 | 275 | runServer().catch((error) => { 276 | console.error("Fatal error running server:", error); 277 | process.exit(1); 278 | }); 279 | -------------------------------------------------------------------------------- /src/sequentialthinking/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-sequential-thinking", 3 | "version": "0.6.2", 4 | "description": "MCP server for sequential thinking and problem solving", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-sequential-thinking": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0", 23 | "chalk": "^5.3.0", 24 | "yargs": "^17.7.2" 25 | }, 26 | "devDependencies": { 27 | "@types/node": "^22", 28 | "@types/yargs": "^17.0.32", 29 | "shx": "^0.3.4", 30 | "typescript": "^5.3.3" 31 | } 32 | } -------------------------------------------------------------------------------- /src/sequentialthinking/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": ".", 6 | "moduleResolution": "NodeNext", 7 | "module": "NodeNext" 8 | }, 9 | "include": ["./**/*.ts"] 10 | } 11 | 
-------------------------------------------------------------------------------- /src/time/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /src/time/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use a Python image with uv pre-installed 2 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS uv 3 | 4 | # Install the project into `/app` 5 | WORKDIR /app 6 | 7 | # Enable bytecode compilation 8 | ENV UV_COMPILE_BYTECODE=1 9 | 10 | # Copy from the cache instead of linking since it's a mounted volume 11 | ENV UV_LINK_MODE=copy 12 | 13 | # Install the project's dependencies using the lockfile and settings 14 | RUN --mount=type=cache,target=/root/.cache/uv \ 15 | --mount=type=bind,source=uv.lock,target=uv.lock \ 16 | --mount=type=bind,source=pyproject.toml,target=pyproject.toml \ 17 | uv sync --frozen --no-install-project --no-dev --no-editable 18 | 19 | # Then, add the rest of the project source code and install it 20 | # Installing separately from its dependencies allows optimal layer caching 21 | ADD . 
/app 22 | RUN --mount=type=cache,target=/root/.cache/uv \ 23 | uv sync --frozen --no-dev --no-editable 24 | 25 | FROM python:3.12-slim-bookworm 26 | 27 | WORKDIR /app 28 | 29 | COPY --from=uv /root/.local /root/.local 30 | COPY --from=uv --chown=app:app /app/.venv /app/.venv 31 | 32 | # Place executables in the environment at the front of the path 33 | ENV PATH="/app/.venv/bin:$PATH" 34 | 35 | # when running the container, add --local-timezone to the args to override the auto-detected system timezone 36 | ENTRYPOINT ["mcp-server-time"] 37 | -------------------------------------------------------------------------------- /src/time/README.md: -------------------------------------------------------------------------------- 1 | # Time MCP Server 2 | 3 | A Model Context Protocol server that provides time and timezone conversion capabilities. This server enables LLMs to get current time information and perform timezone conversions using IANA timezone names, with automatic system timezone detection. 4 | 5 | ### Available Tools 6 | 7 | - `get_current_time` - Get current time in a specific timezone or system timezone. 8 | - Required arguments: 9 | - `timezone` (string): IANA timezone name (e.g., 'America/New_York', 'Europe/London') 10 | 11 | - `convert_time` - Convert time between timezones. 12 | - Required arguments: 13 | - `source_timezone` (string): Source IANA timezone name 14 | - `time` (string): Time in 24-hour format (HH:MM) 15 | - `target_timezone` (string): Target IANA timezone name 16 | 17 | ## Installation 18 | 19 | ### Using uv (recommended) 20 | 21 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 22 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-time*.
23 | 24 | ### Using PIP 25 | 26 | Alternatively you can install `mcp-server-time` via pip: 27 | 28 | ```bash 29 | pip install mcp-server-time 30 | ``` 31 | 32 | After installation, you can run it as a script using: 33 | 34 | ```bash 35 | python -m mcp_server_time 36 | ``` 37 | 38 | ## Configuration 39 | 40 | ### Configure for Claude.app 41 | 42 | Add to your Claude settings: 43 | 44 |
45 | Using uvx 46 | 47 | ```json 48 | { 49 | "mcpServers": { 50 | "time": { 51 | "command": "uvx", 52 | "args": ["mcp-server-time"] 53 | } 54 | } 55 | } 56 | ``` 57 |
58 | 59 |
60 | Using docker 61 | 62 | ```json 63 | { 64 | "mcpServers": { 65 | "time": { 66 | "command": "docker", 67 | "args": ["run", "-i", "--rm", "mcp/time"] 68 | } 69 | } 70 | } 71 | ``` 72 |
73 | 74 |
75 | Using pip installation 76 | 77 | ```json 78 | { 79 | "mcpServers": { 80 | "time": { 81 | "command": "python", 82 | "args": ["-m", "mcp_server_time"] 83 | } 84 | } 85 | } 86 | ``` 87 |
88 | 89 | ### Configure for Zed 90 | 91 | Add to your Zed settings.json: 92 | 93 |
94 | Using uvx 95 | 96 | ```json 97 | "context_servers": [ 98 | "mcp-server-time": { 99 | "command": "uvx", 100 | "args": ["mcp-server-time"] 101 | } 102 | ], 103 | ``` 104 |
105 | 106 |
107 | Using pip installation 108 | 109 | ```json 110 | "context_servers": { 111 | "mcp-server-time": { 112 | "command": "python", 113 | "args": ["-m", "mcp_server_time"] 114 | } 115 | }, 116 | ``` 117 |
118 | 119 | ### Configure for VS Code 120 | 121 | For quick installation, use one of the one-click install buttons below... 122 | 123 | [![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-time%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22mcp-server-time%22%5D%7D&quality=insiders) 124 | 125 | [![Install with Docker in VS Code](https://img.shields.io/badge/VS_Code-Docker-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ftime%22%5D%7D) [![Install with Docker in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-Docker-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=time&config=%7B%22command%22%3A%22docker%22%2C%22args%22%3A%5B%22run%22%2C%22-i%22%2C%22--rm%22%2C%22mcp%2Ftime%22%5D%7D&quality=insiders) 126 | 127 | For manual installation, add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`. 128 | 129 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others. 130 | 131 | > Note that the `mcp` key is needed when using the `mcp.json` file. 132 | 133 |
134 | Using uvx 135 | 136 | ```json 137 | { 138 | "mcp": { 139 | "servers": { 140 | "time": { 141 | "command": "uvx", 142 | "args": ["mcp-server-time"] 143 | } 144 | } 145 | } 146 | } 147 | ``` 148 |
149 | 150 |
151 | Using Docker 152 | 153 | ```json 154 | { 155 | "mcp": { 156 | "servers": { 157 | "time": { 158 | "command": "docker", 159 | "args": ["run", "-i", "--rm", "mcp/time"] 160 | } 161 | } 162 | } 163 | } 164 | ``` 165 |
166 | 167 | ### Customization - System Timezone 168 | 169 | By default, the server automatically detects your system's timezone. You can override this by adding the argument `--local-timezone` to the `args` list in the configuration. 170 | 171 | Example: 172 | ```json 173 | { 174 | "command": "python", 175 | "args": ["-m", "mcp_server_time", "--local-timezone=America/New_York"] 176 | } 177 | ``` 178 | 179 | ## Example Interactions 180 | 181 | 1. Get current time: 182 | ```json 183 | { 184 | "name": "get_current_time", 185 | "arguments": { 186 | "timezone": "Europe/Warsaw" 187 | } 188 | } 189 | ``` 190 | Response: 191 | ```json 192 | { 193 | "timezone": "Europe/Warsaw", 194 | "datetime": "2024-01-01T13:00:00+01:00", 195 | "is_dst": false 196 | } 197 | ``` 198 | 199 | 2. Convert time between timezones: 200 | ```json 201 | { 202 | "name": "convert_time", 203 | "arguments": { 204 | "source_timezone": "America/New_York", 205 | "time": "16:30", 206 | "target_timezone": "Asia/Tokyo" 207 | } 208 | } 209 | ``` 210 | Response: 211 | ```json 212 | { 213 | "source": { 214 | "timezone": "America/New_York", 215 | "datetime": "2024-01-01T16:30:00-05:00", 216 | "is_dst": false 217 | }, 218 | "target": { 219 | "timezone": "Asia/Tokyo", 220 | "datetime": "2024-01-02T06:30:00+09:00", 221 | "is_dst": false 222 | }, 223 | "time_difference": "+14.0h" 224 | } 225 | ``` 226 | 227 | ## Debugging 228 | 229 | You can use the MCP inspector to debug the server. For uvx installations: 230 | 231 | ```bash 232 | npx @modelcontextprotocol/inspector uvx mcp-server-time 233 | ``` 234 | 235 | Or if you've installed the package in a specific directory or are developing on it: 236 | 237 | ```bash 238 | cd path/to/servers/src/time 239 | npx @modelcontextprotocol/inspector uv run mcp-server-time 240 | ``` 241 | 242 | ## Examples of Questions for Claude 243 | 244 | 1. "What time is it now?" (will use system timezone) 245 | 2. "What time is it in Tokyo?" 246 | 3.
"When it's 4 PM in New York, what time is it in London?" 247 | 4. "Convert 9:30 AM Tokyo time to New York time" 248 | 249 | ## Build 250 | 251 | Docker build: 252 | 253 | ```bash 254 | cd src/time 255 | docker build -t mcp/time . 256 | ``` 257 | 258 | ## Contributing 259 | 260 | We encourage contributions to help expand and improve mcp-server-time. Whether you want to add new time-related tools, enhance existing functionality, or improve documentation, your input is valuable. 261 | 262 | For examples of other MCP servers and implementation patterns, see: 263 | https://github.com/modelcontextprotocol/servers 264 | 265 | Pull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-time even more powerful and useful. 266 | 267 | ## License 268 | 269 | mcp-server-time is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
270 | -------------------------------------------------------------------------------- /src/time/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-time" 3 | version = "0.6.2" 4 | description = "A Model Context Protocol server providing tools for time queries and timezone conversions for LLMs" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [ 8 | { name = "Mariusz 'maledorak' Korzekwa", email = "mariusz@korzekwa.dev" }, 9 | ] 10 | keywords = ["time", "timezone", "mcp", "llm"] 11 | license = { text = "MIT" } 12 | classifiers = [ 13 | "Development Status :: 4 - Beta", 14 | "Intended Audience :: Developers", 15 | "License :: OSI Approved :: MIT License", 16 | "Programming Language :: Python :: 3", 17 | "Programming Language :: Python :: 3.10", 18 | ] 19 | dependencies = [ 20 | "mcp>=1.0.0", 21 | "pydantic>=2.0.0", 22 | "tzdata>=2024.2", 23 | ] 24 | 25 | [project.scripts] 26 | mcp-server-time = "mcp_server_time:main" 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | 32 | [tool.uv] 33 | dev-dependencies = [ 34 | "freezegun>=1.5.1", 35 | "pyright>=1.1.389", 36 | "pytest>=8.3.3", 37 | "ruff>=0.8.1", 38 | ] 39 | -------------------------------------------------------------------------------- /src/time/src/mcp_server_time/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import serve 2 | 3 | 4 | def main(): 5 | """MCP Time Server - Time and timezone conversion functionality for MCP""" 6 | import argparse 7 | import asyncio 8 | 9 | parser = argparse.ArgumentParser( 10 | description="give a model the ability to handle time queries and timezone conversions" 11 | ) 12 | parser.add_argument("--local-timezone", type=str, help="Override local timezone") 13 | 14 | args = parser.parse_args() 15 | asyncio.run(serve(args.local_timezone)) 16 | 17 | 18 | if __name__ == 
"__main__": 19 | main() 20 | -------------------------------------------------------------------------------- /src/time/src/mcp_server_time/__main__.py: -------------------------------------------------------------------------------- 1 | from mcp_server_time import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /src/time/src/mcp_server_time/server.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from enum import Enum 3 | import json 4 | from typing import Sequence 5 | 6 | from zoneinfo import ZoneInfo 7 | from mcp.server import Server 8 | from mcp.server.stdio import stdio_server 9 | from mcp.types import Tool, TextContent, ImageContent, EmbeddedResource 10 | from mcp.shared.exceptions import McpError 11 | 12 | from pydantic import BaseModel 13 | 14 | 15 | class TimeTools(str, Enum): 16 | GET_CURRENT_TIME = "get_current_time" 17 | CONVERT_TIME = "convert_time" 18 | 19 | 20 | class TimeResult(BaseModel): 21 | timezone: str 22 | datetime: str 23 | is_dst: bool 24 | 25 | 26 | class TimeConversionResult(BaseModel): 27 | source: TimeResult 28 | target: TimeResult 29 | time_difference: str 30 | 31 | 32 | class TimeConversionInput(BaseModel): 33 | source_tz: str 34 | time: str 35 | target_tz_list: list[str] 36 | 37 | 38 | def get_local_tz(local_tz_override: str | None = None) -> ZoneInfo: 39 | if local_tz_override: 40 | return ZoneInfo(local_tz_override) 41 | 42 | # Get local timezone from datetime.now() 43 | tzinfo = datetime.now().astimezone(tz=None).tzinfo 44 | if tzinfo is not None: 45 | return ZoneInfo(str(tzinfo)) 46 | raise McpError("Could not determine local timezone - tzinfo is None") 47 | 48 | 49 | def get_zoneinfo(timezone_name: str) -> ZoneInfo: 50 | try: 51 | return ZoneInfo(timezone_name) 52 | except Exception as e: 53 | raise McpError(f"Invalid timezone: {str(e)}") 54 | 55 | 56 | class TimeServer: 57 | def 
get_current_time(self, timezone_name: str) -> TimeResult: 58 | """Get current time in specified timezone""" 59 | timezone = get_zoneinfo(timezone_name) 60 | current_time = datetime.now(timezone) 61 | 62 | return TimeResult( 63 | timezone=timezone_name, 64 | datetime=current_time.isoformat(timespec="seconds"), 65 | is_dst=bool(current_time.dst()), 66 | ) 67 | 68 | def convert_time( 69 | self, source_tz: str, time_str: str, target_tz: str 70 | ) -> TimeConversionResult: 71 | """Convert time between timezones""" 72 | source_timezone = get_zoneinfo(source_tz) 73 | target_timezone = get_zoneinfo(target_tz) 74 | 75 | try: 76 | parsed_time = datetime.strptime(time_str, "%H:%M").time() 77 | except ValueError: 78 | raise ValueError("Invalid time format. Expected HH:MM [24-hour format]") 79 | 80 | now = datetime.now(source_timezone) 81 | source_time = datetime( 82 | now.year, 83 | now.month, 84 | now.day, 85 | parsed_time.hour, 86 | parsed_time.minute, 87 | tzinfo=source_timezone, 88 | ) 89 | 90 | target_time = source_time.astimezone(target_timezone) 91 | source_offset = source_time.utcoffset() or timedelta() 92 | target_offset = target_time.utcoffset() or timedelta() 93 | hours_difference = (target_offset - source_offset).total_seconds() / 3600 94 | 95 | if hours_difference.is_integer(): 96 | time_diff_str = f"{hours_difference:+.1f}h" 97 | else: 98 | # For fractional hours like Nepal's UTC+5:45 99 | time_diff_str = f"{hours_difference:+.2f}".rstrip("0").rstrip(".") + "h" 100 | 101 | return TimeConversionResult( 102 | source=TimeResult( 103 | timezone=source_tz, 104 | datetime=source_time.isoformat(timespec="seconds"), 105 | is_dst=bool(source_time.dst()), 106 | ), 107 | target=TimeResult( 108 | timezone=target_tz, 109 | datetime=target_time.isoformat(timespec="seconds"), 110 | is_dst=bool(target_time.dst()), 111 | ), 112 | time_difference=time_diff_str, 113 | ) 114 | 115 | 116 | async def serve(local_timezone: str | None = None) -> None: 117 | server = Server("mcp-time") 
118 | time_server = TimeServer() 119 | local_tz = str(get_local_tz(local_timezone)) 120 | 121 | @server.list_tools() 122 | async def list_tools() -> list[Tool]: 123 | """List available time tools.""" 124 | return [ 125 | Tool( 126 | name=TimeTools.GET_CURRENT_TIME.value, 127 | description="Get current time in a specific timezones", 128 | inputSchema={ 129 | "type": "object", 130 | "properties": { 131 | "timezone": { 132 | "type": "string", 133 | "description": f"IANA timezone name (e.g., 'America/New_York', 'Europe/London'). Use '{local_tz}' as local timezone if no timezone provided by the user.", 134 | } 135 | }, 136 | "required": ["timezone"], 137 | }, 138 | ), 139 | Tool( 140 | name=TimeTools.CONVERT_TIME.value, 141 | description="Convert time between timezones", 142 | inputSchema={ 143 | "type": "object", 144 | "properties": { 145 | "source_timezone": { 146 | "type": "string", 147 | "description": f"Source IANA timezone name (e.g., 'America/New_York', 'Europe/London'). Use '{local_tz}' as local timezone if no source timezone provided by the user.", 148 | }, 149 | "time": { 150 | "type": "string", 151 | "description": "Time to convert in 24-hour format (HH:MM)", 152 | }, 153 | "target_timezone": { 154 | "type": "string", 155 | "description": f"Target IANA timezone name (e.g., 'Asia/Tokyo', 'America/San_Francisco'). 
Use '{local_tz}' as local timezone if no target timezone provided by the user.", 156 | }, 157 | }, 158 | "required": ["source_timezone", "time", "target_timezone"], 159 | }, 160 | ), 161 | ] 162 | 163 | @server.call_tool() 164 | async def call_tool( 165 | name: str, arguments: dict 166 | ) -> Sequence[TextContent | ImageContent | EmbeddedResource]: 167 | """Handle tool calls for time queries.""" 168 | try: 169 | match name: 170 | case TimeTools.GET_CURRENT_TIME.value: 171 | timezone = arguments.get("timezone") 172 | if not timezone: 173 | raise ValueError("Missing required argument: timezone") 174 | 175 | result = time_server.get_current_time(timezone) 176 | 177 | case TimeTools.CONVERT_TIME.value: 178 | if not all( 179 | k in arguments 180 | for k in ["source_timezone", "time", "target_timezone"] 181 | ): 182 | raise ValueError("Missing required arguments") 183 | 184 | result = time_server.convert_time( 185 | arguments["source_timezone"], 186 | arguments["time"], 187 | arguments["target_timezone"], 188 | ) 189 | case _: 190 | raise ValueError(f"Unknown tool: {name}") 191 | 192 | return [ 193 | TextContent(type="text", text=json.dumps(result.model_dump(), indent=2)) 194 | ] 195 | 196 | except Exception as e: 197 | raise ValueError(f"Error processing mcp-server-time query: {str(e)}") 198 | 199 | options = server.create_initialization_options() 200 | async with stdio_server() as (read_stream, write_stream): 201 | await server.run(read_stream, write_stream, options) 202 | -------------------------------------------------------------------------------- /src/time/test/time_server_test.py: -------------------------------------------------------------------------------- 1 | 2 | from freezegun import freeze_time 3 | from mcp.shared.exceptions import McpError 4 | import pytest 5 | 6 | from mcp_server_time.server import TimeServer 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "test_time,timezone,expected", 11 | [ 12 | # UTC+1 non-DST 13 | ( 14 | "2024-01-01 12:00:00+00:00", 15 
| "Europe/Warsaw", 16 | { 17 | "timezone": "Europe/Warsaw", 18 | "datetime": "2024-01-01T13:00:00+01:00", 19 | "is_dst": False, 20 | }, 21 | ), 22 | # UTC non-DST 23 | ( 24 | "2024-01-01 12:00:00+00:00", 25 | "Europe/London", 26 | { 27 | "timezone": "Europe/London", 28 | "datetime": "2024-01-01T12:00:00+00:00", 29 | "is_dst": False, 30 | }, 31 | ), 32 | # UTC-5 non-DST 33 | ( 34 | "2024-01-01 12:00:00-00:00", 35 | "America/New_York", 36 | { 37 | "timezone": "America/New_York", 38 | "datetime": "2024-01-01T07:00:00-05:00", 39 | "is_dst": False, 40 | }, 41 | ), 42 | # UTC+1 DST 43 | ( 44 | "2024-03-31 12:00:00+00:00", 45 | "Europe/Warsaw", 46 | { 47 | "timezone": "Europe/Warsaw", 48 | "datetime": "2024-03-31T14:00:00+02:00", 49 | "is_dst": True, 50 | }, 51 | ), 52 | # UTC DST 53 | ( 54 | "2024-03-31 12:00:00+00:00", 55 | "Europe/London", 56 | { 57 | "timezone": "Europe/London", 58 | "datetime": "2024-03-31T13:00:00+01:00", 59 | "is_dst": True, 60 | }, 61 | ), 62 | # UTC-5 DST 63 | ( 64 | "2024-03-31 12:00:00-00:00", 65 | "America/New_York", 66 | { 67 | "timezone": "America/New_York", 68 | "datetime": "2024-03-31T08:00:00-04:00", 69 | "is_dst": True, 70 | }, 71 | ), 72 | ], 73 | ) 74 | def test_get_current_time(test_time, timezone, expected): 75 | with freeze_time(test_time): 76 | time_server = TimeServer() 77 | result = time_server.get_current_time(timezone) 78 | assert result.timezone == expected["timezone"] 79 | assert result.datetime == expected["datetime"] 80 | assert result.is_dst == expected["is_dst"] 81 | 82 | 83 | def test_get_current_time_with_invalid_timezone(): 84 | time_server = TimeServer() 85 | with pytest.raises( 86 | McpError, 87 | match=r"Invalid timezone: 'No time zone found with key Invalid/Timezone'", 88 | ): 89 | time_server.get_current_time("Invalid/Timezone") 90 | 91 | 92 | @pytest.mark.parametrize( 93 | "source_tz,time_str,target_tz,expected_error", 94 | [ 95 | ( 96 | "invalid_tz", 97 | "12:00", 98 | "Europe/London", 99 | "Invalid timezone: 
'No time zone found with key invalid_tz'", 100 | ), 101 | ( 102 | "Europe/Warsaw", 103 | "12:00", 104 | "invalid_tz", 105 | "Invalid timezone: 'No time zone found with key invalid_tz'", 106 | ), 107 | ( 108 | "Europe/Warsaw", 109 | "25:00", 110 | "Europe/London", 111 | "Invalid time format. Expected HH:MM [24-hour format]", 112 | ), 113 | ], 114 | ) 115 | def test_convert_time_errors(source_tz, time_str, target_tz, expected_error): 116 | time_server = TimeServer() 117 | with pytest.raises((McpError, ValueError), match=expected_error): 118 | time_server.convert_time(source_tz, time_str, target_tz) 119 | 120 | 121 | @pytest.mark.parametrize( 122 | "test_time,source_tz,time_str,target_tz,expected", 123 | [ 124 | # Basic case: Standard time conversion between Warsaw and London (1 hour difference) 125 | # Warsaw is UTC+1, London is UTC+0 126 | ( 127 | "2024-01-01 00:00:00+00:00", 128 | "Europe/Warsaw", 129 | "12:00", 130 | "Europe/London", 131 | { 132 | "source": { 133 | "timezone": "Europe/Warsaw", 134 | "datetime": "2024-01-01T12:00:00+01:00", 135 | "is_dst": False, 136 | }, 137 | "target": { 138 | "timezone": "Europe/London", 139 | "datetime": "2024-01-01T11:00:00+00:00", 140 | "is_dst": False, 141 | }, 142 | "time_difference": "-1.0h", 143 | }, 144 | ), 145 | # Reverse case of above: London to Warsaw conversion 146 | # Shows how time difference is positive when going east 147 | ( 148 | "2024-01-01 00:00:00+00:00", 149 | "Europe/London", 150 | "12:00", 151 | "Europe/Warsaw", 152 | { 153 | "source": { 154 | "timezone": "Europe/London", 155 | "datetime": "2024-01-01T12:00:00+00:00", 156 | "is_dst": False, 157 | }, 158 | "target": { 159 | "timezone": "Europe/Warsaw", 160 | "datetime": "2024-01-01T13:00:00+01:00", 161 | "is_dst": False, 162 | }, 163 | "time_difference": "+1.0h", 164 | }, 165 | ), 166 | # Edge case: Different DST periods between Europe and USA 167 | # Europe ends DST on Oct 27, while USA waits until Nov 3 168 | # This creates a one-week period where 
Europe is in standard time but USA still observes DST 169 | ( 170 | "2024-10-28 00:00:00+00:00", 171 | "Europe/Warsaw", 172 | "12:00", 173 | "America/New_York", 174 | { 175 | "source": { 176 | "timezone": "Europe/Warsaw", 177 | "datetime": "2024-10-28T12:00:00+01:00", 178 | "is_dst": False, 179 | }, 180 | "target": { 181 | "timezone": "America/New_York", 182 | "datetime": "2024-10-28T07:00:00-04:00", 183 | "is_dst": True, 184 | }, 185 | "time_difference": "-5.0h", 186 | }, 187 | ), 188 | # Follow-up to previous case: After both regions end DST 189 | # Shows how time difference increases by 1 hour when USA also ends DST 190 | ( 191 | "2024-11-04 00:00:00+00:00", 192 | "Europe/Warsaw", 193 | "12:00", 194 | "America/New_York", 195 | { 196 | "source": { 197 | "timezone": "Europe/Warsaw", 198 | "datetime": "2024-11-04T12:00:00+01:00", 199 | "is_dst": False, 200 | }, 201 | "target": { 202 | "timezone": "America/New_York", 203 | "datetime": "2024-11-04T06:00:00-05:00", 204 | "is_dst": False, 205 | }, 206 | "time_difference": "-6.0h", 207 | }, 208 | ), 209 | # Edge case: Nepal's unusual UTC+5:45 offset 210 | # One of the few time zones using 45-minute offset 211 | ( 212 | "2024-01-01 00:00:00+00:00", 213 | "Europe/Warsaw", 214 | "12:00", 215 | "Asia/Kathmandu", 216 | { 217 | "source": { 218 | "timezone": "Europe/Warsaw", 219 | "datetime": "2024-01-01T12:00:00+01:00", 220 | "is_dst": False, 221 | }, 222 | "target": { 223 | "timezone": "Asia/Kathmandu", 224 | "datetime": "2024-01-01T16:45:00+05:45", 225 | "is_dst": False, 226 | }, 227 | "time_difference": "+4.75h", 228 | }, 229 | ), 230 | # Reverse case for Nepal 231 | # Demonstrates how 45-minute offset works in opposite direction 232 | ( 233 | "2024-01-01 00:00:00+00:00", 234 | "Asia/Kathmandu", 235 | "12:00", 236 | "Europe/Warsaw", 237 | { 238 | "source": { 239 | "timezone": "Asia/Kathmandu", 240 | "datetime": "2024-01-01T12:00:00+05:45", 241 | "is_dst": False, 242 | }, 243 | "target": { 244 | "timezone": "Europe/Warsaw", 
245 | "datetime": "2024-01-01T07:15:00+01:00", 246 | "is_dst": False, 247 | }, 248 | "time_difference": "-4.75h", 249 | }, 250 | ), 251 | # Edge case: Lord Howe Island's unique DST rules 252 | # One of the few places using 30-minute DST shift 253 | # During summer (DST), they use UTC+11 254 | ( 255 | "2024-01-01 00:00:00+00:00", 256 | "Europe/Warsaw", 257 | "12:00", 258 | "Australia/Lord_Howe", 259 | { 260 | "source": { 261 | "timezone": "Europe/Warsaw", 262 | "datetime": "2024-01-01T12:00:00+01:00", 263 | "is_dst": False, 264 | }, 265 | "target": { 266 | "timezone": "Australia/Lord_Howe", 267 | "datetime": "2024-01-01T22:00:00+11:00", 268 | "is_dst": True, 269 | }, 270 | "time_difference": "+10.0h", 271 | }, 272 | ), 273 | # Second Lord Howe Island case: During their standard time 274 | # Shows transition to UTC+10:30 after DST ends 275 | ( 276 | "2024-04-07 00:00:00+00:00", 277 | "Europe/Warsaw", 278 | "12:00", 279 | "Australia/Lord_Howe", 280 | { 281 | "source": { 282 | "timezone": "Europe/Warsaw", 283 | "datetime": "2024-04-07T12:00:00+02:00", 284 | "is_dst": True, 285 | }, 286 | "target": { 287 | "timezone": "Australia/Lord_Howe", 288 | "datetime": "2024-04-07T20:30:00+10:30", 289 | "is_dst": False, 290 | }, 291 | "time_difference": "+8.5h", 292 | }, 293 | ), 294 | # Edge case: Date line crossing with Samoa 295 | # Demonstrates how a single time conversion can result in a date change 296 | # Samoa is UTC+13, creating almost a full day difference with Warsaw 297 | ( 298 | "2024-01-01 00:00:00+00:00", 299 | "Europe/Warsaw", 300 | "23:00", 301 | "Pacific/Apia", 302 | { 303 | "source": { 304 | "timezone": "Europe/Warsaw", 305 | "datetime": "2024-01-01T23:00:00+01:00", 306 | "is_dst": False, 307 | }, 308 | "target": { 309 | "timezone": "Pacific/Apia", 310 | "datetime": "2024-01-02T11:00:00+13:00", 311 | "is_dst": False, 312 | }, 313 | "time_difference": "+12.0h", 314 | }, 315 | ), 316 | # Edge case: Iran's unusual half-hour offset 317 | # Demonstrates conversion 
with Iran's UTC+3:30 timezone 318 | ( 319 | "2024-03-21 00:00:00+00:00", 320 | "Europe/Warsaw", 321 | "12:00", 322 | "Asia/Tehran", 323 | { 324 | "source": { 325 | "timezone": "Europe/Warsaw", 326 | "datetime": "2024-03-21T12:00:00+01:00", 327 | "is_dst": False, 328 | }, 329 | "target": { 330 | "timezone": "Asia/Tehran", 331 | "datetime": "2024-03-21T14:30:00+03:30", 332 | "is_dst": False, 333 | }, 334 | "time_difference": "+2.5h", 335 | }, 336 | ), 337 | # Edge case: Venezuela's unusual -4:30 offset (historical) 338 | # In 2016, Venezuela moved from -4:30 to -4:00 339 | # Useful for testing historical dates 340 | ( 341 | "2016-04-30 00:00:00+00:00", # Just before the change 342 | "Europe/Warsaw", 343 | "12:00", 344 | "America/Caracas", 345 | { 346 | "source": { 347 | "timezone": "Europe/Warsaw", 348 | "datetime": "2016-04-30T12:00:00+02:00", 349 | "is_dst": True, 350 | }, 351 | "target": { 352 | "timezone": "America/Caracas", 353 | "datetime": "2016-04-30T05:30:00-04:30", 354 | "is_dst": False, 355 | }, 356 | "time_difference": "-6.5h", 357 | }, 358 | ), 359 | # Edge case: Israel's variable DST 360 | # Israel's DST changes don't follow a fixed pattern 361 | # They often change dates year-to-year based on Hebrew calendar 362 | ( 363 | "2024-10-27 00:00:00+00:00", 364 | "Europe/Warsaw", 365 | "12:00", 366 | "Asia/Jerusalem", 367 | { 368 | "source": { 369 | "timezone": "Europe/Warsaw", 370 | "datetime": "2024-10-27T12:00:00+01:00", 371 | "is_dst": False, 372 | }, 373 | "target": { 374 | "timezone": "Asia/Jerusalem", 375 | "datetime": "2024-10-27T13:00:00+02:00", 376 | "is_dst": False, 377 | }, 378 | "time_difference": "+1.0h", 379 | }, 380 | ), 381 | # Edge case: Antarctica/Troll station 382 | # Only timezone that uses UTC+0 in winter and UTC+2 in summer 383 | # One of the few zones with exactly 2 hours DST difference 384 | ( 385 | "2024-03-31 00:00:00+00:00", 386 | "Europe/Warsaw", 387 | "12:00", 388 | "Antarctica/Troll", 389 | { 390 | "source": { 391 | "timezone": 
"Europe/Warsaw", 392 | "datetime": "2024-03-31T12:00:00+02:00", 393 | "is_dst": True, 394 | }, 395 | "target": { 396 | "timezone": "Antarctica/Troll", 397 | "datetime": "2024-03-31T12:00:00+02:00", 398 | "is_dst": True, 399 | }, 400 | "time_difference": "+0.0h", 401 | }, 402 | ), 403 | # Edge case: Kiribati date line anomaly 404 | # After skipping Dec 31, 1994, eastern Kiribati is UTC+14 405 | # The furthest forward timezone in the world 406 | ( 407 | "2024-01-01 00:00:00+00:00", 408 | "Europe/Warsaw", 409 | "23:00", 410 | "Pacific/Kiritimati", 411 | { 412 | "source": { 413 | "timezone": "Europe/Warsaw", 414 | "datetime": "2024-01-01T23:00:00+01:00", 415 | "is_dst": False, 416 | }, 417 | "target": { 418 | "timezone": "Pacific/Kiritimati", 419 | "datetime": "2024-01-02T12:00:00+14:00", 420 | "is_dst": False, 421 | }, 422 | "time_difference": "+13.0h", 423 | }, 424 | ), 425 | # Edge case: Chatham Islands, New Zealand 426 | # Uses unusual 45-minute offset AND observes DST 427 | # UTC+12:45 in standard time, UTC+13:45 in DST 428 | ( 429 | "2024-01-01 00:00:00+00:00", 430 | "Europe/Warsaw", 431 | "12:00", 432 | "Pacific/Chatham", 433 | { 434 | "source": { 435 | "timezone": "Europe/Warsaw", 436 | "datetime": "2024-01-01T12:00:00+01:00", 437 | "is_dst": False, 438 | }, 439 | "target": { 440 | "timezone": "Pacific/Chatham", 441 | "datetime": "2024-01-02T00:45:00+13:45", 442 | "is_dst": True, 443 | }, 444 | "time_difference": "+12.75h", 445 | }, 446 | ), 447 | ], 448 | ) 449 | def test_convert_time(test_time, source_tz, time_str, target_tz, expected): 450 | with freeze_time(test_time): 451 | time_server = TimeServer() 452 | result = time_server.convert_time(source_tz, time_str, target_tz) 453 | 454 | assert result.source.timezone == expected["source"]["timezone"] 455 | assert result.target.timezone == expected["target"]["timezone"] 456 | assert result.source.datetime == expected["source"]["datetime"] 457 | assert result.target.datetime == expected["target"]["datetime"] 458 
| assert result.source.is_dst == expected["source"]["is_dst"] 459 | assert result.target.is_dst == expected["target"]["is_dst"] 460 | assert result.time_difference == expected["time_difference"] 461 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "module": "Node16", 5 | "moduleResolution": "Node16", 6 | "strict": true, 7 | "esModuleInterop": true, 8 | "skipLibCheck": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "resolveJsonModule": true 11 | }, 12 | "include": ["src/**/*"], 13 | "exclude": ["node_modules"] 14 | } 15 | --------------------------------------------------------------------------------