├── .gitattributes ├── .github ├── pull_request_template.md └── workflows │ ├── python.yml │ └── typescript.yml ├── .gitignore ├── .npmrc ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── package-lock.json ├── package.json ├── src ├── brave-search │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── everything │ ├── README.md │ ├── everything.ts │ ├── index.ts │ ├── package.json │ ├── sse.ts │ └── tsconfig.json ├── fetch │ ├── .python-version │ ├── LICENSE │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ └── mcp_server_fetch │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ └── server.py │ └── uv.lock ├── filesystem │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── gdrive │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── git │ ├── .gitignore │ ├── .python-version │ ├── LICENSE │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ └── mcp_server_git │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ └── server.py │ └── uv.lock ├── github │ ├── README.md │ ├── index.ts │ ├── package-lock.json │ ├── package.json │ ├── schemas.ts │ └── tsconfig.json ├── google-maps │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── memory │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── postgres │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── puppeteer │ ├── .env.example │ ├── .gitignore │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json ├── sentry │ ├── .python-version │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ └── mcp_server_sentry │ │ │ ├── __init__.py │ │ │ └── server.py │ └── uv.lock ├── slack │ ├── README.md │ ├── index.ts │ ├── package.json │ └── tsconfig.json └── sqlite │ ├── .python-version │ ├── README.md │ ├── pyproject.toml │ ├── src │ └── mcp_server_sqlite │ │ ├── __init__.py │ │ └── server.py │ └── uv.lock └── tsconfig.json /.gitattributes: 
-------------------------------------------------------------------------------- 1 | package-lock.json linguist-generated=true 2 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Description 4 | 5 | ## Server Details 6 | 7 | - Server: 8 | - Changes to: 9 | 10 | ## Motivation and Context 11 | 12 | 13 | ## How Has This Been Tested? 14 | 15 | 16 | ## Breaking Changes 17 | 18 | 19 | ## Types of changes 20 | 21 | - [ ] New MCP Server 22 | - [ ] Bug fix (non-breaking change which fixes an issue) 23 | - [ ] New feature (non-breaking change which adds functionality) 24 | - [ ] Breaking change (fix or feature that would cause existing functionality to change) 25 | - [ ] Documentation update 26 | 27 | ## Checklist 28 | 29 | - [ ] I have read the [MCP Protocol Documentation](https://modelcontextprotocol.io) 30 | - [ ] My server follows MCP security best practices 31 | - [ ] I have updated the server's README accordingly 32 | - [ ] I have tested this with an LLM client 33 | - [ ] My code follows the repository's style guidelines 34 | - [ ] New and existing tests pass locally 35 | - [ ] I have added appropriate error handling 36 | - [ ] I have documented all environment variables and configuration options 37 | 38 | ## Additional context 39 | 40 | -------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Python 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | release: 9 | types: [published] 10 | 11 | jobs: 12 | detect-packages: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | packages: ${{ steps.find-packages.outputs.packages }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Find Python packages 20 | id: find-packages 21 | working-directory: 
src 22 | run: | 23 | PACKAGES=$(find . -name pyproject.toml -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]') 24 | echo "packages=$PACKAGES" >> $GITHUB_OUTPUT 25 | 26 | build: 27 | needs: [detect-packages] 28 | strategy: 29 | matrix: 30 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 31 | name: Build ${{ matrix.package }} 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - name: Install uv 37 | uses: astral-sh/setup-uv@v3 38 | 39 | - name: Set up Python 40 | uses: actions/setup-python@v5 41 | with: 42 | python-version-file: "src/${{ matrix.package }}/.python-version" 43 | 44 | - name: Install dependencies 45 | working-directory: src/${{ matrix.package }} 46 | run: uv sync --frozen --all-extras --dev 47 | 48 | - name: Run pyright 49 | working-directory: src/${{ matrix.package }} 50 | run: uv run --frozen pyright 51 | 52 | - name: Build package 53 | working-directory: src/${{ matrix.package }} 54 | run: uv build 55 | 56 | - name: Upload artifacts 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: dist-${{ matrix.package }} 60 | path: src/${{ matrix.package }}/dist/ 61 | 62 | publish: 63 | runs-on: ubuntu-latest 64 | needs: [build, detect-packages] 65 | if: github.event_name == 'release' 66 | 67 | strategy: 68 | matrix: 69 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 70 | name: Publish ${{ matrix.package }} 71 | 72 | environment: release 73 | permissions: 74 | id-token: write # Required for trusted publishing 75 | 76 | steps: 77 | - name: Download artifacts 78 | uses: actions/download-artifact@v4 79 | with: 80 | name: dist-${{ matrix.package }} 81 | path: dist/ 82 | 83 | - name: Publish package to PyPI 84 | uses: pypa/gh-action-pypi-publish@release/v1 85 | -------------------------------------------------------------------------------- /.github/workflows/typescript.yml: -------------------------------------------------------------------------------- 1 | name: 
TypeScript 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | release: 9 | types: [published] 10 | 11 | jobs: 12 | detect-packages: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | packages: ${{ steps.find-packages.outputs.packages }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Find JS packages 19 | id: find-packages 20 | working-directory: src 21 | run: | 22 | PACKAGES=$(find . -name package.json -not -path "*/node_modules/*" -exec dirname {} \; | sed 's/^\.\///' | jq -R -s -c 'split("\n")[:-1]') 23 | echo "packages=$PACKAGES" >> $GITHUB_OUTPUT 24 | 25 | build: 26 | needs: [detect-packages] 27 | strategy: 28 | matrix: 29 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 30 | name: Build ${{ matrix.package }} 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: actions/checkout@v4 34 | 35 | - uses: actions/setup-node@v4 36 | with: 37 | node-version: 18 38 | cache: npm 39 | 40 | - name: Install dependencies 41 | working-directory: src/${{ matrix.package }} 42 | run: npm ci 43 | 44 | - name: Build package 45 | working-directory: src/${{ matrix.package }} 46 | run: npm run build 47 | 48 | publish: 49 | runs-on: ubuntu-latest 50 | needs: [build, detect-packages] 51 | if: github.event_name == 'release' 52 | environment: release 53 | 54 | strategy: 55 | matrix: 56 | package: ${{ fromJson(needs.detect-packages.outputs.packages) }} 57 | name: Publish ${{ matrix.package }} 58 | 59 | permissions: 60 | contents: read 61 | id-token: write 62 | 63 | steps: 64 | - uses: actions/checkout@v4 65 | - uses: actions/setup-node@v4 66 | with: 67 | node-version: 18 68 | cache: npm 69 | registry-url: "https://registry.npmjs.org" 70 | 71 | - name: Install dependencies 72 | working-directory: src/${{ matrix.package }} 73 | run: npm ci 74 | 75 | - name: Publish package 76 | working-directory: src/${{ matrix.package }} 77 | run: npm publish # --provenance 78 | env: 79 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 80 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | *.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / 
generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | 132 | build/ 133 | 134 | gcp-oauth.keys.json 135 | .*-server-credentials.json 136 | 137 | # Byte-compiled / optimized / DLL files 138 | __pycache__/ 139 | *.py[cod] 140 | *$py.class 141 | 142 | # C extensions 143 | *.so 144 | 145 | # Distribution / packaging 146 | .Python 147 | build/ 148 | develop-eggs/ 149 | dist/ 150 | downloads/ 151 | eggs/ 152 | .eggs/ 153 | lib/ 154 | lib64/ 155 | parts/ 156 | sdist/ 157 | var/ 158 | wheels/ 159 | share/python-wheels/ 160 | *.egg-info/ 161 | .installed.cfg 162 | *.egg 163 | MANIFEST 164 | 165 | # PyInstaller 166 | # Usually these files are written by a python script from a template 167 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
168 | *.manifest 169 | *.spec 170 | 171 | # Installer logs 172 | pip-log.txt 173 | pip-delete-this-directory.txt 174 | 175 | # Unit test / coverage reports 176 | htmlcov/ 177 | .tox/ 178 | .nox/ 179 | .coverage 180 | .coverage.* 181 | .cache 182 | nosetests.xml 183 | coverage.xml 184 | *.cover 185 | *.py,cover 186 | .hypothesis/ 187 | .pytest_cache/ 188 | cover/ 189 | 190 | # Translations 191 | *.mo 192 | *.pot 193 | 194 | # Django stuff: 195 | *.log 196 | local_settings.py 197 | db.sqlite3 198 | db.sqlite3-journal 199 | 200 | # Flask stuff: 201 | instance/ 202 | .webassets-cache 203 | 204 | # Scrapy stuff: 205 | .scrapy 206 | 207 | # Sphinx documentation 208 | docs/_build/ 209 | 210 | # PyBuilder 211 | .pybuilder/ 212 | target/ 213 | 214 | # Jupyter Notebook 215 | .ipynb_checkpoints 216 | 217 | # IPython 218 | profile_default/ 219 | ipython_config.py 220 | 221 | # pyenv 222 | # For a library or package, you might want to ignore these files since the code is 223 | # intended to run in multiple environments; otherwise, check them in: 224 | # .python-version 225 | 226 | # pipenv 227 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 228 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 229 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 230 | # install all needed dependencies. 231 | #Pipfile.lock 232 | 233 | # poetry 234 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 235 | # This is especially recommended for binary packages to ensure reproducibility, and is more 236 | # commonly ignored for libraries. 237 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 238 | #poetry.lock 239 | 240 | # pdm 241 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
242 | #pdm.lock 243 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 244 | # in version control. 245 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 246 | .pdm.toml 247 | .pdm-python 248 | .pdm-build/ 249 | 250 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 251 | __pypackages__/ 252 | 253 | # Celery stuff 254 | celerybeat-schedule 255 | celerybeat.pid 256 | 257 | # SageMath parsed files 258 | *.sage.py 259 | 260 | # Environments 261 | .env 262 | .venv 263 | env/ 264 | venv/ 265 | ENV/ 266 | env.bak/ 267 | venv.bak/ 268 | 269 | # Spyder project settings 270 | .spyderproject 271 | .spyproject 272 | 273 | # Rope project settings 274 | .ropeproject 275 | 276 | # mkdocs documentation 277 | /site 278 | 279 | # mypy 280 | .mypy_cache/ 281 | .dmypy.json 282 | dmypy.json 283 | 284 | # Pyre type checker 285 | .pyre/ 286 | 287 | # pytype static type analyzer 288 | .pytype/ 289 | 290 | # Cython debug symbols 291 | cython_debug/ 292 | 293 | .DS_Store 294 | 295 | # PyCharm 296 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 297 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 298 | # and can be added to the global gitignore or merged into this file. For a more nuclear 299 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
300 | #.idea/ 301 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | registry="https://registry.npmjs.org/" 2 | @modelcontextprotocol:registry="https://registry.npmjs.org/" 3 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | mcp-coc@anthropic.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to MCP Servers 2 | 3 | Thank you for your interest in contributing to the Model Context Protocol (MCP) servers! This document provides guidelines and instructions for contributing. 4 | 5 | ## Types of Contributions 6 | 7 | ### 1. New Servers 8 | Adding a new server is a valuable way to contribute. 
Before creating a new server: 9 | 10 | - Check the [modelcontextprotocol.io](https://modelcontextprotocol.io) documentation 11 | - Ensure your server doesn't duplicate existing functionality 12 | - Consider whether your server would be generally useful to others 13 | - Follow [security best practices](https://modelcontextprotocol.io/docs/concepts/transports#security-considerations) from the MCP documentation 14 | 15 | ### 2. Improvements to Existing Servers 16 | Enhancements to existing servers are welcome! This includes: 17 | 18 | - Bug fixes 19 | - Performance improvements 20 | - New features 21 | - Security enhancements 22 | 23 | ### 3. Documentation 24 | Documentation improvements are always welcome: 25 | 26 | - Fixing typos or unclear instructions 27 | - Adding examples 28 | - Improving setup instructions 29 | - Adding troubleshooting guides 30 | 31 | ## Getting Started 32 | 33 | 1. Fork the repository 34 | 2. Clone your fork: 35 | ```bash 36 | git clone https://github.com/your-username/servers.git 37 | ``` 38 | 3. Add the upstream remote: 39 | ```bash 40 | git remote add upstream https://github.com/modelcontextprotocol/servers.git 41 | ``` 42 | 4. Create a branch: 43 | ```bash 44 | git checkout -b my-feature 45 | ``` 46 | 47 | ## Development Guidelines 48 | 49 | ### Code Style 50 | - Follow the existing code style in the repository 51 | - Include appropriate type definitions 52 | - Add comments for complex logic 53 | 54 | ### Documentation 55 | - Include a detailed README.md in your server directory 56 | - Document all configuration options 57 | - Provide setup instructions 58 | - Include usage examples 59 | 60 | ### Security 61 | - Follow security best practices 62 | - Implement proper input validation 63 | - Handle errors appropriately 64 | - Document security considerations 65 | 66 | ## Submitting Changes 67 | 68 | 1. Commit your changes: 69 | ```bash 70 | git add . 71 | git commit -m "Description of changes" 72 | ``` 73 | 2. 
Push to your fork: 74 | ```bash 75 | git push origin my-feature 76 | ``` 77 | 3. Create a Pull Request through GitHub 78 | 79 | ### Pull Request Guidelines 80 | 81 | - Thoroughly test your changes 82 | - Fill out the pull request template completely 83 | - Link any related issues 84 | - Provide clear description of changes 85 | - Include any necessary documentation updates 86 | - Add screenshots for UI changes 87 | - List any breaking changes 88 | 89 | ## Community 90 | 91 | - Participate in [GitHub Discussions](https://github.com/modelcontextprotocol/servers/discussions) 92 | - Follow the [Code of Conduct](CODE_OF_CONDUCT.md) 93 | 94 | ## Questions? 95 | 96 | - Check the [documentation](https://modelcontextprotocol.io) 97 | - Ask in GitHub Discussions 98 | 99 | Thank you for contributing to MCP Servers! -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Anthropic, PBC 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP servers 2 | 3 | **You're probably looking for the [Puppeteer (Browserbase Version)](src/puppeteer) server. 4 | You can access it [here](src/puppeteer).** 5 | 6 | --- 7 | 8 | A collection of reference implementations and community-contributed servers for the [Model Context Protocol](https://modelcontextprotocol.io/) (MCP). This repository showcases the versatility and extensibility of MCP, demonstrating how it can be used to give Large Language Models (LLMs) secure, controlled access to tools and data sources. 9 | 10 | Each MCP server is implemented with either the [TypeScript MCP SDK](https://github.com/modelcontextprotocol/typescript-sdk) or [Python MCP SDK](https://github.com/modelcontextprotocol/python-sdk). 
11 | 12 | ## 🌟 Featured Servers 13 | 14 | - **[Filesystem](src/filesystem)** - Secure file operations with configurable access controls 15 | - **[GitHub](src/github)** - Repository management, file operations, and GitHub API integration 16 | - **[Google Drive](src/gdrive)** - File access and search capabilities for Google Drive 17 | - **[PostgreSQL](src/postgres)** - Read-only database access with schema inspection 18 | - **[Slack](src/slack)** - Channel management and messaging capabilities 19 | - **[Memory](src/memory)** - Knowledge graph-based persistent memory system 20 | - **[Puppeteer](src/puppeteer)** - Browser automation and web scraping 21 | - **[Brave Search](src/brave-search)** - Web and local search using Brave's Search API 22 | - **[Google Maps](src/google-maps)** - Location services, directions, and place details 23 | - **[Fetch](src/fetch)** - Web content fetching and conversion for efficient LLM usage 24 | 25 | ## 🚀 Getting Started 26 | 27 | ### Using MCP Servers in this Repository 28 | TypeScript-based servers in this repository can be used directly with `npx`. 29 | 30 | For example, this will start the [Memory](src/memory) server: 31 | ```sh 32 | npx -y @modelcontextprotocol/server-memory 33 | ``` 34 | 35 | Python-based servers in this repository can be used directly with [`uvx`](https://docs.astral.sh/uv/concepts/tools/) or [`pip`](https://pypi.org/project/pip/). `uvx` is recommended for ease of use and setup. 36 | 37 | For example, this will start the [Git](src/git) server: 38 | ```sh 39 | # With uvx 40 | uvx mcp-server-git 41 | 42 | # With pip 43 | pip install mcp-server-git 44 | python -m mcp_server_git 45 | ``` 46 | 47 | Follow [these](https://docs.astral.sh/uv/getting-started/installation/) instructions to install `uv` / `uvx` and [these](https://pip.pypa.io/en/stable/installation/) to install `pip`. 
48 | 49 | ### Using an MCP Client 50 | However, running a server on its own isn't very useful, and should instead be configured into an MCP client. For example, here's the Claude Desktop configuration to use the above server: 51 | 52 | ```json 53 | { 54 | "mcpServers": { 55 | "memory": { 56 | "command": "npx", 57 | "args": ["-y", "@modelcontextprotocol/server-memory"] 58 | } 59 | } 60 | } 61 | ``` 62 | 63 | Additional examples of using the Claude Desktop as an MCP client might look like: 64 | 65 | ```json 66 | { 67 | "mcpServers": { 68 | "filesystem": { 69 | "command": "npx", 70 | "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/files"] 71 | }, 72 | "git": { 73 | "command": "uvx", 74 | "args": ["mcp-server-git", "--repository", "path/to/git/repo"] 75 | }, 76 | "github": { 77 | "command": "npx", 78 | "args": ["-y", "@modelcontextprotocol/server-github"], 79 | "env": { 80 | "GITHUB_PERSONAL_ACCESS_TOKEN": "" 81 | } 82 | }, 83 | "postgres": { 84 | "command": "npx", 85 | "args": ["-y", "@modelcontextprotocol/server-postgres", "postgresql://localhost/mydb"] 86 | } 87 | } 88 | } 89 | ``` 90 | 91 | ## 🛠️ Creating Your Own Server 92 | 93 | Interested in creating your own MCP server? Visit the official documentation at [modelcontextprotocol.io](https://modelcontextprotocol.io/introduction) for comprehensive guides, best practices, and technical details on implementing MCP servers. 94 | 95 | ## 🤝 Contributing 96 | 97 | See [CONTRIBUTING.md](CONTRIBUTING.md) for information about contributing to this repository. 98 | 99 | ## 🔒 Security 100 | 101 | See [SECURITY.md](SECURITY.md) for reporting security vulnerabilities. 102 | 103 | ## 📜 License 104 | 105 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
106 | 107 | ## 💬 Community 108 | 109 | - [GitHub Discussions](https://github.com/modelcontextprotocol/servers/discussions) 110 | 111 | ## ⭐ Support 112 | 113 | If you find MCP servers useful, please consider starring the repository and contributing new servers or improvements! 114 | 115 | --- 116 | 117 | Managed by Anthropic, but built together with the community. The Model Context Protocol is open source and we encourage everyone to contribute their own servers and improvements! 118 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | Thank you for helping us keep our MCP servers secure. 3 | 4 | These servers are maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project. 5 | 6 | The security of our systems and user data is Anthropic’s top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities. 7 | 8 | ## Vulnerability Disclosure Program 9 | 10 | Our Vulnerability Program guidelines are defined on our [HackerOne program page](https://hackerone.com/anthropic-vdp). We ask that any validated vulnerability in this functionality be reported through the [submission form](https://hackerone.com/anthropic-vdp/reports/new?type=team&report_type=vulnerability). 
11 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/servers", 3 | "private": true, 4 | "version": "0.5.1", 5 | "description": "Model Context Protocol servers", 6 | "license": "MIT", 7 | "author": "Anthropic, PBC (https://anthropic.com)", 8 | "homepage": "https://modelcontextprotocol.io", 9 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 10 | "type": "module", 11 | "workspaces": [ 12 | "src/*" 13 | ], 14 | "files": [], 15 | "scripts": { 16 | "build": "npm run build --workspaces", 17 | "watch": "npm run watch --workspaces", 18 | "publish-all": "npm publish --workspaces --access public", 19 | "link-all": "npm link --workspaces" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/server-everything": "*", 23 | "@modelcontextprotocol/server-gdrive": "*", 24 | "@modelcontextprotocol/server-postgres": "*", 25 | "@modelcontextprotocol/server-puppeteer": "*", 26 | "@modelcontextprotocol/server-slack": "*", 27 | "@modelcontextprotocol/server-brave-search": "*", 28 | "@modelcontextprotocol/server-memory": "*", 29 | "@modelcontextprotocol/server-filesystem": "*" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/brave-search/README.md: -------------------------------------------------------------------------------- 1 | # Brave Search MCP Server 2 | 3 | An MCP server implementation that integrates the Brave Search API, providing both web and local search capabilities. 
4 | 5 | ## Features 6 | 7 | - **Web Search**: General queries, news, articles, with pagination and freshness controls 8 | - **Local Search**: Find businesses, restaurants, and services with detailed information 9 | - **Flexible Filtering**: Control result types, safety levels, and content freshness 10 | - **Smart Fallbacks**: Local search automatically falls back to web when no results are found 11 | 12 | ## Tools 13 | 14 | - **brave_web_search** 15 | - Execute web searches with pagination and filtering 16 | - Inputs: 17 | - `query` (string): Search terms 18 | - `count` (number, optional): Results per page (max 20) 19 | - `offset` (number, optional): Pagination offset (max 9) 20 | 21 | - **brave_local_search** 22 | - Search for local businesses and services 23 | - Inputs: 24 | - `query` (string): Local search terms 25 | - `count` (number, optional): Number of results (max 20) 26 | - Automatically falls back to web search if no local results found 27 | 28 | 29 | ## Configuration 30 | 31 | ### Getting an API Key 32 | 1. Sign up for a [Brave Search API account](https://brave.com/search/api/) 33 | 2. Choose a plan (Free tier available with 2,000 queries/month) 34 | 3. Generate your API key [from the developer dashboard](https://api.search.brave.com/app/keys) 35 | 36 | ### Usage with Claude Desktop 37 | Add this to your `claude_desktop_config.json`: 38 | 39 | ```json 40 | { 41 | "brave-search": { 42 | "command": "npx", 43 | "args": ["-y", "@modelcontextprotocol/server-brave-search"], 44 | "env": { 45 | "BRAVE_API_KEY": "YOUR_API_KEY_HERE" 46 | } 47 | } 48 | } 49 | ``` 50 | 51 | ## License 52 | 53 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
54 | -------------------------------------------------------------------------------- /src/brave-search/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | Tool, 9 | } from "@modelcontextprotocol/sdk/types.js"; 10 | 11 | const WEB_SEARCH_TOOL: Tool = { 12 | name: "brave_web_search", 13 | description: 14 | "Performs a web search using the Brave Search API, ideal for general queries, news, articles, and online content. " + 15 | "Use this for broad information gathering, recent events, or when you need diverse web sources. " + 16 | "Supports pagination, content filtering, and freshness controls. " + 17 | "Maximum 20 results per request, with offset for pagination. ", 18 | inputSchema: { 19 | type: "object", 20 | properties: { 21 | query: { 22 | type: "string", 23 | description: "Search query (max 400 chars, 50 words)" 24 | }, 25 | count: { 26 | type: "number", 27 | description: "Number of results (1-20, default 10)", 28 | default: 10 29 | }, 30 | offset: { 31 | type: "number", 32 | description: "Pagination offset (max 9, default 0)", 33 | default: 0 34 | }, 35 | }, 36 | required: ["query"], 37 | }, 38 | }; 39 | 40 | const LOCAL_SEARCH_TOOL: Tool = { 41 | name: "brave_local_search", 42 | description: 43 | "Searches for local businesses and places using Brave's Local Search API. " + 44 | "Best for queries related to physical locations, businesses, restaurants, services, etc. " + 45 | "Returns detailed information including:\n" + 46 | "- Business names and addresses\n" + 47 | "- Ratings and review counts\n" + 48 | "- Phone numbers and opening hours\n" + 49 | "Use this when the query implies 'near me' or mentions specific locations. 
" + 50 | "Automatically falls back to web search if no local results are found.", 51 | inputSchema: { 52 | type: "object", 53 | properties: { 54 | query: { 55 | type: "string", 56 | description: "Local search query (e.g. 'pizza near Central Park')" 57 | }, 58 | count: { 59 | type: "number", 60 | description: "Number of results (1-20, default 5)", 61 | default: 5 62 | }, 63 | }, 64 | required: ["query"] 65 | } 66 | }; 67 | 68 | // Server implementation 69 | const server = new Server( 70 | { 71 | name: "example-servers/brave-search", 72 | version: "0.1.0", 73 | }, 74 | { 75 | capabilities: { 76 | tools: {}, 77 | }, 78 | }, 79 | ); 80 | 81 | // Check for API key 82 | const BRAVE_API_KEY = process.env.BRAVE_API_KEY!; 83 | if (!BRAVE_API_KEY) { 84 | console.error("Error: BRAVE_API_KEY environment variable is required"); 85 | process.exit(1); 86 | } 87 | 88 | const RATE_LIMIT = { 89 | perSecond: 1, 90 | perMonth: 15000 91 | }; 92 | 93 | let requestCount = { 94 | second: 0, 95 | month: 0, 96 | lastReset: Date.now() 97 | }; 98 | 99 | function checkRateLimit() { 100 | const now = Date.now(); 101 | if (now - requestCount.lastReset > 1000) { 102 | requestCount.second = 0; 103 | requestCount.lastReset = now; 104 | } 105 | if (requestCount.second >= RATE_LIMIT.perSecond || 106 | requestCount.month >= RATE_LIMIT.perMonth) { 107 | throw new Error('Rate limit exceeded'); 108 | } 109 | requestCount.second++; 110 | requestCount.month++; 111 | } 112 | 113 | interface BraveWeb { 114 | web?: { 115 | results?: Array<{ 116 | title: string; 117 | description: string; 118 | url: string; 119 | language?: string; 120 | published?: string; 121 | rank?: number; 122 | }>; 123 | }; 124 | locations?: { 125 | results?: Array<{ 126 | id: string; // Required by API 127 | title?: string; 128 | }>; 129 | }; 130 | } 131 | 132 | interface BraveLocation { 133 | id: string; 134 | name: string; 135 | address: { 136 | streetAddress?: string; 137 | addressLocality?: string; 138 | addressRegion?: string; 139 
| postalCode?: string; 140 | }; 141 | coordinates?: { 142 | latitude: number; 143 | longitude: number; 144 | }; 145 | phone?: string; 146 | rating?: { 147 | ratingValue?: number; 148 | ratingCount?: number; 149 | }; 150 | openingHours?: string[]; 151 | priceRange?: string; 152 | } 153 | 154 | interface BravePoiResponse { 155 | results: BraveLocation[]; 156 | } 157 | 158 | interface BraveDescription { 159 | descriptions: {[id: string]: string}; 160 | } 161 | 162 | function isBraveWebSearchArgs(args: unknown): args is { query: string; count?: number } { 163 | return ( 164 | typeof args === "object" && 165 | args !== null && 166 | "query" in args && 167 | typeof (args as { query: string }).query === "string" 168 | ); 169 | } 170 | 171 | function isBraveLocalSearchArgs(args: unknown): args is { query: string; count?: number } { 172 | return ( 173 | typeof args === "object" && 174 | args !== null && 175 | "query" in args && 176 | typeof (args as { query: string }).query === "string" 177 | ); 178 | } 179 | 180 | async function performWebSearch(query: string, count: number = 10, offset: number = 0) { 181 | checkRateLimit(); 182 | const url = new URL('https://api.search.brave.com/res/v1/web/search'); 183 | url.searchParams.set('q', query); 184 | url.searchParams.set('count', Math.min(count, 20).toString()); // API limit 185 | url.searchParams.set('offset', offset.toString()); 186 | 187 | const response = await fetch(url, { 188 | headers: { 189 | 'Accept': 'application/json', 190 | 'Accept-Encoding': 'gzip', 191 | 'X-Subscription-Token': BRAVE_API_KEY 192 | } 193 | }); 194 | 195 | if (!response.ok) { 196 | throw new Error(`Brave API error: ${response.status} ${response.statusText}\n${await response.text()}`); 197 | } 198 | 199 | const data = await response.json() as BraveWeb; 200 | 201 | // Extract just web results 202 | const results = (data.web?.results || []).map(result => ({ 203 | title: result.title || '', 204 | description: result.description || '', 205 | url: 
result.url || '' 206 | })); 207 | 208 | return results.map(r => 209 | `Title: ${r.title}\nDescription: ${r.description}\nURL: ${r.url}` 210 | ).join('\n\n'); 211 | } 212 | 213 | async function performLocalSearch(query: string, count: number = 5) { 214 | checkRateLimit(); 215 | // Initial search to get location IDs 216 | const webUrl = new URL('https://api.search.brave.com/res/v1/web/search'); 217 | webUrl.searchParams.set('q', query); 218 | webUrl.searchParams.set('search_lang', 'en'); 219 | webUrl.searchParams.set('result_filter', 'locations'); 220 | webUrl.searchParams.set('count', Math.min(count, 20).toString()); 221 | 222 | const webResponse = await fetch(webUrl, { 223 | headers: { 224 | 'Accept': 'application/json', 225 | 'Accept-Encoding': 'gzip', 226 | 'X-Subscription-Token': BRAVE_API_KEY 227 | } 228 | }); 229 | 230 | if (!webResponse.ok) { 231 | throw new Error(`Brave API error: ${webResponse.status} ${webResponse.statusText}\n${await webResponse.text()}`); 232 | } 233 | 234 | const webData = await webResponse.json() as BraveWeb; 235 | const locationIds = webData.locations?.results?.filter((r): r is {id: string; title?: string} => r.id != null).map(r => r.id) || []; 236 | 237 | if (locationIds.length === 0) { 238 | return performWebSearch(query, count); // Fallback to web search 239 | } 240 | 241 | // Get POI details and descriptions in parallel 242 | const [poisData, descriptionsData] = await Promise.all([ 243 | getPoisData(locationIds), 244 | getDescriptionsData(locationIds) 245 | ]); 246 | 247 | return formatLocalResults(poisData, descriptionsData); 248 | } 249 | 250 | async function getPoisData(ids: string[]): Promise { 251 | checkRateLimit(); 252 | const url = new URL('https://api.search.brave.com/res/v1/local/pois'); 253 | ids.filter(Boolean).forEach(id => url.searchParams.append('ids', id)); 254 | const response = await fetch(url, { 255 | headers: { 256 | 'Accept': 'application/json', 257 | 'Accept-Encoding': 'gzip', 258 | 'X-Subscription-Token': 
BRAVE_API_KEY 259 | } 260 | }); 261 | 262 | if (!response.ok) { 263 | throw new Error(`Brave API error: ${response.status} ${response.statusText}\n${await response.text()}`); 264 | } 265 | 266 | const poisResponse = await response.json() as BravePoiResponse; 267 | return poisResponse; 268 | } 269 | 270 | async function getDescriptionsData(ids: string[]): Promise { 271 | checkRateLimit(); 272 | const url = new URL('https://api.search.brave.com/res/v1/local/descriptions'); 273 | ids.filter(Boolean).forEach(id => url.searchParams.append('ids', id)); 274 | const response = await fetch(url, { 275 | headers: { 276 | 'Accept': 'application/json', 277 | 'Accept-Encoding': 'gzip', 278 | 'X-Subscription-Token': BRAVE_API_KEY 279 | } 280 | }); 281 | 282 | if (!response.ok) { 283 | throw new Error(`Brave API error: ${response.status} ${response.statusText}\n${await response.text()}`); 284 | } 285 | 286 | const descriptionsData = await response.json() as BraveDescription; 287 | return descriptionsData; 288 | } 289 | 290 | function formatLocalResults(poisData: BravePoiResponse, descData: BraveDescription): string { 291 | return (poisData.results || []).map(poi => { 292 | const address = [ 293 | poi.address?.streetAddress ?? '', 294 | poi.address?.addressLocality ?? '', 295 | poi.address?.addressRegion ?? '', 296 | poi.address?.postalCode ?? '' 297 | ].filter(part => part !== '').join(', ') || 'N/A'; 298 | 299 | return `Name: ${poi.name} 300 | Address: ${address} 301 | Phone: ${poi.phone || 'N/A'} 302 | Rating: ${poi.rating?.ratingValue ?? 'N/A'} (${poi.rating?.ratingCount ?? 
0} reviews) 303 | Price Range: ${poi.priceRange || 'N/A'} 304 | Hours: ${(poi.openingHours || []).join(', ') || 'N/A'} 305 | Description: ${descData.descriptions[poi.id] || 'No description available'} 306 | `; 307 | }).join('\n---\n') || 'No local results found'; 308 | } 309 | 310 | // Tool handlers 311 | server.setRequestHandler(ListToolsRequestSchema, async () => ({ 312 | tools: [WEB_SEARCH_TOOL, LOCAL_SEARCH_TOOL], 313 | })); 314 | 315 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 316 | try { 317 | const { name, arguments: args } = request.params; 318 | 319 | if (!args) { 320 | throw new Error("No arguments provided"); 321 | } 322 | 323 | switch (name) { 324 | case "brave_web_search": { 325 | if (!isBraveWebSearchArgs(args)) { 326 | throw new Error("Invalid arguments for brave_web_search"); 327 | } 328 | const { query, count = 10 } = args; 329 | const results = await performWebSearch(query, count); 330 | return { 331 | content: [{ type: "text", text: results }], 332 | isError: false, 333 | }; 334 | } 335 | 336 | case "brave_local_search": { 337 | if (!isBraveLocalSearchArgs(args)) { 338 | throw new Error("Invalid arguments for brave_local_search"); 339 | } 340 | const { query, count = 5 } = args; 341 | const results = await performLocalSearch(query, count); 342 | return { 343 | content: [{ type: "text", text: results }], 344 | isError: false, 345 | }; 346 | } 347 | 348 | default: 349 | return { 350 | content: [{ type: "text", text: `Unknown tool: ${name}` }], 351 | isError: true, 352 | }; 353 | } 354 | } catch (error) { 355 | return { 356 | content: [ 357 | { 358 | type: "text", 359 | text: `Error: ${error instanceof Error ? 
error.message : String(error)}`, 360 | }, 361 | ], 362 | isError: true, 363 | }; 364 | } 365 | }); 366 | 367 | async function runServer() { 368 | const transport = new StdioServerTransport(); 369 | await server.connect(transport); 370 | console.error("Brave Search MCP Server running on stdio"); 371 | } 372 | 373 | runServer().catch((error) => { 374 | console.error("Fatal error running server:", error); 375 | process.exit(1); 376 | }); 377 | -------------------------------------------------------------------------------- /src/brave-search/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-brave-search", 3 | "version": "0.5.2", 4 | "description": "MCP server for Brave Search API integration", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-brave-search": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0" 23 | }, 24 | "devDependencies": { 25 | "@types/node": "^20.10.0", 26 | "shx": "^0.3.4", 27 | "typescript": "^5.6.2" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/brave-search/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 
6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/everything/README.md: -------------------------------------------------------------------------------- 1 | # Everything MCP Server 2 | 3 | This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities. 4 | 5 | ## Components 6 | 7 | ### Tools 8 | 9 | 1. `echo` 10 | - Simple tool to echo back input messages 11 | - Input: 12 | - `message` (string): Message to echo back 13 | - Returns: Text content with echoed message 14 | 15 | 2. `add` 16 | - Adds two numbers together 17 | - Inputs: 18 | - `a` (number): First number 19 | - `b` (number): Second number 20 | - Returns: Text result of the addition 21 | 22 | 3. `longRunningOperation` 23 | - Demonstrates progress notifications for long operations 24 | - Inputs: 25 | - `duration` (number, default: 10): Duration in seconds 26 | - `steps` (number, default: 5): Number of progress steps 27 | - Returns: Completion message with duration and steps 28 | - Sends progress notifications during execution 29 | 30 | 4. `sampleLLM` 31 | - Demonstrates LLM sampling capability using MCP sampling feature 32 | - Inputs: 33 | - `prompt` (string): The prompt to send to the LLM 34 | - `maxTokens` (number, default: 100): Maximum tokens to generate 35 | - Returns: Generated LLM response 36 | 37 | 5. 
`getTinyImage` 38 | - Returns a small test image 39 | - No inputs required 40 | - Returns: Base64 encoded PNG image data 41 | 42 | ### Resources 43 | 44 | The server provides 100 test resources in two formats: 45 | - Even numbered resources: 46 | - Plaintext format 47 | - URI pattern: `test://static/resource/{even_number}` 48 | - Content: Simple text description 49 | 50 | - Odd numbered resources: 51 | - Binary blob format 52 | - URI pattern: `test://static/resource/{odd_number}` 53 | - Content: Base64 encoded binary data 54 | 55 | Resource features: 56 | - Supports pagination (10 items per page) 57 | - Allows subscribing to resource updates 58 | - Demonstrates resource templates 59 | - Auto-updates subscribed resources every 5 seconds 60 | 61 | ### Prompts 62 | 63 | 1. `simple_prompt` 64 | - Basic prompt without arguments 65 | - Returns: Single message exchange 66 | 67 | 2. `complex_prompt` 68 | - Advanced prompt demonstrating argument handling 69 | - Required arguments: 70 | - `temperature` (number): Temperature setting 71 | - Optional arguments: 72 | - `style` (string): Output style preference 73 | - Returns: Multi-turn conversation with images 74 | 75 | ## Usage with Claude Desktop 76 | 77 | Add to your `claude_desktop_config.json`: 78 | 79 | ```json 80 | { 81 | "everything": { 82 | "command": "npx", 83 | "args": ["-y", "@modelcontextprotocol/server-everything"] 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/everything/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 4 | import { createServer } from "./everything.js"; 5 | 6 | async function main() { 7 | const transport = new StdioServerTransport(); 8 | const { server, cleanup } = createServer(); 9 | 10 | await server.connect(transport); 11 | 12 | // Cleanup on exit 13 | process.on("SIGINT", async () 
=> { 14 | await cleanup(); 15 | await server.close(); 16 | process.exit(0); 17 | }); 18 | } 19 | 20 | main().catch((error) => { 21 | console.error("Server error:", error); 22 | process.exit(1); 23 | }); 24 | -------------------------------------------------------------------------------- /src/everything/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-everything", 3 | "version": "0.5.1", 4 | "description": "MCP server that exercises all the features of the MCP protocol", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-everything": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0", 23 | "express": "^4.21.1", 24 | "zod": "^3.23.8", 25 | "zod-to-json-schema": "^3.23.5" 26 | }, 27 | "devDependencies": { 28 | "@types/express": "^5.0.0", 29 | "shx": "^0.3.4", 30 | "typescript": "^5.6.2" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/everything/sse.ts: -------------------------------------------------------------------------------- 1 | import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js"; 2 | import express from "express"; 3 | import { createServer } from "./everything.js"; 4 | 5 | const app = express(); 6 | 7 | const { server, cleanup } = createServer(); 8 | 9 | let transport: SSEServerTransport; 10 | 11 | app.get("/sse", async (req, res) => { 12 | console.log("Received connection"); 13 | transport = new SSEServerTransport("/message", res); 14 | await server.connect(transport); 15 | 16 | 
server.onclose = async () => { 17 | await cleanup(); 18 | await server.close(); 19 | process.exit(0); 20 | }; 21 | }); 22 | 23 | app.post("/message", async (req, res) => { 24 | console.log("Received message"); 25 | 26 | await transport.handlePostMessage(req, res); 27 | }); 28 | 29 | const PORT = process.env.PORT || 3001; 30 | app.listen(PORT, () => { 31 | console.log(`Server is running on port ${PORT}`); 32 | }); 33 | -------------------------------------------------------------------------------- /src/everything/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/fetch/.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /src/fetch/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Anthropic, PBC. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /src/fetch/README.md: -------------------------------------------------------------------------------- 1 | # Fetch MCP Server 2 | 3 | A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption. 4 | 5 | Presently the server only supports fetching HTML content. 6 | 7 | ### Available Tools 8 | 9 | - `fetch` - Fetches a URL from the internet and extracts its contents as markdown. 10 | 11 | ### Prompts 12 | 13 | - **fetch** 14 | - Fetch a URL and extract its contents as markdown 15 | - Argument: `url` (string, required): URL to fetch 16 | 17 | ## Installation 18 | 19 | ### Using uv (recommended) 20 | 21 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 22 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-fetch*. 23 | 24 | ### Using PIP 25 | 26 | Alternatively you can install `mcp-server-fetch` via pip: 27 | 28 | ``` 29 | pip install mcp-server-fetch 30 | ``` 31 | 32 | After installation, you can run it as a script using: 33 | 34 | ``` 35 | python -m mcp_server_fetch 36 | ``` 37 | 38 | ## Configuration 39 | 40 | ### Configure for Claude.app 41 | 42 | Add to your Claude settings: 43 | 44 |
45 | Using uvx 46 | 47 | ```json 48 | "mcpServers": { 49 | "fetch": { 50 | "command": "uvx", 51 | "args": ["mcp-server-fetch"] 52 | } 53 | } 54 | ``` 55 |
56 | 57 |
58 | Using pip installation 59 | 60 | ```json 61 | "mcpServers": { 62 | "fetch": { 63 | "command": "python", 64 | "args": ["-m", "mcp_server_fetch"] 65 | } 66 | } 67 | ``` 68 |
69 | 70 | ### Configure for Zed 71 | 72 | Add to your Zed settings.json: 73 | 74 |
75 | Using uvx 76 | 77 | ```json 78 | "context_servers": { 79 | "mcp-server-fetch": { 80 | "command": "uvx", 81 | "args": ["mcp-server-fetch"] 82 | } 83 | }, 84 | ``` 85 |
86 | 87 |
88 | Using pip installation 89 | 90 | ```json 91 | "context_servers": { 92 | "mcp-server-fetch": { 93 | "command": "python", 94 | "args": ["-m", "mcp_server_fetch"] 95 | } 96 | }, 97 | ``` 98 |
99 | 100 | ### Customization - robots.txt 101 | 102 | By default, the server will obey a website's robots.txt file if the request came from the model (via a tool), but not if 103 | the request was user initiated (via a prompt). This can be disabled by adding the argument `--ignore-robots-txt` to the 104 | `args` list in the configuration. 105 | 106 | ### Customization - User-agent 107 | 108 | By default, depending on if the request came from the model (via a tool), or was user initiated (via a prompt), the 109 | server will use either the user-agent 110 | ``` 111 | ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers) 112 | ``` 113 | or 114 | ``` 115 | ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers) 116 | ``` 117 | 118 | This can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration. 119 | 120 | ## Debugging 121 | 122 | You can use the MCP inspector to debug the server. For uvx installations: 123 | 124 | ``` 125 | npx @modelcontextprotocol/inspector uvx mcp-server-fetch 126 | ``` 127 | 128 | Or if you've installed the package in a specific directory or are developing on it: 129 | 130 | ``` 131 | cd path/to/servers/src/fetch 132 | npx @modelcontextprotocol/inspector uv run mcp-server-fetch 133 | ``` 134 | 135 | ## Contributing 136 | 137 | We encourage contributions to help expand and improve mcp-server-fetch. Whether you want to add new tools, enhance existing functionality, or improve documentation, your input is valuable. 138 | 139 | For examples of other MCP servers and implementation patterns, see: 140 | https://github.com/modelcontextprotocol/servers 141 | 142 | Pull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-fetch even more powerful and useful. 143 | 144 | ## License 145 | 146 | mcp-server-fetch is licensed under the MIT License.
This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 147 | -------------------------------------------------------------------------------- /src/fetch/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-fetch" 3 | version = "0.1.2" 4 | description = "A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [{ name = "Anthropic, PBC." }] 8 | maintainers = [{ name = "Jack Adamson", email = "jadamson@anthropic.com" }] 9 | keywords = ["http", "mcp", "llm", "automation"] 10 | license = { text = "MIT" } 11 | classifiers = [ 12 | "Development Status :: 4 - Beta", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Programming Language :: Python :: 3", 16 | "Programming Language :: Python :: 3.10", 17 | ] 18 | dependencies = [ 19 | "markdownify>=0.13.1", 20 | "mcp>=1.0.0", 21 | "protego>=0.3.1", 22 | "pydantic>=2.0.0", 23 | "readabilipy>=0.2.0", 24 | "requests>=2.32.3", 25 | ] 26 | 27 | [project.scripts] 28 | mcp-server-fetch = "mcp_server_fetch:main" 29 | 30 | [build-system] 31 | requires = ["hatchling"] 32 | build-backend = "hatchling.build" 33 | 34 | [tool.uv] 35 | dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3"] 36 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import serve 2 | 3 | 4 | def main(): 5 | """MCP Fetch Server - HTTP fetching functionality for MCP""" 6 | import argparse 7 | import asyncio 8 | 9 | parser = argparse.ArgumentParser( 10 | description="give a model the ability to make web requests" 11 | ) 12 
| parser.add_argument("--user-agent", type=str, help="Custom User-Agent string") 13 | parser.add_argument( 14 | "--ignore-robots-txt", 15 | action="store_true", 16 | help="Ignore robots.txt restrictions", 17 | ) 18 | 19 | args = parser.parse_args() 20 | asyncio.run(serve(args.user_agent, args.ignore_robots_txt)) 21 | 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/__main__.py: -------------------------------------------------------------------------------- 1 | # __main__.py 2 | 3 | from mcp_server_fetch import main 4 | 5 | main() 6 | -------------------------------------------------------------------------------- /src/fetch/src/mcp_server_fetch/server.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from urllib.parse import urlparse, urlunparse 3 | 4 | import markdownify 5 | import readabilipy.simple_json 6 | from mcp.shared.exceptions import McpError 7 | from mcp.server import Server 8 | from mcp.server.stdio import stdio_server 9 | from mcp.types import ( 10 | GetPromptResult, 11 | Prompt, 12 | PromptArgument, 13 | PromptMessage, 14 | TextContent, 15 | Tool, 16 | INVALID_PARAMS, 17 | INTERNAL_ERROR, 18 | ) 19 | from protego import Protego 20 | from pydantic import BaseModel, Field 21 | 22 | DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)" 23 | DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)" 24 | 25 | 26 | def extract_content(html: str) -> str: 27 | ret = readabilipy.simple_json.simple_json_from_html_string(html) 28 | if not ret["plain_content"]: 29 | return "Page failed to be simplified from HTML" 30 | content = markdownify.markdownify( 31 | ret["plain_content"], 32 | heading_style=markdownify.ATX, 33 | ) 34 | return content 35 | 

def get_robots_txt_url(url: str) -> str:
    """Return the robots.txt URL for the site that serves ``url``."""
    # Parse the URL into components
    parsed = urlparse(url)

    # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
    robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))

    return robots_url


async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
    """
    Check if the URL can be fetched by the user agent according to the robots.txt file.
    Raises an McpError if not.
    """
    # Local import of the HTTP client (httpx is only needed when actually fetching).
    from httpx import AsyncClient, HTTPError

    robot_txt_url = get_robots_txt_url(url)

    async with AsyncClient() as client:
        try:
            response = await client.get(
                robot_txt_url, headers={"User-Agent": user_agent}
            )
        except HTTPError:
            # Network-level failure fetching robots.txt: treat as a hard error.
            raise McpError(
                INTERNAL_ERROR,
                f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
            )
        if response.status_code in (401, 403):
            # An access-denied robots.txt is treated as a blanket deny for
            # autonomous (tool-initiated) fetching.
            raise McpError(
                INTERNAL_ERROR,
                f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
            )
        elif 400 <= response.status_code < 500:
            # Any other 4xx (e.g. 404: no robots.txt published) means no
            # restrictions apply; allow the fetch.
            return
        robot_txt = response.text
    # Drop comment lines before parsing.
    # NOTE(review): Protego normally ignores comments itself; presumably this
    # is a workaround for malformed files — confirm before removing.
    processed_robot_txt = "\n".join(
        line for line in robot_txt.splitlines() if not line.strip().startswith("#")
    )
    robot_parser = Protego.parse(processed_robot_txt)
    if not robot_parser.can_fetch(url, user_agent):
        raise McpError(
            INTERNAL_ERROR,
            f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
            f"{user_agent}\n"
            f"{url}"
            f"\n{robot_txt}\n\n"
            f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
            f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
        )


async def fetch_url(url: str, user_agent: str) -> str:
    """
    Fetch ``url`` (following redirects) and return its contents simplified to
    markdown via ``extract_content``. Raises McpError on connection failure or
    any HTTP status >= 400.
    """
    from httpx import AsyncClient, HTTPError

    async with AsyncClient() as client:
        try:
            response = await client.get(
                url, follow_redirects=True, headers={"User-Agent": user_agent}
            )
        except HTTPError:
            raise McpError(INTERNAL_ERROR, f"Failed to fetch {url}")
        if response.status_code >= 400:
            raise McpError(
                INTERNAL_ERROR,
                f"Failed to fetch {url} - status code {response.status_code}",
            )

        page_html = response.text

    return extract_content(page_html)


# Argument model for the "fetch" tool; its JSON schema is exposed to clients
# via model_json_schema(). (No class docstring on purpose: pydantic would
# surface it as the schema "description", changing the advertised schema.)
class Fetch(BaseModel):
    url: str = Field(..., description="URL to fetch")


async def serve(
    custom_user_agent: Optional[str] = None, ignore_robots_txt: bool = False
) -> None:
    """
    Run the fetch MCP server over stdio.

    Args:
        custom_user_agent: If given, overrides BOTH default User-Agent strings
            (autonomous/tool and manual/prompt requests).
        ignore_robots_txt: If True, skip the robots.txt permission check for
            tool-initiated fetches. Prompt-initiated fetches never check it.
    """
    server = Server("mcp-fetch")
    # Tool calls (model-initiated) and prompt calls (user-initiated) identify
    # themselves with different User-Agent strings unless overridden.
    user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS
    user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL

    @server.list_tools()
    async def list_tools() -> list[Tool]:
        # Single "fetch" tool whose input schema comes from the Fetch model.
        return [
            Tool(
                name="fetch",
                description="""Fetches a URL from the internet and extracts its contents as markdown.

Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. Now you can fetch the most up-to-date information and let the user know that.""",
                inputSchema=Fetch.model_json_schema(),
            )
        ]

    @server.list_prompts()
    async def list_prompts() -> list[Prompt]:
        # Companion prompt so users can trigger fetches manually from their UI.
        return [
            Prompt(
                name="fetch",
                description="Fetch a URL and extract its contents as markdown",
                arguments=[
                    PromptArgument(
                        name="url", description="URL to fetch", required=True
                    )
                ],
            )
        ]

    @server.call_tool()
    async def call_tool(name, arguments: dict) -> list[TextContent]:
        # Tool path: model-initiated, so honor robots.txt unless disabled.
        url = arguments.get("url")
        if not url:
            raise McpError(INVALID_PARAMS, "URL is required")

        if not ignore_robots_txt:
            await check_may_autonomously_fetch_url(url, user_agent_autonomous)

        content = await fetch_url(url, user_agent_autonomous)
        return [TextContent(type="text", text=f"Contents of {url}:\n{content}")]

    @server.get_prompt()
    async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
        # Prompt path: user-initiated, so robots.txt is NOT consulted.
        if not arguments or "url" not in arguments:
            raise McpError(INVALID_PARAMS, "URL is required")

        url = arguments["url"]

        try:
            content = await fetch_url(url, user_agent_manual)
            # TODO: after SDK bug is addressed, don't catch the exception
        except McpError as e:
            # Surface the failure as prompt content instead of propagating.
            return GetPromptResult(
                description=f"Failed to fetch {url}",
                messages=[
                    PromptMessage(
                        role="user",
                        content=TextContent(type="text", text=str(e)),
                    )
                ],
            )
        return GetPromptResult(
            description=f"Contents of {url}",
            messages=[
                PromptMessage(
                    role="user", content=TextContent(type="text", text=content)
                )
            ],
        )

    options = server.create_initialization_options()
    async with stdio_server() as (read_stream, write_stream):
        await server.run(read_stream, write_stream, options, raise_exceptions=True)
-------------------------------------------------------------------------------- /src/filesystem/README.md: -------------------------------------------------------------------------------- 1 | # Filesystem MCP Server 2 | 3 | Node.js server implementing Model Context Protocol (MCP) for filesystem operations. 4 | 5 | ## Features 6 | 7 | - Read/write files 8 | - Create/list/delete directories 9 | - Move files/directories 10 | - Search files 11 | - Get file metadata 12 | 13 | **Note**: The server will only allow operations within directories specified via `args`. 14 | 15 | ## API 16 | 17 | ### Resources 18 | 19 | - `file://system`: File system operations interface 20 | 21 | ### Tools 22 | 23 | - **read_file** 24 | - Read complete contents of a file 25 | - Input: `path` (string) 26 | - Reads complete file contents with UTF-8 encoding 27 | 28 | - **read_multiple_files** 29 | - Read multiple files simultaneously 30 | - Input: `paths` (string[]) 31 | - Failed reads won't stop the entire operation 32 | 33 | - **write_file** 34 | - Create new file or overwrite existing (exercise caution with this) 35 | - Inputs: 36 | - `path` (string): File location 37 | - `content` (string): File content 38 | 39 | - **create_directory** 40 | - Create new directory or ensure it exists 41 | - Input: `path` (string) 42 | - Creates parent directories if needed 43 | - Succeeds silently if directory exists 44 | 45 | - **list_directory** 46 | - List directory contents with [FILE] or [DIR] prefixes 47 | - Input: `path` (string) 48 | 49 | - **move_file** 50 | - Move or rename files and directories 51 | - Inputs: 52 | - `source` (string) 53 | - `destination` (string) 54 | - Fails if destination exists 55 | 56 | - **search_files** 57 | - Recursively search for files/directories 58 | - Inputs: 59 | - `path` (string): Starting directory 60 | - `pattern` (string): Search pattern 61 | - Case-insensitive matching 62 | - Returns full paths to matches 63 | 64 | - **get_file_info** 65 | - Get detailed 
file/directory metadata 66 | - Input: `path` (string) 67 | - Returns: 68 | - Size 69 | - Creation time 70 | - Modified time 71 | - Access time 72 | - Type (file/directory) 73 | - Permissions 74 | 75 | - **list_allowed_directories** 76 | - List all directories the server is allowed to access 77 | - No input required 78 | - Returns: 79 | - Directories that this server can read/write from 80 | 81 | ## Usage with Claude Desktop 82 | Add this to your `claude_desktop_config.json`: 83 | ```json 84 | { 85 | "filesystem": { 86 | "command": "npx", 87 | "args": ["-y", "@modelcontextprotocol/server-filesystem", "/Users/username/Desktop", "/path/to/other/allowed/dir"] 88 | } 89 | } 90 | ``` 91 | 92 | ## License 93 | 94 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 95 | -------------------------------------------------------------------------------- /src/filesystem/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | ToolSchema, 9 | } from "@modelcontextprotocol/sdk/types.js"; 10 | import fs from "fs/promises"; 11 | import path from "path"; 12 | import os from 'os'; 13 | import { z } from "zod"; 14 | import { zodToJsonSchema } from "zod-to-json-schema"; 15 | 16 | // Command line argument parsing 17 | const args = process.argv.slice(2); 18 | if (args.length === 0) { 19 | console.error("Usage: mcp-server-filesystem [additional-directories...]"); 20 | process.exit(1); 21 | } 22 | 23 | // Normalize all paths consistently 24 | function normalizePath(p: string): string { 25 | return 
path.normalize(p).toLowerCase(); 26 | } 27 | 28 | function expandHome(filepath: string): string { 29 | if (filepath.startsWith('~/') || filepath === '~') { 30 | return path.join(os.homedir(), filepath.slice(1)); 31 | } 32 | return filepath; 33 | } 34 | 35 | // Store allowed directories in normalized form 36 | const allowedDirectories = args.map(dir => 37 | normalizePath(path.resolve(expandHome(dir))) 38 | ); 39 | 40 | // Validate that all directories exist and are accessible 41 | await Promise.all(args.map(async (dir) => { 42 | try { 43 | const stats = await fs.stat(dir); 44 | if (!stats.isDirectory()) { 45 | console.error(`Error: ${dir} is not a directory`); 46 | process.exit(1); 47 | } 48 | } catch (error) { 49 | console.error(`Error accessing directory ${dir}:`, error); 50 | process.exit(1); 51 | } 52 | })); 53 | 54 | // Security utilities 55 | async function validatePath(requestedPath: string): Promise { 56 | const expandedPath = expandHome(requestedPath); 57 | const absolute = path.isAbsolute(expandedPath) 58 | ? 
path.resolve(expandedPath) 59 | : path.resolve(process.cwd(), expandedPath); 60 | 61 | const normalizedRequested = normalizePath(absolute); 62 | 63 | // Check if path is within allowed directories 64 | const isAllowed = allowedDirectories.some(dir => normalizedRequested.startsWith(dir)); 65 | if (!isAllowed) { 66 | throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`); 67 | } 68 | 69 | // Handle symlinks by checking their real path 70 | try { 71 | const realPath = await fs.realpath(absolute); 72 | const normalizedReal = normalizePath(realPath); 73 | const isRealPathAllowed = allowedDirectories.some(dir => normalizedReal.startsWith(dir)); 74 | if (!isRealPathAllowed) { 75 | throw new Error("Access denied - symlink target outside allowed directories"); 76 | } 77 | return realPath; 78 | } catch (error) { 79 | // For new files that don't exist yet, verify parent directory 80 | const parentDir = path.dirname(absolute); 81 | try { 82 | const realParentPath = await fs.realpath(parentDir); 83 | const normalizedParent = normalizePath(realParentPath); 84 | const isParentAllowed = allowedDirectories.some(dir => normalizedParent.startsWith(dir)); 85 | if (!isParentAllowed) { 86 | throw new Error("Access denied - parent directory outside allowed directories"); 87 | } 88 | return absolute; 89 | } catch { 90 | throw new Error(`Parent directory does not exist: ${parentDir}`); 91 | } 92 | } 93 | } 94 | 95 | // Schema definitions 96 | const ReadFileArgsSchema = z.object({ 97 | path: z.string(), 98 | }); 99 | 100 | const ReadMultipleFilesArgsSchema = z.object({ 101 | paths: z.array(z.string()), 102 | }); 103 | 104 | const WriteFileArgsSchema = z.object({ 105 | path: z.string(), 106 | content: z.string(), 107 | }); 108 | 109 | const CreateDirectoryArgsSchema = z.object({ 110 | path: z.string(), 111 | }); 112 | 113 | const ListDirectoryArgsSchema = z.object({ 114 | path: z.string(), 115 | }); 116 | 117 | const 
MoveFileArgsSchema = z.object({ 118 | source: z.string(), 119 | destination: z.string(), 120 | }); 121 | 122 | const SearchFilesArgsSchema = z.object({ 123 | path: z.string(), 124 | pattern: z.string(), 125 | }); 126 | 127 | const GetFileInfoArgsSchema = z.object({ 128 | path: z.string(), 129 | }); 130 | 131 | const ToolInputSchema = ToolSchema.shape.inputSchema; 132 | type ToolInput = z.infer; 133 | 134 | interface FileInfo { 135 | size: number; 136 | created: Date; 137 | modified: Date; 138 | accessed: Date; 139 | isDirectory: boolean; 140 | isFile: boolean; 141 | permissions: string; 142 | } 143 | 144 | // Server setup 145 | const server = new Server( 146 | { 147 | name: "secure-filesystem-server", 148 | version: "0.2.0", 149 | }, 150 | { 151 | capabilities: { 152 | tools: {}, 153 | }, 154 | }, 155 | ); 156 | 157 | // Tool implementations 158 | async function getFileStats(filePath: string): Promise { 159 | const stats = await fs.stat(filePath); 160 | return { 161 | size: stats.size, 162 | created: stats.birthtime, 163 | modified: stats.mtime, 164 | accessed: stats.atime, 165 | isDirectory: stats.isDirectory(), 166 | isFile: stats.isFile(), 167 | permissions: stats.mode.toString(8).slice(-3), 168 | }; 169 | } 170 | 171 | async function searchFiles( 172 | rootPath: string, 173 | pattern: string, 174 | ): Promise { 175 | const results: string[] = []; 176 | 177 | async function search(currentPath: string) { 178 | const entries = await fs.readdir(currentPath, { withFileTypes: true }); 179 | 180 | for (const entry of entries) { 181 | const fullPath = path.join(currentPath, entry.name); 182 | 183 | try { 184 | // Validate each path before processing 185 | await validatePath(fullPath); 186 | 187 | if (entry.name.toLowerCase().includes(pattern.toLowerCase())) { 188 | results.push(fullPath); 189 | } 190 | 191 | if (entry.isDirectory()) { 192 | await search(fullPath); 193 | } 194 | } catch (error) { 195 | // Skip invalid paths during search 196 | continue; 197 | } 198 | } 
199 | } 200 | 201 | await search(rootPath); 202 | return results; 203 | } 204 | 205 | // Tool handlers 206 | server.setRequestHandler(ListToolsRequestSchema, async () => { 207 | return { 208 | tools: [ 209 | { 210 | name: "read_file", 211 | description: 212 | "Read the complete contents of a file from the file system. " + 213 | "Handles various text encodings and provides detailed error messages " + 214 | "if the file cannot be read. Use this tool when you need to examine " + 215 | "the contents of a single file. Only works within allowed directories.", 216 | inputSchema: zodToJsonSchema(ReadFileArgsSchema) as ToolInput, 217 | }, 218 | { 219 | name: "read_multiple_files", 220 | description: 221 | "Read the contents of multiple files simultaneously. This is more " + 222 | "efficient than reading files one by one when you need to analyze " + 223 | "or compare multiple files. Each file's content is returned with its " + 224 | "path as a reference. Failed reads for individual files won't stop " + 225 | "the entire operation. Only works within allowed directories.", 226 | inputSchema: zodToJsonSchema(ReadMultipleFilesArgsSchema) as ToolInput, 227 | }, 228 | { 229 | name: "write_file", 230 | description: 231 | "Create a new file or completely overwrite an existing file with new content. " + 232 | "Use with caution as it will overwrite existing files without warning. " + 233 | "Handles text content with proper encoding. Only works within allowed directories.", 234 | inputSchema: zodToJsonSchema(WriteFileArgsSchema) as ToolInput, 235 | }, 236 | { 237 | name: "create_directory", 238 | description: 239 | "Create a new directory or ensure a directory exists. Can create multiple " + 240 | "nested directories in one operation. If the directory already exists, " + 241 | "this operation will succeed silently. Perfect for setting up directory " + 242 | "structures for projects or ensuring required paths exist. 
Only works within allowed directories.", 243 | inputSchema: zodToJsonSchema(CreateDirectoryArgsSchema) as ToolInput, 244 | }, 245 | { 246 | name: "list_directory", 247 | description: 248 | "Get a detailed listing of all files and directories in a specified path. " + 249 | "Results clearly distinguish between files and directories with [FILE] and [DIR] " + 250 | "prefixes. This tool is essential for understanding directory structure and " + 251 | "finding specific files within a directory. Only works within allowed directories.", 252 | inputSchema: zodToJsonSchema(ListDirectoryArgsSchema) as ToolInput, 253 | }, 254 | { 255 | name: "move_file", 256 | description: 257 | "Move or rename files and directories. Can move files between directories " + 258 | "and rename them in a single operation. If the destination exists, the " + 259 | "operation will fail. Works across different directories and can be used " + 260 | "for simple renaming within the same directory. Both source and destination must be within allowed directories.", 261 | inputSchema: zodToJsonSchema(MoveFileArgsSchema) as ToolInput, 262 | }, 263 | { 264 | name: "search_files", 265 | description: 266 | "Recursively search for files and directories matching a pattern. " + 267 | "Searches through all subdirectories from the starting path. The search " + 268 | "is case-insensitive and matches partial names. Returns full paths to all " + 269 | "matching items. Great for finding files when you don't know their exact location. " + 270 | "Only searches within allowed directories.", 271 | inputSchema: zodToJsonSchema(SearchFilesArgsSchema) as ToolInput, 272 | }, 273 | { 274 | name: "get_file_info", 275 | description: 276 | "Retrieve detailed metadata about a file or directory. Returns comprehensive " + 277 | "information including size, creation time, last modified time, permissions, " + 278 | "and type. This tool is perfect for understanding file characteristics " + 279 | "without reading the actual content. 
Only works within allowed directories.", 280 | inputSchema: zodToJsonSchema(GetFileInfoArgsSchema) as ToolInput, 281 | }, 282 | { 283 | name: "list_allowed_directories", 284 | description: 285 | "Returns the list of directories that this server is allowed to access. " + 286 | "Use this to understand which directories are available before trying to access files.", 287 | inputSchema: { 288 | type: "object", 289 | properties: {}, 290 | required: [], 291 | }, 292 | }, 293 | ], 294 | }; 295 | }); 296 | 297 | 298 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 299 | try { 300 | const { name, arguments: args } = request.params; 301 | 302 | switch (name) { 303 | case "read_file": { 304 | const parsed = ReadFileArgsSchema.safeParse(args); 305 | if (!parsed.success) { 306 | throw new Error(`Invalid arguments for read_file: ${parsed.error}`); 307 | } 308 | const validPath = await validatePath(parsed.data.path); 309 | const content = await fs.readFile(validPath, "utf-8"); 310 | return { 311 | content: [{ type: "text", text: content }], 312 | }; 313 | } 314 | 315 | case "read_multiple_files": { 316 | const parsed = ReadMultipleFilesArgsSchema.safeParse(args); 317 | if (!parsed.success) { 318 | throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`); 319 | } 320 | const results = await Promise.all( 321 | parsed.data.paths.map(async (filePath: string) => { 322 | try { 323 | const validPath = await validatePath(filePath); 324 | const content = await fs.readFile(validPath, "utf-8"); 325 | return `${filePath}:\n${content}\n`; 326 | } catch (error) { 327 | const errorMessage = error instanceof Error ? 
error.message : String(error); 328 | return `${filePath}: Error - ${errorMessage}`; 329 | } 330 | }), 331 | ); 332 | return { 333 | content: [{ type: "text", text: results.join("\n---\n") }], 334 | }; 335 | } 336 | 337 | case "write_file": { 338 | const parsed = WriteFileArgsSchema.safeParse(args); 339 | if (!parsed.success) { 340 | throw new Error(`Invalid arguments for write_file: ${parsed.error}`); 341 | } 342 | const validPath = await validatePath(parsed.data.path); 343 | await fs.writeFile(validPath, parsed.data.content, "utf-8"); 344 | return { 345 | content: [{ type: "text", text: `Successfully wrote to ${parsed.data.path}` }], 346 | }; 347 | } 348 | 349 | case "create_directory": { 350 | const parsed = CreateDirectoryArgsSchema.safeParse(args); 351 | if (!parsed.success) { 352 | throw new Error(`Invalid arguments for create_directory: ${parsed.error}`); 353 | } 354 | const validPath = await validatePath(parsed.data.path); 355 | await fs.mkdir(validPath, { recursive: true }); 356 | return { 357 | content: [{ type: "text", text: `Successfully created directory ${parsed.data.path}` }], 358 | }; 359 | } 360 | 361 | case "list_directory": { 362 | const parsed = ListDirectoryArgsSchema.safeParse(args); 363 | if (!parsed.success) { 364 | throw new Error(`Invalid arguments for list_directory: ${parsed.error}`); 365 | } 366 | const validPath = await validatePath(parsed.data.path); 367 | const entries = await fs.readdir(validPath, { withFileTypes: true }); 368 | const formatted = entries 369 | .map((entry) => `${entry.isDirectory() ? 
"[DIR]" : "[FILE]"} ${entry.name}`) 370 | .join("\n"); 371 | return { 372 | content: [{ type: "text", text: formatted }], 373 | }; 374 | } 375 | 376 | case "move_file": { 377 | const parsed = MoveFileArgsSchema.safeParse(args); 378 | if (!parsed.success) { 379 | throw new Error(`Invalid arguments for move_file: ${parsed.error}`); 380 | } 381 | const validSourcePath = await validatePath(parsed.data.source); 382 | const validDestPath = await validatePath(parsed.data.destination); 383 | await fs.rename(validSourcePath, validDestPath); 384 | return { 385 | content: [{ type: "text", text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }], 386 | }; 387 | } 388 | 389 | case "search_files": { 390 | const parsed = SearchFilesArgsSchema.safeParse(args); 391 | if (!parsed.success) { 392 | throw new Error(`Invalid arguments for search_files: ${parsed.error}`); 393 | } 394 | const validPath = await validatePath(parsed.data.path); 395 | const results = await searchFiles(validPath, parsed.data.pattern); 396 | return { 397 | content: [{ type: "text", text: results.length > 0 ? results.join("\n") : "No matches found" }], 398 | }; 399 | } 400 | 401 | case "get_file_info": { 402 | const parsed = GetFileInfoArgsSchema.safeParse(args); 403 | if (!parsed.success) { 404 | throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`); 405 | } 406 | const validPath = await validatePath(parsed.data.path); 407 | const info = await getFileStats(validPath); 408 | return { 409 | content: [{ type: "text", text: Object.entries(info) 410 | .map(([key, value]) => `${key}: ${value}`) 411 | .join("\n") }], 412 | }; 413 | } 414 | 415 | case "list_allowed_directories": { 416 | return { 417 | content: [{ 418 | type: "text", 419 | text: `Allowed directories:\n${allowedDirectories.join('\n')}` 420 | }], 421 | }; 422 | } 423 | 424 | default: 425 | throw new Error(`Unknown tool: ${name}`); 426 | } 427 | } catch (error) { 428 | const errorMessage = error instanceof Error ? 
error.message : String(error); 429 | return { 430 | content: [{ type: "text", text: `Error: ${errorMessage}` }], 431 | isError: true, 432 | }; 433 | } 434 | }); 435 | 436 | // Start server 437 | async function runServer() { 438 | const transport = new StdioServerTransport(); 439 | await server.connect(transport); 440 | console.error("Secure MCP Filesystem Server running on stdio"); 441 | console.error("Allowed directories:", allowedDirectories); 442 | } 443 | 444 | runServer().catch((error) => { 445 | console.error("Fatal error running server:", error); 446 | process.exit(1); 447 | }); -------------------------------------------------------------------------------- /src/filesystem/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-filesystem", 3 | "version": "0.5.1", 4 | "description": "MCP server for filesystem access", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-filesystem": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0", 23 | "glob": "^10.3.10", 24 | "zod-to-json-schema": "^3.23.5" 25 | }, 26 | "devDependencies": { 27 | "@types/node": "^20.11.0", 28 | "shx": "^0.3.4", 29 | "typescript": "^5.3.3" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/filesystem/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": ".", 6 | "moduleResolution": "NodeNext", 7 | 
"module": "NodeNext" 8 | }, 9 | "include": [ 10 | "./**/*.ts" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /src/gdrive/README.md: -------------------------------------------------------------------------------- 1 | # Google Drive server 2 | 3 | This MCP server integrates with Google Drive to allow listing, reading, and searching over files. 4 | 5 | ## Components 6 | 7 | ### Tools 8 | 9 | - **search** 10 | - Search for files in Google Drive 11 | - Input: `query` (string): Search query 12 | - Returns file names and MIME types of matching files 13 | 14 | ### Resources 15 | 16 | The server provides access to Google Drive files: 17 | 18 | - **Files** (`gdrive:///`) 19 | - Supports all file types 20 | - Google Workspace files are automatically exported: 21 | - Docs → Markdown 22 | - Sheets → CSV 23 | - Presentations → Plain text 24 | - Drawings → PNG 25 | - Other files are provided in their native format 26 | 27 | ## Getting started 28 | 29 | 1. [Create a new Google Cloud project](https://console.cloud.google.com/projectcreate) 30 | 2. [Enable the Google Drive API](https://console.cloud.google.com/workspace-api/products) 31 | 3. [Configure an OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent) ("internal" is fine for testing) 32 | 4. Add OAuth scope `https://www.googleapis.com/auth/drive.readonly` 33 | 5. [Create an OAuth Client ID](https://console.cloud.google.com/apis/credentials/oauthclient) for application type "Desktop App" 34 | 6. Download the JSON file of your client's OAuth keys 35 | 7. Rename the key file to `gcp-oauth.keys.json` and place into the root of this repo (i.e. `servers/gcp-oauth.keys.json`) 36 | 37 | Make sure to build the server with either `npm run build` or `npm run watch`. 38 | 39 | ### Authentication 40 | 41 | To authenticate and save credentials: 42 | 43 | 1. Run the server with the `auth` argument: `node ./dist auth` 44 | 2. 
This will open an authentication flow in your system browser 45 | 3. Complete the authentication process 46 | 4. Credentials will be saved in the root of this repo (i.e. `servers/.gdrive-server-credentials.json`) 47 | 48 | ### Usage with Desktop App 49 | 50 | To integrate this server with the desktop app, add the following to your app's server configuration: 51 | 52 | ```json 53 | { 54 | "gdrive": { 55 | "command": "npx", 56 | "args": ["-y", "@modelcontextprotocol/server-gdrive"] 57 | } 58 | } 59 | ``` 60 | 61 | ## License 62 | 63 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 64 | -------------------------------------------------------------------------------- /src/gdrive/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { authenticate } from "@google-cloud/local-auth"; 4 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 5 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 6 | import { 7 | CallToolRequestSchema, 8 | ListResourcesRequestSchema, 9 | ListToolsRequestSchema, 10 | ReadResourceRequestSchema, 11 | } from "@modelcontextprotocol/sdk/types.js"; 12 | import fs from "fs"; 13 | import { google } from "googleapis"; 14 | import path from "path"; 15 | 16 | const drive = google.drive("v3"); 17 | 18 | const server = new Server( 19 | { 20 | name: "example-servers/gdrive", 21 | version: "0.1.0", 22 | }, 23 | { 24 | capabilities: { 25 | resources: {}, 26 | tools: {}, 27 | }, 28 | }, 29 | ); 30 | 31 | server.setRequestHandler(ListResourcesRequestSchema, async (request) => { 32 | const pageSize = 10; 33 | const params: any = { 34 | pageSize, 35 | fields: "nextPageToken, files(id, name, mimeType)", 36 | }; 37 | 38 | if 
(request.params?.cursor) { 39 | params.pageToken = request.params.cursor; 40 | } 41 | 42 | const res = await drive.files.list(params); 43 | const files = res.data.files!; 44 | 45 | return { 46 | resources: files.map((file) => ({ 47 | uri: `gdrive:///${file.id}`, 48 | mimeType: file.mimeType, 49 | name: file.name, 50 | })), 51 | nextCursor: res.data.nextPageToken, 52 | }; 53 | }); 54 | 55 | server.setRequestHandler(ReadResourceRequestSchema, async (request) => { 56 | const fileId = request.params.uri.replace("gdrive:///", ""); 57 | 58 | // First get file metadata to check mime type 59 | const file = await drive.files.get({ 60 | fileId, 61 | fields: "mimeType", 62 | }); 63 | 64 | // For Google Docs/Sheets/etc we need to export 65 | if (file.data.mimeType?.startsWith("application/vnd.google-apps")) { 66 | let exportMimeType: string; 67 | switch (file.data.mimeType) { 68 | case "application/vnd.google-apps.document": 69 | exportMimeType = "text/markdown"; 70 | break; 71 | case "application/vnd.google-apps.spreadsheet": 72 | exportMimeType = "text/csv"; 73 | break; 74 | case "application/vnd.google-apps.presentation": 75 | exportMimeType = "text/plain"; 76 | break; 77 | case "application/vnd.google-apps.drawing": 78 | exportMimeType = "image/png"; 79 | break; 80 | default: 81 | exportMimeType = "text/plain"; 82 | } 83 | 84 | const res = await drive.files.export( 85 | { fileId, mimeType: exportMimeType }, 86 | { responseType: "text" }, 87 | ); 88 | 89 | return { 90 | contents: [ 91 | { 92 | uri: request.params.uri, 93 | mimeType: exportMimeType, 94 | text: res.data, 95 | }, 96 | ], 97 | }; 98 | } 99 | 100 | // For regular files download content 101 | const res = await drive.files.get( 102 | { fileId, alt: "media" }, 103 | { responseType: "arraybuffer" }, 104 | ); 105 | const mimeType = file.data.mimeType || "application/octet-stream"; 106 | if (mimeType.startsWith("text/") || mimeType === "application/json") { 107 | return { 108 | contents: [ 109 | { 110 | uri: 
request.params.uri, 111 | mimeType: mimeType, 112 | text: Buffer.from(res.data as ArrayBuffer).toString("utf-8"), 113 | }, 114 | ], 115 | }; 116 | } else { 117 | return { 118 | contents: [ 119 | { 120 | uri: request.params.uri, 121 | mimeType: mimeType, 122 | blob: Buffer.from(res.data as ArrayBuffer).toString("base64"), 123 | }, 124 | ], 125 | }; 126 | } 127 | }); 128 | 129 | server.setRequestHandler(ListToolsRequestSchema, async () => { 130 | return { 131 | tools: [ 132 | { 133 | name: "search", 134 | description: "Search for files in Google Drive", 135 | inputSchema: { 136 | type: "object", 137 | properties: { 138 | query: { 139 | type: "string", 140 | description: "Search query", 141 | }, 142 | }, 143 | required: ["query"], 144 | }, 145 | }, 146 | ], 147 | }; 148 | }); 149 | 150 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 151 | if (request.params.name === "search") { 152 | const userQuery = request.params.arguments?.query as string; 153 | const escapedQuery = userQuery.replace(/\\/g, "\\\\").replace(/'/g, "\\'"); 154 | const formattedQuery = `fullText contains '${escapedQuery}'`; 155 | 156 | const res = await drive.files.list({ 157 | q: formattedQuery, 158 | pageSize: 10, 159 | fields: "files(id, name, mimeType, modifiedTime, size)", 160 | }); 161 | 162 | const fileList = res.data.files 163 | ?.map((file: any) => `${file.name} (${file.mimeType})`) 164 | .join("\n"); 165 | return { 166 | content: [ 167 | { 168 | type: "text", 169 | text: `Found ${res.data.files?.length ?? 
0} files:\n${fileList}`, 170 | }, 171 | ], 172 | isError: false, 173 | }; 174 | } 175 | throw new Error("Tool not found"); 176 | }); 177 | 178 | const credentialsPath = path.join( 179 | path.dirname(new URL(import.meta.url).pathname), 180 | "../../../.gdrive-server-credentials.json", 181 | ); 182 | 183 | async function authenticateAndSaveCredentials() { 184 | console.log("Launching auth flow…"); 185 | const auth = await authenticate({ 186 | keyfilePath: path.join( 187 | path.dirname(new URL(import.meta.url).pathname), 188 | "../../../gcp-oauth.keys.json", 189 | ), 190 | scopes: ["https://www.googleapis.com/auth/drive.readonly"], 191 | }); 192 | fs.writeFileSync(credentialsPath, JSON.stringify(auth.credentials)); 193 | console.log("Credentials saved. You can now run the server."); 194 | } 195 | 196 | async function loadCredentialsAndRunServer() { 197 | if (!fs.existsSync(credentialsPath)) { 198 | console.error( 199 | "Credentials not found. Please run with 'auth' argument first.", 200 | ); 201 | process.exit(1); 202 | } 203 | 204 | const credentials = JSON.parse(fs.readFileSync(credentialsPath, "utf-8")); 205 | const auth = new google.auth.OAuth2(); 206 | auth.setCredentials(credentials); 207 | google.options({ auth }); 208 | 209 | console.log("Credentials loaded. 
Starting server."); 210 | const transport = new StdioServerTransport(); 211 | await server.connect(transport); 212 | } 213 | 214 | if (process.argv[2] === "auth") { 215 | authenticateAndSaveCredentials().catch(console.error); 216 | } else { 217 | loadCredentialsAndRunServer().catch(console.error); 218 | } 219 | -------------------------------------------------------------------------------- /src/gdrive/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-gdrive", 3 | "version": "0.5.1", 4 | "description": "MCP server for interacting with Google Drive", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-gdrive": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@google-cloud/local-auth": "^3.0.1", 23 | "@modelcontextprotocol/sdk": "0.5.0", 24 | "googleapis": "^144.0.0" 25 | }, 26 | "devDependencies": { 27 | "@types/node": "^22.9.3", 28 | "shx": "^0.3.4", 29 | "typescript": "^5.6.2" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/gdrive/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 
6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/git/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .venv 3 | -------------------------------------------------------------------------------- /src/git/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /src/git/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Anthropic, PBC. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
8 | -------------------------------------------------------------------------------- /src/git/README.md: -------------------------------------------------------------------------------- 1 | # mcp-server-git: A git MCP server 2 | 3 | ## Overview 4 | 5 | A Model Context Protocol server for Git repository interaction and automation. This server provides tools to read, search, and manipulate Git repositories via Large Language Models. 6 | 7 | Please note that mcp-server-git is currently in early development. The functionality and available tools are subject to change and expansion as we continue to develop and improve the server. 8 | 9 | ### Tools 10 | 11 | 1. `git_status` 12 | - Shows the working tree status 13 | - Input: 14 | - `repo_path` (string): Path to Git repository 15 | - Returns: Current status of working directory as text output 16 | 17 | 2. `git_diff_unstaged` 18 | - Shows changes in working directory not yet staged 19 | - Input: 20 | - `repo_path` (string): Path to Git repository 21 | - Returns: Diff output of unstaged changes 22 | 23 | 3. `git_diff_staged` 24 | - Shows changes that are staged for commit 25 | - Input: 26 | - `repo_path` (string): Path to Git repository 27 | - Returns: Diff output of staged changes 28 | 29 | 4. `git_commit` 30 | - Records changes to the repository 31 | - Inputs: 32 | - `repo_path` (string): Path to Git repository 33 | - `message` (string): Commit message 34 | - Returns: Confirmation with new commit hash 35 | 36 | 5. `git_add` 37 | - Adds file contents to the staging area 38 | - Inputs: 39 | - `repo_path` (string): Path to Git repository 40 | - `files` (string[]): Array of file paths to stage 41 | - Returns: Confirmation of staged files 42 | 43 | 6. `git_reset` 44 | - Unstages all staged changes 45 | - Input: 46 | - `repo_path` (string): Path to Git repository 47 | - Returns: Confirmation of reset operation 48 | 49 | 7. 
`git_log` 50 | - Shows the commit logs 51 | - Inputs: 52 | - `repo_path` (string): Path to Git repository 53 | - `max_count` (number, optional): Maximum number of commits to show (default: 10) 54 | - Returns: Array of commit entries with hash, author, date, and message 55 | 56 | 57 | ## Installation 58 | 59 | ### Using uv (recommended) 60 | 61 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 62 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-git*. 63 | 64 | ### Using PIP 65 | 66 | Alternatively you can install `mcp-server-git` via pip: 67 | 68 | ``` 69 | pip install mcp-server-git 70 | ``` 71 | 72 | After installation, you can run it as a script using: 73 | 74 | ``` 75 | python -m mcp_server_git 76 | ``` 77 | 78 | ## Configuration 79 | 80 | ### Usage with Claude Desktop 81 | 82 | Add this to your `claude_desktop_config.json`: 83 | 84 |
85 | <summary>Using uvx</summary> 86 | 87 | ```json 88 | "mcpServers": { 89 | "git": { 90 | "command": "uvx", 91 | "args": ["mcp-server-git", "--repository", "path/to/git/repo"] 92 | } 93 | } 94 | ``` 95 | </details>
96 | 97 | <details>
98 | <summary>Using pip installation</summary> 99 | 100 | ```json 101 | "mcpServers": { 102 | "git": { 103 | "command": "python", 104 | "args": ["-m", "mcp_server_git", "--repository", "path/to/git/repo"] 105 | } 106 | } 107 | ``` 108 | </details>
109 | 110 | ### Usage with [Zed](https://github.com/zed-industries/zed) 111 | 112 | Add to your Zed settings.json: 113 | 114 | <details>
115 | <summary>Using uvx</summary> 116 | 117 | ```json 118 | "context_servers": { 119 | "mcp-server-git": { 120 | "command": "uvx", 121 | "args": ["mcp-server-git"] 122 | } 123 | }, 124 | ``` 125 | </details>
126 | 127 | <details>
128 | Using pip installation 129 | 130 | ```json 131 | "context_servers": { 132 | "mcp-server-git": { 133 | "command": "python", 134 | "args": ["-m", "mcp_server_git"] 135 | } 136 | }, 137 | ``` 138 |
139 | 140 | ## Debugging 141 | 142 | You can use the MCP inspector to debug the server. For uvx installations: 143 | 144 | ``` 145 | npx @modelcontextprotocol/inspector uvx mcp-server-git 146 | ``` 147 | 148 | Or if you've installed the package in a specific directory or are developing on it: 149 | 150 | ``` 151 | cd path/to/servers/src/git 152 | npx @modelcontextprotocol/inspector uv run mcp-server-git 153 | ``` 154 | 155 | ## License 156 | 157 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 158 | -------------------------------------------------------------------------------- /src/git/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-git" 3 | version = "0.5.1" 4 | description = "A Model Context Protocol server providing tools to read, search, and manipulate Git repositories programmatically via LLMs" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | authors = [{ name = "Anthropic, PBC." 
}] 8 | maintainers = [{ name = "David Soria Parra", email = "davidsp@anthropic.com" }] 9 | keywords = ["git", "mcp", "llm", "automation"] 10 | license = { text = "MIT" } 11 | classifiers = [ 12 | "Development Status :: 4 - Beta", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Programming Language :: Python :: 3", 16 | "Programming Language :: Python :: 3.10", 17 | ] 18 | dependencies = [ 19 | "click>=8.1.7", 20 | "gitpython>=3.1.43", 21 | "mcp>=0.6.0", 22 | "pydantic>=2.0.0", 23 | ] 24 | 25 | [project.scripts] 26 | mcp-server-git = "mcp_server_git:main" 27 | 28 | [build-system] 29 | requires = ["hatchling"] 30 | build-backend = "hatchling.build" 31 | 32 | [tool.uv] 33 | dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3"] 34 | -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/__init__.py: -------------------------------------------------------------------------------- 1 | import click 2 | from functools import partial 3 | from pathlib import Path 4 | import logging 5 | import sys 6 | from .server import serve 7 | 8 | @click.command() 9 | @click.option("--repository", "-r", type=Path, help="Git repository path") 10 | @click.option("-v", "--verbose", count=True) 11 | def main(repository: Path | None, verbose: bool) -> None: 12 | """MCP Git Server - Git functionality for MCP""" 13 | import asyncio 14 | 15 | logging_level = logging.WARN 16 | if verbose == 1: 17 | logging_level = logging.INFO 18 | elif verbose >= 2: 19 | logging_level = logging.DEBUG 20 | 21 | logging.basicConfig(level=logging_level, stream=sys.stderr) 22 | asyncio.run(serve(repository)) 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/__main__.py: -------------------------------------------------------------------------------- 1 | # __main__.py 2 | 3 | from mcp_server_git import main 4 
| 5 | main() 6 | -------------------------------------------------------------------------------- /src/git/src/mcp_server_git/server.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | from pathlib import Path 4 | from typing import Sequence 5 | from mcp.server import Server 6 | from mcp.server.session import ServerSession 7 | from mcp.server.stdio import stdio_server 8 | from mcp.types import ( 9 | ClientCapabilities, 10 | TextContent, 11 | Tool, 12 | ListRootsResult, 13 | RootsCapability, 14 | ) 15 | from enum import Enum 16 | import git 17 | from pydantic import BaseModel, Field 18 | from typing import List, Optional 19 | 20 | class GitStatus(BaseModel): 21 | repo_path: str 22 | 23 | class GitDiffUnstaged(BaseModel): 24 | repo_path: str 25 | 26 | class GitDiffStaged(BaseModel): 27 | repo_path: str 28 | 29 | class GitCommit(BaseModel): 30 | repo_path: str 31 | message: str 32 | 33 | class GitAdd(BaseModel): 34 | repo_path: str 35 | files: List[str] 36 | 37 | class GitReset(BaseModel): 38 | repo_path: str 39 | 40 | class GitLog(BaseModel): 41 | repo_path: str 42 | max_count: int = 10 43 | 44 | class GitTools(str, Enum): 45 | STATUS = "git_status" 46 | DIFF_UNSTAGED = "git_diff_unstaged" 47 | DIFF_STAGED = "git_diff_staged" 48 | COMMIT = "git_commit" 49 | ADD = "git_add" 50 | RESET = "git_reset" 51 | LOG = "git_log" 52 | 53 | def git_status(repo: git.Repo) -> str: 54 | return repo.git.status() 55 | 56 | def git_diff_unstaged(repo: git.Repo) -> str: 57 | return repo.git.diff() 58 | 59 | def git_diff_staged(repo: git.Repo) -> str: 60 | return repo.git.diff("--cached") 61 | 62 | def git_commit(repo: git.Repo, message: str) -> str: 63 | commit = repo.index.commit(message) 64 | return f"Changes committed successfully with hash {commit.hexsha}" 65 | 66 | def git_add(repo: git.Repo, files: list[str]) -> str: 67 | repo.index.add(files) 68 | return "Files staged successfully" 69 | 70 | def 
git_reset(repo: git.Repo) -> str: 71 | repo.index.reset() 72 | return "All staged changes reset" 73 | 74 | def git_log(repo: git.Repo, max_count: int = 10) -> list[str]: 75 | commits = list(repo.iter_commits(max_count=max_count)) 76 | log = [] 77 | for commit in commits: 78 | log.append( 79 | f"Commit: {commit.hexsha}\n" 80 | f"Author: {commit.author}\n" 81 | f"Date: {commit.authored_datetime}\n" 82 | f"Message: {commit.message}\n" 83 | ) 84 | return log 85 | 86 | async def serve(repository: Path | None) -> None: 87 | logger = logging.getLogger(__name__) 88 | 89 | if repository is not None: 90 | try: 91 | git.Repo(repository) 92 | logger.info(f"Using repository at {repository}") 93 | except git.InvalidGitRepositoryError: 94 | logger.error(f"{repository} is not a valid Git repository") 95 | return 96 | 97 | server = Server("mcp-git") 98 | 99 | @server.list_tools() 100 | async def list_tools() -> list[Tool]: 101 | return [ 102 | Tool( 103 | name=GitTools.STATUS, 104 | description="Shows the working tree status", 105 | inputSchema=GitStatus.schema(), 106 | ), 107 | Tool( 108 | name=GitTools.DIFF_UNSTAGED, 109 | description="Shows changes in the working directory that are not yet staged", 110 | inputSchema=GitDiffUnstaged.schema(), 111 | ), 112 | Tool( 113 | name=GitTools.DIFF_STAGED, 114 | description="Shows changes that are staged for commit", 115 | inputSchema=GitDiffStaged.schema(), 116 | ), 117 | Tool( 118 | name=GitTools.COMMIT, 119 | description="Records changes to the repository", 120 | inputSchema=GitCommit.schema(), 121 | ), 122 | Tool( 123 | name=GitTools.ADD, 124 | description="Adds file contents to the staging area", 125 | inputSchema=GitAdd.schema(), 126 | ), 127 | Tool( 128 | name=GitTools.RESET, 129 | description="Unstages all staged changes", 130 | inputSchema=GitReset.schema(), 131 | ), 132 | Tool( 133 | name=GitTools.LOG, 134 | description="Shows the commit logs", 135 | inputSchema=GitLog.schema(), 136 | ), 137 | ] 138 | 139 | async def list_repos() 
-> Sequence[str]: 140 | async def by_roots() -> Sequence[str]: 141 | if not isinstance(server.request_context.session, ServerSession): 142 | raise TypeError("server.request_context.session must be a ServerSession") 143 | 144 | if not server.request_context.session.check_client_capability( 145 | ClientCapabilities(roots=RootsCapability()) 146 | ): 147 | return [] 148 | 149 | roots_result: ListRootsResult = await server.request_context.session.list_roots() 150 | logger.debug(f"Roots result: {roots_result}") 151 | repo_paths = [] 152 | for root in roots_result.roots: 153 | path = root.uri.path 154 | try: 155 | git.Repo(path) 156 | repo_paths.append(str(path)) 157 | except git.InvalidGitRepositoryError: 158 | pass 159 | return repo_paths 160 | 161 | def by_commandline() -> Sequence[str]: 162 | return [str(repository)] if repository is not None else [] 163 | 164 | cmd_repos = by_commandline() 165 | root_repos = await by_roots() 166 | return [*root_repos, *cmd_repos] 167 | 168 | @server.call_tool() 169 | async def call_tool(name: str, arguments: dict) -> list[TextContent]: 170 | repo_path = Path(arguments["repo_path"]) 171 | repo = git.Repo(repo_path) 172 | 173 | match name: 174 | case GitTools.STATUS: 175 | status = git_status(repo) 176 | return [TextContent( 177 | type="text", 178 | text=f"Repository status:\n{status}" 179 | )] 180 | 181 | case GitTools.DIFF_UNSTAGED: 182 | diff = git_diff_unstaged(repo) 183 | return [TextContent( 184 | type="text", 185 | text=f"Unstaged changes:\n{diff}" 186 | )] 187 | 188 | case GitTools.DIFF_STAGED: 189 | diff = git_diff_staged(repo) 190 | return [TextContent( 191 | type="text", 192 | text=f"Staged changes:\n{diff}" 193 | )] 194 | 195 | case GitTools.COMMIT: 196 | result = git_commit(repo, arguments["message"]) 197 | return [TextContent( 198 | type="text", 199 | text=result 200 | )] 201 | 202 | case GitTools.ADD: 203 | result = git_add(repo, arguments["files"]) 204 | return [TextContent( 205 | type="text", 206 | text=result 207 | )] 
208 | 209 | case GitTools.RESET: 210 | result = git_reset(repo) 211 | return [TextContent( 212 | type="text", 213 | text=result 214 | )] 215 | 216 | case GitTools.LOG: 217 | log = git_log(repo, arguments.get("max_count", 10)) 218 | return [TextContent( 219 | type="text", 220 | text="Commit history:\n" + "\n".join(log) 221 | )] 222 | 223 | case _: 224 | raise ValueError(f"Unknown tool: {name}") 225 | 226 | options = server.create_initialization_options() 227 | async with stdio_server() as (read_stream, write_stream): 228 | await server.run(read_stream, write_stream, options, raise_exceptions=True) 229 | -------------------------------------------------------------------------------- /src/github/README.md: -------------------------------------------------------------------------------- 1 | # GitHub MCP Server 2 | 3 | MCP Server for the GitHub API, enabling file operations, repository management, and more. 4 | 5 | ### Features 6 | 7 | - **Automatic Branch Creation**: When creating/updating files or pushing changes, branches are automatically created if they don't exist 8 | - **Comprehensive Error Handling**: Clear error messages for common issues 9 | - **Git History Preservation**: Operations maintain proper Git history without force pushing 10 | - **Batch Operations**: Support for both single-file and multi-file operations 11 | 12 | 13 | ## Tools 14 | 15 | 1. `create_or_update_file` 16 | - Create or update a single file in a repository 17 | - Inputs: 18 | - `owner` (string): Repository owner (username or organization) 19 | - `repo` (string): Repository name 20 | - `path` (string): Path where to create/update the file 21 | - `content` (string): Content of the file 22 | - `message` (string): Commit message 23 | - `branch` (string): Branch to create/update the file in 24 | - `sha` (optional string): SHA of file being replaced (for updates) 25 | - Returns: File content and commit details 26 | 27 | 2. 
`push_files` 28 | - Push multiple files in a single commit 29 | - Inputs: 30 | - `owner` (string): Repository owner 31 | - `repo` (string): Repository name 32 | - `branch` (string): Branch to push to 33 | - `files` (array): Files to push, each with `path` and `content` 34 | - `message` (string): Commit message 35 | - Returns: Updated branch reference 36 | 37 | 3. `search_repositories` 38 | - Search for GitHub repositories 39 | - Inputs: 40 | - `query` (string): Search query 41 | - `page` (optional number): Page number for pagination 42 | - `perPage` (optional number): Results per page (max 100) 43 | - Returns: Repository search results 44 | 45 | 4. `create_repository` 46 | - Create a new GitHub repository 47 | - Inputs: 48 | - `name` (string): Repository name 49 | - `description` (optional string): Repository description 50 | - `private` (optional boolean): Whether repo should be private 51 | - `autoInit` (optional boolean): Initialize with README 52 | - Returns: Created repository details 53 | 54 | 5. `get_file_contents` 55 | - Get contents of a file or directory 56 | - Inputs: 57 | - `owner` (string): Repository owner 58 | - `repo` (string): Repository name 59 | - `path` (string): Path to file/directory 60 | - `branch` (optional string): Branch to get contents from 61 | - Returns: File/directory contents 62 | 63 | 6. `create_issue` 64 | - Create a new issue 65 | - Inputs: 66 | - `owner` (string): Repository owner 67 | - `repo` (string): Repository name 68 | - `title` (string): Issue title 69 | - `body` (optional string): Issue description 70 | - `assignees` (optional string[]): Usernames to assign 71 | - `labels` (optional string[]): Labels to add 72 | - `milestone` (optional number): Milestone number 73 | - Returns: Created issue details 74 | 75 | 7. 
`create_pull_request` 76 | - Create a new pull request 77 | - Inputs: 78 | - `owner` (string): Repository owner 79 | - `repo` (string): Repository name 80 | - `title` (string): PR title 81 | - `body` (optional string): PR description 82 | - `head` (string): Branch containing changes 83 | - `base` (string): Branch to merge into 84 | - `draft` (optional boolean): Create as draft PR 85 | - `maintainer_can_modify` (optional boolean): Allow maintainer edits 86 | - Returns: Created pull request details 87 | 88 | 8. `fork_repository` 89 | - Fork a repository 90 | - Inputs: 91 | - `owner` (string): Repository owner 92 | - `repo` (string): Repository name 93 | - `organization` (optional string): Organization to fork to 94 | - Returns: Forked repository details 95 | 96 | 9. `create_branch` 97 | - Create a new branch 98 | - Inputs: 99 | - `owner` (string): Repository owner 100 | - `repo` (string): Repository name 101 | - `branch` (string): Name for new branch 102 | - `from_branch` (optional string): Source branch (defaults to repo default) 103 | - Returns: Created branch reference 104 | 105 | ## Setup 106 | 107 | ### Personal Access Token 108 | [Create a GitHub Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) with appropriate permissions: 109 | - Go to [Personal access tokens](https://github.com/settings/tokens) (in GitHub Settings > Developer settings) 110 | - Select which repositories you'd like this token to have access to (Public, All, or Select) 111 | - Create a token with the `repo` scope ("Full control of private repositories") 112 | - Alternatively, if working only with public repositories, select only the `public_repo` scope 113 | - Copy the generated token 114 | 115 | ### Usage with Claude Desktop 116 | To use this with Claude Desktop, add the following to your `claude_desktop_config.json`: 117 | 118 | ```json 119 | { 120 | "github": { 121 | "command": "npx", 122 | "args": 
["-y", "@modelcontextprotocol/server-github"], 123 | "env": { 124 | "GITHUB_PERSONAL_ACCESS_TOKEN": "" 125 | } 126 | } 127 | } 128 | ``` 129 | 130 | ## License 131 | 132 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 133 | -------------------------------------------------------------------------------- /src/github/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-github", 3 | "version": "0.5.1", 4 | "description": "MCP server for using the GitHub API", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-github": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.6.0", 23 | "@types/node-fetch": "^2.6.12", 24 | "node-fetch": "^3.3.2", 25 | "zod-to-json-schema": "^3.23.5" 26 | }, 27 | "devDependencies": { 28 | "shx": "^0.3.4", 29 | "typescript": "^5.6.2" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/github/schemas.ts: -------------------------------------------------------------------------------- 1 | import { z } from 'zod'; 2 | 3 | // Base schemas for common types 4 | export const GitHubAuthorSchema = z.object({ 5 | name: z.string(), 6 | email: z.string(), 7 | date: z.string() 8 | }); 9 | 10 | // Repository related schemas 11 | export const GitHubOwnerSchema = z.object({ 12 | login: z.string(), 13 | id: z.number(), 14 | 
node_id: z.string(), 15 | avatar_url: z.string(), 16 | url: z.string(), 17 | html_url: z.string(), 18 | type: z.string() 19 | }); 20 | 21 | export const GitHubRepositorySchema = z.object({ 22 | id: z.number(), 23 | node_id: z.string(), 24 | name: z.string(), 25 | full_name: z.string(), 26 | private: z.boolean(), 27 | owner: GitHubOwnerSchema, 28 | html_url: z.string(), 29 | description: z.string().nullable(), 30 | fork: z.boolean(), 31 | url: z.string(), 32 | created_at: z.string(), 33 | updated_at: z.string(), 34 | pushed_at: z.string(), 35 | git_url: z.string(), 36 | ssh_url: z.string(), 37 | clone_url: z.string(), 38 | default_branch: z.string() 39 | }); 40 | 41 | // File content schemas 42 | export const GitHubFileContentSchema = z.object({ 43 | type: z.string(), 44 | encoding: z.string(), 45 | size: z.number(), 46 | name: z.string(), 47 | path: z.string(), 48 | content: z.string(), 49 | sha: z.string(), 50 | url: z.string(), 51 | git_url: z.string(), 52 | html_url: z.string(), 53 | download_url: z.string() 54 | }); 55 | 56 | export const GitHubDirectoryContentSchema = z.object({ 57 | type: z.string(), 58 | size: z.number(), 59 | name: z.string(), 60 | path: z.string(), 61 | sha: z.string(), 62 | url: z.string(), 63 | git_url: z.string(), 64 | html_url: z.string(), 65 | download_url: z.string().nullable() 66 | }); 67 | 68 | export const GitHubContentSchema = z.union([ 69 | GitHubFileContentSchema, 70 | z.array(GitHubDirectoryContentSchema) 71 | ]); 72 | 73 | // Operation schemas 74 | export const FileOperationSchema = z.object({ 75 | path: z.string(), 76 | content: z.string() 77 | }); 78 | 79 | // Tree and commit schemas 80 | export const GitHubTreeEntrySchema = z.object({ 81 | path: z.string(), 82 | mode: z.enum(['100644', '100755', '040000', '160000', '120000']), 83 | type: z.enum(['blob', 'tree', 'commit']), 84 | size: z.number().optional(), 85 | sha: z.string(), 86 | url: z.string() 87 | }); 88 | 89 | export const GitHubTreeSchema = z.object({ 90 | sha: 
z.string(), 91 | url: z.string(), 92 | tree: z.array(GitHubTreeEntrySchema), 93 | truncated: z.boolean() 94 | }); 95 | 96 | export const GitHubCommitSchema = z.object({ 97 | sha: z.string(), 98 | node_id: z.string(), 99 | url: z.string(), 100 | author: GitHubAuthorSchema, 101 | committer: GitHubAuthorSchema, 102 | message: z.string(), 103 | tree: z.object({ 104 | sha: z.string(), 105 | url: z.string() 106 | }), 107 | parents: z.array(z.object({ 108 | sha: z.string(), 109 | url: z.string() 110 | })) 111 | }); 112 | 113 | // Reference schema 114 | export const GitHubReferenceSchema = z.object({ 115 | ref: z.string(), 116 | node_id: z.string(), 117 | url: z.string(), 118 | object: z.object({ 119 | sha: z.string(), 120 | type: z.string(), 121 | url: z.string() 122 | }) 123 | }); 124 | 125 | // Input schemas for operations 126 | export const CreateRepositoryOptionsSchema = z.object({ 127 | name: z.string(), 128 | description: z.string().optional(), 129 | private: z.boolean().optional(), 130 | auto_init: z.boolean().optional() 131 | }); 132 | 133 | export const CreateIssueOptionsSchema = z.object({ 134 | title: z.string(), 135 | body: z.string().optional(), 136 | assignees: z.array(z.string()).optional(), 137 | milestone: z.number().optional(), 138 | labels: z.array(z.string()).optional() 139 | }); 140 | 141 | export const CreatePullRequestOptionsSchema = z.object({ 142 | title: z.string(), 143 | body: z.string().optional(), 144 | head: z.string(), 145 | base: z.string(), 146 | maintainer_can_modify: z.boolean().optional(), 147 | draft: z.boolean().optional() 148 | }); 149 | 150 | export const CreateBranchOptionsSchema = z.object({ 151 | ref: z.string(), 152 | sha: z.string() 153 | }); 154 | 155 | // Response schemas for operations 156 | export const GitHubCreateUpdateFileResponseSchema = z.object({ 157 | content: GitHubFileContentSchema.nullable(), 158 | commit: z.object({ 159 | sha: z.string(), 160 | node_id: z.string(), 161 | url: z.string(), 162 | html_url: 
z.string(), 163 | author: GitHubAuthorSchema, 164 | committer: GitHubAuthorSchema, 165 | message: z.string(), 166 | tree: z.object({ 167 | sha: z.string(), 168 | url: z.string() 169 | }), 170 | parents: z.array(z.object({ 171 | sha: z.string(), 172 | url: z.string(), 173 | html_url: z.string() 174 | })) 175 | }) 176 | }); 177 | 178 | export const GitHubSearchResponseSchema = z.object({ 179 | total_count: z.number(), 180 | incomplete_results: z.boolean(), 181 | items: z.array(GitHubRepositorySchema) 182 | }); 183 | 184 | // Fork related schemas 185 | export const GitHubForkParentSchema = z.object({ 186 | name: z.string(), 187 | full_name: z.string(), 188 | owner: z.object({ 189 | login: z.string(), 190 | id: z.number(), 191 | avatar_url: z.string() 192 | }), 193 | html_url: z.string() 194 | }); 195 | 196 | export const GitHubForkSchema = GitHubRepositorySchema.extend({ 197 | parent: GitHubForkParentSchema, 198 | source: GitHubForkParentSchema 199 | }); 200 | 201 | // Issue related schemas 202 | export const GitHubLabelSchema = z.object({ 203 | id: z.number(), 204 | node_id: z.string(), 205 | url: z.string(), 206 | name: z.string(), 207 | color: z.string(), 208 | default: z.boolean(), 209 | description: z.string().optional() 210 | }); 211 | 212 | export const GitHubIssueAssigneeSchema = z.object({ 213 | login: z.string(), 214 | id: z.number(), 215 | avatar_url: z.string(), 216 | url: z.string(), 217 | html_url: z.string() 218 | }); 219 | 220 | export const GitHubMilestoneSchema = z.object({ 221 | url: z.string(), 222 | html_url: z.string(), 223 | labels_url: z.string(), 224 | id: z.number(), 225 | node_id: z.string(), 226 | number: z.number(), 227 | title: z.string(), 228 | description: z.string(), 229 | state: z.string() 230 | }); 231 | 232 | export const GitHubIssueSchema = z.object({ 233 | url: z.string(), 234 | repository_url: z.string(), 235 | labels_url: z.string(), 236 | comments_url: z.string(), 237 | events_url: z.string(), 238 | html_url: z.string(), 239 | 
id: z.number(), 240 | node_id: z.string(), 241 | number: z.number(), 242 | title: z.string(), 243 | user: GitHubIssueAssigneeSchema, 244 | labels: z.array(GitHubLabelSchema), 245 | state: z.string(), 246 | locked: z.boolean(), 247 | assignee: GitHubIssueAssigneeSchema.nullable(), 248 | assignees: z.array(GitHubIssueAssigneeSchema), 249 | milestone: GitHubMilestoneSchema.nullable(), 250 | comments: z.number(), 251 | created_at: z.string(), 252 | updated_at: z.string(), 253 | closed_at: z.string().nullable(), 254 | body: z.string() 255 | }); 256 | 257 | // Pull Request related schemas 258 | export const GitHubPullRequestHeadSchema = z.object({ 259 | label: z.string(), 260 | ref: z.string(), 261 | sha: z.string(), 262 | user: GitHubIssueAssigneeSchema, 263 | repo: GitHubRepositorySchema 264 | }); 265 | 266 | export const GitHubPullRequestSchema = z.object({ 267 | url: z.string(), 268 | id: z.number(), 269 | node_id: z.string(), 270 | html_url: z.string(), 271 | diff_url: z.string(), 272 | patch_url: z.string(), 273 | issue_url: z.string(), 274 | number: z.number(), 275 | state: z.string(), 276 | locked: z.boolean(), 277 | title: z.string(), 278 | user: GitHubIssueAssigneeSchema, 279 | body: z.string(), 280 | created_at: z.string(), 281 | updated_at: z.string(), 282 | closed_at: z.string().nullable(), 283 | merged_at: z.string().nullable(), 284 | merge_commit_sha: z.string(), 285 | assignee: GitHubIssueAssigneeSchema.nullable(), 286 | assignees: z.array(GitHubIssueAssigneeSchema), 287 | head: GitHubPullRequestHeadSchema, 288 | base: GitHubPullRequestHeadSchema 289 | }); 290 | 291 | const RepoParamsSchema = z.object({ 292 | owner: z.string().describe("Repository owner (username or organization)"), 293 | repo: z.string().describe("Repository name") 294 | }); 295 | 296 | export const CreateOrUpdateFileSchema = RepoParamsSchema.extend({ 297 | path: z.string().describe("Path where to create/update the file"), 298 | content: z.string().describe("Content of the file"), 299 | 
message: z.string().describe("Commit message"), 300 | branch: z.string().describe("Branch to create/update the file in"), 301 | sha: z.string().optional() 302 | .describe("SHA of the file being replaced (required when updating existing files)") 303 | }); 304 | 305 | export const SearchRepositoriesSchema = z.object({ 306 | query: z.string().describe("Search query (see GitHub search syntax)"), 307 | page: z.number().optional().describe("Page number for pagination (default: 1)"), 308 | perPage: z.number().optional().describe("Number of results per page (default: 30, max: 100)") 309 | }); 310 | 311 | export const CreateRepositorySchema = z.object({ 312 | name: z.string().describe("Repository name"), 313 | description: z.string().optional().describe("Repository description"), 314 | private: z.boolean().optional().describe("Whether the repository should be private"), 315 | autoInit: z.boolean().optional().describe("Initialize with README.md") 316 | }); 317 | 318 | export const GetFileContentsSchema = RepoParamsSchema.extend({ 319 | path: z.string().describe("Path to the file or directory"), 320 | branch: z.string().optional().describe("Branch to get contents from") 321 | }); 322 | 323 | export const PushFilesSchema = RepoParamsSchema.extend({ 324 | branch: z.string().describe("Branch to push to (e.g., 'main' or 'master')"), 325 | files: z.array(z.object({ 326 | path: z.string().describe("Path where to create the file"), 327 | content: z.string().describe("Content of the file") 328 | })).describe("Array of files to push"), 329 | message: z.string().describe("Commit message") 330 | }); 331 | 332 | export const CreateIssueSchema = RepoParamsSchema.extend({ 333 | title: z.string().describe("Issue title"), 334 | body: z.string().optional().describe("Issue body/description"), 335 | assignees: z.array(z.string()).optional().describe("Array of usernames to assign"), 336 | labels: z.array(z.string()).optional().describe("Array of label names"), 337 | milestone: 
z.number().optional().describe("Milestone number to assign") 338 | }); 339 | 340 | export const CreatePullRequestSchema = RepoParamsSchema.extend({ 341 | title: z.string().describe("Pull request title"), 342 | body: z.string().optional().describe("Pull request body/description"), 343 | head: z.string().describe("The name of the branch where your changes are implemented"), 344 | base: z.string().describe("The name of the branch you want the changes pulled into"), 345 | draft: z.boolean().optional().describe("Whether to create the pull request as a draft"), 346 | maintainer_can_modify: z.boolean().optional() 347 | .describe("Whether maintainers can modify the pull request") 348 | }); 349 | 350 | export const ForkRepositorySchema = RepoParamsSchema.extend({ 351 | organization: z.string().optional() 352 | .describe("Optional: organization to fork to (defaults to your personal account)") 353 | }); 354 | 355 | export const CreateBranchSchema = RepoParamsSchema.extend({ 356 | branch: z.string().describe("Name for the new branch"), 357 | from_branch: z.string().optional() 358 | .describe("Optional: source branch to create from (defaults to the repository's default branch)") 359 | }); 360 | 361 | // Export types 362 | export type GitHubAuthor = z.infer<typeof GitHubAuthorSchema>; 363 | export type GitHubFork = z.infer<typeof GitHubForkSchema>; 364 | export type GitHubIssue = z.infer<typeof GitHubIssueSchema>; 365 | export type GitHubPullRequest = z.infer<typeof GitHubPullRequestSchema>;export type GitHubRepository = z.infer<typeof GitHubRepositorySchema>; 366 | export type GitHubFileContent = z.infer<typeof GitHubFileContentSchema>; 367 | export type GitHubDirectoryContent = z.infer<typeof GitHubDirectoryContentSchema>; 368 | export type GitHubContent = z.infer<typeof GitHubContentSchema>; 369 | export type FileOperation = z.infer<typeof FileOperationSchema>; 370 | export type GitHubTree = z.infer<typeof GitHubTreeSchema>; 371 | export type GitHubCommit = z.infer<typeof GitHubCommitSchema>; 372 | export type GitHubReference = z.infer<typeof GitHubReferenceSchema>; 373 | export type CreateRepositoryOptions = z.infer<typeof CreateRepositorySchema>; 374 | export type CreateIssueOptions = z.infer<typeof CreateIssueSchema>; 375 | export type CreatePullRequestOptions = z.infer<typeof CreatePullRequestSchema>; 376 | export type CreateBranchOptions = z.infer<typeof CreateBranchSchema>; 377 | export type GitHubCreateUpdateFileResponse =
z.infer<typeof GitHubCreateUpdateFileResponseSchema>; 378 | export type GitHubSearchResponse = z.infer<typeof GitHubSearchResponseSchema>; -------------------------------------------------------------------------------- /src/github/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/google-maps/README.md: -------------------------------------------------------------------------------- 1 | # Google Maps MCP Server 2 | 3 | MCP Server for the Google Maps API. 4 | 5 | ## Tools 6 | 7 | 1. `geocode` 8 | - Convert address to coordinates 9 | - Input: `address` (string) 10 | - Returns: location, formatted_address, place_id 11 | 12 | 2. `reverse_geocode` 13 | - Convert coordinates to address 14 | - Inputs: 15 | - `latitude` (number) 16 | - `longitude` (number) 17 | - Returns: formatted_address, place_id, address_components 18 | 19 | 3. `search_places` 20 | - Search for places using text query 21 | - Inputs: 22 | - `query` (string) 23 | - `location` (optional): { latitude: number, longitude: number } 24 | - `radius` (optional): number (meters, max 50000) 25 | - Returns: array of places with names, addresses, locations 26 | 27 | 4. `get_place_details` 28 | - Get detailed information about a place 29 | - Input: `place_id` (string) 30 | - Returns: name, address, contact info, ratings, reviews, opening hours 31 | 32 | 5. `get_distance_matrix` 33 | - Calculate distances and times between points 34 | - Inputs: 35 | - `origins` (string[]) 36 | - `destinations` (string[]) 37 | - `mode` (optional): "driving" | "walking" | "bicycling" | "transit" 38 | - Returns: distances and durations matrix 39 | 40 | 6.
`get_elevation` 41 | - Get elevation data for locations 42 | - Input: `locations` (array of {latitude, longitude}) 43 | - Returns: elevation data for each point 44 | 45 | 7. `get_directions` 46 | - Get directions between points 47 | - Inputs: 48 | - `origin` (string) 49 | - `destination` (string) 50 | - `mode` (optional): "driving" | "walking" | "bicycling" | "transit" 51 | - Returns: route details with steps, distance, duration 52 | 53 | ## Setup 54 | 55 | ### API Key 56 | Get a Google Maps API key by following the instructions [here](https://developers.google.com/maps/documentation/javascript/get-api-key#create-api-keys). 57 | 58 | ### Usage with Claude Desktop 59 | 60 | Add the following to your `claude_desktop_config.json`: 61 | 62 | ```json 63 | { 64 | "google-maps": { 65 | "command": "npx", 66 | "args": ["-y", "@modelcontextprotocol/server-google-maps"], 67 | "env": { 68 | "GOOGLE_MAPS_API_KEY": "<YOUR_API_KEY>" 69 | } 70 | } 71 | } 72 | ``` 73 | 74 | ## License 75 | 76 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
77 | -------------------------------------------------------------------------------- /src/google-maps/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-google-maps", 3 | "version": "0.5.1", 4 | "description": "MCP server for using the Google Maps API", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-google-maps": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.6.0", 23 | "@types/node-fetch": "^2.6.12", 24 | "node-fetch": "^3.3.2" 25 | }, 26 | "devDependencies": { 27 | "shx": "^0.3.4", 28 | "typescript": "^5.6.2" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/google-maps/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/memory/README.md: -------------------------------------------------------------------------------- 1 | # Knowledge Graph Memory Server 2 | A basic implementation of persistent memory using a local knowledge graph. This lets Claude remember information about the user across chats. 3 | 4 | ## Core Concepts 5 | 6 | ### Entities 7 | Entities are the primary nodes in the knowledge graph. 
Each entity has: 8 | - A unique name (identifier) 9 | - An entity type (e.g., "person", "organization", "event") 10 | - A list of observations 11 | 12 | Example: 13 | ```json 14 | { 15 | "name": "John_Smith", 16 | "entityType": "person", 17 | "observations": ["Speaks fluent Spanish"] 18 | } 19 | ``` 20 | 21 | ### Relations 22 | Relations define directed connections between entities. They are always stored in active voice and describe how entities interact or relate to each other. 23 | 24 | Example: 25 | ```json 26 | { 27 | "from": "John_Smith", 28 | "to": "Anthropic", 29 | "relationType": "works_at" 30 | } 31 | ``` 32 | ### Observations 33 | Observations are discrete pieces of information about an entity. They are: 34 | 35 | - Stored as strings 36 | - Attached to specific entities 37 | - Can be added or removed independently 38 | - Should be atomic (one fact per observation) 39 | 40 | Example: 41 | ```json 42 | { 43 | "entityName": "John_Smith", 44 | "observations": [ 45 | "Speaks fluent Spanish", 46 | "Graduated in 2019", 47 | "Prefers morning meetings" 48 | ] 49 | } 50 | ``` 51 | 52 | ## API 53 | 54 | ### Tools 55 | - **create_entities** 56 | - Create multiple new entities in the knowledge graph 57 | - Input: `entities` (array of objects) 58 | - Each object contains: 59 | - `name` (string): Entity identifier 60 | - `entityType` (string): Type classification 61 | - `observations` (string[]): Associated observations 62 | - Ignores entities with existing names 63 | 64 | - **create_relations** 65 | - Create multiple new relations between entities 66 | - Input: `relations` (array of objects) 67 | - Each object contains: 68 | - `from` (string): Source entity name 69 | - `to` (string): Target entity name 70 | - `relationType` (string): Relationship type in active voice 71 | - Skips duplicate relations 72 | 73 | - **add_observations** 74 | - Add new observations to existing entities 75 | - Input: `observations` (array of objects) 76 | - Each object contains: 77 | - 
`entityName` (string): Target entity 78 | - `contents` (string[]): New observations to add 79 | - Returns added observations per entity 80 | - Fails if entity doesn't exist 81 | 82 | - **delete_entities** 83 | - Remove entities and their relations 84 | - Input: `entityNames` (string[]) 85 | - Cascading deletion of associated relations 86 | - Silent operation if entity doesn't exist 87 | 88 | - **delete_observations** 89 | - Remove specific observations from entities 90 | - Input: `deletions` (array of objects) 91 | - Each object contains: 92 | - `entityName` (string): Target entity 93 | - `observations` (string[]): Observations to remove 94 | - Silent operation if observation doesn't exist 95 | 96 | - **delete_relations** 97 | - Remove specific relations from the graph 98 | - Input: `relations` (array of objects) 99 | - Each object contains: 100 | - `from` (string): Source entity name 101 | - `to` (string): Target entity name 102 | - `relationType` (string): Relationship type 103 | - Silent operation if relation doesn't exist 104 | 105 | - **read_graph** 106 | - Read the entire knowledge graph 107 | - No input required 108 | - Returns complete graph structure with all entities and relations 109 | 110 | - **search_nodes** 111 | - Search for nodes based on query 112 | - Input: `query` (string) 113 | - Searches across: 114 | - Entity names 115 | - Entity types 116 | - Observation content 117 | - Returns matching entities and their relations 118 | 119 | - **open_nodes** 120 | - Retrieve specific nodes by name 121 | - Input: `names` (string[]) 122 | - Returns: 123 | - Requested entities 124 | - Relations between requested entities 125 | - Silently skips non-existent nodes 126 | 127 | # Usage with Claude Desktop 128 | 129 | ### Setup 130 | Add this to your claude_desktop_config.json: 131 | ```json 132 | { 133 | "memory": { 134 | "command": "npx", 135 | "args": ["-y", "@modelcontextprotocol/server-memory"] 136 | } 137 | } 138 | ``` 139 | 140 | ### System Prompt 141 | 142 
| The prompt for utilizing memory depends on the use case. Changing the prompt will help the model determine the frequency and types of memories created. 143 | 144 | Here is an example prompt for chat personalization. You could use this prompt in the "Custom Instructions" field of a [Claude.ai Project](https://www.anthropic.com/news/projects). 145 | 146 | ``` 147 | Follow these steps for each interaction: 148 | 149 | 1. User Identification: 150 | - You should assume that you are interacting with default_user 151 | - If you have not identified default_user, proactively try to do so. 152 | 153 | 2. Memory Retrieval: 154 | - Always begin your chat by saying only "Remembering..." and retrieve all relevant information from your knowledge graph 155 | - Always refer to your knowledge graph as your "memory" 156 | 157 | 3. Memory 158 | - While conversing with the user, be attentive to any new information that falls into these categories: 159 | a) Basic Identity (age, gender, location, job title, education level, etc.) 160 | b) Behaviors (interests, habits, etc.) 161 | c) Preferences (communication style, preferred language, etc.) 162 | d) Goals (goals, targets, aspirations, etc.) 163 | e) Relationships (personal and professional relationships up to 3 degrees of separation) 164 | 165 | 4. Memory Update: 166 | - If any new information was gathered during the interaction, update your memory as follows: 167 | a) Create entities for recurring organizations, people, and significant events 168 | b) Connect them to the current entities using relations 169 | c) Store facts about them as observations 170 | ``` 171 | 172 | ## License 173 | 174 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
175 | -------------------------------------------------------------------------------- /src/memory/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | } from "@modelcontextprotocol/sdk/types.js"; 9 | import { promises as fs } from 'fs'; 10 | import path from 'path'; 11 | import { fileURLToPath } from 'url'; 12 | 13 | 14 | // Define the path to the JSONL file, you can change this to your desired local path 15 | const __dirname = path.dirname(fileURLToPath(import.meta.url)); 16 | const MEMORY_FILE_PATH = path.join(__dirname, 'memory.json'); 17 | 18 | // We are storing our memory using entities, relations, and observations in a graph structure 19 | interface Entity { 20 | name: string; 21 | entityType: string; 22 | observations: string[]; 23 | } 24 | 25 | interface Relation { 26 | from: string; 27 | to: string; 28 | relationType: string; 29 | } 30 | 31 | interface KnowledgeGraph { 32 | entities: Entity[]; 33 | relations: Relation[]; 34 | } 35 | 36 | // The KnowledgeGraphManager class contains all operations to interact with the knowledge graph 37 | class KnowledgeGraphManager { 38 | private async loadGraph(): Promise<KnowledgeGraph> { 39 | try { 40 | const data = await fs.readFile(MEMORY_FILE_PATH, "utf-8"); 41 | const lines = data.split("\n").filter(line => line.trim() !== ""); 42 | return lines.reduce((graph: KnowledgeGraph, line) => { 43 | const item = JSON.parse(line); 44 | if (item.type === "entity") graph.entities.push(item as Entity); 45 | if (item.type === "relation") graph.relations.push(item as Relation); 46 | return graph; 47 | }, { entities: [], relations: [] }); 48 | } catch (error) { 49 | if (error instanceof Error && 'code' in error && (error as any).code === "ENOENT") {
return { entities: [], relations: [] }; 51 | } 52 | throw error; 53 | } 54 | } 55 | 56 | private async saveGraph(graph: KnowledgeGraph): Promise<void> { 57 | const lines = [ 58 | ...graph.entities.map(e => JSON.stringify({ type: "entity", ...e })), 59 | ...graph.relations.map(r => JSON.stringify({ type: "relation", ...r })), 60 | ]; 61 | await fs.writeFile(MEMORY_FILE_PATH, lines.join("\n")); 62 | } 63 | 64 | async createEntities(entities: Entity[]): Promise<Entity[]> { 65 | const graph = await this.loadGraph(); 66 | const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name)); 67 | graph.entities.push(...newEntities); 68 | await this.saveGraph(graph); 69 | return newEntities; 70 | } 71 | 72 | async createRelations(relations: Relation[]): Promise<Relation[]> { 73 | const graph = await this.loadGraph(); 74 | const newRelations = relations.filter(r => !graph.relations.some(existingRelation => 75 | existingRelation.from === r.from && 76 | existingRelation.to === r.to && 77 | existingRelation.relationType === r.relationType 78 | )); 79 | graph.relations.push(...newRelations); 80 | await this.saveGraph(graph); 81 | return newRelations; 82 | } 83 | 84 | async addObservations(observations: { entityName: string; contents: string[] }[]): Promise<{ entityName: string; addedObservations: string[] }[]> { 85 | const graph = await this.loadGraph(); 86 | const results = observations.map(o => { 87 | const entity = graph.entities.find(e => e.name === o.entityName); 88 | if (!entity) { 89 | throw new Error(`Entity with name ${o.entityName} not found`); 90 | } 91 | const newObservations = o.contents.filter(content => !entity.observations.includes(content)); 92 | entity.observations.push(...newObservations); 93 | return { entityName: o.entityName, addedObservations: newObservations }; 94 | }); 95 | await this.saveGraph(graph); 96 | return results; 97 | } 98 | 99 | async deleteEntities(entityNames: string[]): Promise<void> { 100 | const graph = await this.loadGraph();
101 | graph.entities = graph.entities.filter(e => !entityNames.includes(e.name)); 102 | graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to)); 103 | await this.saveGraph(graph); 104 | } 105 | 106 | async deleteObservations(deletions: { entityName: string; observations: string[] }[]): Promise<void> { 107 | const graph = await this.loadGraph(); 108 | deletions.forEach(d => { 109 | const entity = graph.entities.find(e => e.name === d.entityName); 110 | if (entity) { 111 | entity.observations = entity.observations.filter(o => !d.observations.includes(o)); 112 | } 113 | }); 114 | await this.saveGraph(graph); 115 | } 116 | 117 | async deleteRelations(relations: Relation[]): Promise<void> { 118 | const graph = await this.loadGraph(); 119 | graph.relations = graph.relations.filter(r => !relations.some(delRelation => 120 | r.from === delRelation.from && 121 | r.to === delRelation.to && 122 | r.relationType === delRelation.relationType 123 | )); 124 | await this.saveGraph(graph); 125 | } 126 | 127 | async readGraph(): Promise<KnowledgeGraph> { 128 | return this.loadGraph(); 129 | } 130 | 131 | // Very basic search function 132 | async searchNodes(query: string): Promise<KnowledgeGraph> { 133 | const graph = await this.loadGraph(); 134 | 135 | // Filter entities 136 | const filteredEntities = graph.entities.filter(e => 137 | e.name.toLowerCase().includes(query.toLowerCase()) || 138 | e.entityType.toLowerCase().includes(query.toLowerCase()) || 139 | e.observations.some(o => o.toLowerCase().includes(query.toLowerCase())) 140 | ); 141 | 142 | // Create a Set of filtered entity names for quick lookup 143 | const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); 144 | 145 | // Filter relations to only include those between filtered entities 146 | const filteredRelations = graph.relations.filter(r => 147 | filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) 148 | ); 149 | 150 | const filteredGraph: KnowledgeGraph = { 151 | entities:
filteredEntities, 152 | relations: filteredRelations, 153 | }; 154 | 155 | return filteredGraph; 156 | } 157 | 158 | async openNodes(names: string[]): Promise<KnowledgeGraph> { 159 | const graph = await this.loadGraph(); 160 | 161 | // Filter entities 162 | const filteredEntities = graph.entities.filter(e => names.includes(e.name)); 163 | 164 | // Create a Set of filtered entity names for quick lookup 165 | const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); 166 | 167 | // Filter relations to only include those between filtered entities 168 | const filteredRelations = graph.relations.filter(r => 169 | filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) 170 | ); 171 | 172 | const filteredGraph: KnowledgeGraph = { 173 | entities: filteredEntities, 174 | relations: filteredRelations, 175 | }; 176 | 177 | return filteredGraph; 178 | } 179 | } 180 | 181 | const knowledgeGraphManager = new KnowledgeGraphManager(); 182 | 183 | 184 | // The server instance and tools exposed to Claude 185 | const server = new Server({ 186 | name: "memory-server", 187 | version: "1.0.0", 188 | }, { 189 | capabilities: { 190 | tools: {}, 191 | }, 192 | },); 193 | 194 | server.setRequestHandler(ListToolsRequestSchema, async () => { 195 | return { 196 | tools: [ 197 | { 198 | name: "create_entities", 199 | description: "Create multiple new entities in the knowledge graph", 200 | inputSchema: { 201 | type: "object", 202 | properties: { 203 | entities: { 204 | type: "array", 205 | items: { 206 | type: "object", 207 | properties: { 208 | name: { type: "string", description: "The name of the entity" }, 209 | entityType: { type: "string", description: "The type of the entity" }, 210 | observations: { 211 | type: "array", 212 | items: { type: "string" }, 213 | description: "An array of observation contents associated with the entity" 214 | }, 215 | }, 216 | required: ["name", "entityType", "observations"], 217 | }, 218 | }, 219 | }, 220 | required: ["entities"], 221 | }, 222 | }, 223
| { 224 | name: "create_relations", 225 | description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice", 226 | inputSchema: { 227 | type: "object", 228 | properties: { 229 | relations: { 230 | type: "array", 231 | items: { 232 | type: "object", 233 | properties: { 234 | from: { type: "string", description: "The name of the entity where the relation starts" }, 235 | to: { type: "string", description: "The name of the entity where the relation ends" }, 236 | relationType: { type: "string", description: "The type of the relation" }, 237 | }, 238 | required: ["from", "to", "relationType"], 239 | }, 240 | }, 241 | }, 242 | required: ["relations"], 243 | }, 244 | }, 245 | { 246 | name: "add_observations", 247 | description: "Add new observations to existing entities in the knowledge graph", 248 | inputSchema: { 249 | type: "object", 250 | properties: { 251 | observations: { 252 | type: "array", 253 | items: { 254 | type: "object", 255 | properties: { 256 | entityName: { type: "string", description: "The name of the entity to add the observations to" }, 257 | contents: { 258 | type: "array", 259 | items: { type: "string" }, 260 | description: "An array of observation contents to add" 261 | }, 262 | }, 263 | required: ["entityName", "contents"], 264 | }, 265 | }, 266 | }, 267 | required: ["observations"], 268 | }, 269 | }, 270 | { 271 | name: "delete_entities", 272 | description: "Delete multiple entities and their associated relations from the knowledge graph", 273 | inputSchema: { 274 | type: "object", 275 | properties: { 276 | entityNames: { 277 | type: "array", 278 | items: { type: "string" }, 279 | description: "An array of entity names to delete" 280 | }, 281 | }, 282 | required: ["entityNames"], 283 | }, 284 | }, 285 | { 286 | name: "delete_observations", 287 | description: "Delete specific observations from entities in the knowledge graph", 288 | inputSchema: { 289 | type: "object", 290 | properties: { 291 
| deletions: { 292 | type: "array", 293 | items: { 294 | type: "object", 295 | properties: { 296 | entityName: { type: "string", description: "The name of the entity containing the observations" }, 297 | observations: { 298 | type: "array", 299 | items: { type: "string" }, 300 | description: "An array of observations to delete" 301 | }, 302 | }, 303 | required: ["entityName", "observations"], 304 | }, 305 | }, 306 | }, 307 | required: ["deletions"], 308 | }, 309 | }, 310 | { 311 | name: "delete_relations", 312 | description: "Delete multiple relations from the knowledge graph", 313 | inputSchema: { 314 | type: "object", 315 | properties: { 316 | relations: { 317 | type: "array", 318 | items: { 319 | type: "object", 320 | properties: { 321 | from: { type: "string", description: "The name of the entity where the relation starts" }, 322 | to: { type: "string", description: "The name of the entity where the relation ends" }, 323 | relationType: { type: "string", description: "The type of the relation" }, 324 | }, 325 | required: ["from", "to", "relationType"], 326 | }, 327 | description: "An array of relations to delete" 328 | }, 329 | }, 330 | required: ["relations"], 331 | }, 332 | }, 333 | { 334 | name: "read_graph", 335 | description: "Read the entire knowledge graph", 336 | inputSchema: { 337 | type: "object", 338 | properties: {}, 339 | }, 340 | }, 341 | { 342 | name: "search_nodes", 343 | description: "Search for nodes in the knowledge graph based on a query", 344 | inputSchema: { 345 | type: "object", 346 | properties: { 347 | query: { type: "string", description: "The search query to match against entity names, types, and observation content" }, 348 | }, 349 | required: ["query"], 350 | }, 351 | }, 352 | { 353 | name: "open_nodes", 354 | description: "Open specific nodes in the knowledge graph by their names", 355 | inputSchema: { 356 | type: "object", 357 | properties: { 358 | names: { 359 | type: "array", 360 | items: { type: "string" }, 361 | description: 
"An array of entity names to retrieve", 362 | }, 363 | }, 364 | required: ["names"], 365 | }, 366 | }, 367 | ], 368 | }; 369 | }); 370 | 371 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 372 | const { name, arguments: args } = request.params; 373 | 374 | if (!args) { 375 | throw new Error(`No arguments provided for tool: ${name}`); 376 | } 377 | 378 | switch (name) { 379 | case "create_entities": 380 | return { toolResult: await knowledgeGraphManager.createEntities(args.entities as Entity[]) }; 381 | case "create_relations": 382 | return { toolResult: await knowledgeGraphManager.createRelations(args.relations as Relation[]) }; 383 | case "add_observations": 384 | return { toolResult: await knowledgeGraphManager.addObservations(args.observations as { entityName: string; contents: string[] }[]) }; 385 | case "delete_entities": 386 | await knowledgeGraphManager.deleteEntities(args.entityNames as string[]); 387 | return { toolResult: "Entities deleted successfully" }; 388 | case "delete_observations": 389 | await knowledgeGraphManager.deleteObservations(args.deletions as { entityName: string; observations: string[] }[]); 390 | return { toolResult: "Observations deleted successfully" }; 391 | case "delete_relations": 392 | await knowledgeGraphManager.deleteRelations(args.relations as Relation[]); 393 | return { toolResult: "Relations deleted successfully" }; 394 | case "read_graph": 395 | return { toolResult: await knowledgeGraphManager.readGraph() }; 396 | case "search_nodes": 397 | return { toolResult: await knowledgeGraphManager.searchNodes(args.query as string) }; 398 | case "open_nodes": 399 | return { toolResult: await knowledgeGraphManager.openNodes(args.names as string[]) }; 400 | default: 401 | throw new Error(`Unknown tool: ${name}`); 402 | } 403 | }); 404 | 405 | async function main() { 406 | const transport = new StdioServerTransport(); 407 | await server.connect(transport); 408 | console.error("Knowledge Graph MCP Server running on 
stdio"); 409 | } 410 | 411 | main().catch((error) => { 412 | console.error("Fatal error in main():", error); 413 | process.exit(1); 414 | }); -------------------------------------------------------------------------------- /src/memory/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-memory", 3 | "version": "0.5.1", 4 | "description": "MCP server for enabling memory for Claude through a knowledge graph", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-memory": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0" 23 | }, 24 | "devDependencies": { 25 | "@types/node": "^22.9.3", 26 | "shx": "^0.3.4", 27 | "typescript": "^5.6.2" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/memory/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/postgres/README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL 2 | 3 | A Model Context Protocol server that provides read-only access to PostgreSQL databases. This server enables LLMs to inspect database schemas and execute read-only queries. 
4 | 5 | ## Components 6 | 7 | ### Tools 8 | 9 | - **query** 10 | - Execute read-only SQL queries against the connected database 11 | - Input: `sql` (string): The SQL query to execute 12 | - All queries are executed within a READ ONLY transaction 13 | 14 | ### Resources 15 | 16 | The server provides schema information for each table in the database: 17 | 18 | - **Table Schemas** (`postgres://<host>/<table>/schema`) 19 | - JSON schema information for each table 20 | - Includes column names and data types 21 | - Automatically discovered from database metadata 22 | 23 | ## Usage with Claude Desktop 24 | 25 | To use this server with the Claude Desktop app, add the following configuration to the "mcpServers" section of your `claude_desktop_config.json`: 26 | 27 | ```json 28 | { 29 | "postgres": { 30 | "command": "npx", 31 | "args": ["-y", "@modelcontextprotocol/server-postgres", "postgresql://localhost/mydb"] 32 | } 33 | } 34 | ``` 35 | 36 | Replace `/mydb` with your database name. 37 | 38 | ## License 39 | 40 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
41 | -------------------------------------------------------------------------------- /src/postgres/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 5 | import { 6 | CallToolRequestSchema, 7 | ListResourcesRequestSchema, 8 | ListToolsRequestSchema, 9 | ReadResourceRequestSchema, 10 | } from "@modelcontextprotocol/sdk/types.js"; 11 | import pg from "pg"; 12 | 13 | const server = new Server( 14 | { 15 | name: "example-servers/postgres", 16 | version: "0.1.0", 17 | }, 18 | { 19 | capabilities: { 20 | resources: {}, 21 | tools: {}, 22 | }, 23 | }, 24 | ); 25 | 26 | const args = process.argv.slice(2); 27 | if (args.length === 0) { 28 | console.error("Please provide a database URL as a command-line argument"); 29 | process.exit(1); 30 | } 31 | 32 | const databaseUrl = args[0]; 33 | 34 | const resourceBaseUrl = new URL(databaseUrl); 35 | resourceBaseUrl.protocol = "postgres:"; 36 | resourceBaseUrl.password = ""; 37 | 38 | const pool = new pg.Pool({ 39 | connectionString: databaseUrl, 40 | }); 41 | 42 | const SCHEMA_PATH = "schema"; 43 | 44 | server.setRequestHandler(ListResourcesRequestSchema, async () => { 45 | const client = await pool.connect(); 46 | try { 47 | const result = await client.query( 48 | "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'", 49 | ); 50 | return { 51 | resources: result.rows.map((row) => ({ 52 | uri: new URL(`${row.table_name}/${SCHEMA_PATH}`, resourceBaseUrl).href, 53 | mimeType: "application/json", 54 | name: `"${row.table_name}" database schema`, 55 | })), 56 | }; 57 | } finally { 58 | client.release(); 59 | } 60 | }); 61 | 62 | server.setRequestHandler(ReadResourceRequestSchema, async (request) => { 63 | const resourceUrl = new URL(request.params.uri); 64 | 65 | const pathComponents = 
resourceUrl.pathname.split("/"); 66 | const schema = pathComponents.pop(); 67 | const tableName = pathComponents.pop(); 68 | 69 | if (schema !== SCHEMA_PATH) { 70 | throw new Error("Invalid resource URI"); 71 | } 72 | 73 | const client = await pool.connect(); 74 | try { 75 | const result = await client.query( 76 | "SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1", 77 | [tableName], 78 | ); 79 | 80 | return { 81 | contents: [ 82 | { 83 | uri: request.params.uri, 84 | mimeType: "application/json", 85 | text: JSON.stringify(result.rows, null, 2), 86 | }, 87 | ], 88 | }; 89 | } finally { 90 | client.release(); 91 | } 92 | }); 93 | 94 | server.setRequestHandler(ListToolsRequestSchema, async () => { 95 | return { 96 | tools: [ 97 | { 98 | name: "query", 99 | description: "Run a read-only SQL query", 100 | inputSchema: { 101 | type: "object", 102 | properties: { 103 | sql: { type: "string" }, 104 | }, 105 | }, 106 | }, 107 | ], 108 | }; 109 | }); 110 | 111 | server.setRequestHandler(CallToolRequestSchema, async (request) => { 112 | if (request.params.name === "query") { 113 | const sql = request.params.arguments?.sql as string; 114 | 115 | const client = await pool.connect(); 116 | try { 117 | await client.query("BEGIN TRANSACTION READ ONLY"); 118 | const result = await client.query(sql); 119 | return { 120 | content: [{ type: "text", text: JSON.stringify(result.rows, null, 2) }], 121 | isError: false, 122 | }; 123 | } catch (error) { 124 | throw error; 125 | } finally { 126 | client 127 | .query("ROLLBACK") 128 | .catch((error) => 129 | console.warn("Could not roll back transaction:", error), 130 | ); 131 | 132 | client.release(); 133 | } 134 | } 135 | throw new Error(`Unknown tool: ${request.params.name}`); 136 | }); 137 | 138 | async function runServer() { 139 | const transport = new StdioServerTransport(); 140 | await server.connect(transport); 141 | } 142 | 143 | runServer().catch(console.error); 144 | 
-------------------------------------------------------------------------------- /src/postgres/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-postgres", 3 | "version": "0.5.1", 4 | "description": "MCP server for interacting with PostgreSQL databases", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-postgres": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.6.0", 23 | "pg": "^8.13.0" 24 | }, 25 | "devDependencies": { 26 | "@types/pg": "^8.11.10", 27 | "shx": "^0.3.4", 28 | "typescript": "^5.6.2" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/postgres/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 
6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/puppeteer/.env.example: -------------------------------------------------------------------------------- 1 | # Browserbase 2 | BROWSERBASE_API_KEY= 3 | BROWSERBASE_PROJECT_ID= 4 | 5 | # Notion 6 | NOTION_API_KEY= 7 | NOTION_PAGE_URL= 8 | NOTION_DATABASE_ID= -------------------------------------------------------------------------------- /src/puppeteer/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | dist/ 3 | node_modules/ 4 | -------------------------------------------------------------------------------- /src/puppeteer/README.md: -------------------------------------------------------------------------------- 1 | # Puppeteer (Browserbase Version) 🅱️ 2 | 3 | A Model Context Protocol server that provides browser automation capabilities using Puppeteer and Browserbase. This server enables LLMs to interact with web pages, take screenshots, and execute JavaScript in a real browser environment on the cloud. 4 | 5 | This comes at scale, and is much cheaper than [Browserbase](https://www.browserbase.com). You can now run 100s of browser sessions on a single machine. 6 | 7 | ## Setup 8 | 9 | Be sure to setup your environment variables in the `.env` file. 10 | 11 | In addition, you should set up your `.claude_desktop_config.json` file to use this server. You can access it through the Claude Desktop app or run `code ~/Library/Application\ Support/Claude/claude_desktop_config.json` in your terminal. 
12 | 13 | Your `.claude_desktop_config.json` should look something like this: 14 | 15 | ```json 16 | { 17 | "mcpServers": { 18 | "browserbase": { 19 | "command": "npx", 20 | "args": ["-y", "@modelcontextprotocol/server-puppeteer"], 21 | "env": { 22 | "BROWSERBASE_API_KEY": "YOUR_BROWSERBASE_API_KEY", 23 | "BROWSERBASE_PROJECT_ID": "YOUR_BROWSERBASE_PROJECT_ID", 24 | "NOTION_API_KEY": "YOUR_NOTION_API_KEY", 25 | "NOTION_PAGE_URL": "YOUR_NOTION_PAGE_URL", 26 | "NOTION_DATABASE_ID": "YOUR_NOTION_DATABASE_ID" 27 | } 28 | } 29 | } 30 | } 31 | ``` 32 | 33 | Run the script below to build and link the server to your local `node_modules` folder: 34 | 35 | ```bash 36 | # Check TypeScript version 37 | npx tsc --version 38 | 39 | # Clean and rebuild 40 | rm -rf build/ 41 | npm run build 42 | 43 | # Link the server to your local node_modules folder 44 | npm link 45 | ``` 46 | 47 | Finally, you should restart the Claude Desktop app. You'll be able to see the server in the list of MCP Tools available. 
48 | 49 | ## Components 50 | 51 | ### Tools 52 | 53 | - **puppeteer_create_session** 54 | - Create a new cloud browser session using Browserbase 55 | - Input: None required 56 | 57 | - **puppeteer_navigate** 58 | - Navigate to any URL in the browser 59 | - Input: `url` (string) 60 | 61 | - **puppeteer_screenshot** 62 | - Capture screenshots of the entire page or specific elements 63 | - Inputs: 64 | - `name` (string, required): Name for the screenshot 65 | - `selector` (string, optional): CSS selector for element to screenshot 66 | - `width` (number, optional, default: 800): Screenshot width 67 | - `height` (number, optional, default: 600): Screenshot height 68 | 69 | - **puppeteer_click** 70 | - Click elements on the page 71 | - Input: `selector` (string): CSS selector for element to click 72 | 73 | - **puppeteer_fill** 74 | - Fill out input fields 75 | - Inputs: 76 | - `selector` (string): CSS selector for input field 77 | - `value` (string): Value to fill 78 | 79 | - **puppeteer_evaluate** 80 | - Execute JavaScript in the browser console 81 | - Input: `script` (string): JavaScript code to execute 82 | 83 | - **puppeteer_get_content** 84 | - Extract all content from the current page 85 | - Input: `selector` (string, optional): CSS selector to get content from specific elements 86 | 87 | - **puppeteer_parallel_sessions** 88 | - Create multiple browser sessions and navigate to different URLs 89 | - Input: `sessions` (array): Array of objects containing: 90 | - `url` (string): URL to navigate to 91 | - `id` (string): Session identifier 92 | 93 | - **notion_read_page** 94 | - Read content from a Notion page 95 | - Input: `pageUrl` (string, optional): URL of the page to read 96 | 97 | - **notion_update_page** 98 | - Update content in a Notion page 99 | - Inputs: 100 | - `pageId` (string): ID of the page to update 101 | - `content` (string): Content to update 102 | 103 | - **notion_append_content** 104 | - Append content to a Notion page 105 | - Inputs: 106 | - 
`pageId` (string): ID of the page to append to 107 | - `content` (string): Content to append 108 | 109 | - **notion_read_comments** 110 | - Read comments from a Notion page 111 | - Input: `pageId` (string): ID of the page to read comments from 112 | 113 | - **notion_add_comment** 114 | - Add a comment to a Notion page 115 | - Inputs: 116 | - `pageId` (string): ID of the page to comment on 117 | - `comment` (string): Comment text 118 | 119 | - **notion_add_to_database** 120 | - Add a new entry to a Notion database 121 | - Inputs: 122 | - `databaseId` (string, optional): ID of the database 123 | - `title` (string, required): Title of the entry 124 | - `tags` (array, optional): Array of tags to add to the entry 125 | - `properties` (object, optional): Additional properties for the database entry 126 | - `content` (string, optional): Content for the page 127 | 128 | ## Key Features 129 | 130 | - Cloud platform 131 | - Scalable infrastructure 132 | - Browser automation 133 | - Console log monitoring 134 | - Screenshot capabilities 135 | - JavaScript execution 136 | - Basic web interaction (navigation, clicking, form filling) 137 | 138 | ## License 139 | 140 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
141 | -------------------------------------------------------------------------------- /src/puppeteer/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-puppeteer", 3 | "version": "0.5.1", 4 | "description": "MCP server for browser automation using Puppeteer", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-puppeteer": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.5.0", 23 | "puppeteer": "^23.4.0" 24 | }, 25 | "devDependencies": { 26 | "shx": "^0.3.4", 27 | "typescript": "^5.6.2" 28 | } 29 | } -------------------------------------------------------------------------------- /src/puppeteer/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/sentry/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /src/sentry/README.md: -------------------------------------------------------------------------------- 1 | # mcp-server-sentry: A Sentry MCP server 2 | 3 | ## Overview 4 | 5 | A Model Context Protocol server for retrieving and analyzing issues from Sentry.io. 
This server provides tools to inspect error reports, stacktraces, and other debugging information from your Sentry account. 6 | 7 | ### Tools 8 | 9 | 1. `get-sentry-issue` 10 | - Retrieve and analyze a Sentry issue by ID or URL 11 | - Input: 12 | - `issue_id_or_url` (string): Sentry issue ID or URL to analyze 13 | - Returns: Issue details including: 14 | - Title 15 | - Issue ID 16 | - Status 17 | - Level 18 | - First seen timestamp 19 | - Last seen timestamp 20 | - Event count 21 | - Full stacktrace 22 | 23 | ### Prompts 24 | 25 | 1. `sentry-issue` 26 | - Retrieve issue details from Sentry 27 | - Input: 28 | - `issue_id_or_url` (string): Sentry issue ID or URL 29 | - Returns: Formatted issue details as conversation context 30 | 31 | ## Installation 32 | 33 | ### Using uv (recommended) 34 | 35 | When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will 36 | use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-sentry*. 37 | 38 | ### Using PIP 39 | 40 | Alternatively you can install `mcp-server-sentry` via pip: 41 | 42 | ``` 43 | pip install mcp-server-sentry 44 | ``` 45 | 46 | After installation, you can run it as a script using: 47 | 48 | ``` 49 | python -m mcp_server_sentry 50 | ``` 51 | 52 | ## Configuration 53 | 54 | ### Usage with Claude Desktop 55 | 56 | Add this to your `claude_desktop_config.json`: 57 | 58 |
59 | Using uvx 60 | 61 | ```json 62 | "mcpServers": { 63 | "sentry": { 64 | "command": "uvx", 65 | "args": ["mcp-server-sentry", "--auth-token", "YOUR_SENTRY_TOKEN"] 66 | } 67 | } 68 | ``` 69 |
70 | 71 |
72 | Using pip installation 73 | 74 | ```json 75 | "mcpServers": { 76 | "sentry": { 77 | "command": "python", 78 | "args": ["-m", "mcp_server_sentry", "--auth-token", "YOUR_SENTRY_TOKEN"] 79 | } 80 | } 81 | ``` 82 |
83 | 84 | ### Usage with [Zed](https://github.com/zed-industries/zed) 85 | 86 | Add to your Zed settings.json: 87 | 88 |
89 | Using uvx 90 | 91 | ```json 92 | "context_servers": [ 93 | "mcp-server-sentry": { 94 | "command": "uvx", 95 | "args": ["mcp-server-sentry", "--auth-token", "YOUR_SENTRY_TOKEN"] 96 | } 97 | ], 98 | ``` 99 |
100 | 101 |
102 | Using pip installation 103 | 104 | ```json 105 | "context_servers": { 106 | "mcp-server-sentry": { 107 | "command": "python", 108 | "args": ["-m", "mcp_server_sentry", "--auth-token", "YOUR_SENTRY_TOKEN"] 109 | } 110 | }, 111 | ``` 112 |
113 | 114 | ## Debugging 115 | 116 | You can use the MCP inspector to debug the server. For uvx installations: 117 | 118 | ``` 119 | npx @modelcontextprotocol/inspector uvx mcp-server-sentry --auth-token YOUR_SENTRY_TOKEN 120 | ``` 121 | 122 | Or if you've installed the package in a specific directory or are developing on it: 123 | 124 | ``` 125 | cd path/to/servers/src/sentry 126 | npx @modelcontextprotocol/inspector uv run mcp-server-sentry --auth-token YOUR_SENTRY_TOKEN 127 | ``` 128 | 129 | ## License 130 | 131 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 132 | -------------------------------------------------------------------------------- /src/sentry/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-sentry" 3 | version = "0.5.1" 4 | description = "MCP server for retrieving issues from sentry.io" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = ["mcp>=0.9.1"] 8 | 9 | [build-system] 10 | requires = ["hatchling"] 11 | build-backend = "hatchling.build" 12 | 13 | [tool.uv] 14 | dev-dependencies = ["pyright>=1.1.389", "pytest>=8.3.3", "ruff>=0.8.0"] 15 | 16 | [project.scripts] 17 | mcp-server-sentry = "mcp_server_sentry:main" 18 | -------------------------------------------------------------------------------- /src/sentry/src/mcp_server_sentry/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
@dataclass
class SentryIssueData:
    """Snapshot of a single Sentry issue plus its pre-formatted stacktrace.

    Built by handle_sentry_issue() from the Sentry REST API responses and
    rendered either as prompt context or as a tool result.
    """

    title: str  # Human-readable issue title from the Sentry API.
    issue_id: str  # Numeric issue ID, kept as a string as extracted from input.
    status: str  # Issue status reported by Sentry (e.g. "unresolved").
    level: str  # Severity level (e.g. "error", "warning").
    first_seen: str  # Timestamp string of the first occurrence.
    last_seen: str  # Timestamp string of the most recent occurrence.
    count: int  # Total event count for the issue.
    stacktrace: str  # Output of create_stacktrace(), already formatted.

    def to_text(self) -> str:
        """Render all fields as a single plain-text report."""
        return f"""
Sentry Issue: {self.title}
Issue ID: {self.issue_id}
Status: {self.status}
Level: {self.level}
First Seen: {self.first_seen}
Last Seen: {self.last_seen}
Event Count: {self.count}

{self.stacktrace}
"""

    def to_prompt_result(self) -> types.GetPromptResult:
        """Wrap the text report as a single user message for prompt consumers."""
        return types.GetPromptResult(
            description=f"Sentry Issue: {self.title}",
            messages=[
                types.PromptMessage(
                    role="user", content=types.TextContent(type="text", text=self.to_text())
                )
            ],
        )

    def to_tool_result(self) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
        """Wrap the text report as a tool-call result (single text content item)."""
        return [types.TextContent(type="text", text=self.to_text())]
class SentryError(Exception):
    """Raised for invalid or missing Sentry issue identifiers/URLs."""
    pass


def extract_issue_id(issue_id_or_url: str) -> str:
    """
    Extracts the Sentry issue ID from either a full URL or a standalone ID.

    Accepts issue URLs both on the bare ``sentry.io`` domain and on
    organization subdomains (e.g. ``https://my-org.sentry.io/issues/123``).
    Raises SentryError for invalid inputs, including empty strings,
    non-Sentry URLs, malformed paths, and non-numeric IDs.
    """
    if not issue_id_or_url:
        raise SentryError("Missing issue_id_or_url argument")

    if issue_id_or_url.startswith(("http://", "https://")):
        parsed_url = urlparse(issue_id_or_url)
        hostname = parsed_url.hostname
        # Fix: the previous check only allowed "*.sentry.io" subdomains and
        # rejected issue URLs hosted directly on sentry.io.
        if not hostname or not (hostname == "sentry.io" or hostname.endswith(".sentry.io")):
            raise SentryError("Invalid Sentry URL. Must be a sentry.io URL")

        path_parts = parsed_url.path.strip("/").split("/")
        if len(path_parts) < 2 or path_parts[0] != "issues":
            raise SentryError(
                "Invalid Sentry issue URL. Path must contain '/issues/{issue_id}'"
            )

        issue_id = path_parts[-1]
    else:
        issue_id = issue_id_or_url

    if not issue_id.isdigit():
        raise SentryError("Invalid Sentry issue ID. Must be a numeric value.")

    return issue_id
def create_stacktrace(latest_event: dict) -> str:
    """
    Creates a formatted stacktrace string from the latest Sentry event.

    This function extracts exception information and stacktrace details from the
    provided event dictionary, formatting them into a human-readable string.
    It handles multiple exceptions and includes file, line number, and function
    information for each frame in the stacktrace.

    Args:
        latest_event (dict): A dictionary containing the latest Sentry event data.

    Returns:
        str: A formatted string containing the stacktrace information,
             or "No stacktrace found" if no relevant data is present.
    """
    stacktraces = []
    for entry in latest_event.get("entries", []):
        # Only "exception" entries carry stacktrace data.
        if entry["type"] != "exception":
            continue

        exception_data = entry["data"]["values"]
        for exception in exception_data:
            exception_type = exception.get("type", "Unknown")
            exception_value = exception.get("value", "")
            stacktrace = exception.get("stacktrace")

            stacktrace_text = f"Exception: {exception_type}: {exception_value}\n\n"
            if stacktrace:
                stacktrace_text += "Stacktrace:\n"
                for frame in stacktrace.get("frames", []):
                    filename = frame.get("filename", "Unknown")
                    lineno = frame.get("lineNo", "?")
                    function = frame.get("function", "Unknown")

                    # Bug fix: the frame's filename was extracted but never
                    # used — every frame printed a hard-coded "(unknown)".
                    stacktrace_text += f"{filename}:{lineno} in {function}\n"

                    if "context" in frame:
                        context = frame["context"]
                        # Each context entry is a (line_number, source_line) pair.
                        for ctx_line in context:
                            stacktrace_text += f" {ctx_line[1]}\n"

                    stacktrace_text += "\n"

            stacktraces.append(stacktrace_text)

    return "\n".join(stacktraces) if stacktraces else "No stacktrace found"
155 | ) 156 | response.raise_for_status() 157 | issue_data = response.json() 158 | 159 | # Get issue hashes 160 | hashes_response = await http_client.get( 161 | f"issues/{issue_id}/hashes/", 162 | headers={"Authorization": f"Bearer {auth_token}"}, 163 | ) 164 | hashes_response.raise_for_status() 165 | hashes = hashes_response.json() 166 | 167 | if not hashes: 168 | raise McpError("No Sentry events found for this issue") 169 | 170 | latest_event = hashes[0]["latestEvent"] 171 | stacktrace = create_stacktrace(latest_event) 172 | 173 | return SentryIssueData( 174 | title=issue_data["title"], 175 | issue_id=issue_id, 176 | status=issue_data["status"], 177 | level=issue_data["level"], 178 | first_seen=issue_data["firstSeen"], 179 | last_seen=issue_data["lastSeen"], 180 | count=issue_data["count"], 181 | stacktrace=stacktrace 182 | ) 183 | 184 | except SentryError as e: 185 | raise McpError(str(e)) 186 | except httpx.HTTPStatusError as e: 187 | raise McpError(f"Error fetching Sentry issue: {str(e)}") 188 | except Exception as e: 189 | raise McpError(f"An error occurred: {str(e)}") 190 | 191 | 192 | async def serve(auth_token: str) -> Server: 193 | server = Server("sentry") 194 | http_client = httpx.AsyncClient(base_url=SENTRY_API_BASE) 195 | 196 | @server.list_prompts() 197 | async def handle_list_prompts() -> list[types.Prompt]: 198 | return [ 199 | types.Prompt( 200 | name="sentry-issue", 201 | description="Retrieve a Sentry issue by ID or URL", 202 | arguments=[ 203 | types.PromptArgument( 204 | name="issue_id_or_url", 205 | description="Sentry issue ID or URL", 206 | required=True, 207 | ) 208 | ], 209 | ) 210 | ] 211 | 212 | @server.get_prompt() 213 | async def handle_get_prompt( 214 | name: str, arguments: dict[str, str] | None 215 | ) -> types.GetPromptResult: 216 | if name != "sentry-issue": 217 | raise ValueError(f"Unknown prompt: {name}") 218 | 219 | issue_id_or_url = (arguments or {}).get("issue_id_or_url", "") 220 | issue_data = await 
@click.command()
@click.option(
    "--auth-token",
    envvar="SENTRY_TOKEN",
    required=True,
    help="Sentry authentication token",
)
def main(auth_token: str):
    """CLI entry point: run the Sentry MCP server over stdio.

    The auth token comes from --auth-token or the SENTRY_TOKEN environment
    variable.
    NOTE(review): the 401 error message elsewhere in this file refers to
    MCP_SENTRY_AUTH_TOKEN, not SENTRY_TOKEN — confirm which name is intended.
    """
    async def _run():
        # stdio_server() yields the (read, write) stream pair for the
        # stdio transport; the server runs until the streams close.
        async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
            server = await serve(auth_token)
            await server.run(
                read_stream,
                write_stream,
                InitializationOptions(
                    server_name="sentry",
                    # NOTE(review): pyproject.toml declares version 0.5.1;
                    # this reported version looks out of sync — confirm.
                    server_version="0.4.1",
                    capabilities=server.get_capabilities(
                        notification_options=NotificationOptions(),
                        experimental_capabilities={},
                    ),
                ),
            )

    asyncio.run(_run())
`slack_get_thread_replies` 46 | - Get all replies in a message thread 47 | - Required inputs: 48 | - `channel_id` (string): The channel containing the thread 49 | - `thread_ts` (string): Timestamp of the parent message 50 | - Returns: List of replies with their content and metadata 51 | 52 | 7. `slack_search_messages` 53 | - Search for messages across channels 54 | - Required inputs: 55 | - `query` (string): The search query 56 | - Optional inputs: 57 | - `count` (number, default: 5): Number of results to return 58 | - Returns: Matching messages with their context 59 | 60 | 8. `slack_get_users` 61 | - Get list of workspace users with basic profile information 62 | - Optional inputs: 63 | - `cursor` (string): Pagination cursor for next page 64 | - `limit` (number, default: 100, max: 200): Maximum users to return 65 | - Returns: List of users with their basic profiles 66 | 67 | 9. `slack_get_user_profile` 68 | - Get detailed profile information for a specific user 69 | - Required inputs: 70 | - `user_id` (string): The user's ID 71 | - Returns: Detailed user profile information 72 | 73 | ## Setup 74 | 75 | 1. Create a Slack App: 76 | - Visit the [Slack Apps page](https://api.slack.com/apps) 77 | - Click "Create New App" 78 | - Choose "From scratch" 79 | - Name your app and select your workspace 80 | 81 | 2. Configure Bot Token Scopes: 82 | Navigate to "OAuth & Permissions" and add these scopes: 83 | - `channels:history` - View messages and other content in public channels 84 | - `channels:read` - View basic channel information 85 | - `chat:write` - Send messages as the app 86 | - `reactions:write` - Add emoji reactions to messages 87 | - `users:read` - View users and their basic information 88 | 89 | 4. Install App to Workspace: 90 | - Click "Install to Workspace" and authorize the app 91 | - Save the "Bot User OAuth Token" that starts with `xoxb-` 92 | 93 | 5. 
Get your Team ID (starts with a `T`) by following [this guidance](https://slack.com/help/articles/221769328-Locate-your-Slack-URL-or-ID#find-your-workspace-or-org-id) 94 | 95 | ### Usage with Claude Desktop 96 | 97 | Add the following to your `claude_desktop_config.json`: 98 | 99 | ```json 100 | { 101 | "slack": { 102 | "command": "npx", 103 | "args": ["-y", "@modelcontextprotocol/server-slack"], 104 | "env": { 105 | "SLACK_BOT_TOKEN": "xoxb-your-bot-token", 106 | "SLACK_TEAM_ID": "T01234567" 107 | } 108 | } 109 | } 110 | ``` 111 | 112 | ### Troubleshooting 113 | 114 | If you encounter permission errors, verify that: 115 | 1. All required scopes are added to your Slack app 116 | 2. The app is properly installed to your workspace 117 | 3. The tokens and workspace ID are correctly copied to your configuration 118 | 4. The app has been added to the channels it needs to access 119 | 120 | ## License 121 | 122 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 
123 | -------------------------------------------------------------------------------- /src/slack/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import { Server } from "@modelcontextprotocol/sdk/server/index.js"; 3 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; 4 | import { 5 | CallToolRequest, 6 | CallToolRequestSchema, 7 | ListToolsRequestSchema, 8 | Tool, 9 | } from "@modelcontextprotocol/sdk/types.js"; 10 | 11 | // Type definitions for tool arguments 12 | interface ListChannelsArgs { 13 | limit?: number; 14 | cursor?: string; 15 | } 16 | 17 | interface PostMessageArgs { 18 | channel_id: string; 19 | text: string; 20 | } 21 | 22 | interface ReplyToThreadArgs { 23 | channel_id: string; 24 | thread_ts: string; 25 | text: string; 26 | } 27 | 28 | interface AddReactionArgs { 29 | channel_id: string; 30 | timestamp: string; 31 | reaction: string; 32 | } 33 | 34 | interface GetChannelHistoryArgs { 35 | channel_id: string; 36 | limit?: number; 37 | } 38 | 39 | interface GetThreadRepliesArgs { 40 | channel_id: string; 41 | thread_ts: string; 42 | } 43 | 44 | interface GetUsersArgs { 45 | cursor?: string; 46 | limit?: number; 47 | } 48 | 49 | interface GetUserProfileArgs { 50 | user_id: string; 51 | } 52 | 53 | // Tool definitions 54 | const listChannelsTool: Tool = { 55 | name: "slack_list_channels", 56 | description: "List public channels in the workspace with pagination", 57 | inputSchema: { 58 | type: "object", 59 | properties: { 60 | limit: { 61 | type: "number", 62 | description: 63 | "Maximum number of channels to return (default 100, max 200)", 64 | default: 100, 65 | }, 66 | cursor: { 67 | type: "string", 68 | description: "Pagination cursor for next page of results", 69 | }, 70 | }, 71 | }, 72 | }; 73 | 74 | const postMessageTool: Tool = { 75 | name: "slack_post_message", 76 | description: "Post a new message to a Slack channel", 77 | inputSchema: { 78 | 
type: "object", 79 | properties: { 80 | channel_id: { 81 | type: "string", 82 | description: "The ID of the channel to post to", 83 | }, 84 | text: { 85 | type: "string", 86 | description: "The message text to post", 87 | }, 88 | }, 89 | required: ["channel_id", "text"], 90 | }, 91 | }; 92 | 93 | const replyToThreadTool: Tool = { 94 | name: "slack_reply_to_thread", 95 | description: "Reply to a specific message thread in Slack", 96 | inputSchema: { 97 | type: "object", 98 | properties: { 99 | channel_id: { 100 | type: "string", 101 | description: "The ID of the channel containing the thread", 102 | }, 103 | thread_ts: { 104 | type: "string", 105 | description: "The timestamp of the parent message", 106 | }, 107 | text: { 108 | type: "string", 109 | description: "The reply text", 110 | }, 111 | }, 112 | required: ["channel_id", "thread_ts", "text"], 113 | }, 114 | }; 115 | 116 | const addReactionTool: Tool = { 117 | name: "slack_add_reaction", 118 | description: "Add a reaction emoji to a message", 119 | inputSchema: { 120 | type: "object", 121 | properties: { 122 | channel_id: { 123 | type: "string", 124 | description: "The ID of the channel containing the message", 125 | }, 126 | timestamp: { 127 | type: "string", 128 | description: "The timestamp of the message to react to", 129 | }, 130 | reaction: { 131 | type: "string", 132 | description: "The name of the emoji reaction (without ::)", 133 | }, 134 | }, 135 | required: ["channel_id", "timestamp", "reaction"], 136 | }, 137 | }; 138 | 139 | const getChannelHistoryTool: Tool = { 140 | name: "slack_get_channel_history", 141 | description: "Get recent messages from a channel", 142 | inputSchema: { 143 | type: "object", 144 | properties: { 145 | channel_id: { 146 | type: "string", 147 | description: "The ID of the channel", 148 | }, 149 | limit: { 150 | type: "number", 151 | description: "Number of messages to retrieve (default 10)", 152 | default: 10, 153 | }, 154 | }, 155 | required: ["channel_id"], 156 | }, 157 | 
}; 158 | 159 | const getThreadRepliesTool: Tool = { 160 | name: "slack_get_thread_replies", 161 | description: "Get all replies in a message thread", 162 | inputSchema: { 163 | type: "object", 164 | properties: { 165 | channel_id: { 166 | type: "string", 167 | description: "The ID of the channel containing the thread", 168 | }, 169 | thread_ts: { 170 | type: "string", 171 | description: "The timestamp of the parent message", 172 | }, 173 | }, 174 | required: ["channel_id", "thread_ts"], 175 | }, 176 | }; 177 | 178 | const getUsersTool: Tool = { 179 | name: "slack_get_users", 180 | description: 181 | "Get a list of all users in the workspace with their basic profile information", 182 | inputSchema: { 183 | type: "object", 184 | properties: { 185 | cursor: { 186 | type: "string", 187 | description: "Pagination cursor for next page of results", 188 | }, 189 | limit: { 190 | type: "number", 191 | description: "Maximum number of users to return (default 100, max 200)", 192 | default: 100, 193 | }, 194 | }, 195 | }, 196 | }; 197 | 198 | const getUserProfileTool: Tool = { 199 | name: "slack_get_user_profile", 200 | description: "Get detailed profile information for a specific user", 201 | inputSchema: { 202 | type: "object", 203 | properties: { 204 | user_id: { 205 | type: "string", 206 | description: "The ID of the user", 207 | }, 208 | }, 209 | required: ["user_id"], 210 | }, 211 | }; 212 | 213 | class SlackClient { 214 | private botHeaders: { Authorization: string; "Content-Type": string }; 215 | 216 | constructor(botToken: string) { 217 | this.botHeaders = { 218 | Authorization: `Bearer ${botToken}`, 219 | "Content-Type": "application/json", 220 | }; 221 | } 222 | 223 | async getChannels(limit: number = 100, cursor?: string): Promise { 224 | const params = new URLSearchParams({ 225 | types: "public_channel", 226 | exclude_archived: "true", 227 | limit: Math.min(limit, 200).toString(), 228 | team_id: process.env.SLACK_TEAM_ID!, 229 | }); 230 | 231 | if (cursor) { 232 | 
params.append("cursor", cursor); 233 | } 234 | 235 | const response = await fetch( 236 | `https://slack.com/api/conversations.list?${params}`, 237 | { headers: this.botHeaders }, 238 | ); 239 | 240 | return response.json(); 241 | } 242 | 243 | async postMessage(channel_id: string, text: string): Promise { 244 | const response = await fetch("https://slack.com/api/chat.postMessage", { 245 | method: "POST", 246 | headers: this.botHeaders, 247 | body: JSON.stringify({ 248 | channel: channel_id, 249 | text: text, 250 | }), 251 | }); 252 | 253 | return response.json(); 254 | } 255 | 256 | async postReply( 257 | channel_id: string, 258 | thread_ts: string, 259 | text: string, 260 | ): Promise { 261 | const response = await fetch("https://slack.com/api/chat.postMessage", { 262 | method: "POST", 263 | headers: this.botHeaders, 264 | body: JSON.stringify({ 265 | channel: channel_id, 266 | thread_ts: thread_ts, 267 | text: text, 268 | }), 269 | }); 270 | 271 | return response.json(); 272 | } 273 | 274 | async addReaction( 275 | channel_id: string, 276 | timestamp: string, 277 | reaction: string, 278 | ): Promise { 279 | const response = await fetch("https://slack.com/api/reactions.add", { 280 | method: "POST", 281 | headers: this.botHeaders, 282 | body: JSON.stringify({ 283 | channel: channel_id, 284 | timestamp: timestamp, 285 | name: reaction, 286 | }), 287 | }); 288 | 289 | return response.json(); 290 | } 291 | 292 | async getChannelHistory( 293 | channel_id: string, 294 | limit: number = 10, 295 | ): Promise { 296 | const params = new URLSearchParams({ 297 | channel: channel_id, 298 | limit: limit.toString(), 299 | }); 300 | 301 | const response = await fetch( 302 | `https://slack.com/api/conversations.history?${params}`, 303 | { headers: this.botHeaders }, 304 | ); 305 | 306 | return response.json(); 307 | } 308 | 309 | async getThreadReplies(channel_id: string, thread_ts: string): Promise { 310 | const params = new URLSearchParams({ 311 | channel: channel_id, 312 | ts: 
thread_ts, 313 | }); 314 | 315 | const response = await fetch( 316 | `https://slack.com/api/conversations.replies?${params}`, 317 | { headers: this.botHeaders }, 318 | ); 319 | 320 | return response.json(); 321 | } 322 | 323 | async getUsers(limit: number = 100, cursor?: string): Promise { 324 | const params = new URLSearchParams({ 325 | limit: Math.min(limit, 200).toString(), 326 | team_id: process.env.SLACK_TEAM_ID!, 327 | }); 328 | 329 | if (cursor) { 330 | params.append("cursor", cursor); 331 | } 332 | 333 | const response = await fetch(`https://slack.com/api/users.list?${params}`, { 334 | headers: this.botHeaders, 335 | }); 336 | 337 | return response.json(); 338 | } 339 | 340 | async getUserProfile(user_id: string): Promise { 341 | const params = new URLSearchParams({ 342 | user: user_id, 343 | include_labels: "true", 344 | }); 345 | 346 | const response = await fetch( 347 | `https://slack.com/api/users.profile.get?${params}`, 348 | { headers: this.botHeaders }, 349 | ); 350 | 351 | return response.json(); 352 | } 353 | } 354 | 355 | async function main() { 356 | const botToken = process.env.SLACK_BOT_TOKEN; 357 | const teamId = process.env.SLACK_TEAM_ID; 358 | 359 | if (!botToken || !teamId) { 360 | console.error( 361 | "Please set SLACK_BOT_TOKEN and SLACK_TEAM_ID environment variables", 362 | ); 363 | process.exit(1); 364 | } 365 | 366 | console.error("Starting Slack MCP Server..."); 367 | const server = new Server( 368 | { 369 | name: "Slack MCP Server", 370 | version: "1.0.0", 371 | }, 372 | { 373 | capabilities: { 374 | tools: {}, 375 | }, 376 | }, 377 | ); 378 | 379 | const slackClient = new SlackClient(botToken); 380 | 381 | server.setRequestHandler( 382 | CallToolRequestSchema, 383 | async (request: CallToolRequest) => { 384 | console.error("Received CallToolRequest:", request); 385 | try { 386 | if (!request.params.arguments) { 387 | throw new Error("No arguments provided"); 388 | } 389 | 390 | switch (request.params.name) { 391 | case 
"slack_list_channels": { 392 | const args = request.params 393 | .arguments as unknown as ListChannelsArgs; 394 | const response = await slackClient.getChannels( 395 | args.limit, 396 | args.cursor, 397 | ); 398 | return { 399 | content: [{ type: "text", text: JSON.stringify(response) }], 400 | }; 401 | } 402 | 403 | case "slack_post_message": { 404 | const args = request.params.arguments as unknown as PostMessageArgs; 405 | if (!args.channel_id || !args.text) { 406 | throw new Error( 407 | "Missing required arguments: channel_id and text", 408 | ); 409 | } 410 | const response = await slackClient.postMessage( 411 | args.channel_id, 412 | args.text, 413 | ); 414 | return { 415 | content: [{ type: "text", text: JSON.stringify(response) }], 416 | }; 417 | } 418 | 419 | case "slack_reply_to_thread": { 420 | const args = request.params 421 | .arguments as unknown as ReplyToThreadArgs; 422 | if (!args.channel_id || !args.thread_ts || !args.text) { 423 | throw new Error( 424 | "Missing required arguments: channel_id, thread_ts, and text", 425 | ); 426 | } 427 | const response = await slackClient.postReply( 428 | args.channel_id, 429 | args.thread_ts, 430 | args.text, 431 | ); 432 | return { 433 | content: [{ type: "text", text: JSON.stringify(response) }], 434 | }; 435 | } 436 | 437 | case "slack_add_reaction": { 438 | const args = request.params.arguments as unknown as AddReactionArgs; 439 | if (!args.channel_id || !args.timestamp || !args.reaction) { 440 | throw new Error( 441 | "Missing required arguments: channel_id, timestamp, and reaction", 442 | ); 443 | } 444 | const response = await slackClient.addReaction( 445 | args.channel_id, 446 | args.timestamp, 447 | args.reaction, 448 | ); 449 | return { 450 | content: [{ type: "text", text: JSON.stringify(response) }], 451 | }; 452 | } 453 | 454 | case "slack_get_channel_history": { 455 | const args = request.params 456 | .arguments as unknown as GetChannelHistoryArgs; 457 | if (!args.channel_id) { 458 | throw new 
Error("Missing required argument: channel_id"); 459 | } 460 | const response = await slackClient.getChannelHistory( 461 | args.channel_id, 462 | args.limit, 463 | ); 464 | return { 465 | content: [{ type: "text", text: JSON.stringify(response) }], 466 | }; 467 | } 468 | 469 | case "slack_get_thread_replies": { 470 | const args = request.params 471 | .arguments as unknown as GetThreadRepliesArgs; 472 | if (!args.channel_id || !args.thread_ts) { 473 | throw new Error( 474 | "Missing required arguments: channel_id and thread_ts", 475 | ); 476 | } 477 | const response = await slackClient.getThreadReplies( 478 | args.channel_id, 479 | args.thread_ts, 480 | ); 481 | return { 482 | content: [{ type: "text", text: JSON.stringify(response) }], 483 | }; 484 | } 485 | 486 | case "slack_get_users": { 487 | const args = request.params.arguments as unknown as GetUsersArgs; 488 | const response = await slackClient.getUsers( 489 | args.limit, 490 | args.cursor, 491 | ); 492 | return { 493 | content: [{ type: "text", text: JSON.stringify(response) }], 494 | }; 495 | } 496 | 497 | case "slack_get_user_profile": { 498 | const args = request.params 499 | .arguments as unknown as GetUserProfileArgs; 500 | if (!args.user_id) { 501 | throw new Error("Missing required argument: user_id"); 502 | } 503 | const response = await slackClient.getUserProfile(args.user_id); 504 | return { 505 | content: [{ type: "text", text: JSON.stringify(response) }], 506 | }; 507 | } 508 | 509 | default: 510 | throw new Error(`Unknown tool: ${request.params.name}`); 511 | } 512 | } catch (error) { 513 | console.error("Error executing tool:", error); 514 | return { 515 | content: [ 516 | { 517 | type: "text", 518 | text: JSON.stringify({ 519 | error: error instanceof Error ? 
error.message : String(error), 520 | }), 521 | }, 522 | ], 523 | }; 524 | } 525 | }, 526 | ); 527 | 528 | server.setRequestHandler(ListToolsRequestSchema, async () => { 529 | console.error("Received ListToolsRequest"); 530 | return { 531 | tools: [ 532 | listChannelsTool, 533 | postMessageTool, 534 | replyToThreadTool, 535 | addReactionTool, 536 | getChannelHistoryTool, 537 | getThreadRepliesTool, 538 | getUsersTool, 539 | getUserProfileTool, 540 | ], 541 | }; 542 | }); 543 | 544 | const transport = new StdioServerTransport(); 545 | console.error("Connecting server to transport..."); 546 | await server.connect(transport); 547 | 548 | console.error("Slack MCP Server running on stdio"); 549 | } 550 | 551 | main().catch((error) => { 552 | console.error("Fatal error in main():", error); 553 | process.exit(1); 554 | }); 555 | -------------------------------------------------------------------------------- /src/slack/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@modelcontextprotocol/server-slack", 3 | "version": "0.5.1", 4 | "description": "MCP server for interacting with Slack", 5 | "license": "MIT", 6 | "author": "Anthropic, PBC (https://anthropic.com)", 7 | "homepage": "https://modelcontextprotocol.io", 8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues", 9 | "type": "module", 10 | "bin": { 11 | "mcp-server-slack": "dist/index.js" 12 | }, 13 | "files": [ 14 | "dist" 15 | ], 16 | "scripts": { 17 | "build": "tsc && shx chmod +x dist/*.js", 18 | "prepare": "npm run build", 19 | "watch": "tsc --watch" 20 | }, 21 | "dependencies": { 22 | "@modelcontextprotocol/sdk": "0.6.0" 23 | }, 24 | "devDependencies": { 25 | "@types/node": "^22.9.3", 26 | "shx": "^0.3.4", 27 | "typescript": "^5.6.2" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/slack/tsconfig.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "extends": "../../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "." 6 | }, 7 | "include": [ 8 | "./**/*.ts" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /src/sqlite/.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /src/sqlite/README.md: -------------------------------------------------------------------------------- 1 | # SQLite MCP Server 2 | 3 | ## Overview 4 | A Model Context Protocol (MCP) server implementation that provides database interaction and business intelligence capabilities through SQLite. This server enables running SQL queries, analyzing business data, and automatically generating business insight memos that can be enhanced with Claude's analysis when an Anthropic API key is provided. 
5 | 6 | ## Components 7 | 8 | ### Resources 9 | The server exposes a single dynamic resource: 10 | - `memo://insights`: A continuously updated business insights memo that aggregates discovered insights during analysis 11 | - Auto-updates as new insights are discovered via the append-insight tool 12 | - Optional enhancement through Claude for professional formatting (requires Anthropic API key) 13 | 14 | ### Prompts 15 | The server provides a demonstration prompt: 16 | - `mcp-demo`: Interactive prompt that guides users through database operations 17 | - Required argument: `topic` - The business domain to analyze 18 | - Generates appropriate database schemas and sample data 19 | - Guides users through analysis and insight generation 20 | - Integrates with the business insights memo 21 | 22 | ### Tools 23 | The server offers six core tools: 24 | 25 | #### Query Tools 26 | - `read-query` 27 | - Execute SELECT queries to read data from the database 28 | - Input: 29 | - `query` (string): The SELECT SQL query to execute 30 | - Returns: Query results as array of objects 31 | 32 | - `write-query` 33 | - Execute INSERT, UPDATE, or DELETE queries 34 | - Input: 35 | - `query` (string): The SQL modification query 36 | - Returns: `{ affected_rows: number }` 37 | 38 | - `create-table` 39 | - Create new tables in the database 40 | - Input: 41 | - `query` (string): CREATE TABLE SQL statement 42 | - Returns: Confirmation of table creation 43 | 44 | #### Schema Tools 45 | - `list-tables` 46 | - Get a list of all tables in the database 47 | - No input required 48 | - Returns: Array of table names 49 | 50 | - `describe-table` 51 | - View schema information for a specific table 52 | - Input: 53 | - `table_name` (string): Name of table to describe 54 | - Returns: Array of column definitions with names and types 55 | 56 | #### Analysis Tools 57 | - `append-insight` 58 | - Add new business insights to the memo resource 59 | - Input: 60 | - `insight` (string): Business insight discovered 
from data analysis 61 | - Returns: Confirmation of insight addition 62 | - Triggers update of memo://insights resource 63 | 64 | 65 | ## Usage with Claude Desktop 66 | 67 | ```bash 68 | # Add the server to your claude_desktop_config.json 69 | "mcpServers": { 70 | "sqlite": { 71 | "command": "uv", 72 | "args": [ 73 | "--directory", 74 | "parent_of_servers_repo/servers/src/sqlite", 75 | "run", 76 | "mcp-server-sqlite", 77 | "--db-path", 78 | "~/test.db" 79 | ] 80 | } 81 | } 82 | ``` 83 | 84 | ## License 85 | 86 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository. 87 | -------------------------------------------------------------------------------- /src/sqlite/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server-sqlite" 3 | version = "0.5.1" 4 | description = "A simple SQLite MCP server" 5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = ["mcp>=0.9.1"] 8 | 9 | [build-system] 10 | requires = ["hatchling"] 11 | build-backend = "hatchling.build" 12 | 13 | [tool.uv] 14 | dev-dependencies = ["pyright>=1.1.389"] 15 | 16 | [project.scripts] 17 | mcp-server-sqlite = "mcp_server_sqlite:main" 18 | -------------------------------------------------------------------------------- /src/sqlite/src/mcp_server_sqlite/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import server 2 | import asyncio 3 | import argparse 4 | import os 5 | 6 | 7 | def main(): 8 | """Main entry point for the package.""" 9 | parser = argparse.ArgumentParser(description='SQLite MCP Server') 10 | parser.add_argument('--db-path', 11 | default="./sqlite_mcp_server.db", 12 | help='Path to SQLite database file') 13 | 14 | args = parser.parse_args() 15 | asyncio.run(server.main(args.db_path)) 16 | 17 | 18 | # Optionally expose other important items at package level 19 | __all__ = ["main", "server"] 20 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2022", 4 | "module": "Node16", 5 | "moduleResolution": "Node16", 6 | "strict": true, 7 | "esModuleInterop": true, 8 | "skipLibCheck": true, 9 | "forceConsistentCasingInFileNames": true, 10 | "resolveJsonModule": true 11 | }, 12 | "include": ["src/**/*"], 13 | "exclude": ["node_modules"] 14 | } 15 | --------------------------------------------------------------------------------