├── .dockerignore
├── .env.example
├── .env.test
├── .github
├── FUNDING.yml
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── ci.yaml
│ ├── docs
│ └── release-checklist.md
│ └── publish.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.MD
├── CONTRIBUTING.MD
├── Dockerfile
├── LICENSE
├── README.md
├── llms-full.txt
├── pyproject.toml
├── smithery.yaml
├── supabase_mcp
├── __init__.py
├── api_manager
│ ├── __init__.py
│ ├── api_manager.py
│ ├── api_safety_config.py
│ ├── api_spec_manager.py
│ └── specs
│ │ └── api_spec.json
├── db_client
│ ├── __init__.py
│ ├── db_client.py
│ └── db_safety_config.py
├── exceptions.py
├── logger.py
├── main.py
├── queries.py
├── sdk_client
│ ├── __init__.py
│ ├── auth_admin_models.py
│ ├── auth_admin_sdk_spec.py
│ └── python_client.py
├── settings.py
└── validators.py
├── tests
├── __init__.py
├── api_manager
│ ├── test_api_manager.py
│ ├── test_safety_config.py
│ └── test_spec_manager.py
├── conftest.py
├── sdk_client
│ ├── test_auth_admin_models.py
│ ├── test_python_client.py
│ └── test_sdk_client_integration.py
├── test_db_client.py
├── test_main.py
└── test_settings.py
└── uv.lock
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | build/
8 | develop-eggs/
9 | dist/
10 | downloads/
11 | eggs/
12 | .eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | wheels/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 | .pytest_cache/
23 | .coverage
24 | htmlcov/
25 | .tox/
26 | .nox/
27 |
28 | # Virtual Environment
29 | .env
30 | .venv
31 | env/
32 | venv/
33 | ENV/
34 | env.bak/
35 | venv.bak/
36 |
37 | # macOS
38 | .DS_Store
39 | .AppleDouble
40 | .LSOverride
41 | Icon
42 | ._*
43 | .DocumentRevisions-V100
44 | .fseventsd
45 | .Spotlight-V100
46 | .TemporaryItems
47 | .Trashes
48 | .VolumeIcon.icns
49 | .com.apple.timemachine.donotpresent
50 |
51 | # IDEs and Editors
52 | .idea/
53 | .vscode/
54 | *.swp
55 | *.swo
56 | *~
57 | .project
58 | .classpath
59 | .settings/
60 | *.sublime-workspace
61 | *.sublime-project
62 |
63 | # Local development
64 | .env.mcp
65 | .env.mcp2
66 | *.log
67 | logs/
68 |
69 | # Ignore local assets
70 | assets/
71 | *.gif
72 | *.mp4
73 |
74 | # Generated version file
75 | supabase_mcp/_version.py
76 |
77 | # Docs
78 | .llms-full.txt
79 |
80 | # Docker specific ignores
81 | Dockerfile
82 | .dockerignore
83 | docker-compose.yml
84 | docker-compose.yaml
85 |
86 | # Git
87 | .git/
88 | .github/
89 | .gitignore
90 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # .env.mcp.example
2 | SUPABASE_PROJECT_REF=your-project-ref # default is local supabase project ref if not set
3 | SUPABASE_DB_PASSWORD=your-db-password # default is local supabase db password if not set
4 |
--------------------------------------------------------------------------------
/.env.test:
--------------------------------------------------------------------------------
1 | SUPABASE_PROJECT_REF=test-project-ref
2 | SUPABASE_DB_PASSWORD=test-db-password
3 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: alexander-zuev
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Report an issue with the server
4 | title: "An issue with doing X when Y under conditions Z"
5 | labels: bug
6 | assignees: alexander-zuev
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **Steps to Reproduce**
14 |
15 | 1.
16 | 2.
17 | 3.
18 |
19 | **Connection Details**
20 |
21 | - Connection type:
22 | - Using password with special characters?
23 |
24 |
25 | **Screenshots**
26 | If applicable, add screenshots to help explain your problem.
27 |
28 | **Logs**
29 |
37 |
38 | **Additional context**
39 | Add any other context about the problem here.
40 |
41 | **Checklist**
42 |
43 | - [ ] I've included the server logs
44 | - [ ] I've checked the README troubleshooting section
45 | - [ ] I've verified my connection settings are correct
46 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea to improve this MCP server
4 | title: "I want X so that I can do Y and gain Z"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Description
2 |
3 |
4 |
5 | ## Type of Change
6 |
7 | - [ ] Bug fix (non-breaking change which fixes an issue)
8 | - [ ] New feature (non-breaking change which adds functionality)
9 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
10 | - [ ] Documentation update
11 | - [ ] Performance improvement
12 | - [ ] Code refactoring (no functional changes)
13 | - [ ] Test updates
14 | - [ ] CI/CD or build process changes
15 | - [ ] Other (please describe):
16 |
17 | ## Checklist
18 | - [ ] I have performed a self-review of my own code
19 | - [ ] I have made corresponding changes to the documentation
20 | - [ ] New and existing unit tests pass locally with my changes
21 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 |
10 | env:
11 | UV_VERSION: "0.6.1" # Pin uv version to avoid breaking changes
12 |
13 | jobs:
14 | test:
15 | runs-on: ubuntu-latest
16 | env:
17 | SUPABASE_PROJECT_REF: ${{ secrets.SUPABASE_PROJECT_REF }}
18 | SUPABASE_DB_PASSWORD: ${{ secrets.SUPABASE_DB_PASSWORD }}
19 | SUPABASE_ACCESS_TOKEN: ${{ secrets.SUPABASE_ACCESS_TOKEN }}
20 | steps:
21 | - uses: actions/checkout@v4
22 |
23 | - name: Set up Python 3.12
24 | uses: actions/setup-python@v5
25 | with:
26 | python-version: "3.12"
27 |
28 | - name: Install uv
29 | uses: astral-sh/setup-uv@v5
30 | with:
31 | version: ${{ env.UV_VERSION }}
32 |
33 | - name: Create venv and install dependencies
34 | run: |
35 | # Create venv and install dependencies
36 | uv venv
37 | source .venv/bin/activate
38 | uv sync --group dev --frozen
39 |
40 | - name: Run tests
41 | run: |
42 | source .venv/bin/activate
43 | pytest
44 |
45 | - name: Build distribution packages
46 | run: |
47 | uv build --no-sources
48 | # Verify dist contains both wheel and tar.gz
49 | test -f dist/*.whl
50 | test -f dist/*.tar.gz
51 |
--------------------------------------------------------------------------------
/.github/workflows/docs/release-checklist.md:
--------------------------------------------------------------------------------
1 | # Release Checklist
2 |
3 | Pre-release
4 | 1. Tests pass
5 | 2. CI passes
6 | 3. Build succeeds
7 | 4. Clean install succeeds
8 | 5. Documentation is up to date
9 | 6. Changelog is up to date
10 | 7. Tag and release on GitHub
11 | 8. Release is published to PyPI
12 |
13 | Post-release
14 | - Clean install from PyPI works
15 |
16 |
17 |
18 | ## v0.3.0 - 2025-02-22
19 |
20 | 1. Tests pass - [X]
21 | 2. CI passes - [X]
22 | 3. Build succeeds - [X]
23 | 4. Clean install succeeds - [X]
24 | 5. Documentation is up to date - [X]
25 | 6. Changelog is up to date - [X]
26 | 7. Tag and release on GitHub - [X]
27 | 8. Release is published to PyPI - [X]
28 | 9. Clean install from PyPI works - [X]
29 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yaml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI
2 |
3 | on:
4 | release:
5 | types: [published]
6 | branches: [main] # Only trigger for releases from main
7 |
8 | env:
9 | UV_VERSION: "0.6.0" # Pin uv version to avoid breaking changes
10 |
11 | jobs:
12 | build-and-publish:
13 | runs-on: ubuntu-latest
14 | environment:
15 | name: pypi
16 | url: https://pypi.org/project/supabase-mcp-server/
17 | permissions:
18 | id-token: write # Required for trusted publishing
19 | contents: read
20 |
21 | steps:
22 | - uses: actions/checkout@v4
23 | with:
24 | fetch-depth: 0 # Required for proper version detection
25 |
26 | - name: Install uv
27 | uses: astral-sh/setup-uv@v5
28 | with:
29 | version: ${{ env.UV_VERSION }}
30 |
31 | - name: Set up Python
32 | uses: actions/setup-python@v5
33 | with:
34 | python-version: "3.12"
35 |
36 | - name: Build package
37 | run: uv build --no-sources
38 |
39 | - name: Verify package installation and entry points
40 | env:
41 | SUPABASE_PROJECT_REF: ${{ secrets.SUPABASE_PROJECT_REF }}
42 | SUPABASE_DB_PASSWORD: ${{ secrets.SUPABASE_DB_PASSWORD }}
43 | run: |
44 | # Create a new venv for testing
45 | uv venv
46 | source .venv/bin/activate
47 |
48 | # Install the built wheel
49 | uv pip install dist/*.whl
50 |
51 | echo "Testing supabase-mcp-server entry point..."
52 | # Run with --help to test basic functionality without needing actual connection
53 | if ! uv run supabase-mcp-server --help; then
54 | echo "❌ supabase-mcp-server --help failed"
55 | exit 1
56 | fi
57 | echo "✅ supabase-mcp-server --help succeeded"
58 |
59 | - name: Publish to PyPI
60 | uses: pypa/gh-action-pypi-publish@release/v1
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | build/
8 | develop-eggs/
9 | dist/
10 | downloads/
11 | eggs/
12 | .eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | wheels/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 | .pytest_cache/
23 | .coverage
24 | htmlcov/
25 | .tox/
26 | .nox/
27 |
28 | # Virtual Environment
29 | .env
30 | .venv
31 | env/
32 | venv/
33 | ENV/
34 | env.bak/
35 | venv.bak/
36 |
37 | # macOS
38 | .DS_Store
39 | .AppleDouble
40 | .LSOverride
41 | Icon
42 | ._*
43 | .DocumentRevisions-V100
44 | .fseventsd
45 | .Spotlight-V100
46 | .TemporaryItems
47 | .Trashes
48 | .VolumeIcon.icns
49 | .com.apple.timemachine.donotpresent
50 |
51 | # IDEs and Editors
52 | .idea/
53 | .vscode/
54 | *.swp
55 | *.swo
56 | *~
57 | .project
58 | .classpath
59 | .settings/
60 | *.sublime-workspace
61 | *.sublime-project
62 |
63 | # Local development
64 | .env.mcp
65 | .env.mcp2
66 | *.log
67 | logs/
68 |
69 |
70 | # Ignore local assets
71 | assets/
72 | *.gif
73 | *.mp4
74 |
75 | # Generated version file
76 | supabase_mcp/_version.py
77 |
78 | # Docs
79 | .llms-full.txt
80 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | # === Syntax & Basic Checks ===
3 | - repo: https://github.com/pre-commit/pre-commit-hooks
4 | rev: v5.0.0
5 | hooks:
6 | - id: check-ast
7 | name: Validate Python syntax
8 | - id: check-toml
9 | name: Validate TOML files
10 | - id: mixed-line-ending
11 | name: Normalize line endings
12 | args: ['--fix=lf']
13 | - id: trailing-whitespace
14 | name: Remove trailing whitespace
15 | - id: end-of-file-fixer
16 | name: Ensure file ends with newline
17 |
18 | # === Security ===
19 | - repo: https://github.com/pre-commit/pre-commit-hooks
20 | rev: v5.0.0
21 | hooks:
22 | - id: detect-private-key
23 | name: Check for private keys
24 | stages: [pre-commit, pre-push, manual]
25 | - id: check-merge-conflict
26 | name: Check for merge conflicts
27 | stages: [pre-commit, manual]
28 | - id: debug-statements
29 | name: Check for debugger imports
30 | stages: [pre-commit, manual]
31 |
32 | # === Type Checking ===
33 |
34 | - repo: https://github.com/pre-commit/mirrors-mypy
35 | rev: "v1.15.0"
36 | hooks:
37 | - id: mypy
38 | name: Run mypy type checker
39 | args: [
40 | "--config-file=pyproject.toml",
41 | "--show-error-codes",
42 | "--pretty",
43 | ]
44 | additional_dependencies: [
45 | "types-requests",
46 | "types-aiofiles",
47 | "types-pytz",
48 | "pydantic",
49 | "chainlit",
50 | "anthropic",
51 | "fastapi",
52 | "httpx",
53 | "tiktoken",
54 | "weave",
55 | "chromadb",
56 | "cohere",
57 | "langchain"
58 | ]
59 | entry: bash -c 'mypy "$@" || true' --
60 |
61 | # === Code Quality & Style ===
62 | - repo: https://github.com/astral-sh/ruff-pre-commit
63 | rev: v0.9.7
64 | hooks:
65 | - id: ruff
66 | name: Run Ruff linter
67 | args: [
68 | --fix,
69 | --exit-zero,
70 | --quiet,
71 | ]
72 | types_or: [python, pyi, jupyter]
73 | files: ^(src|tests)/
74 | exclude: ^src/experimental/
75 | verbose: false
76 | - id: ruff-format
77 | name: Run Ruff formatter
78 | types_or: [python, pyi, jupyter]
79 |
80 | # === Documentation Checks ===
81 | - repo: https://github.com/tcort/markdown-link-check
82 | rev: v3.13.6
83 | hooks:
84 | - id: markdown-link-check
85 | name: Check Markdown links
86 | description: Extracts links from markdown texts and checks they're all alive
87 | stages: [pre-commit, pre-push, manual]
88 |
89 | # === Testing ===
90 | - repo: local
91 | hooks:
92 | - id: pytest
93 | name: Run tests
94 | entry: pytest
95 | language: system
96 | types: [python]
97 | pass_filenames: false
98 | args: [
99 | "--no-header", ]
100 | stages: [pre-commit, pre-push]
101 |
102 | # === Build Check ===
103 | - repo: local
104 | hooks:
105 | - id: build-check
106 | name: Check build
107 | entry: uv build
108 | language: system
109 | pass_filenames: false
110 | stages: [pre-commit, pre-push]
111 | - id: version-check
112 | name: Check package version
113 | # Print version from the built package
114 | entry: python -c "from supabase_mcp import __version__; print('📦 Package version:', __version__)"
115 | language: system
116 | verbose: true
117 | pass_filenames: false
118 | stages: [pre-commit, pre-push]
119 |
--------------------------------------------------------------------------------
/CHANGELOG.MD:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
6 |
7 |
8 | ## [0.3.6] - 2025-02-26
9 | ### Added
10 | - Added `call_auth_admin_method` which enables MCP server to manage users in your database (create, update, delete, confirm). All Auth SDK methods are supported
11 | - Added `get_auth_admin_methods_spec` to retrieve documentation for all available Auth Admin methods. Response objects now use attribute access (dot notation) instead of dictionary access.
12 |
13 | ### Fixed
14 | - Fixed an issue with improper encoding of database passwords. Previously passwords containing "%" symbol led to connection failures
15 |
16 |
17 | ## [0.3.5] - 2025-02-26
18 | ### Fixed
19 | - Fixed an issue with `get_tables` so that it reliably returns foreign tables and views
20 | - Updated docs to describe how to setup mcp.json with project-specific MCPs
21 | - Expanded and improved test suite to cover each MCP tool
22 |
23 |
24 | ## [0.3.4] - 2025-02-25
25 | ### Fixed
26 | - Improved `get_tables` to return foreign data tables
27 |
28 |
29 | ## [0.3.3] - 2025-02-25
30 | ### Fixed
31 | - Fixed a bug with `readonly` scope being incorrectly managed in db client
32 |
33 | ## [0.3.2] - 2025-02-25
34 | ### Fixed
35 | - Fixed a bug preventing execution of DDL commands (create, alter tables, etc.)
36 |
37 | ## [0.3.1] - 2025-02-23
38 | ### Changed
39 | - Significantly improved docs to make install, configuration, usage instructions super clear
40 |
41 |
42 | ## [0.3.0] - 2025-02-23
43 | ### Added
44 | - Full support for read-write SQL operations:
45 | - Implemented safety mode system with read-only (default) and read-write modes
46 | - Added mode switching with automatic reset to read-only
47 | - Enhanced transaction support for testing write operations
48 | - Improved error handling for read-only violations
49 | - Support for Supabase Management API
50 | - Introduces supabase management API integration with safe (enabled by default) and yolo modes
51 | - Includes the following tools:
52 | - `send_management_api_request` to send arbitrary requests to Supabase Management API, with auto-injection of project ref and safety mode control.
53 | - `get_management_api_spec` to get the enriched API specification with safety information
54 | - `get_management_api_safety_rules` to get all safety rules including blocked and unsafe operations with human-readable explanations
55 | - `live_dangerously` to switch to yolo mode
56 | - Safety features:
57 | - Divides API methods into `safe`, `unsafe` and `blocked` categories based on the risk of the operation
58 | - Allows to switch between safe and yolo modes dynamically
59 | - Blocked operations (delete project, delete database) are not allowed regardless of the mode
60 |
61 |
62 | ## [0.2.2] - 2025-02-20
63 | ### Added
64 | - Support for different Supabase regions:
65 | - Configuration via `SUPABASE_REGION` environment variable
66 | - Validation for all 16 supported AWS regions
67 | - Default to `us-east-1` for backward compatibility
68 | - Enhanced logging for region information
69 | - Comprehensive documentation and examples
70 |
71 | ## [0.2.1] - 2025-02-19
72 | ### Added
73 | - Package distribution support:
74 | - PyPI package publishing setup
75 | - Installation via `pipx` and `uv`
76 | - Entry point scripts for direct execution
77 | - Smithery.ai deployment configuration
78 |
79 | ### Changed
80 | - BREAKING: Installation and execution methods:
81 | - Switched from direct script execution to proper module structure
82 | - Updated Cursor/Windsurf configuration for package-based execution
83 | - Improved setup instructions in README
84 |
85 | ## [0.2.0] - 2025-02-18
86 | Intermediary release for package distribution support
87 |
88 | ## [0.1.0] - 2025-02-16
89 | ### Added
90 | - Initial release
91 | - Basic MCP server functionality
92 | - Supabase database connection support
93 | - Integration with Cursor and Windsurf IDEs
94 |
95 | [0.3.0]: https://github.com/alexander-zuev/supabase-mcp-server/releases/tag/v0.3.0
96 | [0.2.2]: https://github.com/alexander-zuev/supabase-mcp-server/releases/tag/v0.2.2
97 | [0.2.1]: https://github.com/alexander-zuev/supabase-mcp-server/releases/tag/v0.2.1
98 | [0.2.0]: https://github.com/alexander-zuev/supabase-mcp-server/releases/tag/v0.2.0-dev0
99 | [0.1.0]: https://github.com/alexander-zuev/supabase-mcp-server/releases/tag/v0.1.0
100 |
--------------------------------------------------------------------------------
/CONTRIBUTING.MD:
--------------------------------------------------------------------------------
1 | # Contributing to Supabase MCP Server
2 |
3 | Thank you for your interest in Supabase MCP Server. This project aims to maintain a high quality standard I've set for it. I welcome and carefully review all contributions. Please read the following guidelines carefully.
4 |
5 | ## 🤓 Important: Pre-Contribution Requirements
6 |
7 | 1. **Required: Open a Discussion First**
8 | - **All contributions** must start with a GitHub Discussion before any code is written
9 | - Explain your proposed changes, why they're needed, and how they align with the project's vision
10 | - Wait for explicit approval from the maintainer before proceeding
11 | - PRs without a prior approved discussion will be closed immediately without review
12 |
13 | 2. **Project Vision**
14 | - This project follows a specific development vision maintained by the owner
15 | - Not all feature ideas will be accepted, even if well-implemented
16 | - The maintainer reserves the right to decline contributions that don't align with the project's direction
17 |
18 | ## 🛠️ Contribution Process (Only After Discussion Approval)
19 |
20 | 1. **Fork the repository:** Click the "Fork" button in the top right corner of the GitHub page.
21 |
22 | 2. **Create a new branch:** Create a branch with a descriptive name related to your contribution.
23 | ```bash
24 | git checkout -b feature/your-approved-feature
25 | ```
26 |
27 | 3. **Quality Requirements:**
28 | - **Test Coverage:** All code changes must include appropriate tests
29 | - **Documentation:** Update all relevant documentation
30 | - **Code Style:** Follow the existing code style and patterns
31 | - **Commit Messages:** Use clear, descriptive commit messages
32 |
33 | 4. **Make your changes:** Implement the changes that were approved in the discussion.
34 |
35 | 5. **Test thoroughly:** Ensure all tests pass and add new tests for your changes.
36 | ```bash
37 | # Run tests
38 | pytest
39 | ```
40 |
41 | 6. **Commit your changes:** Use clear, descriptive commit messages that explain what you've done.
42 | ```bash
43 | git commit -m "feat: implement approved feature X"
44 | ```
45 |
46 | 7. **Push your branch:** Push your changes to your forked repository.
47 | ```bash
48 | git push origin feature/your-approved-feature
49 | ```
50 |
51 | 8. **Create a pull request:**
52 | - Go to the original repository on GitHub
53 | - Click "New Pull Request"
54 | - Select "compare across forks"
55 | - Select your fork and branch as the source
56 | - Add a detailed description that references the approved discussion
57 | - Include information about how you've tested the changes
58 | - Submit the pull request
59 |
60 | 9. **Review Process:**
61 | - PRs will be reviewed when time permits
62 | - Be prepared to make requested changes
63 | - The maintainer may request significant revisions
64 | - PRs may be rejected even after review if they don't meet quality standards
65 |
66 | ## ⚠️ Grounds for Immediate Rejection
67 |
68 | Your PR will be closed without review if:
69 | - No prior discussion was opened and approved
70 | - Tests are missing or failing
71 | - Documentation is not updated
72 | - Code quality doesn't meet project standards
73 | - PR description is inadequate
74 | - Changes don't align with the approved discussion
75 |
76 | ## 🤔 Why These Requirements?
77 |
78 | - This project is maintained by a single developer (me) with limited review time
79 | - Quality and consistency are prioritized over quantity of contributions
80 | - The project follows a specific vision that I want to maintain
81 |
82 | ## 🌟 Acceptable Contributions
83 |
84 | The following types of contributions are most welcome:
85 | - Bug fixes with clear reproduction steps
86 | - Performance improvements with benchmarks
87 | - Documentation improvements
88 | - New features that have been pre-approved via discussion
89 |
90 | ## 💡 Alternative Ways to Contribute
91 |
92 | If you have ideas but don't want to go through this process:
93 | - Fork the project and build your own version
94 | - Share your use case in Discussions
95 | - Report bugs with detailed reproduction steps
96 |
97 | Thank you for understanding and respecting these guidelines. They help maintain the quality and direction of the project.
98 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.13-slim-bookworm as builder
2 |
3 | FROM python:3.13-slim
4 |
5 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
6 |
7 |
8 | ENV SETUPTOOLS_SCM_PRETEND_VERSION_FOR_SUPABASE_MCP_SERVER=0.1.0
9 | ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.1.0
10 |
11 |
12 | # Set working directory
13 | WORKDIR /app
14 |
15 | # Install system dependencies for psycopg2
16 | RUN apt-get update && apt-get install -y \
17 | build-essential \
18 | libpq-dev \
19 | && apt-get clean \
20 | && rm -rf /var/lib/apt/lists/*
21 |
22 | # Copy project files
23 | COPY pyproject.toml .
24 | COPY smithery.yaml .
25 | COPY supabase_mcp/ ./supabase_mcp/
26 | COPY README.md .
27 |
28 | # Upgrade pip and install pipx
29 | RUN pip install --upgrade pip
30 | RUN pip install pipx
31 |
32 | # Add pipx binary directory to PATH
33 | ENV PATH="/root/.local/bin:$PATH"
34 |
35 | # Install project dependencies using uv
36 | RUN uv pip install --no-cache-dir --system .
37 |
38 | # Set environment variables (these will be overridden by Smithery.ai config)
39 | ENV SUPABASE_PROJECT_REF=""
40 | ENV SUPABASE_DB_PASSWORD=""
41 | ENV SUPABASE_REGION="us-east-1"
42 |
43 | # Expose any ports needed (if applicable)
44 | # This MCP server communicates via stdin/stdout according to smithery.yaml
45 |
46 | # Set the entrypoint to the command that Smithery expects
47 | ENTRYPOINT ["uv", "run", "supabase_mcp/main.py"]
48 |
49 | # Default command if no arguments are provided
50 | CMD ["--help"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [2025] [Alexander Zuev]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Supabase MCP Server
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Let Cursor & Windsurf manage your Supabase and run SQL queries. Autonomously. In a safe way.
19 |
20 |
21 | [](https://star-history.com/#alexander-zuev/supabase-mcp-server&Date)
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 | A feature-rich MCP server that enables Cursor and Windsurf to safely interact with Supabase databases. It provides tools for database management, SQL query execution, and Supabase Management API access with built-in safety controls.
35 |
36 | ## Table of contents
37 |
38 | Getting started •
39 | Feature overview •
40 | Troubleshooting •
41 | Roadmap
42 |
43 |
44 | ## ✨ Key features
45 | - 💻 Compatible with Cursor, Windsurf, Cline and other MCP clients supporting `stdio` protocol
46 | - 🔐 Control read-only and read-write modes of SQL query execution
47 | - 🔄 Robust transaction handling for both direct and pooled database connections
48 | - 💻 Manage your Supabase projects with Supabase Management API
49 | - 🧑💻 Manage users with Supabase Auth Admin methods via Python SDK
50 | - 🔨 Pre-built tools to help Cursor & Windsurf work with MCP more effectively
51 | - 📦 Dead-simple install & setup via package manager (uv, pipx, etc.)
52 |
53 | ## Getting Started
54 |
55 | ### Prerequisites
56 | Installing the server requires the following on your system:
57 | - Python 3.12+
58 | - PostgreSQL 16+
59 |
60 | If you plan to install via `uv`, ensure it's [installed](https://docs.astral.sh/uv/getting-started/installation/#__tabbed_1_1).
61 |
62 | ### PostgreSQL Installation
63 | > ⚠️ **Important**: PostgreSQL must be installed BEFORE installing project dependencies, as psycopg2 requires PostgreSQL development libraries during compilation.
64 |
65 | **MacOS**
66 | ```bash
67 | brew install postgresql@16
68 | ```
69 |
70 | **Windows**
71 | - Download and install PostgreSQL 16+ from https://www.postgresql.org/download/windows/
72 | - Ensure "PostgreSQL Server" and "Command Line Tools" are selected during installation
73 |
74 | ### Step 1. MCP Server Installation
75 |
76 | Since v0.2.0 I introduced support for package installation. You can use your favorite Python package manager to install the server via:
77 |
78 | ```bash
79 | # if pipx is installed (recommended)
80 | pipx install supabase-mcp-server
81 |
82 | # if uv is installed
83 | uv pip install supabase-mcp-server
84 | ```
85 |
86 | `pipx` is recommended because it creates isolated environments for each package.
87 |
88 | You can also install the server manually by cloning the repository and running `pipx install --editable .` from the root directory.
89 |
90 | > ⚠️ If you run into psycopg2 compilation issues, you might be missing PostgreSQL development packages. See above.
91 |
92 | #### Installing from source
93 | If you would like to install from source, for example for local development:
94 | ```bash
95 | uv venv
96 | # On Mac
97 | source .venv/bin/activate
98 | # On Windows
99 | .venv\Scripts\activate
100 | # Install package in editable mode
101 | uv pip install -e .
102 | ```
103 |
104 | #### Installing via Smithery.ai
105 | Please report any issues with Smithery, as I haven't tested it yet.
106 |
107 | To install Supabase MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@alexander-zuev/supabase-mcp):
108 |
109 | ```bash
110 | npx -y @smithery/cli install @alexander-zuev/supabase-mcp --client claude
111 | ```
112 |
113 | ### Step 2. Configuration
114 |
115 | After installing the package, you'll need to configure your database connection settings. The server supports both local and remote Supabase instances.
116 |
117 | #### Local Supabase instance (Default)
118 | Server is pre-configured to connect to the local Supabase instance using default settings:
119 | - `Host`: 127.0.0.1:54322
120 | - `Password`: postgres
121 |
122 | >💡 As long as you didn't modify the default settings and you want to connect to the local instance, you don't need to set environment variables.
123 |
124 | #### Remote Supabase instance
125 |
126 | > ⚠️ **IMPORTANT WARNING**: Session pooling connections are not supported and there are no plans to support it yet. Let me know if you feel there is a use case for supporting this in an MCP server
127 |
128 | For remote Supabase projects, you need to configure:
129 | - `SUPABASE_PROJECT_REF` - Your project reference (found in project URL)
130 | - `SUPABASE_DB_PASSWORD` - Your database password
131 | - `SUPABASE_REGION` - (Optional) Defaults to `us-east-1`
132 | - `SUPABASE_ACCESS_TOKEN` - (Optional) For Management API access
133 |
134 | You can get your SUPABASE_PROJECT_REF from your project's dashboard URL:
135 | - `https://supabase.com/dashboard/project/`
136 |
137 | The server supports all Supabase regions:
138 | - `us-west-1` - West US (North California)
139 | - `us-east-1` - East US (North Virginia) - default
140 | - `us-east-2` - East US (Ohio)
141 | - `ca-central-1` - Canada (Central)
142 | - `eu-west-1` - West EU (Ireland)
143 | - `eu-west-2` - West Europe (London)
144 | - `eu-west-3` - West EU (Paris)
145 | - `eu-central-1` - Central EU (Frankfurt)
146 | - `eu-central-2` - Central Europe (Zurich)
147 | - `eu-north-1` - North EU (Stockholm)
148 | - `ap-south-1` - South Asia (Mumbai)
149 | - `ap-southeast-1` - Southeast Asia (Singapore)
150 | - `ap-northeast-1` - Northeast Asia (Tokyo)
151 | - `ap-northeast-2` - Northeast Asia (Seoul)
152 | - `ap-southeast-2` - Oceania (Sydney)
153 | - `sa-east-1` - South America (São Paulo)
154 |
155 | Method of MCP configuration differs between Cursor and Windsurf. Read the relevant section to understand how to configure connection.
156 |
157 | ##### Cursor
158 | Since v0.46 there are two ways to configure MCP servers in Cursor:
159 | - per project basis -> create `mcp.json` in your project / repo folder and `.env` to configure connection
160 | - globally -> create an MCP server in Settings and configure using `.env` which is supported by this MCP server only
161 |
162 |
163 | You can create project-specific MCP by:
164 | - creating .cursor folder in your repo, if doesn't exist
165 | - creating or updating `mcp.json` file with the following settings
166 |
167 | > ⚠ **Environment variables**: If you are configuring MCP server on a per-project basis you still need to create .env file for connection settings to be picked up. I wasn't able to configure mcp.json to pick up my env vars 😔
168 |
169 | ```json
170 | {
171 | "mcpServers": {
172 | "filesystem": {
173 | "command": "supabase-mcp-server",
174 | }
175 | }
176 | }
177 | ```
178 |
179 | Alternatively, if you want to configure MCP servers globally (i.e. not for each project), you can use configure connection settings by updating an `.env` file in a global config folder by running the following commands:
180 | ```bash
181 | # Create config directory and navigate to it
182 | # On macOS/Linux
183 | mkdir -p ~/.config/supabase-mcp
184 | cd ~/.config/supabase-mcp
185 |
186 | # On Windows (in PowerShell)
187 | mkdir -Force "$env:APPDATA\supabase-mcp"
188 | cd "$env:APPDATA\supabase-mcp"
189 | ```
190 | This creates the necessary config folder where your environment file will be stored.
191 |
192 | ```bash
193 | # Create and edit .env file
194 | # On macOS/Linux
195 | nano ~/.config/supabase-mcp/.env
196 |
197 | # On Windows (PowerShell)
198 | notepad "$env:APPDATA\supabase-mcp\.env"
199 | ```
200 |
201 | This will open the .env file. Once the file is open, copy & paste the following:
202 | ```bash
203 | SUPABASE_PROJECT_REF=your-project-ref
204 | SUPABASE_DB_PASSWORD=your-db-password
205 | SUPABASE_REGION=us-east-1 # optional, defaults to us-east-1
206 | SUPABASE_ACCESS_TOKEN=your-access-token # optional, for management API
207 | ```
208 |
209 | Verify the file exists - you should see the values you have just set:
210 | ```bash
211 | # On macOS/Linux
212 | cat ~/.config/supabase-mcp/.env
213 |
214 | # On Windows (PowerShell)
215 | Get-Content "$env:APPDATA\supabase-mcp\.env"
216 | ```
217 |
218 | You can find global config file:
219 | - Windows: `%APPDATA%/supabase-mcp/.env`
220 | - macOS/Linux: `~/.config/supabase-mcp/.env`
221 |
222 |
223 | ##### Windsurf
224 | Windsurf supports de facto standard .json format for MCP Servers configuration. You can configure the server in mcp_config.json file:
225 | ```json
226 | {
227 | "mcpServers": {
228 | "supabase": {
229 | "command": "/Users/username/.local/bin/supabase-mcp-server", // update path
230 | "env": {
231 | "SUPABASE_PROJECT_REF": "your-project-ref",
232 | "SUPABASE_DB_PASSWORD": "your-db-password",
233 | "SUPABASE_REGION": "us-east-1", // optional, defaults to us-east-1
234 | "SUPABASE_ACCESS_TOKEN": "your-access-token" // optional, for management API
235 | }
236 | }
237 | }
238 | }
239 | ```
240 | > 💡 **Finding the server path**:
241 | > - macOS/Linux: Run `which supabase-mcp-server`
242 | > - Windows: Run `where supabase-mcp-server`
243 |
244 | #### Configuration Precedence
245 | The server looks for configuration in this order:
246 | 1. Environment variables (highest priority)
247 | 2. Local `.env` file in current directory
248 | 3. Global config file:
249 | - Windows: `%APPDATA%/supabase-mcp/.env`
250 | - macOS/Linux: `~/.config/supabase-mcp/.env`
251 | 4. Default settings (local development)
252 |
253 | ### Step 3. Running MCP Server in Cursor/Windsurf
254 |
255 | In general, any MCP client that supports `stdio` protocol should work with this MCP server (Cline, for example) but I haven't tested it with anything except Cursor/Windsurf.
256 |
257 | #### Cursor
258 | Go to Settings -> Features -> MCP Servers and add a new server with this configuration:
259 | ```bash
260 | # can be set to any name
261 | name: supabase
262 | type: command
263 | # if you installed with pipx
264 | command: supabase-mcp-server
265 | # if you installed with uv
266 | command: uv run supabase-mcp-server
267 | ```
268 |
269 | If configuration is correct, you should see a green dot indicator and the number of tools exposed by the server.
270 | 
271 |
272 | #### Windsurf
273 | Go to Cascade -> Click on the hammer icon -> Configure -> Fill in the configuration:
274 | ```json
275 | {
276 | "mcpServers": {
277 | "supabase": {
278 | "command": "/Users/username/.local/bin/supabase-mcp-server", // update path
279 | "env": {
280 | "SUPABASE_PROJECT_REF": "your-project-ref",
281 | "SUPABASE_DB_PASSWORD": "your-db-password",
282 | "SUPABASE_REGION": "us-east-1", // optional, defaults to us-east-1
283 | "SUPABASE_ACCESS_TOKEN": "your-access-token" // optional, for management API
284 | }
285 | }
286 | }
287 | }
288 | ```
289 | If configuration is correct, you should see green dot indicator and clickable supabase server in the list of available servers.
290 |
291 | 
292 |
293 | ### Troubleshooting
294 |
295 | Here are some tips & tricks that might help you:
296 | - **Debug installation** - run `supabase-mcp-server` directly from the terminal to see if it works. If it doesn't, there might be an issue with the installation.
297 | - **MCP Server configuration** - if the above step works, it means the server is installed and configured correctly. As long as you provided the right command, IDE should be able to connect. Make sure to provide the right path to the server executable.
298 | - **Environment variables** - to connect to the right database, make sure you either set env variables in `mcp_config.json` or in `.env` file placed in a global config directory (`~/.config/supabase-mcp/.env` on macOS/Linux or `%APPDATA%\supabase-mcp\.env` on Windows).
299 | - **Accessing logs** - The MCP server writes detailed logs to a file:
300 | - Log file location:
301 | - macOS/Linux: `~/.local/share/supabase-mcp/mcp_server.log`
302 | - Windows: `%USERPROFILE%\.local\share\supabase-mcp\mcp_server.log`
303 | - Logs include connection status, configuration details, and operation results
304 | - View logs using any text editor or terminal commands:
305 | ```bash
306 | # On macOS/Linux
307 | cat ~/.local/share/supabase-mcp/mcp_server.log
308 |
309 | # On Windows (PowerShell)
310 | Get-Content "$env:USERPROFILE\.local\share\supabase-mcp\mcp_server.log"
311 | ```
312 |
313 | If you are stuck or any of the instructions above are incorrect, please raise an issue.
314 |
315 | ### MCP Inspector
316 | A super useful tool to help debug MCP server issues is MCP Inspector. If you installed from source, you can run `supabase-mcp-inspector` from the project repo and it will run the inspector instance. Coupled with logs this will give you complete overview over what's happening in the server.
317 | > 📝 Running `supabase-mcp-inspector`, if installed from package, doesn't work properly - I will validate and fix in the coming release.
318 |
319 | ## Feature Overview
320 |
321 | ### Database query tools
322 |
323 | Since v0.3.0 server supports both read-only and data modification operations:
324 |
325 | - **Read operations**: SELECT queries for data retrieval
326 | - **Data Manipulation Language (DML)**: INSERT, UPDATE, DELETE operations for data changes
327 | - **Data Definition Language (DDL)**: CREATE, ALTER, DROP operations for schema changes*
328 |
329 | *Note: DDL operations require:
330 | 1. Read-write mode enabled via `live_dangerously`
331 | 2. Sufficient permissions for the connected database role
332 |
333 | #### Transaction Handling
334 |
335 | The server supports two approaches for executing write operations:
336 |
337 | 1. **Explicit Transaction Control** (Recommended):
338 | ```sql
339 | BEGIN;
340 | CREATE TABLE public.test_table (id SERIAL PRIMARY KEY, name TEXT);
341 | COMMIT;
342 | ```
343 |
344 | 2. **Single Statements**:
345 | ```sql
346 | CREATE TABLE public.test_table (id SERIAL PRIMARY KEY, name TEXT);
347 | ```
348 |
349 | For DDL operations (CREATE/ALTER/DROP), tool description appropriately guides Cursor/Windsurf to use explicit transaction control with BEGIN/COMMIT blocks.
350 |
351 | #### Connection Types
352 |
353 | This MCP server uses:
354 | - **Direct Database Connection**: when connecting to a local Supabase instance
355 | - **Transaction Pooler Connections**: when connecting to a remote Supabase instance
356 |
357 |
358 | When connecting via Supabase's Transaction Pooler, some complex transaction patterns may not work as expected. For schema changes in these environments, use explicit transaction blocks or consider using Supabase migrations or the SQL Editor in the dashboard.
359 |
360 | Available database tools:
361 | - `get_db_schemas` - Lists all database schemas with their sizes and table counts
362 | - `get_tables` - Lists all tables in a schema with their sizes, row counts, and metadata
363 | - `get_table_schema` - Gets detailed table structure including columns, keys, and relationships
364 | - `execute_sql_query` - Executes raw SQL queries with comprehensive support for all PostgreSQL operations:
365 | - Supports all query types (SELECT, INSERT, UPDATE, DELETE, CREATE, ALTER, DROP, etc.)
366 | - Handles transaction control statements (BEGIN, COMMIT, ROLLBACK)
367 |
368 |
369 | - Supported modes:
370 | - `read-only` - only read-only queries are allowed (default mode)
371 | - `read-write` - all SQL operations are allowed when explicitly enabled
372 | - Safety features:
373 | - Starts in read-only mode by default
374 | - Requires explicit mode switch for write operations
375 | - Automatically resets to read-only mode after write operations
376 | - Intelligent transaction state detection to prevent errors
377 | - SQL query validation [TODO]
378 |
379 | ### Management API tools
380 | Since v0.3.0 server supports sending arbitrary requests to Supabase Management API with auto-injection of project ref and safety mode control:
381 | - Includes the following tools:
382 | - `send_management_api_request` to send arbitrary requests to Supabase Management API, with auto-injection of project ref and safety mode control
383 | - `get_management_api_spec` to get the enriched API specification with safety information
384 | - `get_management_api_safety_rules` to get all safety rules including blocked and unsafe operations with human-readable explanations
385 | - `live_dangerously` to switch between safe and unsafe modes
386 | - Safety features:
387 | - Divides API methods into `safe`, `unsafe` and `blocked` categories based on the risk of the operation
388 | - Allows to switch between safe and unsafe modes dynamically
389 | - Blocked operations (delete project, delete database) are not allowed regardless of the mode
390 |
391 | ### Auth Admin tools
392 | I was planning to add support for Python SDK methods to the MCP server. Upon consideration I decided to only add support for Auth admin methods as I often found myself manually creating test users which was prone to errors and time consuming. Now I can just ask Cursor to create a test user and it will be done seamlessly. Check out the full Auth Admin SDK method docs to know what it can do.
393 |
394 | Since v0.3.6 server supports direct access to Supabase Auth Admin methods via Python SDK:
395 | - Includes the following tools:
396 | - `get_auth_admin_methods_spec` to retrieve documentation for all available Auth Admin methods
397 | - `call_auth_admin_method` to directly invoke Auth Admin methods with proper parameter handling
398 | - Supported methods:
399 | - `get_user_by_id`: Retrieve a user by their ID
400 | - `list_users`: List all users with pagination
401 | - `create_user`: Create a new user
402 | - `delete_user`: Delete a user by their ID
403 | - `invite_user_by_email`: Send an invite link to a user's email
404 | - `generate_link`: Generate an email link for various authentication purposes
405 | - `update_user_by_id`: Update user attributes by ID
406 | - `delete_factor`: Delete a factor on a user (currently not implemented in SDK)
407 |
408 | #### Why use Auth Admin SDK instead of raw SQL queries?
409 |
410 | The Auth Admin SDK provides several key advantages over direct SQL manipulation:
411 | - **Functionality**: Enables operations not possible with SQL alone (invites, magic links, MFA)
412 | - **Accuracy**: More reliable than creating and executing raw SQL queries on auth schemas
413 | - **Simplicity**: Offers clear methods with proper validation and error handling
414 |
415 | - Response format:
416 | - All methods return structured Python objects instead of raw dictionaries
417 | - Object attributes can be accessed using dot notation (e.g., `user.id` instead of `user["id"]`)
418 | - Edge cases and limitations:
419 | - UUID validation: Many methods require valid UUID format for user IDs and will return specific validation errors
420 | - Email configuration: Methods like `invite_user_by_email` and `generate_link` require email sending to be configured in your Supabase project
421 | - Link types: When generating links, different link types have different requirements:
422 | - `signup` links don't require the user to exist
423 | - `magiclink` and `recovery` links require the user to already exist in the system
424 | - Error handling: The server provides detailed error messages from the Supabase API, which may differ from the dashboard interface
425 | - Method availability: Some methods like `delete_factor` are exposed in the API but not fully implemented in the SDK
426 |
427 | ## Roadmap
428 |
429 | - 📦 Simplified installation via package manager - ✅ (v0.2.0)
430 | - 🌎 Support for different Supabase regions - ✅ (v0.2.2)
431 | - 🎮 Programmatic access to Supabase management API with safety controls - ✅ (v0.3.0)
432 | - 👷♂️ Read and read-write database SQL queries with safety controls - ✅ (v0.3.0)
433 | - 🔄 Robust transaction handling for both direct and pooled connections - ✅ (v0.3.2)
434 | - 🐍 Support methods and objects available in native Python SDK - ✅ (v0.3.6)
435 | - 🔍 Stronger SQL query validation (read vs write operations)
436 | - 📝 Automatic versioning of DDL queries(?)
437 | - 🪵 Tools / resources to more easily access database, edge functions logs (?)
438 | - 👨💻 Supabase CLI integration (?)
439 |
440 |
441 |
442 | ### Connect to Supabase logs
443 |
444 | I'm planning to research, if it's possible to connect to Supabase db logs which might be useful for debugging (if not already supported.)
445 |
446 |
447 | ---
448 |
449 | Enjoy! ☺️
450 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling", "hatch-vcs"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "supabase-mcp-server"
7 | version = "0.1.0"
8 | # dynamic = ["version"]
9 | description = "Unofficial Supabase MCP server that enables Cursor and Windsurf to manage your database and execute SQL queries"
10 | readme = "README.md"
11 | requires-python = ">=3.13"
12 | dependencies = [
13 | "mcp[cli]>=1.2.1",
14 | "psycopg2>=2.9.10",
15 | "supabase>=2.13.0",
16 | "tenacity>=9.0.0",
17 | ]
18 | authors = [
19 | {name = "Alexander Zuev", email = "azuev@outlook.com"}
20 | ]
21 | keywords = ["supabase", "mcp", "cursor", "windsurf"]
22 | license = "Apache-2.0"
23 | classifiers = [
24 | "Development Status :: 4 - Beta",
25 | "Intended Audience :: Developers",
26 | "License :: OSI Approved :: Apache Software License",
27 | "Programming Language :: Python :: 3.13",
28 | "Topic :: Software Development :: Libraries :: Python Modules",
29 | "Topic :: Database :: Database Engines/Servers",
30 | ]
31 |
32 | [project.urls]
33 | Homepage = "https://github.com/Nopsled/supabase-mcp-server"
34 | Repository = "https://github.com/Nopsled/supabase-mcp-server.git"
35 | Changelog = "https://github.com/Nopsled/supabase-mcp-server/blob/main/CHANGELOG.MD"
36 | Documentation = "https://github.com/Nopsled/supabase-mcp-server#readme"
37 |
38 |
39 |
40 | [tool.hatch.build.targets.wheel]
41 | packages = ["supabase_mcp"]
42 |
43 | [tool.uv]
44 | package = true
45 |
46 | [tool.hatch.version]
47 | source = "vcs"
48 | raw-options = { version_scheme = "no-guess-dev" }
49 |
50 | [tool.hatch.build.hooks.vcs]
51 | version-file = "supabase_mcp/_version.py"
52 |
53 | [project.scripts]
54 | supabase-mcp-server = "supabase_mcp.main:run"
55 | supabase-mcp-inspector = "supabase_mcp.main:inspector"
56 |
57 |
58 | # Configure PyPI publishing
59 | [[tool.uv.index]]
60 | name = "pypi"
61 | url = "https://pypi.org/simple/"
62 | publish-url = "https://upload.pypi.org/legacy/"
63 |
64 | [tool.ruff]
65 | target-version = "py313"
66 | line-length = 120
67 | select = [
68 | "E", # pycodestyle errors
69 | "W", # pycodestyle warnings
70 | "F", # pyflakes
71 | "I", # isort
72 | "B", # flake8-bugbear
73 | "C4", # flake8-comprehensions
74 | "UP", # pyupgrade
75 | ]
76 | ignore = []
77 |
78 | [tool.ruff.format]
79 | quote-style = "double"
80 | indent-style = "space"
81 | skip-magic-trailing-comma = false
82 | line-ending = "auto"
83 |
84 |
85 | [tool.mypy]
86 | python_version = "3.13"
87 | strict = true
88 | ignore_missing_imports = true
89 | disallow_untyped_defs = true
90 | disallow_incomplete_defs = true
91 | check_untyped_defs = true
92 | disallow_untyped_decorators = true
93 | no_implicit_optional = true
94 | warn_redundant_casts = true
95 | warn_unused_ignores = true
96 | warn_return_any = true
97 | warn_unreachable = true
98 |
99 | [tool.pytest]
100 | testpaths = ["tests"]
101 | python_files = ["test_*.py"]
102 | python_classes = ["Test*"]
103 | python_functions = ["test_*"]
104 | addopts = "-v -ra -q"
105 | asyncio_mode = "strict"
106 |
107 | [tool.pytest.ini_options]
108 | asyncio_default_fixture_loop_scope = "function"
109 | markers = [
110 | "unit: marks a test as a unit test",
111 | "integration: marks a test as an integration test that requires database access"
112 | ]
113 |
114 | [dependency-groups]
115 | dev = [
116 | "pytest>=8.3.4",
117 | "pytest-asyncio>=0.25.3",
118 | "pytest-mock>=3.14.0",
119 | ]
120 |
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | required:
9 | - supabaseProjectRef
10 | - supabaseDbPassword
11 | properties:
12 | supabaseProjectRef:
13 | type: string
14 | description: The project reference of Supabase project you want to connect to.
15 | supabaseDbPassword:
16 | type: string
17 | description: The database password of Supabase project you want to connect to.
18 | commandFunction:
19 | # A function that produces the CLI command to start the MCP on stdio.
20 | |-
21 | (config) => ({ command: 'uv', args: ['--directory', '.', 'run', 'supabase_mcp/main.py'], env: { SUPABASE_PROJECT_REF: config.supabaseProjectRef, SUPABASE_DB_PASSWORD: config.supabaseDbPassword } })
22 |
--------------------------------------------------------------------------------
/supabase_mcp/__init__.py:
--------------------------------------------------------------------------------
1 | """Supabase MCP Server package."""
2 |
3 | from supabase_mcp._version import __version__, version, version_tuple
4 |
5 | __all__ = ["__version__", "version", "version_tuple"]
6 |
--------------------------------------------------------------------------------
/supabase_mcp/api_manager/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deploya-labs/mcp-supabase/b6d9f060f3195da280a419f15d2e898a72e6db08/supabase_mcp/api_manager/__init__.py
--------------------------------------------------------------------------------
/supabase_mcp/api_manager/api_manager.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import logging
4 | from json import JSONDecodeError
5 | from typing import Literal
6 |
7 | import httpx
8 | from httpx import HTTPStatusError
9 | from tenacity import (
10 | after_log,
11 | before_log,
12 | retry,
13 | retry_if_exception_type,
14 | stop_after_attempt,
15 | wait_exponential,
16 | )
17 |
18 | from supabase_mcp.api_manager.api_safety_config import SafetyConfig, SafetyLevel
19 | from supabase_mcp.api_manager.api_spec_manager import ApiSpecManager
from supabase_mcp.exceptions import (
    APIClientError,
    APIConnectionError,
    APIError,
    APIResponseError,
    APIServerError,
    SafetyError,
    UnexpectedError,
)
28 | from supabase_mcp.logger import logger
29 | from supabase_mcp.settings import settings
30 |
31 |
class SupabaseApiManager:
    """
    Manages the Supabase Management API.

    Singleton that owns the authenticated httpx client, validates every
    request against SafetyConfig, and delegates spec handling to
    ApiSpecManager.
    """

    _instance: SupabaseApiManager | None = None

    def __init__(self):
        self._mode: Literal[SafetyLevel.SAFE, SafetyLevel.UNSAFE] = SafetyLevel.SAFE  # Start in safe mode
        self.safety_config = SafetyConfig()
        # Populated asynchronously by create(); None until then.
        self.spec_manager: ApiSpecManager | None = None
        self.client = self.create_httpx_client()

    @classmethod
    async def create(cls) -> SupabaseApiManager:
        """Factory method to create and initialize an API manager"""
        manager = cls()
        manager.spec_manager = await ApiSpecManager.create()  # Use the running event loop
        return manager

    @classmethod
    async def get_manager(cls) -> SupabaseApiManager:
        """Get the singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = await cls.create()
        return cls._instance

    def create_httpx_client(self) -> httpx.AsyncClient:
        """Create an httpx client authenticated with the management access token."""
        client = httpx.AsyncClient(
            base_url="https://api.supabase.com",
            headers={"Authorization": f"Bearer {settings.supabase_access_token}", "Content-Type": "application/json"},
        )
        logger.info("Initialized Supabase Management API client")
        return client

    @property
    def mode(self) -> SafetyLevel:
        """Current operation mode"""
        return self._mode

    def switch_mode(self, mode: Literal[SafetyLevel.SAFE, SafetyLevel.UNSAFE]) -> None:
        """Switch between safe and unsafe operation modes"""
        self._mode = mode
        logger.info(f"Switched to {self._mode.value} mode")

    def get_spec(self) -> dict:
        """Retrieves enriched spec from spec manager.

        Raises:
            ValueError: if the spec has not been loaded yet (see ApiSpecManager.get_spec).
        """
        return self.spec_manager.get_spec()

    def get_safety_rules(self) -> str:
        """
        Get safety rules with human-readable descriptions.

        Returns:
            str: Human readable safety rules explanation
        """
        blocked_ops = self.safety_config.BLOCKED_OPERATIONS
        unsafe_ops = self.safety_config.UNSAFE_OPERATIONS

        # Create human-readable explanations
        blocked_summary = "\n".join([f"- {method} {path}" for method, paths in blocked_ops.items() for path in paths])

        unsafe_summary = "\n".join([f"- {method} {path}" for method, paths in unsafe_ops.items() for path in paths])

        return f"""MCP Server Safety Rules:

BLOCKED Operations (never allowed by the server):
{blocked_summary}

UNSAFE Operations (require unsafe mode):
{unsafe_summary}

Current mode: {self.mode}
In safe mode, only read operations are allowed.
Use live_dangerously() to enable unsafe mode for write operations.
"""

    @retry(
        retry=retry_if_exception_type(APIConnectionError),
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=15),
        before=before_log(logger, logging.DEBUG),
        after=after_log(logger, logging.DEBUG),
    )
    async def execute_request(
        self,
        method: str,
        path: str,
        request_params: dict | None = None,
        request_body: dict | None = None,
    ) -> dict:
        """
        Execute Management API request with safety validation.

        Args:
            method: HTTP method (GET, POST, etc)
            path: API path (/v1/projects etc)
            request_params: Optional query parameters as dict
            request_body: Optional request body as dict

        Returns:
            API response as dict

        Raises:
            SafetyError: If operation not allowed
            APIError: If request fails (APIClientError for 4xx,
                APIServerError for 5xx, APIConnectionError for network
                failures - the latter is retried up to 3 times)
        """
        # Replace project ref
        if "{ref}" in path:
            path = path.replace("{ref}", settings.supabase_project_ref)

        # Safety check
        allowed, reason, level = self.safety_config.is_operation_allowed(method, path)

        if level == SafetyLevel.BLOCKED:
            logger.warning(f"Blocked operation attempted: {method} {path}")
            raise SafetyError(
                f"Operation blocked: {reason}, check all safety rules here: {self.safety_config.list_all_rules()}"
            )

        if level == SafetyLevel.UNSAFE and self.mode == SafetyLevel.SAFE:
            logger.warning(f"Unsafe operation attempted in safe mode: {method} {path}")
            raise SafetyError(
                f"Operation requires YOLO mode: {reason}. Use live_dangerously() to enable YOLO mode. Check all safety rules here: {self.safety_config.list_all_rules()}"
            )

        # Execute request
        # NOTE(review): the request body is logged verbatim and may contain
        # secrets (e.g. config updates) - consider redacting.
        logger.info(
            "Executing API request: method=%s, url=%s, params=%s, request_body=%s",
            method,
            path,
            request_params,
            request_body,
        )
        try:
            # Build and send request
            request = self.client.build_request(method=method, url=path, params=request_params, json=request_body)
            response = await self.client.send(request)

            # Parse the body up front so error paths can include it
            error_body = None
            try:
                error_body = response.json() if response.content else None
            except JSONDecodeError:
                error_body = {"raw_content": response.text} if response.text else None

            # Handle API errors (4xx, 5xx)
            try:
                response.raise_for_status()
            except HTTPStatusError as e:
                error_message = f"API request failed: {e.response.status_code}"
                if error_body and isinstance(error_body, dict):
                    error_message = error_body.get("message", error_message)

                if 400 <= e.response.status_code < 500:
                    raise APIClientError(
                        message=error_message,
                        status_code=e.response.status_code,
                        response_body=error_body,
                    ) from e
                # BUGFIX: 5xx errors previously fell through this handler and
                # the error body was returned as if the call had succeeded.
                raise APIServerError(
                    message=error_message,
                    status_code=e.response.status_code,
                    response_body=error_body,
                ) from e

            # Parse successful response
            try:
                return response.json()
            except JSONDecodeError as e:
                raise APIResponseError(
                    message=f"Failed to parse API response as JSON: {str(e)}",
                    status_code=response.status_code,
                    response_body={"raw_content": response.text},
                ) from e

        except (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError) as e:
            raise APIConnectionError(message=f"Connection error: {str(e)}") from e
        except Exception as e:
            if isinstance(e, (APIError, SafetyError)):
                raise
            logger.exception("Unexpected error during API request")
            raise UnexpectedError(message=f"Unexpected error during API request: {str(e)}") from e

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()
214 |
--------------------------------------------------------------------------------
/supabase_mcp/api_manager/api_safety_config.py:
--------------------------------------------------------------------------------
1 | # supabase_mcp/api_manager/config.py
2 | from enum import Enum
3 |
4 |
5 | class SafetyLevel(Enum):
6 | SAFE = "safe"
7 | UNSAFE = "unsafe"
8 | BLOCKED = "blocked"
9 |
10 |
11 | class SafetyConfig:
12 | """Configuration for Supabase Management API safety checks"""
13 |
14 | # Permanently blocked operations - never allowed
15 | BLOCKED_OPERATIONS = {
16 | "DELETE": [
17 | "/v1/projects/{ref}", # Delete project
18 | "/v1/organizations/{slug}", # Delete organization
19 | "/v1/projects/{ref}/database", # Delete database
20 | ]
21 | }
22 |
23 | # Unsafe operations - require YOLO mode
24 | UNSAFE_OPERATIONS = {
25 | "POST": [
26 | "/v1/projects", # Create project
27 | "/v1/organizations", # Create org
28 | "/v1/projects/{ref}/restore", # Restore project
29 | "/v1/projects/{ref}/pause", # Pause project - can impact production
30 | ],
31 | "PATCH": [
32 | "/v1/projects/{ref}/config/auth", # Auth config
33 | "/v1/projects/{ref}/config/database", # DB config
34 | "/v1/projects/{ref}/config/pooler", # Connection pooling changes - can impact DB performance
35 | ],
36 | "PUT": [
37 | "/v1/projects/{ref}/config/secrets", # Update secrets
38 | "/v1/projects/{ref}/config/database/postgres", # Postgres config changes - critical DB settings
39 | ],
40 | }
41 |
42 | def list_all_rules(self) -> str:
43 | """List all safety rules"""
44 | return f"Blocked operations: {self.BLOCKED_OPERATIONS}\nUnsafe operations: {self.UNSAFE_OPERATIONS}"
45 |
46 | def is_operation_allowed(self, method: str, path: str) -> tuple[bool, str, SafetyLevel]:
47 | """Determine operation safety level and status"""
48 | # Check blocked first
49 | if self._is_blocked(method, path):
50 | return False, "Operation is blocked for safety", SafetyLevel.BLOCKED
51 |
52 | # Check if unsafe
53 | if self._is_unsafe(method, path):
54 | return True, "Operation requires YOLO mode", SafetyLevel.UNSAFE
55 |
56 | # Default to safe
57 | return True, "Operation allowed", SafetyLevel.SAFE
58 |
59 | def _is_blocked(self, method: str, path: str) -> bool:
60 | return self._path_matches_patterns(method, path, self.BLOCKED_OPERATIONS)
61 |
62 | def _is_unsafe(self, method: str, path: str) -> bool:
63 | return self._path_matches_patterns(method, path, self.UNSAFE_OPERATIONS)
64 |
65 | def _path_matches_patterns(self, method: str, path: str, patterns: dict) -> bool:
66 | """Check if path matches any pattern"""
67 | if method not in patterns:
68 | return False
69 |
70 | for pattern in patterns[method]:
71 | if self._path_matches(pattern, path):
72 | return True
73 | return False
74 |
75 | def _path_matches(self, pattern: str, path: str) -> bool:
76 | """Check if path matches pattern with parameters"""
77 | pattern_parts = pattern.split("/")
78 | path_parts = path.split("/")
79 |
80 | if len(pattern_parts) != len(path_parts):
81 | return False
82 |
83 | return all(
84 | p1 == p2 or (p1.startswith("{") and p1.endswith("}"))
85 | for p1, p2 in zip(pattern_parts, path_parts, strict=False)
86 | )
87 |
--------------------------------------------------------------------------------
/supabase_mcp/api_manager/api_spec_manager.py:
--------------------------------------------------------------------------------
1 | import json
2 | from dataclasses import dataclass
3 | from pathlib import Path
4 |
5 | import httpx
6 |
7 | from supabase_mcp.api_manager.api_safety_config import SafetyConfig
8 | from supabase_mcp.logger import logger
9 |
# Constants
# Remote source of truth for the Management API OpenAPI spec.
SPEC_URL = "https://api.supabase.com/api/v1-json"
# Bundled fallback spec, used by ApiSpecManager when the remote fetch fails.
LOCAL_SPEC_PATH = Path(__file__).parent / "specs" / "api_spec.json"
13 |
14 |
@dataclass
class ValidationResult:
    """Result of request validation against OpenAPI spec"""

    # Whether the request passed validation.
    is_valid: bool
    # Failure description (None when valid).
    error: str | None = None
    # NOTE(review): not constructed anywhere in this module - presumably the
    # operationId and operation object from the matched spec entry; confirm
    # against callers before relying on these fields.
    operation_id: str | None = None
    operation_info: dict | None = None
23 |
24 |
25 | class ApiSpecManager:
26 | """
27 | Manages the OpenAPI specification for the Supabase Management API.
28 | Handles spec loading, caching, and validation.
29 | """
30 |
31 | def __init__(self):
32 | self.safety_config = SafetyConfig()
33 | self.spec: dict | None = None
34 |
35 | @classmethod
36 | async def create(cls) -> "ApiSpecManager":
37 | """Async factory method to create and initialize a ApiSpecManager"""
38 | manager = cls()
39 | await manager.on_startup()
40 | return manager
41 |
42 | async def on_startup(self) -> None:
43 | """Load and enrich spec on startup"""
44 | # Try to fetch latest spec
45 | raw_spec = await self._fetch_remote_spec()
46 |
47 | if not raw_spec:
48 | # If remote fetch fails, use our fallback spec
49 | logger.info("Using fallback API spec")
50 | raw_spec = self._load_local_spec()
51 |
52 | self.spec = raw_spec
53 |
54 | async def _fetch_remote_spec(self) -> dict | None:
55 | """
56 | Fetch latest OpenAPI spec from Supabase API.
57 | Returns None if fetch fails.
58 | """
59 | try:
60 | async with httpx.AsyncClient() as client:
61 | response = await client.get(SPEC_URL)
62 | if response.status_code == 200:
63 | return response.json()
64 | logger.warning(f"Failed to fetch API spec: {response.status_code}")
65 | return None
66 | except Exception as e:
67 | logger.warning(f"Error fetching API spec: {e}")
68 | return None
69 |
70 | def _load_local_spec(self) -> dict:
71 | """
72 | Load OpenAPI spec from local file.
73 | This is our fallback spec shipped with the server.
74 | """
75 | try:
76 | with open(LOCAL_SPEC_PATH) as f:
77 | return json.load(f)
78 | except FileNotFoundError:
79 | logger.error(f"Local spec not found at {LOCAL_SPEC_PATH}")
80 | raise
81 | except json.JSONDecodeError as e:
82 | logger.error(f"Invalid JSON in local spec: {e}")
83 | raise
84 |
85 | def get_spec(self) -> dict:
86 | """Retrieve the enriched spec."""
87 | if self.spec is None:
88 | logger.error("OpenAPI spec not loaded by spec manager")
89 | raise ValueError("OpenAPI spec not loaded")
90 | return self.spec
91 |
--------------------------------------------------------------------------------
/supabase_mcp/db_client/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deploya-labs/mcp-supabase/b6d9f060f3195da280a419f15d2e898a72e6db08/supabase_mcp/db_client/__init__.py
--------------------------------------------------------------------------------
/supabase_mcp/db_client/db_client.py:
--------------------------------------------------------------------------------
1 | import urllib.parse
2 | from dataclasses import dataclass
3 | from typing import Any, Literal
4 |
5 | import psycopg2
6 | from psycopg2 import errors as psycopg2_errors
7 | from psycopg2.extras import RealDictCursor
8 | from psycopg2.pool import SimpleConnectionPool
9 | from tenacity import retry, stop_after_attempt, wait_exponential
10 |
11 | from supabase_mcp.db_client.db_safety_config import DbSafetyLevel
12 | from supabase_mcp.exceptions import ConnectionError, PermissionError, QueryError
13 | from supabase_mcp.logger import logger
14 | from supabase_mcp.settings import Settings, settings
15 | from supabase_mcp.validators import validate_transaction_control
16 |
17 |
@dataclass
class QueryResult:
    """Represents a query result with metadata."""

    # Result rows; RealDictCursor yields one dict per row (column -> value).
    rows: list[dict[str, Any]]
    # Number of rows in `rows`.
    count: int
    # The cursor's status message (e.g. "SELECT 5").
    status: str
25 |
26 |
class SupabaseClient:
    """Connects to Supabase PostgreSQL database directly.

    Singleton wrapper around a psycopg2 SimpleConnectionPool. Queries run
    through execute_query(), which honours the current read-only/read-write
    mode and any explicit transaction control in the SQL itself.
    """

    _instance = None  # Singleton instance

    def __init__(
        self,
        project_ref: str | None = None,
        db_password: str | None = None,
        settings_instance: Settings | None = None,
        _mode: Literal[DbSafetyLevel.RO, DbSafetyLevel.RW] = DbSafetyLevel.RW,  # Start in RW mode
    ):
        """Initialize the PostgreSQL connection pool.

        Args:
            project_ref: Optional Supabase project reference. If not provided, will be taken from settings.
            db_password: Optional database password. If not provided, will be taken from settings.
            settings_instance: Optional Settings instance. If not provided, will use global settings.
            _mode: Initial safety mode; defaults to read-write.
        """
        # Pool is created lazily by _get_pool() on first use.
        self._pool = None
        self._settings = settings_instance or settings
        self.project_ref = project_ref or self._settings.supabase_project_ref
        self.db_password = db_password or self._settings.supabase_db_password
        self.db_url = self._get_db_url_from_supabase()
        self._mode = _mode

    def _get_db_url_from_supabase(self) -> str:
        """Create PostgreSQL connection string from settings."""
        # URL-encode the password so special characters don't break the DSN.
        encoded_password = urllib.parse.quote_plus(self.db_password)

        if self.project_ref.startswith("127.0.0.1"):
            # Local development
            return f"postgresql://postgres:{encoded_password}@{self.project_ref}/postgres"

        # Production Supabase: connect via the regional pooler endpoint (port 6543)
        return (
            f"postgresql://postgres.{self.project_ref}:{encoded_password}"
            f"@aws-0-{self._settings.supabase_region}.pooler.supabase.com:6543/postgres"
        )

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=15),
    )
    def _get_pool(self) -> SimpleConnectionPool:
        """Get or create PostgreSQL connection pool with better error handling.

        Retried up to 3 times with exponential backoff (tenacity).

        Raises:
            ConnectionError: when the pool cannot be created or connected.
        """
        if self._pool is None:
            try:
                # Log only the host part - the DSN before '@' contains the password.
                logger.debug(f"Creating connection pool for: {self.db_url.split('@')[1]}")
                self._pool = SimpleConnectionPool(
                    minconn=1,
                    maxconn=10,
                    cursor_factory=RealDictCursor,
                    dsn=self.db_url,
                )
                # Test the connection by borrowing one and returning it.
                # NOTE(review): a psycopg2 connection used as a context manager
                # wraps a transaction (commit/rollback on exit), it is not
                # closed - so putconn inside the with-block is intentional.
                with self._pool.getconn() as conn:
                    self._pool.putconn(conn)
                logger.info("✓ Created PostgreSQL connection pool")
            except psycopg2.OperationalError as e:
                logger.error(f"Failed to connect to database: {e}")
                raise ConnectionError(f"Could not connect to database: {e}") from e
            except Exception as e:
                logger.exception("Unexpected error creating connection pool")
                raise ConnectionError(f"Unexpected connection error: {e}") from e
        return self._pool

    @classmethod
    def create(
        cls,
        project_ref: str | None = None,
        db_password: str | None = None,
        settings_instance: Settings | None = None,
    ) -> "SupabaseClient":
        """Create and return a configured SupabaseClient instance.

        Returns the existing singleton when one exists; the arguments are
        only used on the first call.

        Args:
            project_ref: Optional Supabase project reference
            db_password: Optional database password
            settings_instance: Optional Settings instance
        """
        if cls._instance is None:
            cls._instance = cls(
                project_ref=project_ref,
                db_password=db_password,
                settings_instance=settings_instance,
            )
        return cls._instance

    @classmethod
    def reset(cls) -> None:
        """Reset the singleton instance cleanly"""
        if hasattr(cls, "_instance") and cls._instance is not None:
            # Close any connections if needed
            if hasattr(cls._instance, "close"):
                cls._instance.close()
            # Reset to None so the next create() builds a fresh client.
            cls._instance = None

    def close(self) -> None:
        """Explicitly close the connection pool."""
        if self._pool is not None:
            try:
                self._pool.closeall()
                self._pool = None
                logger.info("Closed PostgreSQL connection pool")
            except Exception as e:
                # Best-effort shutdown: log but never raise while closing.
                logger.error(f"Error closing connection pool: {e}")

    @property
    def mode(self) -> DbSafetyLevel:
        """Current operation mode"""
        return self._mode

    def switch_mode(self, mode: Literal[DbSafetyLevel.RO, DbSafetyLevel.RW]) -> None:
        """Switch the database connection mode."""
        self._mode = mode
        logger.info(f"Switched to {self.mode.value} mode")

    def execute_query(self, query: str, params: tuple | None = None) -> QueryResult:
        """Execute a SQL query and return structured results.

        Args:
            query: SQL query to execute
            params: Optional query parameters to prevent SQL injection

        Returns:
            QueryResult containing rows and metadata

        Raises:
            ConnectionError: When database connection fails
            QueryError: When query execution fails (schema or general errors)
            PermissionError: When user lacks required privileges
        """
        # NOTE(review): this assignment is redundant - _get_pool() below
        # caches the pool in self._pool anyway.
        if self._pool is None:
            self._pool = self._get_pool()

        pool = self._get_pool()
        conn = pool.getconn()
        try:
            # Check if we are in transaction mode
            in_transaction = conn.status == psycopg2.extensions.STATUS_IN_TRANSACTION
            logger.debug(f"Connection state before query: {conn.status}")

            # Does the SQL itself carry BEGIN/COMMIT/ROLLBACK?
            has_transaction_control = validate_transaction_control(query)
            logger.debug(f"Has transaction control: {has_transaction_control}")

            # Define readonly once at the top so it's available throughout the function
            readonly = self.mode == DbSafetyLevel.RO

            # Set session only if not already inside a transaction (session
            # characteristics cannot change mid-transaction)
            if not in_transaction:
                conn.set_session(readonly=readonly)

            with conn.cursor() as cur:
                try:
                    cur.execute(query, params)

                    # Fetch results if available
                    rows = []
                    if cur.description:  # If query returns data
                        rows = cur.fetchall() or []

                    # Auto-commit only when in write mode AND the query has no
                    # explicit transaction control of its own
                    if not readonly and not has_transaction_control:
                        conn.commit()

                    status = cur.statusmessage
                    logger.debug(f"Query status: {status}")
                    return QueryResult(rows=rows, count=len(rows), status=status)

                except psycopg2_errors.InsufficientPrivilege as e:
                    logger.error(f"Permission denied: {e}")
                    raise PermissionError(
                        f"Access denied: {str(e)}. Use live_dangerously('database', True) for write operations."
                    ) from e
                except (
                    psycopg2_errors.UndefinedTable,
                    psycopg2_errors.UndefinedColumn,
                ) as e:
                    logger.error(f"Schema error: {e}")
                    raise QueryError(str(e)) from e
                except psycopg2.Error as e:
                    # Roll back a failed write so the pooled connection goes
                    # back in a clean state
                    if not readonly:
                        try:
                            conn.rollback()
                            logger.debug("Transaction rolled back due to error")
                        except Exception as rollback_error:
                            logger.warning(f"Failed to rollback transaction: {rollback_error}")
                    logger.error(f"Database error: {e.pgerror}")
                    raise QueryError(f"Query failed: {str(e)}") from e
        finally:
            # Always return the connection to the pool
            if pool and conn:
                pool.putconn(conn)
221 |
--------------------------------------------------------------------------------
/supabase_mcp/db_client/db_safety_config.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
class DbSafetyLevel(str, Enum):
    """Database access mode; str-valued so it serializes cleanly."""

    RO = "ro"  # read-only session
    RW = "rw"  # read-write session
7 |
--------------------------------------------------------------------------------
/supabase_mcp/exceptions.py:
--------------------------------------------------------------------------------
1 | class DatabaseError(Exception):
2 | """Base class for database-related errors."""
3 |
4 | pass
5 |
6 |
7 | class ConnectionError(DatabaseError):
8 | """Raised when connection to database fails."""
9 |
10 | pass
11 |
12 |
13 | class PermissionError(DatabaseError):
14 | """Raised when user lacks required privileges."""
15 |
16 | pass
17 |
18 |
19 | class QueryError(DatabaseError):
20 | """Raised when query execution fails."""
21 |
22 | pass
23 |
24 |
25 | class TimeoutError(DatabaseError):
26 | """Raised when a query execution exceeds the specified timeout."""
27 |
28 | pass
29 |
30 |
31 | class ValidationError(Exception):
32 | """Raised when input validation fails."""
33 |
34 | pass
35 |
36 |
37 | class SafetyError(Exception):
38 | """Operation not allowed due to safety rules"""
39 |
40 | pass
41 |
42 |
43 | class APIError(Exception):
44 | """Base class for API-related errors"""
45 |
46 | def __init__(
47 | self,
48 | message: str,
49 | status_code: int | None = None,
50 | response_body: dict | None = None,
51 | ):
52 | self.status_code = status_code
53 | self.response_body = response_body
54 | super().__init__(message)
55 |
56 |
57 | class APIConnectionError(APIError):
58 | """Failed to connect to API"""
59 |
60 | pass
61 |
62 |
63 | class PythonSDKError(Exception):
64 | """Failed to create Python SDK client or call Python SDK method"""
65 |
66 | pass
67 |
68 |
69 | class APIResponseError(APIError):
70 | """Failed to process API response"""
71 |
72 | pass
73 |
74 |
75 | class APIClientError(APIError):
76 | """Client-side error (4xx)"""
77 |
78 | pass
79 |
80 |
81 | class APIServerError(APIError):
82 | """Server-side error (5xx)"""
83 |
84 | pass
85 |
86 |
87 | class UnexpectedError(APIError):
88 | """Unexpected error during API operation"""
89 |
90 | pass
91 |
--------------------------------------------------------------------------------
/supabase_mcp/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 |
4 |
def setup_logger():
    """Configure file-based logging for the MCP server.

    Logs only to ~/.local/share/supabase-mcp/mcp_server.log - no stdout
    handler is attached, since stdout carries the MCP stdio protocol.

    Returns:
        logging.Logger: the configured "supabase-mcp" logger.
    """
    logger = logging.getLogger("supabase-mcp")

    # Remove existing handlers to avoid duplicate logs on re-import/reload.
    if logger.hasHandlers():
        logger.handlers.clear()

    # Define a consistent log directory in the user's home folder
    log_dir = Path.home() / ".local" / "share" / "supabase-mcp"
    log_dir.mkdir(parents=True, exist_ok=True)  # Ensure the directory exists

    # Define the log file path
    log_file = log_dir / "mcp_server.log"

    # File handler only; explicit UTF-8 so log content does not depend on the
    # platform's default encoding (the "✓" markers would break on cp1252).
    file_handler = logging.FileHandler(log_file, encoding="utf-8")

    # Create formatter
    formatter = logging.Formatter("[%(asctime)s] %(levelname)-8s %(message)s", datefmt="%y/%m/%d %H:%M:%S")

    # Add formatter to file handler
    file_handler.setFormatter(formatter)

    # Add handler to logger
    logger.addHandler(file_handler)

    # Set level
    logger.setLevel(logging.INFO)

    return logger


logger = setup_logger()
39 |
--------------------------------------------------------------------------------
/supabase_mcp/main.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Literal
3 |
4 | from mcp.server.fastmcp import FastMCP
5 |
6 | from supabase_mcp.api_manager.api_manager import SupabaseApiManager
7 | from supabase_mcp.api_manager.api_safety_config import SafetyLevel
8 | from supabase_mcp.db_client.db_client import SupabaseClient
9 | from supabase_mcp.db_client.db_safety_config import DbSafetyLevel
10 | from supabase_mcp.logger import logger
11 | from supabase_mcp.queries import PreBuiltQueries
12 | from supabase_mcp.sdk_client.python_client import SupabaseSDKClient
13 | from supabase_mcp.settings import settings
14 | from supabase_mcp.validators import (
15 | validate_schema_name,
16 | validate_sql_query,
17 | validate_table_name,
18 | )
19 |
# Build the MCP server and the shared database client at import time so tool
# functions can use them; fail fast (with a logged error) if that fails.
try:
    mcp = FastMCP("supabase")
    supabase = SupabaseClient.create()
except Exception as e:
    logger.error(f"Failed to create Supabase client: {e}")
    raise  # bare raise preserves the original traceback (was `raise e`)
26 |
27 |
@mcp.tool(description="List all database schemas with their sizes and table counts.")
async def get_db_schemas():
    """Return every accessible schema with its total size and table count."""
    return supabase.execute_query(PreBuiltQueries.get_schemas_query())
34 |
35 |
@mcp.tool(
    description="List all tables, foreign tables, and views in a schema with their sizes, row counts, and metadata."
)
async def get_tables(schema_name: str):
    """Return tables, foreign tables, and views of a schema with size, row/column counts, and index info."""
    validated_schema = validate_schema_name(schema_name)
    return supabase.execute_query(PreBuiltQueries.get_tables_in_schema_query(validated_schema))
44 |
45 |
@mcp.tool(description="Get detailed table structure including columns, keys, and relationships.")
async def get_table_schema(schema_name: str, table: str):
    """Return column definitions, primary keys, and foreign-key relationships for one table."""
    safe_schema = validate_schema_name(schema_name)
    safe_table = validate_table_name(table)
    return supabase.execute_query(PreBuiltQueries.get_table_schema_query(safe_schema, safe_table))
53 |
54 |
@mcp.tool(
    description="""
    Query the database with a raw SQL query.

    IMPORTANT USAGE GUIDELINES:
    1. For READ operations (SELECT):
       - Use simple SELECT statements
       - Example: SELECT * FROM public.users LIMIT 10;

    2. For WRITE operations (INSERT/UPDATE/DELETE/CREATE/ALTER/DROP):
       - ALWAYS wrap in explicit BEGIN/COMMIT blocks
       - Example:
         BEGIN;
         CREATE TABLE public.test_table (id SERIAL PRIMARY KEY, name TEXT);
         COMMIT;

    3. NEVER mix READ and WRITE operations in the same query
    4. NEVER use single DDL statements without transaction control
    5. Remember to enable unsafe mode first with live_dangerously('database', True)
    6. For auth operations (primarily creating, updating, deleting users, generating links, etc), prefer using the Auth Admin SDK methods
    instead of direct SQL manipulation to ensure correctness and prevent security issues

    TRANSACTION HANDLING:
    - The server detects BEGIN/COMMIT/ROLLBACK keywords to respect your transaction control
    - When you use these keywords, the server will not interfere with your transactions
    - For queries without transaction control, the server will auto-commit in write mode

    Failure to follow these guidelines will result in errors.
    """
)
async def execute_sql_query(query: str):
    """Execute an SQL query with validation.

    The query is first checked by validate_sql_query(); execution is then
    delegated to the shared SupabaseClient, which enforces the current
    read-only/read-write database mode.
    """
    query = validate_sql_query(query)
    return supabase.execute_query(query)
89 |
90 |
91 | # Core Tools
@mcp.tool(
    description="""
    Execute a Supabase Management API request. Use paths exactly as defined in the API spec -
    the {ref} parameter will be automatically injected from settings.

    Parameters:
    - method: HTTP method (GET, POST, PUT, PATCH, DELETE)
    - path: API path (e.g. /v1/projects/{ref}/functions)
    - request_params: Query parameters as dict (e.g. {"key": "value"}) - use empty dict {} if not needed
    - request_body: Request body as dict (e.g. {"name": "test"}) - use empty dict {} if not needed

    Examples:
    1. GET request with params:
       method: "GET"
       path: "/v1/projects/{ref}/functions"
       request_params: {"name": "test"}
       request_body: {}

    2. POST request with body:
       method: "POST"
       path: "/v1/projects/{ref}/functions"
       request_params: {}
       request_body: {"name": "test-function", "slug": "test-function"}
    """
)
async def send_management_api_request(
    method: str,
    path: str,  # URL path
    request_params: dict,  # Query parameters as dict
    request_body: dict,  # Request body as dict
) -> dict:
    """
    Execute a Management API request.

    Args:
        method: HTTP method (GET, POST, etc)
        path: API path exactly as in spec, {ref} will be auto-injected
        request_params: Query parameters as dict if needed (e.g. {"key": "value"})
        request_body: Request body as dict for POST/PUT/PATCH (e.g. {"name": "test"})

    Example:
        To get a function details, use:
        path="/v1/projects/{ref}/functions/{function_slug}"
        The {ref} will be auto-injected, only function_slug needs to be provided
    """
    # {ref} injection and the blocked/unsafe safety checks both happen inside
    # SupabaseApiManager.execute_request().
    api_manager = await SupabaseApiManager.get_manager()
    return await api_manager.execute_request(method, path, request_params, request_body)
139 |
140 |
@mcp.tool(
    description="""
    Toggle unsafe mode for either Management API or Database operations.
    In safe mode (default):
    - API: only read operations allowed
    - Database: only SELECT queries allowed
    In unsafe mode:
    - API: state-changing operations permitted (except blocked ones)
    - Database: all SQL operations permitted
    """
)
async def live_dangerously(service: Literal["api", "database"], enable: bool = False) -> dict:
    """
    Toggle between safe and unsafe operation modes for a specific service.

    Args:
        service: Which service to toggle ("api" or "database")
        enable: True to enable unsafe mode, False for safe mode

    Returns:
        dict: Current mode status for the specified service
    """
    if service != "api":
        # database
        supabase.switch_mode(DbSafetyLevel.RW if enable else DbSafetyLevel.RO)
        return {"service": "database", "mode": supabase.mode}

    api_manager = await SupabaseApiManager.get_manager()
    target_mode = SafetyLevel.UNSAFE if enable else SafetyLevel.SAFE
    api_manager.switch_mode(target_mode)
    return {"service": "api", "mode": api_manager.mode}
170 |
171 |
@mcp.tool(
    description="""
    Get the latest complete Management API specification.
    Use this to understand available operations and their requirements.
    """
)
async def get_management_api_spec() -> dict:
    """
    Get enriched API specification with safety information.

    Returns:
        dict: OpenAPI spec with added safety metadata per operation
    """
    # Fixed typo in the tool description ("latests" -> "latest").
    api_manager = await SupabaseApiManager.get_manager()
    return api_manager.get_spec()
187 |
188 |
@mcp.tool(description="Get all safety rules for the Supabase Management API")
async def get_management_api_safety_rules() -> dict:
    """Return every safety rule — blocked and unsafe operations — with human-readable explanations."""
    manager = await SupabaseApiManager.get_manager()
    return manager.get_safety_rules()
194 |
195 |
@mcp.tool(
    description="""
    Get Python SDK methods specification for Auth Admin. Returns a python dictionary of all Auth Python SDK methods.
    Use this to understand the available methods and their required parameters.
    """
)
async def get_auth_admin_methods_spec() -> dict:
    """Expose the Auth Admin section of the Python SDK spec as a dictionary."""
    client = await SupabaseSDKClient.get_instance()
    return client.return_python_sdk_spec()
206 |
207 |
@mcp.tool(
    description="""
    Call an Auth Admin method from Supabase Python SDK. Returns the result of the method call.

    Available methods:
    - get_user_by_id: Retrieve a user by their ID
    - list_users: List all users with pagination
    - create_user: Create a new user
    - delete_user: Delete a user by their ID
    - invite_user_by_email: Send an invite link to a user's email
    - generate_link: Generate an email link for various authentication purposes
    - update_user_by_id: Update user attributes by ID
    - delete_factor: Delete a factor on a user

    Each method requires specific parameters. For nested parameters, follow the structure exactly:

    Examples:
    1. Get user by ID:
       method: "get_user_by_id"
       params: {"uid": "user-uuid-here"}

    2. Create user:
       method: "create_user"
       params: {
         "attributes": {
           "email": "user@example.com",
           "password": "secure-password",
           "email_confirm": true,
           "user_metadata": {"name": "John Doe"}
         }
       }

    3. Generate link:
       method: "generate_link"
       params: {
         "params": {
           "type": "signup",
           "email": "user@example.com",
           "password": "secure-password",
           "options": {
             "data": {"name": "John Doe"},
             "redirect_to": "https://example.com/welcome"
           }
         }
       }

    Use get_auth_admin_methods_spec() to see full documentation for all methods.
    """
)
async def call_auth_admin_method(method: str, params: dict) -> dict:
    """Dispatch one Auth Admin method call through the shared SDK client."""
    client = await SupabaseSDKClient.get_instance()
    return await client.call_auth_admin_method(method, params)
261 |
262 |
def run():
    """Run the Supabase MCP server."""
    ref = settings.supabase_project_ref
    if ref.startswith("127.0.0.1"):
        # A loopback "project ref" means a locally running Supabase stack.
        logger.info(
            "Starting Supabase MCP server to connect to local project: %s",
            ref,
        )
    else:
        logger.info(
            "Starting Supabase MCP server to connect to project ref: %s (region: %s)",
            ref,
            settings.supabase_region,
        )
    # Optional credentials unlock extra functionality; log what was found.
    if settings.supabase_access_token:
        logger.info("Personal access token detected - using for Management API")
    if settings.supabase_service_role_key:
        logger.info("Service role key detected - using for Python SDK")
    mcp.run()
281 |
282 |
if __name__ == "__main__":
    # Direct-execution entry point (e.g. `python supabase_mcp/main.py`).
    run()
285 |
286 |
def inspector():
    """Inspector mode - same as mcp dev"""
    logger.info("Starting Supabase MCP server inspector")

    import importlib.util

    from mcp.cli.cli import dev  # Import from correct module

    # Locate the installed package so `mcp dev` can be pointed at its entry file.
    spec = importlib.util.find_spec("supabase_mcp")
    if not (spec and spec.origin):
        raise ImportError("Could not find supabase_mcp package")

    entry_file = str(Path(spec.origin).parent / "main.py")
    logger.info(f"Using file spec: {entry_file}")
    return dev(file_spec=entry_file)
304 |
--------------------------------------------------------------------------------
/supabase_mcp/queries.py:
--------------------------------------------------------------------------------
class PreBuiltQueries:
    """Canned SQL used by the MCP server to introspect a Postgres database.

    Schema/table names arrive from tool callers (i.e. untrusted input), so
    every interpolated value is escaped with `_escape_literal` before being
    embedded in a single-quoted SQL literal.
    """

    @staticmethod
    def _escape_literal(value: str) -> str:
        """Escape a value for embedding in a single-quoted SQL literal.

        Doubles single quotes per the SQL standard so a caller-supplied name
        cannot terminate the literal and inject additional SQL.
        """
        return value.replace("'", "''")

    @staticmethod
    def get_schemas_query() -> str:
        """Returns SQL query to get all accessible schemas"""
        # No parameters are interpolated here, so no escaping is needed.
        return """
        SELECT
            s.schema_name,
            COALESCE(pg_size_pretty(sum(COALESCE(
                CASE WHEN t.table_type = 'regular'
                THEN pg_total_relation_size(
                    quote_ident(t.schema_name) || '.' || quote_ident(t.table_name)
                )
                ELSE 0
                END, 0)
            )), '0 B') as total_size,
            COUNT(t.table_name) as table_count
        FROM information_schema.schemata s
        LEFT JOIN (
            -- Regular tables
            SELECT
                schemaname as schema_name,
                tablename as table_name,
                'regular' as table_type
            FROM pg_tables

            UNION ALL

            -- Foreign tables
            SELECT
                foreign_table_schema as schema_name,
                foreign_table_name as table_name,
                'foreign' as table_type
            FROM information_schema.foreign_tables
        ) t ON t.schema_name = s.schema_name
        WHERE s.schema_name NOT IN ('pg_catalog', 'information_schema')
            AND s.schema_name NOT LIKE 'pg_%'
            AND s.schema_name NOT LIKE 'pg_toast%'
        GROUP BY s.schema_name
        ORDER BY
            COUNT(t.table_name) DESC, -- Schemas with most tables first
            total_size DESC, -- Then by size
            s.schema_name; -- Then alphabetically
        """

    @staticmethod
    def get_tables_in_schema_query(schema_name: str) -> str:
        """Returns SQL query to get all tables in a schema with descriptions"""
        # Escape the caller-supplied name so embedded quotes cannot break out
        # of the quoted literal below.
        schema_name = PreBuiltQueries._escape_literal(schema_name)
        return f"""
        (
            -- Regular tables & views: full metadata available
            SELECT
                t.table_name,
                obj_description(pc.oid) AS description,
                pg_total_relation_size(format('%I.%I', t.table_schema, t.table_name)) AS size_bytes,
                pg_stat_get_live_tuples(pc.oid) AS row_count,
                (SELECT COUNT(*) FROM information_schema.columns c
                 WHERE c.table_schema = t.table_schema
                 AND c.table_name = t.table_name) AS column_count,
                (SELECT COUNT(*) FROM pg_indexes i
                 WHERE i.schemaname = t.table_schema
                 AND i.tablename = t.table_name) AS index_count,
                t.table_type
            FROM information_schema.tables t
            JOIN pg_class pc
               ON pc.relname = t.table_name
               AND pc.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '{schema_name}')
            WHERE t.table_schema = '{schema_name}'
            AND t.table_type IN ('BASE TABLE', 'VIEW')
        )
        UNION ALL
        (
            -- Foreign tables: limited metadata (size & row count functions don't apply)
            SELECT
                ft.foreign_table_name AS table_name,
                (
                    SELECT obj_description(
                        (quote_ident(ft.foreign_table_schema) || '.' || quote_ident(ft.foreign_table_name))::regclass
                    )
                ) AS description,
                0 AS size_bytes,
                NULL AS row_count,
                (SELECT COUNT(*) FROM information_schema.columns c
                 WHERE c.table_schema = ft.foreign_table_schema
                 AND c.table_name = ft.foreign_table_name) AS column_count,
                0 AS index_count,
                'FOREIGN TABLE' AS table_type
            FROM information_schema.foreign_tables ft
            WHERE ft.foreign_table_schema = '{schema_name}'
        )
        ORDER BY size_bytes DESC;
        """

    @staticmethod
    def get_table_schema_query(schema_name: str, table: str) -> str:
        """Returns SQL query to get detailed table schema with column descriptions"""
        # Escape both caller-supplied names before interpolation (see class docstring).
        schema_name = PreBuiltQueries._escape_literal(schema_name)
        table = PreBuiltQueries._escape_literal(table)
        return f"""
        SELECT DISTINCT
            c.column_name,
            c.data_type,
            c.is_nullable,
            c.column_default,
            col_description(pc.oid, c.ordinal_position) as column_description,
            c.ordinal_position,
            CASE WHEN pk.column_name IS NOT NULL THEN true ELSE false END as is_primary_key,
            fk.foreign_table_name,
            fk.foreign_column_name
        FROM information_schema.columns c
        JOIN pg_class pc
            ON pc.relname = '{table}'
            AND pc.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '{schema_name}')
        LEFT JOIN (
            SELECT ccu.column_name
            FROM information_schema.table_constraints tc
            JOIN information_schema.constraint_column_usage ccu
                ON tc.constraint_name = ccu.constraint_name
            WHERE tc.table_schema = '{schema_name}'
                AND tc.table_name = '{table}'
                AND tc.constraint_type = 'PRIMARY KEY'
        ) pk ON c.column_name = pk.column_name
        LEFT JOIN (
            SELECT
                kcu.column_name,
                ccu.table_name as foreign_table_name,
                ccu.column_name as foreign_column_name
            FROM information_schema.table_constraints tc
            JOIN information_schema.key_column_usage kcu
                ON tc.constraint_name = kcu.constraint_name
            JOIN information_schema.constraint_column_usage ccu
                ON tc.constraint_name = ccu.constraint_name
            WHERE tc.table_schema = '{schema_name}'
                AND tc.table_name = '{table}'
                AND tc.constraint_type = 'FOREIGN KEY'
        ) fk ON c.column_name = fk.column_name
        WHERE c.table_schema = '{schema_name}'
        AND c.table_name = '{table}'
        ORDER BY c.ordinal_position;
        """
138 |
--------------------------------------------------------------------------------
/supabase_mcp/sdk_client/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deploya-labs/mcp-supabase/b6d9f060f3195da280a419f15d2e898a72e6db08/supabase_mcp/sdk_client/__init__.py
--------------------------------------------------------------------------------
/supabase_mcp/sdk_client/auth_admin_models.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Literal
2 |
3 | from pydantic import BaseModel, model_validator
4 |
5 |
class GetUserByIdParams(BaseModel):
    """Parameters for the get_user_by_id Auth Admin method."""

    # UUID of the user to fetch
    uid: str
8 |
9 |
class ListUsersParams(BaseModel):
    """Parameters for the list_users Auth Admin method (paginated)."""

    # 1-based page number
    page: int | None = 1
    # number of users per page
    per_page: int | None = 50
13 |
14 |
class CreateUserParams(BaseModel):
    """Parameters for the create_user Auth Admin method.

    At least one of ``email`` or ``phone`` must be supplied; everything else
    is optional.
    """

    email: str | None = None
    password: str | None = None
    email_confirm: bool | None = False
    phone: str | None = None
    phone_confirm: bool | None = False
    user_metadata: dict[str, Any] | None = None
    app_metadata: dict[str, Any] | None = None
    role: str | None = None
    ban_duration: str | None = None
    nonce: str | None = None

    @model_validator(mode="after")
    def check_email_or_phone(self) -> "CreateUserParams":
        # A user must be reachable via at least one channel.
        if not (self.email or self.phone):
            raise ValueError("Either email or phone must be provided")
        return self
32 |
33 |
class DeleteUserParams(BaseModel):
    """Parameters for the delete_user Auth Admin method."""

    # UUID of the user to delete
    id: str
    # soft-delete keeps the row but disables the account
    should_soft_delete: bool | None = False
37 |
38 |
class InviteUserByEmailParams(BaseModel):
    """Parameters for the invite_user_by_email Auth Admin method."""

    # address to send the invite to
    email: str
    # optional invite settings (e.g. "data", "redirect_to") — see the SDK spec
    options: dict[str, Any] | None = None
42 |
43 |
class GenerateLinkParams(BaseModel):
    """Parameters for the generate_link Auth Admin method.

    ``password`` is mandatory for signup links; ``new_email`` is mandatory
    for either email-change variant.
    """

    type: Literal[
        "signup", "invite", "magiclink", "recovery", "email_change_current", "email_change_new", "phone_change"
    ]
    email: str
    password: str | None = None
    new_email: str | None = None
    options: dict[str, Any] | None = None

    @model_validator(mode="after")
    def validate_required_fields(self) -> "GenerateLinkParams":
        # Signup links create the account, so a password must accompany them.
        if self.type == "signup" and not self.password:
            raise ValueError("Password is required for signup links")

        # Both email-change variants need the target address.
        if self.type in ("email_change_current", "email_change_new") and not self.new_email:
            raise ValueError("new_email is required for email change links")

        return self
64 |
65 |
class UpdateUserByIdParams(BaseModel):
    """Parameters for the update_user_by_id Auth Admin method.

    Only ``uid`` is required; any other supplied field overwrites the
    corresponding user attribute.
    """

    # UUID of the user to update
    uid: str
    email: str | None = None
    password: str | None = None
    email_confirm: bool | None = False
    phone: str | None = None
    phone_confirm: bool | None = False
    user_metadata: dict[str, Any] | None = None
    app_metadata: dict[str, Any] | None = None
    role: str | None = None
    ban_duration: str | None = None
    nonce: str | None = None
78 |
79 |
class DeleteFactorParams(BaseModel):
    """Parameters for the delete_factor Auth Admin method."""

    # ID of the MFA factor to delete
    id: str
    # UUID of the user owning the factor
    user_id: str
83 |
84 |
# Map method names to their parameter models
# NOTE: key order is user-visible — call_auth_admin_method joins these keys
# into its "Unknown method" error message, so keep insertion order stable.
PARAM_MODELS = {
    "get_user_by_id": GetUserByIdParams,
    "list_users": ListUsersParams,
    "create_user": CreateUserParams,
    "delete_user": DeleteUserParams,
    "invite_user_by_email": InviteUserByEmailParams,
    "generate_link": GenerateLinkParams,
    "update_user_by_id": UpdateUserByIdParams,
    "delete_factor": DeleteFactorParams,
}
96 |
--------------------------------------------------------------------------------
/supabase_mcp/sdk_client/auth_admin_sdk_spec.py:
--------------------------------------------------------------------------------
def get_auth_admin_methods_spec() -> dict:
    """Returns a detailed specification of all Auth Admin methods.

    Each top-level key is a method name accepted by
    SupabaseSDKClient.call_auth_admin_method; the value documents its
    parameters, return shape, an example request/response, and caveats.
    The dictionary is static — it is documentation data, not live schema.
    """
    return {
        # --- get_user_by_id ---
        "get_user_by_id": {
            "description": "Retrieve a user by their ID",
            "parameters": {"uid": {"type": "string", "description": "The user's UUID", "required": True}},
            "returns": {"type": "object", "description": "User object containing all user data"},
            "example": {
                "request": {"uid": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b"},
                "response": {
                    "id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
                    "email": "user@example.com",
                    "phone": "",
                    "created_at": "2023-01-01T00:00:00Z",
                    "confirmed_at": "2023-01-01T00:00:00Z",
                    "last_sign_in_at": "2023-01-01T00:00:00Z",
                    "user_metadata": {"name": "John Doe"},
                    "app_metadata": {},
                },
            },
        },
        # --- list_users ---
        "list_users": {
            "description": "List all users with pagination",
            "parameters": {
                "page": {
                    "type": "integer",
                    "description": "Page number (starts at 1)",
                    "required": False,
                    "default": 1,
                },
                "per_page": {
                    "type": "integer",
                    "description": "Number of users per page",
                    "required": False,
                    "default": 50,
                },
            },
            "returns": {"type": "object", "description": "Paginated list of users with metadata"},
            "example": {
                "request": {"page": 1, "per_page": 10},
                "response": {
                    "users": [
                        {
                            "id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
                            "email": "user@example.com",
                            "user_metadata": {"name": "John Doe"},
                        }
                    ],
                    "aud": "authenticated",
                    "total_count": 100,
                    "next_page": 2,
                },
            },
        },
        # --- create_user ---
        "create_user": {
            "description": "Create a new user. Does not send a confirmation email by default.",
            "parameters": {
                "email": {"type": "string", "description": "The user's email address", "required": False},
                "password": {"type": "string", "description": "The user's password", "required": False},
                "email_confirm": {
                    "type": "boolean",
                    "description": "Confirms the user's email address if set to true",
                    "required": False,
                    "default": False,
                },
                "phone": {
                    "type": "string",
                    "description": "The user's phone number with country code",
                    "required": False,
                },
                "phone_confirm": {
                    "type": "boolean",
                    "description": "Confirms the user's phone number if set to true",
                    "required": False,
                    "default": False,
                },
                "user_metadata": {
                    "type": "object",
                    "description": "A custom data object to store the user's metadata",
                    "required": False,
                },
                "app_metadata": {
                    "type": "object",
                    "description": "A custom data object to store the user's application specific metadata",
                    "required": False,
                },
                "role": {
                    "type": "string",
                    "description": "The role claim set in the user's access token JWT",
                    "required": False,
                },
                "ban_duration": {
                    "type": "string",
                    "description": "Determines how long a user is banned for",
                    "required": False,
                },
                "nonce": {
                    "type": "string",
                    "description": "The nonce sent for reauthentication if the user's password is to be updated",
                    "required": False,
                },
            },
            "returns": {"type": "object", "description": "Created user object"},
            "example": {
                "request": {
                    "email": "new@example.com",
                    "password": "secure-password",
                    "email_confirm": True,
                    "user_metadata": {"name": "New User"},
                },
                "response": {
                    "id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
                    "email": "new@example.com",
                    "email_confirmed_at": "2023-01-01T00:00:00Z",
                    "user_metadata": {"name": "New User"},
                },
            },
            "notes": "Either email or phone must be provided. Use invite_user_by_email() if you want to send an email invite.",
        },
        # --- delete_user ---
        "delete_user": {
            "description": "Delete a user by their ID. Requires a service_role key.",
            "parameters": {
                "id": {"type": "string", "description": "The user's UUID", "required": True},
                "should_soft_delete": {
                    "type": "boolean",
                    "description": "If true, the user will be soft-deleted (preserving their data but disabling the account). Defaults to false.",
                    "required": False,
                    "default": False,
                },
            },
            "returns": {"type": "object", "description": "Success message"},
            "example": {
                "request": {"id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b"},
                "response": {"message": "User deleted successfully"},
            },
            "notes": "This function should only be called on a server. Never expose your service_role key in the browser.",
        },
        # --- invite_user_by_email ---
        "invite_user_by_email": {
            "description": "Sends an invite link to a user's email address. Typically used by administrators to invite users to join the application.",
            "parameters": {
                "email": {"type": "string", "description": "The email address of the user", "required": True},
                "options": {
                    "type": "object",
                    "description": "Optional settings for the invite",
                    "required": False,
                    "properties": {
                        "data": {
                            "type": "object",
                            "description": "A custom data object to store additional metadata about the user. Maps to auth.users.user_metadata",
                            "required": False,
                        },
                        "redirect_to": {
                            "type": "string",
                            "description": "The URL which will be appended to the email link. Once clicked the user will end up on this URL",
                            "required": False,
                        },
                    },
                },
            },
            "returns": {"type": "object", "description": "User object for the invited user"},
            "example": {
                "request": {
                    "email": "invite@example.com",
                    "options": {"data": {"name": "John Doe"}, "redirect_to": "https://example.com/welcome"},
                },
                "response": {
                    "id": "a1a1a1a1-a1a1-a1a1-a1a1-a1a1a1a1a1a1",
                    "email": "invite@example.com",
                    "role": "authenticated",
                    "email_confirmed_at": None,
                    "invited_at": "2023-01-01T00:00:00Z",
                },
            },
            "notes": "Note that PKCE is not supported when using invite_user_by_email. This is because the browser initiating the invite is often different from the browser accepting the invite.",
        },
        # --- generate_link ---
        "generate_link": {
            "description": "Generate an email link for various authentication purposes. Handles user creation for signup, invite and magiclink types.",
            "parameters": {
                "type": {
                    "type": "string",
                    "description": "Link type: 'signup', 'invite', 'magiclink', 'recovery', 'email_change_current', 'email_change_new', 'phone_change'",
                    "required": True,
                    "enum": [
                        "signup",
                        "invite",
                        "magiclink",
                        "recovery",
                        "email_change_current",
                        "email_change_new",
                        "phone_change",
                    ],
                },
                "email": {"type": "string", "description": "User's email address", "required": True},
                "password": {
                    "type": "string",
                    "description": "User's password. Only required if type is signup",
                    "required": False,
                },
                "new_email": {
                    "type": "string",
                    "description": "New email address. Only required if type is email_change_current or email_change_new",
                    "required": False,
                },
                "options": {
                    "type": "object",
                    "description": "Additional options for the link",
                    "required": False,
                    "properties": {
                        "data": {
                            "type": "object",
                            "description": "Custom JSON object containing user metadata. Only accepted if type is signup, invite, or magiclink",
                            "required": False,
                        },
                        "redirect_to": {
                            "type": "string",
                            "description": "A redirect URL which will be appended to the generated email link",
                            "required": False,
                        },
                    },
                },
            },
            "returns": {"type": "object", "description": "Generated link details"},
            "example": {
                "request": {
                    "type": "signup",
                    "email": "user@example.com",
                    "password": "secure-password",
                    "options": {"data": {"name": "John Doe"}, "redirect_to": "https://example.com/welcome"},
                },
                "response": {
                    "action_link": "https://your-project.supabase.co/auth/v1/verify?token=...",
                    "email_otp": "123456",
                    "hashed_token": "...",
                    "redirect_to": "https://example.com/welcome",
                    "verification_type": "signup",
                },
            },
            # NOTE(review): "email_change_email" below looks like a typo for the
            # email_change_current/email_change_new types — confirm before editing
            # this runtime string.
            "notes": "generate_link() only generates the email link for email_change_email if the Secure email change is enabled in your project's email auth provider settings.",
        },
        # --- update_user_by_id ---
        "update_user_by_id": {
            "description": "Update user attributes by ID. Requires a service_role key.",
            "parameters": {
                "uid": {"type": "string", "description": "The user's UUID", "required": True},
                "email": {"type": "string", "description": "The user's email", "required": False},
                "phone": {"type": "string", "description": "The user's phone", "required": False},
                "password": {"type": "string", "description": "The user's password", "required": False},
                "email_confirm": {
                    "type": "boolean",
                    "description": "Confirms the user's email address if set to true",
                    "required": False,
                },
                "phone_confirm": {
                    "type": "boolean",
                    "description": "Confirms the user's phone number if set to true",
                    "required": False,
                },
                "user_metadata": {
                    "type": "object",
                    "description": "A custom data object to store the user's metadata. Maps to auth.users.raw_user_meta_data column",
                    "required": False,
                },
                "app_metadata": {
                    "type": "object",
                    "description": "A custom data object to store the user's application specific metadata. Maps to auth.users.app_metadata column",
                    "required": False,
                },
                "role": {
                    "type": "string",
                    "description": "The role claim set in the user's access token JWT",
                    "required": False,
                },
                "ban_duration": {
                    "type": "string",
                    "description": "Determines how long a user is banned for",
                    "required": False,
                },
                "nonce": {
                    "type": "string",
                    "description": "The nonce sent for reauthentication if the user's password is to be updated",
                    "required": False,
                },
            },
            "returns": {"type": "object", "description": "Updated user object"},
            "example": {
                "request": {
                    "uid": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
                    "email": "updated@example.com",
                    "user_metadata": {"name": "Updated Name"},
                },
                "response": {
                    "id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
                    "email": "updated@example.com",
                    "user_metadata": {"name": "Updated Name"},
                },
            },
            "notes": "This function should only be called on a server. Never expose your service_role key in the browser.",
        },
        # --- delete_factor ---
        # NOTE: delete_factor is documented here but raises NotImplementedError in
        # SupabaseSDKClient (the underlying SDK does not implement it yet).
        "delete_factor": {
            "description": "Deletes a factor on a user. This will log the user out of all active sessions if the deleted factor was verified.",
            "parameters": {
                "user_id": {
                    "type": "string",
                    "description": "ID of the user whose factor is being deleted",
                    "required": True,
                },
                "id": {"type": "string", "description": "ID of the MFA factor to delete", "required": True},
            },
            "returns": {"type": "object", "description": "Success message"},
            "example": {
                "request": {"user_id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b", "id": "totp-factor-id-123"},
                "response": {"message": "Factor deleted successfully"},
            },
            "notes": "This will log the user out of all active sessions if the deleted factor was verified.",
        },
    }
316 |
--------------------------------------------------------------------------------
/supabase_mcp/sdk_client/python_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Any, TypeVar
4 |
5 | from pydantic import BaseModel, ValidationError
6 | from supabase import AsyncClient, create_async_client
7 | from supabase.lib.client_options import ClientOptions
8 |
9 | from supabase_mcp.exceptions import PythonSDKError
10 | from supabase_mcp.logger import logger
11 | from supabase_mcp.sdk_client.auth_admin_models import (
12 | PARAM_MODELS,
13 | CreateUserParams,
14 | DeleteFactorParams,
15 | DeleteUserParams,
16 | GenerateLinkParams,
17 | GetUserByIdParams,
18 | InviteUserByEmailParams,
19 | ListUsersParams,
20 | UpdateUserByIdParams,
21 | )
22 | from supabase_mcp.sdk_client.auth_admin_sdk_spec import get_auth_admin_methods_spec
23 | from supabase_mcp.settings import settings
24 |
# Any of the PARAM_MODELS pydantic classes; lets _validate_params return the
# same concrete model type it was handed.
T = TypeVar("T", bound=BaseModel)
26 |
27 |
class IncorrectSDKParamsError(PythonSDKError):
    """Raised when arguments forwarded to the SDK do not match its method signature."""
32 |
33 |
class SupabaseSDKClient:
    """Supabase Python SDK client, which exposes functionality related to Auth admin of the Python SDK."""

    # Process-wide singleton managed by create()/get_instance().
    _instance: SupabaseSDKClient | None = None

    def __init__(self, project_ref: str, service_role_key: str):
        # The async client is attached in create(); stays None until then.
        self.client: AsyncClient | None = None
        self.project_ref = project_ref
        self.service_role_key = service_role_key

    def get_supabase_url(self) -> str:
        """Returns the Supabase URL based on the project reference"""
        return f"https://{self.project_ref}.supabase.co"

    @classmethod
    async def create(
        cls,
        project_ref: str = settings.supabase_project_ref,
        service_role_key: str = settings.supabase_service_role_key,
    ) -> SupabaseSDKClient:
        """Create the singleton and its underlying async Supabase client.

        NOTE: the defaults are captured from settings at import time. If an
        instance already exists, the arguments are ignored and the existing
        instance is returned.

        Raises:
            PythonSDKError: if the async client cannot be created.
        """
        if cls._instance is None:
            try:
                cls._instance = cls(project_ref, service_role_key)
                supabase_url = cls._instance.get_supabase_url()
                cls._instance.client = await create_async_client(
                    supabase_url,
                    service_role_key,
                    options=ClientOptions(
                        auto_refresh_token=False,
                        persist_session=False,
                    ),
                )
                logger.info(f"Created Supabase SDK client for project {project_ref}")
            except Exception as e:
                # Bug fix: discard the half-built instance so a later call can
                # retry. Previously _instance stayed assigned with client=None,
                # so every subsequent get_instance() returned a broken client.
                cls._instance = None
                logger.error(f"Error creating Supabase SDK client: {e}")
                raise PythonSDKError(f"Error creating Supabase SDK client: {e}") from e
        return cls._instance

    @classmethod
    async def get_instance(cls) -> SupabaseSDKClient:
        """Returns the singleton instance, creating it on first use."""
        if cls._instance is None:
            await cls.create()
        return cls._instance

    def return_python_sdk_spec(self) -> dict:
        """Returns the Python SDK spec"""
        return get_auth_admin_methods_spec()

    def _validate_params(self, method: str, params: dict, param_model_cls: type[T]) -> T:
        """Validate parameters using the appropriate Pydantic model"""
        try:
            return param_model_cls.model_validate(params)
        except ValidationError as e:
            raise PythonSDKError(f"Invalid parameters for method {method}: {str(e)}") from e

    async def _get_user_by_id(self, params: GetUserByIdParams) -> dict:
        """Get user by ID implementation"""
        admin_auth_client = self.client.auth.admin
        result = await admin_auth_client.get_user_by_id(params.uid)
        return result

    async def _list_users(self, params: ListUsersParams) -> dict:
        """List users implementation"""
        admin_auth_client = self.client.auth.admin
        result = await admin_auth_client.list_users(page=params.page, per_page=params.per_page)
        return result

    async def _create_user(self, params: CreateUserParams) -> dict:
        """Create user implementation"""
        admin_auth_client = self.client.auth.admin
        # Drop unset optionals so the SDK only sees fields the caller provided.
        user_data = params.model_dump(exclude_none=True)
        result = await admin_auth_client.create_user(user_data)
        return result

    async def _delete_user(self, params: DeleteUserParams) -> dict:
        """Delete user implementation"""
        admin_auth_client = self.client.auth.admin
        result = await admin_auth_client.delete_user(params.id, should_soft_delete=params.should_soft_delete)
        return result

    async def _invite_user_by_email(self, params: InviteUserByEmailParams) -> dict:
        """Invite user by email implementation"""
        admin_auth_client = self.client.auth.admin
        options = params.options if params.options else {}
        result = await admin_auth_client.invite_user_by_email(params.email, options)
        return result

    async def _generate_link(self, params: GenerateLinkParams) -> dict:
        """Generate link implementation"""
        admin_auth_client = self.client.auth.admin

        # Create a params dictionary as expected by the SDK
        params_dict = params.model_dump(exclude_none=True)

        try:
            # The SDK expects a single 'params' parameter containing all the fields
            result = await admin_auth_client.generate_link(params=params_dict)
            return result
        except TypeError as e:
            # Catch parameter errors and provide a more helpful message
            error_msg = str(e)
            if "unexpected keyword argument" in error_msg:
                raise IncorrectSDKParamsError(
                    f"Incorrect parameters for generate_link: {error_msg}. "
                    f"Please check the SDK specification for the correct parameter structure."
                ) from e
            raise

    async def _update_user_by_id(self, params: UpdateUserByIdParams) -> dict:
        """Update user by ID implementation"""
        admin_auth_client = self.client.auth.admin
        uid = params.uid
        # Remove uid from attributes as it's passed separately
        attributes = params.model_dump(exclude={"uid"}, exclude_none=True)
        result = await admin_auth_client.update_user_by_id(uid, attributes)
        return result

    async def _delete_factor(self, params: DeleteFactorParams) -> dict:
        """Delete factor implementation"""
        # This method is not implemented in the Supabase SDK yet
        raise NotImplementedError("The delete_factor method is not implemented in the Supabase SDK yet")

    async def call_auth_admin_method(self, method: str, params: dict) -> Any:
        """Calls a method of the Python SDK client.

        Args:
            method: One of the PARAM_MODELS keys (e.g. "create_user").
            params: Raw parameters; validated against the method's model.

        Raises:
            PythonSDKError: unknown method, invalid params, or SDK failure.
            IncorrectSDKParamsError: generate_link called with a mismatched
                parameter structure (re-raised unwrapped).
        """
        if not self.client:
            raise PythonSDKError("Python SDK client not initialized")

        # Validate method exists
        if method not in PARAM_MODELS:
            available_methods = ", ".join(PARAM_MODELS.keys())
            raise PythonSDKError(f"Unknown method: {method}. Available methods: {available_methods}")

        # Get the appropriate model class and validate parameters
        param_model_cls = PARAM_MODELS[method]
        validated_params = self._validate_params(method, params, param_model_cls)

        # Method dispatch using a dictionary of method implementations
        method_handlers = {
            "get_user_by_id": self._get_user_by_id,
            "list_users": self._list_users,
            "create_user": self._create_user,
            "delete_user": self._delete_user,
            "invite_user_by_email": self._invite_user_by_email,
            "generate_link": self._generate_link,
            "update_user_by_id": self._update_user_by_id,
            "delete_factor": self._delete_factor,
        }

        # Call the appropriate method handler
        try:
            handler = method_handlers.get(method)
            if not handler:
                raise PythonSDKError(f"Method {method} is not implemented")

            return await handler(validated_params)
        except Exception as e:
            if isinstance(e, IncorrectSDKParamsError):
                # Re-raise our custom error without wrapping it
                raise e
            logger.error(f"Error calling {method}: {e}")
            raise PythonSDKError(f"Error calling {method}: {str(e)}") from e
196 |
--------------------------------------------------------------------------------
/supabase_mcp/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import Literal
4 |
5 | from pydantic import Field, field_validator
6 | from pydantic_settings import BaseSettings, SettingsConfigDict
7 |
8 | from supabase_mcp.logger import logger
9 |
# AWS regions where Supabase can host a project; used to validate the
# SUPABASE_REGION setting. Declared as a Literal so type checkers can flag
# unknown regions, while __args__ provides the runtime list of values.
SUPPORTED_REGIONS = Literal[
    "us-west-1",  # West US (North California)
    "us-east-1",  # East US (North Virginia)
    "us-east-2",  # East US (Ohio)
    "ca-central-1",  # Canada (Central)
    "eu-west-1",  # West EU (Ireland)
    "eu-west-2",  # West Europe (London)
    "eu-west-3",  # West EU (Paris)
    "eu-central-1",  # Central EU (Frankfurt)
    "eu-central-2",  # Central Europe (Zurich)
    "eu-north-1",  # North EU (Stockholm)
    "ap-south-1",  # South Asia (Mumbai)
    "ap-southeast-1",  # Southeast Asia (Singapore)
    "ap-northeast-1",  # Northeast Asia (Tokyo)
    "ap-northeast-2",  # Northeast Asia (Seoul)
    "ap-southeast-2",  # Oceania (Sydney)
    "sa-east-1",  # South America (São Paulo)
]
28 |
29 |
def find_config_file() -> str | None:
    """Find the .env file in order of precedence:
    1. Current working directory (where command is run)
    2. Global config:
       - Windows: %APPDATA%/supabase-mcp/.env
       - macOS/Linux: ~/.config/supabase-mcp/.env

    Returns:
        Path to the first config file found, or None if none exist.
    """
    logger.info("Searching for configuration files...")

    # 1. Check current directory
    cwd_config = Path.cwd() / ".env"
    if cwd_config.exists():
        logger.info(f"Found local config file: {cwd_config}")
        return str(cwd_config)

    # 2. Check global config
    global_config: Path | None = None
    if os.name == "nt":  # Windows
        appdata = os.environ.get("APPDATA")
        if appdata:
            global_config = Path(appdata) / "supabase-mcp" / ".env"
        else:
            # Without APPDATA the path would be a meaningless relative path
            # ("supabase-mcp/.env"); skip the global lookup instead.
            logger.warning("APPDATA is not set; skipping global config lookup")
    else:  # macOS/Linux
        global_config = Path.home() / ".config" / "supabase-mcp" / ".env"

    if global_config is not None and global_config.exists():
        logger.info(f"Found global config file: {global_config}")
        return str(global_config)

    logger.warning("No config files found, using default settings")
    return None
58 |
59 |
class Settings(BaseSettings):
    """Initializes settings for Supabase MCP server.

    Values are resolved by pydantic-settings with the usual precedence:
    real environment variables (via the aliases below) override values from
    an .env file, which override the local-development defaults.
    """

    supabase_project_ref: str = Field(
        default="127.0.0.1:54322",  # Local Supabase default
        description="Supabase project ref",
        alias="SUPABASE_PROJECT_REF",
    )
    supabase_db_password: str = Field(
        default="postgres",  # Local Supabase default
        description="Supabase db password",
        alias="SUPABASE_DB_PASSWORD",
    )
    supabase_region: str = Field(
        default="eu-central-1",  # Central EU (Frankfurt) — NOTE(review): prior comment said Ireland; confirm intended default
        description="Supabase region for connection",
        alias="SUPABASE_REGION",
    )
    supabase_access_token: str | None = Field(
        default=None,
        description="Optional personal access token for accessing Supabase Management API",
        alias="SUPABASE_ACCESS_TOKEN",
    )
    supabase_service_role_key: str | None = Field(
        default=None,
        description="Optional service role key for accessing Python SDK",
        alias="SUPABASE_SERVICE_ROLE_KEY",
    )

    @field_validator("supabase_region")
    @classmethod
    def validate_region(cls, v: str) -> str:
        """Validate that the region is supported by Supabase.

        Raises:
            ValueError: if the region is not one of SUPPORTED_REGIONS.
        """
        # Literal[...] exposes its allowed values at runtime via __args__.
        if v not in SUPPORTED_REGIONS.__args__:
            supported = "\n - ".join([""] + list(SUPPORTED_REGIONS.__args__))
            raise ValueError(f"Region '{v}' is not supported. Supported regions are:{supported}")
        return v

    @classmethod
    def with_config(cls, config_file: str | None = None) -> "Settings":
        """Create Settings with specific config file.

        Args:
            config_file: Path to .env file to use, or None for no config file
        """

        # Create a new Settings class with the specific config.
        # model_config is class-level in pydantic-settings, so a throwaway
        # subclass is how a single instance gets pointed at one .env file.
        class SettingsWithConfig(cls):
            model_config = SettingsConfigDict(env_file=config_file, env_file_encoding="utf-8")

        instance = SettingsWithConfig()

        # Log configuration source and precedence
        env_vars_present = any(var in os.environ for var in ["SUPABASE_PROJECT_REF", "SUPABASE_DB_PASSWORD"])

        if env_vars_present:
            logger.warning("Using environment variables (highest precedence)")
            if config_file:
                logger.warning(f"Note: Config file {config_file} exists but environment variables take precedence")
            for var in ["SUPABASE_PROJECT_REF", "SUPABASE_DB_PASSWORD"]:
                if var in os.environ:
                    logger.info(f"Using {var} from environment")
        elif config_file:
            logger.info(f"Using settings from config file: {config_file}")
        else:
            logger.info("Using default settings (local development)")

        # Log final configuration; secrets are masked (length only).
        logger.info("Final configuration:")
        logger.info(f"  Project ref: {instance.supabase_project_ref}")
        logger.info(f"  Password: {'*' * len(instance.supabase_db_password)}")
        logger.info(f"  Region: {instance.supabase_region}")
        logger.info(
            f"  Service role key: {'*' * len(instance.supabase_service_role_key) if instance.supabase_service_role_key else 'Not set'}"
        )
        return instance
136 |
137 |
# Module-level singleton - maintains existing interface.
# Instantiated at import time so every importer shares one Settings object;
# config-file discovery (cwd first, then the global config dir) runs once here.
settings = Settings.with_config(find_config_file())
140 |
--------------------------------------------------------------------------------
/supabase_mcp/validators.py:
--------------------------------------------------------------------------------
import re

from supabase_mcp.exceptions import ValidationError
2 |
3 |
def validate_schema_name(schema_name: str) -> str:
    """Validate a schema name and return it unchanged.

    Rules:
    - Must be a string
    - Cannot be empty or whitespace-only
    - Cannot contain spaces

    Raises:
        ValidationError: if any rule is violated.
    """
    if not isinstance(schema_name, str):
        raise ValidationError("Schema name must be a string")
    if not schema_name.strip():
        raise ValidationError("Schema name cannot be empty")
    if " " in schema_name:
        raise ValidationError("Schema name cannot contain spaces")
    return schema_name
19 |
20 |
def validate_table_name(table: str) -> str:
    """Validate a table name and return it unchanged.

    Rules:
    - Must be a string
    - Cannot be empty or whitespace-only
    - Cannot contain spaces

    Raises:
        ValidationError: if any rule is violated.
    """
    if not isinstance(table, str):
        raise ValidationError("Table name must be a string")
    if not table.strip():
        raise ValidationError("Table name cannot be empty")
    if " " in table:
        raise ValidationError("Table name cannot contain spaces")
    return table
36 |
37 |
def validate_sql_query(query: str) -> str:
    """Validate a SQL query string and return it unchanged.

    Rules:
    - Must be a string
    - Cannot be empty or whitespace-only

    Raises:
        ValidationError: if any rule is violated.
    """
    if not isinstance(query, str):
        raise ValidationError("Query must be a string")
    if not query.strip():
        raise ValidationError("Query cannot be empty")
    return query
51 |
52 |
def validate_transaction_control(query: str) -> bool:
    """Return True if the query contains transaction control statements.

    Matches BEGIN / COMMIT / ROLLBACK as whole words, case-insensitively.
    The previous substring check produced false positives for identifiers
    that merely contain a keyword (e.g. a table named "commits").

    Args:
        query: SQL text to inspect.

    Returns:
        True when a transaction-control keyword appears as a standalone word.
    """
    return re.search(r"\b(BEGIN|COMMIT|ROLLBACK)\b", query, re.IGNORECASE) is not None
61 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deploya-labs/mcp-supabase/b6d9f060f3195da280a419f15d2e898a72e6db08/tests/__init__.py
--------------------------------------------------------------------------------
/tests/api_manager/test_api_manager.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import pytest
4 | import pytest_asyncio
5 |
6 | from supabase_mcp.api_manager.api_manager import SupabaseApiManager
7 | from supabase_mcp.api_manager.api_safety_config import SafetyLevel
8 | from supabase_mcp.exceptions import SafetyError
9 |
10 |
@pytest_asyncio.fixture
async def api_manager():
    """Yield a SupabaseApiManager instance, closing it after the test."""
    instance = await SupabaseApiManager.get_manager()
    try:
        yield instance
    finally:
        # Always release resources, even when the test fails.
        await instance.close()
19 |
20 |
class TestApiManager:
    """Tests for the Supabase Management API manager"""

    @pytest.mark.asyncio
    async def test_safety_modes(self, api_manager):
        """Test API manager safety modes"""
        # Start in safe mode
        assert api_manager.mode == SafetyLevel.SAFE

        # Switch to unsafe mode
        api_manager.switch_mode(SafetyLevel.UNSAFE)
        assert api_manager.mode == SafetyLevel.UNSAFE

        # Switch back to safe mode
        api_manager.switch_mode(SafetyLevel.SAFE)
        assert api_manager.mode == SafetyLevel.SAFE

    @pytest.mark.asyncio
    async def test_mode_safety(self, api_manager):
        """Test that unsafe operations are blocked in safe mode.

        Async now: the previous sync version used asyncio.run() against the
        async `api_manager` fixture, which pytest-asyncio only resolves for
        async tests, and which would run coroutines outside the fixture's loop.
        """
        api_manager.switch_mode(SafetyLevel.SAFE)

        unsafe_operations = [
            ("PATCH", "/v1/projects/123/config/auth"),  # Auth config
            ("PUT", "/v1/projects/123/config/database/postgres"),  # DB config
        ]

        for method, path in unsafe_operations:
            with pytest.raises(SafetyError) as exc:
                await api_manager.execute_request(method, path)
            assert "requires YOLO mode" in str(exc.value)

        # Should work in unsafe mode
        api_manager.switch_mode(SafetyLevel.UNSAFE)
        for method, path in unsafe_operations:
            try:
                await api_manager.execute_request(method, path)
            except SafetyError as e:
                pytest.fail(f"Should not raise SafetyError in unsafe mode: {e}")
            except Exception:
                # Network/auth failures are acceptable here; we only assert
                # that the safety layer no longer blocks the call.
                pass

    @pytest.mark.asyncio
    async def test_spec_loading(self, api_manager):
        """Test that API spec is properly loaded"""
        spec = api_manager.get_spec()
        assert isinstance(spec, dict)
        assert "paths" in spec  # OpenAPI spec should have paths
        assert "components" in spec  # OpenAPI spec should have components
67 |
--------------------------------------------------------------------------------
/tests/api_manager/test_safety_config.py:
--------------------------------------------------------------------------------
import asyncio

import pytest
import pytest_asyncio

from supabase_mcp.api_manager.api_manager import SupabaseApiManager
from supabase_mcp.api_manager.api_safety_config import SafetyConfig, SafetyLevel
7 |
8 |
@pytest.fixture
def safety_config():
    """Provide a fresh SafetyConfig instance for each test."""
    return SafetyConfig()
12 |
13 |
@pytest_asyncio.fixture
async def api_manager():
    """Fixture that provides an API manager instance.

    Uses pytest_asyncio so async setup/teardown run on the test's event
    loop. The previous version neither awaited create() (despite its own
    comment claiming so) nor closed the manager on the same loop — it
    called asyncio.run() inside an already-async fixture.
    """
    manager = await SupabaseApiManager.create()
    try:
        yield manager
    finally:
        await manager.close()  # Cleanup after tests
22 |
23 |
class TestPathMatching:
    """Tests for path matching functionality"""

    def test_basic_path_matching(self, safety_config):
        """Test basic path matching with and without parameters"""
        matches = safety_config._path_matches

        # Exact and parameterised patterns that should match
        for pattern, path in (
            ("/v1/projects", "/v1/projects"),
            ("/v1/projects/{ref}", "/v1/projects/123"),
            ("/v1/projects/{ref}/functions/{slug}", "/v1/projects/123/functions/my-func"),
        ):
            assert matches(pattern, path), f"{pattern} should match {path}"

        # Different segments or segment counts should not match
        for pattern, path in (
            ("/v1/projects", "/v1/other"),
            ("/v1/projects/{ref}", "/v1/projects/123/extra"),
            ("/v1/projects", "/v1/projects/123"),
        ):
            assert not matches(pattern, path), f"{pattern} should not match {path}"
40 |
41 |
class TestSafetyLevels:
    """Tests for operation safety level determination"""

    def test_blocked_operations(self, safety_config):
        """Blocked operations are rejected with the BLOCKED level"""
        blocked_cases = (
            ("DELETE", "/v1/projects/123"),  # Delete project
            ("DELETE", "/v1/organizations/myorg"),  # Delete org
            ("DELETE", "/v1/projects/123/database"),  # Delete database
        )

        for method, path in blocked_cases:
            allowed, reason, level = safety_config.is_operation_allowed(method, path)
            assert not allowed, f"Operation {method} {path} should be blocked"
            assert level == SafetyLevel.BLOCKED
            assert "blocked" in reason.lower()

    def test_unsafe_operations(self, safety_config):
        """Unsafe operations are allowed but flagged UNSAFE"""
        unsafe_cases = (
            ("POST", "/v1/projects"),  # Create project
            ("POST", "/v1/organizations"),  # Create org
            ("PATCH", "/v1/projects/123/config/auth"),  # Auth config
            ("PUT", "/v1/projects/123/config/secrets"),  # Secrets
            ("PATCH", "/v1/projects/123/config/pooler"),  # Pooler config
            ("PUT", "/v1/projects/123/config/database/postgres"),  # Postgres config
        )

        for method, path in unsafe_cases:
            allowed, reason, level = safety_config.is_operation_allowed(method, path)
            assert allowed, f"Operation {method} {path} should be allowed but unsafe"
            assert level == SafetyLevel.UNSAFE
            assert "yolo" in reason.lower()

    def test_safe_operations(self, safety_config):
        """Read-only operations are allowed and flagged SAFE"""
        safe_cases = (
            ("GET", "/v1/projects"),  # List projects
            ("GET", "/v1/projects/123/config"),  # Get config
            ("GET", "/v1/organizations"),  # List orgs
        )

        for method, path in safe_cases:
            allowed, reason, level = safety_config.is_operation_allowed(method, path)
            assert allowed, f"Operation {method} {path} should be allowed"
            assert level == SafetyLevel.SAFE
            assert "allowed" in reason.lower()
89 |
90 |
class TestEdgeCases:
    """Tests for edge cases and error handling"""

    def test_unknown_method(self, safety_config):
        """Unknown HTTP methods fall back to the safe default"""
        allowed, _reason, level = safety_config.is_operation_allowed("INVALID", "/v1/projects")
        assert allowed
        assert level == SafetyLevel.SAFE  # Unknown methods default to safe

    def test_empty_path(self, safety_config):
        """An empty path is treated as a safe operation"""
        allowed, _reason, level = safety_config.is_operation_allowed("GET", "")
        assert allowed
        assert level == SafetyLevel.SAFE

    def test_rule_listing(self, safety_config):
        """The rule listing names both categories and key operations"""
        listing = safety_config.list_all_rules()
        assert "Blocked operations" in listing
        assert "Unsafe operations" in listing
        # Spot-check one entry from each category
        assert "/v1/projects/{ref}" in listing  # Blocked operation
        assert "/v1/projects/{ref}/config/auth" in listing  # Unsafe operation
114 |
--------------------------------------------------------------------------------
/tests/api_manager/test_spec_manager.py:
--------------------------------------------------------------------------------
1 | import json
2 | from unittest.mock import AsyncMock, MagicMock, mock_open, patch
3 |
4 | import httpx
5 | import pytest
6 | import pytest_asyncio
7 |
8 | from supabase_mcp.api_manager.api_spec_manager import ApiSpecManager
9 |
# Test data
# Minimal OpenAPI 3 document used as a stand-in for the real Supabase spec.
SAMPLE_SPEC = {"openapi": "3.0.0", "paths": {"/v1/test": {"get": {"operationId": "test"}}}}
12 |
13 |
@pytest_asyncio.fixture
async def api_spec_manager():
    # Fresh manager per test; ApiSpecManager.create() is async, hence the async fixture.
    manager = await ApiSpecManager.create()
    yield manager
18 |
19 |
# Local Spec Tests
@pytest.mark.asyncio
async def test_load_local_spec_success(api_spec_manager):
    """Test successful loading of local spec file.

    Async because `api_spec_manager` is an async fixture: pytest-asyncio
    only resolves async fixtures for async tests, so the previous sync
    version received an un-awaited async generator instead of a manager.
    """
    mock_file = mock_open(read_data=json.dumps(SAMPLE_SPEC))

    with patch("builtins.open", mock_file):
        result = api_spec_manager._load_local_spec()

    assert result == SAMPLE_SPEC
    mock_file.assert_called_once()
30 |
31 |
@pytest.mark.asyncio
async def test_load_local_spec_file_not_found(api_spec_manager):
    """Test handling of missing local spec file.

    Async because the `api_spec_manager` fixture is async and is only
    resolved by pytest-asyncio for async tests.
    """
    with patch("builtins.open", side_effect=FileNotFoundError), pytest.raises(FileNotFoundError):
        api_spec_manager._load_local_spec()
36 |
37 |
@pytest.mark.asyncio
async def test_load_local_spec_invalid_json(api_spec_manager):
    """Test handling of invalid JSON in local spec.

    Async because the `api_spec_manager` fixture is async and is only
    resolved by pytest-asyncio for async tests.
    """
    mock_file = mock_open(read_data="invalid json")

    with patch("builtins.open", mock_file), pytest.raises(json.JSONDecodeError):
        api_spec_manager._load_local_spec()
44 |
45 |
# Remote Spec Tests
@pytest.mark.asyncio
async def test_fetch_remote_spec_success(api_spec_manager):
    """Test successful remote spec fetch"""
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.json.return_value = SAMPLE_SPEC

    fake_client = AsyncMock()
    fake_client.get.return_value = fake_response
    fake_client.__aenter__.return_value = fake_client  # behave as an async context manager

    with patch("httpx.AsyncClient", return_value=fake_client):
        fetched = await api_spec_manager._fetch_remote_spec()

    assert fetched == SAMPLE_SPEC
    fake_client.get.assert_called_once()
63 |
64 |
@pytest.mark.asyncio
async def test_fetch_remote_spec_api_error(api_spec_manager):
    """Test handling of API error during remote fetch"""
    error_response = MagicMock()
    error_response.status_code = 500

    fake_client = AsyncMock()
    fake_client.get.return_value = error_response
    fake_client.__aenter__.return_value = fake_client  # behave as an async context manager

    with patch("httpx.AsyncClient", return_value=fake_client):
        fetched = await api_spec_manager._fetch_remote_spec()

    # A non-200 response is swallowed and reported as "no spec".
    assert fetched is None
79 |
80 |
@pytest.mark.asyncio
async def test_fetch_remote_spec_network_error(api_spec_manager):
    """Test handling of network error during remote fetch"""
    failing_client = AsyncMock()
    failing_client.get.side_effect = httpx.NetworkError("Network error")

    with patch("httpx.AsyncClient", return_value=failing_client):
        fetched = await api_spec_manager._fetch_remote_spec()

    # Transport failures are swallowed and reported as "no spec".
    assert fetched is None
91 |
92 |
# Startup Flow Tests
@pytest.mark.asyncio
async def test_startup_remote_success(api_spec_manager):
    """Test successful startup with remote fetch"""
    remote_fetch = AsyncMock(return_value=SAMPLE_SPEC)

    with patch.object(api_spec_manager, "_fetch_remote_spec", remote_fetch):
        await api_spec_manager.on_startup()

    # The remote spec is used directly when the fetch succeeds.
    assert api_spec_manager.spec == SAMPLE_SPEC
    remote_fetch.assert_called_once()
104 |
105 |
@pytest.mark.asyncio
async def test_startup_remote_fail_local_fallback(api_spec_manager):
    """Test fallback to local spec when remote fetch fails"""
    remote_fetch = AsyncMock(return_value=None)
    local_load = MagicMock(return_value=SAMPLE_SPEC)

    with (
        patch.object(api_spec_manager, "_fetch_remote_spec", remote_fetch),
        patch.object(api_spec_manager, "_load_local_spec", local_load),
    ):
        await api_spec_manager.on_startup()

    # Remote failed, so the bundled local spec is used instead.
    assert api_spec_manager.spec == SAMPLE_SPEC
    remote_fetch.assert_called_once()
    local_load.assert_called_once()
121 |
122 |
@pytest.mark.asyncio
async def test_startup_both_fail(api_spec_manager):
    """Test handling when both remote and local spec loading fail"""
    remote_fetch = AsyncMock(return_value=None)
    local_load = MagicMock(side_effect=FileNotFoundError)

    # With no remote spec and no local file, startup must propagate the error.
    with (
        patch.object(api_spec_manager, "_fetch_remote_spec", remote_fetch),
        patch.object(api_spec_manager, "_load_local_spec", local_load),
        pytest.raises(FileNotFoundError),
    ):
        await api_spec_manager.on_startup()

    remote_fetch.assert_called_once()
    local_load.assert_called_once()
138 |
139 |
# Get Spec Tests
@pytest.mark.asyncio
async def test_get_spec_success(api_spec_manager):
    """Test getting loaded spec.

    Async because the `api_spec_manager` fixture is async and is only
    resolved by pytest-asyncio for async tests.
    """
    api_spec_manager.spec = SAMPLE_SPEC
    result = api_spec_manager.get_spec()
    assert result == SAMPLE_SPEC
146 |
147 |
@pytest.mark.asyncio
async def test_get_spec_not_loaded(api_spec_manager):
    """Test error when spec not loaded.

    Async because the `api_spec_manager` fixture is async and is only
    resolved by pytest-asyncio for async tests.
    """
    api_spec_manager.spec = None
    with pytest.raises(ValueError, match="OpenAPI spec not loaded"):
        api_spec_manager.get_spec()
153 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections.abc import Generator
3 | from pathlib import Path
4 |
5 | import pytest
6 | from dotenv import load_dotenv
7 |
8 | from supabase_mcp.db_client.db_client import SupabaseClient
9 | from supabase_mcp.logger import logger
10 | from supabase_mcp.settings import Settings
11 |
12 |
def load_test_env() -> dict:
    """Load test environment variables from .env.test file.

    Returns:
        Mapping of the required Supabase variable names to their values.

    Raises:
        FileNotFoundError: if the .env.test file is missing.
        ValueError: if a required variable is absent after loading — failing
            here gives a clear message instead of the opaque TypeError that
            os.environ.update() would raise later on a None value.
    """
    env_test_path = Path(__file__).parent.parent / ".env.test"
    if not env_test_path.exists():
        raise FileNotFoundError(f"Test environment file not found at {env_test_path}")

    load_dotenv(env_test_path)
    values = {name: os.getenv(name) for name in ("SUPABASE_PROJECT_REF", "SUPABASE_DB_PASSWORD")}
    missing = [name for name, value in values.items() if value is None]
    if missing:
        raise ValueError(f"Missing required test environment variables: {', '.join(missing)}")
    return values
24 |
25 |
@pytest.fixture
def clean_environment() -> Generator[None, None, None]:
    """Fixture to provide a clean environment without Supabase-related variables"""
    # Snapshot the full environment so it can be restored verbatim.
    saved_env = dict(os.environ)

    # Drop the Supabase variables so tests see a pristine environment.
    for name in ("SUPABASE_PROJECT_REF", "SUPABASE_DB_PASSWORD"):
        os.environ.pop(name, None)

    yield

    # Restore the snapshot taken above.
    os.environ.clear()
    os.environ.update(saved_env)
41 |
42 |
@pytest.fixture
def clean_settings(clean_environment) -> Generator[Settings, None, None]:
    """Fixture to provide a clean Settings instance without any environment variables"""

    # Reset the SupabaseClient singleton through its public API, consistent
    # with the other fixtures in this file, instead of deleting the private
    # _instance attribute by hand.
    SupabaseClient.reset()

    settings = Settings()
    logger.info(f"Clean settings initialized: {settings}")
    yield settings
54 |
55 |
@pytest.fixture
def custom_connection_settings() -> Generator[Settings, None, None]:
    """Fixture that provides Settings instance for integration tests using .env.test"""

    # Clear SupabaseClient singleton
    SupabaseClient.reset()

    # Load test environment
    test_env = load_test_env()
    original_env = dict(os.environ)

    try:
        # Set up test environment
        os.environ.update(test_env)

        # Create fresh settings instance
        settings = Settings()
        logger.info(f"Custom connection settings initialized: {settings}")

        yield settings
    finally:
        # Restore the original environment even if Settings() raised above,
        # so one broken fixture cannot leak env vars into other tests.
        os.environ.clear()
        os.environ.update(original_env)
79 |
80 |
@pytest.fixture
def custom_connection_client(custom_connection_settings):
    """Fixture providing a client connected to test database"""
    test_client = SupabaseClient(settings_instance=custom_connection_settings)
    yield test_client
    # Ensure connection is closed after test
    test_client.close()
87 |
88 |
@pytest.fixture
def integration_client() -> Generator[SupabaseClient, None, None]:
    """Fixture providing a client connected to a database for integration tests.

    This fixture uses the default settings for connecting to the database,
    which makes it work automatically with local Supabase or CI environments.
    """
    # Fresh singleton so state from other fixtures cannot leak in.
    SupabaseClient.reset()

    client = SupabaseClient.create()

    # Log connection details without credentials: everything before the '@'
    # in the URL is user:password, so only report what follows it.
    db_url_parts = client.db_url.split("@")
    safe_conn_info = db_url_parts[1] if len(db_url_parts) > 1 else "unknown"
    logger.info(f"Integration client connecting to: {safe_conn_info}")

    yield client

    # Clean up
    client.close()
114 |
--------------------------------------------------------------------------------
/tests/sdk_client/test_auth_admin_models.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pydantic import ValidationError
3 |
4 | from supabase_mcp.sdk_client.auth_admin_models import (
5 | PARAM_MODELS,
6 | CreateUserParams,
7 | DeleteFactorParams,
8 | DeleteUserParams,
9 | GenerateLinkParams,
10 | GetUserByIdParams,
11 | InviteUserByEmailParams,
12 | ListUsersParams,
13 | UpdateUserByIdParams,
14 | )
15 |
16 |
17 | class TestModelConversion:
18 | """Test conversion from JSON data to models and validation"""
19 |
20 | def test_get_user_by_id_conversion(self):
21 | """Test conversion of get_user_by_id JSON data"""
22 | # Valid payload
23 | valid_payload = {"uid": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b"}
24 | params = GetUserByIdParams.model_validate(valid_payload)
25 | assert params.uid == valid_payload["uid"]
26 |
27 | # Invalid payload (missing required uid)
28 | invalid_payload = {}
29 | with pytest.raises(ValidationError) as excinfo:
30 | GetUserByIdParams.model_validate(invalid_payload)
31 | assert "uid" in str(excinfo.value)
32 |
33 | def test_list_users_conversion(self):
34 | """Test conversion of list_users JSON data"""
35 | # Valid payload with custom values
36 | valid_payload = {"page": 2, "per_page": 20}
37 | params = ListUsersParams.model_validate(valid_payload)
38 | assert params.page == valid_payload["page"]
39 | assert params.per_page == valid_payload["per_page"]
40 |
41 | # Valid payload with defaults
42 | empty_payload = {}
43 | params = ListUsersParams.model_validate(empty_payload)
44 | assert params.page == 1
45 | assert params.per_page == 50
46 |
47 | # Invalid payload (non-integer values)
48 | invalid_payload = {"page": "not-a-number", "per_page": "also-not-a-number"}
49 | with pytest.raises(ValidationError) as excinfo:
50 | ListUsersParams.model_validate(invalid_payload)
51 | assert "page" in str(excinfo.value)
52 |
53 | def test_create_user_conversion(self):
54 | """Test conversion of create_user JSON data"""
55 | # Valid payload with email
56 | valid_payload = {
57 | "email": "test@example.com",
58 | "password": "secure-password",
59 | "email_confirm": True,
60 | "user_metadata": {"name": "Test User"},
61 | }
62 | params = CreateUserParams.model_validate(valid_payload)
63 | assert params.email == valid_payload["email"]
64 | assert params.password == valid_payload["password"]
65 | assert params.email_confirm is True
66 | assert params.user_metadata == valid_payload["user_metadata"]
67 |
68 | # Valid payload with phone
69 | valid_phone_payload = {
70 | "phone": "+1234567890",
71 | "password": "secure-password",
72 | "phone_confirm": True,
73 | }
74 | params = CreateUserParams.model_validate(valid_phone_payload)
75 | assert params.phone == valid_phone_payload["phone"]
76 | assert params.password == valid_phone_payload["password"]
77 | assert params.phone_confirm is True
78 |
79 | # Invalid payload (missing both email and phone)
80 | invalid_payload = {"password": "secure-password"}
81 | with pytest.raises(ValidationError) as excinfo:
82 | CreateUserParams.model_validate(invalid_payload)
83 | assert "Either email or phone must be provided" in str(excinfo.value)
84 |
85 | def test_delete_user_conversion(self):
86 | """Test conversion of delete_user JSON data"""
87 | # Valid payload with custom values
88 | valid_payload = {"id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b", "should_soft_delete": True}
89 | params = DeleteUserParams.model_validate(valid_payload)
90 | assert params.id == valid_payload["id"]
91 | assert params.should_soft_delete is True
92 |
93 | # Valid payload with defaults
94 | valid_payload = {"id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b"}
95 | params = DeleteUserParams.model_validate(valid_payload)
96 | assert params.id == valid_payload["id"]
97 | assert params.should_soft_delete is False
98 |
99 | # Invalid payload (missing id)
100 | invalid_payload = {"should_soft_delete": True}
101 | with pytest.raises(ValidationError) as excinfo:
102 | DeleteUserParams.model_validate(invalid_payload)
103 | assert "id" in str(excinfo.value)
104 |
105 | def test_invite_user_by_email_conversion(self):
106 | """Test conversion of invite_user_by_email JSON data"""
107 | # Valid payload with options
108 | valid_payload = {
109 | "email": "invite@example.com",
110 | "options": {"data": {"name": "Invited User"}, "redirect_to": "https://example.com/welcome"},
111 | }
112 | params = InviteUserByEmailParams.model_validate(valid_payload)
113 | assert params.email == valid_payload["email"]
114 | assert params.options == valid_payload["options"]
115 |
116 | # Valid payload without options
117 | valid_payload = {"email": "invite@example.com"}
118 | params = InviteUserByEmailParams.model_validate(valid_payload)
119 | assert params.email == valid_payload["email"]
120 | assert params.options is None
121 |
122 | # Invalid payload (missing email)
123 | invalid_payload = {"options": {"data": {"name": "Invited User"}}}
124 | with pytest.raises(ValidationError) as excinfo:
125 | InviteUserByEmailParams.model_validate(invalid_payload)
126 | assert "email" in str(excinfo.value)
127 |
128 | def test_generate_link_conversion(self):
129 | """Test conversion of generate_link JSON data"""
130 | # Valid signup link payload
131 | valid_signup_payload = {
132 | "type": "signup",
133 | "email": "user@example.com",
134 | "password": "secure-password",
135 | "options": {"data": {"name": "New User"}, "redirect_to": "https://example.com/welcome"},
136 | }
137 | params = GenerateLinkParams.model_validate(valid_signup_payload)
138 | assert params.type == valid_signup_payload["type"]
139 | assert params.email == valid_signup_payload["email"]
140 | assert params.password == valid_signup_payload["password"]
141 | assert params.options == valid_signup_payload["options"]
142 |
143 | # Valid email_change link payload
144 | valid_email_change_payload = {
145 | "type": "email_change_current",
146 | "email": "user@example.com",
147 | "new_email": "new@example.com",
148 | }
149 | params = GenerateLinkParams.model_validate(valid_email_change_payload)
150 | assert params.type == valid_email_change_payload["type"]
151 | assert params.email == valid_email_change_payload["email"]
152 | assert params.new_email == valid_email_change_payload["new_email"]
153 |
154 | # Invalid payload (missing password for signup)
155 | invalid_signup_payload = {
156 | "type": "signup",
157 | "email": "user@example.com",
158 | }
159 | with pytest.raises(ValidationError) as excinfo:
160 | GenerateLinkParams.model_validate(invalid_signup_payload)
161 | assert "Password is required for signup links" in str(excinfo.value)
162 |
163 | # Invalid payload (missing new_email for email_change)
164 | invalid_email_change_payload = {
165 | "type": "email_change_current",
166 | "email": "user@example.com",
167 | }
168 | with pytest.raises(ValidationError) as excinfo:
169 | GenerateLinkParams.model_validate(invalid_email_change_payload)
170 | assert "new_email is required for email change links" in str(excinfo.value)
171 |
172 | # Invalid payload (invalid type)
173 | invalid_type_payload = {
174 | "type": "invalid-type",
175 | "email": "user@example.com",
176 | }
177 | with pytest.raises(ValidationError) as excinfo:
178 | GenerateLinkParams.model_validate(invalid_type_payload)
179 | assert "type" in str(excinfo.value)
180 |
181 | def test_update_user_by_id_conversion(self):
182 | """Test conversion of update_user_by_id JSON data"""
183 | # Valid payload
184 | valid_payload = {
185 | "uid": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
186 | "email": "updated@example.com",
187 | "user_metadata": {"name": "Updated User"},
188 | }
189 | params = UpdateUserByIdParams.model_validate(valid_payload)
190 | assert params.uid == valid_payload["uid"]
191 | assert params.email == valid_payload["email"]
192 | assert params.user_metadata == valid_payload["user_metadata"]
193 |
194 | # Invalid payload (missing uid)
195 | invalid_payload = {
196 | "email": "updated@example.com",
197 | "user_metadata": {"name": "Updated User"},
198 | }
199 | with pytest.raises(ValidationError) as excinfo:
200 | UpdateUserByIdParams.model_validate(invalid_payload)
201 | assert "uid" in str(excinfo.value)
202 |
203 | def test_delete_factor_conversion(self):
204 | """Test conversion of delete_factor JSON data"""
205 | # Valid payload
206 | valid_payload = {
207 | "user_id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
208 | "id": "totp-factor-id-123",
209 | }
210 | params = DeleteFactorParams.model_validate(valid_payload)
211 | assert params.user_id == valid_payload["user_id"]
212 | assert params.id == valid_payload["id"]
213 |
214 | # Invalid payload (missing user_id)
215 | invalid_payload = {
216 | "id": "totp-factor-id-123",
217 | }
218 | with pytest.raises(ValidationError) as excinfo:
219 | DeleteFactorParams.model_validate(invalid_payload)
220 | assert "user_id" in str(excinfo.value)
221 |
222 | # Invalid payload (missing id)
223 | invalid_payload = {
224 | "user_id": "d0e8c69f-e0c3-4a1c-b6d6-9a6c756a6a4b",
225 | }
226 | with pytest.raises(ValidationError) as excinfo:
227 | DeleteFactorParams.model_validate(invalid_payload)
228 | assert "id" in str(excinfo.value)
229 |
230 | def test_param_models_mapping(self):
231 | """Test PARAM_MODELS mapping functionality"""
232 | # Test that all methods have the correct corresponding model
233 | method_model_pairs = [
234 | ("get_user_by_id", GetUserByIdParams),
235 | ("list_users", ListUsersParams),
236 | ("create_user", CreateUserParams),
237 | ("delete_user", DeleteUserParams),
238 | ("invite_user_by_email", InviteUserByEmailParams),
239 | ("generate_link", GenerateLinkParams),
240 | ("update_user_by_id", UpdateUserByIdParams),
241 | ("delete_factor", DeleteFactorParams),
242 | ]
243 |
244 | for method, expected_model in method_model_pairs:
245 | assert method in PARAM_MODELS
246 | assert PARAM_MODELS[method] == expected_model
247 |
248 | # Test actual validation of data through PARAM_MODELS mapping
249 | method = "create_user"
250 | model_class = PARAM_MODELS[method]
251 |
252 | valid_payload = {"email": "test@example.com", "password": "secure-password"}
253 |
254 | params = model_class.model_validate(valid_payload)
255 | assert params.email == valid_payload["email"]
256 | assert params.password == valid_payload["password"]
257 |
--------------------------------------------------------------------------------
/tests/sdk_client/test_python_client.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock, MagicMock, patch
2 |
3 | import pytest
4 | import pytest_asyncio
5 |
6 | from supabase_mcp.exceptions import PythonSDKError
7 | from supabase_mcp.sdk_client.auth_admin_models import PARAM_MODELS
8 | from supabase_mcp.sdk_client.python_client import SupabaseSDKClient
9 |
10 |
@pytest_asyncio.fixture
async def mock_supabase_client():
    """Mock the Supabase client"""
    # Build a client whose auth.admin namespace exposes every method named
    # in PARAM_MODELS as an AsyncMock returning a canned success payload.
    client = MagicMock()
    client.auth = MagicMock()
    client.auth.admin = MagicMock()

    for admin_method in PARAM_MODELS:
        setattr(client.auth.admin, admin_method, AsyncMock(return_value={"success": True}))

    return client
23 |
24 |
@pytest_asyncio.fixture
async def sdk_client(mock_supabase_client):
    """Create a test instance of the SDK client with a mocked Supabase client"""
    # Clear the singleton so each test starts from a fresh client.
    SupabaseSDKClient._instance = None

    # Patch the async client factory for the duration of the test so the
    # SDK client wires itself to the mock instead of a real backend.
    factory_patch = patch(
        "supabase_mcp.sdk_client.python_client.create_async_client",
        return_value=mock_supabase_client,
    )
    with factory_patch:
        yield await SupabaseSDKClient.create("test-project", "test-key")

    # Clear the singleton again so later tests are unaffected.
    SupabaseSDKClient._instance = None
38 |
39 |
@pytest.mark.asyncio
class TestSDKClient:
    """Test the Supabase SDK client focusing on core functionality"""

    async def test_url_construction(self):
        """Test URL construction from project reference"""
        # The SDK derives the API URL purely from the project ref.
        client = SupabaseSDKClient("my-project", "my-key")
        url = client.get_supabase_url()
        assert url == "https://my-project.supabase.co"

    async def test_singleton_pattern(self, sdk_client):
        """Test that get_instance returns the singleton instance"""
        # First call to get_instance should return the existing instance
        # (the `sdk_client` fixture already populated the singleton slot).
        instance1 = await SupabaseSDKClient.get_instance()
        assert instance1 is sdk_client

        # Second call should return the same instance
        instance2 = await SupabaseSDKClient.get_instance()
        assert instance2 is instance1

    async def test_return_sdk_spec(self, sdk_client):
        """Test that return_python_sdk_spec returns the SDK spec"""
        # Patch the spec loader so the test does not depend on the real
        # auth_admin_sdk_spec contents.
        with patch("supabase_mcp.sdk_client.python_client.get_auth_admin_methods_spec") as mock_get_spec:
            mock_get_spec.return_value = {"test": "spec"}
            result = sdk_client.return_python_sdk_spec()
            assert result == {"test": "spec"}

    async def test_client_init_error(self):
        """Test error handling during client initialization"""
        # A failure inside create_async_client must surface as PythonSDKError.
        with patch("supabase_mcp.sdk_client.python_client.create_async_client") as mock_create:
            mock_create.side_effect = Exception("Connection error")

            with pytest.raises(PythonSDKError) as excinfo:
                await SupabaseSDKClient.create("error-project", "error-key")

            assert "Error creating Supabase SDK client" in str(excinfo.value)

    async def test_call_auth_admin_method_validation(self, sdk_client):
        """Test parameter validation when calling auth admin methods"""
        # Valid parameters
        valid_params = {"uid": "test-user-id"}
        result = await sdk_client.call_auth_admin_method("get_user_by_id", valid_params)
        assert result == {"success": True}

        # Invalid parameters (missing required field)
        invalid_params = {}
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("get_user_by_id", invalid_params)
        assert "Invalid parameters" in str(excinfo.value)

        # Unknown method
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("unknown_method", {})
        assert "Unknown method" in str(excinfo.value)

    async def test_method_exception_handling(self, sdk_client, mock_supabase_client):
        """Test exception handling when auth admin methods raise errors"""
        # Mock the get_user_by_id method to raise an exception
        error_message = "User not found"
        mock_supabase_client.auth.admin.get_user_by_id.side_effect = Exception(error_message)

        # Call the method and check that the exception is properly wrapped
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("get_user_by_id", {"uid": "nonexistent-id"})

        # The wrapped error keeps both the method name and the original message.
        assert error_message in str(excinfo.value)
        assert "Error calling get_user_by_id" in str(excinfo.value)
107 |
--------------------------------------------------------------------------------
/tests/sdk_client/test_sdk_client_integration.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import uuid
4 | from datetime import datetime
5 |
6 | import pytest
7 | import pytest_asyncio
8 |
9 | from supabase_mcp.exceptions import PythonSDKError
10 | from supabase_mcp.sdk_client.python_client import SupabaseSDKClient
11 |
12 | # Unique identifier for test users to avoid conflicts
13 | TEST_ID = f"test-{int(time.time())}-{uuid.uuid4().hex[:6]}"
14 |
15 |
16 | # Create unique test emails
17 | def get_test_email(prefix="user"):
18 | """Generate a unique test email"""
19 | return f"a.zuev+{prefix}-{TEST_ID}@outlook.com"
20 |
21 |
@pytest_asyncio.fixture
async def sdk_client():
    """
    Create a SupabaseSDKClient instance for integration testing.
    Uses environment variables directly.
    """
    # Clear the singleton so each test gets a freshly-created client.
    SupabaseSDKClient._instance = None

    # Credentials come straight from the environment; without them the
    # integration tests cannot run and are skipped rather than failed.
    project_ref = os.environ.get("SUPABASE_PROJECT_REF")
    service_role_key = os.environ.get("SUPABASE_SERVICE_ROLE_KEY")
    if not (project_ref and service_role_key):
        pytest.skip("SUPABASE_PROJECT_REF or SUPABASE_SERVICE_ROLE_KEY environment variables not set")

    yield await SupabaseSDKClient.create(project_ref, service_role_key)

    # Cleanup after tests
    SupabaseSDKClient._instance = None
43 |
44 |
@pytest.mark.asyncio
class TestSDKClientIntegration:
    """
    Integration tests for the SupabaseSDKClient.
    These tests make actual API calls to the Supabase Auth service.

    Each test creates its own user(s) with run-unique emails and cleans
    them up in a finally block so failed runs do not leave residue behind.
    """

    async def test_list_users(self, sdk_client):
        """Test listing users with pagination"""
        # Create test parameters
        list_params = {"page": 1, "per_page": 10}

        # List users
        result = await sdk_client.call_auth_admin_method("list_users", list_params)

        # Verify response format
        assert result is not None
        assert hasattr(result, "__iter__")  # Should be iterable (list of users)
        assert len(result) >= 0  # Should have users or be empty

        # Check that the first user has expected attributes if there are any users
        if len(result) > 0:
            first_user = result[0]
            assert hasattr(first_user, "id")
            assert hasattr(first_user, "email")
            assert hasattr(first_user, "user_metadata")

        # Test with invalid parameters (negative page number)
        invalid_params = {"page": -1, "per_page": 10}
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("list_users", invalid_params)

        # The actual error message contains "Bad Pagination Parameters" instead of "Invalid parameters"
        assert "Bad Pagination Parameters" in str(excinfo.value)

    async def test_get_user_by_id(self, sdk_client):
        """Test retrieving a user by ID"""
        # First create a user to get
        test_email = get_test_email("get")
        create_params = {
            "email": test_email,
            "password": f"Password123!{TEST_ID}",
            "email_confirm": True,
            "user_metadata": {"name": "Test User", "test_id": TEST_ID},
        }

        # Create the user
        create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
        assert create_result is not None
        assert hasattr(create_result, "user")
        user_id = create_result.user.id

        try:
            # Get the user by ID
            get_params = {"uid": user_id}
            get_result = await sdk_client.call_auth_admin_method("get_user_by_id", get_params)

            # Verify user data
            assert get_result is not None
            assert hasattr(get_result, "user")
            assert get_result.user.id == user_id
            assert get_result.user.email == test_email
            assert get_result.user.user_metadata["test_id"] == TEST_ID

            # Test with invalid parameters (non-existent user ID)
            invalid_params = {"uid": "non-existent-user-id"}
            with pytest.raises(PythonSDKError) as excinfo:
                await sdk_client.call_auth_admin_method("get_user_by_id", invalid_params)

            # The actual error message contains "user_id must be an UUID" instead of "user not found"
            assert "user_id must be an UUID" in str(excinfo.value)

        finally:
            # Clean up - delete the test user
            delete_params = {"id": user_id}
            await sdk_client.call_auth_admin_method("delete_user", delete_params)

    async def test_create_user(self, sdk_client):
        """Test creating a new user"""
        # Create a new test user
        test_email = get_test_email("create")
        create_params = {
            "email": test_email,
            "password": f"Password123!{TEST_ID}",
            "email_confirm": True,
            "user_metadata": {"name": "Test User", "test_id": TEST_ID},
        }

        # Create the user
        create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
        assert create_result is not None
        assert hasattr(create_result, "user")
        assert hasattr(create_result.user, "id")
        user_id = create_result.user.id

        try:
            # Verify user was created
            get_params = {"uid": user_id}
            get_result = await sdk_client.call_auth_admin_method("get_user_by_id", get_params)
            assert get_result is not None
            assert hasattr(get_result, "user")
            assert get_result.user.email == test_email

            # Test with invalid parameters (missing required fields)
            invalid_params = {"user_metadata": {"name": "Invalid User"}}
            with pytest.raises(PythonSDKError) as excinfo:
                await sdk_client.call_auth_admin_method("create_user", invalid_params)

            assert "Invalid parameters" in str(excinfo.value)

        finally:
            # Clean up - delete the test user
            delete_params = {"id": user_id}
            await sdk_client.call_auth_admin_method("delete_user", delete_params)

    async def test_update_user_by_id(self, sdk_client):
        """Test updating a user's attributes"""
        # Create a new test user
        test_email = get_test_email("update")
        create_params = {
            "email": test_email,
            "password": f"Password123!{TEST_ID}",
            "email_confirm": True,
            "user_metadata": {"name": "Before Update", "test_id": TEST_ID},
        }

        # Create the user
        create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
        assert hasattr(create_result, "user")
        user_id = create_result.user.id

        try:
            # Update the user
            update_params = {
                "uid": user_id,
                "user_metadata": {
                    "name": "After Update",
                    "test_id": TEST_ID,
                    "updated_at": datetime.now().isoformat(),
                },
            }

            update_result = await sdk_client.call_auth_admin_method("update_user_by_id", update_params)

            # Verify user was updated
            assert update_result is not None
            assert hasattr(update_result, "user")
            assert update_result.user.id == user_id
            assert update_result.user.user_metadata["name"] == "After Update"
            assert "updated_at" in update_result.user.user_metadata

            # Test with invalid parameters (non-existent user ID)
            invalid_params = {"uid": "non-existent-user-id", "user_metadata": {"name": "Invalid Update"}}
            with pytest.raises(PythonSDKError) as excinfo:
                await sdk_client.call_auth_admin_method("update_user_by_id", invalid_params)

            # The actual error message contains "user_id must be an uuid" instead of "user not found"
            assert "user_id must be an uuid" in str(excinfo.value).lower()

        finally:
            # Clean up - delete the test user
            delete_params = {"id": user_id}
            await sdk_client.call_auth_admin_method("delete_user", delete_params)

    async def test_delete_user(self, sdk_client):
        """Test deleting a user"""
        # Create a new test user
        test_email = get_test_email("delete")
        create_params = {
            "email": test_email,
            "password": f"Password123!{TEST_ID}",
            "email_confirm": True,
            "user_metadata": {"name": "Delete Test User", "test_id": TEST_ID},
        }

        # Create the user
        create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
        assert hasattr(create_result, "user")
        user_id = create_result.user.id

        # Delete the user
        delete_params = {"id": user_id}
        # The delete_user method returns None on success, so we just check that it doesn't raise an exception
        await sdk_client.call_auth_admin_method("delete_user", delete_params)

        # No need to assert on the result, as the API returns None on success
        # We'll verify deletion by trying to get the user and expecting an error

        # Verify user no longer exists
        get_params = {"uid": user_id}
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("get_user_by_id", get_params)

        assert "user not found" in str(excinfo.value).lower() or "not found" in str(excinfo.value).lower()

        # Test with invalid parameters (non-UUID format user ID)
        invalid_params = {"id": "non-existent-user-id"}
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("delete_user", invalid_params)

        # The API validates UUID format before checking if user exists
        assert "user_id must be an uuid" in str(excinfo.value).lower()

    async def test_invite_user_by_email(self, sdk_client):
        """Test inviting a user by email"""
        # Create invite parameters
        test_email = get_test_email("invite")
        invite_params = {
            "email": test_email,
            "options": {"data": {"name": "Invited User", "test_id": TEST_ID, "invited_at": datetime.now().isoformat()}},
        }

        # Invite the user
        try:
            result = await sdk_client.call_auth_admin_method("invite_user_by_email", invite_params)

            # Verify response
            assert result is not None
            assert hasattr(result, "user")
            assert result.user.email == test_email
            assert hasattr(result.user, "invited_at")

            # Clean up - delete the invited user
            if hasattr(result.user, "id"):
                delete_params = {"id": result.user.id}
                await sdk_client.call_auth_admin_method("delete_user", delete_params)

            # Test with invalid parameters (missing email)
            invalid_params = {"options": {"data": {"name": "Invalid Invite"}}}
            with pytest.raises(PythonSDKError) as excinfo:
                await sdk_client.call_auth_admin_method("invite_user_by_email", invalid_params)

            assert "Invalid parameters" in str(excinfo.value)

        except PythonSDKError as e:
            # Some Supabase instances may have email sending disabled,
            # so we'll check if the error is related to that
            if "sending emails is not configured" in str(e).lower():
                pytest.skip("Email sending is not configured in this Supabase instance")
            else:
                raise

    async def test_generate_link(self, sdk_client):
        """Test generating authentication links"""
        # Test different link types
        link_types = ["signup", "magiclink", "recovery"]
        created_user_ids = []

        for link_type in link_types:
            test_email = get_test_email(f"link-{link_type}")

            # For magiclink and recovery, we need to create the user first
            if link_type in ["magiclink", "recovery"]:
                # Create a user first
                create_params = {
                    "email": test_email,
                    "password": f"Password123!{TEST_ID}",
                    "email_confirm": True,
                    "user_metadata": {"name": f"{link_type.capitalize()} User", "test_id": TEST_ID},
                }

                try:
                    create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
                    if hasattr(create_result, "user") and hasattr(create_result.user, "id"):
                        created_user_ids.append(create_result.user.id)
                except PythonSDKError as e:
                    pytest.skip(f"Failed to create user for {link_type} test: {str(e)}")
                    continue

            # Different parameters based on link type: signup additionally
            # requires a password.
            if link_type == "signup":
                link_params = {
                    "type": link_type,
                    "email": test_email,
                    "password": f"Password123!{TEST_ID}",
                    "options": {
                        "data": {"name": f"{link_type.capitalize()} User", "test_id": TEST_ID},
                        "redirect_to": "https://example.com/welcome",
                    },
                }
            else:
                link_params = {
                    "type": link_type,
                    "email": test_email,
                    "options": {
                        "data": {"name": f"{link_type.capitalize()} User", "test_id": TEST_ID},
                        "redirect_to": "https://example.com/welcome",
                    },
                }

            try:
                # Generate link
                result = await sdk_client.call_auth_admin_method("generate_link", link_params)

                # Verify response
                assert result is not None
                assert hasattr(result, "properties")
                assert hasattr(result.properties, "action_link")

                # If a user was created during link generation (for signup), store ID for cleanup
                if hasattr(result, "user") and hasattr(result.user, "id"):
                    created_user_ids.append(result.user.id)

            except PythonSDKError as e:
                # Some Supabase instances may have email sending disabled
                if "sending emails is not configured" in str(e).lower():
                    pytest.skip(f"Email sending is not configured for {link_type} links")
                else:
                    raise

        # Test with invalid parameters (invalid link type)
        invalid_params = {"type": "invalid_type", "email": get_test_email("invalid")}
        with pytest.raises(PythonSDKError) as excinfo:
            await sdk_client.call_auth_admin_method("generate_link", invalid_params)

        assert "Invalid parameters" in str(excinfo.value) or "invalid type" in str(excinfo.value).lower()

        # Clean up any created users; best-effort, a failed delete must not
        # fail the test itself.
        for user_id in created_user_ids:
            try:
                delete_params = {"id": user_id}
                await sdk_client.call_auth_admin_method("delete_user", delete_params)
            except Exception:
                pass

    async def test_delete_factor(self, sdk_client):
        """Test deleting an MFA factor"""
        # Create a test user
        test_email = get_test_email("factor")
        create_params = {
            "email": test_email,
            "password": f"Password123!{TEST_ID}",
            "email_confirm": True,
            "user_metadata": {"name": "Factor Test User", "test_id": TEST_ID},
        }

        # Create the user
        create_result = await sdk_client.call_auth_admin_method("create_user", create_params)
        assert hasattr(create_result, "user")
        user_id = create_result.user.id

        try:
            # Attempt to delete a factor (this will likely fail as the method is not implemented)
            delete_factor_params = {"user_id": user_id, "id": "non-existent-factor-id"}

            try:
                await sdk_client.call_auth_admin_method("delete_factor", delete_factor_params)
                # Use pytest.fail rather than `assert False`: asserts are
                # stripped under `python -O`, which would let an unexpected
                # success pass silently. pytest.fail always reports.
                pytest.fail("delete_factor should not succeed as it's not implemented")
            except PythonSDKError as e:
                # We expect this to fail with a specific error message
                assert "not implemented" in str(e).lower() or "method not found" in str(e).lower()

        finally:
            # Clean up - delete the test user
            delete_params = {"id": user_id}
            await sdk_client.call_auth_admin_method("delete_user", delete_params)

    async def test_empty_parameters(self, sdk_client):
        """Test validation errors with empty parameters for various methods"""
        # Test methods with empty parameters
        methods = ["get_user_by_id", "create_user", "update_user_by_id", "delete_user", "generate_link"]

        for method in methods:
            empty_params = {}

            # Should raise PythonSDKError containing validation error details
            with pytest.raises(PythonSDKError) as excinfo:
                await sdk_client.call_auth_admin_method(method, empty_params)

            # Verify error message contains validation info
            assert "Invalid parameters" in str(excinfo.value)
417 |
--------------------------------------------------------------------------------
/tests/test_db_client.py:
--------------------------------------------------------------------------------
1 | import os
2 | import urllib.parse
3 |
4 | import pytest
5 |
6 | from supabase_mcp.db_client.db_client import QueryResult, SupabaseClient
7 | from supabase_mcp.db_client.db_safety_config import DbSafetyLevel
8 | from supabase_mcp.exceptions import QueryError
9 |
10 |
11 | # Connection string tests
def test_connection_string_local_default():
    """Test connection string generation with local development defaults"""
    # Local supabase stack: plain postgres user on the supplied host:port.
    local_client = SupabaseClient(project_ref="127.0.0.1:54322", db_password="postgres")
    expected = "postgresql://postgres:postgres@127.0.0.1:54322/postgres"
    assert local_client.db_url == expected
16 |
17 |
def test_connection_string_integration(custom_connection_settings):
    """Test connection string generation with integration settings from .env.test"""
    settings = custom_connection_settings
    client = SupabaseClient(settings_instance=settings)

    # The password must appear URL-encoded in the resulting connection string.
    encoded_password = urllib.parse.quote_plus(settings.supabase_db_password)
    expected = (
        f"postgresql://postgres.{settings.supabase_project_ref}:"
        f"{encoded_password}@aws-0-us-east-1.pooler.supabase.com:6543/postgres"
    )
    assert client.db_url == expected
28 |
29 |
def test_connection_string_explicit_params():
    """Test connection string generation with explicit parameters"""
    # Remote project ref -> pooler URL with the project-scoped user.
    client = SupabaseClient(project_ref="my-project", db_password="my-password")
    assert (
        client.db_url
        == "postgresql://postgres.my-project:my-password@aws-0-us-east-1.pooler.supabase.com:6543/postgres"
    )
35 |
36 |
@pytest.mark.skipif(not os.getenv("CI"), reason="Test only runs in CI environment")
def test_connection_string_ci():
    """Test connection string generation in CI environment"""
    # Just create the client using singleton method
    client = SupabaseClient.create()
    url = client.db_url

    # CI must point at a remote database, never the local stack.
    assert "127.0.0.1" not in url, "CI should use remote DB, not localhost"

    # Check the Supabase pooler URL shape without exposing credentials.
    assert "postgresql://postgres." in url, "Connection string should use Supabase format"
    assert "pooler.supabase.com" in url, "Connection string should use Supabase pooler"

    # A live round-trip is a stronger check than inspecting the URL.
    try:
        result = client.execute_query("SELECT 1 as connection_test")
        assert result.rows[0]["connection_test"] == 1, "Connection test query failed"
    except Exception as e:
        pytest.fail(f"Connection failed: {e}")
56 |
57 |
58 | # Safety mode tests
def test_client_default_mode():
    """Test client initializes in read-only mode by default"""
    # No explicit mode supplied -> safest (read-only) level.
    default_client = SupabaseClient(project_ref="127.0.0.1:54322", db_password="postgres")
    assert default_client.mode == DbSafetyLevel.RO
63 |
64 |
def test_client_explicit_mode():
    """Test client respects explicit mode setting"""
    # Passing _mode=RW must override the read-only default.
    rw_client = SupabaseClient(project_ref="127.0.0.1:54322", db_password="postgres", _mode=DbSafetyLevel.RW)
    assert rw_client.mode == DbSafetyLevel.RW
69 |
70 |
def test_mode_switching():
    """Test mode switching works correctly"""
    client = SupabaseClient(project_ref="127.0.0.1:54322", db_password="postgres")

    # Starts read-only by default.
    assert client.mode == DbSafetyLevel.RO

    # Round-trip through read-write and back; each switch must stick.
    for target_mode in (DbSafetyLevel.RW, DbSafetyLevel.RO):
        client.switch_mode(target_mode)
        assert client.mode == target_mode
81 |
82 |
83 | # Query execution tests
@pytest.mark.integration
def test_readonly_query_execution(integration_client):
    """Test read-only query executes successfully in both modes"""
    query = "SELECT 1 as num"
    expected_rows = [{"num": 1}]

    # Read-only mode (the default) accepts a plain SELECT.
    ro_result = integration_client.execute_query(query)
    assert isinstance(ro_result, QueryResult)
    assert ro_result.rows == expected_rows

    # The same read must also succeed after switching to read-write.
    integration_client.switch_mode(DbSafetyLevel.RW)
    rw_result = integration_client.execute_query(query)
    assert rw_result.rows == expected_rows
96 |
97 |
@pytest.mark.integration
def test_write_query_fails_in_readonly(integration_client):
    """Test write query fails in read-only mode"""
    # Even a temporary table counts as a write and must be rejected.
    ddl = "CREATE TEMPORARY TABLE IF NOT EXISTS test_table (id int)"
    with pytest.raises(QueryError) as exc_info:
        integration_client.execute_query(ddl)
    assert "read-only transaction" in str(exc_info.value)
104 |
105 |
@pytest.mark.integration
def test_query_error_handling(integration_client):
    """Test various query error scenarios"""
    # (query, expected fragment of the error message, case-insensitive)
    error_cases = (
        ("SELECT * FROM nonexistent_table", "relation"),  # schema error
        ("INVALID SQL", "syntax error"),  # syntax error
    )
    for bad_query, expected_fragment in error_cases:
        with pytest.raises(QueryError) as exc_info:
            integration_client.execute_query(bad_query)
        assert expected_fragment in str(exc_info.value).lower()
118 |
119 |
@pytest.mark.integration
def test_transaction_commit_in_write_mode(integration_client):
    """Test that transactions are properly committed in write mode"""
    # Switch to write mode
    integration_client.switch_mode(DbSafetyLevel.RW)

    try:
        # Use explicit transaction control with a regular table (not temporary)
        # NOTE: a temporary table could vanish with the pooled connection, so a
        # real table in public is used and dropped in the finally block below.
        integration_client.execute_query("""
            BEGIN;
            CREATE TABLE IF NOT EXISTS public.test_commit (id SERIAL PRIMARY KEY, value TEXT);
            INSERT INTO public.test_commit (value) VALUES ('test_value');
            COMMIT;
        """)

        # Verify data was committed by querying it back
        result = integration_client.execute_query("SELECT value FROM public.test_commit")

        # Check that we got the expected result
        assert len(result.rows) == 1
        assert result.rows[0]["value"] == "test_value"

    finally:
        # Clean up - drop the table
        # Cleanup is best-effort: a failed drop is reported but must not mask
        # the original test failure.
        try:
            integration_client.execute_query("DROP TABLE IF EXISTS public.test_commit")
        except Exception as e:
            print(f"Cleanup error: {e}")

        # Switch back to read-only mode
        integration_client.switch_mode(DbSafetyLevel.RO)
151 |
152 |
@pytest.mark.integration
def test_explicit_transaction_control(integration_client):
    """Test explicit transaction control with BEGIN/COMMIT"""
    # Switch to write mode
    integration_client.switch_mode(DbSafetyLevel.RW)

    try:
        # Create a test table
        integration_client.execute_query("""
            BEGIN;
            CREATE TABLE IF NOT EXISTS public.transaction_test (id SERIAL PRIMARY KEY, data TEXT);
            COMMIT;
        """)

        # Test transaction that should be committed
        integration_client.execute_query("""
            BEGIN;
            INSERT INTO public.transaction_test (data) VALUES ('committed_data');
            COMMIT;
        """)

        # Verify data was committed
        result = integration_client.execute_query("SELECT data FROM public.transaction_test")
        assert len(result.rows) == 1
        assert result.rows[0]["data"] == "committed_data"

    finally:
        # Clean up
        # Best-effort drop; a cleanup failure is printed, not raised, so it
        # cannot mask the original test failure.
        try:
            integration_client.execute_query("DROP TABLE IF EXISTS public.transaction_test")
        except Exception as e:
            print(f"Cleanup error: {e}")

        # Switch back to read-only mode
        integration_client.switch_mode(DbSafetyLevel.RO)
188 |
189 |
@pytest.mark.integration
def test_savepoint_and_rollback(integration_client):
    """Test savepoint and rollback functionality within transactions"""
    # Switch to write mode
    integration_client.switch_mode(DbSafetyLevel.RW)

    try:
        # Create a test table
        integration_client.execute_query("""
            BEGIN;
            CREATE TABLE IF NOT EXISTS public.savepoint_test (id SERIAL PRIMARY KEY, data TEXT);
            COMMIT;
        """)

        # Test transaction with savepoint and rollback
        # data2 is inserted after SAVEPOINT sp1 and undone by ROLLBACK TO sp1,
        # while data1 (before the savepoint) and data3 (after the rollback)
        # survive to COMMIT.
        integration_client.execute_query("""
            BEGIN;
            INSERT INTO public.savepoint_test (data) VALUES ('data1');
            SAVEPOINT sp1;
            INSERT INTO public.savepoint_test (data) VALUES ('data2');
            ROLLBACK TO sp1;
            INSERT INTO public.savepoint_test (data) VALUES ('data3');
            COMMIT;
        """)

        # Verify only data1 and data3 were committed (data2 was rolled back)
        result = integration_client.execute_query("""
            SELECT data FROM public.savepoint_test ORDER BY id
        """)

        assert len(result.rows) == 2
        assert result.rows[0]["data"] == "data1"
        assert result.rows[1]["data"] == "data3"

    finally:
        # Clean up
        # Best-effort drop; failures are printed, not raised.
        try:
            integration_client.execute_query("DROP TABLE IF EXISTS public.savepoint_test")
        except Exception as e:
            print(f"Cleanup error: {e}")

        # Switch back to read-only mode
        integration_client.switch_mode(DbSafetyLevel.RO)
        assert integration_client.mode == DbSafetyLevel.RO
--------------------------------------------------------------------------------
/tests/test_main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | import subprocess
4 | from unittest.mock import ANY, patch
5 |
6 | import pytest
7 |
8 | from supabase_mcp.logger import logger
9 | from supabase_mcp.main import inspector, mcp, run
10 |
11 | # === UNIT TESTS ===
12 |
13 |
@pytest.mark.unit
def test_mcp_server_initializes():
    """Check server identity and that tools are registered per the MCP protocol."""
    # The server must advertise the expected name
    assert mcp.name == "supabase"

    # Enumerate tools through the real MCP protocol entry point
    registered = asyncio.run(mcp.list_tools())
    assert len(registered) >= 4, "Expected at least 4 tools to be registered"

    # Every tool must carry the mandatory MCP protocol fields
    for tool in registered:
        assert tool.name, "Tool must have a name"
        assert tool.description, "Tool must have a description"
        assert tool.inputSchema, "Tool must have an input schema"

    # The core database tools must all be present
    tool_names = {tool.name for tool in registered}
    required_tools = {"get_db_schemas", "get_tables", "get_table_schema", "execute_sql_query"}
    assert required_tools.issubset(tool_names), f"Missing required tools. Found: {tool_names}"
34 |
35 |
@pytest.mark.unit
def test_run_server_starts():
    """run() should delegate exactly once to the underlying MCP server."""
    with patch("supabase_mcp.main.mcp.run") as mocked_run:
        run()
    mocked_run.assert_called_once()
42 |
43 |
@pytest.mark.unit
def test_inspector_mode():
    """inspector() should launch the MCP dev inspector with a file spec."""
    with patch("mcp.cli.cli.dev") as dev_mock:
        inspector()
    dev_mock.assert_called_once_with(file_spec=ANY)
50 |
51 |
@pytest.mark.unit
def test_server_command_starts():
    """Test that the server command starts without immediately crashing.

    A healthy server keeps running, so hitting the 2-second timeout is the
    success path. The original version let ``subprocess.TimeoutExpired``
    propagate, which errored the test precisely when the server was working.
    """
    try:
        result = subprocess.run(
            ["supabase-mcp-server"],
            capture_output=True,
            text=True,
            timeout=2,  # Kill after 2 seconds since it's a long-running server
        )
    except subprocess.TimeoutExpired:
        # The server was still running when the timeout fired — that is success.
        return

    # If the process exited on its own within the timeout, it must have exited cleanly.
    assert result.returncode == 0, f"Server command failed: {result.stderr}"
62 |
63 |
@pytest.mark.unit
def test_mcp_server_tools():
    """Every tool defined in main.py must be registered with metadata intact."""
    registered = asyncio.run(mcp.list_tools())
    tool_names = {tool.name for tool in registered}

    # Full inventory of tools exposed by main.py
    all_required_tools = {
        "get_db_schemas",
        "get_tables",
        "get_table_schema",
        "execute_sql_query",
        "send_management_api_request",
        "live_dangerously",
        "get_management_api_spec",
        "get_management_api_safety_rules",
    }

    assert all_required_tools.issubset(tool_names), (
        f"Missing required tools. Found: {tool_names}, Expected: {all_required_tools}"
    )

    # Each registered tool must carry a description and an input schema
    for tool in registered:
        assert tool.description, f"Tool {tool.name} missing description"
        assert tool.inputSchema is not None, f"Tool {tool.name} missing input schema"
92 |
93 |
94 | # === INTEGRATION TESTS ===
95 |
96 |
@pytest.mark.integration
@pytest.mark.asyncio
async def test_db_tools_execution():
    """Integration test that verifies DB tools actually work

    Requires:
    - SUPABASE_PROJECT_REF
    - SUPABASE_DB_PASSWORD
    environment variables to be set
    """
    # The body was never implemented; an empty async test silently passes and
    # gives false confidence. Skip explicitly until it is written.
    pytest.skip("test_db_tools_execution is not implemented yet")
107 |
108 |
@pytest.mark.integration
@pytest.mark.asyncio
async def test_get_db_schemas_tool(integration_client):
    """Exercise the get_db_schemas tool against a live database.

    Checks that the tool runs, returns a QueryResult-shaped object, and that
    the 'public' schema is present with the expected per-schema fields.
    """
    from supabase_mcp.main import get_db_schemas

    # Invoke the actual tool function from main.py
    result = await get_db_schemas()

    # A QueryResult exposes rows, count and status
    for attr in ("rows", "count", "status"):
        assert hasattr(result, attr), f"Result should have {attr} attribute"

    # At least one schema must come back
    assert result.count > 0, "Should return at least some schemas"

    # Supabase always exposes a 'public' schema
    schema_names = [row["schema_name"] for row in result.rows]
    assert "public" in schema_names, "Expected 'public' schema not found"

    logger.info(f"Available schemas: {schema_names}")

    # Spot-check the per-schema fields on the first row
    first_schema = result.rows[0]
    for field in ("schema_name", "total_size", "table_count"):
        assert field in first_schema, f"Schema result missing '{field}' field"
147 |
148 |
@pytest.mark.integration
@pytest.mark.asyncio
async def test_get_tables_tool(integration_client):
    """Exercise the get_tables tool against the public schema.

    Checks that the tool runs and returns a QueryResult-shaped object; table
    structure is only validated when the schema actually contains tables.
    """
    from supabase_mcp.main import get_tables

    # Invoke the actual tool function for the public schema
    result = await get_tables("public")

    # A QueryResult exposes rows, count and status
    for attr in ("rows", "count", "status"):
        assert hasattr(result, attr), f"Result should have {attr} attribute"

    logger.info(f"Found {result.count} tables in public schema")

    # Structure checks only apply when at least one table exists
    if result.count > 0:
        table_names = [row.get("table_name") for row in result.rows]
        logger.info(f"Tables in public schema: {table_names}")

        first_table = result.rows[0]
        for field in ("table_name", "table_type"):
            assert field in first_table, f"Table result missing '{field}' field"
183 |
184 |
@pytest.mark.integration
@pytest.mark.asyncio
async def test_get_table_schema_tool(integration_client):
    """Exercise the get_table_schema tool on the first available public table.

    Checks that the tool runs, returns a QueryResult-shaped object, and that
    column rows carry the expected fields. Skips when the public schema has
    no tables to inspect.
    """
    from supabase_mcp.main import get_table_schema, get_tables

    # Discover a table to inspect
    tables_result = await get_tables("public")
    if tables_result.count == 0:
        pytest.skip("No tables available in public schema to test table schema")

    target_table = tables_result.rows[0]["table_name"]
    logger.info(f"Testing schema for table: {target_table}")

    # Invoke the actual tool function under test
    result = await get_table_schema("public", target_table)

    # A QueryResult exposes rows, count and status
    for attr in ("rows", "count", "status"):
        assert hasattr(result, attr), f"Result should have {attr} attribute"

    logger.info(f"Found {result.count} columns for table {target_table}")

    # Structure checks only apply when columns were returned
    if result.count > 0:
        first_column = result.rows[0]
        for field in ("column_name", "data_type", "is_nullable"):
            assert field in first_column, f"Column result missing '{field}' field"

        column_names = [row.get("column_name") for row in result.rows]
        logger.info(f"Columns in {target_table}: {column_names}")
231 |
232 |
@pytest.mark.integration
@pytest.mark.asyncio
async def test_execute_sql_query_tool(integration_client):
    """Exercise the execute_sql_query tool with several query shapes.

    Covers a literal SELECT, a query with an empty result set, and a
    multi-column information_schema query.
    """
    from supabase_mcp.main import execute_sql_query

    # Literal SELECT with known values
    result = await execute_sql_query("SELECT 1 as number, 'test' as text")

    # A QueryResult exposes rows, count and status
    for attr in ("rows", "count", "status"):
        assert hasattr(result, attr), f"Result should have {attr} attribute"

    # The returned row must match the literals exactly
    assert result.count == 1, "Expected exactly one row"
    assert result.rows[0]["number"] == 1, "First column should be 1"
    assert result.rows[0]["text"] == "test", "Second column should be 'test'"

    # A query matching nothing must return zero rows
    result = await execute_sql_query(
        "SELECT * FROM information_schema.tables WHERE table_name = 'nonexistent_table_xyz123'"
    )
    assert result.count == 0, "Should return zero rows for non-matching query"

    # A larger query over information_schema
    complex_result = await execute_sql_query("""
        SELECT
            table_schema,
            table_name,
            column_name
        FROM
            information_schema.columns
        WHERE
            table_schema = 'public'
        LIMIT 5
    """)

    logger.info(f"Complex query returned {complex_result.count} rows")

    # Structure checks only apply when rows came back
    if complex_result.count > 0:
        for field in ("table_schema", "table_name", "column_name"):
            assert field in complex_result.rows[0], f"Result missing '{field}' field"
286 |
287 |
@pytest.mark.integration
@pytest.mark.asyncio
@pytest.mark.skipif(not os.getenv("CI"), reason="Management API test only runs in CI environment")
async def test_management_api_request_tool(integration_client):
    """Exercise send_management_api_request against the real Management API.

    CI-only (needs SUPABASE_ACCESS_TOKEN). Issues a read-only GET for the
    project list through a dedicated API manager and validates the shape of
    the response.
    """
    from supabase_mcp.api_manager.api_manager import SupabaseApiManager
    from supabase_mcp.main import send_management_api_request

    # Use a dedicated manager so this test owns the client lifecycle
    api_manager = await SupabaseApiManager.create()

    # Route the tool's manager lookup to our dedicated instance
    with patch("supabase_mcp.api_manager.api_manager.SupabaseApiManager.get_manager", return_value=api_manager):
        try:
            # Safe, read-only request that works with any valid access token
            projects = await send_management_api_request(
                method="GET", path="/v1/projects", request_params={}, request_body={}
            )

            # The endpoint returns a list of project records
            assert isinstance(projects, list), "Result should be a list of projects"

            if not projects:
                logger.warning("API returned an empty list of projects")
            else:
                # Spot-check the structure of the first project record
                first_project = projects[0]
                assert isinstance(first_project, dict), "Project items should be dictionaries"
                assert "id" in first_project, "Project should have an ID"
                assert "name" in first_project, "Project should have a name"
                assert "database" in first_project, "Project should have database info"

                logger.info(f"Successfully retrieved {len(projects)} projects")
        finally:
            # Always release the HTTP client, even on failure
            await api_manager.close()
336 |
337 |
@pytest.mark.unit
@pytest.mark.asyncio
async def test_live_dangerously_tool():
    """Exercise the live_dangerously tool's safety-mode toggling.

    Covers toggling between safe and unsafe modes for both the database and
    the API services, and validates the status payload after each switch.
    """
    from supabase_mcp.api_manager.api_safety_config import SafetyLevel
    from supabase_mcp.main import live_dangerously

    # --- Database service ---
    # Explicitly start from safe (read-only) mode
    outcome = await live_dangerously(service="database", enable=False)
    assert outcome["service"] == "database", "Response should identify database service"
    assert outcome["mode"] == "ro", "Database should be in read-only mode"

    # Enable writes
    outcome = await live_dangerously(service="database", enable=True)
    assert outcome["service"] == "database", "Response should identify database service"
    assert outcome["mode"] == "rw", "Database should be in read-write mode"

    # And back to read-only
    outcome = await live_dangerously(service="database", enable=False)
    assert outcome["service"] == "database", "Response should identify database service"
    assert outcome["mode"] == "ro", "Database should be in read-only mode"

    # --- API service ---
    # Safe mode first; the mode may be an Enum member, so compare both ways
    outcome = await live_dangerously(service="api", enable=False)
    assert outcome["service"] == "api", "Response should identify API service"
    assert outcome["mode"] == SafetyLevel.SAFE or outcome["mode"].value == "safe", "API should be in safe mode"

    # Unsafe mode
    outcome = await live_dangerously(service="api", enable=True)
    assert outcome["service"] == "api", "Response should identify API service"
    assert outcome["mode"] == SafetyLevel.UNSAFE or outcome["mode"].value == "unsafe", "API should be in unsafe mode"

    # Back to safe mode
    outcome = await live_dangerously(service="api", enable=False)
    assert outcome["service"] == "api", "Response should identify API service"
    assert outcome["mode"] == SafetyLevel.SAFE or outcome["mode"].value == "safe", "API should be in safe mode"

    logger.info("Successfully tested mode switching for both database and API services")
386 |
387 |
@pytest.mark.unit
@pytest.mark.asyncio
async def test_get_management_api_spec_tool():
    """get_management_api_spec must return a well-formed OpenAPI document."""
    from supabase_mcp.main import get_management_api_spec

    spec = await get_management_api_spec()

    # The spec is a plain dictionary
    assert isinstance(spec, dict), "API spec should be a dictionary"

    # Standard top-level OpenAPI sections must be present
    for section in ("openapi", "paths", "info"):
        if section == "openapi":
            assert "openapi" in spec, "Spec should contain 'openapi' version field"
        elif section == "paths":
            assert "paths" in spec, "Spec should contain 'paths' section"
        else:
            assert "info" in spec, "Spec should contain 'info' section"

    # 'paths' must be a non-empty mapping of endpoints
    assert isinstance(spec["paths"], dict), "Paths should be a dictionary"
    assert len(spec["paths"]) > 0, "Spec should contain at least one path"

    logger.info(f"API spec version: {spec.get('openapi')}")
    logger.info(f"API contains {len(spec['paths'])} endpoints")
417 |
418 |
@pytest.mark.unit
@pytest.mark.asyncio
async def test_get_management_api_safety_rules_tool():
    """get_management_api_safety_rules must return human-readable rule text.

    The returned string must mention blocked operations, unsafe operations
    and the current safety mode.
    """
    from supabase_mcp.main import get_management_api_safety_rules

    rules_text = await get_management_api_safety_rules()

    # Rules come back as a single formatted string
    assert isinstance(rules_text, str), "Safety rules should be returned as a string"

    # All three expected sections must appear in the text
    assert "BLOCKED Operations" in rules_text, "Rules should mention blocked operations"
    assert "UNSAFE Operations" in rules_text, "Rules should mention unsafe operations"
    assert "Current mode" in rules_text, "Rules should mention current mode"

    logger.info("Successfully retrieved Management API safety rules")
443 |
--------------------------------------------------------------------------------
/tests/test_settings.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import pytest
4 | from pydantic import ValidationError
5 |
6 | from supabase_mcp.settings import SUPPORTED_REGIONS, Settings
7 |
8 |
@pytest.fixture(autouse=True)
def reset_settings_singleton():
    """Ensure each test starts and ends without a cached Settings singleton."""

    def _drop_instance():
        # Remove the cached singleton, if one has been created
        if hasattr(Settings, "_instance"):
            delattr(Settings, "_instance")

    _drop_instance()  # fresh state going in
    yield
    _drop_instance()  # and clean up afterwards
19 |
20 |
def test_settings_default_values(clean_environment):
    """With no config file and no env vars, local-dev defaults apply."""
    defaults = Settings.with_config()  # no config file supplied
    assert defaults.supabase_project_ref == "127.0.0.1:54322"
    assert defaults.supabase_db_password == "postgres"
26 |
27 |
def test_settings_from_env_test(clean_environment):
    """Values from the .env.test config file are picked up."""
    from_file = Settings.with_config(".env.test")
    assert from_file.supabase_project_ref == "test-project-ref"
    assert from_file.supabase_db_password == "test-db-password"
33 |
34 |
def test_settings_from_env_vars(clean_environment):
    """Environment variables win over values from a config file."""
    overrides = {"SUPABASE_PROJECT_REF": "from-env", "SUPABASE_DB_PASSWORD": "env-password"}
    with patch.dict("os.environ", overrides, clear=True):
        # Even with the config file present, env vars take precedence
        settings = Settings.with_config(".env.test")
        assert settings.supabase_project_ref == "from-env"
        assert settings.supabase_db_password == "env-password"
42 |
43 |
def test_settings_region_validation():
    """Cover default, valid, and invalid region handling."""
    # Default region when nothing is configured
    assert Settings().supabase_region == "us-east-1"

    # A supported region supplied via the environment is accepted
    with patch.dict("os.environ", {"SUPABASE_REGION": "ap-southeast-1"}, clear=True):
        assert Settings().supabase_region == "ap-southeast-1"

    # An unsupported region must be rejected with a helpful message
    with pytest.raises(ValidationError) as exc_info:
        with patch.dict("os.environ", {"SUPABASE_REGION": "invalid-region"}, clear=True):
            Settings()
    assert "Region 'invalid-region' is not supported" in str(exc_info.value)
62 |
63 |
def test_supported_regions():
    """Every region listed in SUPPORTED_REGIONS must be accepted."""
    for supported in SUPPORTED_REGIONS.__args__:
        with patch.dict("os.environ", {"SUPABASE_REGION": supported}, clear=True):
            # Each supported region must round-trip through validation
            assert Settings().supabase_region == supported
71 |
--------------------------------------------------------------------------------