├── .gitignore ├── LICENSE ├── README.md ├── compose.yaml └── servers ├── filesystem ├── .dockerignore ├── Dockerfile ├── README.md ├── compose.yaml ├── config.py ├── main.py └── requirements.txt ├── get-user-info ├── .dockerignore ├── Dockerfile ├── README.md ├── compose.yaml ├── main.py └── requirements.txt ├── git ├── README.md ├── main.py └── requirements.txt ├── mcp-proxy ├── README.md ├── main.py └── requirements.txt ├── memory ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── compose.yaml ├── main.py └── requirements.txt ├── slack ├── Dockerfile ├── README.md ├── compose.yaml ├── main.py └── requirements.txt ├── summarizer-tool ├── README.md ├── __init__.py ├── docker-compose.yml ├── main.py ├── requirements.txt └── summarizers │ ├── __init__.py │ ├── base.py │ └── text_summarizer.py ├── time ├── .dockerignore ├── Dockerfile ├── README.md ├── compose.yaml ├── main.py └── requirements.txt └── weather ├── .dockerignore ├── Dockerfile ├── README.md ├── compose.yaml ├── main.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /build 4 | /.svelte-kit 5 | /package 6 | .env 7 | .env.* 8 | !.env.example 9 | vite.config.js.timestamp-* 10 | vite.config.ts.timestamp-* 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | *$py.class 15 | 16 | # C extensions 17 | *.so 18 | 19 | # Pyodide distribution 20 | static/pyodide/* 21 | !static/pyodide/pyodide-lock.json 22 | 23 | # Distribution / packaging 24 | .Python 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib64/ 32 | parts/ 33 | sdist/ 34 | var/ 35 | wheels/ 36 | share/python-wheels/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | MANIFEST 41 | 42 | # PyInstaller 43 | # Usually these files are written by a python script from a template 44 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
45 | *.manifest 46 | *.spec 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Unit test / coverage reports 53 | htmlcov/ 54 | .tox/ 55 | .nox/ 56 | .coverage 57 | .coverage.* 58 | .cache 59 | nosetests.xml 60 | coverage.xml 61 | *.cover 62 | *.py,cover 63 | .hypothesis/ 64 | .pytest_cache/ 65 | cover/ 66 | 67 | # Translations 68 | *.mo 69 | *.pot 70 | 71 | # Django stuff: 72 | *.log 73 | local_settings.py 74 | db.sqlite3 75 | db.sqlite3-journal 76 | 77 | # Flask stuff: 78 | instance/ 79 | .webassets-cache 80 | 81 | # Scrapy stuff: 82 | .scrapy 83 | 84 | # Sphinx documentation 85 | docs/_build/ 86 | 87 | # PyBuilder 88 | .pybuilder/ 89 | target/ 90 | 91 | # Jupyter Notebook 92 | .ipynb_checkpoints 93 | 94 | # IPython 95 | profile_default/ 96 | ipython_config.py 97 | 98 | # pyenv 99 | # For a library or package, you might want to ignore these files since the code is 100 | # intended to run in multiple environments; otherwise, check them in: 101 | # .python-version 102 | 103 | # pipenv 104 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 105 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 106 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 107 | # install all needed dependencies. 108 | #Pipfile.lock 109 | 110 | # poetry 111 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 112 | # This is especially recommended for binary packages to ensure reproducibility, and is more 113 | # commonly ignored for libraries. 114 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 115 | #poetry.lock 116 | 117 | # pdm 118 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
119 | #pdm.lock 120 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 121 | # in version control. 122 | # https://pdm.fming.dev/#use-with-ide 123 | .pdm.toml 124 | 125 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 126 | __pypackages__/ 127 | 128 | # Celery stuff 129 | celerybeat-schedule 130 | celerybeat.pid 131 | 132 | # SageMath parsed files 133 | *.sage.py 134 | 135 | # Environments 136 | .env 137 | .venv 138 | env/ 139 | venv/ 140 | ENV/ 141 | env.bak/ 142 | venv.bak/ 143 | 144 | # Spyder project settings 145 | .spyderproject 146 | .spyproject 147 | 148 | # Rope project settings 149 | .ropeproject 150 | 151 | # mkdocs documentation 152 | /site 153 | 154 | # mypy 155 | .mypy_cache/ 156 | .dmypy.json 157 | dmypy.json 158 | 159 | # Pyre type checker 160 | .pyre/ 161 | 162 | # pytype static type analyzer 163 | .pytype/ 164 | 165 | # Cython debug symbols 166 | cython_debug/ 167 | 168 | # PyCharm 169 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 170 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 171 | # and can be added to the global gitignore or merged into this file. For a more nuclear 172 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
173 | .idea/ 174 | 175 | # Logs 176 | logs 177 | *.log 178 | npm-debug.log* 179 | yarn-debug.log* 180 | yarn-error.log* 181 | lerna-debug.log* 182 | .pnpm-debug.log* 183 | 184 | # Diagnostic reports (https://nodejs.org/api/report.html) 185 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 186 | 187 | # Runtime data 188 | pids 189 | *.pid 190 | *.seed 191 | *.pid.lock 192 | 193 | # Directory for instrumented libs generated by jscoverage/JSCover 194 | lib-cov 195 | 196 | # Coverage directory used by tools like istanbul 197 | coverage 198 | *.lcov 199 | 200 | # nyc test coverage 201 | .nyc_output 202 | 203 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 204 | .grunt 205 | 206 | # Bower dependency directory (https://bower.io/) 207 | bower_components 208 | 209 | # node-waf configuration 210 | .lock-wscript 211 | 212 | # Compiled binary addons (https://nodejs.org/api/addons.html) 213 | build/Release 214 | 215 | # Dependency directories 216 | node_modules/ 217 | jspm_packages/ 218 | 219 | # Snowpack dependency directory (https://snowpack.dev/) 220 | web_modules/ 221 | 222 | # TypeScript cache 223 | *.tsbuildinfo 224 | 225 | # Optional npm cache directory 226 | .npm 227 | 228 | # Optional eslint cache 229 | .eslintcache 230 | 231 | # Optional stylelint cache 232 | .stylelintcache 233 | 234 | # Microbundle cache 235 | .rpt2_cache/ 236 | .rts2_cache_cjs/ 237 | .rts2_cache_es/ 238 | .rts2_cache_umd/ 239 | 240 | # Optional REPL history 241 | .node_repl_history 242 | 243 | # Output of 'npm pack' 244 | *.tgz 245 | 246 | # Yarn Integrity file 247 | .yarn-integrity 248 | 249 | # dotenv environment variable files 250 | .env 251 | .env.development.local 252 | .env.test.local 253 | .env.production.local 254 | .env.local 255 | 256 | # parcel-bundler cache (https://parceljs.org/) 257 | .cache 258 | .parcel-cache 259 | 260 | # Next.js build output 261 | .next 262 | out 263 | 264 | # Nuxt.js build / generate output 265 | .nuxt 266 | dist 267 | 268 | # 
Gatsby files 269 | .cache/ 270 | # Comment in the public line in if your project uses Gatsby and not Next.js 271 | # https://nextjs.org/blog/next-9-1#public-directory-support 272 | # public 273 | 274 | # vuepress build output 275 | .vuepress/dist 276 | 277 | # vuepress v2.x temp and cache directory 278 | .temp 279 | .cache 280 | 281 | # Docusaurus cache and generated files 282 | .docusaurus 283 | 284 | # Serverless directories 285 | .serverless/ 286 | 287 | # FuseBox cache 288 | .fusebox/ 289 | 290 | # DynamoDB Local files 291 | .dynamodb/ 292 | 293 | # TernJS port file 294 | .tern-port 295 | 296 | # Stores VSCode versions used for testing VSCode extensions 297 | .vscode-test 298 | 299 | # yarn v2 300 | .yarn/cache 301 | .yarn/unplugged 302 | .yarn/build-state.yml 303 | .yarn/install-state.gz 304 | .pnp.* 305 | 306 | # cypress artifacts 307 | cypress/videos 308 | cypress/screenshots 309 | .vscode/settings.json 310 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Timothy Jaeryang Baek 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🌟 OpenAPI Tool Servers 2 | 3 | This repository provides reference OpenAPI Tool Server implementations making it easy and secure for developers to integrate external tooling and data sources into LLM agents and workflows. Designed for maximum ease of use and minimal learning curve, these implementations utilize the widely adopted and battle-tested [OpenAPI specification](https://www.openapis.org/) as the standard protocol. 4 | 5 | By leveraging OpenAPI, we eliminate the need for a proprietary or unfamiliar communication protocol, ensuring you can quickly and confidently build or integrate servers. This means less time spent figuring out custom interfaces and more time building powerful tools that enhance your AI applications. 6 | 7 | ## ☝️ Why OpenAPI? 8 | 9 | - **Established Standard**: OpenAPI is a widely used, production-proven API standard backed by thousands of tools, companies, and communities. 10 | 11 | - **No Reinventing the Wheel**: No additional documentation or proprietary spec confusion. If you build REST APIs or use OpenAPI today, you're already set. 12 | 13 | - **Easy Integration & Hosting**: Deploy your tool servers externally or locally without vendor lock-in or complex configurations. 14 | 15 | - **Strong Security Focus**: Built around HTTP/REST APIs, OpenAPI inherently supports widely used, secure communication methods including HTTPS and well-proven authentication standards (OAuth, JWT, API Keys). 
16 | 17 | - **Future-Friendly & Stable**: Unlike less mature or experimental protocols, OpenAPI promises reliability, stability, and long-term community support. 18 | 19 | ## 🚀 Quickstart 20 | 21 | Get started quickly with our reference FastAPI-based implementations provided in the `servers/` directory. (You can adapt these examples into your preferred stack as needed, such as using [FastAPI](https://fastapi.tiangolo.com/), [FastOpenAPI](https://github.com/mr-fatalyst/fastopenapi) or any other OpenAPI-compatible library): 22 | 23 | ```bash 24 | git clone https://github.com/open-webui/openapi-servers 25 | cd openapi-servers 26 | 27 | # Example: Installing dependencies for a specific server 'filesystem' 28 | cd servers/filesystem 29 | pip install -r requirements.txt 30 | uvicorn main:app --host 0.0.0.0 --reload 31 | ``` 32 | 33 | Or using Docker: 34 | 35 | ```bash 36 | cd servers/filesystem 37 | docker compose up 38 | ``` 39 | 40 | Now, simply point your OpenAPI-compatible clients or AI agents to your local or publicly deployed URL—no configuration headaches, no complicated transports. 41 | 42 | ## 📂 Server Reference Implementations 43 | 44 | Reference implementations provided in this repository demonstrate common use-cases clearly and simply: 45 | 46 | - [**Filesystem Access**](servers/filesystem) - Manage local file operations safely with configurable restrictions. 47 | - [**Git Server**](servers/git) - Expose Git repositories for searching, reading, and possibly writing via controlled API endpoints. 48 | - [**Memory & Knowledge Graph**](servers/memory) - Persistent memory management and semantic knowledge querying using popular and reliable storage techniques. 49 | - [**Weather Server**](servers/weather) - Provide current weather conditions and forecasts from trusted public APIs. 50 | - [**Get User Info Server**](servers/get-user-info) - Access and return enriched user profile information from authentication providers or internal systems. 
51 | - [**WIP: Web Search & Fetch**](servers/web-search) - Retrieve and convert web-based content securely into structured API results usable by LLMs. 52 | - [**WIP: Database Server**](servers/database) - Query and inspect database schemas across common DB engines like PostgreSQL, MySQL, and SQLite. 53 | 54 | 55 | (More examples and reference implementations will be actively developed and continually updated.) 56 | 57 | 58 | > [!IMPORTANT] 59 | > 💡 Contribute Your Server! 60 | > 61 | > We strongly encourage the community to contribute their own OpenAPI tool server examples! This is more important than it might seem: The world doesn’t need another closed protocol or proprietary format gatekeeping innovation—we need clearly defined, open, and composable APIs backed by open documentation and proven tools. OpenAPI is the future-proof foundation we can all build on—together. 62 | > 63 | > Let’s build an open ecosystem where every tool speaks the same language—yours. 64 | 65 | ## 🔌 Bridge MCP → OpenAPI (Optional) 66 | 67 | For the easiest way to expose your MCP tools as OpenAPI-compatible APIs, we recommend using [mcpo](https://github.com/open-webui/mcpo). This enables tool providers who initially implemented MCP servers to expose them effortlessly as standard OpenAPI-compatible APIs, ensuring existing MCP servers and resources remain accessible without additional hassle. 68 | 69 | **Quick Usage:** 70 | ```bash 71 | uvx mcpo --port 8000 -- uvx mcp-server-time --local-timezone=America/New_York 72 | ``` 73 | 74 | Alternatively, we also provide a simple Python-based proxy server: 75 | 76 | **Example:** 77 | ```bash 78 | cd servers/mcp-proxy 79 | pip install -r requirements.txt 80 | python main.py --host 0.0.0.0 --port 8000 -- uvx mcp-server-time --local-timezone=America/New_York 81 | ``` 82 | 83 | Both methods help bridge existing MCP servers with OpenAPI clients, removing transport and security complexities during integration or migration. 
84 | 85 | ## 🔃 Bridge OpenAPI → MCP (Optional) 86 | 87 | In addition to turning MCP tool servers into OpenAPI-compatible APIs (via mcpo), it's also possible to go the other direction: expose your OpenAPI-compatible tool server as an MCP server. This makes it easy to integrate any OpenAPI tool into environments or agents that expect an MCP-compatible interface. 88 | 89 | Several community-maintained projects are available to help with this: 90 | 91 | - [🌉 openapi-mcp-server](https://github.com/janwilmake/openapi-mcp-server) 92 | Acts as a translator from any OpenAPI spec to an MCP tool, allowing for easy reuse of RESTful APIs within MCP-only frameworks. 93 | 94 | - [🔁 mcp-openapi-server](https://github.com/ivo-toby/mcp-openapi-server) 95 | A lightweight adapter that converts OpenAPI-described endpoints to usable MCP tool servers on the fly. 96 | 97 | - [🌀 mcp-openapi-proxy](https://github.com/matthewhand/mcp-openapi-proxy) 98 | Wraps OpenAPI endpoints in a proxy that re-expresses them in MCP-compatible format with minimal configuration. 99 | 100 | - [⚡ fastapi_mcp](https://github.com/tadata-org/fastapi_mcp) 101 | A FastAPI extension that allows you to serve native FastAPI endpoints directly through the MCP protocol. 102 | 103 | With these bridges, you gain full interoperability in both directions: 104 | 105 | - Run OpenAPI tools using existing MCP pipelines 106 | - Upgrade or replace MCP-native tools with more stable, secure, OpenAPI-based implementations 107 | 108 | This two-way bridge is a major step forward in simplifying tool architecture, empowering developers to choose the most efficient and future-proof infrastructure without compromising compatibility. 109 | 110 | ## 📜 License 111 | 112 | Licensed under [MIT License](LICENSE). 113 | 114 | ## 🌱 Open WebUI Community 115 | 116 | - For general discussions, technical exchange, and announcements, visit our [Community Discussions](https://github.com/open-webui/openapi-servers/discussions) page. 
117 | - Have ideas or feedback? Please open an issue! 118 | -------------------------------------------------------------------------------- /compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | filesystem-server: 3 | build: 4 | context: ./servers/filesystem 5 | ports: 6 | - 8081:8000 7 | memory-server: 8 | build: 9 | context: ./servers/memory 10 | ports: 11 | - 8082:8000 12 | volumes: 13 | - memory:/app/data:rw 14 | time-server: 15 | build: 16 | context: ./servers/time 17 | ports: 18 | - 8083:8000 19 | 20 | volumes: 21 | memory: 22 | -------------------------------------------------------------------------------- /servers/filesystem/.dockerignore: -------------------------------------------------------------------------------- 1 | # Include any files or directories that you don't want to be copied to your 2 | # container here (e.g., local build artifacts, temporary files, etc.). 3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /servers/filesystem/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 
# If you need more help, visit the Dockerfile reference guide at
# https://docs.docker.com/go/dockerfile-reference/

# Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7

ARG PYTHON_VERSION=3.10.12
FROM python:${PYTHON_VERSION}-slim as base

# Prevents Python from writing pyc files.
ENV PYTHONDONTWRITEBYTECODE=1

# Keeps Python from buffering stdout and stderr to avoid situations where
# the application crashes without emitting any logs due to buffering.
ENV PYTHONUNBUFFERED=1

WORKDIR /app

# Create a non-privileged user that the app will run under.
# See https://docs.docker.com/go/dockerfile-user-best-practices/
ARG UID=10001
RUN adduser \
    --disabled-password \
    --gecos "" \
    --home "/nonexistent" \
    --shell "/sbin/nologin" \
    --no-create-home \
    --uid "${UID}" \
    appuser

# Download dependencies as a separate step to take advantage of Docker's caching.
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
# Leverage a bind mount to requirements.txt to avoid having to copy them into
# into this layer.
RUN --mount=type=cache,target=/root/.cache/pip \
    --mount=type=bind,source=requirements.txt,target=requirements.txt \
    python -m pip install -r requirements.txt

# Switch to the non-privileged user to run the application.
USER appuser

# Copy the source code into the container.
COPY . .

# Expose the port that the application listens on.
EXPOSE 8000

# Run the application.
# Exec (JSON-array) form so uvicorn runs as PID 1 and receives SIGTERM directly;
# the previous shell form wrapped it in /bin/sh, breaking graceful shutdown.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

--------------------------------------------------------------------------------
/servers/filesystem/README.md:
--------------------------------------------------------------------------------
# 🗂️ Filesystem Tool Server

A FastAPI-powered server to interact with your filesystem via OpenAPI.

## 🚀 Quickstart

Clone the repo and run the server:

```bash
git clone https://github.com/open-webui/openapi-servers
cd openapi-servers/servers/filesystem
pip install -r requirements.txt
uvicorn main:app --host 0.0.0.0 --reload
```

📡 Your Filesystem server will be live at:
http://localhost:8000/docs

---

Built for plug & play ⚡
--------------------------------------------------------------------------------
/servers/filesystem/compose.yaml:
--------------------------------------------------------------------------------
services:
  server:
    build:
      context: .
5 | ports: 6 | - 8000:8000 7 | 8 | -------------------------------------------------------------------------------- /servers/filesystem/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | 4 | # Constants 5 | ALLOWED_DIRECTORIES = [ 6 | str(pathlib.Path(os.path.expanduser("~/tmp")).resolve()) 7 | ] # 👈 Replace with your paths -------------------------------------------------------------------------------- /servers/filesystem/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException, Body 2 | from fastapi.responses import PlainTextResponse 3 | from fastapi.middleware.cors import CORSMiddleware 4 | 5 | 6 | from pydantic import BaseModel, Field 7 | import os 8 | import pathlib 9 | import asyncio 10 | from typing import List, Optional, Literal, Dict, Union 11 | import difflib 12 | import shutil 13 | from datetime import datetime, timezone, timedelta 14 | import json 15 | import secrets 16 | from config import ALLOWED_DIRECTORIES 17 | 18 | app = FastAPI( 19 | title="Secure Filesystem API", 20 | version="0.1.1", 21 | description="A secure file manipulation server for reading, editing, writing, listing, and searching files with access restrictions.", 22 | ) 23 | 24 | origins = ["*"] 25 | 26 | app.add_middleware( 27 | CORSMiddleware, 28 | allow_origins=origins, 29 | allow_credentials=True, 30 | allow_methods=["*"], 31 | allow_headers=["*"], 32 | ) 33 | 34 | # ------------------------------------------------------------------------------ 35 | # Utility functions 36 | # ------------------------------------------------------------------------------ 37 | 38 | 39 | def normalize_path(requested_path: str) -> pathlib.Path: 40 | requested = pathlib.Path(os.path.expanduser(requested_path)).resolve() 41 | for allowed in ALLOWED_DIRECTORIES: 42 | if str(requested).lower().startswith(allowed.lower()): # Case-insensitive check 43 | 
return requested 44 | raise HTTPException( 45 | status_code=403, 46 | detail={ 47 | "error": "Access Denied", 48 | "requested_path": str(requested), 49 | "message": "Requested path is outside allowed directories.", 50 | "allowed_directories": ALLOWED_DIRECTORIES, 51 | }, 52 | ) 53 | 54 | 55 | # ------------------------------------------------------------------------------ 56 | # Pydantic Schemas 57 | # ------------------------------------------------------------------------------ 58 | 59 | 60 | class ReadFileRequest(BaseModel): 61 | path: str = Field(..., description="Path to the file to read") 62 | 63 | 64 | class WriteFileRequest(BaseModel): 65 | path: str = Field( 66 | ..., description="Path to write to. Existing file will be overwritten." 67 | ) 68 | content: str = Field(..., description="UTF-8 encoded text content to write.") 69 | 70 | 71 | class EditOperation(BaseModel): 72 | oldText: str = Field( 73 | ..., description="Text to find and replace (exact match required)" 74 | ) 75 | newText: str = Field(..., description="Replacement text") 76 | 77 | 78 | class EditFileRequest(BaseModel): 79 | path: str = Field(..., description="Path to the file to edit.") 80 | edits: List[EditOperation] = Field(..., description="List of edits to apply.") 81 | dryRun: bool = Field( 82 | False, description="If true, only return diff without modifying file." 83 | ) 84 | 85 | 86 | class CreateDirectoryRequest(BaseModel): 87 | path: str = Field( 88 | ..., 89 | description="Directory path to create. Intermediate dirs are created automatically.", 90 | ) 91 | 92 | 93 | class ListDirectoryRequest(BaseModel): 94 | path: str = Field(..., description="Directory path to list contents for.") 95 | 96 | 97 | class DirectoryTreeRequest(BaseModel): 98 | path: str = Field( 99 | ..., description="Directory path for which to return recursive tree." 
100 | ) 101 | 102 | 103 | class SearchFilesRequest(BaseModel): 104 | path: str = Field(..., description="Base directory to search in.") 105 | pattern: str = Field( 106 | ..., description="Filename pattern (case-insensitive substring match)." 107 | ) 108 | excludePatterns: Optional[List[str]] = Field( 109 | default=[], description="Patterns to exclude." 110 | ) 111 | 112 | 113 | class SearchContentRequest(BaseModel): 114 | path: str = Field(..., description="Base directory to search within.") 115 | search_query: str = Field(..., description="Text content to search for (case-insensitive).") 116 | recursive: bool = Field( 117 | default=True, description="Whether to search recursively in subdirectories." 118 | ) 119 | file_pattern: Optional[str] = Field( 120 | default="*", description="Glob pattern to filter files to search within (e.g., '*.py')." 121 | ) 122 | 123 | 124 | class DeletePathRequest(BaseModel): 125 | path: str = Field(..., description="Path to the file or directory to delete.") 126 | recursive: bool = Field( 127 | default=False, description="If true and path is a directory, delete recursively. Required if directory is not empty." 128 | ) 129 | confirmation_token: Optional[str] = Field( 130 | default=None, description="Token required for confirming deletion after initial request." 
131 | ) 132 | 133 | 134 | class MovePathRequest(BaseModel): 135 | source_path: str = Field(..., description="The current path of the file or directory.") 136 | destination_path: str = Field(..., description="The new path for the file or directory.") 137 | 138 | 139 | class GetMetadataRequest(BaseModel): 140 | path: str = Field(..., description="Path to the file or directory to get metadata for.") 141 | 142 | 143 | # ------------------------------------------------------------------------------ 144 | # Global state for pending confirmations 145 | # ------------------------------------------------------------------------------ 146 | 147 | # --- Confirmation Token State Management (using a file) --- 148 | CONFIRMATION_FILE = pathlib.Path("./.pending_confirmations.json") 149 | CONFIRMATION_TTL_SECONDS = 60 # Token validity period 150 | 151 | def load_confirmations() -> Dict[str, Dict]: 152 | """Loads pending confirmations from the JSON file.""" 153 | if not CONFIRMATION_FILE.exists(): 154 | return {} 155 | try: 156 | with CONFIRMATION_FILE.open("r") as f: 157 | data = json.load(f) 158 | # Convert expiry string back to datetime object 159 | now = datetime.now(timezone.utc) 160 | valid_confirmations = {} 161 | for token, details in data.items(): 162 | try: 163 | details["expiry"] = datetime.fromisoformat(details["expiry"]) 164 | # Clean up expired tokens during load 165 | if details["expiry"] > now: 166 | valid_confirmations[token] = details 167 | except (ValueError, TypeError, KeyError): 168 | print(f"Warning: Skipping invalid confirmation data for token {token}") 169 | continue # Skip invalid entries 170 | return valid_confirmations 171 | except (json.JSONDecodeError, IOError) as e: 172 | print(f"Error loading confirmations file: {e}. 
Returning empty dict.") 173 | return {} 174 | 175 | def save_confirmations(confirmations: Dict[str, Dict]): 176 | """Saves pending confirmations to the JSON file.""" 177 | try: 178 | # Convert datetime objects to ISO strings for JSON serialization 179 | serializable_confirmations = {} 180 | for token, details in confirmations.items(): 181 | serializable_details = details.copy() 182 | serializable_details["expiry"] = details["expiry"].isoformat() 183 | serializable_confirmations[token] = serializable_details 184 | 185 | with CONFIRMATION_FILE.open("w") as f: 186 | json.dump(serializable_confirmations, f, indent=2) 187 | except IOError as e: 188 | print(f"Error saving confirmations file: {e}") 189 | 190 | # Clean up the file on startup if it exists from a previous run 191 | if CONFIRMATION_FILE.exists(): 192 | # print("Cleaning up stale confirmation file on startup.") 193 | try: 194 | CONFIRMATION_FILE.unlink() 195 | except OSError as e: 196 | # print(f"Warning: Could not delete stale confirmation file: {e}") # Removed print 197 | pass # Silently ignore if cleanup fails, not critical 198 | 199 | # ------------------------------------------------------------------------------ 200 | # Routes 201 | # ------------------------------------------------------------------------------ 202 | 203 | 204 | class SuccessResponse(BaseModel): 205 | message: str = Field(..., description="Success message indicating the operation was completed.") 206 | 207 | 208 | class ReadFileResponse(BaseModel): 209 | content: str = Field(..., description="UTF-8 encoded text content of the file.") 210 | 211 | 212 | class DiffResponse(BaseModel): 213 | diff: str = Field(..., description="Unified diff output comparing original and modified content.") 214 | 215 | 216 | class ConfirmationRequiredResponse(BaseModel): 217 | message: str = Field(..., description="Message indicating confirmation is required.") 218 | confirmation_token: str = Field(..., description="Token needed for the confirmation step.") 
219 | expires_at: datetime = Field(..., description="UTC timestamp when the token expires.") 220 | 221 | 222 | @app.post("/read_file", response_model=ReadFileResponse, summary="Read a file") # Changed response_class to response_model 223 | async def read_file(data: ReadFileRequest = Body(...)): 224 | """ 225 | Read the entire contents of a file and return as JSON. 226 | """ 227 | path = normalize_path(data.path) 228 | try: 229 | file_content = path.read_text(encoding="utf-8") 230 | return ReadFileResponse(content=file_content) # Return Pydantic model instance 231 | except FileNotFoundError: 232 | raise HTTPException(status_code=404, detail=f"File not found: {data.path}") 233 | except PermissionError: 234 | raise HTTPException(status_code=403, detail=f"Permission denied for file: {data.path}") 235 | except Exception as e: 236 | # More specific error for generic read issues 237 | raise HTTPException(status_code=500, detail=f"Failed to read file {data.path}: {str(e)}") 238 | 239 | 240 | @app.post("/write_file", response_model=SuccessResponse, summary="Write to a file") 241 | async def write_file(data: WriteFileRequest = Body(...)): 242 | """ 243 | Write content to a file, overwriting if it exists. Returns JSON success message. 244 | """ 245 | path = normalize_path(data.path) 246 | try: 247 | path.write_text(data.content, encoding="utf-8") 248 | return SuccessResponse(message=f"Successfully wrote to {data.path}") 249 | except PermissionError: 250 | raise HTTPException(status_code=403, detail=f"Permission denied to write to {data.path}") 251 | except Exception as e: 252 | raise HTTPException(status_code=500, detail=f"Failed to write to {data.path}: {str(e)}") 253 | 254 | @app.post( 255 | "/edit_file", 256 | response_model=Union[SuccessResponse, DiffResponse], # Use Union for multiple response types 257 | summary="Edit a file with diff" 258 | ) 259 | async def edit_file(data: EditFileRequest = Body(...)): 260 | """ 261 | Apply a list of edits to a text file. 
262 | Returns JSON success message or JSON diff on dry-run. 263 | """ 264 | path = normalize_path(data.path) 265 | try: 266 | original = path.read_text(encoding="utf-8") 267 | except FileNotFoundError: 268 | raise HTTPException(status_code=404, detail=f"File not found: {data.path}") 269 | except PermissionError: 270 | raise HTTPException(status_code=403, detail=f"Permission denied to read file: {data.path}") 271 | except Exception as e: 272 | raise HTTPException(status_code=500, detail=f"Failed to read file {data.path} for editing: {str(e)}") 273 | 274 | modified = original 275 | try: 276 | for edit in data.edits: 277 | if edit.oldText not in modified: 278 | raise HTTPException( 279 | status_code=400, 280 | detail=f"Edit failed: oldText not found in content: '{edit.oldText[:50]}...'", 281 | ) 282 | modified = modified.replace(edit.oldText, edit.newText, 1) 283 | 284 | if data.dryRun: 285 | diff_output = difflib.unified_diff( 286 | original.splitlines(keepends=True), 287 | modified.splitlines(keepends=True), 288 | fromfile=f"a/{data.path}", 289 | tofile=f"b/{data.path}", 290 | ) 291 | return DiffResponse(diff="".join(diff_output)) # Return JSON diff 292 | 293 | # Write changes if not dry run 294 | path.write_text(modified, encoding="utf-8") 295 | return SuccessResponse(message=f"Successfully edited file {data.path}") # Return JSON success 296 | 297 | except PermissionError: 298 | raise HTTPException(status_code=403, detail=f"Permission denied to write edited file: {data.path}") 299 | except Exception as e: 300 | # Catch errors during writing the modified file 301 | raise HTTPException(status_code=500, detail=f"Failed to write edited file {data.path}: {str(e)}") 302 | 303 | 304 | @app.post( 305 | "/create_directory", response_model=SuccessResponse, summary="Create a directory" 306 | ) 307 | async def create_directory(data: CreateDirectoryRequest = Body(...)): 308 | """ 309 | Create a new directory recursively. Returns JSON success message. 
310 | """ 311 | dir_path = normalize_path(data.path) 312 | try: 313 | dir_path.mkdir(parents=True, exist_ok=True) 314 | return SuccessResponse(message=f"Successfully created directory {data.path}") 315 | except PermissionError: 316 | raise HTTPException(status_code=403, detail=f"Permission denied to create directory {data.path}") 317 | except Exception as e: 318 | raise HTTPException(status_code=500, detail=f"Failed to create directory {data.path}: {str(e)}") 319 | 320 | 321 | @app.post( 322 | "/list_directory", summary="List a directory" 323 | ) 324 | async def list_directory(data: ListDirectoryRequest = Body(...)): 325 | """ 326 | List contents of a directory. 327 | """ 328 | dir_path = normalize_path(data.path) 329 | if not dir_path.is_dir(): 330 | raise HTTPException(status_code=400, detail="Provided path is not a directory") 331 | 332 | listing = [] 333 | for entry in dir_path.iterdir(): 334 | entry_type = "directory" if entry.is_dir() else "file" 335 | listing.append({"name": entry.name, "type": entry_type}) 336 | 337 | # Return the list directly, FastAPI will serialize it to JSON 338 | return listing 339 | 340 | 341 | @app.post("/directory_tree", summary="Recursive directory tree") 342 | async def directory_tree(data: DirectoryTreeRequest = Body(...)): 343 | """ 344 | Recursively return a tree structure of a directory. 345 | """ 346 | base_path = normalize_path(data.path) 347 | 348 | def build_tree(current: pathlib.Path): 349 | entries = [] 350 | for item in current.iterdir(): 351 | entry = { 352 | "name": item.name, 353 | "type": "directory" if item.is_dir() else "file", 354 | } 355 | if item.is_dir(): 356 | entry["children"] = build_tree(item) 357 | entries.append(entry) 358 | return entries 359 | 360 | return build_tree(base_path) 361 | 362 | 363 | @app.post("/search_files", summary="Search for files") 364 | async def search_files(data: SearchFilesRequest = Body(...)): 365 | """ 366 | Search files and directories matching a pattern. 
367 | """ 368 | base_path = normalize_path(data.path) 369 | results = [] 370 | 371 | for root, dirs, files in os.walk(base_path): 372 | root_path = pathlib.Path(root) 373 | # Apply exclusion patterns 374 | excluded = False 375 | for pattern in data.excludePatterns: 376 | if pathlib.Path(root).match(pattern): 377 | excluded = True 378 | break 379 | if excluded: 380 | continue 381 | for item in files + dirs: 382 | if data.pattern.lower() in item.lower(): 383 | result_path = root_path / item 384 | if any(str(result_path).startswith(alt) for alt in ALLOWED_DIRECTORIES): 385 | results.append(str(result_path)) 386 | 387 | return {"matches": results or ["No matches found"]} 388 | 389 | 390 | @app.post( 391 | "/delete_path", 392 | response_model=Union[SuccessResponse, ConfirmationRequiredResponse], # Updated response model 393 | summary="Delete a file or directory (two-step confirmation)" 394 | ) 395 | async def delete_path(data: DeletePathRequest = Body(...)): 396 | """ 397 | Delete a specified file or directory using a two-step confirmation process. 398 | 399 | 1. Initial request (without confirmation_token): Returns a confirmation token. 400 | 2. Confirmation request (with token): Executes the deletion if the token is valid 401 | and matches the original request parameters (path, recursive). 402 | 403 | Use 'recursive=True' to delete non-empty directories. 
404 | """ 405 | pending_confirmations = load_confirmations() # Load state from file 406 | path = normalize_path(data.path) 407 | now = datetime.now(timezone.utc) 408 | 409 | # --- Step 2: Confirmation Request --- 410 | if data.confirmation_token: 411 | # print(f"Attempting confirmation with token: {data.confirmation_token}") # Removed print 412 | if data.confirmation_token not in pending_confirmations: 413 | # print(f"Error: Token '{data.confirmation_token}' not found in pending_confirmations.") # Removed print 414 | raise HTTPException(status_code=400, detail="Invalid or expired confirmation token.") 415 | 416 | confirmation_data = pending_confirmations[data.confirmation_token] 417 | 418 | # Validate token expiry 419 | if now > confirmation_data["expiry"]: 420 | del pending_confirmations[data.confirmation_token] # Clean up expired token 421 | save_confirmations(pending_confirmations) # Save updated state 422 | raise HTTPException(status_code=400, detail="Confirmation token has expired.") 423 | 424 | # Validate request parameters match 425 | if confirmation_data["path"] != data.path or confirmation_data["recursive"] != data.recursive: 426 | raise HTTPException( 427 | status_code=400, 428 | detail="Request parameters (path, recursive) do not match the original request for this token." 429 | ) 430 | 431 | # --- Parameters match and token is valid: Proceed with deletion --- 432 | del pending_confirmations[data.confirmation_token] # Consume the token 433 | save_confirmations(pending_confirmations) # Save updated state 434 | 435 | try: 436 | if not path.exists(): 437 | # Path might have been deleted between requests, treat as success or specific error? 438 | # For now, raise 404 as it doesn't exist *now*. 
439 | raise HTTPException(status_code=404, detail=f"Path not found: {data.path}") 440 | 441 | if path.is_file(): 442 | path.unlink() 443 | return SuccessResponse(message=f"Successfully deleted file: {data.path}") 444 | elif path.is_dir(): 445 | if data.recursive: 446 | shutil.rmtree(path) 447 | return SuccessResponse(message=f"Successfully deleted directory recursively: {data.path}") 448 | else: 449 | try: 450 | path.rmdir() 451 | return SuccessResponse(message=f"Successfully deleted empty directory: {data.path}") 452 | except OSError as e: 453 | raise HTTPException( 454 | status_code=400, 455 | detail=f"Directory not empty. Use 'recursive=True' to delete non-empty directories. Original error: {e}" 456 | ) 457 | else: 458 | raise HTTPException(status_code=400, detail=f"Path is not a file or directory: {data.path}") 459 | 460 | except PermissionError: 461 | raise HTTPException(status_code=403, detail=f"Permission denied to delete {data.path}") 462 | except Exception as e: 463 | raise HTTPException(status_code=500, detail=f"Failed to delete {data.path}: {e}") 464 | 465 | # --- Step 1: Initial Request (No Token Provided) --- 466 | else: 467 | # Check if path exists before generating token 468 | if not path.exists(): 469 | raise HTTPException(status_code=404, detail=f"Path not found: {data.path}") 470 | 471 | # Generate token and expiry 472 | token = secrets.token_hex(3)[:5] # Generate 6 hex chars (3 bytes), take first 5 473 | expiry_time = now + timedelta(seconds=CONFIRMATION_TTL_SECONDS) 474 | 475 | # Store confirmation details 476 | pending_confirmations[token] = { 477 | "path": data.path, 478 | "recursive": data.recursive, 479 | "expiry": expiry_time, 480 | } 481 | save_confirmations(pending_confirmations) # Save updated state 482 | 483 | # Return confirmation required response 484 | # Construct the user-friendly message 485 | confirmation_message = f"`Confirm deletion of file: {data.path} with token {token}`" 486 | return ConfirmationRequiredResponse( 487 | 
message=confirmation_message, 488 | confirmation_token=token, 489 | expires_at=expiry_time, 490 | ) 491 | 492 | 493 | @app.post("/move_path", response_model=SuccessResponse, summary="Move or rename a file or directory") 494 | async def move_path(data: MovePathRequest = Body(...)): 495 | """ 496 | Move or rename a file or directory from source_path to destination_path. 497 | Both paths must be within the allowed directories. 498 | Returns JSON success message. 499 | """ 500 | source = normalize_path(data.source_path) 501 | destination = normalize_path(data.destination_path) 502 | 503 | try: 504 | if not source.exists(): 505 | raise HTTPException(status_code=404, detail=f"Source path not found: {data.source_path}") 506 | 507 | shutil.move(str(source), str(destination)) 508 | return SuccessResponse(message=f"Successfully moved '{data.source_path}' to '{data.destination_path}'") 509 | 510 | except PermissionError: 511 | raise HTTPException(status_code=403, detail=f"Permission denied for move operation involving '{data.source_path}' or '{data.destination_path}'") 512 | except Exception as e: 513 | raise HTTPException(status_code=500, detail=f"Failed to move '{data.source_path}' to '{data.destination_path}': {e}") 514 | 515 | 516 | @app.post("/get_metadata", summary="Get file or directory metadata") 517 | async def get_metadata(data: GetMetadataRequest = Body(...)): 518 | """ 519 | Retrieve metadata for a specified file or directory path. 
520 | """ 521 | path = normalize_path(data.path) 522 | 523 | try: 524 | if not path.exists(): 525 | raise HTTPException(status_code=404, detail=f"Path not found: {data.path}") 526 | 527 | stat_result = path.stat() 528 | 529 | # Determine type 530 | if path.is_file(): 531 | file_type = "file" 532 | elif path.is_dir(): 533 | file_type = "directory" 534 | else: 535 | file_type = "other" # Should generally not happen for existing paths normalized 536 | 537 | # Format timestamps (use UTC for consistency) 538 | mod_time = datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).isoformat() 539 | # Creation time (st_birthtime) is macOS/BSD specific, st_ctime is metadata change time on Linux 540 | # Use st_ctime as a fallback if st_birthtime isn't available 541 | try: 542 | create_time = datetime.fromtimestamp(stat_result.st_birthtime, tz=timezone.utc).isoformat() 543 | except AttributeError: 544 | create_time = datetime.fromtimestamp(stat_result.st_ctime, tz=timezone.utc).isoformat() 545 | 546 | 547 | metadata = { 548 | "path": str(path), 549 | "type": file_type, 550 | "size_bytes": stat_result.st_size, 551 | "modification_time_utc": mod_time, 552 | "creation_time_utc": create_time, # Note platform differences in definition 553 | "last_metadata_change_time_utc": datetime.fromtimestamp(stat_result.st_ctime, tz=timezone.utc).isoformat(), 554 | } 555 | return metadata 556 | 557 | except PermissionError: 558 | raise HTTPException(status_code=403, detail=f"Permission denied to access metadata for {data.path}") 559 | except Exception as e: 560 | raise HTTPException(status_code=500, detail=f"Failed to get metadata for {data.path}: {e}") 561 | 562 | 563 | @app.post("/search_content", summary="Search for content within files") 564 | async def search_content(data: SearchContentRequest = Body(...)): 565 | """ 566 | Search for text content within files in a specified directory. 
567 | """ 568 | base_path = normalize_path(data.path) 569 | results = [] 570 | search_query_lower = data.search_query.lower() 571 | 572 | if not base_path.is_dir(): 573 | raise HTTPException(status_code=400, detail="Provided path is not a directory") 574 | 575 | iterator = base_path.rglob(data.file_pattern) if data.recursive else base_path.glob(data.file_pattern) 576 | 577 | for item_path in iterator: 578 | if item_path.is_file(): 579 | try: 580 | # Read file line by line to handle potentially large files and different encodings 581 | with item_path.open("r", encoding="utf-8", errors="ignore") as f: 582 | for line_num, line in enumerate(f, 1): 583 | if search_query_lower in line.lower(): 584 | results.append( 585 | { 586 | "file_path": str(item_path), 587 | "line_number": line_num, 588 | "line_content": line.strip(), 589 | } 590 | ) 591 | except Exception as e: 592 | # Log or handle files that cannot be read (e.g., permission errors, binary files) 593 | print(f"Could not read or search file {item_path}: {e}") 594 | continue 595 | 596 | return {"matches": results or ["No matches found"]} 597 | 598 | 599 | @app.get("/list_allowed_directories", summary="List access-permitted directories") 600 | async def list_allowed_directories(): 601 | """ 602 | Show all directories this server can access. 
603 | """ 604 | return {"allowed_directories": ALLOWED_DIRECTORIES} 605 | -------------------------------------------------------------------------------- /servers/filesystem/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | -------------------------------------------------------------------------------- /servers/get-user-info/.dockerignore: -------------------------------------------------------------------------------- 1 | # Include any files or directories that you don't want to be copied to your 2 | # container here (e.g., local build artifacts, temporary files, etc.). 3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /servers/get-user-info/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 4 | # If you need more help, visit the Dockerfile reference guide at 5 | # https://docs.docker.com/go/dockerfile-reference/ 6 | 7 | # Want to help us make this template better? 
Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7 8 | 9 | ARG PYTHON_VERSION=3.10.12 10 | FROM python:${PYTHON_VERSION}-slim as base 11 | 12 | # Prevents Python from writing pyc files. 13 | ENV PYTHONDONTWRITEBYTECODE=1 14 | 15 | # Keeps Python from buffering stdout and stderr to avoid situations where 16 | # the application crashes without emitting any logs due to buffering. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | WORKDIR /app 20 | 21 | # Create a non-privileged user that the app will run under. 22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/ 23 | ARG UID=10001 24 | RUN adduser \ 25 | --disabled-password \ 26 | --gecos "" \ 27 | --home "/nonexistent" \ 28 | --shell "/sbin/nologin" \ 29 | --no-create-home \ 30 | --uid "${UID}" \ 31 | appuser 32 | 33 | # Download dependencies as a separate step to take advantage of Docker's caching. 34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds. 35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into 36 | # into this layer. 37 | RUN --mount=type=cache,target=/root/.cache/pip \ 38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 39 | python -m pip install -r requirements.txt 40 | 41 | # Switch to the non-privileged user to run the application. 42 | USER appuser 43 | 44 | # Copy the source code into the container. 45 | COPY . . 46 | 47 | # Expose the port that the application listens on. 48 | EXPOSE 8000 49 | 50 | # Run the application. 51 | CMD uvicorn 'main:app' --host=0.0.0.0 --port=8000 52 | -------------------------------------------------------------------------------- /servers/get-user-info/README.md: -------------------------------------------------------------------------------- 1 | # 🔐 User Info Proxy API 2 | 3 | A lightweight FastAPI microservice that forwards an Authorization Bearer token to an internal authentication server and returns user details. 
4 | 5 | ## 🚀 Features 6 | 7 | - 🔁 Forwards Bearer tokens to your internal auth endpoint 8 | - 🔒 Built-in error handling 9 | 10 | ## 📦 Endpoints 11 | 12 | ### GET /get_session_user_info 13 | 14 | Forward your existing Bearer token and get authenticated user details. 15 | 16 | 📥 Headers: 17 | 18 | Authorization: Bearer YOUR_TOKEN 19 | 20 | 📤 Response: 21 | 22 | { 23 | "id": "user-id", 24 | "email": "user@example.com", 25 | "name": "Jane Doe", 26 | ... 27 | } 28 | 29 | ## ⚙️ Setup 30 | 31 | 1. Set your auth backend base URL: 32 | 33 | ``` 34 | export OPEN_WEBUI_BASE_URL=http://your-open-webui.com 35 | ``` 36 | 37 | 2. Run the service: 38 | 39 | ``` 40 | uvicorn main:app --host 0.0.0.0 --reload 41 | ``` 42 | 43 | ## 🧩 Environment Variables 44 | 45 | | Name | Description | Default | 46 | |---------------------|--------------------------------------|----------------------| 47 | | OPEN_WEBUI_BASE_URL | Base URL of the internal auth server | http://localhost:8080 | 48 | 49 | ## 🍿 Example 50 | 51 | curl -H "Authorization: Bearer YOUR_TOKEN" http://localhost:8000/get_session_user_info 52 | 53 | ## 🧪 Tech Stack 54 | 55 | - Python 3.11+ 56 | - FastAPI ⚡ 57 | 58 | --- 59 | 60 | Made with ❤️ by your backend team. 61 | -------------------------------------------------------------------------------- /servers/get-user-info/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | server: 3 | build: 4 | context: .
5 | ports: 6 | - 8000:8000 7 | 8 | -------------------------------------------------------------------------------- /servers/get-user-info/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException, Request 2 | from fastapi.middleware.cors import CORSMiddleware 3 | import aiohttp 4 | import os 5 | 6 | OPEN_WEBUI_BASE_URL = os.getenv("OPEN_WEBUI_BASE_URL", "http://localhost:8080") 7 | 8 | app = FastAPI( 9 | title="User Info Proxy API", 10 | version="1.0.0", 11 | description="Fetch user details from the internal authentication server.", 12 | ) 13 | 14 | app.add_middleware( 15 | CORSMiddleware, 16 | allow_origins=["*"], # You may restrict this to certain domains 17 | allow_credentials=True, 18 | allow_methods=["*"], 19 | allow_headers=["*"], 20 | ) 21 | 22 | 23 | @app.get( 24 | "/get_session_user_info", 25 | summary="Forward auth token and retrieve session user details", 26 | description="Get user info from internal auth service using Authorization Bearer token.", 27 | ) 28 | async def get_session_user_info(request: Request): 29 | auth_header = request.headers.get("Authorization") 30 | 31 | print(f"Received Authorization header: {auth_header}") 32 | 33 | if not auth_header or not auth_header.startswith("Bearer "): 34 | raise HTTPException( 35 | status_code=401, detail="Missing or invalid Authorization header" 36 | ) 37 | 38 | try: 39 | async with aiohttp.ClientSession() as session: 40 | async with session.get( 41 | f"{OPEN_WEBUI_BASE_URL}/api/v1/auths/", 42 | headers={"Authorization": auth_header}, 43 | timeout=aiohttp.ClientTimeout(total=10.0), 44 | ) as resp: 45 | 46 | if resp.status != 200: 47 | raise HTTPException( 48 | status_code=resp.status, detail="Failed to retrieve user info" 49 | ) 50 | 51 | data = await resp.json() 52 | 53 | return { 54 | "id": data.get("id"), 55 | "role": data.get("role"), 56 | "name": data.get("name"), 57 | "email": data.get("email"), 58 | } 59 | 60 | except 
aiohttp.ClientError as exc: 61 | raise HTTPException( 62 | status_code=502, detail=f"Error connecting to auth service: {exc}" 63 | ) 64 | -------------------------------------------------------------------------------- /servers/get-user-info/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | 6 | aiohttp -------------------------------------------------------------------------------- /servers/git/README.md: -------------------------------------------------------------------------------- 1 | # 🧰 Git Tool Server 2 | 3 | ⚡️ A FastAPI-based Git tool server implementation. 4 | 5 | > 🚧 Untested MCP Port – Contribution Welcome! 6 | 7 | ## 🚀 Quickstart 8 | 9 | ```bash 10 | git clone https://github.com/open-webui/openapi-servers 11 | cd openapi-servers/servers/git 12 | 13 | pip install -r requirements.txt 14 | uvicorn main:app --host 0.0.0.0 --reload 15 | ``` 16 | 17 | ✅ You're now running the Git tool server! 
-------------------------------------------------------------------------------- /servers/git/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from fastapi.middleware.cors import CORSMiddleware 3 | 4 | import logging 5 | from pathlib import Path 6 | from typing import List, Optional 7 | from enum import Enum 8 | import git 9 | from pydantic import BaseModel, Field 10 | 11 | app = FastAPI( 12 | title="Git Management API", 13 | version="0.1.0", 14 | description="An API to manage Git repositories with explicit endpoints, inputs, and outputs for better OpenAPI schemas.", 15 | ) 16 | 17 | origins = ["*"] 18 | 19 | app.add_middleware( 20 | CORSMiddleware, 21 | allow_origins=origins, 22 | allow_credentials=True, 23 | allow_methods=["*"], 24 | allow_headers=["*"], 25 | ) 26 | 27 | 28 | # ----------------- ENUMS ----------------- 29 | 30 | 31 | class GitTools(str, Enum): 32 | STATUS = "status" 33 | DIFF_UNSTAGED = "diff_unstaged" 34 | DIFF_STAGED = "diff_staged" 35 | DIFF = "diff" 36 | COMMIT = "commit" 37 | ADD = "add" 38 | RESET = "reset" 39 | LOG = "log" 40 | CREATE_BRANCH = "create_branch" 41 | CHECKOUT = "checkout" 42 | SHOW = "show" 43 | INIT = "init" 44 | 45 | 46 | # ----------------- MODELS ----------------- 47 | 48 | 49 | class GitRepoPath(BaseModel): 50 | repo_path: str = Field(..., description="File system path to the Git repository.") 51 | 52 | 53 | class GitStatusRequest(GitRepoPath): 54 | pass 55 | 56 | 57 | class GitDiffUnstagedRequest(GitRepoPath): 58 | pass 59 | 60 | 61 | class GitDiffStagedRequest(GitRepoPath): 62 | pass 63 | 64 | 65 | class GitDiffRequest(GitRepoPath): 66 | target: str = Field(..., description="The branch or commit to diff against.") 67 | 68 | 69 | class GitCommitRequest(GitRepoPath): 70 | message: str = Field(..., description="Commit message for recording the change.") 71 | 72 | 73 | class GitAddRequest(GitRepoPath): 74 | files: List[str] = Field( 
75 | ..., description="List of file paths to add to the staging area." 76 | ) 77 | 78 | 79 | class GitResetRequest(GitRepoPath): 80 | pass 81 | 82 | 83 | class GitLogRequest(GitRepoPath): 84 | max_count: int = Field(10, description="Maximum number of commits to retrieve.") 85 | 86 | 87 | class GitCreateBranchRequest(GitRepoPath): 88 | branch_name: str = Field(..., description="Name of the branch to create.") 89 | base_branch: Optional[str] = Field( 90 | None, description="Optional base branch name to create the new branch from." 91 | ) 92 | 93 | 94 | class GitCheckoutRequest(GitRepoPath): 95 | branch_name: str = Field(..., description="Branch name to checkout.") 96 | 97 | 98 | class GitShowRequest(GitRepoPath): 99 | revision: str = Field( 100 | ..., description="The commit hash or branch/tag name to show." 101 | ) 102 | 103 | 104 | class GitInitRequest(GitRepoPath): 105 | pass 106 | 107 | 108 | class TextResponse(BaseModel): 109 | result: str = Field(..., description="Description of the operation result.") 110 | 111 | 112 | class LogResponse(BaseModel): 113 | commits: List[str] = Field( 114 | ..., description="A list of formatted commit log entries." 
115 | ) 116 | 117 | 118 | # ----------------- UTILITY FUNCTIONS ----------------- 119 | 120 | 121 | def get_repo(repo_path: str) -> git.Repo: 122 | try: 123 | return git.Repo(repo_path) 124 | except git.InvalidGitRepositoryError: 125 | raise HTTPException( 126 | status_code=400, detail=f"Invalid Git repository at '{repo_path}'" 127 | ) 128 | 129 | 130 | # ----------------- API ENDPOINTS ----------------- 131 | 132 | 133 | @app.post( 134 | "/status", 135 | response_model=TextResponse, 136 | description="Get the current status of the Git repository.", 137 | ) 138 | def get_status(request: GitStatusRequest): 139 | repo = get_repo(request.repo_path) 140 | status = repo.git.status() 141 | return TextResponse(result=status) 142 | 143 | 144 | @app.post( 145 | "/diff_unstaged", 146 | response_model=TextResponse, 147 | description="Get differences of unstaged changes.", 148 | ) 149 | def diff_unstaged(request: GitDiffUnstagedRequest): 150 | repo = get_repo(request.repo_path) 151 | diff = repo.git.diff() 152 | return TextResponse(result=diff) 153 | 154 | 155 | @app.post( 156 | "/diff_staged", 157 | response_model=TextResponse, 158 | description="Get differences of staged changes.", 159 | ) 160 | def diff_staged(request: GitDiffStagedRequest): 161 | repo = get_repo(request.repo_path) 162 | diff = repo.git.diff("--cached") 163 | return TextResponse(result=diff) 164 | 165 | 166 | @app.post( 167 | "/diff", 168 | response_model=TextResponse, 169 | description="Get comparison between two branches or commits.", 170 | ) 171 | def diff_target(request: GitDiffRequest): 172 | repo = get_repo(request.repo_path) 173 | diff = repo.git.diff(request.target) 174 | return TextResponse(result=diff) 175 | 176 | 177 | @app.post( 178 | "/commit", 179 | response_model=TextResponse, 180 | description="Commit staged changes to the repository.", 181 | ) 182 | def commit_changes(request: GitCommitRequest): 183 | repo = get_repo(request.repo_path) 184 | commit = repo.index.commit(request.message) 185 | 
return TextResponse(result=f"Committed changes with hash {commit.hexsha}") 186 | 187 | 188 | @app.post("/add", response_model=TextResponse, description="Stage files for commit.") 189 | def add_files(request: GitAddRequest): 190 | repo = get_repo(request.repo_path) 191 | repo.index.add(request.files) 192 | return TextResponse(result="Files staged successfully.") 193 | 194 | 195 | @app.post( 196 | "/reset", response_model=TextResponse, description="Unstage all staged changes." 197 | ) 198 | def reset_changes(request: GitResetRequest): 199 | repo = get_repo(request.repo_path) 200 | repo.index.reset() 201 | return TextResponse(result="All staged changes reset.") 202 | 203 | 204 | @app.post( 205 | "/log", 206 | response_model=LogResponse, 207 | description="Get recent commit history of the repository.", 208 | ) 209 | def get_log(request: GitLogRequest): 210 | repo = get_repo(request.repo_path) 211 | commits = [ 212 | f"Commit: {commit.hexsha}\n" 213 | f"Author: {commit.author}\n" 214 | f"Date: {commit.authored_datetime}\n" 215 | f"Message: {commit.message.strip()}\n" 216 | for commit in repo.iter_commits(max_count=request.max_count) 217 | ] 218 | return LogResponse(commits=commits) 219 | 220 | 221 | @app.post( 222 | "/create_branch", response_model=TextResponse, description="Create a new branch." 223 | ) 224 | def create_branch(request: GitCreateBranchRequest): 225 | repo = get_repo(request.repo_path) 226 | if request.base_branch is None: 227 | base_branch = repo.active_branch 228 | else: 229 | base_branch = repo.refs[request.base_branch] 230 | repo.create_head(request.branch_name, base_branch) 231 | return TextResponse( 232 | result=f"Created branch '{request.branch_name}' from '{base_branch}'." 233 | ) 234 | 235 | 236 | @app.post( 237 | "/checkout", response_model=TextResponse, description="Checkout an existing branch." 
238 | ) 239 | def checkout_branch(request: GitCheckoutRequest): 240 | repo = get_repo(request.repo_path) 241 | repo.git.checkout(request.branch_name) 242 | return TextResponse(result=f"Switched to branch '{request.branch_name}'.") 243 | 244 | 245 | @app.post( 246 | "/show", 247 | response_model=TextResponse, 248 | description="Show details and diff of a specific commit.", 249 | ) 250 | def show_revision(request: GitShowRequest): 251 | repo = get_repo(request.repo_path) 252 | commit = repo.commit(request.revision) 253 | details = ( 254 | f"Commit: {commit.hexsha}\n" 255 | f"Author: {commit.author}\n" 256 | f"Date: {commit.authored_datetime}\n" 257 | f"Message: {commit.message.strip()}\n" 258 | ) 259 | diff = commit.diff( 260 | commit.parents[0] if commit.parents else git.NULL_TREE, create_patch=True 261 | ) 262 | diff_text = "\n".join(d.diff.decode("utf-8") for d in diff) 263 | return TextResponse(result=details + "\n" + diff_text) 264 | 265 | 266 | @app.post( 267 | "/init", response_model=TextResponse, description="Initialize a new Git repository." 
268 | ) 269 | def init_repo(request: GitInitRequest): 270 | try: 271 | repo = git.Repo.init(path=request.repo_path, mkdir=True) 272 | return TextResponse( 273 | result=f"Initialized empty Git repository at '{repo.git_dir}'" 274 | ) 275 | except Exception as e: 276 | raise HTTPException(status_code=500, detail=str(e)) 277 | -------------------------------------------------------------------------------- /servers/git/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | 6 | pytz 7 | python-dateutil -------------------------------------------------------------------------------- /servers/mcp-proxy/README.md: -------------------------------------------------------------------------------- 1 | # 🔄 MCP → OpenAPI Proxy (Reference) 2 | 3 | This folder contains a minimal Python reference implementation demonstrating how to expose MCP tool servers as OpenAPI-compatible REST APIs. 4 | 5 | ⚠️ This is a REFERENCE implementation and is not actively maintained. It exists for educational purposes or for those needing direct customization. 6 | 7 | ✅ For a production-ready, feature-rich solution, we strongly recommend using the maintained `mcpo` tool instead: 8 | 9 | 👉 https://github.com/open-webui/mcpo — Contributions welcome! 10 | 11 | ## 🔧 Quick Start 12 | 13 | Make sure uvx is installed and available. 14 | 15 | Install dependencies: 16 | 17 | ```bash 18 | pip install -r requirements.txt 19 | ``` 20 | 21 | Run the server: 22 | 23 | ```bash 24 | python main.py --host 0.0.0.0 --port 8000 -- uvx mcp-server-time --local-timezone=America/New_York 25 | ``` 26 | 27 | Your MCP server will now be available as an OpenAPI-compatible API. 
28 | 29 | ## 📝 License 30 | 31 | MIT -------------------------------------------------------------------------------- /servers/mcp-proxy/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Body 2 | from fastapi.middleware.cors import CORSMiddleware 3 | from pydantic import create_model 4 | 5 | 6 | from mcp import ClientSession, StdioServerParameters, types 7 | from mcp.client.stdio import stdio_client 8 | 9 | import argparse 10 | import sys 11 | from typing import Dict, Any 12 | 13 | import asyncio 14 | import uvicorn 15 | import json 16 | import os 17 | 18 | 19 | async def create_dynamic_endpoints(app: FastAPI, session: ClientSession): 20 | tools_result = await session.list_tools() 21 | tools = tools_result.tools 22 | 23 | for tool in tools: 24 | print(tool) 25 | endpoint_name = tool.name 26 | endpoint_description = tool.description 27 | schema = tool.inputSchema 28 | 29 | # Dynamically creating a Pydantic model for validation and openAPI coverage 30 | model_fields = {} 31 | required_fields = schema.get("required", []) 32 | 33 | for param_name, param_schema in schema["properties"].items(): 34 | param_type = param_schema["type"] 35 | param_desc = param_schema.get("description", "") 36 | python_type = str # default 37 | 38 | if param_type == "string": 39 | python_type = str 40 | elif param_type == "integer": 41 | python_type = int 42 | elif param_type == "boolean": 43 | python_type = bool 44 | elif param_type == "number": 45 | python_type = float 46 | elif param_type == "object": 47 | python_type = Dict[str, Any] 48 | elif param_type == "array": 49 | python_type = list 50 | # Expand as needed. PRs welcome! 51 | 52 | default_value = ... 
if param_name in required_fields else None 53 | model_fields[param_name] = ( 54 | python_type, 55 | Body(default_value, description=param_desc), 56 | ) 57 | 58 | FormModel = create_model(f"{endpoint_name}_form_model", **model_fields) 59 | 60 | def make_endpoint_func(endpoint_name: str, FormModel): 61 | async def tool(form_data: FormModel): 62 | args = form_data.model_dump() 63 | print(f"Calling {endpoint_name} with arguments:", args) 64 | 65 | tool_call_result = await session.call_tool( 66 | endpoint_name, arguments=args 67 | ) 68 | 69 | response = [] 70 | for content in tool_call_result.content: 71 | 72 | text = content.text 73 | if isinstance(text, str): 74 | try: 75 | text = json.loads(text) 76 | except json.JSONDecodeError: 77 | pass 78 | response.append(text) 79 | 80 | return response 81 | 82 | return tool 83 | 84 | tool = make_endpoint_func(endpoint_name, FormModel) 85 | 86 | # Add endpoint to FastAPI with tool descriptions 87 | app.post( 88 | f"/{endpoint_name}", 89 | summary=endpoint_name.replace("_", " ").title(), 90 | description=endpoint_description, 91 | )(tool) 92 | 93 | 94 | async def run(host: str, port: int, server_cmd: list[str]): 95 | server_params = StdioServerParameters( 96 | command=server_cmd[0], 97 | args=server_cmd[1:], 98 | env={**os.environ}, 99 | ) 100 | 101 | # Open connection to MCP first: 102 | async with stdio_client(server_params) as (read, write): 103 | async with ClientSession(read, write) as session: 104 | result = await session.initialize() 105 | 106 | server_name = ( 107 | result.serverInfo.name 108 | if hasattr(result, "serverInfo") and hasattr(result.serverInfo, "name") 109 | else None 110 | ) 111 | 112 | server_description = ( 113 | f"{server_name.capitalize()} MCP OpenAPI Proxy" 114 | if server_name 115 | else "Automatically generated API endpoints based on MCP tool schemas." 
116 | ) 117 | 118 | server_version = ( 119 | result.serverInfo.version 120 | if hasattr(result, "serverInfo") 121 | and hasattr(result.serverInfo, "version") 122 | else "1.0" 123 | ) 124 | 125 | app = FastAPI( 126 | title=server_name if server_name else "MCP OpenAPI Proxy", 127 | description=server_description, 128 | version=server_version, 129 | ) 130 | 131 | origins = ["*"] 132 | 133 | app.add_middleware( 134 | CORSMiddleware, 135 | allow_origins=origins, 136 | allow_credentials=True, 137 | allow_methods=["*"], 138 | allow_headers=["*"], 139 | ) 140 | 141 | # Dynamic endpoint creation 142 | await create_dynamic_endpoints(app, session) 143 | 144 | config = uvicorn.Config(app=app, host=host, port=port, log_level="info") 145 | server = uvicorn.Server(config) 146 | await server.serve() 147 | 148 | 149 | def parse_args(): 150 | # Separate user args before and after "--" 151 | if "--" not in sys.argv: 152 | print("Usage: python main.py --host 0.0.0.0 --port 8000 -- your_mcp_command") 153 | sys.exit(1) 154 | 155 | split_index = sys.argv.index("--") 156 | proxy_args = sys.argv[1:split_index] 157 | mcp_args = sys.argv[split_index + 1 :] 158 | 159 | parser = argparse.ArgumentParser(description="FastAPI MCP OpenAPI Proxy") 160 | parser.add_argument("--host", type=str, default="0.0.0.0", help="Host to listen on") 161 | parser.add_argument("--port", type=int, default=8000, help="Port to listen on") 162 | 163 | args = parser.parse_args(proxy_args) 164 | 165 | if not mcp_args: 166 | print("Error: You must specify the MCP server command after '--'") 167 | sys.exit(1) 168 | 169 | return args.host, args.port, mcp_args 170 | 171 | 172 | if __name__ == "__main__": 173 | host, port, server_cmd = parse_args() 174 | asyncio.run(run(host, port, server_cmd)) 175 | -------------------------------------------------------------------------------- /servers/mcp-proxy/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | 
uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | 6 | mcp -------------------------------------------------------------------------------- /servers/memory/.dockerignore: -------------------------------------------------------------------------------- 1 | # Include any files or directories that you don't want to be copied to your 2 | # container here (e.g., local build artifacts, temporary files, etc.). 3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /servers/memory/.gitignore: -------------------------------------------------------------------------------- 1 | memory.json -------------------------------------------------------------------------------- /servers/memory/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 4 | # If you need more help, visit the Dockerfile reference guide at 5 | # https://docs.docker.com/go/dockerfile-reference/ 6 | 7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7 8 | 9 | ARG PYTHON_VERSION=3.10.12 10 | FROM python:${PYTHON_VERSION}-slim as base 11 | 12 | # Prevents Python from writing pyc files. 
13 | ENV PYTHONDONTWRITEBYTECODE=1 14 | 15 | # Keeps Python from buffering stdout and stderr to avoid situations where 16 | # the application crashes without emitting any logs due to buffering. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | WORKDIR /app 20 | 21 | # Create a non-privileged user that the app will run under. 22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/ 23 | ARG UID=10001 24 | RUN adduser \ 25 | --disabled-password \ 26 | --gecos "" \ 27 | --home "/nonexistent" \ 28 | --shell "/sbin/nologin" \ 29 | --no-create-home \ 30 | --uid "${UID}" \ 31 | appuser 32 | 33 | # Download dependencies as a separate step to take advantage of Docker's caching. 34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds. 35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into 36 | # into this layer. 37 | RUN --mount=type=cache,target=/root/.cache/pip \ 38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 39 | python -m pip install -r requirements.txt 40 | 41 | # Change the ownership of /app/data directory and its contents to appuser 42 | RUN mkdir -p /app/data && touch /app/data/memory.json && chown -R ${UID}:${UID} /app/data 43 | 44 | # Set a flag for the location of the database 45 | ENV MEMORY_FILE_PATH="/app/data/memory.json" 46 | 47 | # Switch to the non-privileged user to run the application. 48 | USER appuser 49 | 50 | # Copy the source code into the container. 51 | COPY . . 52 | 53 | # Expose the port that the application listens on. 54 | EXPOSE 8000 55 | 56 | # Run the application. 57 | CMD uvicorn 'main:app' --host=0.0.0.0 --port=8000 58 | -------------------------------------------------------------------------------- /servers/memory/README.md: -------------------------------------------------------------------------------- 1 | # 🧠 Memory Tool Server 2 | 3 | A plug-and-play server for memory tools using FastAPI. 
4 | 5 | ## 🚀 Quickstart 6 | 7 | Clone the repo and start the memory server: 8 | 9 | ```bash 10 | git clone https://github.com/open-webui/openapi-servers 11 | cd openapi-servers/servers/memory 12 | pip install -r requirements.txt 13 | uvicorn main:app --host 0.0.0.0 --reload 14 | ``` 15 | 16 | That's it – you're live! 🟢 -------------------------------------------------------------------------------- /servers/memory/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | server: 3 | build: 4 | context: . 5 | ports: 6 | - 8000:8000 7 | volumes: 8 | - memory:/app/data:rw 9 | 10 | volumes: 11 | memory: 12 | 13 | -------------------------------------------------------------------------------- /servers/memory/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException, Body 2 | from fastapi.middleware.cors import CORSMiddleware 3 | 4 | 5 | from pydantic import BaseModel, Field 6 | from typing import List, Literal, Union 7 | from pathlib import Path 8 | import json 9 | import os 10 | 11 | app = FastAPI( 12 | title="Knowledge Graph Server", 13 | version="1.0.0", 14 | description="A structured knowledge graph memory system that supports entity and relation storage, observation tracking, and manipulation.", 15 | ) 16 | 17 | origins = ["*"] 18 | 19 | app.add_middleware( 20 | CORSMiddleware, 21 | allow_origins=origins, 22 | allow_credentials=True, 23 | allow_methods=["*"], 24 | allow_headers=["*"], 25 | ) 26 | 27 | 28 | # ----- Persistence Setup ----- 29 | MEMORY_FILE_PATH_ENV = os.getenv("MEMORY_FILE_PATH", "memory.json") 30 | MEMORY_FILE_PATH = Path( 31 | MEMORY_FILE_PATH_ENV 32 | if Path(MEMORY_FILE_PATH_ENV).is_absolute() 33 | else Path(__file__).parent / MEMORY_FILE_PATH_ENV 34 | ) 35 | 36 | 37 | # ----- Data Models ----- 38 | class Entity(BaseModel): 39 | name: str = Field(..., description="The name of the entity") 40 | entityType: str = 
Field(..., description="The type of the entity") 41 | observations: List[str] = Field( 42 | ..., description="An array of observation contents associated with the entity" 43 | ) 44 | 45 | 46 | class Relation(BaseModel): 47 | from_: str = Field( 48 | ..., 49 | alias="from", 50 | description="The name of the entity where the relation starts", 51 | ) 52 | to: str = Field(..., description="The name of the entity where the relation ends") 53 | relationType: str = Field(..., description="The type of the relation") 54 | 55 | 56 | class KnowledgeGraph(BaseModel): 57 | entities: List[Entity] 58 | relations: List[Relation] 59 | 60 | 61 | class EntityWrapper(BaseModel): 62 | type: Literal["entity"] 63 | name: str 64 | entityType: str 65 | observations: List[str] 66 | 67 | 68 | class RelationWrapper(BaseModel): 69 | type: Literal["relation"] 70 | from_: str = Field(..., alias="from") 71 | to: str 72 | relationType: str 73 | 74 | 75 | # ----- I/O Handlers ----- 76 | def read_graph_file() -> KnowledgeGraph: 77 | if not MEMORY_FILE_PATH.exists(): 78 | return KnowledgeGraph(entities=[], relations=[]) 79 | with open(MEMORY_FILE_PATH, "r", encoding="utf-8") as f: 80 | lines = [line for line in f if line.strip()] 81 | entities = [] 82 | relations = [] 83 | for line in lines: 84 | print(line) 85 | item = json.loads(line) 86 | if item["type"] == "entity": 87 | entities.append( 88 | Entity( 89 | name=item["name"], 90 | entityType=item["entityType"], 91 | observations=item["observations"], 92 | ) 93 | ) 94 | elif item["type"] == "relation": 95 | relations.append(Relation(**item)) 96 | 97 | return KnowledgeGraph(entities=entities, relations=relations) 98 | 99 | 100 | def save_graph(graph: KnowledgeGraph): 101 | lines = [json.dumps({"type": "entity", **e.dict()}) for e in graph.entities] + [ 102 | json.dumps({"type": "relation", **r.dict(by_alias=True)}) 103 | for r in graph.relations 104 | ] 105 | with open(MEMORY_FILE_PATH, "w", encoding="utf-8") as f: 106 | f.write("\n".join(lines)) 107 
| 108 | 109 | # ----- Request Models ----- 110 | 111 | 112 | class CreateEntitiesRequest(BaseModel): 113 | entities: List[Entity] = Field(..., description="List of entities to create") 114 | 115 | 116 | class CreateRelationsRequest(BaseModel): 117 | relations: List[Relation] = Field( 118 | ..., description="List of relations to create. All must be in active voice." 119 | ) 120 | 121 | 122 | class ObservationItem(BaseModel): 123 | entityName: str = Field( 124 | ..., description="The name of the entity to add the observations to" 125 | ) 126 | contents: List[str] = Field( 127 | ..., description="An array of observation contents to add" 128 | ) 129 | 130 | 131 | class DeletionItem(BaseModel): 132 | entityName: str = Field( 133 | ..., description="The name of the entity containing the observations" 134 | ) 135 | observations: List[str] = Field( 136 | ..., description="An array of observations to delete" 137 | ) 138 | 139 | 140 | class AddObservationsRequest(BaseModel): 141 | observations: List[ObservationItem] = Field( 142 | ..., 143 | description="A list of observation additions, each specifying an entity and contents to add", 144 | ) 145 | 146 | 147 | class DeleteObservationsRequest(BaseModel): 148 | deletions: List[DeletionItem] = Field( 149 | ..., 150 | description="A list of observation deletions, each specifying an entity and observations to remove", 151 | ) 152 | 153 | 154 | class DeleteEntitiesRequest(BaseModel): 155 | entityNames: List[str] = Field( 156 | ..., description="An array of entity names to delete" 157 | ) 158 | 159 | 160 | class DeleteRelationsRequest(BaseModel): 161 | relations: List[Relation] = Field( 162 | ..., description="An array of relations to delete" 163 | ) 164 | 165 | 166 | class SearchNodesRequest(BaseModel): 167 | query: str = Field( 168 | ..., 169 | description="The search query to match against entity names, types, and observation content", 170 | ) 171 | 172 | 173 | class OpenNodesRequest(BaseModel): 174 | names: List[str] = 
Field(..., description="An array of entity names to retrieve") 175 | 176 | 177 | # ----- Endpoints ----- 178 | 179 | 180 | @app.post("/create_entities", summary="Create multiple entities in the graph") 181 | def create_entities(req: CreateEntitiesRequest): 182 | graph = read_graph_file() 183 | existing_names = {e.name for e in graph.entities} 184 | new_entities = [e for e in req.entities if e.name not in existing_names] 185 | graph.entities.extend(new_entities) 186 | save_graph(graph) 187 | return new_entities 188 | 189 | 190 | @app.post("/create_relations", summary="Create multiple relations between entities") 191 | def create_relations(req: CreateRelationsRequest): 192 | graph = read_graph_file() 193 | existing = {(r.from_, r.to, r.relationType) for r in graph.relations} 194 | new = [r for r in req.relations if (r.from_, r.to, r.relationType) not in existing] 195 | graph.relations.extend(new) 196 | save_graph(graph) 197 | return new 198 | 199 | 200 | @app.post("/add_observations", summary="Add new observations to existing entities") 201 | def add_observations(req: AddObservationsRequest): 202 | graph = read_graph_file() 203 | results = [] 204 | 205 | for obs in req.observations: 206 | name = obs.entityName.lower() 207 | contents = obs.contents 208 | entity = next((e for e in graph.entities if e.name == name), None) 209 | if not entity: 210 | raise HTTPException(status_code=404, detail=f"Entity {name} not found") 211 | added = [c for c in contents if c not in entity.observations] 212 | entity.observations.extend(added) 213 | results.append({"entityName": name, "addedObservations": added}) 214 | 215 | save_graph(graph) 216 | return results 217 | 218 | 219 | @app.post("/delete_entities", summary="Delete entities and associated relations") 220 | def delete_entities(req: DeleteEntitiesRequest): 221 | graph = read_graph_file() 222 | graph.entities = [e for e in graph.entities if e.name not in req.entityNames] 223 | graph.relations = [ 224 | r 225 | for r in 
graph.relations 226 | if r.from_ not in req.entityNames and r.to not in req.entityNames 227 | ] 228 | save_graph(graph) 229 | return {"message": "Entities deleted successfully"} 230 | 231 | 232 | @app.post("/delete_observations", summary="Delete specific observations from entities") 233 | def delete_observations(req: DeleteObservationsRequest): 234 | graph = read_graph_file() 235 | 236 | for deletion in req.deletions: 237 | name = deletion.entityName.lower() 238 | to_delete = deletion.observations 239 | entity = next((e for e in graph.entities if e.name == name), None) 240 | if entity: 241 | entity.observations = [ 242 | obs for obs in entity.observations if obs not in to_delete 243 | ] 244 | 245 | save_graph(graph) 246 | return {"message": "Observations deleted successfully"} 247 | 248 | 249 | @app.post("/delete_relations", summary="Delete relations from the graph") 250 | def delete_relations(req: DeleteRelationsRequest): 251 | graph = read_graph_file() 252 | del_set = {(r.from_, r.to, r.relationType) for r in req.relations} 253 | graph.relations = [ 254 | r for r in graph.relations if (r.from_, r.to, r.relationType) not in del_set 255 | ] 256 | save_graph(graph) 257 | return {"message": "Relations deleted successfully"} 258 | 259 | 260 | @app.get( 261 | "/read_graph", response_model=KnowledgeGraph, summary="Read entire knowledge graph" 262 | ) 263 | def read_graph(): 264 | return read_graph_file() 265 | 266 | 267 | @app.post( 268 | "/search_nodes", 269 | response_model=KnowledgeGraph, 270 | summary="Search for nodes by keyword", 271 | ) 272 | def search_nodes(req: SearchNodesRequest): 273 | graph = read_graph_file() 274 | print(graph) 275 | entities = [ 276 | e 277 | for e in graph.entities 278 | if req.query.lower() in e.name.lower() 279 | or req.query.lower() in e.entityType.lower() 280 | or any(req.query.lower() in o.lower() for o in e.observations) 281 | ] 282 | names = {e.name for e in entities} 283 | relations = [r for r in graph.relations if r.from_ in 
names and r.to in names] 284 | 285 | print(names, relations) 286 | return KnowledgeGraph(entities=entities, relations=relations) 287 | 288 | 289 | @app.post( 290 | "/open_nodes", response_model=KnowledgeGraph, summary="Open specific nodes by name" 291 | ) 292 | def open_nodes(req: OpenNodesRequest): 293 | graph = read_graph_file() 294 | entities = [e for e in graph.entities if e.name in req.names] 295 | names = {e.name for e in entities} 296 | relations = [r for r in graph.relations if r.from_ in names and r.to in names] 297 | return KnowledgeGraph(entities=entities, relations=relations) 298 | -------------------------------------------------------------------------------- /servers/memory/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | 6 | pytz 7 | python-dateutil -------------------------------------------------------------------------------- /servers/slack/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 4 | # If you need more help, visit the Dockerfile reference guide at 5 | # https://docs.docker.com/go/dockerfile-reference/ 6 | 7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7 8 | 9 | ARG PYTHON_VERSION=3.10.12 10 | FROM python:${PYTHON_VERSION}-slim as base 11 | 12 | # Prevents Python from writing pyc files. 13 | ENV PYTHONDONTWRITEBYTECODE=1 14 | 15 | # Keeps Python from buffering stdout and stderr to avoid situations where 16 | # the application crashes without emitting any logs due to buffering. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | WORKDIR /app 20 | 21 | # Create a non-privileged user that the app will run under. 
22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/ 23 | ARG UID=10001 24 | RUN adduser \ 25 | --disabled-password \ 26 | --gecos "" \ 27 | --home "/nonexistent" \ 28 | --shell "/sbin/nologin" \ 29 | --no-create-home \ 30 | --uid "${UID}" \ 31 | appuser 32 | 33 | # Download dependencies as a separate step to take advantage of Docker's caching. 34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds. 35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into 36 | # into this layer. 37 | RUN --mount=type=cache,target=/root/.cache/pip \ 38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 39 | python -m pip install -r requirements.txt 40 | 41 | # Switch to the non-privileged user to run the application. 42 | USER appuser 43 | 44 | # Copy the source code into the container. 45 | COPY . . 46 | 47 | # Expose the port that the application listens on. 48 | EXPOSE 8000 49 | 50 | # Add a healthcheck to verify the server is running 51 | HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \ 52 | CMD curl --fail http://localhost:8000/ || exit 1 53 | 54 | # Run the application using the JSON array form to avoid shell interpretation issues. 55 | CMD ["uvicorn", "main:app", "--host=0.0.0.0", "--port=8000"] 56 | -------------------------------------------------------------------------------- /servers/slack/README.md: -------------------------------------------------------------------------------- 1 | # 💬 Slack Tool Server 2 | 3 | A powerful FastAPI-based server providing Slack workspace interactions using OpenAPI standards. 
4 | 5 | 📦 Built with: 6 | ⚡️ FastAPI • 📜 OpenAPI • 🐍 Python • 💬 Slack API 7 | 8 | --- 9 | 10 | ## 🚀 Quickstart 11 | 12 | Clone the repo and get started: 13 | 14 | ```bash 15 | git clone https://github.com/open-webui/openapi-servers 16 | cd openapi-servers/servers/slack 17 | 18 | # Install dependencies 19 | pip install -r requirements.txt 20 | 21 | # Set up environment variables 22 | export SLACK_BOT_TOKEN="xoxb-your-bot-token" # Required: Your Slack bot token 23 | export SLACK_TEAM_ID="your-team-id" # Required: Your Slack team ID 24 | export SLACK_CHANNEL_IDS="C1,C2" # Optional: Comma-separated channel IDs to restrict access to 25 | export SERVER_API_KEY="your-secret-key" # Optional: If set, requires 'X-API-Key' header for requests 26 | 27 | # Run the server 28 | uvicorn main:app --host 0.0.0.0 --reload 29 | ``` 30 | 31 | --- 32 | 33 | ## 🔍 About 34 | 35 | This server is part of the OpenAPI Tools Collection. It provides a comprehensive interface to Slack workspace operations, including: 36 | 37 | - 📋 List channels with message history 38 | - 📤 Post messages and replies 39 | - 👥 User information and profiles 40 | - 👋 Add reactions to messages 41 | - 📜 View message threads and history 42 | 43 | All functionality is wrapped in a developer-friendly OpenAPI interface, making it perfect for integration with AI agents, automation tools, or custom Slack applications. 44 | 45 | --- 46 | 47 | ## 🔑 Prerequisites 48 | 49 | Most of this is pulled straight from the Slack Python SDK so the barebones readme can easily be supplemented by reading the official docs. To set up, you need to follow these steps: 50 | 51 | 1. 
**Slack Bot Token**: Create a Slack App and get a Bot User OAuth Token 52 | - Visit [Slack API Apps](https://api.slack.com/apps) 53 | - Create a new app or select existing 54 | - Add necessary bot scopes: 55 | - `channels:history` 56 | - `channels:read` 57 | - `chat:write` 58 | - `reactions:write` 59 | - `users:read` 60 | - `users:read.email` 61 | - Install the app to your workspace 62 | - You'll get the bot token on the last screen. 63 | 2. **Team ID**: Your Slack workspace/team ID 64 | - Found in workspace settings or URL (go to your slack instance via web and it'll be after the slash) 65 | 3. **Channel IDs** (Optional): 66 | - Restrict the server to specific channels 67 | - Comma-separated list of channel IDs 68 | 4. **Server API Key** (`SERVER_API_KEY`, Optional): 69 | - If you set this environment variable to a secret value (e.g., a strong random string), the server will require this key to be passed in the `X-API-Key` HTTP header for all incoming requests. 70 | - This provides a layer of authentication to protect your server endpoint. 71 | - If left unset, the server will accept requests without API key authentication (less secure). 72 | 73 | --- 74 | 75 | ## 🛠️ Available Tools 76 | 77 | The server provides the following Slack tools: 78 | 79 | - `slack_list_channels`: List channels with recent message history 80 | - `slack_post_message`: Send messages to channels 81 | - `slack_reply_to_thread`: Reply to message threads 82 | - `slack_add_reaction`: Add emoji reactions to messages 83 | - `slack_get_channel_history`: Get channel message history 84 | - `slack_get_thread_replies`: Get replies in a thread 85 | - `slack_get_users`: List workspace users 86 | - `slack_get_user_profile`: Get detailed user profiles 87 | 88 | Each tool is available as a dedicated endpoint with full OpenAPI documentation. 
89 | 90 | --- 91 | 92 | ## 🌐 API Documentation 93 | 94 | Once running, explore the interactive API documentation: 95 | 96 | 🖥️ Swagger UI: http://localhost:8000/docs 97 | 📄 OpenAPI JSON: http://localhost:8000/openapi.json 98 | 99 | The documentation includes detailed schemas, example requests, and response formats for all available tools. 100 | 101 | --- 102 | 103 | ## 🔒 Security Notes 104 | 105 | - Keep your `SLACK_BOT_TOKEN` secure 106 | - Use environment variables for sensitive credentials 107 | - Consider implementing additional authentication for the API server in production. Setting the `SERVER_API_KEY` environment variable is the recommended way to add basic authentication. 108 | - If `SERVER_API_KEY` is set, ensure clients send the correct key in the `X-API-Key` header. 109 | - Review Slack's [security best practices](https://api.slack.com/authentication/best-practices) 110 | 111 | --- 112 | 113 | Made with ❤️ by the Open WebUI community 🌍 114 | Explore more tools ➡️ https://github.com/open-webui/openapi-servers 115 | -------------------------------------------------------------------------------- /servers/slack/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | server: 3 | build: 4 | context: . 5 | ports: 6 | - 8000:8000 7 | 8 | -------------------------------------------------------------------------------- /servers/slack/main.py: -------------------------------------------------------------------------------- 1 | """Slack MCP Server – high‑performance version 2 | ------------------------------------------------ 3 | Showcase‑level code quality and pythonic clarity. 
4 | """ 5 | 6 | import os 7 | import asyncio 8 | import logging 9 | import json # For JSONDecodeError 10 | from typing import Optional, List, Dict, Any, Type, Callable 11 | 12 | import httpx 13 | from dotenv import load_dotenv 14 | from fastapi import FastAPI, HTTPException, Body, Depends, Security 15 | from fastapi.middleware.cors import CORSMiddleware 16 | from fastapi.security import APIKeyHeader 17 | from pydantic import BaseModel, Field 18 | 19 | # --------------------------------------------------------------------------- 20 | # Logging 21 | # --------------------------------------------------------------------------- 22 | logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") 23 | logger = logging.getLogger(__name__) 24 | 25 | # --------------------------------------------------------------------------- 26 | # Environment variables 27 | # --------------------------------------------------------------------------- 28 | load_dotenv() 29 | 30 | SLACK_BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN") 31 | SLACK_TEAM_ID = os.getenv("SLACK_TEAM_ID") 32 | SLACK_CHANNEL_IDS_STR = os.getenv("SLACK_CHANNEL_IDS") # Optional 33 | ALLOWED_ORIGINS_STR = os.getenv("ALLOWED_ORIGINS", "*") 34 | SERVER_API_KEY = os.getenv("SERVER_API_KEY") # Optional API key for security 35 | 36 | if not SLACK_BOT_TOKEN: 37 | logger.critical("SLACK_BOT_TOKEN environment variable not set.") 38 | raise ValueError("SLACK_BOT_TOKEN environment variable not set.") 39 | if not SLACK_TEAM_ID: 40 | logger.critical("SLACK_TEAM_ID environment variable not set.") 41 | raise ValueError("SLACK_TEAM_ID environment variable not set.") 42 | 43 | PREDEFINED_CHANNEL_IDS: Optional[List[str]] = ( 44 | [cid.strip() for cid in SLACK_CHANNEL_IDS_STR.split(",")] if SLACK_CHANNEL_IDS_STR else None 45 | ) 46 | 47 | # --------------------------------------------------------------------------- 48 | # FastAPI app setup 49 | # 
--------------------------------------------------------------------------- 50 | app = FastAPI( 51 | title="Slack API Server", 52 | version="1.0.0", 53 | description="FastAPI server providing Slack functionalities via specific, dynamically generated tool endpoints.", 54 | ) 55 | 56 | # CORS 57 | allow_origins = [origin.strip() for origin in ALLOWED_ORIGINS_STR.split(",")] 58 | if allow_origins == ["*"]: 59 | logger.warning("CORS allow_origins is set to '*' which is insecure for production. Consider setting ALLOWED_ORIGINS environment variable.") 60 | 61 | app.add_middleware( 62 | CORSMiddleware, 63 | allow_origins=allow_origins, 64 | allow_credentials=True, 65 | allow_methods=["*"], 66 | allow_headers=["*"], 67 | ) 68 | 69 | # --------------------------------------------------------------------------- 70 | # API key security 71 | # --------------------------------------------------------------------------- 72 | api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False) 73 | 74 | 75 | async def get_api_key(key: str = Security(api_key_header)): 76 | if SERVER_API_KEY: 77 | if not key: 78 | logger.warning("API Key required but not provided in X-API-Key header.") 79 | raise HTTPException(status_code=401, detail="X-API-Key header required") 80 | if key != SERVER_API_KEY: 81 | logger.warning("Invalid API Key provided.") 82 | raise HTTPException(status_code=401, detail="Invalid API Key") 83 | return key # May be None when not required 84 | 85 | 86 | if not SERVER_API_KEY: 87 | logger.warning("SERVER_API_KEY environment variable is not set. 
Server will allow unauthenticated requests.") 88 | 89 | # --------------------------------------------------------------------------- 90 | # Pydantic models (arguments & responses) 91 | # --------------------------------------------------------------------------- 92 | 93 | class ListChannelsArgs(BaseModel): 94 | limit: Optional[int] = Field(100, description="Maximum number of channels to return (default 100, max 200)") 95 | cursor: Optional[str] = Field(None, description="Pagination cursor for next page of results") 96 | 97 | 98 | class PostMessageArgs(BaseModel): 99 | channel_id: str = Field(..., description="The ID of the channel to post to") 100 | text: str = Field(..., description="The message text to post") 101 | 102 | 103 | class ReplyToThreadArgs(BaseModel): 104 | channel_id: str = Field(..., description="The ID of the channel containing the thread") 105 | thread_ts: str = Field(..., description="The timestamp of the parent message (e.g., '1234567890.123456')") 106 | text: str = Field(..., description="The reply text") 107 | 108 | 109 | class AddReactionArgs(BaseModel): 110 | channel_id: str = Field(..., description="The ID of the channel containing the message") 111 | timestamp: str = Field(..., description="The timestamp of the message to react to") 112 | reaction: str = Field(..., description="The name of the emoji reaction (without colons)") 113 | 114 | 115 | class GetChannelHistoryArgs(BaseModel): 116 | channel_id: str = Field(..., description="The ID of the channel") 117 | limit: Optional[int] = Field(10, description="Number of messages to retrieve (default 10)") 118 | 119 | 120 | class GetThreadRepliesArgs(BaseModel): 121 | channel_id: str = Field(..., description="The ID of the channel containing the thread") 122 | thread_ts: str = Field(..., description="The timestamp of the parent message (e.g., '1234567890.123456')") 123 | 124 | 125 | class GetUsersArgs(BaseModel): 126 | cursor: Optional[str] = Field(None, description="Pagination cursor for next 
page of results") 127 | limit: Optional[int] = Field(100, description="Maximum number of users to return (default 100, max 200)") 128 | 129 | 130 | class GetUserProfileArgs(BaseModel): 131 | user_id: str = Field(..., description="The ID of the user") 132 | 133 | 134 | class ToolResponse(BaseModel): 135 | content: Dict[str, Any] = Field(..., description="The JSON response from the Slack API call") 136 | 137 | 138 | # --------------------------------------------------------------------------- 139 | # Slack client (high‑performance) 140 | # --------------------------------------------------------------------------- 141 | 142 | class SlackClient: 143 | """Thin async wrapper over Slack Web API with connection‑pool reuse.""" 144 | 145 | BASE_URL = "https://slack.com/api/" 146 | 147 | def __init__(self, token: str, team_id: str, *, max_connections: int = 20): 148 | self.team_id = team_id 149 | self.headers = { 150 | "Authorization": f"Bearer {token}", 151 | "Content-Type": "application/json; charset=utf-8", 152 | } 153 | limits = httpx.Limits(max_connections=max_connections, max_keepalive_connections=max_connections) 154 | self._client = httpx.AsyncClient( 155 | base_url=self.BASE_URL, 156 | headers=self.headers, 157 | limits=limits, 158 | http2=True, 159 | timeout=10, 160 | ) 161 | 162 | # ---------------- private helpers ---------------- # 163 | async def _request( 164 | self, 165 | method: str, 166 | endpoint: str, 167 | *, 168 | params: Optional[Dict] = None, 169 | json_data: Optional[Dict] = None, 170 | ) -> Dict[str, Any]: 171 | try: 172 | response = await self._client.request(method, endpoint, params=params, json=json_data) 173 | response.raise_for_status() 174 | data = response.json() 175 | if not data.get("ok"): 176 | error_msg = data.get("error", "Unknown Slack API error") 177 | raise HTTPException(status_code=400, detail={"slack_error": error_msg}) 178 | return data 179 | except httpx.HTTPStatusError as e: 180 | if e.response.status_code == 429: 181 | 
retry_after = e.response.headers.get("Retry-After") 182 | detail = ( 183 | f"Slack API rate limit exceeded. Retry after {retry_after} seconds." 184 | if retry_after 185 | else "Slack API rate limit exceeded." 186 | ) 187 | logger.warning("Rate limit hit: %s", detail) 188 | raise HTTPException(status_code=429, detail=detail, headers={"Retry-After": retry_after} if retry_after else {}) 189 | logger.error("HTTP Error %s - %s", e.response.status_code, e.response.text, exc_info=True) 190 | raise HTTPException(status_code=e.response.status_code, detail="Slack API HTTP Error") 191 | except httpx.RequestError as e: 192 | logger.error("Request Error connecting to Slack API: %s", e, exc_info=True) 193 | raise HTTPException(status_code=503, detail=f"Could not connect to Slack API: {e}") 194 | except json.JSONDecodeError as e: 195 | logger.error("Failed to decode JSON: %s", e, exc_info=True) 196 | raise HTTPException(status_code=502, detail="Invalid JSON from Slack API") 197 | except Exception as e: # noqa: BLE001 198 | logger.exception("Unexpected error during Slack request: %s", e) 199 | raise HTTPException(status_code=500, detail=f"Internal error: {type(e).__name__}") 200 | 201 | # ---------------- public helpers ---------------- # 202 | async def channel_with_history(self, channel_id: str, *, history_limit: int = 1) -> Optional[Dict[str, Any]]: 203 | """Return channel metadata plus ≤ ``history_limit`` recent messages, or None.""" 204 | try: 205 | info = await self._request("GET", "conversations.info", params={"channel": channel_id}) 206 | chan = info["channel"] 207 | if chan.get("is_archived"): 208 | return None 209 | hist = await self._request( 210 | "GET", 211 | "conversations.history", 212 | params={"channel": channel_id, "limit": history_limit}, 213 | ) 214 | chan["history"] = hist.get("messages", []) 215 | return chan 216 | except Exception as exc: # noqa: BLE001 217 | logger.warning("Skipping channel %s – %s", channel_id, exc, exc_info=True) 218 | return None 219 | 
220 | # ---------------- API surface ---------------- # 221 | async def get_channel_history(self, args: GetChannelHistoryArgs) -> Dict[str, Any]: 222 | return await self._request("GET", "conversations.history", params={"channel": args.channel_id, "limit": args.limit}) 223 | 224 | async def get_channels(self, args: ListChannelsArgs) -> Dict[str, Any]: # noqa: C901 – keep cohesive 225 | # 1. decide which ids to fetch 226 | if PREDEFINED_CHANNEL_IDS: 227 | ids = PREDEFINED_CHANNEL_IDS 228 | next_cursor = "" 229 | else: 230 | params: Dict[str, Any] = { 231 | "types": "public_channel", 232 | "exclude_archived": "true", 233 | "limit": min(args.limit, 200), 234 | "team_id": self.team_id, 235 | } 236 | if args.cursor: 237 | params["cursor"] = args.cursor 238 | clist = await self._request("GET", "conversations.list", params=params) 239 | ids = [c["id"] for c in clist["channels"]] 240 | next_cursor = clist.get("response_metadata", {}).get("next_cursor", "") 241 | 242 | # 2. fetch metadata + history concurrently under a semaphore 243 | sem = asyncio.Semaphore(10) # adjust parallelism as desired 244 | 245 | async def guarded(cid: str): 246 | async with sem: 247 | return await self.channel_with_history(cid) 248 | 249 | channels = [c for c in await asyncio.gather(*(guarded(cid) for cid in ids)) if c] 250 | return {"ok": True, "channels": channels, "response_metadata": {"next_cursor": next_cursor}} 251 | 252 | async def post_message(self, args: PostMessageArgs) -> Dict[str, Any]: 253 | return await self._request("POST", "chat.postMessage", json_data={"channel": args.channel_id, "text": args.text}) 254 | 255 | async def post_reply(self, args: ReplyToThreadArgs) -> Dict[str, Any]: 256 | return await self._request( 257 | "POST", 258 | "chat.postMessage", 259 | json_data={"channel": args.channel_id, "thread_ts": args.thread_ts, "text": args.text}, 260 | ) 261 | 262 | async def add_reaction(self, args: AddReactionArgs) -> Dict[str, Any]: 263 | return await self._request( 264 | "POST", 
265 | "reactions.add", 266 | json_data={"channel": args.channel_id, "timestamp": args.timestamp, "name": args.reaction}, 267 | ) 268 | 269 | async def get_thread_replies(self, args: GetThreadRepliesArgs) -> Dict[str, Any]: 270 | return await self._request("GET", "conversations.replies", params={"channel": args.channel_id, "ts": args.thread_ts}) 271 | 272 | async def get_users(self, args: GetUsersArgs) -> Dict[str, Any]: 273 | params = {"limit": min(args.limit, 200), "team_id": self.team_id} 274 | if args.cursor: 275 | params["cursor"] = args.cursor 276 | return await self._request("GET", "users.list", params=params) 277 | 278 | async def get_user_profile(self, args: GetUserProfileArgs) -> Dict[str, Any]: 279 | return await self._request("GET", "users.profile.get", params={"user": args.user_id, "include_labels": "true"}) 280 | 281 | # ---------------- lifecycle ---------------- # 282 | async def aclose(self) -> None: # call on app shutdown 283 | await self._client.aclose() 284 | 285 | 286 | # --------------------------------------------------------------------------- 287 | # Instantiate Slack client 288 | # --------------------------------------------------------------------------- 289 | slack_client = SlackClient(token=SLACK_BOT_TOKEN, team_id=SLACK_TEAM_ID) 290 | 291 | 292 | # --------------------------------------------------------------------------- 293 | # Dynamic tool mapping / endpoint generation 294 | # --------------------------------------------------------------------------- 295 | TOOL_MAPPING = { 296 | "slack_list_channels": { 297 | "args_model": ListChannelsArgs, 298 | "method": slack_client.get_channels, 299 | "description": "List public or pre-defined channels in the workspace with pagination", 300 | }, 301 | "slack_post_message": { 302 | "args_model": PostMessageArgs, 303 | "method": slack_client.post_message, 304 | "description": "Post a new message to a Slack channel", 305 | }, 306 | "slack_reply_to_thread": { 307 | "args_model": ReplyToThreadArgs, 
308 | "method": slack_client.post_reply, 309 | "description": "Reply to a specific message thread in Slack", 310 | }, 311 | "slack_add_reaction": { 312 | "args_model": AddReactionArgs, 313 | "method": slack_client.add_reaction, 314 | "description": "Add a reaction emoji to a message", 315 | }, 316 | "slack_get_channel_history": { 317 | "args_model": GetChannelHistoryArgs, 318 | "method": slack_client.get_channel_history, 319 | "description": "Get recent messages from a channel", 320 | }, 321 | "slack_get_thread_replies": { 322 | "args_model": GetThreadRepliesArgs, 323 | "method": slack_client.get_thread_replies, 324 | "description": "Get all replies in a message thread", 325 | }, 326 | "slack_get_users": { 327 | "args_model": GetUsersArgs, 328 | "method": slack_client.get_users, 329 | "description": "Get a list of all users in the workspace with their basic profile information", 330 | }, 331 | "slack_get_user_profile": { 332 | "args_model": GetUserProfileArgs, 333 | "method": slack_client.get_user_profile, 334 | "description": "Get detailed profile information for a specific user", 335 | }, 336 | } 337 | 338 | 339 | # ---------------- endpoint factory ---------------- # 340 | 341 | def create_endpoint_handler(tool_name: str, method: Callable, args_model: Type[BaseModel]): 342 | async def handler(args: args_model = Body(...), api_key: str = Depends(get_api_key)) -> ToolResponse: # noqa: ANN001 343 | try: 344 | result = await method(args=args) 345 | return {"content": result} 346 | except HTTPException: 347 | raise # re‑raise untouched 348 | except Exception as exc: # noqa: BLE001 349 | logger.exception("Error executing tool %s: %s", tool_name, exc) 350 | raise HTTPException(status_code=500, detail=f"Internal server error: {type(exc).__name__}") 351 | 352 | return handler 353 | 354 | 355 | for name, cfg in TOOL_MAPPING.items(): 356 | app.post( 357 | f"/{name}", 358 | response_model=ToolResponse, 359 | summary=cfg["description"], 360 | description=f"Executes the 
{name} tool. Arguments are passed in the request body.", 361 | tags=["Slack Tools"], 362 | name=name, 363 | )(create_endpoint_handler(name, cfg["method"], cfg["args_model"])) 364 | 365 | 366 | # --------------------------------------------------------------------------- 367 | # Lifecycle events 368 | # --------------------------------------------------------------------------- 369 | @app.on_event("shutdown") 370 | async def _close_slack_client(): 371 | await slack_client.aclose() 372 | 373 | 374 | # --------------------------------------------------------------------------- 375 | # Root endpoint 376 | # --------------------------------------------------------------------------- 377 | @app.get("/", summary="Root endpoint", include_in_schema=False) 378 | async def read_root(): 379 | return {"message": "Slack API Server is running. See /docs for available tool endpoints."} 380 | -------------------------------------------------------------------------------- /servers/slack/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi>=0.110.0,<0.111.0 2 | uvicorn[standard]>=0.29.0,<0.30.0 3 | pydantic>=2.6.0,<3.0.0 4 | httpx>=0.27.0,<0.28.0 5 | python-dotenv>=1.0.0,<2.0.0 6 | # NOTE: Run 'pip freeze > requirements.txt' in a virtual environment 7 | # to capture the exact versions of all transitive dependencies 8 | # for truly reproducible builds. 9 | -------------------------------------------------------------------------------- /servers/summarizer-tool/README.md: -------------------------------------------------------------------------------- 1 | # 📚 Local Summarizer Agent 2 | 3 | This FastAPI server acts to summarize a given chunk of text. 4 | 5 | It is assumed that you are running an ollama instance in an adjacent container with the default port available. 

## 📦 Endpoints
### POST /summarize/text
Summarizes the given block of text

📥 Request

Body:
```
{
  "text": "Your blob of text here. It can be unlimited, but is recommended to be within the context window of the LLM you are asking for a summary from."
}
```

📤 Response:

```
{
  "status": "success",
  "summary": "A summary of your text."
}
```

### POST /summarize/chat
Not yet implemented. Summarizes an exported Open WebUI chat JSON blob.

## 🧩 Environment Variables
|Name|Description|Default|
|---|---|---|
|MODEL|The name of the model you are trying to reference. Should match the model in your ollama instance. | llama3|
|MODEL_URL|The URL path to the model you are trying to access.|http://host.docker.internal:11434|

--------------------------------------------------------------------------------
/servers/summarizer-tool/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-webui/openapi-servers/2b7844d6340e5032fb7b0fd8f46918fc0ce71685/servers/summarizer-tool/__init__.py
--------------------------------------------------------------------------------
/servers/summarizer-tool/docker-compose.yml:
--------------------------------------------------------------------------------
services:
  summarizer:
    container_name: summarizer
    image: python:3-slim
    ports:
      - 16000:8000
    restart: unless-stopped
    environment:
      - MODEL=llama3
      - MODEL_URL=http://host.docker.internal:11434
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - .:/app
    entrypoint: >
      sh -c "
      apt update &&
      apt install -y git &&
      cd /app &&
      pip install -r ./requirements.txt &&
      fastapi run
      "
--------------------------------------------------------------------------------
/servers/summarizer-tool/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from .summarizers.text_summarizer import TextSummarizer 4 | 5 | app = FastAPI( 6 | title="Summarizing Server", 7 | version="1.0.0", 8 | description="Leverages an LLM to summarize data", 9 | ) 10 | 11 | summarizers = { 12 | 'TEXT':TextSummarizer() 13 | } 14 | 15 | 16 | class TextRequest(BaseModel): 17 | text: str 18 | 19 | @app.post("/summarize/text") 20 | def summarize_text(data: TextRequest): 21 | try: 22 | result = summarizers['TEXT'].summarize(data.text) 23 | if 'content' in result: 24 | return {"status": "success", "summary":result['content']} 25 | else: 26 | raise HTTPException(status_code=500, detail=str(result['error'])) 27 | except Exception as e: 28 | raise HTTPException(status_code=500, detail=str(e)) -------------------------------------------------------------------------------- /servers/summarizer-tool/requirements.txt: -------------------------------------------------------------------------------- 1 | annotated-types==0.7.0 2 | anyio==4.9.0 3 | certifi==2025.1.31 4 | charset-normalizer==3.4.1 5 | click==8.1.8 6 | dnspython==2.7.0 7 | email_validator==2.2.0 8 | fastapi==0.115.12 9 | fastapi-cli==0.0.7 10 | h11==0.14.0 11 | httpcore==1.0.8 12 | httptools==0.6.4 13 | httpx==0.28.1 14 | idna==3.10 15 | Jinja2==3.1.6 16 | markdown-it-py==3.0.0 17 | MarkupSafe==3.0.2 18 | mdurl==0.1.2 19 | pydantic==2.11.3 20 | pydantic_core==2.33.1 21 | Pygments==2.19.1 22 | python-dotenv==1.1.0 23 | python-multipart==0.0.20 24 | PyYAML==6.0.2 25 | requests==2.32.3 26 | rich==14.0.0 27 | rich-toolkit==0.14.1 28 | shellingham==1.5.4 29 | sniffio==1.3.1 30 | starlette==0.46.2 31 | typer==0.15.2 32 | typing-inspection==0.4.0 33 | typing_extensions==4.13.2 34 | urllib3==2.4.0 35 | uvicorn==0.34.1 36 | uvloop==0.21.0 37 | watchfiles==1.0.5 38 | websockets==15.0.1 39 | 
-------------------------------------------------------------------------------- /servers/summarizer-tool/summarizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-webui/openapi-servers/2b7844d6340e5032fb7b0fd8f46918fc0ce71685/servers/summarizer-tool/summarizers/__init__.py -------------------------------------------------------------------------------- /servers/summarizer-tool/summarizers/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | class BaseSummarizer(ABC): 4 | @abstractmethod 5 | def summarize(self, data: str) -> dict: 6 | """Summarize data""" 7 | pass -------------------------------------------------------------------------------- /servers/summarizer-tool/summarizers/text_summarizer.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from .base import BaseSummarizer 3 | import os 4 | 5 | MODEL_URL=os.environ.get('MODEL_URL') 6 | MODEL=os.environ.get('MODEL') 7 | SUMMARIZE_PROMPT = """You are the summarizing agent in a long chain of agents. 8 | It is your job to responsibly capture the entirety of what is being described in incoming documents. 9 | You can scrap small details, but you must make sure to hit all the major points. 10 | These documents will be used in RAG down the line. 11 | 12 | 13 | For example, given the following text: 14 | "I've got updates on the tiny brains if\nyou are not familiar with brain\norganoids they are tiny human brains\nthat we can grow from stem cells you can\ngrow them in a literal jar if you want\nto but you can also hook them up to a\ncomputer or llm since a company called\nfinal spark decided to release brain\norganoid computation for industrial use\n" 15 | 16 | You would respond with 17 | 18 | "The speaker is discussing human brain stem cells being grown for industrial use." 
19 | 20 | Another example: 21 | hi, i'\''m isopod (formerly hornet)\n \ni'\''m a software engineer\n \ni write code, make costumes, and write music 22 | 23 | You would respond with 24 | Isopod, formerly hornet, is a software engineer who makes costumes and writes music. 25 | 26 | You always sanitize data. You always remove \n. You never mention yourself in your summaries. You never infer, only summarize what is presented. You never describe the text as summarized: you always just give the summary. 27 | """ 28 | 29 | class TextSummarizer(BaseSummarizer): 30 | def summarize(self, data): 31 | payload = { 32 | "model":MODEL, 33 | "system": SUMMARIZE_PROMPT, 34 | "prompt":data, 35 | "stream":False, 36 | "options":{ 37 | "temperature":0.5 38 | } 39 | } 40 | url = MODEL_URL + '/api/generate' 41 | result = requests.post(url=url, json=payload) 42 | if result.status_code == 200: 43 | json_data = result.json() 44 | if 'response' in json_data: 45 | return { 46 | 'type': 'text', 47 | 'source': url, 48 | 'content': json_data['response'] 49 | } 50 | print(result.content) 51 | return { 52 | 'type': 'text', 53 | 'source': url, 54 | 'error': result.status_code 55 | } 56 | 57 | 58 | -------------------------------------------------------------------------------- /servers/time/.dockerignore: -------------------------------------------------------------------------------- 1 | # Include any files or directories that you don't want to be copied to your 2 | # container here (e.g., local build artifacts, temporary files, etc.). 
3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /servers/time/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 4 | # If you need more help, visit the Dockerfile reference guide at 5 | # https://docs.docker.com/go/dockerfile-reference/ 6 | 7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7 8 | 9 | ARG PYTHON_VERSION=3.10.12 10 | FROM python:${PYTHON_VERSION}-slim as base 11 | 12 | # Prevents Python from writing pyc files. 13 | ENV PYTHONDONTWRITEBYTECODE=1 14 | 15 | # Keeps Python from buffering stdout and stderr to avoid situations where 16 | # the application crashes without emitting any logs due to buffering. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | WORKDIR /app 20 | 21 | # Create a non-privileged user that the app will run under. 22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/ 23 | ARG UID=10001 24 | RUN adduser \ 25 | --disabled-password \ 26 | --gecos "" \ 27 | --home "/nonexistent" \ 28 | --shell "/sbin/nologin" \ 29 | --no-create-home \ 30 | --uid "${UID}" \ 31 | appuser 32 | 33 | # Download dependencies as a separate step to take advantage of Docker's caching. 
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds. 35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into 36 | # into this layer. 37 | RUN --mount=type=cache,target=/root/.cache/pip \ 38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 39 | python -m pip install -r requirements.txt 40 | 41 | # Switch to the non-privileged user to run the application. 42 | USER appuser 43 | 44 | # Copy the source code into the container. 45 | COPY . . 46 | 47 | # Expose the port that the application listens on. 48 | EXPOSE 8000 49 | 50 | # Run the application. 51 | CMD uvicorn 'main:app' --host=0.0.0.0 --port=8000 52 | -------------------------------------------------------------------------------- /servers/time/README.md: -------------------------------------------------------------------------------- 1 | # 🕒 Time Tool Server 2 | 3 | Blazingly fast time API server ⚡️ 4 | 5 | ## 🚀 Quickstart 6 | 7 | ```bash 8 | git clone https://github.com/open-webui/openapi-servers 9 | cd openapi-servers/servers/time 10 | pip install -r requirements.txt 11 | uvicorn main:app --host 0.0.0.0 --reload 12 | ``` 13 | 14 | You're live. ⏱️📡 -------------------------------------------------------------------------------- /servers/time/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | server: 3 | build: 4 | context: . 
5 | ports: 6 | - 8000:8000 7 | 8 | -------------------------------------------------------------------------------- /servers/time/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException, Body 2 | from fastapi.middleware.cors import CORSMiddleware 3 | 4 | 5 | from pydantic import BaseModel, Field 6 | from datetime import datetime, timezone 7 | from typing import Literal 8 | import pytz 9 | from dateutil import parser as dateutil_parser 10 | 11 | app = FastAPI( 12 | title="Secure Time Utilities API", 13 | version="1.0.0", 14 | description="Provides secure UTC/local time retrieval, formatting, timezone conversion, and comparison.", 15 | ) 16 | 17 | 18 | origins = ["*"] 19 | 20 | app.add_middleware( 21 | CORSMiddleware, 22 | allow_origins=origins, 23 | allow_credentials=True, 24 | allow_methods=["*"], 25 | allow_headers=["*"], 26 | ) 27 | 28 | 29 | # ------------------------------- 30 | # Pydantic models 31 | # ------------------------------- 32 | 33 | 34 | class FormatTimeInput(BaseModel): 35 | format: str = Field( 36 | "%Y-%m-%d %H:%M:%S", description="Python strftime format string" 37 | ) 38 | timezone: str = Field( 39 | "UTC", description="IANA timezone name (e.g., UTC, America/New_York)" 40 | ) 41 | 42 | 43 | class ConvertTimeInput(BaseModel): 44 | timestamp: str = Field( 45 | ..., description="ISO 8601 formatted time string (e.g., 2024-01-01T12:00:00Z)" 46 | ) 47 | from_tz: str = Field( 48 | ..., description="Original IANA time zone of input (e.g. 
UTC or Europe/Berlin)" 49 | ) 50 | to_tz: str = Field(..., description="Target IANA time zone to convert to") 51 | 52 | 53 | class ElapsedTimeInput(BaseModel): 54 | start: str = Field(..., description="Start timestamp in ISO 8601 format") 55 | end: str = Field(..., description="End timestamp in ISO 8601 format") 56 | units: Literal["seconds", "minutes", "hours", "days"] = Field( 57 | "seconds", description="Unit for elapsed time" 58 | ) 59 | 60 | 61 | class ParseTimestampInput(BaseModel): 62 | timestamp: str = Field( 63 | ..., description="Flexible input timestamp string (e.g., 2024-06-01 12:00 PM)" 64 | ) 65 | timezone: str = Field( 66 | "UTC", description="Assumed timezone if none is specified in input" 67 | ) 68 | 69 | 70 | # ------------------------------- 71 | # Routes 72 | # ------------------------------- 73 | 74 | 75 | @app.get("/get_current_utc_time", summary="Current UTC time") 76 | def get_current_utc(): 77 | """ 78 | Returns the current time in UTC in ISO format. 79 | """ 80 | return {"utc": datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()} 81 | 82 | 83 | @app.get("/get_current_local_time", summary="Current Local Time") 84 | def get_current_local(): 85 | """ 86 | Returns the current time in local timezone in ISO format. 87 | """ 88 | return {"local_time": datetime.now().isoformat()} 89 | 90 | 91 | @app.post("/format_time", summary="Format current time") 92 | def format_current_time(data: FormatTimeInput): 93 | """ 94 | Return the current time formatted for a specific timezone and format. 
95 | """ 96 | try: 97 | tz = pytz.timezone(data.timezone) 98 | except Exception: 99 | raise HTTPException( 100 | status_code=400, detail=f"Invalid timezone: {data.timezone}" 101 | ) 102 | now = datetime.now(tz) 103 | try: 104 | return {"formatted_time": now.strftime(data.format)} 105 | except Exception as e: 106 | raise HTTPException(status_code=400, detail=f"Invalid format string: {e}") 107 | 108 | 109 | @app.post("/convert_time", summary="Convert between timezones") 110 | def convert_time(data: ConvertTimeInput): 111 | """ 112 | Convert a timestamp from one timezone to another. 113 | """ 114 | try: 115 | from_zone = pytz.timezone(data.from_tz) 116 | to_zone = pytz.timezone(data.to_tz) 117 | except Exception as e: 118 | raise HTTPException(status_code=400, detail=f"Invalid timezone: {e}") 119 | 120 | try: 121 | dt = dateutil_parser.parse(data.timestamp) 122 | if dt.tzinfo is None: 123 | dt = from_zone.localize(dt) 124 | else: 125 | dt = dt.astimezone(from_zone) 126 | converted = dt.astimezone(to_zone) 127 | return {"converted_time": converted.isoformat()} 128 | except Exception as e: 129 | raise HTTPException(status_code=400, detail=f"Invalid timestamp: {e}") 130 | 131 | 132 | @app.post("/elapsed_time", summary="Time elapsed between timestamps") 133 | def elapsed_time(data: ElapsedTimeInput): 134 | """ 135 | Calculate the difference between two timestamps in chosen units. 
136 | """ 137 | try: 138 | start_dt = dateutil_parser.parse(data.start) 139 | end_dt = dateutil_parser.parse(data.end) 140 | delta = end_dt - start_dt 141 | except Exception as e: 142 | raise HTTPException(status_code=400, detail=f"Invalid timestamps: {e}") 143 | 144 | seconds = delta.total_seconds() 145 | result = { 146 | "seconds": seconds, 147 | "minutes": seconds / 60, 148 | "hours": seconds / 3600, 149 | "days": seconds / 86400, 150 | } 151 | 152 | return {"elapsed": result[data.units], "unit": data.units} 153 | 154 | 155 | @app.post("/parse_timestamp", summary="Parse and normalize timestamps") 156 | def parse_timestamp(data: ParseTimestampInput): 157 | """ 158 | Parse human-friendly input timestamp and return standardized UTC ISO time. 159 | """ 160 | try: 161 | tz = pytz.timezone(data.timezone) 162 | dt = dateutil_parser.parse(data.timestamp) 163 | if dt.tzinfo is None: 164 | dt = tz.localize(dt) 165 | dt_utc = dt.astimezone(pytz.utc) 166 | return {"utc": dt_utc.isoformat()} 167 | except Exception as e: 168 | raise HTTPException(status_code=400, detail=f"Could not parse: {e}") 169 | 170 | 171 | @app.get("/list_time_zones", summary="All valid time zones") 172 | def list_time_zones(): 173 | """ 174 | Return a list of all valid IANA time zones. 175 | """ 176 | return pytz.all_timezones 177 | -------------------------------------------------------------------------------- /servers/time/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn[standard] 3 | pydantic 4 | python-multipart 5 | 6 | pytz 7 | python-dateutil -------------------------------------------------------------------------------- /servers/weather/.dockerignore: -------------------------------------------------------------------------------- 1 | # Include any files or directories that you don't want to be copied to your 2 | # container here (e.g., local build artifacts, temporary files, etc.). 
3 | # 4 | # For more help, visit the .dockerignore file reference guide at 5 | # https://docs.docker.com/go/build-context-dockerignore/ 6 | 7 | **/.DS_Store 8 | **/__pycache__ 9 | **/.venv 10 | **/.classpath 11 | **/.dockerignore 12 | **/.env 13 | **/.git 14 | **/.gitignore 15 | **/.project 16 | **/.settings 17 | **/.toolstarget 18 | **/.vs 19 | **/.vscode 20 | **/*.*proj.user 21 | **/*.dbmdl 22 | **/*.jfm 23 | **/bin 24 | **/charts 25 | **/docker-compose* 26 | **/compose.y*ml 27 | **/Dockerfile* 28 | **/node_modules 29 | **/npm-debug.log 30 | **/obj 31 | **/secrets.dev.yaml 32 | **/values.dev.yaml 33 | LICENSE 34 | README.md 35 | -------------------------------------------------------------------------------- /servers/weather/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Comments are provided throughout this file to help you get started. 4 | # If you need more help, visit the Dockerfile reference guide at 5 | # https://docs.docker.com/go/dockerfile-reference/ 6 | 7 | # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7 8 | 9 | ARG PYTHON_VERSION=3.10.12 10 | FROM python:${PYTHON_VERSION}-slim as base 11 | 12 | # Prevents Python from writing pyc files. 13 | ENV PYTHONDONTWRITEBYTECODE=1 14 | 15 | # Keeps Python from buffering stdout and stderr to avoid situations where 16 | # the application crashes without emitting any logs due to buffering. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | WORKDIR /app 20 | 21 | # Create a non-privileged user that the app will run under. 22 | # See https://docs.docker.com/go/dockerfile-user-best-practices/ 23 | ARG UID=10001 24 | RUN adduser \ 25 | --disabled-password \ 26 | --gecos "" \ 27 | --home "/nonexistent" \ 28 | --shell "/sbin/nologin" \ 29 | --no-create-home \ 30 | --uid "${UID}" \ 31 | appuser 32 | 33 | # Download dependencies as a separate step to take advantage of Docker's caching. 
34 | # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds. 35 | # Leverage a bind mount to requirements.txt to avoid having to copy them into 36 | # into this layer. 37 | RUN --mount=type=cache,target=/root/.cache/pip \ 38 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 39 | python -m pip install -r requirements.txt 40 | 41 | # Switch to the non-privileged user to run the application. 42 | USER appuser 43 | 44 | # Copy the source code into the container. 45 | COPY . . 46 | 47 | # Expose the port that the application listens on. 48 | EXPOSE 8000 49 | 50 | # Run the application. 51 | CMD uvicorn 'main:app' --host=0.0.0.0 --port=8000 52 | -------------------------------------------------------------------------------- /servers/weather/README.md: -------------------------------------------------------------------------------- 1 | # ⛅ Weather Tool Server 2 | 3 | A sleek and simple FastAPI-based server to provide weather data using OpenAPI standards. 4 | 5 | 📦 Built with: 6 | ⚡️ FastAPI • 📜 OpenAPI • 🧰 Python 7 | 8 | --- 9 | 10 | ## 🚀 Quickstart 11 | 12 | Clone the repo and get started in seconds: 13 | 14 | ```bash 15 | git clone https://github.com/open-webui/openapi-servers 16 | cd openapi-servers/servers/weather 17 | 18 | # Install dependencies 19 | pip install -r requirements.txt 20 | 21 | # Run the server 22 | uvicorn main:app --host 0.0.0.0 --reload 23 | ``` 24 | 25 | --- 26 | 27 | ## 🔍 About 28 | 29 | This server is part of the OpenAPI Tools Collection. Use it to fetch real-time weather information, location-based forecasts, and more — all wrapped in a developer-friendly OpenAPI interface. 30 | 31 | Compatible with any OpenAPI-supported ecosystem, including: 32 | 33 | - 🌀 FastAPI 34 | - 📘 Swagger UI 35 | - 🧪 API testing tools 36 | 37 | --- 38 | 39 | ## 🚧 Customization 40 | 41 | Plug in your favorite weather provider API, tailor endpoints, or extend the OpenAPI spec. 
import requests
import reverse_geocoder as rg  # offline reverse geocoding: (lat, lon) -> nearest place record
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import Optional, List

app = FastAPI(
    title="Weather API",
    version="1.0.0",
    description="Provides weather retrieval by latitude and longitude using Open-Meteo.",
)

# Wide-open CORS: this server only exposes public, read-only weather data.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# -------------------------------
# Pydantic models
# -------------------------------

class CurrentWeather(BaseModel):
    """Current conditions, mirroring Open-Meteo's `current` response section."""

    time: str = Field(..., description="ISO 8601 format timestamp")
    # NOTE: the previous self-aliases (alias == field name) were redundant and removed.
    temperature_2m: float = Field(..., description="Air temperature at 2 meters above ground")
    wind_speed_10m: float = Field(..., description="Wind speed at 10 meters above ground")


class HourlyUnits(BaseModel):
    """Unit strings for each hourly series (as reported by Open-Meteo)."""

    time: str
    temperature_2m: str
    relative_humidity_2m: str
    wind_speed_10m: str


class HourlyData(BaseModel):
    """Parallel arrays of hourly forecast values; all lists share one time index."""

    time: List[str]
    temperature_2m: List[float]
    relative_humidity_2m: List[int]  # integer percentage
    wind_speed_10m: List[float]


class WeatherForecastOutput(BaseModel):
    """Top-level response shape mirroring the Open-Meteo /v1/forecast payload."""

    latitude: float
    longitude: float
    generationtime_ms: float
    utc_offset_seconds: int
    timezone: str
    timezone_abbreviation: str
    elevation: float
    current: CurrentWeather = Field(..., description="Current weather conditions")
    hourly_units: HourlyUnits
    hourly: HourlyData

# -------------------------------
# Routes
# -------------------------------

OPEN_METEO_URL = "https://api.open-meteo.com/v1/forecast"
# Countries officially using Fahrenheit
FAHRENHEIT_COUNTRIES = {"US", "LR", "MM"}  # USA, Liberia, Myanmar
# Upstream calls must never hang a request worker indefinitely (fix: the
# original requests.get had no timeout).
REQUEST_TIMEOUT_SECONDS = 10


def _temperature_unit_for(latitude: float, longitude: float) -> str:
    """Return "fahrenheit" or "celsius" based on the country at the coordinates.

    Falls back to Celsius whenever the reverse geocoder yields no result or
    raises: unit selection is a convenience and must never fail the request.
    """
    try:
        geo_results = rg.search((latitude, longitude), mode=1)  # mode=1: single result set
        if geo_results and geo_results[0]["cc"] in FAHRENHEIT_COUNTRIES:
            return "fahrenheit"
    except Exception:
        # Best-effort geocoding only; default below.
        pass
    return "celsius"


@app.get("/forecast", response_model=WeatherForecastOutput, summary="Get current weather and forecast")
def get_weather_forecast(
    latitude: float = Query(..., ge=-90, le=90, description="Latitude for the location (e.g., 52.52)"),
    longitude: float = Query(..., ge=-180, le=180, description="Longitude for the location (e.g., 13.41)"),
):
    """
    Retrieves current weather conditions and hourly forecast data
    for the specified latitude and longitude using the Open-Meteo API.
    Temperature unit (Celsius/Fahrenheit) is determined automatically based on location.

    Raises:
        HTTPException 503: Open-Meteo could not be reached or returned an HTTP error.
        HTTPException 500: the upstream payload was malformed or another internal error occurred.
    """
    params = {
        "latitude": latitude,
        "longitude": longitude,
        "current": "temperature_2m,wind_speed_10m",
        "hourly": "temperature_2m,relative_humidity_2m,wind_speed_10m",
        "timezone": "auto",
        "temperature_unit": _temperature_unit_for(latitude, longitude),
    }
    try:
        response = requests.get(OPEN_METEO_URL, params=params, timeout=REQUEST_TIMEOUT_SECONDS)
        response.raise_for_status()  # surface 4xx/5xx as RequestException -> 503 below
        data = response.json()

        # Basic validation to ensure expected keys are present before the
        # response_model validation runs.
        if "current" not in data or "hourly" not in data:
            raise HTTPException(status_code=500, detail="Unexpected response format from Open-Meteo API")

        # Pydantic validates the structure against WeatherForecastOutput.
        return data

    except HTTPException:
        # Fix: previously the 500 raised above was caught by the broad
        # `except Exception` and re-wrapped into a second, garbled 500.
        raise
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=503, detail=f"Error connecting to Open-Meteo API: {e}")
    except Exception as e:
        # Catch other potential errors during processing
        raise HTTPException(status_code=500, detail=f"An internal error occurred: {e}")
reverse_geocoder --------------------------------------------------------------------------------