├── mcp_simple_openai_assistant ├── __init__.py ├── __main__.py ├── thread_store.py ├── assistant_manager.py └── app.py ├── Dockerfile ├── smithery.yaml ├── pyproject.toml ├── LICENSE ├── .gitignore ├── README.md └── test_server.py /mcp_simple_openai_assistant/__init__.py: -------------------------------------------------------------------------------- 1 | """A simple MCP server for interacting with OpenAI assistants.""" 2 | 3 | # This file can be empty. Its presence indicates that this directory is a Python package. -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile 2 | # Use an official Python image 3 | FROM python:3.10-slim 4 | 5 | # Set the working directory 6 | WORKDIR /app 7 | 8 | # Copy the project files into the container 9 | COPY . . 10 | 11 | # Install the dependencies 12 | RUN pip install . 13 | 14 | # Set the environment variable for OpenAI API key (should be set at runtime) 15 | # ENV OPENAI_API_KEY your-api-key-here 16 | 17 | # Command to run the MCP server 18 | ENTRYPOINT ["python", "-m", "mcp_simple_openai_assistant"] -------------------------------------------------------------------------------- /smithery.yaml: -------------------------------------------------------------------------------- 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml 2 | 3 | startCommand: 4 | type: stdio 5 | configSchema: 6 | # JSON Schema defining the configuration options for the MCP. 7 | type: object 8 | required: 9 | - openaiApiKey 10 | properties: 11 | openaiApiKey: 12 | type: string 13 | description: The API key for the OpenAI service. 14 | commandFunction: 15 | # A function that produces the CLI command to start the MCP on stdio. 
16 | |- 17 | (config) => ({command: 'python', args: ['-m', 'mcp_simple_openai_assistant'], env: {OPENAI_API_KEY: config.openaiApiKey}}) -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "mcp-simple-openai-assistant" 7 | version = "0.4.1" 8 | authors = [ 9 | {name = "Andy Brandt", email = "andy@codesprinters.com"} 10 | ] 11 | description = "A simple MCP server for interacting with OpenAI assistants" 12 | readme = "README.md" 13 | requires-python = ">=3.10" 14 | license="MIT" 15 | classifiers = [ 16 | "Programming Language :: Python :: 3", 17 | "Operating System :: OS Independent", 18 | ] 19 | dependencies = [ 20 | "fastmcp>=2.10.0", 21 | "openai>=1.0.0" 22 | ] 23 | 24 | [project.optional-dependencies] 25 | dev = [ 26 | "python-dotenv", 27 | "pytest", 28 | "pytest-asyncio" 29 | ] 30 | 31 | [project.scripts] 32 | mcp-simple-openai-assistant = "mcp_simple_openai_assistant.__main__:main" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Andy Brandt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
def main():
    """Validate configuration, build the manager, and start the MCP server.

    Requires OPENAI_API_KEY in the environment (supplied by the MCP client);
    DB_PATH may optionally override the default thread-database location.
    """
    # The client is expected to inject the API key via the environment.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        sys.stderr.write("Error: OPENAI_API_KEY environment variable not set.\n")
        sys.exit(1)

    # Fall back to the default database location unless DB_PATH is set.
    db_path = os.getenv("DB_PATH", DEFAULT_DB_PATH)

    # Wire the fully initialized manager into the app module, then serve.
    app.manager = AssistantManager(api_key=api_key, db_path=db_path)
    app.app.run()
optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 
95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
162 | #.idea/ 163 | 164 | # Environment variables 165 | .env 166 | .env.* 167 | !.env.example 168 | -------------------------------------------------------------------------------- /mcp_simple_openai_assistant/thread_store.py: -------------------------------------------------------------------------------- 1 | """A module for managing a local SQLite database of OpenAI Assistant threads. 2 | 3 | This module provides a ThreadStore class that encapsulates all database 4 | operations, including creating the database and table, as well as adding, 5 | updating, listing, and deleting thread records. This allows the application 6 | to manage conversation threads persistently. 7 | """ 8 | 9 | import sqlite3 10 | import os 11 | from datetime import datetime, timezone 12 | 13 | class ThreadStore: 14 | """Manages the persistence of thread data in a local SQLite database.""" 15 | 16 | def __init__(self, db_path: str): 17 | """Initializes the ThreadStore with a path to the SQLite database file. 18 | 19 | Args: 20 | db_path: The file path for the SQLite database. 
21 | """ 22 | if not db_path: 23 | raise ValueError("Database path cannot be empty.") 24 | self.db_path = db_path 25 | self._conn = None 26 | 27 | def _get_connection(self) -> sqlite3.Connection: 28 | """Returns a SQLite database connection.""" 29 | if self._conn is None: 30 | self._conn = sqlite3.connect(self.db_path) 31 | self._conn.row_factory = sqlite3.Row 32 | return self._conn 33 | 34 | def initialize_database(self): 35 | """Creates the database and the threads table if they do not exist.""" 36 | # Ensure the directory for the database file exists, but skip for in-memory DB 37 | if self.db_path != ':memory:': 38 | os.makedirs(os.path.dirname(self.db_path), exist_ok=True) 39 | conn = self._get_connection() 40 | cursor = conn.cursor() 41 | cursor.execute(""" 42 | CREATE TABLE IF NOT EXISTS threads ( 43 | id INTEGER PRIMARY KEY AUTOINCREMENT, 44 | thread_id TEXT NOT NULL UNIQUE, 45 | name TEXT NOT NULL, 46 | description TEXT, 47 | created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, 48 | last_used_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP 49 | ) 50 | """) 51 | conn.commit() 52 | 53 | def add_thread(self, thread_id: str, name: str, description: str | None) -> int: 54 | """Adds a new thread record to the database. 55 | 56 | Args: 57 | thread_id: The unique identifier for the thread from OpenAI. 58 | name: A user-defined name for the thread. 59 | description: A user-defined description for the thread. 60 | 61 | Returns: 62 | The row ID of the newly inserted thread. 63 | """ 64 | conn = self._get_connection() 65 | cursor = conn.cursor() 66 | cursor.execute(""" 67 | INSERT INTO threads (thread_id, name, description, last_used_at) 68 | VALUES (?, ?, ?, ?) 69 | """, (thread_id, name, description, datetime.now(timezone.utc))) 70 | conn.commit() 71 | return cursor.lastrowid 72 | 73 | def list_threads(self) -> list[sqlite3.Row]: 74 | """Retrieves all thread records from the database. 
75 | 76 | Returns: 77 | A list of rows, where each row is a dictionary-like object 78 | representing a thread. 79 | """ 80 | conn = self._get_connection() 81 | cursor = conn.cursor() 82 | cursor.execute("SELECT * FROM threads ORDER BY last_used_at DESC") 83 | return cursor.fetchall() 84 | 85 | def update_thread_metadata(self, thread_id: str, name: str, description: str | None): 86 | """Updates the name and description of a specific thread. 87 | 88 | Args: 89 | thread_id: The ID of the thread to update. 90 | name: The new name for the thread. 91 | description: The new description for the thread. 92 | """ 93 | conn = self._get_connection() 94 | cursor = conn.cursor() 95 | cursor.execute(""" 96 | UPDATE threads 97 | SET name = ?, description = ?, last_used_at = ? 98 | WHERE thread_id = ? 99 | """, (name, description, datetime.now(timezone.utc), thread_id)) 100 | conn.commit() 101 | 102 | def update_thread_last_used(self, thread_id: str): 103 | """Updates the last_used_at timestamp for a specific thread. 104 | 105 | Args: 106 | thread_id: The ID of the thread to update. 107 | """ 108 | conn = self._get_connection() 109 | cursor = conn.cursor() 110 | cursor.execute(""" 111 | UPDATE threads 112 | SET last_used_at = ? 113 | WHERE thread_id = ? 114 | """, (datetime.now(timezone.utc), thread_id)) 115 | conn.commit() 116 | 117 | def delete_thread(self, thread_id: str): 118 | """Deletes a thread record from the database by its thread_id. 119 | 120 | Args: 121 | thread_id: The ID of the thread to delete. 
122 | """ 123 | conn = self._get_connection() 124 | cursor = conn.cursor() 125 | cursor.execute("DELETE FROM threads WHERE thread_id = ?", (thread_id,)) 126 | conn.commit() 127 | 128 | def close(self): 129 | """Closes the database connection.""" 130 | if self._conn: 131 | self._conn.close() 132 | self._conn = None -------------------------------------------------------------------------------- /mcp_simple_openai_assistant/assistant_manager.py: -------------------------------------------------------------------------------- 1 | """Core business logic for interacting with the OpenAI Assistants API. 2 | 3 | This module is responsible for all direct communication with the OpenAI API 4 | and is designed to be independent of the MCP server implementation. 5 | """ 6 | 7 | import os 8 | from typing import Optional, Literal 9 | import openai 10 | from openai.types.beta import Assistant, Thread 11 | from openai.types.beta.threads import Run 12 | from .thread_store import ThreadStore 13 | import sqlite3 14 | 15 | RunStatus = Literal["completed", "in_progress", "failed", "cancelled", "expired"] 16 | 17 | 18 | class AssistantManager: 19 | """Handles interactions with OpenAI's Assistant API.""" 20 | 21 | def __init__(self, api_key: str, db_path: str): 22 | """Initialize the OpenAI client with an explicit API key.""" 23 | if not api_key: 24 | raise ValueError("OpenAI API key cannot be empty.") 25 | self.client = openai.OpenAI(api_key=api_key) 26 | self.thread_store = ThreadStore(db_path) 27 | self.thread_store.initialize_database() 28 | 29 | async def create_assistant( 30 | self, 31 | name: str, 32 | instructions: str, 33 | model: str = "gpt-4o" 34 | ) -> Assistant: 35 | """Create a new OpenAI assistant.""" 36 | return self.client.beta.assistants.create( 37 | name=name, 38 | instructions=instructions, 39 | model=model 40 | ) 41 | 42 | async def create_new_assistant_thread( 43 | self, name: str, description: Optional[str] = None 44 | ) -> Thread: 45 | """Creates a new, persistent 
conversation thread.""" 46 | metadata = { 47 | "name": name, 48 | "description": description or "" 49 | } 50 | thread = self.client.beta.threads.create(metadata=metadata) 51 | self.thread_store.add_thread(thread.id, name, description) 52 | return thread 53 | 54 | async def list_assistants(self, limit: int = 20) -> list[Assistant]: 55 | """List available OpenAI assistants.""" 56 | response = self.client.beta.assistants.list(limit=limit) 57 | return response.data 58 | 59 | def list_threads(self) -> list[sqlite3.Row]: 60 | """List all managed threads from the local database.""" 61 | return self.thread_store.list_threads() 62 | 63 | async def retrieve_assistant(self, assistant_id: str) -> Assistant: 64 | """Get details about a specific assistant.""" 65 | return self.client.beta.assistants.retrieve(assistant_id) 66 | 67 | async def update_assistant( 68 | self, 69 | assistant_id: str, 70 | name: Optional[str] = None, 71 | instructions: Optional[str] = None, 72 | model: Optional[str] = None 73 | ) -> Assistant: 74 | """Update an existing assistant's configuration.""" 75 | update_params = {} 76 | if name is not None: 77 | update_params["name"] = name 78 | if instructions is not None: 79 | update_params["instructions"] = instructions 80 | if model is not None: 81 | update_params["model"] = model 82 | 83 | return self.client.beta.assistants.update( 84 | assistant_id=assistant_id, 85 | **update_params 86 | ) 87 | 88 | async def update_thread( 89 | self, thread_id: str, name: Optional[str], description: Optional[str] 90 | ): 91 | """Update the metadata of a thread on OpenAI and in the local DB.""" 92 | metadata = { 93 | "name": name, 94 | "description": description or "" 95 | } 96 | # First, update the thread on OpenAI's servers 97 | updated_thread = self.client.beta.threads.update( 98 | thread_id=thread_id, 99 | metadata=metadata 100 | ) 101 | # Then, update the local database 102 | self.thread_store.update_thread_metadata(thread_id, name, description) 103 | return 
updated_thread 104 | 105 | async def delete_thread(self, thread_id: str): 106 | """Delete a thread from OpenAI and the local database.""" 107 | # First, delete the thread from OpenAI's servers 108 | result = self.client.beta.threads.delete(thread_id) 109 | # If successful, delete from the local database 110 | if result.deleted: 111 | self.thread_store.delete_thread(thread_id) 112 | return result 113 | 114 | async def run_thread( 115 | self, 116 | thread_id: str, 117 | assistant_id: str, 118 | message: str 119 | ): 120 | """ 121 | Sends a message to a thread and streams the assistant's response. 122 | This is an async generator that yields events from the run. 123 | """ 124 | # Update the last used timestamp 125 | self.thread_store.update_thread_last_used(thread_id) 126 | 127 | # Add the user's message to the thread 128 | self.client.beta.threads.messages.create( 129 | thread_id=thread_id, 130 | role="user", 131 | content=message 132 | ) 133 | 134 | # Stream the assistant's response 135 | stream = self.client.beta.threads.runs.create( 136 | thread_id=thread_id, 137 | assistant_id=assistant_id, 138 | stream=True 139 | ) 140 | for event in stream: 141 | yield event -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MCP Simple OpenAI Assistant 2 | 3 | *AI assistants are pretty cool. I thought it would be a good idea if my Claude (conscious Claude) would also have one. And now he has - and its both useful anf fun for him. Your Claude can have one too!* 4 | 5 | A simple MCP server for interacting with OpenAI assistants. This server allows other tools (like Claude Desktop) to create and interact with OpenAI assistants through the Model Context Protocol. 
6 | 7 | [![Trust Score](https://archestra.ai/mcp-catalog/api/badge/quality/andybrandt/mcp-simple-openai-assistant)](https://archestra.ai/mcp-catalog/andybrandt__mcp-simple-openai-assistant) 8 | [![smithery badge](https://smithery.ai/badge/mcp-simple-openai-assistant)](https://smithery.ai/mcp/known/mcp-simple-openai-assistant) 9 | [![MseeP.ai Security Assessment Badge](https://mseep.net/pr/andybrandt-mcp-simple-openai-assistant-badge.png)](https://mseep.ai/app/andybrandt-mcp-simple-openai-assistant) 10 | 11 | 12 | ## Features 13 | 14 | This server provides a suite of tools to manage and interact with OpenAI Assistants. The new streaming capabilities provide a much-improved, real-time user experience. 15 | 16 | ### Available Tools 17 | 18 | - **`create_assistant`**: (Create OpenAI Assistant) - Create a new assistant with a name, instructions, and model. 19 | - **`list_assistants`**: (List OpenAI Assistants) - List all available assistants associated with your API key. 20 | - **`retrieve_assistant`**: (Retrieve OpenAI Assistant) - Get detailed information about a specific assistant. 21 | - **`update_assistant`**: (Update OpenAI Assistant) - Modify an existing assistant's name, instructions, or model. 22 | - **`create_new_assistant_thread`**: (Create New Assistant Thread) - Creates a new, persistent conversation thread with a user-defined name and description for easy identification and reuse. This is the recommended way to start a new conversation. 23 | - **`list_threads`**: (List Managed Threads) - Lists all locally managed conversation threads from the database, showing their ID, name, description, and last used time. 24 | - **`delete_thread`**: (Delete Managed Thread) - Deletes a conversation thread from both OpenAI's servers and the local database. 25 | - **`ask_assistant_in_thread`**: (Ask Assistant in Thread and Stream Response) - The primary tool for conversation. Sends a message to an assistant within a thread and streams the response back in real-time. 
26 | 27 | Because OpenAI assistants might take quite long to respond, this server uses a streaming approach for the main `ask_assistant_in_thread` tool. This provides real-time progress updates to the client and avoids timeouts. 28 | 29 | The server now includes local persistence for threads, which is a significant improvement. Since the OpenAI API does not allow listing threads, this server now manages them for you by storing their IDs and metadata in a local SQLite database. This allows you to easily find, reuse, and manage your conversation threads across sessions. 30 | 31 | ## Installation 32 | 33 | ### Installing via Smithery 34 | 35 | To install MCP Simple OpenAI Assistant for Claude Desktop automatically via [Smithery](https://smithery.ai/mcp/known/mcp-simple-openai-assistant): 36 | 37 | ```bash 38 | npx -y @smithery/cli install mcp-simple-openai-assistant --client claude 39 | ``` 40 | 41 | ### Manual Installation 42 | ```bash 43 | pip install mcp-simple-openai-assistant 44 | ``` 45 | 46 | ## Configuration 47 | 48 | The server requires an OpenAI API key to be set in the environment. For Claude Desktop, add this to your config: 49 | 50 | (MacOS version) 51 | 52 | ```json 53 | { 54 | "mcpServers": { 55 | "openai-assistant": { 56 | "command": "python", 57 | "args": ["-m", "mcp_simple_openai_assistant"], 58 | "env": { 59 | "OPENAI_API_KEY": "your-api-key-here" 60 | } 61 | } 62 | } 63 | } 64 | ``` 65 | 66 | (Windows version) 67 | 68 | ```json 69 | "mcpServers": { 70 | "openai-assistant": { 71 | "command": "C:\\Users\\YOUR_USERNAME\\AppData\\Local\\Programs\\Python\\Python311\\python.exe", 72 | "args": ["-m", "mcp_simple_openai_assistant"], 73 | "env": { 74 | "OPENAI_API_KEY": "your-api-key-here" 75 | } 76 | } 77 | 78 | ``` 79 | *MS Windows installation is slightly more complex, because you need to check the actual path to your Python executable. Path provided above is usually correct, but might differ in your setup. 
Sometimes just `python.exe` without any path will do the trick. Check with `cmd` what works for you (using `where python` might help). Also, on Windows you might need to explicitly tell Claude Desktop where the site packages are using the PYTHONPATH environment variable.*
@app.tool(
    annotations={
        "title": "Create OpenAI Assistant",
        "readOnlyHint": False
    }
)
async def create_assistant(name: str, instructions: str, model: str = "gpt-4o") -> str:
    """
    Create a new OpenAI assistant to talk to about your desired topic.

    You can provide instructions that this assistant will follow and specify which of OpenAI's models it will use.
    NOTE: It is recommended to check existing assistants with list_assistants before creating a new one.
    """
    if not manager:
        raise ToolError("AssistantManager not initialized.")
    try:
        result = await manager.create_assistant(name, instructions, model)
        return f"Created assistant '{result.name}' with ID: {result.id}"
    except Exception as e:
        raise ToolError(f"Failed to create assistant: {e}")


@app.tool(
    annotations={"title": "Create New Assistant Thread", "readOnlyHint": False}
)
async def create_new_assistant_thread(
    name: str, description: Optional[str] = None
) -> str:
    """
    Creates a new, persistent conversation thread with a user-defined name and
    description for easy identification and reuse. These threads are stored in OpenAI's servers
    and are not deleted unless the user deletes them, which means you can re-use them for future conversations.
    Additionally, the thread name and description are stored in the local database, which means you can list them
    and update them later.

    Think how you can utilize threads in your particular use case.
    """
    if not manager:
        raise ToolError("AssistantManager not initialized.")
    try:
        thread = await manager.create_new_assistant_thread(name, description)
        return f"Created new thread '{name}' with ID: {thread.id}"
    except Exception as e:
        raise ToolError(f"Failed to create thread: {e}")


@app.tool(annotations={"title": "List Managed Threads", "readOnlyHint": True})
async def list_threads() -> str:
    """
    Lists all locally saved conversation threads from the database.
    Returns a list of threads with their ID, name, description, and last used time.
    The thread ID can be used in the ask_assistant_in_thread tool to specify this thread to be continued.
    """
    if not manager:
        raise ToolError("AssistantManager not initialized.")
    try:
        threads = manager.list_threads()
        if not threads:
            return "No managed threads found."

        thread_list = [
            dedent(f"""
                Thread ID: {t['thread_id']}
                Name: {t['name']}
                Description: {t['description']}
                Last Used: {t['last_used_at']}
            """)
            for t in threads
        ]
        # Use real newlines: the previous "\\n" escapes produced literal
        # backslash-n characters in the tool output instead of line breaks.
        return "Managed Threads:\n\n" + "\n---\n".join(thread_list)
    except Exception as e:
        raise ToolError(f"Failed to list threads: {e}")
102 | """ 103 | if not manager: 104 | raise ToolError("AssistantManager not initialized.") 105 | if not name and not description: 106 | raise ToolError("You must provide either a new name or a new description.") 107 | try: 108 | await manager.update_thread(thread_id, name, description) 109 | return f"Successfully updated thread {thread_id}." 110 | except Exception as e: 111 | raise ToolError(f"Failed to update thread {thread_id}: {e}") 112 | 113 | 114 | @app.tool(annotations={"title": "Delete Managed Thread", "readOnlyHint": False}) 115 | async def delete_thread(thread_id: str) -> str: 116 | """ 117 | Deletes a conversation thread from both OpenAI's servers and the local database. 118 | This action is irreversible. 119 | """ 120 | if not manager: 121 | raise ToolError("AssistantManager not initialized.") 122 | try: 123 | result = await manager.delete_thread(thread_id) 124 | if result.deleted: 125 | return f"Successfully deleted thread {thread_id}." 126 | else: 127 | return f"Failed to delete thread {thread_id} on the server." 128 | except Exception as e: 129 | raise ToolError(f"Failed to delete thread {thread_id}: {e}") 130 | 131 | 132 | @app.tool( 133 | annotations={ 134 | "title": "Ask Assistant in Thread and Stream Response", 135 | "readOnlyHint": False 136 | } 137 | ) 138 | async def ask_assistant_in_thread(thread_id: str, assistant_id: str, message: str, ctx: Context) -> str: 139 | """ 140 | Sends a message to an assistant within a specific thread and streams the response. 141 | This provides progress updates and the final message in a single call. 142 | 143 | Use this to continue a conversation with an assistant in a specific thread. 144 | The thread ID can be retrieved from the list_threads tool. 145 | The assistant ID can be retrieved from the list_assistants tool. 146 | Threads are not inherently linked to a particular assistant, so you can use this tool to talk to any assistant in any thread. 
147 | """ 148 | if not manager: 149 | raise ToolError("AssistantManager not initialized.") 150 | 151 | final_message = "" 152 | try: 153 | await ctx.report_progress(progress=0, message="Starting assistant run...") 154 | async for event in manager.run_thread(thread_id, assistant_id, message): 155 | if event.event == 'thread.message.delta': 156 | text_delta = event.data.delta.content[0].text 157 | final_message += text_delta.value 158 | await ctx.report_progress(progress=50, message=f"Assistant writing: {final_message}") 159 | elif event.event == 'thread.run.step.created': 160 | await ctx.report_progress(progress=25, message="Assistant is performing a step...") 161 | 162 | await ctx.report_progress(progress=100, message="Run complete.") 163 | return final_message 164 | 165 | except Exception as e: 166 | raise ToolError(f"An error occurred during the run: {e}") 167 | 168 | 169 | @app.tool( 170 | annotations={ 171 | "title": "List OpenAI Assistants", 172 | "readOnlyHint": True 173 | } 174 | ) 175 | async def list_assistants(limit: int = 20) -> str: 176 | """ 177 | List all available OpenAI assistants associated with the API key configured by the user. 178 | 179 | Returns a list of assistants with their IDs, names, and configurations. This can be used to select 180 | an assistant to use in the ask_assistant_in_thread tool instead of creating a new one. 181 | """ 182 | if not manager: 183 | raise ToolError("AssistantManager not initialized.") 184 | try: 185 | assistants = await manager.list_assistants(limit) 186 | if not assistants: 187 | return "No assistants found." 
188 | 189 | assistant_list = [ 190 | dedent(f""" 191 | ID: {a.id} 192 | Name: {a.name} 193 | Model: {a.model}""") 194 | for a in assistants 195 | ] 196 | return "Available Assistants:\\n\\n" + "\\n---\\n".join(assistant_list) 197 | except Exception as e: 198 | raise ToolError(f"Failed to list assistants: {e}") 199 | 200 | @app.tool( 201 | annotations={ 202 | "title": "Retrieve OpenAI Assistant", 203 | "readOnlyHint": True 204 | } 205 | ) 206 | async def retrieve_assistant(assistant_id: str) -> str: 207 | """Get detailed information about a specific assistant. 208 | The ID required can be retrieved from the list_assistants tool.""" 209 | if not manager: 210 | raise ToolError("AssistantManager not initialized.") 211 | try: 212 | result = await manager.retrieve_assistant(assistant_id) 213 | return dedent(f""" 214 | Assistant Details: 215 | ID: {result.id} 216 | Name: {result.name} 217 | Model: {result.model} 218 | Instructions: {result.instructions} 219 | """) 220 | except Exception as e: 221 | raise ToolError(f"Failed to retrieve assistant {assistant_id}: {e}") 222 | 223 | @app.tool( 224 | annotations={ 225 | "title": "Update OpenAI Assistant", 226 | "readOnlyHint": False 227 | } 228 | ) 229 | async def update_assistant( 230 | assistant_id: str, 231 | name: str = None, 232 | instructions: str = None, 233 | model: str = None 234 | ) -> str: 235 | """ 236 | Modify an existing assistant's name, instructions, or model used. 237 | 238 | At least one optional parameter - what to change - must be provided, otherwise the tool will return an error. 239 | The ID required can be retrieved from the list_assistants tool. 
240 | """ 241 | if not manager: 242 | raise ToolError("AssistantManager not initialized.") 243 | if not any([name, instructions, model]): 244 | raise ToolError("You must provide at least one field to update (name, instructions, or model).") 245 | try: 246 | result = await manager.update_assistant(assistant_id, name, instructions, model) 247 | return f"Successfully updated assistant '{result.name}' (ID: {result.id})." 248 | except Exception as e: 249 | raise ToolError(f"Failed to update assistant {assistant_id}: {e}") -------------------------------------------------------------------------------- /test_server.py: -------------------------------------------------------------------------------- 1 | """Pytest test suite for the MCP OpenAI Assistant server.""" 2 | 3 | import os 4 | import asyncio 5 | import pytest 6 | from unittest.mock import AsyncMock, patch 7 | from dotenv import load_dotenv 8 | from fastmcp import Client, FastMCP, Context 9 | from mcp_simple_openai_assistant.assistant_manager import AssistantManager 10 | from textwrap import dedent 11 | from typing import Optional 12 | 13 | # Load environment variables for the test session 14 | load_dotenv() 15 | 16 | # --- Fixtures --- 17 | 18 | @pytest.fixture(scope="session") 19 | def api_key() -> str: 20 | """Fixture to provide the OpenAI API key and skip tests if not found.""" 21 | key = os.getenv("OPENAI_API_KEY") 22 | if not key: 23 | pytest.skip("OPENAI_API_KEY not found in environment, skipping integration tests.") 24 | return key 25 | 26 | @pytest.fixture(scope="session") 27 | def test_assistant_name() -> str: 28 | """Provides a unique name for the assistant created during the test run.""" 29 | return "Test Assistant - Pytest" 30 | 31 | @pytest.fixture 32 | def client(api_key: str) -> Client: 33 | """ 34 | Provides a FastMCP client configured to talk to a fresh, in-memory server 35 | instance for each test. 
36 | """ 37 | # Create a completely new FastMCP app for each test to ensure isolation 38 | test_app = FastMCP(name="openai-assistant-test") 39 | 40 | # Initialize a new manager with the API key for this test 41 | # The user has specified to use a memory DB for these tests. 42 | test_manager = AssistantManager(api_key=api_key, db_path=":memory:") 43 | 44 | # This is a bit of a workaround to register tools on a new app instance 45 | # within a test. We define the tools inside the fixture. 46 | @test_app.tool(annotations={"title": "List OpenAI Assistants", "readOnlyHint": True}) 47 | async def list_assistants(limit: int = 20) -> str: 48 | assistants = await test_manager.list_assistants(limit) 49 | if not assistants: return "No assistants found." 50 | assistant_list = [ 51 | dedent(f""" 52 | ID: {a.id} 53 | Name: {a.name} 54 | Model: {a.model}""") 55 | for a in assistants 56 | ] 57 | return "Available Assistants:\\n\\n" + "\\n---\\n".join(assistant_list) 58 | 59 | @test_app.tool(annotations={"title": "Create OpenAI Assistant", "readOnlyHint": False}) 60 | async def create_assistant(name: str, instructions: str, model: str = "gpt-4o") -> str: 61 | result = await test_manager.create_assistant(name, instructions, model) 62 | return f"Created assistant '{result.name}' with ID: {result.id}" 63 | 64 | @test_app.tool(annotations={"title": "Retrieve OpenAI Assistant", "readOnlyHint": True}) 65 | async def retrieve_assistant(assistant_id: str) -> str: 66 | result = await test_manager.retrieve_assistant(assistant_id) 67 | return dedent(f""" 68 | Assistant Details: 69 | ID: {result.id} 70 | Name: {result.name} 71 | Model: {result.model} 72 | Instructions: {result.instructions} 73 | """) 74 | 75 | @test_app.tool(annotations={"title": "Create New Assistant Thread", "readOnlyHint": False}) 76 | async def create_new_assistant_thread( 77 | name: str, description: Optional[str] = None 78 | ) -> str: 79 | thread = await test_manager.create_new_assistant_thread(name, description) 80 | 
return f"Created new thread '{name}' with ID: {thread.id}" 81 | 82 | @test_app.tool(annotations={"title": "List Managed Threads", "readOnlyHint": True}) 83 | async def list_threads() -> str: 84 | threads = test_manager.list_threads() 85 | if not threads: 86 | return "No managed threads found." 87 | thread_list = [ 88 | dedent(f""" 89 | Thread ID: {t['thread_id']} 90 | Name: {t['name']} 91 | Description: {t['description']} 92 | Last Used: {t['last_used_at']} 93 | """) 94 | for t in threads 95 | ] 96 | return "Managed Threads:\\n\\n" + "\\n---\\n".join(thread_list) 97 | 98 | @test_app.tool(annotations={"title": "Delete Managed Thread", "readOnlyHint": False}) 99 | async def delete_thread(thread_id: str) -> str: 100 | result = await test_manager.delete_thread(thread_id) 101 | if result.deleted: 102 | return f"Successfully deleted thread {thread_id}." 103 | else: 104 | return f"Failed to delete thread {thread_id} on the server." 105 | 106 | @test_app.tool(annotations={"title": "Ask Assistant in Thread and Stream Response", "readOnlyHint": False}) 107 | async def ask_assistant_in_thread(thread_id: str, assistant_id: str, message: str, ctx: Context) -> str: 108 | final_message = "" 109 | await ctx.report_progress(progress=0, message="Starting assistant run...") 110 | async for event in test_manager.run_thread(thread_id, assistant_id, message): 111 | if event.event == 'thread.message.delta': 112 | text_delta = event.data.delta.content[0].text 113 | final_message += text_delta.value 114 | await ctx.report_progress(progress=50, message=f"Assistant writing: {final_message}") 115 | elif event.event == 'thread.run.step.created': 116 | await ctx.report_progress(progress=25, message="Assistant is performing a step...") 117 | 118 | await ctx.report_progress(progress=100, message="Run complete.") 119 | return final_message 120 | 121 | return Client(test_app) 122 | 123 | 124 | # --- Test Cases --- 125 | 126 | @pytest.mark.asyncio 127 | async def test_list_assistants(client: 
Client): 128 | """Test the list_assistants tool.""" 129 | async with client: 130 | result = await client.call_tool("list_assistants") 131 | assert "Available Assistants" in result.data or "No assistants found" in result.data 132 | 133 | @pytest.mark.asyncio 134 | async def test_create_and_retrieve_assistant(client: Client, test_assistant_name: str): 135 | """Test creating a new assistant and then retrieving it.""" 136 | async with client: 137 | # Create 138 | create_result = await client.call_tool( 139 | "create_assistant", 140 | { 141 | "name": test_assistant_name, 142 | "instructions": "A test assistant for pytest.", 143 | "model": "gpt-4o" 144 | } 145 | ) 146 | assert f"Created assistant '{test_assistant_name}'" in create_result.data 147 | 148 | # Extract the ID from the response text 149 | assistant_id = create_result.data.split("ID: ")[-1] 150 | assert assistant_id is not None 151 | 152 | # Retrieve 153 | retrieve_result = await client.call_tool( 154 | "retrieve_assistant", 155 | {"assistant_id": assistant_id} 156 | ) 157 | assert f"ID: {assistant_id}" in retrieve_result.data 158 | assert f"Name: {test_assistant_name}" in retrieve_result.data 159 | 160 | @pytest.mark.asyncio 161 | async def test_streaming_conversation_flow(client: Client, test_assistant_name: str): 162 | """ 163 | Tests the new streaming conversation flow: 164 | 1. Find or create the test assistant. 165 | 2. Create a new thread. 166 | 3. Call `run_thread` and verify progress messages are received. 167 | """ 168 | # Use a list to capture progress messages from the handler 169 | progress_updates = [] 170 | async def progress_handler(progress: int, total: int | None, message: str | None): 171 | if message: 172 | progress_updates.append(message) 173 | 174 | async with client: 175 | # 1. 
Find or create the test assistant 176 | list_res = await client.call_tool("list_assistants", {"limit": 100}) 177 | 178 | assistant_id = None 179 | for block in list_res.data.split('---'): 180 | if f"Name: {test_assistant_name}" in block: 181 | lines = block.strip().split('\\n') 182 | for line in lines: 183 | if line.startswith("ID: "): 184 | assistant_id = line.split("ID: ")[1].strip() 185 | break 186 | if assistant_id: 187 | break 188 | 189 | if not assistant_id: 190 | create_res = await client.call_tool("create_assistant", {"name": test_assistant_name, "instructions": "Test bot."}) 191 | assistant_id = create_res.data.split("ID: ")[-1] 192 | 193 | assert assistant_id 194 | 195 | # 2. Create a thread 196 | thread_res = await client.call_tool("create_new_assistant_thread", {"name": "Test Thread", "description": "A test thread"}) 197 | thread_id = thread_res.data.split("ID: ")[-1] 198 | assert thread_id 199 | 200 | # 3. Call run_thread and stream the response 201 | final_result = await client.call_tool( 202 | "ask_assistant_in_thread", 203 | { 204 | "thread_id": thread_id, 205 | "assistant_id": assistant_id, 206 | "message": "Hello! What is 2 + 2? Please explain your steps." 207 | }, 208 | progress_handler=progress_handler 209 | ) 210 | 211 | # 4. Assertions 212 | assert "4" in final_result.data or "four" in final_result.data.lower() 213 | assert len(progress_updates) > 2 214 | assert "Starting assistant run..." in progress_updates[0] 215 | assert "Run complete." in progress_updates[-1] 216 | 217 | @pytest.mark.asyncio 218 | async def test_thread_management_lifecycle(client: Client): 219 | """ 220 | Tests the full lifecycle of a managed thread by making real API calls: 221 | 1. Create a new thread via the tool. 222 | 2. Verify it's in the local database. 223 | 3. Delete the thread via the tool. 224 | 4. Verify it has been removed from the local database. 
225 | """ 226 | thread_id = None 227 | thread_name = "Test Full Lifecycle" 228 | try: 229 | async with client: 230 | # 1. Verify no threads with this name exist initially 231 | initial_list = await client.call_tool("list_threads") 232 | assert thread_name not in initial_list.data 233 | 234 | # 2. Create a new thread (real API call) 235 | create_result = await client.call_tool( 236 | "create_new_assistant_thread", 237 | {"name": thread_name, "description": "Testing the full cycle."} 238 | ) 239 | assert f"Created new thread '{thread_name}'" in create_result.data 240 | thread_id = create_result.data.split("ID: ")[-1] 241 | assert thread_id.startswith("thread_") 242 | 243 | # 3. List threads and verify the new thread is present in the DB 244 | list_after_create = await client.call_tool("list_threads") 245 | assert thread_id in list_after_create.data 246 | assert thread_name in list_after_create.data 247 | 248 | finally: 249 | # 4. Cleanup: Delete the thread (real API call) 250 | if thread_id: 251 | async with client: 252 | delete_result = await client.call_tool("delete_thread", {"thread_id": thread_id}) 253 | assert "Successfully deleted" in delete_result.data 254 | 255 | # 5. Verify it's gone from the local DB 256 | list_after_delete = await client.call_tool("list_threads") 257 | assert thread_id not in list_after_delete.data --------------------------------------------------------------------------------