├── .github
│   └── workflows
│       └── python-package.yml
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── build.sh
├── dify_client
│   ├── __init__.py
│   ├── _clientx.py
│   ├── errors.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── chat.py
│   │   ├── completion.py
│   │   ├── feedback.py
│   │   ├── file.py
│   │   ├── stream.py
│   │   └── workflow.py
│   └── utils
│       ├── __init__.py
│       └── _common.py
└── setup.py
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 | 
4 | name: Python package
5 | 
6 | on:
7 |   push:
8 |     branches: [ "master" ]
9 |   pull_request:
10 |     branches: [ "master" ]
11 | 
12 | jobs:
13 |   build:
14 | 
15 |     runs-on: ubuntu-latest
16 |     strategy:
17 |       fail-fast: false
18 |       matrix:
19 |         python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
20 | 
21 |     steps:
22 |     - uses: actions/checkout@v4
23 |     - name: Set up Python ${{ matrix.python-version }}
24 |       uses: actions/setup-python@v3
25 |       with:
26 |         python-version: ${{ matrix.python-version }}
27 |     - name: Install dependencies
28 |       run: |
29 |         python -m pip install --upgrade pip
30 |         python -m pip install flake8 pytest
31 |         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
32 |     - name: Lint with flake8
33 |       run: |
34 |         # stop the build if there are Python syntax errors or undefined names
35 |         flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
36 |         # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
37 |         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
38 |     - name: Test with pytest
39 |       run: |
40 |         pytest
41 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | 
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | .idea/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2024 haoyuhu
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include dify_client *.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dify-client-python
2 | 
3 | Welcome to the `dify-client-python` repository! This Python package provides a convenient and powerful interface to
4 | interact with the Dify API, enabling developers to integrate a wide range of features into their applications with ease.
5 | 
6 | ## Main Features
7 | 
8 | * **Synchronous and Asynchronous Support**: The client offers both synchronous and asynchronous methods, allowing for
9 |   flexible integration into various Python codebases and frameworks.
10 | * **Stream and Non-stream Support**: Seamlessly work with both streaming and non-streaming endpoints of the Dify API for
11 |   real-time and batch processing use cases.
12 | * **Comprehensive Endpoint Coverage**: Supporting completion, chat, workflows, feedback, file uploads, and more, the client
13 |   covers all available Dify API endpoints.
14 | 
15 | ## Installation
16 | 
17 | Before using the `dify-client-python` client, install it with `pip`:
18 | 
19 | ```bash
20 | pip install dify-client-python
21 | ```
22 | 
23 | ## Quick Start
24 | 
25 | Here's a quick example of how you can use the Dify Client to send a chat message.
26 | 
27 | ```python
28 | import uuid
29 | from dify_client import Client, models
30 | 
31 | # Initialize the client with your API key
32 | client = Client(
33 |     api_key="your-api-key",
34 |     api_base="http://localhost/v1",
35 | )
36 | user = str(uuid.uuid4())
37 | 
38 | # Create a blocking chat request
39 | blocking_chat_req = models.ChatRequest(
40 |     query="Hi, dify-client-python!",
41 |     inputs={"city": "Beijing"},
42 |     user=user,
43 |     response_mode=models.ResponseMode.BLOCKING,
44 | )
45 | 
46 | # Send the chat message
47 | chat_response = client.chat_messages(blocking_chat_req, timeout=60.)
48 | print(chat_response)
49 | 
50 | # Create a streaming chat request
51 | streaming_chat_req = models.ChatRequest(
52 |     query="Hi, dify-client-python!",
53 |     inputs={"city": "Beijing"},
54 |     user=user,
55 |     response_mode=models.ResponseMode.STREAMING,
56 | )
57 | 
58 | # Send the chat message and iterate over the streamed chunks
59 | for chunk in client.chat_messages(streaming_chat_req, timeout=60.):
60 |     print(chunk)
61 | ```
62 | 
63 | For asynchronous operations, use the `AsyncClient` in a similar fashion:
64 | 
65 | ```python
66 | import asyncio
67 | import uuid
68 | 
69 | from dify_client import AsyncClient, models
70 | 
71 | # Initialize the async client with your API key
72 | async_client = AsyncClient(
73 |     api_key="your-api-key",
74 |     api_base="http://localhost/v1",
75 | )
76 | 
77 | 
78 | # Define an asynchronous function to send a chat message with BLOCKING ResponseMode
79 | async def send_chat_message():
80 |     user = str(uuid.uuid4())
81 |     # Create a blocking chat request
82 |     blocking_chat_req = models.ChatRequest(
83 |         query="Hi, dify-client-python!",
84 |         inputs={"city": "Beijing"},
85 |         user=user,
86 |         response_mode=models.ResponseMode.BLOCKING,
87 |     )
88 |     chat_response = await async_client.achat_messages(blocking_chat_req, timeout=60.)
89 |     print(chat_response)
90 | 
91 | 
92 | # Define an asynchronous function to send a chat message with STREAMING ResponseMode
93 | async def send_chat_message_stream():
94 |     user = str(uuid.uuid4())
95 |     # Create a streaming chat request
96 |     streaming_chat_req = models.ChatRequest(
97 |         query="Hi, dify-client-python!",
98 |         inputs={"city": "Beijing"},
99 |         user=user,
100 |         response_mode=models.ResponseMode.STREAMING,
101 |     )
102 |     async for chunk in await async_client.achat_messages(streaming_chat_req, timeout=60.):
103 |         print(chunk)
104 | 
105 | async def main():
106 |     await asyncio.gather(send_chat_message(), send_chat_message_stream())
107 | asyncio.run(main())  # run both coroutines concurrently
108 | ```
109 | 
110 | ## Documentation
111 | 
112 | For detailed information on all the functionalities and how to use each endpoint, please refer to the official Dify API
113 | documentation. It provides comprehensive guidance on request and response structures, error handling, and
114 | other important details.
115 | 
116 | ## Contributing
117 | 
118 | Contributions are welcome! If you would like to contribute to `dify-client-python`, please feel free to open a pull
119 | request or an issue to discuss potential changes.
120 | 
121 | ## License
122 | 
123 | This project is licensed under the MIT License - see the LICENSE file for details.
124 | 
125 | ```text
126 | MIT License
127 | 
128 | Copyright (c) 2024 haoyuhu
129 | 
130 | Permission is hereby granted, free of charge, to any person obtaining a copy
131 | of this software and associated documentation files (the "Software"), to deal
132 | in the Software without restriction, including without limitation the rights
133 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
134 | copies of the Software, and to permit persons to whom the Software is
135 | furnished to do so, subject to the following conditions:
136 | 
137 | The above copyright notice and this permission notice shall be included in all
138 | copies or substantial portions of the Software.
139 | 
140 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
141 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
142 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 143 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 144 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 145 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 146 | SOFTWARE. 147 | 148 | ``` 149 | 150 | ## Support 151 | 152 | If you encounter any issues or have questions regarding the usage of this client, please reach out to the Dify Client 153 | support team. 154 | 155 | Happy coding! 🚀 -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | rm -rf build dist *.egg-info 6 | 7 | pip install setuptools wheel twine 8 | python setup.py sdist bdist_wheel 9 | twine upload dist/* -------------------------------------------------------------------------------- /dify_client/__init__.py: -------------------------------------------------------------------------------- 1 | from ._clientx import Client, AsyncClient 2 | -------------------------------------------------------------------------------- /dify_client/_clientx.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Any, Mapping, Iterator, AsyncIterator, Union, Dict 2 | 3 | try: 4 | from enum import StrEnum 5 | except ImportError: 6 | from strenum import StrEnum 7 | try: 8 | from http import HTTPMethod 9 | except ImportError: 10 | class HTTPMethod(StrEnum): 11 | GET = "GET" 12 | POST = "POST" 13 | PUT = "PUT" 14 | DELETE = "DELETE" 15 | 16 | import httpx 17 | # noinspection PyProtectedMember 18 | import httpx._types as types 19 | from httpx_sse import connect_sse, ServerSentEvent, aconnect_sse 20 | from pydantic import BaseModel 21 | 22 | from dify_client import errors, models 23 | 24 | _httpx_client = httpx.Client() 25 | _async_httpx_client = httpx.AsyncClient() 26 | 27 | IGNORED_STREAM_EVENTS = (models.StreamEvent.PING.value,) 28 | 29 | # feedback 30 | ENDPOINT_FEEDBACKS = "/messages/{message_id}/feedbacks" 31 | # suggest 32 | ENDPOINT_SUGGESTED = "/messages/{message_id}/suggested" 33 | # files upload 34 | ENDPOINT_FILES_UPLOAD = "/files/upload" 35 | # completion 36 | ENDPOINT_COMPLETION_MESSAGES = "/completion-messages" 37 | ENDPOINT_STOP_COMPLETION_MESSAGES = "/completion-messages/{task_id}/stop" 38 | # chat 39 | ENDPOINT_CHAT_MESSAGES = "/chat-messages" 40 | ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop" 41 | # workflow 42 | ENDPOINT_RUN_WORKFLOWS = "/workflows/run" 43 | ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop" 44 | # audio <-> text 45 | ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio" 46 | ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text" 47 | 48 | 49 | class Client(BaseModel): 50 | api_key: str 51 | api_base: Optional[str] = "https://api.dify.ai/v1" 52 | 53 | def request(self, endpoint: str, method: str, 54 | content: Optional[types.RequestContent] = None, 55 | data: Optional[types.RequestData] = None, 56 | files: Optional[types.RequestFiles] = None, 57 | json: Optional[Any] = None, 58 | params: Optional[types.QueryParamTypes] = None, 59 | headers: Optional[Mapping[str, str]] = None, 60 | **kwargs: object, 61 | ) -> httpx.Response: 62 | """ 63 | Sends a synchronous HTTP request to the specified endpoint. 64 | 65 | Args: 66 | endpoint: The API endpoint to send the request to. 67 | method: The HTTP method to use (e.g., 'GET', 'POST'). 68 | content: Raw content to include in the request body. 
69 | data: Form data to include in the request body. 70 | files: Files to include in the request body. 71 | json: JSON data to include in the request body. 72 | params: Query parameters to include in the request URL. 73 | headers: Additional headers to include in the request. 74 | **kwargs: Extra keyword arguments to pass to the request function. 75 | 76 | Returns: 77 | A `httpx.Response` object containing the HTTP response. 78 | 79 | Raises: 80 | Various DifyAPIError exceptions if the response contains an error. 81 | """ 82 | merged_headers = {} 83 | if headers: 84 | merged_headers.update(headers) 85 | self._prepare_auth_headers(merged_headers) 86 | 87 | response = _httpx_client.request(method, endpoint, content=content, data=data, files=files, json=json, 88 | params=params, headers=merged_headers, **kwargs) 89 | errors.raise_for_status(response) 90 | return response 91 | 92 | def request_stream(self, endpoint: str, method: str, 93 | content: Optional[types.RequestContent] = None, 94 | data: Optional[types.RequestData] = None, 95 | files: Optional[types.RequestFiles] = None, 96 | json: Optional[Any] = None, 97 | params: Optional[types.QueryParamTypes] = None, 98 | headers: Optional[Mapping[str, str]] = None, 99 | **kwargs, 100 | ) -> Iterator[ServerSentEvent]: 101 | """ 102 | Opens a server-sent events (SSE) stream to the specified endpoint. 103 | 104 | Args: 105 | endpoint: The API endpoint to send the request to. 106 | method: The HTTP method to use (e.g., 'GET', 'POST'). 107 | content: Raw content to include in the request body. 108 | data: Form data to include in the request body. 109 | files: Files to include in the request body. 110 | json: JSON data to include in the request body. 111 | params: Query parameters to include in the request URL. 112 | headers: Additional headers to include in the request. 113 | **kwargs: Extra keyword arguments to pass to the request function. 114 | 115 | Returns: 116 | An iterator of `ServerSentEvent` objects representing the stream of events. 117 | 118 | Raises: 119 | Various DifyAPIError exceptions if an error event is received in the stream. 120 | """ 121 | merged_headers = {} 122 | if headers: 123 | merged_headers.update(headers) 124 | self._prepare_auth_headers(merged_headers) 125 | 126 | with connect_sse(_httpx_client, method, endpoint, headers=merged_headers, 127 | content=content, data=data, files=files, json=json, params=params, **kwargs) as event_source: 128 | if not _check_stream_content_type(event_source.response): 129 | event_source.response.read() 130 | errors.raise_for_status(event_source.response) 131 | for sse in event_source.iter_sse(): 132 | errors.raise_for_status(sse) 133 | if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS: 134 | continue 135 | yield sse 136 | 137 | def feedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) -> models.FeedbackResponse: 138 | """ 139 | Submits feedback for a specific message. 140 | 141 | Args: 142 | message_id: The identifier of the message to submit feedback for. 143 | req: A `FeedbackRequest` object containing the feedback details, such as the rating. 144 | **kwargs: Extra keyword arguments to pass to the request function. 145 | 146 | Returns: 147 | A `FeedbackResponse` object containing the result of the feedback submission. 
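
        Example (a minimal sketch; assumes a `Client` named `client`, the
        `models` module imported from `dify_client`, and a `message_id` taken
        from a previous chat response; the user id is a placeholder):

            req = models.FeedbackRequest(rating=models.Rating.LIKE, user="user-123")
            resp = client.feedback_messages(message_id, req)
            assert resp.result == "success"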
148 | """ 149 | response = self.request( 150 | self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), 151 | HTTPMethod.POST, 152 | json=req.model_dump(), 153 | **kwargs, 154 | ) 155 | return models.FeedbackResponse(**response.json()) 156 | 157 | def suggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) -> models.ChatSuggestResponse: 158 | """ 159 | Retrieves suggested messages based on a specific message. 160 | 161 | Args: 162 | message_id: The identifier of the message to get suggestions for. 163 | req: A `ChatSuggestRequest` object containing the request details. 164 | **kwargs: Extra keyword arguments to pass to the request function. 165 | 166 | Returns: 167 | A `ChatSuggestResponse` object containing suggested messages. 168 | """ 169 | response = self.request( 170 | self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), 171 | HTTPMethod.GET, 172 | params=req.model_dump(), 173 | **kwargs, 174 | ) 175 | return models.ChatSuggestResponse(**response.json()) 176 | 177 | def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest, 178 | **kwargs) -> models.UploadFileResponse: 179 | """ 180 | Uploads a file to be used in subsequent requests. 181 | 182 | Args: 183 | file: The file to upload. This can be a file-like object, or a tuple of 184 | (`filename`, file-like object, mime_type). 185 | req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. 186 | **kwargs: Extra keyword arguments to pass to the request function. 187 | 188 | Returns: 189 | An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. 190 | """ 191 | response = self.request( 192 | self._prepare_url(ENDPOINT_FILES_UPLOAD), 193 | HTTPMethod.POST, 194 | data=req.model_dump(), 195 | files=[("file", file)], 196 | **kwargs, 197 | ) 198 | return models.UploadFileResponse(**response.json()) 199 | 200 | def completion_messages(self, req: models.CompletionRequest, **kwargs) \ 201 | -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]: 202 | """ 203 | Sends a request to generate a completion or a series of completions based on the provided input. 204 | 205 | Returns: 206 | If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. 207 | If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing 208 | the stream of generated events. 
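
        Example (a minimal sketch of the blocking mode; assumes a `Client`
        named `client` and the `models` module imported from `dify_client`;
        the query text and user id are placeholders):

            req = models.CompletionRequest(
                inputs=models.CompletionInputs(query="Write a haiku about the sea"),
                response_mode=models.ResponseMode.BLOCKING,
                user="user-123",
            )
            resp = client.completion_messages(req, timeout=60.)
            print(resp.answer)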
209 | """ 210 | if req.response_mode == models.ResponseMode.BLOCKING: 211 | return self._completion_messages(req, **kwargs) 212 | if req.response_mode == models.ResponseMode.STREAMING: 213 | return self._completion_messages_stream(req, **kwargs) 214 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 215 | 216 | def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: 217 | response = self.request( 218 | self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), 219 | HTTPMethod.POST, 220 | json=req.model_dump(), 221 | **kwargs, 222 | ) 223 | return models.CompletionResponse(**response.json()) 224 | 225 | def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ 226 | -> Iterator[models.CompletionStreamResponse]: 227 | event_source = self.request_stream( 228 | self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), 229 | HTTPMethod.POST, 230 | json=req.model_dump(), 231 | **kwargs, 232 | ) 233 | for sse in event_source: 234 | yield models.build_completion_stream_response(sse.json()) 235 | 236 | def stop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 237 | """ 238 | Sends a request to stop a streaming completion task. 239 | 240 | Returns: 241 | A `StopResponse` object indicating the success of the operation. 242 | """ 243 | return self._stop_stream(self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) 244 | 245 | def chat_messages(self, req: models.ChatRequest, **kwargs) \ 246 | -> Union[models.ChatResponse, Iterator[models.ChatStreamResponse]]: 247 | """ 248 | Sends a request to generate a chat message or a series of chat messages based on the provided input. 249 | 250 | Returns: 251 | If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. 252 | If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the 253 | stream of chat events. 254 | """ 255 | if req.response_mode == models.ResponseMode.BLOCKING: 256 | return self._chat_messages(req, **kwargs) 257 | if req.response_mode == models.ResponseMode.STREAMING: 258 | return self._chat_messages_stream(req, **kwargs) 259 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 260 | 261 | def _chat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: 262 | response = self.request( 263 | self._prepare_url(ENDPOINT_CHAT_MESSAGES), 264 | HTTPMethod.POST, 265 | json=req.model_dump(), 266 | **kwargs, 267 | ) 268 | return models.ChatResponse(**response.json()) 269 | 270 | def _chat_messages_stream(self, req: models.ChatRequest, **kwargs) -> Iterator[models.ChatStreamResponse]: 271 | event_source = self.request_stream( 272 | self._prepare_url(ENDPOINT_CHAT_MESSAGES), 273 | HTTPMethod.POST, 274 | json=req.model_dump(), 275 | **kwargs, 276 | ) 277 | for sse in event_source: 278 | yield models.build_chat_stream_response(sse.json()) 279 | 280 | def stop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 281 | """ 282 | Sends a request to stop a streaming chat task. 283 | 284 | Returns: 285 | A `StopResponse` object indicating the success of the operation. 
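
        Example (a minimal sketch; assumes a `Client` named `client`, a
        streaming `ChatRequest` named `streaming_req`, and a hypothetical
        `should_stop` predicate; the `task_id` comes from the streamed chunks):

            for chunk in client.chat_messages(streaming_req, timeout=60.):
                if should_stop(chunk):
                    client.stop_chat_messages(chunk.task_id, models.StopRequest(user="user-123"))
                    break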
286 | """ 287 | return self._stop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) 288 | 289 | def run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ 290 | -> Union[models.WorkflowsRunResponse, Iterator[models.WorkflowsRunStreamResponse]]: 291 | """ 292 | Initiates the execution of a workflow, which can consist of multiple steps and actions. 293 | 294 | Returns: 295 | If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the 296 | completed workflow. 297 | If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects 298 | containing the stream of workflow events. 299 | """ 300 | if req.response_mode == models.ResponseMode.BLOCKING: 301 | return self._run_workflows(req, **kwargs) 302 | if req.response_mode == models.ResponseMode.STREAMING: 303 | return self._run_workflows_stream(req, **kwargs) 304 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 305 | 306 | def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: 307 | response = self.request( 308 | self._prepare_url(ENDPOINT_RUN_WORKFLOWS), 309 | HTTPMethod.POST, 310 | json=req.model_dump(), 311 | **kwargs, 312 | ) 313 | return models.WorkflowsRunResponse(**response.json()) 314 | 315 | def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ 316 | -> Iterator[models.WorkflowsRunStreamResponse]: 317 | event_source = self.request_stream( 318 | self._prepare_url(ENDPOINT_RUN_WORKFLOWS), 319 | HTTPMethod.POST, 320 | json=req.model_dump(), 321 | **kwargs, 322 | ) 323 | for sse in event_source: 324 | yield models.build_workflows_stream_response(sse.json()) 325 | 326 | def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 327 | """ 328 | Sends a request to stop a streaming workflow task. 329 | 330 | Returns: 331 | A `StopResponse` object indicating the success of the operation. 332 | """ 333 | return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) 334 | 335 | def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 336 | response = self.request( 337 | endpoint, 338 | HTTPMethod.POST, 339 | json=req.model_dump(), 340 | **kwargs, 341 | ) 342 | return models.StopResponse(**response.json()) 343 | 344 | def _prepare_url(self, endpoint: str, **kwargs) -> str: 345 | return self.api_base + endpoint.format(**kwargs) 346 | 347 | def _prepare_auth_headers(self, headers: Dict[str, str]): 348 | if "authorization" not in (key.lower() for key in headers.keys()): 349 | headers["Authorization"] = f"Bearer {self.api_key}" 350 | 351 | 352 | class AsyncClient(BaseModel): 353 | api_key: str 354 | api_base: Optional[str] = "https://api.dify.ai/v1" 355 | 356 | async def arequest(self, endpoint: str, method: str, 357 | content: Optional[types.RequestContent] = None, 358 | data: Optional[types.RequestData] = None, 359 | files: Optional[types.RequestFiles] = None, 360 | json: Optional[Any] = None, 361 | params: Optional[types.QueryParamTypes] = None, 362 | headers: Optional[Mapping[str, str]] = None, 363 | **kwargs, 364 | ) -> httpx.Response: 365 | """ 366 | Asynchronously sends a request to the specified Dify API endpoint. 367 | 368 | Args: 369 | endpoint: The endpoint URL to which the request is sent. 370 | method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). 
371 | content: Raw content to include in the request body, if any. 372 | data: Form data to be sent in the request body. 373 | files: Files to be uploaded with the request. 374 | json: JSON data to be sent in the request body. 375 | params: Query parameters to be included in the request URL. 376 | headers: Additional headers to be sent with the request. 377 | **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. 378 | 379 | Returns: 380 | A httpx.Response object containing the server's response to the HTTP request. 381 | 382 | Raises: 383 | Various DifyAPIError exceptions if the response contains an error. 384 | """ 385 | merged_headers = {} 386 | if headers: 387 | merged_headers.update(headers) 388 | self._prepare_auth_headers(merged_headers) 389 | 390 | response = await _async_httpx_client.request(method, endpoint, content=content, data=data, files=files, 391 | json=json, params=params, headers=merged_headers, **kwargs) 392 | errors.raise_for_status(response) 393 | return response 394 | 395 | async def arequest_stream(self, endpoint: str, method: str, 396 | content: Optional[types.RequestContent] = None, 397 | data: Optional[types.RequestData] = None, 398 | files: Optional[types.RequestFiles] = None, 399 | json: Optional[Any] = None, 400 | params: Optional[types.QueryParamTypes] = None, 401 | headers: Optional[Mapping[str, str]] = None, 402 | **kwargs, 403 | ) -> AsyncIterator[ServerSentEvent]: 404 | """ 405 | Asynchronously establishes a streaming connection to the specified Dify API endpoint. 406 | 407 | Args: 408 | endpoint: The endpoint URL to which the request is sent. 409 | method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). 410 | content: Raw content to include in the request body, if any. 411 | data: Form data to be sent in the request body. 412 | files: Files to be uploaded with the request. 413 | json: JSON data to be sent in the request body. 414 | params: Query parameters to be included in the request URL. 415 | headers: Additional headers to be sent with the request. 416 | **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. 417 | 418 | Yields: 419 | ServerSentEvent objects representing the events received from the server. 420 | 421 | Raises: 422 | Various DifyAPIError exceptions if an error event is received in the stream. 423 | """ 424 | merged_headers = {} 425 | if headers: 426 | merged_headers.update(headers) 427 | self._prepare_auth_headers(merged_headers) 428 | 429 | async with aconnect_sse(_async_httpx_client, method, endpoint, headers=merged_headers, 430 | content=content, data=data, files=files, json=json, params=params, 431 | **kwargs) as event_source: 432 | if not _check_stream_content_type(event_source.response): 433 | await event_source.response.aread() 434 | errors.raise_for_status(event_source.response) 435 | async for sse in event_source.aiter_sse(): 436 | errors.raise_for_status(sse) 437 | if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS: 438 | continue 439 | yield sse 440 | 441 | async def afeedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) \ 442 | -> models.FeedbackResponse: 443 | """ 444 | Submits feedback for a specific message. 445 | 446 | Args: 447 | message_id: The identifier of the message to submit feedback for. 448 | req: A `FeedbackRequest` object containing the feedback details, such as the rating. 449 | **kwargs: Extra keyword arguments to pass to the request function. 
450 | 451 | Returns: 452 | A `FeedbackResponse` object containing the result of the feedback submission. 453 | """ 454 | response = await self.arequest( 455 | self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), 456 | HTTPMethod.POST, 457 | json=req.model_dump(), 458 | **kwargs, 459 | ) 460 | return models.FeedbackResponse(**response.json()) 461 | 462 | async def asuggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) \ 463 | -> models.ChatSuggestResponse: 464 | """ 465 | Retrieves suggested messages based on a specific message. 466 | 467 | Args: 468 | message_id: The identifier of the message to get suggestions for. 469 | req: A `ChatSuggestRequest` object containing the request details. 470 | **kwargs: Extra keyword arguments to pass to the request function. 471 | 472 | Returns: 473 | A `ChatSuggestResponse` object containing suggested messages. 474 | """ 475 | response = await self.arequest( 476 | self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), 477 | HTTPMethod.GET, 478 | params=req.model_dump(), 479 | **kwargs, 480 | ) 481 | return models.ChatSuggestResponse(**response.json()) 482 | 483 | async def aupload_files(self, file: types.FileTypes, req: models.UploadFileRequest, **kwargs) \ 484 | -> models.UploadFileResponse: 485 | """ 486 | Uploads a file to be used in subsequent requests. 487 | 488 | Args: 489 | file: The file to upload. This can be a file-like object, or a tuple of 490 | (`filename`, file-like object, mime_type). 491 | req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. 492 | **kwargs: Extra keyword arguments to pass to the request function. 493 | 494 | Returns: 495 | An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. 496 | """ 497 | response = await self.arequest( 498 | self._prepare_url(ENDPOINT_FILES_UPLOAD), 499 | HTTPMethod.POST, 500 | data=req.model_dump(), 501 | files=[("file", file)], 502 | **kwargs, 503 | ) 504 | return models.UploadFileResponse(**response.json()) 505 | 506 | async def acompletion_messages(self, req: models.CompletionRequest, **kwargs) \ 507 | -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]: 508 | """ 509 | Sends a request to generate a completion or a series of completions based on the provided input. 510 | 511 | Returns: 512 | If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. 513 | If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing 514 | the stream of generated events. 
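
        Example (a minimal sketch of streaming consumption; assumes an
        `AsyncClient` named `async_client` and the `models` module imported
        from `dify_client`; the query text and user id are placeholders):

            req = models.CompletionRequest(
                inputs=models.CompletionInputs(query="Write a haiku about the sea"),
                response_mode=models.ResponseMode.STREAMING,
                user="user-123",
            )
            async for chunk in await async_client.acompletion_messages(req, timeout=60.):
                print(chunk)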
515 | """ 516 | if req.response_mode == models.ResponseMode.BLOCKING: 517 | return await self._acompletion_messages(req, **kwargs) 518 | if req.response_mode == models.ResponseMode.STREAMING: 519 | return self._acompletion_messages_stream(req, **kwargs) 520 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 521 | 522 | async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: 523 | response = await self.arequest( 524 | self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), 525 | HTTPMethod.POST, 526 | json=req.model_dump(), 527 | **kwargs, 528 | ) 529 | return models.CompletionResponse(**response.json()) 530 | 531 | async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ 532 | -> AsyncIterator[models.CompletionStreamResponse]: 533 | async for sse in self.arequest_stream( 534 | self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), 535 | HTTPMethod.POST, 536 | json=req.model_dump(), 537 | **kwargs): 538 | yield models.build_completion_stream_response(sse.json()) 539 | 540 | async def astop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 541 | """ 542 | Sends a request to stop a streaming completion task. 543 | 544 | Returns: 545 | A `StopResponse` object indicating the success of the operation. 546 | """ 547 | return await self._astop_stream( 548 | self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) 549 | 550 | async def achat_messages(self, req: models.ChatRequest, **kwargs) \ 551 | -> Union[models.ChatResponse, AsyncIterator[models.ChatStreamResponse]]: 552 | """ 553 | Sends a request to generate a chat message or a series of chat messages based on the provided input. 554 | 555 | Returns: 556 | If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. 557 | If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the 558 | stream of chat events. 559 | """ 560 | if req.response_mode == models.ResponseMode.BLOCKING: 561 | return await self._achat_messages(req, **kwargs) 562 | if req.response_mode == models.ResponseMode.STREAMING: 563 | return self._achat_messages_stream(req, **kwargs) 564 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 565 | 566 | async def _achat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: 567 | response = await self.arequest( 568 | self._prepare_url(ENDPOINT_CHAT_MESSAGES), 569 | HTTPMethod.POST, 570 | json=req.model_dump(), 571 | **kwargs, 572 | ) 573 | return models.ChatResponse(**response.json()) 574 | 575 | async def _achat_messages_stream(self, req: models.ChatRequest, **kwargs) \ 576 | -> AsyncIterator[models.ChatStreamResponse]: 577 | async for sse in self.arequest_stream( 578 | self._prepare_url(ENDPOINT_CHAT_MESSAGES), 579 | HTTPMethod.POST, 580 | json=req.model_dump(), 581 | **kwargs): 582 | yield models.build_chat_stream_response(sse.json()) 583 | 584 | async def astop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 585 | """ 586 | Sends a request to stop a streaming chat task. 587 | 588 | Returns: 589 | A `StopResponse` object indicating the success of the operation. 
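
        Example (a minimal sketch; assumes an `AsyncClient` named
        `async_client` and a `task_id` captured from a streamed
        `ChatStreamResponse` chunk; the user id is a placeholder):

            stop_req = models.StopRequest(user="user-123")
            stop_resp = await async_client.astop_chat_messages(task_id, stop_req)
            print(stop_resp.result)  # "success" once the task is stopped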
590 | """ 591 | return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) 592 | 593 | async def arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ 594 | -> Union[models.WorkflowsRunResponse, AsyncIterator[models.WorkflowsStreamResponse]]: 595 | """ 596 | Initiates the execution of a workflow, which can consist of multiple steps and actions. 597 | 598 | Returns: 599 | If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the 600 | completed workflow. 601 | If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects 602 | containing the stream of workflow events. 603 | """ 604 | if req.response_mode == models.ResponseMode.BLOCKING: 605 | return await self._arun_workflows(req, **kwargs) 606 | if req.response_mode == models.ResponseMode.STREAMING: 607 | return self._arun_workflows_stream(req, **kwargs) 608 | raise ValueError(f"Invalid request_mode: {req.response_mode}") 609 | 610 | async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: 611 | response = await self.arequest( 612 | self._prepare_url(ENDPOINT_RUN_WORKFLOWS), 613 | HTTPMethod.POST, 614 | json=req.model_dump(), 615 | **kwargs, 616 | ) 617 | return models.WorkflowsRunResponse(**response.json()) 618 | 619 | async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ 620 | -> AsyncIterator[models.WorkflowsRunStreamResponse]: 621 | async for sse in self.arequest_stream( 622 | self._prepare_url(ENDPOINT_RUN_WORKFLOWS), 623 | HTTPMethod.POST, 624 | json=req.model_dump(), 625 | **kwargs): 626 | yield models.build_workflows_stream_response(sse.json()) 627 | 628 | async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 629 | """ 630 | Sends a request to stop a streaming workflow task. 631 | 632 | Returns: 633 | A `StopResponse` object indicating the success of the operation. 
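
        Example (a minimal sketch; assumes an `AsyncClient` named
        `async_client`; the workflow inputs, user id, and the
        `need_to_cancel` predicate are placeholders):

            run_req = models.WorkflowsRunRequest(
                inputs={"topic": "oceans"},
                response_mode=models.ResponseMode.STREAMING,
                user="user-123",
            )
            async for chunk in await async_client.arun_workflows(run_req, timeout=60.):
                if need_to_cancel(chunk):
                    await async_client.astop_workflows(chunk.task_id, models.StopRequest(user="user-123"))
                    break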
634 | """ 635 | return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) 636 | 637 | async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: 638 | response = await self.arequest( 639 | endpoint, 640 | HTTPMethod.POST, 641 | json=req.model_dump(), 642 | **kwargs, 643 | ) 644 | return models.StopResponse(**response.json()) 645 | 646 | def _prepare_url(self, endpoint: str, **kwargs) -> str: 647 | return self.api_base + endpoint.format(**kwargs) 648 | 649 | def _prepare_auth_headers(self, headers: Dict[str, str]): 650 | if "authorization" not in (key.lower() for key in headers.keys()): 651 | headers["Authorization"] = f"Bearer {self.api_key}" 652 | 653 | 654 | def _get_content_type(headers: httpx.Headers) -> str: 655 | return headers.get("content-type", "").partition(";")[0] 656 | 657 | 658 | def _check_stream_content_type(response: httpx.Response) -> bool: 659 | content_type = _get_content_type(response.headers) 660 | return response.is_success and "text/event-stream" in content_type 661 | -------------------------------------------------------------------------------- /dify_client/errors.py: -------------------------------------------------------------------------------- 1 | from http import HTTPStatus 2 | from typing import Union 3 | 4 | import httpx 5 | import httpx_sse 6 | 7 | from dify_client import models 8 | 9 | 10 | class DifyAPIError(Exception): 11 | def __init__(self, status: int, code: str, message: str): 12 | super().__init__(f"status_code={status}, code={code}, {message}") 13 | self.status = status 14 | self.code = code 15 | self.message = message 16 | 17 | 18 | class DifyInvalidParam(DifyAPIError): 19 | pass 20 | 21 | 22 | class DifyNotChatApp(DifyAPIError): 23 | pass 24 | 25 | 26 | class DifyResourceNotFound(DifyAPIError): 27 | pass 28 | 29 | 30 | class DifyAppUnavailable(DifyAPIError): 31 | pass 32 | 33 | 34 | class DifyProviderNotInitialize(DifyAPIError): 35 | pass 36 | 37 | 38 | class DifyProviderQuotaExceeded(DifyAPIError): 39 | pass 40 | 41 | 42 | class DifyModelCurrentlyNotSupport(DifyAPIError): 43 | pass 44 | 45 | 46 | class DifyCompletionRequestError(DifyAPIError): 47 | pass 48 | 49 | 50 | class DifyInternalServerError(DifyAPIError): 51 | pass 52 | 53 | 54 | class DifyNoFileUploaded(DifyAPIError): 55 | pass 56 | 57 | 58 | class DifyTooManyFiles(DifyAPIError): 59 | pass 60 | 61 | 62 | class DifyUnsupportedPreview(DifyAPIError): 63 | pass 64 | 65 | 66 | class DifyUnsupportedEstimate(DifyAPIError): 67 | pass 68 | 69 | 70 | class DifyFileTooLarge(DifyAPIError): 71 | pass 72 | 73 | 74 | class DifyUnsupportedFileType(DifyAPIError): 75 | pass 76 | 77 | 78 | class DifyS3ConnectionFailed(DifyAPIError): 79 | pass 80 | 81 | 82 | class DifyS3PermissionDenied(DifyAPIError): 83 | pass 84 | 85 | 86 | class DifyS3FileTooLarge(DifyAPIError): 87 | pass 88 | 89 | 90 | SPEC_CODE_ERRORS = { 91 | # completion & chat & workflow 92 | "invalid_param": DifyInvalidParam, 93 | "not_chat_app": DifyNotChatApp, 94 | "app_unavailable": DifyAppUnavailable, 95 | "provider_not_initialize": DifyProviderNotInitialize, 96 | "provider_quota_exceeded": DifyProviderQuotaExceeded, 97 | "model_currently_not_support": DifyModelCurrentlyNotSupport, 98 | "completion_request_error": DifyCompletionRequestError, 99 | # files upload 100 | "no_file_uploaded": DifyNoFileUploaded, 101 | "too_many_files": DifyTooManyFiles, 102 | "unsupported_preview": DifyUnsupportedPreview, 103 | "unsupported_estimate": 
DifyUnsupportedEstimate,
104 |     "file_too_large": DifyFileTooLarge,
105 |     "unsupported_file_type": DifyUnsupportedFileType,
106 |     "s3_connection_failed": DifyS3ConnectionFailed,
107 |     "s3_permission_denied": DifyS3PermissionDenied,
108 |     "s3_file_too_large": DifyS3FileTooLarge,
109 | }
110 | 
111 | 
112 | def raise_for_status(response: Union[httpx.Response, httpx_sse.ServerSentEvent]):
113 |     if isinstance(response, httpx.Response):
114 |         if response.is_success:
115 |             return
116 |         json = response.json()
117 |         if "status" not in json:
118 |             json["status"] = response.status_code
119 |         details = models.ErrorResponse(**json)
120 |     elif isinstance(response, httpx_sse.ServerSentEvent):
121 |         if response.event != models.StreamEvent.ERROR.value:
122 |             return
123 |         details = models.ErrorStreamResponse(**response.json())
124 |     else:
125 |         raise ValueError(f"Invalid dify response type: {type(response)}")
126 | 
127 |     if details.status == HTTPStatus.NOT_FOUND:
128 |         raise DifyResourceNotFound(details.status, details.code, details.message)
129 |     elif details.status == HTTPStatus.INTERNAL_SERVER_ERROR:
130 |         raise DifyInternalServerError(details.status, details.code, details.message)
131 |     else:
132 |         raise SPEC_CODE_ERRORS.get(details.code, DifyAPIError)(details.status, details.code, details.message)
133 | 
--------------------------------------------------------------------------------
/dify_client/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .chat import *
2 | from .completion import *
3 | from .feedback import *
4 | from .file import *
5 | from .workflow import *
6 | from .stream import *
7 | from .base import StopRequest, StopResponse
8 | 
--------------------------------------------------------------------------------
/dify_client/models/base.py:
--------------------------------------------------------------------------------
1 | try:
2 |     from enum import StrEnum
3 | except ImportError:
4 |     from strenum import StrEnum
5 | from http import HTTPStatus
6 | from typing import Optional, List
7 | 
8 | from pydantic import BaseModel, ConfigDict
9 | 
10 | 
11 | class Mode(StrEnum):
12 |     CHAT = "chat"
13 |     COMPLETION = "completion"
14 |     ADVANCED_CHAT = "advanced-chat"
15 | 
16 | class ResponseMode(StrEnum):
17 |     STREAMING = 'streaming'
18 |     BLOCKING = 'blocking'
19 | 
20 | 
21 | class FileType(StrEnum):
22 |     IMAGE = "image"
23 | 
24 | 
25 | class TransferMethod(StrEnum):
26 |     REMOTE_URL = "remote_url"
27 |     LOCAL_FILE = "local_file"
28 | 
29 | 
30 | # Allows the entry of various variable values defined by the App.
31 | # The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and
32 | # each value being the specific value for that variable.
33 | # The text generation application requires at least one key/value pair.
34 | class CompletionInputs(BaseModel):
35 |     model_config = ConfigDict(extra='allow')
36 |     # Required. The input text, i.e. the content to be processed.
37 |     query: str
38 | 
39 | 
40 | class File(BaseModel):
41 |     type: FileType
42 |     transfer_method: TransferMethod
43 |     url: Optional[str] = None
44 |     # Uploaded file ID, which must be obtained by uploading through the File Upload API in advance
45 |     # (when the transfer method is local_file)
46 |     upload_file_id: Optional[str] = None
47 | 
48 | 
49 | class Usage(BaseModel):
50 |     prompt_tokens: int
51 |     completion_tokens: int
52 |     total_tokens: int
53 | 
54 |     prompt_unit_price: str
55 |     prompt_price_unit: str
56 |     prompt_price: str
57 |     completion_unit_price: str
58 |     completion_price_unit: str
59 |     completion_price: str
60 |     total_price: str
61 |     currency: str
62 | 
63 |     latency: float
64 | 
65 | 
66 | class RetrieverResource(BaseModel):
67 |     position: int
68 |     dataset_id: str
69 |     dataset_name: str
70 |     document_id: str
71 |     document_name: str
72 |     segment_id: str
73 |     score: float
74 |     content: str
75 | 
76 | 
77 | class Metadata(BaseModel):
78 |     usage: Usage
79 |     retriever_resources: List[RetrieverResource] = []
80 | 
81 | 
82 | class StopRequest(BaseModel):
83 |     user: str
84 | 
85 | 
86 | class StopResponse(BaseModel):
87 |     result: str  # success
88 | 
89 | 
90 | class ErrorResponse(BaseModel):
91 |     status: int = HTTPStatus.INTERNAL_SERVER_ERROR  # HTTP status code
92 |     code: str = ""
93 |     message: str = ""
94 | 
--------------------------------------------------------------------------------
/dify_client/models/chat.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Any
2 | 
3 | from pydantic import BaseModel, Field
4 | 
5 | from dify_client.models.base import ResponseMode, File
6 | from dify_client.models.completion import CompletionResponse
7 | 
8 | 
9 | class ChatRequest(BaseModel):
10 |     query: str
11 |     inputs: Dict[str, Any] = Field(default_factory=dict)
12 |     response_mode: ResponseMode
13 |     user: str
14 |     conversation_id: Optional[str] = ""
15 |     files: List[File] = []
16 |     auto_generate_name: bool = True
17 | 
18 | 
19 | class ChatResponse(CompletionResponse):
20 |     pass
21 | 
22 | 
23 | class ChatSuggestRequest(BaseModel):
24 |     user: str
25 | 
26 | 
27 | class ChatSuggestResponse(BaseModel):
28 |     result: str
29 |     data: List[str] = []
30 | 
--------------------------------------------------------------------------------
/dify_client/models/completion.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 | 
3 | from pydantic import BaseModel
4 | 
5 | from dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode
6 | 
7 | 
8 | class CompletionRequest(BaseModel):
9 |     inputs: CompletionInputs
10 |     response_mode: ResponseMode
11 |     user: str
12 |     conversation_id: Optional[str] = ""
13 |     files: List[File] = []
14 | 
15 | 
16 | class CompletionResponse(BaseModel):
17 |     message_id: str
18 |     conversation_id: Optional[str] = ""
19 |     mode: Mode
20 |     answer: str
21 |     metadata: Metadata
22 |     created_at: int  # unix timestamp seconds
23 | 
--------------------------------------------------------------------------------
/dify_client/models/feedback.py:
--------------------------------------------------------------------------------
1 | try:
2 |     from enum import StrEnum
3 | except ImportError:
4 |     from strenum import StrEnum
5 | from typing import Optional
6 | 
7 | from pydantic import BaseModel
8 | 
9 | 
10 | class Rating(StrEnum):
11 |     LIKE = "like"
12 |     DISLIKE = "dislike"
13 | 
14 | 
15 | class FeedbackRequest(BaseModel):
16 |     rating: Optional[Rating] = None
17 |     user: str
18 | 
19 | 20 | class FeedbackResponse(BaseModel): 21 | result: str # success 22 | -------------------------------------------------------------------------------- /dify_client/models/file.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class UploadFileRequest(BaseModel): 5 | user: str 6 | 7 | 8 | class UploadFileResponse(BaseModel): 9 | id: str 10 | name: str 11 | size: int 12 | extension: str 13 | mime_type: str 14 | created_by: str # created by user 15 | created_at: int # unix timestamp seconds 16 | -------------------------------------------------------------------------------- /dify_client/models/stream.py: -------------------------------------------------------------------------------- 1 | try: 2 | from enum import StrEnum 3 | except ImportError: 4 | from strenum import StrEnum 5 | from typing import Union, Optional, List 6 | 7 | from pydantic import BaseModel, ConfigDict, field_validator 8 | 9 | from dify_client import utils 10 | from dify_client.models.base import Metadata, ErrorResponse 11 | from dify_client.models.workflow import WorkflowStartedData, WorkflowFinishedData, NodeStartedData, NodeFinishedData 12 | 13 | STREAM_EVENT_KEY = "event" 14 | 15 | 16 | class StreamEvent(StrEnum): 17 | MESSAGE = "message" 18 | AGENT_MESSAGE = "agent_message" 19 | AGENT_THOUGHT = "agent_thought" 20 | MESSAGE_FILE = "message_file" # need to show file 21 | WORKFLOW_STARTED = "workflow_started" 22 | NODE_STARTED = "node_started" 23 | NODE_FINISHED = "node_finished" 24 | WORKFLOW_FINISHED = "workflow_finished" 25 | MESSAGE_END = "message_end" 26 | MESSAGE_REPLACE = "message_replace" 27 | ERROR = "error" 28 | PING = "ping" 29 | TTS_MESSAGE_END = "tts_message_end" 30 | 31 | @classmethod 32 | def new(cls, event: Union["StreamEvent", str]) -> "StreamEvent": 33 | if isinstance(event, cls): 34 | return event 35 | return utils.str_to_enum(cls, event) 36 | 37 | 38 | class StreamResponse(BaseModel): 39 | model_config = ConfigDict(extra='allow') 40 | 41 | event: StreamEvent 42 | task_id: Optional[str] = "" 43 | 44 | @field_validator("event", mode="before") 45 | def transform_stream_event(cls, event: Union[StreamEvent, str]) -> StreamEvent: 46 | return StreamEvent.new(event) 47 | 48 | 49 | class PingResponse(StreamResponse): 50 | pass 51 | 52 | 53 | class ErrorStreamResponse(StreamResponse, ErrorResponse): 54 | message_id: Optional[str] = "" 55 | 56 | 57 | class MessageStreamResponse(StreamResponse): 58 | message_id: str 59 | conversation_id: Optional[str] = "" 60 | answer: str 61 | created_at: int # unix timestamp seconds 62 | 63 | 64 | class MessageEndStreamResponse(StreamResponse): 65 | message_id: str 66 | conversation_id: Optional[str] = "" 67 | created_at: int # unix timestamp seconds 68 | metadata: Optional[Metadata] 69 | 70 | 71 | class MessageReplaceStreamResponse(MessageStreamResponse): 72 | pass 73 | 74 | 75 | class AgentMessageStreamResponse(MessageStreamResponse): 76 | pass 77 | 78 | 79 | class AgentThoughtStreamResponse(StreamResponse): 80 | id: str # agent thought id 81 | message_id: str 82 | conversation_id: str 83 | position: int # thought position, start from 1 84 | thought: str 85 | observation: str 86 | tool: str 87 | tool_input: str 88 | message_files: List[str] = [] 89 | created_at: int # unix timestamp seconds 90 | 91 | 92 | class MessageFileStreamResponse(StreamResponse): 93 | id: str # file id 94 | conversation_id: str 95 | type: str # only image 96 | belongs_to: str # assistant 97 | url: str 98 | 99 | 100 | 
class WorkflowsStreamResponse(StreamResponse): 101 | workflow_run_id: str 102 | data: Optional[Union[ 103 | WorkflowStartedData, 104 | WorkflowFinishedData, 105 | NodeStartedData, 106 | NodeFinishedData] 107 | ] 108 | 109 | 110 | class ChatWorkflowsStreamResponse(WorkflowsStreamResponse): 111 | message_id: str 112 | conversation_id: str 113 | created_at: int 114 | 115 | 116 | _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING = { 117 | StreamEvent.PING: PingResponse, 118 | StreamEvent.MESSAGE: MessageStreamResponse, 119 | StreamEvent.MESSAGE_END: MessageEndStreamResponse, 120 | StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, 121 | } 122 | 123 | CompletionStreamResponse = Union[ 124 | PingResponse, 125 | MessageStreamResponse, 126 | MessageEndStreamResponse, 127 | MessageReplaceStreamResponse, 128 | ] 129 | 130 | 131 | def build_completion_stream_response(data: dict) -> CompletionStreamResponse: 132 | event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) 133 | return _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) 134 | 135 | 136 | _CHAT_EVENT_TO_STREAM_RESP_MAPPING = { 137 | StreamEvent.PING: PingResponse, 138 | # chat 139 | StreamEvent.MESSAGE: MessageStreamResponse, 140 | StreamEvent.MESSAGE_END: MessageEndStreamResponse, 141 | StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, 142 | StreamEvent.MESSAGE_FILE: MessageFileStreamResponse, 143 | # agent 144 | StreamEvent.AGENT_MESSAGE: AgentMessageStreamResponse, 145 | StreamEvent.AGENT_THOUGHT: AgentThoughtStreamResponse, 146 | # workflow 147 | StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, 148 | StreamEvent.NODE_STARTED: WorkflowsStreamResponse, 149 | StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, 150 | StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, 151 | } 152 | 153 | ChatStreamResponse = Union[ 154 | PingResponse, 155 | MessageStreamResponse, 156 | MessageEndStreamResponse, 157 | MessageReplaceStreamResponse, 158 | MessageFileStreamResponse, 159 | AgentMessageStreamResponse, 160 | AgentThoughtStreamResponse, 161 | WorkflowsStreamResponse, 162 | ] 163 | 164 | 165 | def build_chat_stream_response(data: dict) -> ChatStreamResponse: 166 | event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) 167 | return _CHAT_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) 168 | 169 | 170 | _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING = { 171 | StreamEvent.PING: PingResponse, 172 | # workflow 173 | StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, 174 | StreamEvent.NODE_STARTED: WorkflowsStreamResponse, 175 | StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, 176 | StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, 177 | } 178 | 179 | WorkflowsRunStreamResponse = Union[ 180 | PingResponse, 181 | WorkflowsStreamResponse, 182 | ] 183 | 184 | 185 | def build_workflows_stream_response(data: dict) -> WorkflowsRunStreamResponse: 186 | event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) 187 | return _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) 188 | -------------------------------------------------------------------------------- /dify_client/models/workflow.py: -------------------------------------------------------------------------------- 1 | try: 2 | from enum import StrEnum 3 | except ImportError: 4 | from strenum import StrEnum 5 | from typing import Dict, List, Optional 6 | 7 | from pydantic import BaseModel 8 | 9 | from dify_client.models.base import ResponseMode, File 10 | 11 | 12 | class WorkflowStatus(StrEnum): 13 | RUNNING = "running" 14 | SUCCEEDED 
= "succeeded"
15 |     FAILED = "failed"
16 |     STOPPED = "stopped"
17 | 
18 | 
19 | class ExecutionMetadata(BaseModel):
20 |     total_tokens: Optional[int] = None
21 |     total_price: Optional[str] = None
22 |     currency: Optional[str] = None
23 | 
24 | 
25 | class WorkflowStartedData(BaseModel):
26 |     id: str  # workflow run id
27 |     workflow_id: str  # workflow id
28 |     sequence_number: int
29 |     inputs: Optional[dict] = None
30 |     created_at: int  # unix timestamp seconds
31 | 
32 | 
33 | class NodeStartedData(BaseModel):
34 |     id: str  # workflow run id
35 |     node_id: str
36 |     node_type: str
37 |     title: str
38 |     index: int
39 |     predecessor_node_id: Optional[str] = None
40 |     inputs: Optional[dict] = None
41 |     created_at: int
42 |     extras: dict = {}
43 | 
44 | 
45 | class NodeFinishedData(BaseModel):
46 |     id: str  # workflow run id
47 |     node_id: str
48 |     node_type: str
49 |     title: str
50 |     index: int
51 |     predecessor_node_id: Optional[str] = None
52 |     inputs: Optional[dict] = None
53 |     process_data: Optional[dict] = None
54 |     outputs: Optional[dict] = {}
55 |     status: WorkflowStatus
56 |     error: Optional[str] = None
57 |     elapsed_time: Optional[float] = None  # seconds
58 |     execution_metadata: Optional[ExecutionMetadata] = None
59 |     created_at: int
60 |     finished_at: int
61 |     files: List = []
62 | 
63 | 
64 | class WorkflowFinishedData(BaseModel):
65 |     id: str  # workflow run id
66 |     workflow_id: str  # workflow id
67 |     sequence_number: int
68 |     status: WorkflowStatus
69 |     outputs: Optional[dict] = None
70 |     error: Optional[str] = None
71 |     elapsed_time: Optional[float] = None
72 |     total_tokens: Optional[int] = None
73 |     total_steps: Optional[int] = 0
74 |     created_at: int
75 |     finished_at: int
76 |     created_by: dict = {}
77 |     files: List = []
78 | 
79 | 
80 | class WorkflowsRunRequest(BaseModel):
81 |     inputs: Dict = {}
82 |     response_mode: ResponseMode
83 |     user: str
84 |     conversation_id: Optional[str] = ""
85 |     files: List[File] = []
86 | 
87 | 
88 | class WorkflowsRunResponse(BaseModel):
89 |     log_id: str
90 |     task_id: str
91 |     data: WorkflowFinishedData
92 | 
--------------------------------------------------------------------------------
/dify_client/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from ._common import *
2 | 
--------------------------------------------------------------------------------
/dify_client/utils/_common.py:
--------------------------------------------------------------------------------
1 | def str_to_enum(str_enum_class, str_value: str, ignore_not_found: bool = False, enum_default=None):
2 |     for key, member in str_enum_class.__members__.items():
3 |         if str_value == member.value:
4 |             return member
5 |     if ignore_not_found:
6 |         return enum_default
7 |     raise ValueError(f"Invalid enum value: {str_value}")
8 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | 
3 | with open("README.md", "r", encoding="utf-8") as fh:
4 |     long_description = fh.read()
5 | 
6 | setup(
7 |     name="dify-client-python",
8 |     version="1.0.1",
9 |     author="haoyuhu",
10 |     author_email="im@huhaoyu.com",
11 |     description="A package for interacting with the Dify Service-API",
12 |     long_description=long_description,
13 |     long_description_content_type="text/markdown",
14 |     url="https://github.com/haoyuhu/dify-client-python",
15 |     license='MIT',
16 |     packages=find_packages(include=["dify_client", "dify_client.*"]),  # include the models/ and utils/ subpackages
17 |     classifiers=[
18 |         "Programming Language :: Python :: 3",
19 |         "License :: OSI Approved :: MIT License",
20 | 
"Operating System :: OS Independent", 21 | ], 22 | python_requires=">=3.7", 23 | install_requires=[ 24 | "httpx", 25 | "httpx-sse", 26 | "pydantic", 27 | "StrEnum", 28 | ], 29 | keywords='dify nlp ai language-processing', 30 | include_package_data=True, 31 | ) 32 | --------------------------------------------------------------------------------