├── .gitignore ├── LICENSE ├── README.md ├── setup.py └── wenxinworkshop ├── __init__.py ├── apis.py └── types.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into 
this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # VS Code 163 | .vscode/ 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # WenxinWorkshop Python SDK 2 | * A third-party Python SDK for a WenxinWorkshop. 3 | 4 | ## Quick Start 5 | * Install wenxinworkshop SDK 6 | 7 | ```bash 8 | $ pip install git+https://github.com/jm12138/WenxinWorkshop-Python-SDK 9 | ``` 10 | 11 | * Import wenxinworkshop SDK 12 | 13 | ```python 14 | from wenxinworkshop import LLMAPI, EmbeddingAPI, PromptTemplateAPI 15 | from wenxinworkshop import Message, Messages, Texts 16 | ``` 17 | 18 | * Set API key and Secret key 19 | 20 | ```python 21 | api_key = '...' 22 | secret_key = '...' 23 | ``` 24 | 25 | * LLM chat 26 | 27 | ```python 28 | # create a LLM API 29 | erniebot = LLMAPI( 30 | api_key=api_key, 31 | secret_key=secret_key, 32 | url=LLMAPI.ERNIEBot 33 | ) 34 | 35 | # create a message 36 | message = Message( 37 | role='user', 38 | content='你好!' 
39 | ) 40 | 41 | # create messages 42 | messages: Messages = [message] 43 | 44 | # get response from LLM API 45 | response = erniebot( 46 | messages=messages, 47 | temperature=None, 48 | top_p=None, 49 | penalty_score=None, 50 | stream=None, 51 | user_id=None, 52 | chunk_size=512 53 | ) 54 | 55 | # print response 56 | print(response) 57 | 58 | # get response stream from LLM API 59 | response_stream = erniebot( 60 | messages=messages, 61 | temperature=None, 62 | top_p=None, 63 | penalty_score=None, 64 | stream=True, 65 | user_id=None, 66 | chunk_size=512 67 | ) 68 | 69 | # print response stream 70 | for item in response_stream: 71 | print(item, end='') 72 | ``` 73 | 74 | * Embedding 75 | 76 | ```python 77 | # create a Embedding API 78 | ernieembedding = EmbeddingAPI( 79 | api_key=api_key, 80 | secret_key=secret_key, 81 | url=EmbeddingAPI.EmbeddingV1 82 | ) 83 | 84 | # create texts 85 | texts: Texts = [ 86 | '你好!', 87 | '你好吗?', 88 | '你是谁?' 89 | ] 90 | 91 | # get embeddings from Embedding API 92 | response = ernieembedding( 93 | texts=texts, 94 | user_id=None 95 | ) 96 | 97 | # print embeddings 98 | print(response) 99 | ``` 100 | 101 | * Get prompt template 102 | 103 | ```python 104 | # create a Prompt Template API 105 | prompttemplate = PromptTemplateAPI( 106 | api_key=api_key, 107 | secret_key=secret_key, 108 | url=PromptTemplateAPI.PromptTemplate 109 | ) 110 | 111 | # get prompt template from Prompt Template API 112 | response = prompttemplate( 113 | template_id=1968, 114 | content='侏罗纪世界' 115 | ) 116 | 117 | # print prompt template 118 | print(response) 119 | ``` 120 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from wenxinworkshop import __version__ 3 | 4 | 5 | setup( 6 | name='wenxinworkshop', 7 | version=__version__, 8 | description='A package for WenxinWorkshop.', 9 | 
long_description=open('README.md', encoding='UTF-8').read(), 10 | long_description_content_type='text/markdown', 11 | author='jm12138', 12 | author_email='2286040843@qq.com', 13 | url='https://github.com/jm12138/WenxinWorkshop-Python-SDK', 14 | packages=['wenxinworkshop'], 15 | license='Apache License 2.0', 16 | install_requires=[ 17 | 'requests', 18 | ] 19 | ) -------------------------------------------------------------------------------- /wenxinworkshop/__init__.py: -------------------------------------------------------------------------------- 1 | from .types import Texts, Messages, Embedding, Embeddings 2 | 3 | from .types import Message 4 | from .types import ChatUsage, ChatResponse 5 | from .types import AccessTokenResponse 6 | from .types import EmbeddingUsage, EmbeddingResponse, EmbeddingObject 7 | from .types import PromptTemplateResult, PromptTemplateResponse 8 | from .types import AIStudioChatUsage, AIStudioChatResult, AIStudioChatResponse 9 | from .types import AIStudioEmbeddingObject, AIStudioEmbeddingUsage 10 | from .types import AIStudioEmbeddingResult, AIStudioEmbeddingResponse 11 | 12 | from .apis import get_access_token 13 | from .apis import LLMAPI, EmbeddingAPI, PromptTemplateAPI 14 | from .apis import AIStudioLLMAPI, AIStudioEmbeddingAPI 15 | 16 | 17 | __all__ = [ 18 | "__version__", 19 | "Texts", 20 | "Message", 21 | "Messages", 22 | "Embedding", 23 | "Embeddings", 24 | "ChatResponse", 25 | "ChatUsage", 26 | "AccessTokenResponse", 27 | "PromptTemplateResult", 28 | "PromptTemplateResponse", 29 | "EmbeddingResponse", 30 | "EmbeddingUsage", 31 | "EmbeddingObject", 32 | "AIStudioChatUsage", 33 | "AIStudioChatResult", 34 | "AIStudioChatResponse", 35 | "AIStudioEmbeddingObject", 36 | "AIStudioEmbeddingUsage", 37 | "AIStudioEmbeddingResult", 38 | "AIStudioEmbeddingResponse", 39 | "get_access_token", 40 | "LLMAPI", 41 | "EmbeddingAPI", 42 | "PromptTemplateAPI", 43 | "AIStudioLLMAPI", 44 | "AIStudioEmbeddingAPI", 45 | ] 46 | 47 | 48 | __version__ = 
"0.3.0" 49 | -------------------------------------------------------------------------------- /wenxinworkshop/apis.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | from typing import Dict 5 | from typing import Optional, Generator, Union 6 | 7 | from .types import Messages, Embeddings, Texts 8 | 9 | from .types import Message 10 | from .types import ChatResponse 11 | from .types import EmbeddingResponse 12 | from .types import AccessTokenResponse 13 | from .types import PromptTemplateResponse 14 | from .types import AIStudioChatResponse 15 | from .types import AIStudioEmbeddingResponse 16 | 17 | 18 | __all__ = [ 19 | "get_access_token", 20 | "LLMAPI", 21 | "EmbeddingAPI", 22 | "PromptTemplateAPI", 23 | "AIStudioLLMAPI", 24 | "AIStudioEmbeddingAPI", 25 | ] 26 | 27 | 28 | """ 29 | APIs of Wenxin Workshop. 30 | """ 31 | 32 | 33 | def get_access_token(api_key: str, secret_key: str) -> str: 34 | """ 35 | Get access token from Baidu AI Cloud. 36 | 37 | Parameters 38 | ---------- 39 | api_key : str 40 | API key from Baidu AI Cloud. 41 | secret_key : str 42 | Secret key from Baidu AI Cloud. 43 | 44 | Returns 45 | ------- 46 | str 47 | Access token from Baidu AI Cloud. 48 | 49 | Raises 50 | ------ 51 | ValueError 52 | If request failed. Please check your API key and secret key. 53 | 54 | Examples 55 | -------- 56 | >>> from wenxinworkshop import get_access_token 57 | >>> api_key = '' 58 | >>> secret_key = '' 59 | >>> access_token = get_access_token( 60 | ... api_key=api_key, 61 | ... secret_key=secret_key 62 | ... 
) 63 | >>> print(access_token) 64 | 24.6b3b3f7b0b3b3f7b0b3b3f7b0b3b3f7b.2592000.1628041234.222222-44444444 65 | """ 66 | url = "https://aip.baidubce.com/oauth/2.0/token" 67 | 68 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 69 | 70 | params = { 71 | "grant_type": "client_credentials", 72 | "client_id": api_key, 73 | "client_secret": secret_key, 74 | } 75 | 76 | response = requests.request(method="POST", url=url, headers=headers, params=params) 77 | 78 | try: 79 | response_json: AccessTokenResponse = response.json() 80 | return response_json["access_token"] 81 | except: 82 | raise ValueError(response.text) 83 | 84 | 85 | class LLMAPI: 86 | """ 87 | LLM API. 88 | 89 | Attributes 90 | ---------- 91 | url : str 92 | URL of LLM API. 93 | 94 | access_token : str 95 | Access token from Baidu AI Cloud. 96 | 97 | ERNIEBot : str 98 | URL of ERNIEBot LLM API. 99 | 100 | ERNIEBot_turbo : str 101 | URL of ERNIEBot turbo LLM API. 102 | 103 | Methods 104 | ------- 105 | __init__( 106 | self, 107 | api_key: str, 108 | secret_key: str, 109 | url: str = LLMAPI.ERNIEBot 110 | ) -> None: 111 | Initialize LLM API. 112 | 113 | __call__( 114 | self, 115 | messages: Messages, 116 | temperature: Optional[float] = None, 117 | top_p: Optional[float] = None, 118 | penalty_score: Optional[float] = None, 119 | stream: Optional[bool] = None, 120 | user_id: Optional[str] = None, 121 | chunk_size: int = 512 122 | ) -> Union[str, Generator[str, None, None]]: 123 | Get response from LLM API. 124 | 125 | stream_response( 126 | response: requests.Response, 127 | chunk_size: int = 512 128 | ) -> Generator[str, None, None]: 129 | Stream response from LLM API. 
130 | """ 131 | 132 | ERNIEBot = ( 133 | "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" 134 | ) 135 | ERNIEBot_turbo = ( 136 | "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant" 137 | ) 138 | 139 | def __init__( 140 | self: "LLMAPI", api_key: str, secret_key: str, url: str = ERNIEBot 141 | ) -> None: 142 | """ 143 | Initialize LLM API. 144 | 145 | Parameters 146 | ---------- 147 | api_key : str 148 | API key from Baidu AI Cloud. 149 | 150 | secret_key : str 151 | Secret key from Baidu AI Cloud. 152 | 153 | url : Optional[str], optional 154 | URL of LLM API, by default LLMAPI.ERNIEBot. You can also use LLMAPI.ERNIEBot_turbo or other LLM API urls. 155 | 156 | Examples 157 | -------- 158 | >>> from wenxinworkshop import LLMAPI 159 | >>> api_key = '' 160 | >>> secret_key = '' 161 | >>> erniebot = LLMAPI( 162 | ... api_key=api_key, 163 | ... secret_key=secret_key, 164 | ... url=LLMAPI.ERNIEBot 165 | ... ) 166 | """ 167 | self.url = url 168 | self.access_token = get_access_token(api_key=api_key, secret_key=secret_key) 169 | 170 | def __call__( 171 | self: "LLMAPI", 172 | messages: Messages, 173 | temperature: Optional[float] = None, 174 | top_p: Optional[float] = None, 175 | penalty_score: Optional[float] = None, 176 | stream: Optional[bool] = None, 177 | user_id: Optional[str] = None, 178 | chunk_size: int = 512, 179 | ) -> Union[str, Generator[str, None, None]]: 180 | """ 181 | Get response from LLM API. 182 | 183 | Parameters 184 | ---------- 185 | messages : Messages 186 | Messages from user and assistant. 187 | 188 | temperature : Optional[float], optional 189 | Temperature of LLM API, by default None. 190 | 191 | top_p : Optional[float], optional 192 | Top p of LLM API, by default None. 193 | 194 | penalty_score : Optional[float], optional 195 | Penalty score of LLM API, by default None. 196 | 197 | stream : Optional[bool], optional 198 | Stream of LLM API, by default None. 
199 | 200 | user_id : Optional[str], optional 201 | User ID of LLM API, by default None. 202 | 203 | chunk_size : int, optional 204 | Chunk size of LLM API, by default 512. 205 | 206 | Returns 207 | ------- 208 | Union[str, Generator[str, None, None]] 209 | Response from LLM API. 210 | 211 | Raises 212 | ------ 213 | ValueError 214 | If request failed. Please check your API key and secret key. Or check the parameters. 215 | 216 | Examples 217 | -------- 218 | >>> message = Message( 219 | ... role='user', 220 | ... content='你好!' 221 | ... ) 222 | >>> messages: Messages = [message] 223 | 224 | >>> response = erniebot( 225 | ... messages=messages, 226 | ... temperature=None, 227 | ... top_p=None, 228 | ... penalty_score=None, 229 | ... stream=None, 230 | ... user_id=None, 231 | ... chunk_size=512 232 | ... ) 233 | 234 | >>> print(response) 235 | 你好,有什么可以帮助你的。 236 | 237 | >>> response_stream = erniebot( 238 | ... messages=messages, 239 | ... temperature=None, 240 | ... top_p=None, 241 | ... penalty_score=None, 242 | ... stream=True, 243 | ... user_id=None, 244 | ... chunk_size=512 245 | ... ) 246 | 247 | >>> for item in response_stream: 248 | ... 
print(item, end='') 249 | 你好,有什么可以帮助你的。 250 | """ 251 | headers = {"Content-Type": "application/json"} 252 | 253 | params = {"access_token": self.access_token} 254 | 255 | data = { 256 | "messages": messages, 257 | "temperature": temperature, 258 | "top_p": top_p, 259 | "penalty_score": penalty_score, 260 | "stream": stream, 261 | "user_id": user_id, 262 | } 263 | 264 | response = requests.request( 265 | method="POST", 266 | url=self.url, 267 | headers=headers, 268 | params=params, 269 | data=json.dumps(data), 270 | stream=stream, 271 | ) 272 | 273 | if stream: 274 | return self.stream_response(response=response, chunk_size=chunk_size) 275 | else: 276 | try: 277 | response_json: ChatResponse = response.json() 278 | return response_json["result"] 279 | except: 280 | raise ValueError(response.text) 281 | 282 | @staticmethod 283 | def stream_response( 284 | response: requests.Response, chunk_size: int = 512 285 | ) -> Generator[str, None, None]: 286 | """ 287 | Stream response from LLM API. 288 | 289 | Parameters 290 | ---------- 291 | response : requests.Response 292 | Response from LLM API. 293 | 294 | chunk_size : int, optional 295 | Chunk size of LLM API, by default 512. 296 | 297 | Yields 298 | ------- 299 | Generator[str, None, None] 300 | Response from LLM API. 301 | 302 | Raises 303 | ------ 304 | ValueError 305 | If request failed. Please check your API key and secret key. Or check the parameters. 306 | 307 | Examples 308 | -------- 309 | >>> stream_response = erniebot.stream_response( 310 | ... response=response, 311 | ... chunk_size=512 312 | ... ) 313 | 314 | >>> for item in stream_response: 315 | ... 
class EmbeddingAPI:
    """
    Client for the Wenxin Workshop Embedding API.

    Attributes
    ----------
    url : str
        URL of the Embedding API endpoint.

    access_token : str
        Access token obtained from Baidu AI Cloud.

    EmbeddingV1 : str
        Default URL of the Embedding V1 API.
    """

    EmbeddingV1 = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1"

    def __init__(
        self: "EmbeddingAPI", api_key: str, secret_key: str, url: str = EmbeddingV1
    ) -> None:
        """
        Initialize the Embedding API client.

        Parameters
        ----------
        api_key : str
            API key from Baidu AI Cloud.

        secret_key : str
            Secret key from Baidu AI Cloud.

        url : str, optional
            URL of the Embedding API, by default ``EmbeddingAPI.EmbeddingV1``.
            Other embedding endpoint URLs may be supplied instead.
        """
        self.url = url
        self.access_token = get_access_token(api_key=api_key, secret_key=secret_key)

    def __call__(
        self: "EmbeddingAPI", texts: Texts, user_id: Optional[str] = None
    ) -> Embeddings:
        """
        Request embeddings for a batch of texts.

        Parameters
        ----------
        texts : Texts
            Input texts to embed.

        user_id : Optional[str], optional
            End-user identifier forwarded to the API, by default None.

        Returns
        -------
        Embeddings
            One embedding vector per input text.

        Raises
        ------
        ValueError
            If the response carries no embedding data (bad credentials or
            parameters); the raw response text is used as the message.
        """
        response = requests.post(
            self.url,
            headers={"Content-Type": "application/json"},
            params={"access_token": self.access_token},
            data=json.dumps({"input": texts, "user_id": user_id}),
        )

        try:
            response_json: EmbeddingResponse = response.json()
            return [item["embedding"] for item in response_json["data"]]
        # Narrow handler instead of a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are not swallowed; the cause is chained.
        except (KeyError, TypeError, ValueError) as error:
            raise ValueError(response.text) from error


class PromptTemplateAPI:
    """
    Client for the Wenxin Workshop Prompt Template API.

    Attributes
    ----------
    url : str
        URL of the Prompt Template API endpoint.

    access_token : str
        Access token obtained from Baidu AI Cloud.

    PromptTemplate : str
        Default URL of the Prompt Template API.
    """

    PromptTemplate = (
        "https://aip.baidubce.com/rest/2.0/wenxinworkshop/api/v1/template/info"
    )

    def __init__(
        self: "PromptTemplateAPI",
        api_key: str,
        secret_key: str,
        url: str = PromptTemplate,
    ) -> None:
        """
        Initialize the Prompt Template API client.

        Parameters
        ----------
        api_key : str
            API key from Baidu AI Cloud.

        secret_key : str
            Secret key from Baidu AI Cloud.

        url : str, optional
            URL of the Prompt Template API, by default
            ``PromptTemplateAPI.PromptTemplate``.
        """
        self.url = url
        self.access_token = get_access_token(api_key=api_key, secret_key=secret_key)

    def __call__(self, template_id: int, **kwargs: str) -> str:
        """
        Render a stored prompt template.

        Parameters
        ----------
        template_id : int
            ID of the prompt template.

        **kwargs : str
            Values for the template's variables.

        Returns
        -------
        str
            The rendered template content.

        Raises
        ------
        ValueError
            If the response carries no template result (bad credentials or
            parameters); the raw response text is used as the message.
        """
        params: Dict[str, Union[str, int]] = {
            "access_token": self.access_token,
            "id": template_id,
            **kwargs,
        }

        response = requests.get(
            self.url, headers={"Content-Type": "application/json"}, params=params
        )

        try:
            response_json: PromptTemplateResponse = response.json()
            return response_json["result"]["content"]
        except (KeyError, TypeError, ValueError) as error:
            raise ValueError(response.text) from error


"""
APIs of AI Studio.
"""


class AIStudioLLMAPI:
    """
    Client for the AI Studio LLM (chat) API.

    Attributes
    ----------
    url : str
        URL of the chat completions endpoint.

    model : str
        Model name sent with each request.

    authorization : str
        Pre-built ``Authorization`` header value.

    ERNIEBot : str
        Default model name.
    """

    ERNIEBot = "ERNIE-Bot"

    def __init__(
        self: "AIStudioLLMAPI", user_id: str, access_token: str, model: str = ERNIEBot
    ) -> None:
        """
        Initialize the AI Studio LLM API client.

        Parameters
        ----------
        user_id : str
            AI Studio user ID.

        access_token : str
            AI Studio access token.

        model : str, optional
            Model name, by default ``AIStudioLLMAPI.ERNIEBot``.
        """
        self.url = "https://aistudio.baidu.com/llm/lmapi/api/v1/chat/completions"
        self.model = model
        self.authorization = "token {} {}".format(user_id, access_token)

    def __call__(
        self: "AIStudioLLMAPI",
        messages: Messages,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        penalty_score: Optional[float] = None,
    ) -> str:
        """
        Request a chat completion.

        Parameters
        ----------
        messages : Messages
            Conversation history, ending with the user's message.

        temperature : Optional[float], optional
            Sampling temperature, by default None.

        top_p : Optional[float], optional
            Nucleus-sampling threshold, by default None.

        penalty_score : Optional[float], optional
            Repetition penalty, by default None.

        Returns
        -------
        str
            The model's reply text.

        Raises
        ------
        ValueError
            If the response carries no chat result (bad credentials or
            parameters); the raw response text is used as the message.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": self.authorization,
            "SDK-Version": "0.0.2",
        }

        payload = {
            "model": self.model,
            "messages": messages,
            "temperature": temperature,
            "top_p": top_p,
            "penalty_score": penalty_score,
        }

        response = requests.post(self.url, headers=headers, data=json.dumps(payload))

        try:
            response_json: AIStudioChatResponse = response.json()
            return response_json["result"]["result"]
        except (KeyError, TypeError, ValueError) as error:
            raise ValueError(response.text) from error


class AIStudioEmbeddingAPI:
    """
    Client for the AI Studio Embedding API.

    Attributes
    ----------
    url : str
        URL of the embedding endpoint.

    authorization : str
        Pre-built ``Authorization`` header value.
    """

    def __init__(self: "AIStudioEmbeddingAPI", user_id: str, access_token: str) -> None:
        """
        Initialize the AI Studio Embedding API client.

        Parameters
        ----------
        user_id : str
            AI Studio user ID.

        access_token : str
            AI Studio access token.
        """
        self.url = "https://aistudio.baidu.com/llm/lmapi/api/v1/embedding"
        self.authorization = "token {} {}".format(user_id, access_token)

    def __call__(self: "AIStudioEmbeddingAPI", texts: Texts) -> Embeddings:
        """
        Request embeddings for a batch of texts.

        Parameters
        ----------
        texts : Texts
            Input texts to embed.

        Returns
        -------
        Embeddings
            One embedding vector per input text.

        Raises
        ------
        ValueError
            If the response carries no embedding data (bad credentials or
            parameters); the raw response text is used as the message.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": self.authorization,
            "SDK-Version": "0.0.2",
        }

        response = requests.post(
            self.url, headers=headers, data=json.dumps({"input": texts})
        )

        try:
            response_json: AIStudioEmbeddingResponse = response.json()
            return [item["embedding"] for item in response_json["result"]["data"]]
        except (KeyError, TypeError, ValueError) as error:
            raise ValueError(response.text) from error
if __name__ == "__main__":
    # Usage demonstrations for the Wenxin Workshop and AI Studio clients.
    # Fill in real credentials before running.
    api_key = ""
    secret_key = ""

    # --- LLM API -----------------------------------------------------------
    chat_client = LLMAPI(api_key=api_key, secret_key=secret_key, url=LLMAPI.ERNIEBot)
    chat_messages: Messages = [Message(role="user", content="你好!")]

    # Blocking call: the whole reply is returned at once.
    reply = chat_client(
        messages=chat_messages,
        temperature=None,
        top_p=None,
        penalty_score=None,
        stream=None,
        user_id=None,
        chunk_size=512,
    )
    print(reply)

    # Streaming call: the reply arrives chunk by chunk.
    reply_stream = chat_client(
        messages=chat_messages,
        temperature=None,
        top_p=None,
        penalty_score=None,
        stream=True,
        user_id=None,
        chunk_size=512,
    )
    for chunk in reply_stream:
        print(chunk, end="")

    # --- Embedding API -----------------------------------------------------
    embedding_client = EmbeddingAPI(
        api_key=api_key, secret_key=secret_key, url=EmbeddingAPI.EmbeddingV1
    )
    sample_texts: Texts = ["你好!", "你好吗?", "你是谁?"]
    print(embedding_client(texts=sample_texts, user_id=None))

    # --- Prompt Template API -----------------------------------------------
    template_client = PromptTemplateAPI(
        api_key=api_key, secret_key=secret_key, url=PromptTemplateAPI.PromptTemplate
    )
    print(template_client(template_id=1968, content="侏罗纪世界"))

    # --- AI Studio LLM API -------------------------------------------------
    user_id = ""
    access_token = ""

    studio_chat_client = AIStudioLLMAPI(
        user_id=user_id, access_token=access_token, model=AIStudioLLMAPI.ERNIEBot
    )
    studio_messages: Messages = [Message(role="user", content="你好!")]
    print(
        studio_chat_client(
            messages=studio_messages, temperature=None, top_p=None, penalty_score=None
        )
    )

    # --- AI Studio Embedding API -------------------------------------------
    studio_embedding_client = AIStudioEmbeddingAPI(
        user_id=user_id, access_token=access_token
    )
    print(studio_embedding_client(texts=["你好!", "你好吗?", "你是谁?"]))
from typing import List, Literal, TypedDict


__all__ = [
    "Texts",
    "Message",
    "Messages",
    "Embedding",
    "Embeddings",
    "ChatResponse",
    "ChatUsage",
    "AccessTokenResponse",
    "PromptTemplateResult",
    "PromptTemplateResponse",
    "EmbeddingResponse",
    "EmbeddingUsage",
    "EmbeddingObject",
    "AIStudioChatUsage",
    "AIStudioChatResult",
    "AIStudioChatResponse",
    "AIStudioEmbeddingObject",
    "AIStudioEmbeddingUsage",
    "AIStudioEmbeddingResult",
    "AIStudioEmbeddingResponse",
]


"""
Type definitions of Wenxin Workshop.
"""


class Message(TypedDict):
    """
    A single chat message.

    Attributes
    ----------
    role : Literal['user', 'assistant']
        Author of the message.

    content : str
        Text content of the message.
    """

    role: Literal["user", "assistant"]
    content: str


class ChatUsage(TypedDict):
    """
    Token accounting for a chat request.

    Attributes
    ----------
    prompt_tokens : int
        Tokens consumed by the prompt.

    completion_tokens : int
        Tokens produced in the completion.

    total_tokens : int
        Sum of prompt and completion tokens.
    """

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatResponse(TypedDict):
    """
    Chat API response payload.

    Attributes
    ----------
    id : str
        ID of the response.

    object : Literal['chat.completion']
        Object kind of the response.

    created : int
        Creation time of the response.

    sentence_id : int
        Sentence ID of the response (streaming).

    is_end : bool
        Whether this is the final chunk of the response.

    is_truncated : bool
        Whether the response was truncated.

    result : str
        Generated text.

    need_clear_history : bool
        Whether the conversation history should be cleared.

    ban_round : int
        Ban round of the response.

    usage : ChatUsage
        Token usage of the request.
    """

    id: str
    object: Literal["chat.completion"]
    created: int
    sentence_id: int
    is_end: bool
    is_truncated: bool
    result: str
    need_clear_history: bool
    ban_round: int
    usage: ChatUsage


class EmbeddingObject(TypedDict):
    """
    One embedding entry of an Embedding API response.

    Attributes
    ----------
    object : Literal['embedding']
        Object kind of the entry.

    embedding : Embedding
        The embedding vector.

    index : int
        Position of the corresponding input text.
    """

    object: Literal["embedding"]
    embedding: "Embedding"
    index: int


class EmbeddingUsage(TypedDict):
    """
    Token accounting for an embedding request.

    Attributes
    ----------
    prompt_tokens : int
        Tokens consumed by the inputs.

    total_tokens : int
        Total tokens of the request.
    """

    prompt_tokens: int
    total_tokens: int


class EmbeddingResponse(TypedDict):
    """
    Embedding API response payload.

    Attributes
    ----------
    id : str
        ID of the response.

    object : Literal['embedding_list']
        Object kind of the response.

    created : int
        Creation time of the response.

    data : List[EmbeddingObject]
        One entry per input text.

    usage : EmbeddingUsage
        Token usage of the request.
    """

    id: str
    object: Literal["embedding_list"]
    created: int
    data: List[EmbeddingObject]
    usage: EmbeddingUsage


class AccessTokenResponse(TypedDict):
    """
    OAuth access-token response payload.

    Attributes
    ----------
    refresh_token : str
        Token used to refresh the access token.

    expires_in : int
        Lifetime of the access token in seconds.

    session_key : str
        Session key of the grant.

    access_token : str
        The access token itself.

    scope : str
        Scope of the grant.

    session_secret : str
        Session secret of the grant.
    """

    refresh_token: str
    expires_in: int
    session_key: str
    access_token: str
    scope: str
    session_secret: str


class PromptTemplateResult(TypedDict):
    """
    Result part of a prompt-template response.

    Attributes
    ----------
    templateId : str
        ID of the template.

    templateName : str
        Name of the template.

    templateContent : str
        Raw template content with placeholders.

    templateVariables : str
        Variable names of the template.

    content : str
        Rendered content with variables substituted.
    """

    templateId: str
    templateName: str
    templateContent: str
    templateVariables: str
    content: str


class PromptTemplateResponse(TypedDict):
    """
    Prompt Template API response payload.

    Attributes
    ----------
    log_id : int
        Log ID of the request.

    result : PromptTemplateResult
        Rendered template result.
    """

    log_id: int
    result: PromptTemplateResult


"""
Type definitions of AI Studio.
"""


class AIStudioChatUsage(TypedDict):
    """
    Token accounting for an AI Studio chat request.

    Attributes
    ----------
    prompt_tokens : int
        Tokens consumed by the prompt.

    completion_tokens : int
        Tokens produced in the completion.

    total_tokens : int
        Sum of prompt and completion tokens.
    """

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class AIStudioChatResult(TypedDict):
    """
    Result part of an AI Studio chat response.

    Attributes
    ----------
    id : str
        ID of the chat.

    object : Literal['chat.completion']
        Object kind of the chat.

    created : int
        Creation time of the chat.

    result : str
        Generated text.

    need_clear_history : bool
        Whether the conversation history should be cleared.

    usage : AIStudioChatUsage
        Token usage of the request.
    """

    id: str
    object: Literal["chat.completion"]
    created: int
    result: str
    need_clear_history: bool
    usage: AIStudioChatUsage


class AIStudioChatResponse(TypedDict):
    """
    AI Studio Chat API response payload.

    Attributes
    ----------
    logId : str
        Log ID of the request.

    errorCode : int
        Error code (0 on success).

    errorMsg : str
        Error message accompanying the code.

    result : AIStudioChatResult
        Chat result.
    """

    logId: str
    errorCode: int
    errorMsg: str
    result: AIStudioChatResult


class AIStudioEmbeddingObject(TypedDict):
    """
    One embedding entry of an AI Studio embedding response.

    Attributes
    ----------
    object : Literal['embedding']
        Object kind of the entry.

    embedding : Embedding
        The embedding vector.

    index : int
        Position of the corresponding input text.
    """

    object: Literal["embedding"]
    embedding: "Embedding"
    index: int


class AIStudioEmbeddingUsage(TypedDict):
    """
    Token accounting for an AI Studio embedding request.

    Attributes
    ----------
    prompt_tokens : int
        Tokens consumed by the inputs.

    total_tokens : int
        Total tokens of the request.
    """

    prompt_tokens: int
    total_tokens: int


class AIStudioEmbeddingResult(TypedDict):
    """
    Result part of an AI Studio embedding response.

    Attributes
    ----------
    id : str
        ID of the embedding request.

    object : Literal['embedding_list']
        Object kind of the result.

    created : int
        Creation time of the result.

    data : List[AIStudioEmbeddingObject]
        One entry per input text.

    usage : AIStudioEmbeddingUsage
        Token usage of the request.
    """

    id: str
    object: Literal["embedding_list"]
    created: int
    # Fixed: previously annotated List[EmbeddingObject]; the AI Studio
    # response carries AIStudioEmbeddingObject entries (structurally
    # identical, so this is backward compatible).
    data: List[AIStudioEmbeddingObject]
    usage: AIStudioEmbeddingUsage


class AIStudioEmbeddingResponse(TypedDict):
    """
    AI Studio Embedding API response payload.

    Attributes
    ----------
    logId : str
        Log ID of the request.

    errorCode : int
        Error code (0 on success).

    errorMsg : str
        Error message accompanying the code.

    result : AIStudioEmbeddingResult
        Embedding result.
    """

    logId: str
    errorCode: int
    errorMsg: str
    result: AIStudioEmbeddingResult


"""
Type aliases.
"""
# A List of str
Texts = List[str]

# A List of float
Embedding = List[float]

# A List of Embedding
Embeddings = List[Embedding]

# A List of Message
Messages = List[Message]