├── .github └── workflows │ └── dependencies_lint.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── example_client.ipynb ├── invoker.png ├── invoker ├── api_types.py ├── model.py └── utils │ └── enum_tags.py ├── requirements-dev.txt ├── requirements.txt └── server_fastapi.py /.github/workflows/dependencies_lint.yml: -------------------------------------------------------------------------------- 1 | name: Dependencies and Lint 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | dependencies-lint: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | python-version: ["3.10", "3.11"] 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install -r requirements.txt 27 | - name: Lint 28 | run: | 29 | pip install -r requirements-dev.txt 30 | - uses: actions/cache@v3.3.2 31 | with: 32 | path: ~/.cache/pre-commit 33 | key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} 34 | - name: Run pre-commit 35 | run: pre-commit run --all-files -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_language_version: 2 | python: python3 3 | 4 | repos: 5 | - repo: https://github.com/psf/black 6 | rev: 22.12.0 7 | hooks: 8 | - id: black 9 | name: Format code 10 | args: [--line-length=120] 11 | additional_dependencies: ['click==8.0.2'] 12 | - repo: https://github.com/PyCQA/isort 13 | rev: 5.12.0 14 | hooks: 15 | - id: isort 16 | name: Format imports 17 | args: ["--profile", "black", "--filter-files"] 18 | - repo: https://github.com/PyCQA/flake8 19 | rev: 6.1.0 20 | hooks: 21 | - id: flake8 22 | args: [--max-line-length=120, "--ignore=E203,W503"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Invoker 2 | 3 |
4 | 5 | 6 | 7 | ***The one who calls upon... Functions!*** 8 | 9 |
10 | 11 | Invoker is a suite of large language models based on Llama-2 and is finetuned to plan between calling functions and providing responses directly. Currently, we have released the 13B version and there are plans for the 7B and 34B versions to be trained and released in the future. 12 | 13 | ## News 14 | 15 | - [2023/09] We released **Invoker-13B-GPTQ**, which is a 4-bit quantized GPTQ implementation of Invoker-13B. Download [weights](https://huggingface.co/jeffrey-fong/invoker-13b-GPTQ). We also added ExllamaV2 integration! 16 | - [2023/09] We released **Invoker-13B**, a model trained on function-calling and multi-turn conversation datasets. Download [weights](https://huggingface.co/jeffrey-fong/invoker-13b) 17 | 18 | ## Installation & Usage 19 | 20 | The usage of Invoker follows exactly like OpenAI's function calling. Simply install the required dependencies: 21 | 22 | ```shell 23 | pip install -r requirements.txt 24 | ``` 25 | 26 | #### Launching the Server 27 | 28 | Kick-start the FastAPI server. You can indicate the model details via environment variables. The Invoker server currently supports 2 different ways to load the model. If you would like to load the full fp16 model using HuggingFace transformers, run the following commands: 29 | 30 | ```shell 31 | export INVOKER_MODEL_TYPE=hf 32 | export INVOKER_MODEL_NAME_OR_PATH=jeffrey-fong/invoker-13b 33 | uvicorn server_fastapi:app 34 | ``` 35 | 36 | If you would like to load 4-bit quantized Invoker GPTQ models using [ExLlamaV2](https://github.com/turboderp/exllamav2), clone the [model repository](https://huggingface.co/jeffrey-fong/invoker-13b-GPTQ) into your local machine. Then, run the following commands: 37 | 38 | ```shell 39 | export INVOKER_MODEL_TYPE=exllamav2 40 | export INVOKER_MODEL_NAME_OR_PATH=path_to_downloaded_invoker-13b-GPTQ-model_dir 41 | uvicorn server_fastapi:app 42 | ``` 43 | 44 | The full list of models are indicated [here](#download). 45 | 46 | #### Inference 47 | 48 | Inference can then be performed exactly like OpenAI function-calling. Provide the chat and the functions in the `messages` and `functions` arguments respectively. Invoker also supports the following generation hyperparameters: 49 | 50 | - `temperature: float = 0.5` Accepts values between 0.0 and 1.0. Defaults to 0.5 if the temperature is not passed in. 51 | - `top_p: float = 1.0` Accepts values between 0.0 and 1.0. Defaults to 1.0 if the top_p is not passed in. 52 | 53 | ```python 54 | import openai 55 | 56 | openai.api_base = "http://localhost:8000" 57 | openai.api_key = "test" 58 | 59 | messages = [{"role": "user", "content": "Can you check what is the time in Singapore?"}] 60 | response = openai.ChatCompletion.create( 61 | model="jeffrey-fong/invoker-13b", 62 | messages=messages, 63 | functions=[ 64 | { 65 | "name": "get_time", 66 | "description": "Get the current time", 67 | "parameters": { 68 | "type": "object", 69 | "properties": { 70 | "location": { 71 | "type": "string", 72 | "description": "The city and state, e.g. New York City, NY" 73 | }, 74 | "format": { 75 | "type": "string", 76 | "enum": ["12-hour", "24-hour"] 77 | } 78 | }, 79 | "required": ["location"] 80 | } 81 | } 82 | ] 83 | ) 84 | response_message = response["choices"][0]["message"] 85 | ``` 86 | 87 | The model can choose to call a function; if so, the content will be a stringified JSON object indicating a function call with the function name and arguments generated by the model (note: the model may generate invalid JSON or hallucinate parameters). To allow the model to summarize the results of the function response, parse the string into JSON in your code, and call your function with the provided arguments if they exist. Perform another inference with the model after appending the function response as a new message. 88 | 89 | Using the above example again, 90 | 91 | ```python 92 | if response_message.get("function_call"): 93 | available_functions = {"get_time": get_time} 94 | function_name = response_message["function_call"]["name"] 95 | function_to_call = available_functions[function_name] 96 | function_args = json.loads(response_message["function_call"]["arguments"]) 97 | function_response = function_to_call( 98 | location=function_args.get("location"), 99 | unit=function_args.get("format"), 100 | ) 101 | messages.append(response_message) 102 | messages.append( 103 | { 104 | "role": "function", 105 | "name": function_name, 106 | "content": function_response, 107 | } 108 | ) 109 | second_response = openai.ChatCompletion.create( 110 | model="jeffrey-fong/invoker-13b", 111 | messages=messages, 112 | ) 113 | print(second_response["choices"][0]["message"]) 114 | ``` 115 | 116 | Refer to the example client code [here](example_client.ipynb) for a more detailed example. 117 | 118 | #### Using the model directly 119 | Please refer to the model card in HuggingFace to see how to use the model directly, including the prompt format, etc. 120 | 121 | #### Model Download 122 | | Model | Link | Version | 123 | | ------------- | ------------- |------------- | 124 | | Invoker-13B | [Huggingface Repo](https://huggingface.co/jeffrey-fong/invoker-13b) |v1.0| 125 | | Invoker-13B-GPTQ | [Huggingface Repo](https://huggingface.co/jeffrey-fong/invoker-13b-GPTQ) |v1.0| 126 | | Invoker-7B | Coming Soon |v1.0| 127 | | Invoker-34B | Coming Soon |v1.0| 128 | 129 | ## Training 130 | 131 | Training was performed using QLora which significantly reduces the computational resources required to train the models. Similar to [FastChat](https://github.com/lm-sys/FastChat), we only consider the gradients for the assistant responses when computing the loss for backpropagation and ignore all other outputs and responses. 132 | 133 | We accelerated training with DeepSpeed Zero Stage 2 for fast data parallelism. QLora is currently not compatible with DeepSpeed Zero Stage 3 which shards the model into multiple GPUs. 134 | 135 | Training code will released in the future. 136 | 137 | #### Training hyperparameters 138 | | Hyperparameter | Value | 139 | | ------------- | ------------- | 140 | | Total batch size | 192 | 141 | | Epochs | 2 | 142 | | Learning rate | 2e-05 | 143 | | Lora r | 64 | 144 | | Lora alpha | 16 | 145 | | Lora dropout | 0.05 | 146 | | Weight decay | 0.0 | 147 | | Warmup ratio | 0.03 | 148 | 149 | #### Training Data 150 | 151 | We use a variety of sources when building our training dataset. All the datasets are carefully chosen to improve both the conversational and function-calling capability of the model. 152 | 153 | - [ToolBench](https://github.com/OpenBMB/ToolBench) (0830 updated) 154 | ToolBench is an open-source, large-scale and high quality instruction tuning SFT dataset to facilitate the training of LLMs with general tool-use capability. It consists of multi-turn conversations where the assistant, who is presented with several potential functions to call, will call one or multiple functions before returning its response to the user. We had undergone rigorous cleaning of the data where we 155 | 156 | 1. Removed all datapoints that do not end with the assistant returning a summarized response 157 | 2. Cleaned datapoints with unnecessary calls to the same function 158 | 3. Changed all function names and descriptions to not include the domain name, so the functions feels more generic 159 | 160 | - [ShareGPT-34K](https://huggingface.co/datasets/ehartford/wizard_vicuna_70k_unfiltered) 161 | ShareGPT-34K is a filtered dataset containing high quality multi-turn conversations between a user and an assistant. Some of the assistant responses are generated from OpenAI's GPT-3.5-Turbo while some are from GPT-4. 162 | 163 | - [OASST1](https://huggingface.co/datasets/HuggingFaceH4/oasst1_en) 164 | OASST1 is a human-generated and human-annotated assistant-style conversation corpus. We filtered out the conversations in English. 165 | 166 | All the datasets used are under Apache-2.0 License. Therefore, this dataset will also be under the same license. 167 | 168 | ## To-Dos 169 | 170 | - [X] Quantize 13B model 171 | - [X] Work on GPTQ-based servers ([ExLlama](https://github.com/turboderp/exllama) and/or [ExLlamaV2](https://github.com/turboderp/exllamav2)) 172 | - [ ] Work on validating function names, descriptions, etc. Just like OpenAI's function calling 173 | - [ ] Converting Invoker to other formats like: 174 | - [ ] GGUF 175 | - [ ] AWQ 176 | - [ ] Train 7B Llama-2 model and 34B CodeLlama model 177 | - [ ] Investigate ways to evaluate function calling 178 | 179 | ## Citation 180 | 181 | If this work is helpful, please kindly cite as: 182 | 183 | ```bibtex 184 | @Misc{invoker-function-calling, 185 | title = {Invoker: The one who calls upon functions - Function-Calling Language Model}, 186 | author = {jeffrey-fong}, 187 | howpublished = {\url{https://github.com/jeffrey-fong/Invoker}}, 188 | year = {2023} 189 | } 190 | ``` 191 | -------------------------------------------------------------------------------- /example_client.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import json\n", 10 | "import openai\n", 11 | "\n", 12 | "openai.api_base = \"http://localhost:8000\"\n", 13 | "openai.api_key = \"test\"" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "# Define functions\n", 23 | "from datetime import datetime\n", 24 | "\n", 25 | "def get_time(location, format=\"12-hour\"):\n", 26 | " times = {\"Singapore\": \"08:00\", \"London\": \"00:00\", \"New York\": \"20:00\", \"Beijing\": \"08:00\"}\n", 27 | " return datetime.strptime(times[location], \"%H:%M\").strftime(\"%r\") if format == \"12-hour\" else times[location]" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 3, 33 | "metadata": {}, 34 | "outputs": [ 35 | { 36 | "data": { 37 | "text/plain": [ 38 | " JSON: {\n", 39 | " \"id\": \"e2322c58-6aa6-4539-8cbc-e5219b67befe\",\n", 40 | " \"object\": \"chat.completion\",\n", 41 | " \"created\": 1695652179,\n", 42 | " \"choices\": [\n", 43 | " {\n", 44 | " \"message\": {\n", 45 | " \"role\": \"assistant\",\n", 46 | " \"content\": null,\n", 47 | " \"name\": null,\n", 48 | " \"function_call\": {\n", 49 | " \"name\": \"get_time\",\n", 50 | " \"arguments\": \"{\\\"location\\\": \\\"Singapore\\\", \\\"format\\\": \\\"12-hour\\\"}\"\n", 51 | " }\n", 52 | " },\n", 53 | " \"finish_reason\": \"function_call\"\n", 54 | " }\n", 55 | " ]\n", 56 | "}" 57 | ] 58 | }, 59 | "execution_count": 3, 60 | "metadata": {}, 61 | "output_type": "execute_result" 62 | } 63 | ], 64 | "source": [ 65 | "# Call the model the first time with the functions\n", 66 | "messages = [{\"role\": \"user\", \"content\": \"Can you check what is the time in Singapore?\"}]\n", 67 | "response = openai.ChatCompletion.create(\n", 68 | " model=\"jeffrey-fong/invoker-13b\",\n", 69 | " messages=messages,\n", 70 | " functions=[\n", 71 | " {\n", 72 | " \"name\": \"get_time\",\n", 73 | " \"description\": \"Get the current time\",\n", 74 | " \"parameters\": {\n", 75 | " \"type\": \"object\",\n", 76 | " \"properties\": {\n", 77 | " \"location\": {\n", 78 | " \"type\": \"string\",\n", 79 | " \"description\": \"The city and state, e.g. New York City, NY\"\n", 80 | " },\n", 81 | " \"format\": {\n", 82 | " \"type\": \"string\",\n", 83 | " \"enum\": [\"12-hour\", \"24-hour\"]\n", 84 | " }\n", 85 | " },\n", 86 | " \"required\": [\"location\"]\n", 87 | " }\n", 88 | " }\n", 89 | " ]\n", 90 | ")\n", 91 | "response" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 8, 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "data": { 101 | "text/plain": [ 102 | " JSON: {\n", 103 | " \"id\": \"912bec9a-d3c6-473f-8161-702b45d393e2\",\n", 104 | " \"object\": \"chat.completion\",\n", 105 | " \"created\": 1695652744,\n", 106 | " \"choices\": [\n", 107 | " {\n", 108 | " \"message\": {\n", 109 | " \"role\": \"assistant\",\n", 110 | " \"content\": \"The time in Singapore is 08:00 AM.\",\n", 111 | " \"name\": null,\n", 112 | " \"function_call\": null\n", 113 | " },\n", 114 | " \"finish_reason\": \"stop\"\n", 115 | " }\n", 116 | " ]\n", 117 | "}" 118 | ] 119 | }, 120 | "execution_count": 8, 121 | "metadata": {}, 122 | "output_type": "execute_result" 123 | } 124 | ], 125 | "source": [ 126 | "response_message = response[\"choices\"][0][\"message\"]\n", 127 | "# Call the function indicated by the model if the model generates a function call\n", 128 | "# Append the messages and call the model again to get a summarized assistant response\n", 129 | "if response_message.get(\"function_call\"):\n", 130 | " available_functions = {\"get_time\": get_time}\n", 131 | " function_name = response_message[\"function_call\"][\"name\"]\n", 132 | " function_to_call = available_functions[function_name]\n", 133 | " function_args = json.loads(response_message[\"function_call\"][\"arguments\"])\n", 134 | " function_response = function_to_call(\n", 135 | " location=function_args.get(\"location\"),\n", 136 | " format=function_args.get(\"format\"),\n", 137 | " )\n", 138 | " messages.append(response_message)\n", 139 | " messages.append(\n", 140 | " {\n", 141 | " \"role\": \"function\",\n", 142 | " \"name\": function_name,\n", 143 | " \"content\": function_response,\n", 144 | " }\n", 145 | " )\n", 146 | " second_response = openai.ChatCompletion.create(\n", 147 | " model=\"jeffrey-fong/invoker-13b\",\n", 148 | " messages=messages,\n", 149 | " )\n", 150 | "second_response" 151 | ] 152 | } 153 | ], 154 | "metadata": { 155 | "kernelspec": { 156 | "display_name": "invoker", 157 | "language": "python", 158 | "name": "python3" 159 | }, 160 | "language_info": { 161 | "codemirror_mode": { 162 | "name": "ipython", 163 | "version": 3 164 | }, 165 | "file_extension": ".py", 166 | "mimetype": "text/x-python", 167 | "name": "python", 168 | "nbconvert_exporter": "python", 169 | "pygments_lexer": "ipython3", 170 | "version": "3.10.12" 171 | }, 172 | "orig_nbformat": 4 173 | }, 174 | "nbformat": 4, 175 | "nbformat_minor": 2 176 | } 177 | -------------------------------------------------------------------------------- /invoker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jeffrey-fong/Invoker/a0ea16da21d15c994874440b92bc4023bdbab4bc/invoker.png -------------------------------------------------------------------------------- /invoker/api_types.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from pydantic import BaseModel 4 | 5 | 6 | class FunctionCall(BaseModel): 7 | name: Optional[str] = None 8 | arguments: Optional[str] = None 9 | 10 | 11 | class Parameters(BaseModel): 12 | type: str = "object" 13 | properties: dict 14 | required: list 15 | 16 | 17 | class Message(BaseModel): 18 | role: Optional[str] = None 19 | content: Optional[str] = None 20 | name: Optional[str] = None 21 | function_call: Optional[FunctionCall] = None 22 | 23 | 24 | class Function(BaseModel): 25 | name: str 26 | description: str 27 | parameters: Parameters 28 | 29 | 30 | class ChatInput(BaseModel): 31 | model: str 32 | messages: List[Message] 33 | functions: Optional[List[Function]] = None 34 | temperature: float = 0.5 35 | top_p: float = 1.0 36 | stream: bool = False 37 | 38 | 39 | class Choice(BaseModel): 40 | message: Message 41 | finish_reason: str = "stop" 42 | 43 | 44 | class StreamChoice(BaseModel): 45 | delta: Message 46 | finish_reason: Optional[str] 47 | 48 | 49 | class ChatOutput(BaseModel): 50 | id: str 51 | object: str = "chat.completion" 52 | created: int 53 | choices: List[Choice] 54 | 55 | 56 | class ChatStreamOutput(BaseModel): 57 | id: str 58 | object: str = "chat.completion.chunk" 59 | created: int 60 | choices: List[StreamChoice] 61 | -------------------------------------------------------------------------------- /invoker/model.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | import re 5 | from typing import Any, Dict, Generator, List, Optional 6 | 7 | import torch 8 | from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config, ExLlamaV2Tokenizer 9 | from exllamav2.generator import ( 10 | ExLlamaV2BaseGenerator, 11 | ExLlamaV2Sampler, 12 | ExLlamaV2StreamingGenerator, 13 | ) 14 | from transformers import LlamaForCausalLM, LlamaTokenizer 15 | from transformers.generation.logits_process import ( 16 | LogitsProcessorList, 17 | TemperatureLogitsWarper, 18 | TopPLogitsWarper, 19 | ) 20 | 21 | from invoker.api_types import Function, Message 22 | from invoker.utils.enum_tags import ModelType 23 | 24 | 25 | class InvokerPipeline: 26 | # Singleton instance 27 | _pipeline = None 28 | 29 | def __init__(self, model_path: str, model_type: ModelType): 30 | # Load model 31 | self._model_type = model_type 32 | if model_type == ModelType.exllamav2: 33 | config = ExLlamaV2Config() 34 | config.model_dir = model_path 35 | config.prepare() 36 | model = ExLlamaV2(config) 37 | model.load() 38 | self._tokenizer = ExLlamaV2Tokenizer(config) 39 | cache = ExLlamaV2Cache(model) 40 | self._generator = ExLlamaV2BaseGenerator(model, cache, self._tokenizer) 41 | self._generator.warmup() 42 | self._stream_generator = ExLlamaV2StreamingGenerator(model, cache, self._tokenizer) 43 | self._stream_generator.warmup() 44 | self._settings = ExLlamaV2Sampler.Settings() 45 | self._settings.token_repetition_penalty = 1.0 46 | else: 47 | self._tokenizer = LlamaTokenizer.from_pretrained(model_path, use_fast=False) 48 | self._model = LlamaForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, device_map="auto") 49 | self._max_new_tokens = 512 50 | 51 | def format_message(self, messages: List[Message], functions: Optional[List[Function]]): 52 | prompt = "Available Functions:" 53 | if functions is not None: 54 | for function in functions: 55 | prompt += f"\n```json\n{json.dumps(function.model_dump(mode='json'))}\n```" 56 | else: 57 | prompt += "\nNone" 58 | prompt += ( 59 | "\n\nA chat between a curious user and an artificial intelligence assistant. " 60 | "The assistant gives helpful, detailed, and polite answers to the user's questions. " 61 | "The assistant calls functions with appropriate input when necessary." 62 | ) 63 | for message in messages: 64 | if message.role == "assistant": 65 | prompt += f"\n{message.role.upper()}: ```" + "{" 66 | if message.content is None: 67 | prompt += ( 68 | '"content": null, "function_call": {' 69 | + f'"name": "{message.function_call.name}", "arguments": {message.function_call.arguments}' 70 | + "}```" 71 | ) 72 | else: 73 | prompt += f'"content": {message.content}' + ', "function_call": None}```' 74 | elif message.role == "function": 75 | prompt += ( 76 | f"\n{message.role.upper()}: ```" 77 | + "{" 78 | + f'"name": "{message.name}", "content": {message.content}' 79 | + "}```" 80 | ) 81 | else: 82 | prompt += f"\n{message.role.upper()}: {message.content}" 83 | prompt += "\nASSISTANT:" 84 | return prompt 85 | 86 | def generate(self, input_text: str, params: Dict[str, Any]) -> str: 87 | temperature, top_p = params.get("temperature"), params.get("top_p") 88 | if self._model_type == ModelType.exllamav2: 89 | self._settings.temperature, self._settings.top_p = temperature, top_p 90 | raw_output = self._generator.generate_simple(input_text, self._settings, num_tokens=self._max_new_tokens) 91 | else: 92 | input_ids = self._tokenizer(input_text, return_tensors="pt").input_ids.cuda() 93 | do_sample = True if temperature > 0.0 else False 94 | output_ids = self._model.generate( 95 | input_ids=input_ids, 96 | max_new_tokens=self._max_new_tokens, 97 | do_sample=do_sample, 98 | top_p=top_p, 99 | temperature=temperature, 100 | ) 101 | raw_output = self._tokenizer.decode(output_ids[0], skip_special_tokens=True) 102 | output = raw_output[len(input_text) :] 103 | choices = self._postprocess(text=output) 104 | return choices 105 | 106 | def generate_stream(self, input_text: str, params: Dict[str, Any]) -> Generator[str]: 107 | temperature, top_p = params.get("temperature"), params.get("top_p") 108 | self._curr_response, self._response_type, self._finish_reason = "", None, None 109 | if self._model_type == ModelType.exllamav2: 110 | self._settings.temperature, self._settings.top_p = temperature, top_p 111 | input_ids = self._tokenizer.encode(input_text) 112 | self._stream_generator.begin_stream(input_ids, self._settings) 113 | generated_tokens = 0 114 | while True: 115 | chunk, eos, _ = self._stream_generator.stream() 116 | generated_tokens += 1 117 | if eos or generated_tokens == self._max_new_tokens or self._finish_reason == "complete": 118 | break 119 | chunk = self._postprocess_stream_chunk(text=chunk) 120 | if chunk: 121 | yield chunk 122 | if generated_tokens == self._max_new_tokens: 123 | yield {"delta": {}, "finish_reason": "length"} 124 | else: 125 | input_ids = self._tokenizer(input_text, return_tensors="pt").input_ids.cuda() 126 | logits_processor = self._get_logits_processor(temperature=temperature, top_p=top_p) 127 | hf_generator = self._hf_generate_stream( 128 | input_ids=input_ids, params=params, logits_processor=logits_processor 129 | ) 130 | for chunk in hf_generator: 131 | if self._finish_reason == "complete": 132 | break 133 | chunk = self._postprocess_stream_chunk(text=chunk) 134 | if chunk: 135 | yield chunk 136 | del self._curr_response 137 | del self._response_type 138 | del self._finish_reason 139 | 140 | def _get_logits_processor(self, temperature, top_p) -> LogitsProcessorList: 141 | processors = LogitsProcessorList() 142 | if temperature > 0.0 and temperature != 1.0: 143 | processors.append(TemperatureLogitsWarper(temperature=temperature)) 144 | if top_p is not None and top_p < 1.0: 145 | processors.append(TopPLogitsWarper(top_p=top_p)) 146 | return processors 147 | 148 | def _hf_generate_stream(self, input_ids, params, logits_processor) -> Generator[str]: 149 | past_key_values, output_ids, sampled_token_tensor = None, input_ids.clone().detach(), None 150 | for i in range(self._max_new_tokens): 151 | out = self._model( 152 | input_ids if not past_key_values else sampled_token_tensor, 153 | use_cache=True, 154 | past_key_values=past_key_values, 155 | ) 156 | logits, past_key_values = out.logits, out.past_key_values 157 | processed_logits = logits_processor(None, logits[:, -1, :])[0] if logits_processor else logits[0, -1, :] 158 | if params.get("temperature") == 0.0: 159 | _, indices = torch.topk(processed_logits, 2) 160 | sampled_tokens = [int(index) for index in indices.tolist()] 161 | else: 162 | probs = torch.softmax(processed_logits, dim=-1) 163 | indices = torch.multinomial(probs, num_samples=2) 164 | sampled_tokens = [int(token) for token in indices.tolist()] 165 | sampled_token = sampled_tokens[0] 166 | sampled_token_tensor = torch.as_tensor([[sampled_token]], device="cuda") 167 | current_output_text = self._tokenizer.decode( 168 | output_ids[0].tolist(), skip_special_tokens=True, clean_up_tokenization_spaces=False 169 | ) 170 | output_ids = torch.cat((output_ids, sampled_token_tensor), 1) 171 | next_output_text = self._tokenizer.decode( 172 | output_ids[0].tolist(), 173 | skip_special_tokens=True, 174 | clean_up_tokenization_spaces=False, 175 | ) 176 | output = next_output_text[len(current_output_text) :] 177 | if sampled_token == self._tokenizer.eos_token_id: 178 | break 179 | yield output 180 | else: 181 | yield "[|LENGTH|]" 182 | 183 | def _postprocess(self, text): 184 | output_json = json.loads(re.search(r"```(.*?)```?", text, re.DOTALL).group(1)) 185 | if output_json["function_call"] is not None: 186 | choices = [ 187 | { 188 | "message": { 189 | "role": "assistant", 190 | "content": None, 191 | "function_call": { 192 | "name": output_json["function_call"]["name"], 193 | "arguments": output_json["function_call"]["arguments"] 194 | if isinstance(output_json["function_call"]["arguments"], str) 195 | else json.dumps(output_json["function_call"]["arguments"]), 196 | }, 197 | }, 198 | "finish_reason": "function_call", 199 | } 200 | ] 201 | else: 202 | choices = [ 203 | { 204 | "message": {"role": "assistant", "content": output_json["content"]}, 205 | "finish_reason": "stop", 206 | } 207 | ] 208 | return choices 209 | 210 | def _postprocess_stream_chunk(self, text): 211 | self._curr_response += text 212 | if text == "[|LENGTH|]": 213 | self._finish_reason = "complete" 214 | return {"delta": {}, "finish_reason": "length"} 215 | if not self._response_type: 216 | # Check for "content" 217 | if '"content": null, "function_call": {' in self._curr_response: 218 | self._response_type = "function" 219 | elif '"content": "' in self._curr_response: 220 | self._response_type = "content" 221 | return None 222 | elif self._response_type == "function": 223 | if self._curr_response.endswith('", "arguments": "'): 224 | name_match = re.search(r'"function_call":\s*\{"name":\s*"([^"]+)"', self._curr_response) 225 | return { 226 | "delta": {"role": "assistant", "function_call": {"name": name_match.group(1)}}, 227 | "finish_reason": None, 228 | } 229 | elif '", "arguments": "' in self._curr_response: 230 | if self._finish_reason == "function_call": 231 | output = {"delta": {}, "finish_reason": self._finish_reason} 232 | self._finish_reason = "complete" 233 | return output 234 | if self._curr_response.endswith('}"'): 235 | self._finish_reason = "function_call" 236 | text = text.rstrip('"') 237 | return {"delta": {"role": "assistant", "function_call": {"arguments": text}}, "finish_reason": None} 238 | elif self._response_type == "content": 239 | match = re.search(r'"content":\s*"([^"]+)"', self._curr_response) 240 | if match: 241 | self._finish_reason = "stop" 242 | if text[0] in [".", "?", "!"]: 243 | return {"delta": {"role": "assistant", "content": text[0]}, "finish_reason": None} 244 | if not self._finish_reason: 245 | return {"delta": {"role": "assistant", "content": text}, "finish_reason": self._finish_reason} 246 | elif self._finish_reason == "stop": 247 | output = {"delta": {}, "finish_reason": self._finish_reason} 248 | self._finish_reason = "complete" 249 | return output 250 | else: 251 | return None 252 | 253 | @classmethod 254 | async def maybe_init(cls, model_path: str, model_type: ModelType) -> InvokerPipeline: 255 | if cls._pipeline is None: 256 | cls._pipeline = InvokerPipeline(model_path=model_path, model_type=model_type) 257 | if cls._pipeline is not None: 258 | return cls._pipeline 259 | else: 260 | raise ValueError("Pipeline could not be initialized!") 261 | -------------------------------------------------------------------------------- /invoker/utils/enum_tags.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ModelType(str, Enum): 5 | hf = "hf" 6 | exllamav2 = "exllamav2" 7 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pre-commit==3.4.0 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | --find-links https://download.pytorch.org/whl/cu118/torch_stable.html 2 | torch==2.0.1+cu118 3 | transformers==4.31.0 4 | sentencepiece==0.1.99 5 | accelerate==0.22.0 6 | fastapi==0.103.0 7 | uvicorn==0.23.2 8 | pydantic-settings==2.0.3 9 | openai==0.28.0 10 | exllamav2==0.0.3 -------------------------------------------------------------------------------- /server_fastapi.py: -------------------------------------------------------------------------------- 1 | import time 2 | import uuid 3 | 4 | from fastapi import FastAPI 5 | from fastapi.responses import StreamingResponse 6 | from pydantic import Field 7 | from pydantic_settings import BaseSettings 8 | 9 | from invoker.api_types import ChatInput, ChatOutput, ChatStreamOutput, StreamChoice 10 | from invoker.model import InvokerPipeline 11 | from invoker.utils.enum_tags import ModelType 12 | 13 | 14 | class Settings(BaseSettings): 15 | invoker_model_type: ModelType = Field("hf", env="INVOKER_MODEL_TYPE") 16 | invoker_model_name_or_path: str = Field("jeffrey-fong/invoker-13b", env="INVOKER_MODEL_NAME_OR_PATH") 17 | 18 | 19 | async def get_pipeline(model_path: str, model_type: ModelType): 20 | return await InvokerPipeline.maybe_init(model_path=model_path, model_type=model_type) 21 | 22 | 23 | app = FastAPI(title="Invoker") 24 | settings = Settings() 25 | 26 | 27 | @app.post("/chat/completions") 28 | async def chat(req: ChatInput): 29 | id = str(uuid.uuid4()) 30 | invoker_pipeline: InvokerPipeline = await get_pipeline( 31 | model_path=settings.invoker_model_name_or_path, model_type=settings.invoker_model_type 32 | ) 33 | prompt = invoker_pipeline.format_message(messages=req.messages, functions=req.functions) 34 | created = int(time.time()) 35 | if not req.stream: 36 | choices = invoker_pipeline.generate( 37 | input_text=prompt, params={"temperature": req.temperature, "top_p": req.top_p} 38 | ) 39 | return ChatOutput(id=id, created=created, choices=choices) 40 | else: 41 | response_generator = invoker_pipeline.generate_stream( 42 | input_text=prompt, params={"temperature": req.temperature, "top_p": req.top_p} 43 | ) 44 | 45 | def get_streaming_response(): 46 | i = 0 47 | for chunk in response_generator: 48 | choices = [StreamChoice(**chunk)] 49 | i += 1 50 | yield "data: " + ChatStreamOutput(id=id, created=created, choices=choices).model_dump_json( 51 | exclude_unset=True 52 | ) + "\n\n" 53 | 54 | return StreamingResponse(content=get_streaming_response(), media_type="text/event-stream") 55 | 56 | 57 | @app.on_event("startup") 58 | async def startup(): 59 | _ = await get_pipeline(model_path=settings.invoker_model_name_or_path, model_type=settings.invoker_model_type) 60 | --------------------------------------------------------------------------------