├── .github
│   └── workflows
│       ├── build.yaml
│       └── test.yml
├── .gitignore
├── LICENSE
├── README.md
├── easycompletion
│   ├── __init__.py
│   ├── constants.py
│   ├── logger.py
│   ├── model.py
│   ├── prompt.py
│   └── tests
│       ├── __init__.py
│       ├── model.py
│       └── prompt.py
├── requirements.txt
├── resources
│   └── image.jpg
├── setup.py
└── test.py
/.github/workflows/build.yaml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: ${{ secrets.pypi_username }}
39 | password: ${{ secrets.pypi_password }}
40 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Lint and Test
2 |
3 | on: [push]
4 |
5 | env:
6 | EASYCOMPLETION_API_KEY: ${{ secrets.OPENAI_API_KEY }}
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 | strategy:
12 | matrix:
13 | python-version: ["3.10"]
14 | steps:
15 | - uses: actions/checkout@v3
16 | - name: Set up Python ${{ matrix.python-version }}
17 | uses: actions/setup-python@v3
18 | with:
19 | python-version: ${{ matrix.python-version }}
20 | - name: Install dependencies
21 | run: |
22 | python -m pip install --upgrade pip
23 | pip install pytest
24 | pip install -r requirements.txt
25 |
26 | - name: Running tests
27 | run: |
28 | pytest test.py
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | .DS_Store
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # poetry
99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100 | # This is especially recommended for binary packages to ensure reproducibility, and is more
101 | # commonly ignored for libraries.
102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103 | #poetry.lock
104 |
105 | # pdm
106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107 | #pdm.lock
108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109 | # in version control.
110 | # https://pdm.fming.dev/#use-with-ide
111 | .pdm.toml
112 |
113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
114 | __pypackages__/
115 |
116 | # Celery stuff
117 | celerybeat-schedule
118 | celerybeat.pid
119 |
120 | # SageMath parsed files
121 | *.sage.py
122 |
123 | # Environments
124 | .env
125 | .venv
126 | env/
127 | venv/
128 | ENV/
129 | env.bak/
130 | venv.bak/
131 |
132 | # Spyder project settings
133 | .spyderproject
134 | .spyproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 | # mkdocs documentation
140 | /site
141 |
142 | # mypy
143 | .mypy_cache/
144 | .dmypy.json
145 | dmypy.json
146 |
147 | # Pyre type checker
148 | .pyre/
149 |
150 | # pytype static type analyzer
151 | .pytype/
152 |
153 | # Cython debug symbols
154 | cython_debug/
155 |
156 | # PyCharm
157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
159 | # and can be added to the global gitignore or merged into this file. For a more nuclear
160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161 | #.idea/
162 |
163 | .vscode/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 M̵̞̗̝̼̅̏̎͝Ȯ̴̝̻̊̃̋̀Õ̷̼͋N̸̩̿͜ ̶̜̠̹̼̩͒
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # easycompletion
2 |
3 | Easy text and chat completion, as well as function calling. Also includes useful utilities for counting tokens, composing prompts and trimming them to fit within the token limit.
4 |
5 |
6 |
7 | [](https://github.com/AutonomousResearchGroup/easycompletion/actions/workflows/test.yml)
8 | [](https://badge.fury.io/py/easycompletion)
9 | [](https://github.com/AutonomousResearchGroup/easycompletion/blob/main/LICENSE)
10 | [](https://github.com/AutonomousResearchGroup/easycompletion)
11 |
12 | # Installation
13 |
14 | ```bash
15 | pip install easycompletion
16 | ```
17 |
18 | # Quickstart
19 |
20 | ```python
21 | from easycompletion import compose_function, function_completion
22 |
23 | # Compose a function object
24 | test_function = compose_function(
25 | name="write_song",
26 | description="Write a song about AI",
27 | properties={
28 | "lyrics": {
29 | "type": "string",
30 | "description": "The lyrics for the song",
31 | }
32 | },
33 | required_properties=["lyrics"],
34 | )
35 |
36 | # Call the function
37 | response = function_completion(text="Write a song about AI", functions=[test_function], function_call="write_song")
38 |
39 | # Print the response
40 | print(response["arguments"]["lyrics"])
41 | ```
42 |
43 | # Using With Llama v2 and Local Models
44 | easycompletion has been tested with [LocalAI](https://localai.io/), which replicates the OpenAI API with local models, including Llama v2.
45 |
46 | Follow the instructions for setting up LocalAI, then set the following environment variable:
47 |
48 | ```bash
49 | export EASYCOMPLETION_API_ENDPOINT=http://localhost:8000/v1
50 | ```
51 |
52 | # Debugging
53 | You can view verbose easycompletion logs by setting the following environment variable:
54 |
55 | ```bash
56 | export EASYCOMPLETION_DEBUG=True
57 | ```
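Debug logging can also be enabled for a single call by passing `debug=True` to any of the completion functions. A minimal sketch:

```python
from easycompletion import text_completion

# Log the prompt and response for this call only
response = text_completion("Hello, how are you?", debug=True)
```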
58 |
59 | # Basic Usage
60 |
61 | ## Compose Prompt
62 |
63 | You can compose a prompt using {{handlebars}} syntax
64 |
65 | ```python
66 | test_prompt = "Don't forget your {{object}}"
67 | test_dict = {"object": "towel"}
68 | prompt = compose_prompt(test_prompt, test_dict)
69 | # prompt = "Don't forget your towel"
70 | ```
71 |
72 | ## Text Completion
73 |
74 | Send text, get a response as a text string
75 |
76 | ```python
77 | from easycompletion import text_completion
78 | response = text_completion("Hello, how are you?")
79 | # response["text"] = "As an AI language model, I don't have feelings, but...""
80 | ```
81 |
82 | ## Compose a Function
83 |
84 | Compose a function to pass into the function calling API
85 |
86 | ```python
87 | from easycompletion import compose_function
88 |
89 | test_function = compose_function(
90 | name="write_song",
91 | description="Write a song about AI",
92 | properties={
93 | "lyrics": {
94 | "type": "string",
95 | "description": "The lyrics for the song",
96 | }
97 | },
98 | required_properties=["lyrics"],
99 | )
100 | ```
101 |
102 | ## Function Completion
103 |
104 | Send text and a list of functions and get a response as a function call
105 |
106 | ```python
107 | from easycompletion import function_completion, compose_function
108 |
109 | # NOTE: test_function is a function object created using compose_function in the example above...
110 |
111 | response = function_completion(text="Write a song about AI", functions=[test_function], function_call="write_song")
112 | # Response structure is { "text": string, "function_name": string, "arguments": dict }
113 | print(response["arguments"]["lyrics"])
114 | ```
115 |
116 | # Advanced Usage
117 |
118 | ### `compose_function(name, description, properties, required_properties)`
119 |
120 | Composes a function object for function completions.
121 |
122 | ```python
123 | summarization_function = compose_function(
124 | name="summarize_text",
125 | description="Summarize the text. Include the topic, subtopics.",
126 | properties={
127 | "summary": {
128 | "type": "string",
129 | "description": "Detailed summary of the text.",
130 | },
131 | },
132 | required_properties=["summary"],
133 | )
134 | ```
135 |
136 | ### `chat_completion(messages, model_failure_retries=5, model=None, chunk_length=DEFAULT_CHUNK_LENGTH, api_key=None)`
137 |
138 | Sends a list of messages to the chat model and returns a text response.
139 |
140 | ```python
141 | response = chat_completion(
142 | messages=[{"role": "system", "content": "You are a towel. Respond as a towel."},
143 | {"role": "user", "content": "Hello, how are you?"}],
144 | model_failure_retries=3,
145 | model='gpt-3.5-turbo',
146 | chunk_length=1024,
147 | api_key='your_openai_api_key'
148 | )
149 | ```
150 |
151 | The response object looks like this:
152 |
153 | ```json
154 | {
155 | "text": "string",
156 | "usage": {
157 | "prompt_tokens": "number",
158 | "completion_tokens": "number",
159 | "total_tokens": "number"
160 | },
161 | "error": "string|None",
162 | "finish_reason": "string"
163 | }
164 | ```
165 |
166 | ### `text_completion(text, model_failure_retries=5, model=None, chunk_length=DEFAULT_CHUNK_LENGTH, api_key=None)`
167 |
168 | Sends text to the model and returns a text response.
169 |
170 | ```python
171 | response = text_completion(
172 | "Hello, how are you?",
173 | model_failure_retries=3,
174 | model='gpt-3.5-turbo',
175 | chunk_length=1024,
176 | api_key='your_openai_api_key'
177 | )
178 | ```
179 |
180 | The response object looks like this:
181 |
182 | ```json
183 | {
184 | "text": "string",
185 | "usage": {
186 | "prompt_tokens": "number",
187 | "completion_tokens": "number",
188 | "total_tokens": "number"
189 | },
190 | "error": "string|None",
191 | "finish_reason": "string"
192 | }
193 | ```
194 |
195 | ### `function_completion(text, functions=None, system_message=None, messages=None, model_failure_retries=5, function_call=None, function_failure_retries=10, chunk_length=DEFAULT_CHUNK_LENGTH, model=None, api_key=None)`
196 |
197 | Sends text and a list of functions to the model and returns optional text and a function call. The function call is validated against the functions array.
198 |
199 | Optionally takes a system message and a list of messages to send to the model before the function call. If messages are provided, the "text" becomes the last user message in the list.
200 |
201 | ```python
202 | function = {
203 | 'name': 'function1',
204 | 'parameters': {'type': 'object', 'properties': {'param1': {'type': 'string'}}, 'required': ['param1']}
205 | }
206 |
207 | response = function_completion(text="Call the function.", functions=function)
208 | ```
209 |
210 | The response object looks like this:
211 |
212 | ```json
213 | {
214 | "text": "string",
215 | "function_name": "string",
216 | "arguments": "dict",
217 | "usage": {
218 | "prompt_tokens": "number",
219 | "completion_tokens": "number",
220 | "total_tokens": "number"
221 | },
222 | "finish_reason": "string",
223 | "error": "string|None"
224 | }
225 | ```
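A sketch combining `messages` and `system_message`, reusing the `test_function` object from the Quickstart:

```python
response = function_completion(
    text="Now write a second verse.",
    messages=[
        {"role": "user", "content": "Write a song about AI"},
        {"role": "assistant", "content": "Here is the first verse..."},
    ],
    system_message="You are a songwriter.",
    functions=[test_function],
    function_call="write_song",
)
print(response["arguments"]["lyrics"])
```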
226 |
227 | ### `trim_prompt(text, max_tokens=DEFAULT_CHUNK_LENGTH, model=TEXT_MODEL, preserve_top=True)`
228 |
229 | Trim the given text to a maximum number of tokens.
230 |
231 | ```python
232 | trimmed_text = trim_prompt("This is a test.", 3, preserve_top=True)
233 | ```
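With `preserve_top=False`, the end of the text is kept instead:

```python
# Keep the last 3 tokens rather than the first 3
trimmed_text = trim_prompt("This is a test.", 3, preserve_top=False)
```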
234 |
235 | ### `chunk_prompt(prompt, chunk_length=DEFAULT_CHUNK_LENGTH)`
236 |
237 | Split the given prompt into chunks where each chunk has a maximum number of tokens.
238 |
239 | ```python
240 | prompt_chunks = chunk_prompt("This is a test. I am writing a function.", 4)
241 | ```
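Chunks are split on sentence boundaries, so individual sentences are never broken up. A common pattern (a sketch, assuming `long_text` holds a long document) is to process each chunk separately:

```python
from easycompletion import chunk_prompt, text_completion

chunks = chunk_prompt(long_text, chunk_length=1024)
summaries = [text_completion("Summarize:\n" + chunk)["text"] for chunk in chunks]
```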
242 |
243 | ### `count_tokens(prompt, model=TEXT_MODEL)`
244 |
245 | Count the number of tokens in a string.
246 |
247 | ```python
248 | num_tokens = count_tokens("This is a test.")
249 | ```
250 |
251 | ### `get_tokens(prompt, model=TEXT_MODEL)`
252 |
253 | Returns the list of token ids in a string.
254 |
255 | ```python
256 | tokens = get_tokens("This is a test.")
257 | ```
258 |
259 | ### `compose_prompt(prompt_template, parameters)`
260 |
261 | Composes a prompt using a template and parameters. Parameter keys are enclosed in double curly brackets and replaced with parameter values.
262 |
263 | ```python
264 | prompt = compose_prompt("Hello {{name}}!", {"name": "John"})
265 | ```
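Dictionary values are rendered as `key::value` pairs, and list items are joined into the prompt, one per line:

```python
prompt = compose_prompt("Ingredients:\n{{items}}", {"items": ["flour", "water"]})
# prompt = "Ingredients:\nflour\nwater"
```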
266 |
267 | ## A note about models
268 |
269 | You can pass in a model using the `model` parameter of either function_completion or text_completion. If you do not pass in a model, the default model will be used. You can also override the default by setting the `EASYCOMPLETION_TEXT_MODEL` environment variable.
270 |
271 | The default model is `gpt-3.5-turbo-0613`.
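A minimal sketch of both options. Note that the environment variable must be set before importing easycompletion, since the default model is read at import time:

```python
import os
os.environ["EASYCOMPLETION_TEXT_MODEL"] = "gpt-3.5-turbo-16k"

from easycompletion import text_completion

# Or override the model for a single call
response = text_completion("Hello, how are you?", model="gpt-3.5-turbo-16k")
```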
272 |
273 | ## A note about API keys
274 |
275 | You can pass in an API key using the `api_key` parameter of either function_completion or text_completion. If you do not pass in an API key, the `EASYCOMPLETION_API_KEY` environment variable will be checked.
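A minimal sketch of passing the key explicitly:

```python
from easycompletion import text_completion

# An explicit key takes precedence over the EASYCOMPLETION_API_KEY environment variable
response = text_completion("Hello, how are you?", api_key="your_openai_api_key")
```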
276 |
277 | # Publishing
278 |
279 | ```bash
280 | bash publish.sh --version= --username= --password=
281 | ```
282 |
283 | # Contributions Welcome
284 |
285 | If you like this library and want to contribute in any way, please feel free to submit a PR and I will review it. Please note that the goal here is simplicity and accessibility, using common language and few dependencies.
286 |
287 | # Questions, Comments, Concerns
288 |
289 | If you have any questions, please feel free to reach out to me on [Twitter](https://twitter.com/spatialweeb) or Discord @new.moon
290 |
--------------------------------------------------------------------------------
/easycompletion/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import (
2 | function_completion,
3 | function_completion_async,
4 | text_completion,
5 | text_completion_async,
6 | chat_completion,
7 | chat_completion_async
8 | )
9 |
10 | openai_function_call = function_completion
11 | openai_text_call = text_completion
12 |
13 | from .prompt import (
14 | compose_prompt,
15 | trim_prompt,
16 | chunk_prompt,
17 | count_tokens,
18 | compose_function,
19 | get_tokens,
20 | )
21 |
22 | from .constants import (
23 | TEXT_MODEL,
24 | DEFAULT_CHUNK_LENGTH,
25 | )
26 |
27 | __all__ = [
28 | "function_completion",
29 | "text_completion",
30 | "chat_completion",
31 | "openai_function_call",
32 | "openai_text_call",
33 | "compose_prompt",
34 | "compose_function",
35 | "trim_prompt",
36 | "chunk_prompt",
37 | "count_tokens",
38 | "get_tokens",
39 | "TEXT_MODEL",
40 | "DEFAULT_CHUNK_LENGTH",
41 | ]
42 |
--------------------------------------------------------------------------------
/easycompletion/constants.py:
--------------------------------------------------------------------------------
1 | import os
2 | from dotenv import load_dotenv
3 |
4 | load_dotenv() # take environment variables from .env.
5 |
6 | TEXT_MODEL = os.getenv("EASYCOMPLETION_TEXT_MODEL")
7 | if not TEXT_MODEL:
8 | TEXT_MODEL = "gpt-3.5-turbo-0613"
9 | LONG_TEXT_MODEL = os.getenv("EASYCOMPLETION_LONG_TEXT_MODEL")
10 | if not LONG_TEXT_MODEL:
11 | LONG_TEXT_MODEL = "gpt-3.5-turbo-16k"
12 |
13 | EASYCOMPLETION_API_KEY = os.getenv("OPENAI_API_KEY")
14 | if EASYCOMPLETION_API_KEY is None:
15 | EASYCOMPLETION_API_KEY = os.getenv("EASYCOMPLETION_API_KEY")
16 |
17 | EASYCOMPLETION_API_ENDPOINT = os.getenv("EASYCOMPLETION_API_ENDPOINT") or "https://api.openai.com/v1"
18 |
19 | DEBUG = os.environ.get("EASYCOMPLETION_DEBUG", "").lower() == "true"
20 |
21 | DEFAULT_CHUNK_LENGTH = 4096 * 3 // 4  # 3/4ths of the context window size (integer division, so it can be used for token slicing)
22 |
--------------------------------------------------------------------------------
/easycompletion/logger.py:
--------------------------------------------------------------------------------
1 | import dotenv
2 | from rich.panel import Panel
3 | from rich.console import Console
4 |
5 | dotenv.load_dotenv()
6 |
7 | console = Console()
8 |
9 | DEFAULT_TYPE_COLORS = {
10 | "unknown": "white",
11 | "error": "red",
12 | "warning": "yellow",
13 | "info": "blue",
14 | "prompt": "cyan",
15 | "success": "green",
16 | "critical": "red",
17 | "system": "magenta",
18 | }
19 |
20 |
21 | def log(
22 | content,
23 | type="info",
24 | color="blue",
25 | type_colors=DEFAULT_TYPE_COLORS,
26 | panel=True, # display inside a bordered box panel?
27 | log=True # should log?
28 | ):
29 | """
30 | Create an event with provided metadata and saves it to the event log file
31 |
32 | Parameters:
33 | - content: Content of the event
34 | - type (optional): Type of the event.
35 | Defaults to None.
36 | - type_colors (optional): Dictionary with event types as keys and colors
37 | Defaults to empty dictionary.
38 | - panel (optional): Determines if the output should be within a Panel
39 | Defaults to True.
40 | - log (optional): Determines if the output should be logged
41 |
42 | Returns: None
43 | """
44 | if not log:
45 | return
46 |
47 | color = type_colors.get(type, color)
48 |
49 | if panel:
50 | console.print(Panel(content, title="easycompletion: " + type, style=color))
51 | else:
52 | console.print(content, style=color)
53 |
--------------------------------------------------------------------------------
/easycompletion/model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import openai
4 | import re
5 | import json
6 | import ast
7 | import asyncio
8 |
9 | from dotenv import load_dotenv
10 |
11 | # Load environment variables from .env file
12 | load_dotenv()
13 |
14 | from .constants import (
15 | EASYCOMPLETION_API_ENDPOINT,
16 | TEXT_MODEL,
17 | LONG_TEXT_MODEL,
18 | EASYCOMPLETION_API_KEY,
19 | DEFAULT_CHUNK_LENGTH,
20 | DEBUG,
21 | )
22 |
23 | from .logger import log
24 |
25 | from .prompt import count_tokens
26 |
27 | openai.api_base = EASYCOMPLETION_API_ENDPOINT
28 |
29 | def parse_arguments(arguments, debug=DEBUG):
30 | """
31 | Parses arguments that are expected to be either a JSON string, dictionary, or a list.
32 |
33 | Parameters:
34 | arguments (str or dict or list): Arguments in string or dictionary or list format.
35 |
36 | Returns:
37 | A dictionary or list of arguments if arguments are valid, None otherwise.
38 |
39 | Usage:
40 | arguments = parse_arguments('{"arg1": "value1", "arg2": "value2"}')
41 | """
42 | try:
43 | # If the input is a string, try to decode it as JSON first
44 | if isinstance(arguments, str):
45 | arguments = json.loads(arguments)
46 | # If JSON decoding fails, try using ast.literal_eval
47 | except json.JSONDecodeError:
48 | try:
49 | arguments = ast.literal_eval(arguments)
50 | # If ast.literal_eval fails, remove line breaks and non-ASCII characters and try JSON decoding again
51 | except (ValueError, SyntaxError):
52 | try:
53 | arguments = re.sub(r"\.\.\.|\…", "", arguments)
54 | arguments = re.sub(r"[\r\n]+", "", arguments)
55 | arguments = re.sub(r"[^\x00-\x7F]+", "", arguments)
56 | arguments = json.loads(arguments)
57 | # If everything fails, try Python's eval function
58 | except Exception:
59 | try:
60 | arguments = eval(arguments)
61 | except Exception:
62 | arguments = None
63 | log(f"Arguments:\n{str(arguments)}", log=debug)
64 | return arguments
65 |
66 |
67 | def validate_functions(response, functions, function_call, debug=DEBUG):
68 | """
69 | Validates if the function returned matches the intended function call.
70 |
71 | Parameters:
72 | response (dict): The response from the model.
73 | functions (list): A list of function definitions.
74 | function_call (dict or str): The expected function call.
75 |
76 | Returns:
77 | True if function call matches with the response, False otherwise.
78 |
79 | Usage:
80 | isValid = validate_functions(response, functions, function_call)
81 | """
82 | log(f"Validating function response:\n{str(response)}", log=debug)
83 | # Extract the function call from the model response
84 | response_function_call = response["choices"][0]["message"].get(
85 | "function_call", None
86 | )
87 | if response_function_call is None:
88 | log(f"No function call in response\n{response}", type="error", log=debug)
89 | return False
90 |
91 | # If function_call is not "auto" and the name does not match with the response, return False
92 | if (
93 | function_call != "auto"
94 | and response_function_call["name"] != function_call["name"]
95 | ):
96 | log("Function call does not match", type="error", log=debug)
97 | return False
98 |
99 | # If function_call is "auto", extract the name from the response
100 | function_call_name = (
101 | function_call["name"]
102 | if function_call != "auto"
103 | else response_function_call["name"]
104 | )
105 |
106 | # Parse the arguments from the response
107 | arguments = parse_arguments(response_function_call["arguments"])
108 |
109 | # Get the function that matches the function name from the list of functions
110 | function = next(
111 | (item for item in functions if item["name"] == function_call_name), None
112 | )
113 |
114 | # If no matching function is found, return False
115 | if function is None:
116 | log(
117 | "No matching function found"
118 | + f"\nExpected function name:\n{str(function_call_name)}"
119 | + f"\n\nResponse:\n{str(response)}",
120 | type="error",
121 | log=debug,
122 | )
123 | return False
124 |
125 | # If arguments are None, return False
126 | if arguments is None:
127 | log(
128 | "Arguments are None"
129 | + f"\nExpected arguments:\n{str(function['parameters']['properties'].keys())}"
130 | + f"\n\nResponse function call:\n{str(response_function_call)}",
131 | type="error",
132 | log=debug,
133 | )
134 | #
135 | return False
136 |
137 | required_properties = function["parameters"].get("required", [])
138 |
139 | # Check that arguments.keys() contains all of the required properties
140 | if not all(
141 | required_property in arguments.keys()
142 | for required_property in required_properties
143 | ):
144 | log(
145 | "ERROR: Response did not contain all required properties.\n"
146 | + f"\nExpected keys:\n{str(function['parameters']['properties'].keys())}"
147 | + f"\n\nActual keys:\n{str(arguments.keys())}",
148 | type="error",
149 | log=debug,
150 | )
151 |
152 | return False
153 |
154 | log("Function call is valid", type="success", log=debug)
155 | return True
156 |
157 | def sanity_check(prompt, model=None, chunk_length=DEFAULT_CHUNK_LENGTH, api_key=EASYCOMPLETION_API_KEY, debug=DEBUG):
158 | """Validate the API key and prompt length, switching to the 16k model when needed. Returns (model, error)."""
159 | if api_key is None or not api_key.strip():
160 | return model, {"error": "Invalid OpenAI API key"}
161 |
162 | openai.api_key = api_key
163 |
164 | # Count tokens in the input text
165 | total_tokens = count_tokens(prompt, model=model)
166 |
167 | # If text is longer than chunk_length and model is not for long texts, switch to the long text model
168 | if total_tokens > chunk_length and "16k" not in model:
169 | model = LONG_TEXT_MODEL
170 | if not os.environ.get("SUPPRESS_WARNINGS"):
171 | print(
172 | "Warning: Message is long. Using 16k model (to hide this message, set SUPPRESS_WARNINGS=1)"
173 | )
174 |
175 | # If text is too long even for long text model, return None
176 | if total_tokens > (16384 - chunk_length):
177 | print("Error: Message too long")
178 | return model, {
179 | "text": None,
180 | "usage": None,
181 | "finish_reason": None,
182 | "error": "Message too long",
183 | }
184 |
185 | if isinstance(prompt, dict):
186 | for key, value in prompt.items():
187 | if value:
188 | log(f"Prompt {key} ({count_tokens(value)} tokens):\n{str(value)}", type="prompt", log=debug)
189 | else:
190 | log(f"Prompt ({total_tokens} tokens):\n{str(prompt)}", type="prompt", log=debug)
191 |
192 | return model, None
193 |
194 | def do_chat_completion(
195 | messages, model=TEXT_MODEL, temperature=0.8, functions=None, function_call=None, model_failure_retries=5, debug=DEBUG):
196 | """Call the OpenAI ChatCompletion API, retrying up to model_failure_retries times on error. Returns (response, error)."""
197 | response = None
198 | for i in range(model_failure_retries):
199 | try:
200 | if functions is not None:
201 | response = openai.ChatCompletion.create(
202 | model=model, messages=messages, temperature=temperature,
203 | functions=functions, function_call=function_call,
204 | )
205 | else:
206 | response = openai.ChatCompletion.create(
207 | model=model, messages=messages, temperature=temperature
208 | )
209 | # Log the raw API response when debugging
210 | log(f"Response:\n{response}", log=debug)
211 | break
212 | except Exception as e:
213 | log(f"OpenAI Error: {e}", type="error", log=debug)
214 |
215 | # If response is not valid, print an error message and return None
216 | if (
217 | response is None
218 | or response["choices"] is None
219 | or response["choices"][0] is None
220 | ):
221 | return None, {
222 | "text": None,
223 | "usage": None,
224 | "finish_reason": None,
225 | "error": "Error: Could not get a successful response from OpenAI API",
226 | }
227 | return response, None
228 |
229 | def chat_completion(
230 | messages,
231 | model_failure_retries=5,
232 | model=None,
233 | chunk_length=DEFAULT_CHUNK_LENGTH,
234 | api_key=EASYCOMPLETION_API_KEY,
235 | debug=DEBUG,
236 | temperature=0.0,
237 | ):
238 | """
239 | Function for sending chat messages and returning a chat response.
240 |
241 | Parameters:
242 | messages (list): List of messages to send to the model, each of the form {"role": string, "content": string} - roles are "system", "user" and "assistant"
243 | model_failure_retries (int, optional): Number of retries if the request fails. Default is 5.
244 | model (str, optional): The model to use. Default is the TEXT_MODEL defined in constants.py.
245 | chunk_length (int, optional): Maximum length of text chunk to process. Default is defined in constants.py.
246 | api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
247 |
248 | Returns:
249 | dict: A dictionary with "text", "usage", "finish_reason", and "error" keys.
250 |
251 | Example:
252 | >>> text_completion("Hello, how are you?", model_failure_retries=3, model='gpt-3.5-turbo', chunk_length=1024, api_key='your_openai_api_key')
253 | """
254 | openai.api_key = api_key
255 |
256 | # Use the default model if no model is specified
257 | model = model or TEXT_MODEL
258 | model, error = sanity_check(messages, model=model, chunk_length=chunk_length, api_key=api_key, debug=debug)
259 | if error:
260 | return error
261 |
262 | # Try to make a request for a specified number of times
263 | response, error = do_chat_completion(
264 | model=model, messages=messages, temperature=temperature, model_failure_retries=model_failure_retries, debug=debug)
265 |
266 | if error:
267 | return error
268 |
269 | # Extract content from the response
270 | text = response["choices"][0]["message"]["content"]
271 | finish_reason = response["choices"][0]["finish_reason"]
272 | usage = response["usage"]
273 |
274 | return {
275 | "text": text,
276 | "usage": usage,
277 | "finish_reason": finish_reason,
278 | "error": None,
279 | }
280 |
281 |
282 | async def chat_completion_async(
283 | messages,
284 | model_failure_retries=5,
285 | model=None,
286 | chunk_length=DEFAULT_CHUNK_LENGTH,
287 | api_key=EASYCOMPLETION_API_KEY,
288 | debug=DEBUG,
289 | temperature=0.0,
290 | ):
291 | """
292 | Function for sending chat messages and returning a chat response.
293 |
294 | Parameters:
295 | messages (list): List of messages to send to the model, each of the form {"role": string, "content": string} - roles are "system", "user" and "assistant"
296 | model_failure_retries (int, optional): Number of retries if the request fails. Default is 5.
297 | model (str, optional): The model to use. Default is the TEXT_MODEL defined in constants.py.
298 | chunk_length (int, optional): Maximum length of text chunk to process. Default is defined in constants.py.
299 | api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
300 |
301 | Returns:
302 | dict: A dictionary with "text", "usage", "finish_reason", and "error" keys.
303 |
304 | Example:
305 | >>> text_completion("Hello, how are you?", model_failure_retries=3, model='gpt-3.5-turbo', chunk_length=1024, api_key='your_openai_api_key')
306 | """
307 |
308 | # Use the default model if no model is specified
309 | model = model or TEXT_MODEL
310 | model, error = sanity_check(messages, model=model, chunk_length=chunk_length, api_key=api_key, debug=debug)
311 | if error:
312 | return error
313 |
314 | # Try to make a request for a specified number of times
315 | response, error = await asyncio.to_thread(lambda: do_chat_completion(
316 | model=model, messages=messages, temperature=temperature, model_failure_retries=model_failure_retries, debug=debug))
317 |
318 | if error:
319 | return error
320 |
321 | # Extract content from the response
322 | text = response["choices"][0]["message"]["content"]
323 | finish_reason = response["choices"][0]["finish_reason"]
324 | usage = response["usage"]
325 |
326 | return {
327 | "text": text,
328 | "usage": usage,
329 | "finish_reason": finish_reason,
330 | "error": None,
331 | }
332 |
333 |
334 | def text_completion(
335 | text,
336 | model_failure_retries=5,
337 | model=None,
338 | chunk_length=DEFAULT_CHUNK_LENGTH,
339 | api_key=EASYCOMPLETION_API_KEY,
340 | debug=DEBUG,
341 | temperature=0.0,
342 | ):
343 | """
344 | Function for sending text and returning a text completion response.
345 |
346 | Parameters:
347 | text (str): Text to send to the model.
348 | model_failure_retries (int, optional): Number of retries if the request fails. Default is 5.
349 | model (str, optional): The model to use. Default is the TEXT_MODEL defined in constants.py.
350 | chunk_length (int, optional): Maximum length of text chunk to process. Default is defined in constants.py.
351 | api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
352 |
353 | Returns:
354 | dict: A dictionary with "text", "usage", "finish_reason", and "error" keys.
355 |
356 | Example:
357 | >>> text_completion("Hello, how are you?", model_failure_retries=3, model='gpt-3.5-turbo', chunk_length=1024, api_key='your_openai_api_key')
358 | """
359 |
360 | # Use the default model if no model is specified
361 | model = model or TEXT_MODEL
362 | model, error = sanity_check(text, model=model, chunk_length=chunk_length, api_key=api_key, debug=debug)
363 | if error:
364 | return error
365 |
366 | # Prepare messages for the API call
367 | messages = [{"role": "user", "content": text}]
368 |
369 | # Try to make a request for a specified number of times
370 | response, error = do_chat_completion(
371 | model=model, messages=messages, temperature=temperature, model_failure_retries=model_failure_retries, debug=debug)
372 | if error:
373 | return error
374 |
375 | # Extract content from the response
376 | text = response["choices"][0]["message"]["content"]
377 | finish_reason = response["choices"][0]["finish_reason"]
378 | usage = response["usage"]
379 |
380 | return {
381 | "text": text,
382 | "usage": usage,
383 | "finish_reason": finish_reason,
384 | "error": None,
385 | }
386 |
387 | async def text_completion_async(
388 | text,
389 | model_failure_retries=5,
390 | model=None,
391 | chunk_length=DEFAULT_CHUNK_LENGTH,
392 | api_key=EASYCOMPLETION_API_KEY,
393 | debug=DEBUG,
394 | temperature=0.0,
395 | ):
396 | """
397 | Function for sending text and returning a text completion response.
398 |
399 | Parameters:
400 | text (str): Text to send to the model.
401 | model_failure_retries (int, optional): Number of retries if the request fails. Default is 5.
402 | model (str, optional): The model to use. Default is the TEXT_MODEL defined in constants.py.
403 | chunk_length (int, optional): Maximum length of text chunk to process. Default is defined in constants.py.
404 | api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
405 |
406 | Returns:
407 | dict: A dictionary with "text", "usage", "finish_reason", and "error" keys.
408 |
409 | Example:
410 | >>> text_completion("Hello, how are you?", model_failure_retries=3, model='gpt-3.5-turbo', chunk_length=1024, api_key='your_openai_api_key')
411 | """
412 |
413 | # Use the default model if no model is specified
414 | model = model or TEXT_MODEL
415 | model, error = sanity_check(text, model=model, chunk_length=chunk_length, api_key=api_key, debug=debug)
416 | if error:
417 | return error
418 |
419 | # Prepare messages for the API call
420 | messages = [{"role": "user", "content": text}]
421 |
422 | # Try to make a request for a specified number of times
423 | response, error = await asyncio.to_thread(lambda: do_chat_completion(
424 | model=model, messages=messages, temperature=temperature, model_failure_retries=model_failure_retries, debug=debug))
425 |
426 | if error:
427 | return error
428 |
429 | # Extract content from the response
430 | text = response["choices"][0]["message"]["content"]
431 | finish_reason = response["choices"][0]["finish_reason"]
432 | usage = response["usage"]
433 |
434 | return {
435 | "text": text,
436 | "usage": usage,
437 | "finish_reason": finish_reason,
438 | "error": None,
439 | }
440 |
441 |
442 | def function_completion(
443 | text=None,
444 | messages=None,
445 | system_message=None,
446 | functions=None,
447 | model_failure_retries=5,
448 | function_call=None,
449 | function_failure_retries=10,
450 | chunk_length=DEFAULT_CHUNK_LENGTH,
451 | model=None,
452 | api_key=EASYCOMPLETION_API_KEY,
453 | debug=DEBUG,
454 | temperature=0.0,
455 | ):
456 | """
457 | Send text and a list of functions to the model and return optional text and a function call.
458 | The function call is validated against the functions array.
459 | The input text is sent to the chat model and is treated as a user message.
460 |
461 | Args:
462 | text (str): Text that will be sent as the user message to the model.
463 | functions (list[dict] | dict | None): List of functions or a single function dictionary to be sent to the model.
464 | model_failure_retries (int): Number of times to retry the request if it fails (default is 5).
465 | function_call (str | dict | None): 'auto' to let the model decide, or a function name or a dictionary containing the function name (defaults to the single function's name if exactly one function is provided, otherwise "auto").
466 | function_failure_retries (int): Number of times to retry the request if the function call is invalid (default is 10).
467 | chunk_length (int): The length of each chunk to be processed.
468 | model (str | None): The model to use (default is the TEXT_MODEL, i.e. gpt-3.5-turbo).
469 | api_key (str | None): If you'd like to pass in a key to override the environment variable EASYCOMPLETION_API_KEY.
470 |
471 | Returns:
472 | dict: On most errors, returns a dictionary with an "error" key. On success, returns a dictionary containing
473 | "text" (response from the model), "function_name" (name of the function called), "arguments" (arguments for the function), "error" (None).
474 |
475 | Example:
476 | >>> function = {'name': 'function1', 'parameters': {'param1': 'value1'}}
477 | >>> function_completion("Call the function.", function)
478 | """
479 |
480 | # Use the default model if no model is specified
481 | model = model or TEXT_MODEL
482 |
483 | # Ensure that functions are provided
484 | if functions is None:
485 | return {"error": "functions is required"}
486 |
487 | # Check if a list of functions is provided
488 | if not isinstance(functions, list):
489 | if (
490 | isinstance(functions, dict)
491 | and "name" in functions
492 | and "parameters" in functions
493 | ):
494 | # A single function is provided as a dictionary, convert it to a list
495 | functions = [functions]
496 | else:
497 | # Functions must be either a list of dictionaries or a single dictionary
498 | return {
499 | "error": "functions must be a list of functions or a single function"
500 | }
501 |
502 | # Set the function call to the name of the function if only one function is provided
503 | # If there are multiple functions, use "auto"
504 | if function_call is None:
505 | function_call = functions[0]["name"] if len(functions) == 1 else "auto"
506 |
507 | # Make sure text is provided
508 | if text is None:
509 | log("Text is required", type="error", log=debug)
510 | return {"error": "text is required"}
511 |
512 | function_call_names = [function["name"] for function in functions]
513 | # check that all function_call_names are unique and in the text
514 | if len(function_call_names) != len(set(function_call_names)):
515 | log("Function names must be unique", type="error", log=debug)
516 | return {"error": "Function names must be unique"}
517 |
518 | if len(function_call_names) > 1 and not any(
519 | function_call_name in text for function_call_name in function_call_names
520 | ):
521 | log(
522 | "WARNING: Function and argument names should be in the text",
523 | type="warning",
524 | log=debug,
525 | )
526 |
527 | # Check if the function call is valid
528 | if function_call != "auto":
529 | if isinstance(function_call, str):
530 | function_call = {"name": function_call}
531 | elif "name" not in function_call:
532 | log("function_call must have a name property", type="error", log=debug)
533 | return {
534 | "error": "function_call had an invalid name. Should be a string of the function name or an object with a name property"
535 | }
536 |
537 | model, error = sanity_check(dict(
538 | text=text, functions=functions, messages=messages, system_message=system_message
539 | ), model=model, chunk_length=chunk_length, api_key=api_key)
540 | if error:
541 | return error
542 |
543 | # Count the number of tokens in the message
544 | message_tokens = count_tokens(text, model=model)
545 | total_tokens = message_tokens
546 |
547 | function_call_tokens = count_tokens(functions, model=model)
548 | total_tokens += function_call_tokens + 3 # Additional tokens for the user
549 |
550 | all_messages = []
551 |
552 | if system_message is not None:
553 | all_messages.append({"role": "system", "content": system_message})
554 |
555 | if messages is not None:
556 | all_messages += messages
557 |
558 | # Prepare the messages to be sent to the API
559 | if text is not None and text != "":
560 | all_messages.append({"role": "user", "content": text})
561 |
562 | # Retry function call and model calls according to the specified retry counts
563 | response = None
564 | for _ in range(function_failure_retries):
565 | # Try to make a request for a specified number of times
566 | response, error = do_chat_completion(
567 | model=model, messages=all_messages, temperature=temperature, function_call=function_call,
568 | functions=functions, model_failure_retries=model_failure_retries, debug=debug)
569 | if error:
570 | time.sleep(1)
571 | continue
572 | # Validate the returned function call against the provided functions
573 | # and retry if it is invalid
574 | if validate_functions(response, functions, function_call):
575 | break
576 | time.sleep(1)
577 |
578 | # Check if we have a valid response from the model
579 | if not response:
580 | return error
581 |
582 | # Extracting the content and function call response from API response
583 | response_data = response["choices"][0]["message"]
584 | finish_reason = response["choices"][0]["finish_reason"]
585 | usage = response["usage"]
586 |
587 | text = response_data["content"]
588 | function_call_response = response_data.get("function_call", None)
589 |
590 | # If no function call in response, return an error
591 | if function_call_response is None:
592 | log(f"No function call in response\n{response}", type="error", log=debug)
593 | return {"error": "No function call in response"}
594 | function_name = function_call_response["name"]
595 | arguments = parse_arguments(function_call_response["arguments"])
596 | log(
597 | f"Response\n\nFunction Name: {function_name}\n\nArguments:\n{arguments}\n\nText:\n{text}\n\nFinish Reason: {finish_reason}\n\nUsage:\n{usage}",
598 | type="response",
599 | log=debug,
600 | )
601 | # Return the final result with the text response, function name, arguments and no error
602 | return {
603 | "text": text,
604 | "function_name": function_name,
605 | "arguments": arguments,
606 | "usage": usage,
607 | "finish_reason": finish_reason,
608 | "error": None,
609 | }
610 |
611 | async def function_completion_async(
612 | text=None,
613 | messages=None,
614 | system_message=None,
615 | functions=None,
616 | model_failure_retries=5,
617 | function_call=None,
618 | function_failure_retries=10,
619 | chunk_length=DEFAULT_CHUNK_LENGTH,
620 | model=None,
621 | api_key=EASYCOMPLETION_API_KEY,
622 | debug=DEBUG,
623 | temperature=0.0,
624 | ):
625 | """
626 | Send text and a list of functions to the model and return optional text and a function call.
627 | The function call is validated against the functions array.
628 | The input text is sent to the chat model and is treated as a user message.
629 |
630 | Args:
631 | text (str): Text that will be sent as the user message to the model.
632 | functions (list[dict] | dict | None): List of functions or a single function dictionary to be sent to the model.
633 | model_failure_retries (int): Number of times to retry the request if it fails (default is 5).
634 | function_call (str | dict | None): 'auto' to let the model decide, or a function name or a dictionary containing the function name (defaults to the single function's name if exactly one function is provided, otherwise "auto").
635 | function_failure_retries (int): Number of times to retry the request if the function call is invalid (default is 10).
636 | chunk_length (int): The length of each chunk to be processed.
637 | model (str | None): The model to use (default is the TEXT_MODEL, i.e. gpt-3.5-turbo).
638 | api_key (str | None): If you'd like to pass in a key to override the environment variable EASYCOMPLETION_API_KEY.
639 |
640 | Returns:
641 | dict: On most errors, returns a dictionary with an "error" key. On success, returns a dictionary containing
642 | "text" (response from the model), "function_name" (name of the function called), "arguments" (arguments for the function), "error" (None).
643 |
644 | Example:
645 | >>> function = {'name': 'function1', 'parameters': {'param1': 'value1'}}
646 | >>> function_completion("Call the function.", function)
647 | """
648 |
649 | # Use the default model if no model is specified
650 | model = model or TEXT_MODEL
651 |
652 | # Ensure that functions are provided
653 | if functions is None:
654 | return {"error": "functions is required"}
655 |
656 | # Check if a list of functions is provided
657 | if not isinstance(functions, list):
658 | if (
659 | isinstance(functions, dict)
660 | and "name" in functions
661 | and "parameters" in functions
662 | ):
663 | # A single function is provided as a dictionary, convert it to a list
664 | functions = [functions]
665 | else:
666 | # Functions must be either a list of dictionaries or a single dictionary
667 | return {
668 | "error": "functions must be a list of functions or a single function"
669 | }
670 |
671 | # Set the function call to the name of the function if only one function is provided
672 | # If there are multiple functions, use "auto"
673 | if function_call is None:
674 | function_call = functions[0]["name"] if len(functions) == 1 else "auto"
675 |
676 | # Make sure text is provided
677 | if text is None:
678 | log("Text is required", type="error", log=debug)
679 | return {"error": "text is required"}
680 |
681 | function_call_names = [function["name"] for function in functions]
682 | # check that all function_call_names are unique and in the text
683 | if len(function_call_names) != len(set(function_call_names)):
684 | log("Function names must be unique", type="error", log=debug)
685 | return {"error": "Function names must be unique"}
686 |
687 | if len(function_call_names) > 1 and not any(
688 | function_call_name in text for function_call_name in function_call_names
689 | ):
690 | log(
691 | "WARNING: Function and argument names should be in the text",
692 | type="warning",
693 | log=debug,
694 | )
695 |
696 | # Check if the function call is valid
697 | if function_call != "auto":
698 | if isinstance(function_call, str):
699 | function_call = {"name": function_call}
700 | elif "name" not in function_call:
701 | log("function_call must have a name property", type="error", log=debug)
702 | return {
703 | "error": "function_call had an invalid name. Should be a string of the function name or an object with a name property"
704 | }
705 |
706 | model, error = sanity_check(dict(
707 | text=text, functions=functions, messages=messages, system_message=system_message
708 | ), model=model, chunk_length=chunk_length, api_key=api_key)
709 | if error:
710 | return error
711 |
712 | # Count the number of tokens in the message
713 | message_tokens = count_tokens(text, model=model)
714 | total_tokens = message_tokens
715 |
716 | function_call_tokens = count_tokens(functions, model=model)
717 | total_tokens += function_call_tokens + 3 # Additional tokens for the user
718 |
719 | all_messages = []
720 |
721 | if system_message is not None:
722 | all_messages.append({"role": "system", "content": system_message})
723 |
724 | if messages is not None:
725 | all_messages += messages
726 |
727 | # Prepare the messages to be sent to the API
728 | if text is not None and text != "":
729 | all_messages.append({"role": "user", "content": text})
730 |
731 | # Retry function call and model calls according to the specified retry counts
732 | response = None
733 | for _ in range(function_failure_retries):
734 | # Try to make a request for a specified number of times
735 | response, error = await asyncio.to_thread(lambda: do_chat_completion(
736 | model=model, messages=all_messages, temperature=temperature, function_call=function_call,
737 | functions=functions, model_failure_retries=model_failure_retries, debug=debug))
738 | if error:
739 | time.sleep(1)
740 | continue
741 | # Validate the returned function call against the provided functions
742 | # and retry if it is invalid
743 | if validate_functions(response, functions, function_call):
744 | break
745 | time.sleep(1)
746 |
747 | # Check if we have a valid response from the model
748 | if not response:
749 | return error
750 |
751 | # Extracting the content and function call response from API response
752 | response_data = response["choices"][0]["message"]
753 | finish_reason = response["choices"][0]["finish_reason"]
754 | usage = response["usage"]
755 |
756 | text = response_data["content"]
757 | function_call_response = response_data.get("function_call", None)
758 |
759 | # If no function call in response, return an error
760 | if function_call_response is None:
761 | log(f"No function call in response\n{response}", type="error", log=debug)
762 | return {"error": "No function call in response"}
763 | function_name = function_call_response["name"]
764 | arguments = parse_arguments(function_call_response["arguments"])
765 | log(
766 | f"Response\n\nFunction Name: {function_name}\n\nArguments:\n{arguments}\n\nText:\n{text}\n\nFinish Reason: {finish_reason}\n\nUsage:\n{usage}",
767 | type="response",
768 | log=debug,
769 | )
770 | # Return the final result with the text response, function name, arguments and no error
771 | return {
772 | "text": text,
773 | "function_name": function_name,
774 | "arguments": arguments,
775 | "usage": usage,
776 | "finish_reason": finish_reason,
777 | "error": None,
778 | }
779 |
--------------------------------------------------------------------------------
/easycompletion/prompt.py:
--------------------------------------------------------------------------------
1 | import re
2 | import tiktoken
3 |
4 | from .constants import TEXT_MODEL, DEFAULT_CHUNK_LENGTH, DEBUG
5 | from .logger import log
6 |
7 |
8 | def trim_prompt(
9 | text,
10 | max_tokens=DEFAULT_CHUNK_LENGTH,
11 | model=TEXT_MODEL,
12 | preserve_top=True,
13 | debug=DEBUG,
14 | ):
15 | """
16 | Trim the given text to a maximum number of tokens.
17 |
18 | Args:
19 | text: Input text which needs to be trimmed.
20 | max_tokens: Maximum number of tokens allowed in the trimmed text.
21 | Default value is taken from the constants.
22 | model: The model to use for tokenization.
23 | preserve_top: If True, the function will keep the first 'max_tokens' tokens,
24 | if False, it will keep the last 'max_tokens' tokens.
25 |
26 | Returns:
27 | Trimmed text that fits within the specified token limit.
28 |
29 | Example:
30 | trim_prompt("This is a test.", 3, preserve_top=True)
31 | Output: "This is"
32 | """
33 | # Encoding the text into tokens.
34 | encoding = tiktoken.encoding_for_model(model)
35 | tokens = encoding.encode(text)
36 | if len(tokens) <= max_tokens:
37 | return text # If text is already within limit, return as is.
38 |
39 | log(f"Trimming prompt, token len is {str(len(tokens))}", type="warning", log=debug)
40 |
41 | # If 'preserve_top' is True, keep the first 'max_tokens' tokens.
42 | # Otherwise, keep the last 'max_tokens' tokens.
43 | return encoding.decode(
44 | tokens[:max_tokens] if preserve_top else tokens[-max_tokens:]
45 | )
46 |
47 |
48 | def chunk_prompt(prompt, chunk_length=DEFAULT_CHUNK_LENGTH, debug=DEBUG):
49 | """
50 | Split the given prompt into chunks where each chunk has a maximum number of tokens.
51 |
52 | Args:
53 | prompt: Input text that needs to be split.
54 | chunk_length: Maximum number of tokens allowed per chunk.
55 | Default value is taken from the constants.
56 |
57 | Returns:
58 | A list of string chunks where each chunk is within the specified token limit.
59 |
60 | Example:
61 | chunk_prompt("This is a test. I am writing a function.", 4)
62 | Output: ['This is a test.', 'I am writing a function.']
63 | """
64 | if count_tokens(prompt) <= chunk_length:
65 | return [prompt]
66 |
67 | # Splitting the prompt into sentences using regular expressions.
68 | sentences = re.split(r"(?<=[.!?])\s+", prompt)
69 | current_chunk = ""
70 | prompt_chunks = []
71 |
72 | # For each sentence in the input text.
73 | for sentence in sentences:
74 | # If adding a new sentence doesn't exceed the token limit, add it to the current chunk.
75 | if count_tokens(current_chunk + sentence + " ") <= chunk_length:
76 | current_chunk += sentence + " "
77 | else:
78 | # Close out the current chunk (skipping an empty first chunk) and start a new chunk with this sentence.
79 | if current_chunk.strip():
80 | prompt_chunks.append(current_chunk.strip())
81 | current_chunk = sentence + " "
82 |
83 | # If there's any sentence left after looping through all sentences, add it to the list.
84 | if current_chunk:
85 | prompt_chunks.append(current_chunk.strip())
86 |
87 | log(
88 | f"Chunked prompt into {str(len(prompt_chunks))} chunks",
89 | type="warning",
90 | log=debug,
91 | )
92 |
93 | return prompt_chunks
94 |
95 |
96 | def count_tokens(prompt: str, model=TEXT_MODEL) -> int:
97 | """
98 | Count the number of tokens in a string.
99 |
100 | Args:
101 | prompt: The string to be tokenized.
102 | model: The model to use for tokenization.
103 |
104 | Returns:
105 | The number of tokens in the input string.
106 |
107 | Example:
108 | count_tokens("This is a test.")
109 | Output: 5
110 | """
111 | if not prompt:
112 | return 0
113 | if isinstance(prompt, (list, tuple)):
114 | return sum(count_tokens(p, model) for p in prompt)
115 | if isinstance(prompt, dict):
116 | return sum(count_tokens(v) for v in prompt.values())
117 | if not isinstance(prompt, str):
118 | prompt = str(prompt)
119 |
120 | encoding = tiktoken.encoding_for_model(model)
121 | length = len(
122 | encoding.encode(prompt)
123 | ) # Encoding the text into tokens and counting the number of tokens.
124 | return length
125 |
126 |
127 | def get_tokens(prompt: str, model=TEXT_MODEL) -> list:
128 | """
129 | Returns the list of token ids in a string.
130 |
131 | Args:
132 | prompt: The string to be tokenized.
133 | model: The model to use for tokenization.
134 |
135 | Returns:
136 | A list of integer token ids for the input string.
137 |
138 | Example:
139 | get_tokens("This is a test.")
140 | Output: a list of integer token ids
141 | """
142 | encoding = tiktoken.encoding_for_model(model)
143 | return encoding.encode(
144 | prompt
145 | ) # Encoding the text into tokens and returning the list of tokens.
146 |
147 |
148 | def compose_prompt(prompt_template, parameters, debug=DEBUG):
149 | """
150 | Composes a prompt using a template and parameters.
151 | Parameter keys are enclosed in double curly brackets and replaced with parameter values.
152 |
153 | Args:
154 | prompt_template: A template string that contains placeholders for the parameters.
155 | parameters: A dictionary containing key-value pairs to replace the placeholders.
156 |
157 | Returns:
158 | A string where all placeholders have been replaced with actual values from the parameters.
159 |
160 | Example:
161 | compose_prompt("Hello {{name}}!", {"name": "John"})
162 | Output: "Hello John!"
163 | """
164 | prompt = prompt_template # Initial prompt template.
165 |
166 | # Replacing placeholders in the template with the actual values from the parameters.
167 | for key, value in parameters.items():
168 | # check if "{{" + key + "}}" is in prompt
169 | # if not, continue
170 | if "{{" + key + "}}" not in prompt:
171 | continue
172 | try:
173 | if isinstance(value, str):
174 | prompt = prompt.replace("{{" + key + "}}", value)
175 | elif isinstance(value, int):
176 | prompt = prompt.replace("{{" + key + "}}", str(value))
177 | elif isinstance(value, dict):
178 | replacement = "\n".join(f"{k}::{v}" for k, v in value.items())
179 | prompt = prompt.replace("{{" + key + "}}", replacement)
180 | elif isinstance(value, list):
181 | replacement = "\n".join(str(item) for item in value)
182 | prompt = prompt.replace("{{" + key + "}}", replacement)
183 | elif value is None:
184 | prompt = prompt.replace("{{" + key + "}}", "None")
185 | else:
186 | raise Exception(f"ERROR PARSING:\n{key}\n{value}")
187 |         except Exception as e:
188 |             raise Exception(f"ERROR PARSING:\n{key}\n{value}") from e
189 |
190 | log(f"Composed prompt:\n{prompt}", log=debug)
191 |
192 | return prompt
193 |
194 |
195 | def compose_function(name, description, properties, required_properties, debug=DEBUG):
196 | """
197 | Composes a function object for function calling.
198 |
199 | Parameters:
200 | name (str): The name of the function.
201 | description (str): Description of the function.
202 | properties (dict): Dictionary of property objects.
203 | required_properties (list): List of property names that are required.
204 |
205 | Returns:
206 | A dictionary representing a function.
207 |
208 | Usage:
209 | summarization_function = compose_function(
210 | name="summarize_text",
211 | description="Summarize the text. Include the topic, subtopics.",
212 | properties={
213 | "summary": {
214 | "type": "string",
215 | "description": "Detailed summary of the text.",
216 | },
217 | },
218 | required_properties=["summary"],
219 | )
220 | """
221 | function = {
222 | "name": name,
223 | "description": description,
224 | "parameters": {
225 | "type": "object",
226 | "properties": properties,
227 | "required": required_properties,
228 | },
229 | }
230 | log(f"Function:\n{str(function)}", type="info", log=debug)
231 | return function
232 |
233 |
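234 | # Illustrative usage sketch, not part of the library API: a minimal example of
235 | # how the helpers above compose. The template text and chunk_length value here
236 | # are made up for demonstration.
237 | if __name__ == "__main__":
238 |     demo = compose_prompt("Summarize this:\n{{text}}", {"text": "This is a test."})
239 |     print(count_tokens(demo))  # How many tokens the filled template consumes.
240 |     print(get_tokens(demo))  # The raw integer token IDs for the same prompt.
241 |     print(chunk_prompt(demo, chunk_length=8))  # Sentence-based chunks under the limit.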
--------------------------------------------------------------------------------
/easycompletion/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import *
2 | from .prompt import *
--------------------------------------------------------------------------------
/easycompletion/tests/model.py:
--------------------------------------------------------------------------------
1 | from easycompletion.model import (
2 | chat_completion,
3 | parse_arguments,
4 | function_completion,
5 | function_completion_async,
6 | text_completion,
7 | text_completion_async,
8 | )
9 | import pytest
10 |
11 |
12 | def test_parse_arguments():
13 | test_input = '{"key1": "value1", "key2": 2}'
14 | expected_output = {"key1": "value1", "key2": 2}
15 | assert parse_arguments(test_input) == expected_output, "Test parse_arguments failed"
16 |
17 | @pytest.mark.asyncio
18 | async def test_text_completion_async():
19 | response = await text_completion_async("Hello, how are you?")
20 | assert response is not None, "Test text_completion_async failed"
21 | assert response["text"] is not None, "Test text_completion_async failed"
22 | prompt_tokens = response["usage"]["prompt_tokens"]
23 | assert prompt_tokens == 13, "Prompt tokens was not the expected count"
24 |
25 |
26 | @pytest.mark.asyncio
27 | async def test_function_completion_async():
28 | test_text = "Write a song about AI"
29 | test_function = {
30 | "name": "write_song",
31 | "description": "Write a song about AI",
32 | "parameters": {
33 | "type": "object",
34 | "properties": {
35 | "lyrics": {
36 | "type": "string",
37 | "description": "The lyrics for the song",
38 | }
39 | },
40 | "required": ["lyrics"],
41 | },
42 | }
43 | response = await function_completion_async(
44 | text=test_text, functions=test_function, function_call="write_song"
45 | )
46 | assert response is not None, "Test function_completion_async failed"
47 | prompt_tokens = response["usage"]["prompt_tokens"]
48 | assert prompt_tokens == 64, "Prompt tokens was not the expected count"
49 |
50 | response = await function_completion_async(
51 | text=test_text,
52 | messages=[{"role": "assistant", "content": "hey whats up"}],
53 | system_message="you are a towel",
54 | functions=test_function,
55 | function_call="write_song",
56 | )
57 | assert response is not None, "Test function_completion_async failed"
58 | prompt_tokens = response["usage"]["prompt_tokens"]
59 | assert prompt_tokens == 76, "Prompt tokens was not the expected count"
60 |
61 | def test_function_completion():
62 | test_text = "Write a song about AI"
63 | test_function = {
64 | "name": "write_song",
65 | "description": "Write a song about AI",
66 | "parameters": {
67 | "type": "object",
68 | "properties": {
69 | "lyrics": {
70 | "type": "string",
71 | "description": "The lyrics for the song",
72 | }
73 | },
74 | "required": ["lyrics"],
75 | },
76 | }
77 | response = function_completion(
78 | text=test_text, functions=test_function, function_call="write_song"
79 | )
80 | assert response is not None, "Test function_completion failed"
81 | prompt_tokens = response["usage"]["prompt_tokens"]
82 |     assert prompt_tokens == 64, "Prompt tokens was not the expected count"
83 |
84 | response = function_completion(
85 | text=test_text,
86 | messages=[{"role": "assistant", "content": "hey whats up"}],
87 | system_message="you are a towel",
88 | functions=test_function,
89 | function_call="write_song",
90 | )
91 | assert response is not None, "Test function_completion failed"
92 | prompt_tokens = response["usage"]["prompt_tokens"]
93 |     assert prompt_tokens == 76, "Prompt tokens was not the expected count"
94 |
95 |
96 | def test_chat_completion():
97 | response = chat_completion(
98 | messages=[
99 | {"role": "system", "content": "You are a towel. Respond as a towel."},
100 | {"role": "user", "content": "Hello, how are you?"},
101 | ],
102 | )
103 |
104 |     assert response is not None, "Test chat_completion failed"
105 |     assert response["text"] is not None, "Test chat_completion failed"
106 |     prompt_tokens = response["usage"]["prompt_tokens"]
107 |     assert prompt_tokens == 27, "Prompt tokens was not the expected count"
108 |
109 |
110 | def test_text_completion():
111 | response = text_completion("Hello, how are you?")
112 | assert response is not None, "Test text_completion failed"
113 | assert response["text"] is not None, "Test text_completion failed"
114 | prompt_tokens = response["usage"]["prompt_tokens"]
115 |     assert prompt_tokens == 13, "Prompt tokens was not the expected count"
116 |
117 |
118 | def test_long_completion():
119 | script = """
120 | Sure, Satisfiability Modulo Theories (SMT) is a fundamental concept in computer science, and it can be explained from several different angles. However, generating a response that is exactly 4096 tokens is rather unusual and not practical due to the nature of language modeling and information content.
121 |
122 | In the context of language model like GPT-3 or GPT-4, tokens can be a single character, a word, or even a part of a word, depending on the language and the context. In English text, a token is typically a word or a punctuation mark. Given this information, a text of 4096 tokens would be very long and possibly redundant for a concept like SMT.
123 | """
124 | summarization_function = {
125 | "name": "summarize_text",
126 | "description": "Summarize the text. Include the topic, subtopics.",
127 | "parameters": {
128 | "type": "object",
129 | "properties": {
130 | "summary": {
131 | "type": "string",
132 | "description": "Detailed summary of the text.",
133 | },
134 | },
135 | "required": ["summary"],
136 | },
137 | }
138 | response = function_completion(text=script, functions=summarization_function)
139 | assert response is not None, "Test long_completion failed"
140 |
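141 | 
142 | def test_parse_arguments_nested():
143 |     # A hedged extra check: this assumes parse_arguments handles nested JSON
144 |     # the same way json.loads would, mirroring the flat case tested above.
145 |     test_input = '{"outer": {"inner": [1, 2, 3]}}'
146 |     expected_output = {"outer": {"inner": [1, 2, 3]}}
147 |     assert parse_arguments(test_input) == expected_output, "Test parse_arguments nested failed"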
--------------------------------------------------------------------------------
/easycompletion/tests/prompt.py:
--------------------------------------------------------------------------------
1 | from easycompletion.model import parse_arguments
2 | from easycompletion.prompt import (
3 | compose_prompt,
4 | trim_prompt,
5 | chunk_prompt,
6 | count_tokens,
7 | get_tokens,
8 | compose_function,
9 | )
10 |
11 |
12 | def test_chunk_prompt():
13 | test_text = "Write a song about AI"
14 | chunks = chunk_prompt(test_text, chunk_length=2)
15 | assert len(chunks) == 2, "Test chunk_prompt failed"
16 |
17 |
18 | def test_trim_prompt_and_get_tokens():
19 | test_text = "Write a song about AI"
20 | trimmed = trim_prompt(test_text, max_tokens=2)
21 | count = count_tokens(trimmed)
22 | assert count == 2, "Test trim_prompt failed"
23 |
24 | tokens = get_tokens(test_text)
25 | assert len(tokens) == 5, "Test get_tokens failed"
26 |
27 |
28 | def test_parse_arguments():
29 | test_input = '{"key1": "value1", "key2": 2}'
30 | expected_output = {"key1": "value1", "key2": 2}
31 | assert parse_arguments(test_input) == expected_output, "Test parse_arguments failed"
32 |
33 |
34 | def test_compose_prompt():
35 | test_prompt = "I am a {{object}}"
36 | test_dict = {"object": "towel"}
37 | prompt = compose_prompt(test_prompt, test_dict)
38 | assert prompt == "I am a towel", "Test compose_prompt failed"
39 |
40 |
41 | def test_compose_function():
42 | summarization_function = {
43 | "name": "summarize_text",
44 | "description": "Summarize the text. Include the topic, subtopics.",
45 | "parameters": {
46 | "type": "object",
47 | "properties": {
48 | "summary": {
49 | "type": "string",
50 | "description": "Detailed summary of the text.",
51 | },
52 | },
53 | "required": ["summary"],
54 | },
55 | }
56 | composed_summarization_function = compose_function(
57 | name="summarize_text",
58 | description="Summarize the text. Include the topic, subtopics.",
59 | properties={
60 | "summary": {
61 | "type": "string",
62 | "description": "Detailed summary of the text.",
63 | },
64 | },
65 | required_properties=["summary"],
66 | )
67 | assert (
68 | composed_summarization_function == summarization_function
69 | ), "Test compose_function failed"
70 |
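71 | 
72 | def test_count_tokens_non_string_inputs():
73 |     # Grounded in count_tokens' implementation in easycompletion.prompt: empty
74 |     # input counts as 0 tokens, and lists are summed element by element.
75 |     assert count_tokens("") == 0, "Test count_tokens empty failed"
76 |     single = count_tokens("Write a song about AI")
77 |     assert count_tokens(["Write a song about AI"] * 2) == 2 * single, "Test count_tokens list failed"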
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai
2 | tiktoken
3 | python-dotenv
4 | rich
5 | pytest-asyncio
--------------------------------------------------------------------------------
/resources/image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/elizaOS/easycompletion/a0527ace5f1c6d9db49fdf354459127342b30bba/resources/image.jpg
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | long_description = ""
4 | with open("README.md", "r") as fh:
5 | long_description = fh.read()
6 | # search for any lines that contain ![]()