├── .gitignore ├── LICENSE ├── README.md ├── funcgpt ├── __init__.py ├── credentials.py ├── data │ └── cl100k_base.tiktoken.bz2 ├── decorators.py ├── gpt.py ├── message.py ├── protocols.py ├── tokentools.py └── wrapper.py └── pyproject.toml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/**/workspace.xml 2 | .idea/**/tasks.xml 3 | .idea/**/usage.statistics.xml 4 | .idea/**/dictionaries 5 | .idea/**/shelf 6 | .idea/**/aws.xml 7 | .idea/**/contentModel.xml 8 | .idea/**/dataSources/ 9 | .idea/**/dataSources.ids 10 | .idea/**/dataSources.local.xml 11 | .idea/**/sqlDataSources.xml 12 | .idea/**/dynamic.xml 13 | .idea/**/uiDesigner.xml 14 | .idea/**/dbnavigator.xml 15 | .idea/**/gradle.xml 16 | .idea/**/libraries 17 | cmake-build-*/ 18 | .idea/**/mongoSettings.xml 19 | *.iws 20 | out/ 21 | .idea_modules/ 22 | atlassian-ide-plugin.xml 23 | .idea/replstate.xml 24 | .idea/sonarlint/ 25 | com_crashlytics_export_strings.xml 26 | crashlytics.properties 27 | crashlytics-build.properties 28 | fabric.properties 29 | .idea/httpRequests 30 | .idea/caches/build_file_checksums.ser 31 | __pycache__/ 32 | *.py[cod] 33 | *$py.class 34 | *.so 35 | .Python 36 | build/ 37 | develop-eggs/ 38 | dist/ 39 | downloads/ 40 | eggs/ 41 | .eggs/ 42 | lib/ 43 | lib64/ 44 | parts/ 45 | sdist/ 46 | var/ 47 | wheels/ 48 | share/python-wheels/ 49 | *.egg-info/ 50 | .installed.cfg 51 | *.egg 52 | MANIFEST 53 | *.manifest 54 | *.spec 55 | pip-log.txt 56 | pip-delete-this-directory.txt 57 | htmlcov/ 58 | .tox/ 59 | .nox/ 60 | .coverage 61 | .coverage.* 62 | .cache 63 | nosetests.xml 64 | coverage.xml 65 | *.cover 66 | *.py,cover 67 | .hypothesis/ 68 | .pytest_cache/ 69 | cover/ 70 | *.mo 71 | *.pot 72 | *.log 73 | local_settings.py 74 | db.sqlite3 75 | db.sqlite3-journal 76 | instance/ 77 | .webassets-cache 78 | .scrapy 79 | docs/_build/ 80 | .pybuilder/ 81 | target/ 82 | .ipynb_checkpoints 83 | profile_default/ 84 | ipython_config.py 85 | .pdm.toml 86 | __pypackages__/ 87 | celerybeat-schedule 88 | celerybeat.pid 89 | *.sage.py 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | .spyderproject 98 | .spyproject 99 | .ropeproject 100 | /site 101 | .mypy_cache/ 102 | .dmypy.json 103 | dmypy.json 104 | .pyre/ 105 | .pytype/ 106 | cython_debug/ 107 | .idea/ 108 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | ===================== 3 | 4 | Copyright © 2023 Leandro Lima 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # funcgpt: Python library for creating functions with OpenAI's GPT 2 | 3 | funcgpt is an easy-to-use Python library that allows you to quickly create Python functions using the power of OpenAI's GPT models. With just a few lines of code, you can create functions that generate human-like responses, answer questions, or anything else that GPT is capable of. 4 | 5 | ## Features 6 | 7 | - Easy to use decorator for creating functions based on GPT models 8 | - Supports different GPT model versions 9 | - Customize GPT's behavior with adjustable temperature values 10 | - Generate responses in streaming or non-streaming modes 11 | 12 | ## Installation 13 | 14 | To install funcgpt, use pip: 15 | 16 | ```bash 17 | pip install funcgpt 18 | ``` 19 | 20 | ## Usage 21 | 22 | To create a function that answers questions like a pirate, you can use the following snippet: 23 | 24 | ```python 25 | from funcgpt import gpt 26 | 27 | @gpt 28 | def answer_like_pirate(message: str) -> str: 29 | """Answer questions like a pirate.""" 30 | ... 31 | 32 | ``` 33 | 34 | Usage: 35 | 36 | ```python 37 | >>> answer_like_pirate("How are you doing today?") 38 | "Arrr, I be doin' fine, matey." 39 | ``` 40 | 41 | To do the same thing, but with a function that streams responses, you can use the following snippet: 42 | 43 | ```python 44 | from typing import Iterator 45 | from funcgpt import gpt 46 | 47 | @gpt 48 | def stream_like_pirate(message: str) -> Iterator[str]: 49 | """Answers questions like a pirate.""" 50 | ... 51 | 52 | ``` 53 | 54 | Usage: 55 | 56 | ```python 57 | >>> for token in stream_like_pirate("How are you doing today?"): 58 | ... print(token, end="", flush=True) 59 | ... 60 | Arrr, I be doin' fine, matey. 61 | ``` 62 | 63 | For defining a function that returns a boolean value, you can use the following snippet: 64 | 65 | ```python 66 | from funcgpt import gpt 67 | 68 | @gpt 69 | def is_pirate(message: str) -> bool: 70 | """Returns true if the message is from a pirate.""" 71 | ... 72 | 73 | ``` 74 | 75 | Usage: 76 | 77 | ```python 78 | >>> is_pirate("Arrr, I be doin' fine, matey.") 79 | True 80 | ``` 81 | 82 | For choosing a different model or temperature, you can use the `model` and `temperature` keyword arguments: 83 | 84 | ```python 85 | from funcgpt import gpt 86 | 87 | @gpt(model="gpt-4", temperature=0) 88 | def answer_like_pirate(message: str) -> str: 89 | """Answer questions like a pirate.""" 90 | ... 91 | 92 | ``` 93 | 94 | ## Contributing 95 | 96 | We welcome contributions! Please feel free to fork the repository, make changes, and submit pull requests. If you have any questions or ideas, don't hesitate to open an issue. 97 | 98 | ## License 99 | 100 | funcgpt is released under the MIT License. See the [LICENSE](LICENSE) file for more details. 
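
## Configuration

funcgpt reads its OpenAI credentials from the environment when the package is imported (see `funcgpt/credentials.py`): `OPENAI_API_KEY` is required, and importing the package raises a `RuntimeError` if it is missing, while `OPENAI_ORG_ID` is optional. A minimal sketch of setting them in code (the key value is a placeholder; in practice you would normally export these variables in your shell or deployment environment):

```python
import os

# Both variables are read once, at import time, by funcgpt.credentials.
os.environ["OPENAI_API_KEY"] = "sk-your-key-here"  # placeholder, not a real key
# os.environ["OPENAI_ORG_ID"] = "org-your-org"     # optional

from funcgpt import gpt
```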
101 | -------------------------------------------------------------------------------- /funcgpt/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from .decorators import gpt 4 | 5 | __all__ = ["gpt"] 6 | -------------------------------------------------------------------------------- /funcgpt/credentials.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from os import getenv 4 | 5 | if (OPENAI_API_KEY := getenv("OPENAI_API_KEY")) is None: 6 | raise RuntimeError("OPENAI_API_KEY environment variable not set") 7 | 8 | OPENAI_ORG_ID = getenv("OPENAI_ORG_ID") 9 | -------------------------------------------------------------------------------- /funcgpt/data/cl100k_base.tiktoken.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leandropls/funcgpt/f67d0c5b41a08171d608862319d701888f774931/funcgpt/data/cl100k_base.tiktoken.bz2 -------------------------------------------------------------------------------- /funcgpt/decorators.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Literal, TypeVar, overload 2 | 3 | from funcgpt.protocols import GPTAnswerProtocol, GPTStreamProtocol 4 | from funcgpt.wrapper import create_generic_wrapper 5 | 6 | T = TypeVar("T", bound=GPTAnswerProtocol | GPTStreamProtocol) 7 | 8 | __all__ = ["gpt"] 9 | 10 | 11 | @overload 12 | def gpt(f: T) -> T: 13 | ... 14 | 15 | 16 | @overload 17 | def gpt( 18 | model: Literal["gpt-3.5-turbo", "gpt-4"] = "gpt-3.5-turbo", 19 | temperature: int = 0, 20 | max_tokens: int | None = None, 21 | ) -> Callable[[T], T]: 22 | ... 
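# The two overloads above describe the decorator's two call forms shown in the README:
#
#     @gpt                                   # bare form: wraps the function directly
#     def answer_like_pirate(message: str) -> str: ...
#
#     @gpt(model="gpt-4", temperature=0)     # configured form: returns a decorator
#     def answer_like_pirate(message: str) -> str: ...
#
# The implementation below accepts both: a single callable positional argument means
# the bare form; otherwise the keyword arguments configure the returned decorator.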
23 | 
24 | 
25 | def gpt(
26 |     *args,
27 |     model: Literal["gpt-3.5-turbo", "gpt-4"] = "gpt-3.5-turbo",
28 |     temperature: int = 0,
29 |     max_tokens: int | None = None,
30 | ):
31 |     if len(args) == 1 and callable(f := args[0]):
32 |         return create_generic_wrapper(
33 |             f=f, model=model, temperature=temperature, max_tokens=max_tokens
34 |         )
35 |     else:
36 |         return lambda f_: create_generic_wrapper(
37 |             f_, model=model, temperature=temperature, max_tokens=max_tokens
38 |         )
39 | 
--------------------------------------------------------------------------------
/funcgpt/gpt.py:
--------------------------------------------------------------------------------
1 | import re
2 | from codecs import getreader
3 | from json import dumps, loads
4 | from typing import IO, Iterator, Literal, cast
5 | from urllib.request import Request, urlopen
6 | 
7 | from funcgpt.credentials import OPENAI_API_KEY, OPENAI_ORG_ID
8 | from funcgpt.message import Message
9 | 
10 | DEFAULT_COMPLETIONS_URL = "https://api.openai.com/v1/chat/completions"
11 | 
12 | BASE_HEADERS = {
13 |     "Content-Type": "application/json",
14 |     "Authorization": f"Bearer {OPENAI_API_KEY}",
15 | }
16 | 
17 | if OPENAI_ORG_ID is not None:
18 |     BASE_HEADERS["OpenAI-Organization"] = OPENAI_ORG_ID
19 | 
20 | match_data = re.compile(r"^data: (.*)(?:\r\n|\r|\n)$").match
21 | 
22 | match_empty_line = re.compile(r"^(?:\r\n|\r|\n)$").match
23 | 
24 | utf8reader = getreader("utf-8")
25 | 
26 | 
27 | def stream(
28 |     model: Literal["gpt-3.5-turbo", "gpt-4"],
29 |     messages: list[Message],
30 |     temperature: int = 0,
31 |     stop: str | list[str] | None = None,
32 |     chat_completions_url: str = DEFAULT_COMPLETIONS_URL,
33 | ) -> Iterator[str]:
34 |     """
35 |     Stream the chat completions for a given model, messages and other parameters.
36 | 
37 |     :param model: The identifier of the AI model to be used. Can be either "gpt-3.5-turbo" or "gpt-4".
38 |     :param messages: A list of dictionaries representing the message history to be passed to the model.
39 |     :param temperature: Controls randomness in the AI response. Higher values result in more random
40 |                         completions, while lower values steer the model towards more focused responses.
41 |     :param stop: A string or list of strings to specify sequence(s) at which the API will stop
42 |                  generating further tokens. Default is None.
43 |     :param chat_completions_url: The URL endpoint for fetching chat completions. The default value
44 |                                  is DEFAULT_COMPLETIONS_URL.
45 | 
46 |     :return: An iterator over the formatted chat completions.
47 | 
48 |     :raises urllib.error.HTTPError: Raised if the API responds with an error status (e.g., invalid credentials).
49 | 
50 |     .. note:: If the model does not return a completion, the iterator will ignore the response and
51 |               not yield anything.
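
    A minimal usage sketch (hypothetical message; assumes OPENAI_API_KEY was set before import)::

        for chunk in stream(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Say hello"}],
        ):
            print(chunk, end="", flush=True)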
52 | """ 53 | # Prepare request payload with model, messages, and temperature 54 | requestArguments = { 55 | "model": model, 56 | "messages": messages, 57 | "temperature": temperature, 58 | "stream": True, 59 | } 60 | 61 | # Add "stop" parameter if provided 62 | if stop is not None: 63 | requestArguments["stop"] = stop 64 | 65 | request = Request( 66 | url=chat_completions_url, 67 | data=dumps(requestArguments).encode(), 68 | headers=BASE_HEADERS, 69 | ) 70 | 71 | with urlopen(request) as f: # type: IO[bytes] 72 | fstr = cast(IO[str], utf8reader(f)) 73 | data = "" 74 | 75 | while True: 76 | line = fstr.readline() 77 | 78 | # If data is empty, look for a valid data line 79 | if not data: 80 | if (dataMatch := match_data(line)) is not None: 81 | data = dataMatch.group(1) 82 | continue 83 | 84 | # If it's not an empty line, reset the data and continue 85 | if match_empty_line(line) is None: 86 | data = "" 87 | continue 88 | 89 | # If data received is "[DONE]", break the loop 90 | if data == "[DONE]": 91 | break 92 | 93 | # Process and yield the completion 94 | body = loads(data) 95 | data = "" 96 | delta = body["choices"][0]["delta"] 97 | 98 | if "content" not in delta: 99 | continue 100 | 101 | deltaContent = delta["content"] 102 | 103 | yield deltaContent 104 | 105 | 106 | def answer( 107 | model: Literal["gpt-3.5-turbo", "gpt-4"], 108 | messages: list[Message], 109 | temperature: float = 0, 110 | stop: str | list[str] | None = None, 111 | chat_completions_url: str = DEFAULT_COMPLETIONS_URL, 112 | ) -> str: 113 | """ 114 | Query an OpenAI model with a list of messages and get the model's answer. 115 | 116 | :param model: The identifier of the AI model to be used. Can be either "gpt-3.5-turbo" or "gpt-4". 117 | :param messages: A list of Message TypedDicts. 118 | :param temperature: Controls randomness in the model's response. Higher values (e.g., 1) will 119 | generate more random answers, while lower values (e.g., 0) make the model 120 | more deterministic. Default is 0. 121 | :param stop: A string or list of strings to specify sequence(s) at which the API will stop 122 | generating further tokens. Default is None. 123 | :param chat_completions_url: The URL endpoint for fetching chat completions. The default value 124 | is DEFAULT_COMPLETIONS_URL. 125 | :raises OverflowError: Raised if the model's response exceeds the maximum length. 126 | :return: The assistant's generated response as a string. 
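
    A minimal usage sketch (hypothetical messages, mirroring how wrapper.py builds its prompts)::

        reply = answer(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "Answer questions like a pirate."},
                {"role": "user", "content": "How are you doing today?"},
            ],
        )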
127 | """ 128 | # Prepare request payload with model, messages, and temperature 129 | requestArguments = { 130 | "model": model, 131 | "messages": messages, 132 | "temperature": temperature, 133 | } 134 | 135 | # Add "stop" parameter if provided 136 | if stop is not None: 137 | requestArguments["stop"] = stop 138 | 139 | # Prepare the HTTP request 140 | request = Request( 141 | url=chat_completions_url, 142 | data=dumps(requestArguments).encode(), 143 | headers=BASE_HEADERS, 144 | ) 145 | 146 | # Send the request to OpenAI's chat completions API 147 | response = urlopen(request) 148 | 149 | # Parse the response JSON 150 | body = loads(response.read()) 151 | 152 | # Get the first choice 153 | choice = body["choices"][0] 154 | 155 | if choice["finish_reason"] == "length": 156 | raise OverflowError("The model's response exceeded the maximum length.") 157 | 158 | # Extract and return the assistant's response 159 | return choice["message"]["content"] 160 | -------------------------------------------------------------------------------- /funcgpt/message.py: -------------------------------------------------------------------------------- 1 | from typing import Literal, TypedDict 2 | 3 | 4 | class Message(TypedDict): 5 | """ 6 | A message sent or received by the assistant. 7 | 8 | :param role: The role of the message sender or receiver. Can be "system", 9 | "user" or "assistant". 10 | :param content: The text content of the message. 11 | """ 12 | 13 | role: Literal["system", "user", "assistant"] 14 | content: str 15 | -------------------------------------------------------------------------------- /funcgpt/protocols.py: -------------------------------------------------------------------------------- 1 | from typing import Iterator, Protocol 2 | 3 | 4 | class GPTAnswerProtocol(Protocol): 5 | def __call__(self, message: str) -> str: 6 | ... 7 | 8 | 9 | class GPTStreamProtocol(Protocol): 10 | def __call__(self, message: str) -> Iterator[str]: 11 | ... 12 | -------------------------------------------------------------------------------- /funcgpt/tokentools.py: -------------------------------------------------------------------------------- 1 | import bz2 2 | from pathlib import Path 3 | from typing import Literal, TypedDict 4 | from unittest.mock import patch 5 | from urllib.parse import urlparse 6 | 7 | from tiktoken import Encoding as TikTokenEncoding 8 | from tiktoken_ext.openai_public import cl100k_base 9 | 10 | from funcgpt.message import Message 11 | 12 | __all__ = ["serialize_to_gpt", "get_token_count"] 13 | 14 | MSG_SEP = { 15 | "gpt-3.5-turbo": "\n", 16 | "gpt-4": "", 17 | } 18 | 19 | IM_SEP = { 20 | "gpt-3.5-turbo": "\n", 21 | "gpt-4": "<|im_sep|>", 22 | } 23 | 24 | IM_START = { 25 | "gpt-3.5-turbo": "<|im_start|>", 26 | "gpt-4": "<|im_start|>", 27 | } 28 | 29 | IM_END = { 30 | "gpt-3.5-turbo": "<|im_end|>", 31 | "gpt-4": "<|im_end|>", 32 | } 33 | 34 | 35 | def read_file_cached(blobpath: str) -> bytes: 36 | """ 37 | Reads a cached file given a URL path to the original file. 38 | 39 | The cache is stored in the "data" directory, and the cached files are 40 | compressed using the bz2 format. If the cached file is not found, this 41 | function raises a FileNotFoundError. 42 | 43 | :param blobpath: The URL path of the original file. 44 | Example: "https://example.com/data/file.txt" 45 | :return: The content of the cached file as bytes. 46 | :raises FileNotFoundError: If the cached file is not found. 
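
    A usage sketch (hypothetical URL; only the basename of the URL path is used to build the cache key)::

        # Resolves to funcgpt/data/cl100k_base.tiktoken.bz2 and returns the decompressed bytes.
        ranks_data = read_file_cached("https://example.com/encodings/cl100k_base.tiktoken")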
47 | """ 48 | # Get the cache directory path 49 | cache_dir = Path(__file__).parent / "data" 50 | 51 | # Generate the cache key by extracting the file name from the URL and adding ".bz2" extension 52 | cache_key = Path(urlparse(blobpath).path).name + ".bz2" 53 | 54 | # Construct the full cache file path 55 | cache_path = cache_dir / cache_key 56 | 57 | # Read and return the content of the bz2-compressed cached file 58 | with bz2.open(cache_path, "rb") as f: 59 | return f.read() 60 | 61 | 62 | class EncodingParameters(TypedDict): 63 | name: str 64 | pat_str: str 65 | mergeable_ranks: dict[bytes, int] 66 | special_tokens: dict[str, int] 67 | 68 | 69 | @patch("tiktoken.load.read_file_cached", read_file_cached) 70 | def get_cl100k_im_encoding() -> TikTokenEncoding: 71 | """ 72 | Generate a tiktoken Encoding for cl100k_im with appropriate special tokens. 73 | 74 | :return: the tiktoken Encoding instance 75 | """ 76 | # Obtain base encoding parameters for cl100k 77 | parameters: EncodingParameters = cl100k_base() 78 | 79 | # Return an encoding instance with the specific cl100k_im parameters and special tokens 80 | return TikTokenEncoding( 81 | name="cl100k_im", 82 | pat_str=parameters["pat_str"], 83 | mergeable_ranks=parameters["mergeable_ranks"], 84 | special_tokens={ 85 | **parameters["special_tokens"], 86 | IM_START["gpt-4"]: 100264, 87 | IM_END["gpt-4"]: 100265, 88 | IM_SEP["gpt-4"]: 100266, 89 | }, 90 | ) 91 | 92 | 93 | CL100K_IM_ENCODING = get_cl100k_im_encoding() 94 | 95 | 96 | def serialize_to_gpt( 97 | messages: list[Message], 98 | model: Literal["gpt-3.5-turbo", "gpt-4"], 99 | ) -> list[int]: 100 | """ 101 | Serialize the given list of messages to a format that can be consumed by the specified GPT model by 102 | converting them into their respective encoding indices. 103 | 104 | :param messages: A list of message dictionaries, where each dictionary contains a "role" (either 105 | "system", "user", or "assistant") and a "content" (string). 106 | :param model: The GPT model to be used ("gpt-3.5-turbo" or "gpt-4"). 107 | :return: A list of integer encoding indices that represent the serialized messages. 108 | """ 109 | # Constants for the start, end, and separator placeholders 110 | imStart = IM_START[model] 111 | imEnd = IM_END[model] 112 | imSep = IM_SEP[model] 113 | msgSep = MSG_SEP[model] 114 | 115 | pieces = [] 116 | for message in messages: 117 | # Format each message into its corresponding encoded format 118 | pieces.append(f"{imStart}{message['role']}{imSep}{message['content']}{imEnd}") 119 | # Add the final assistant prompt 120 | pieces.append(f"{imStart}assistant{imSep}") 121 | # Join the encoded message pieces with a separator 122 | serialized = msgSep.join(pieces) 123 | 124 | return CL100K_IM_ENCODING.encode(serialized, allowed_special="all") 125 | 126 | 127 | def get_token_count( 128 | messages: list[Message], 129 | model: Literal["gpt-3.5-turbo", "gpt-4"], 130 | ) -> int: 131 | """ 132 | Determine the number of tokens for a given list of messages. 133 | 134 | :param messages: A list of message dictionaries, where each dictionary contains a "role" (either 135 | "system", "user", or "assistant") and a "content" (string). 136 | :param model: The GPT model to be used ("gpt-3.5-turbo" or "gpt-4"). 137 | :return: The total number of tokens in the serialized messages. 
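
    wrapper.py uses this count to enforce its prompt token budget before sending a request.
    A minimal sketch (hypothetical message)::

        prompt_tokens = get_token_count(
            messages=[{"role": "user", "content": "How are you doing today?"}],
            model="gpt-4",
        )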
138 |     """
139 |     return len(serialize_to_gpt(messages, model))
140 | 
--------------------------------------------------------------------------------
/funcgpt/wrapper.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | from functools import wraps
4 | from inspect import signature
5 | from textwrap import dedent
6 | from typing import Callable, Iterator, Literal
7 | 
8 | from funcgpt.gpt import answer, stream
9 | from funcgpt.tokentools import get_token_count
10 | 
11 | MAX_TOKENS = {
12 |     "gpt-3.5-turbo": 4096,
13 |     "gpt-4": 8192,
14 | }
15 | 
16 | DEFAULT_PROMPT_TOKENS_SHARE = 0.875  # 7/8
17 | 
18 | 
19 | def create_generic_wrapper(
20 |     f: Callable,
21 |     model: Literal["gpt-3.5-turbo", "gpt-4"],
22 |     temperature: int,
23 |     max_tokens: int | None = None,
24 | ) -> Callable[[str], str | Iterator[str]]:
25 |     """
26 |     Create a generic wrapper for a callable (typically a function) to generate responses
27 |     based on the callable's signature and docstring.
28 | 
29 |     :param f: The callable to be wrapped.
30 |     :param model: The OpenAI model to use for generating responses. Must be one of:
31 |                   "gpt-3.5-turbo" or "gpt-4".
32 |     :param temperature: The temperature value to use for generating the responses.
33 |                         Lower values make the responses more focused and deterministic,
34 |                         while higher values make them more diverse.
35 |     :param max_tokens: The maximum number of tokens for the input prompt. If not provided,
36 |                        the maximum number of tokens will be determined based on the model
37 |                        and the default prompt tokens share.
38 | 
39 |     :return: A wrapped callable that generates responses according to the specifications
40 |              and instructions provided in the function docstring.
41 | 
42 |     :raises ValueError: If the callable does not have a docstring or if the return
43 |                         annotation is not of the types str, Iterator[str] or bool.
44 |     """
45 |     # Extract the callable's docstring
46 |     fdoc: str | None = f.__doc__
47 |     if fdoc is None:
48 |         raise ValueError("Function must have a docstring")
49 | 
50 |     # Extract the callable's return annotation
51 |     fsig = signature(f)
52 |     return_annotation = fsig.return_annotation
53 | 
54 |     # Determine the engine to use for generating responses
55 |     if return_annotation is str:
56 |         engine = answer
57 |     elif return_annotation is Iterator[str]:
58 |         engine = stream
59 |     elif return_annotation is bool:
60 |         # For bool returns, request a plain true/false answer and test the response text.
61 |         engine = (
62 |             lambda *args, **kwargs: "true"
63 |             in answer(*args, **kwargs).lower()
64 |         )
65 |     else:
66 |         raise ValueError("Function must have a return annotation of str, Iterator[str], or bool")
67 | 
68 |     # Create the instructions for the GPT engine
69 |     instructions = "You should answer to inputs according to the following specification:\n\n"
70 |     instructions += dedent(fdoc).strip()
71 | 
72 |     if return_annotation is bool:
73 |         instructions += (
74 |             "\n\nAnswer with either true or false without including any other text. "
75 |             "If no definitive answer can be given, answer false."
76 |         )
77 |     else:
78 |         instructions += "\n\nAnswer with only what was requested without including any other text."
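    # With the README's pirate example, `instructions` now reads roughly as:
    #
    #     You should answer to inputs according to the following specification:
    #
    #     Answer questions like a pirate.
    #
    #     Answer with only what was requested without including any other text.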
79 | 80 | # Determine the system role based on the model 81 | if model == "gpt-3.5-turbo": 82 | systemRole: Literal["system", "user"] = "user" 83 | else: 84 | systemRole = "system" 85 | 86 | # Determine the maximum number of tokens for the input prompt 87 | if max_tokens is None: 88 | max_tokens = int(MAX_TOKENS[model] * DEFAULT_PROMPT_TOKENS_SHARE) 89 | 90 | # Create the wrapper function 91 | @wraps(f) 92 | def wrapper(message: str) -> str | Iterator[str]: 93 | messages = [ 94 | {"role": systemRole, "content": instructions}, 95 | {"role": "user", "content": message}, 96 | ] 97 | message_tokens_count = get_token_count(messages=messages, model=model) 98 | if message_tokens_count > max_tokens: 99 | raise ValueError( 100 | f"Message exceeds maximum number of tokens ({message_tokens_count} > {max_tokens})" 101 | ) 102 | return engine( 103 | model=model, 104 | messages=messages, 105 | temperature=temperature, 106 | ) 107 | 108 | # Return the wrapper function 109 | return wrapper 110 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "funcgpt" 7 | version = "1.1.1" 8 | authors = [ 9 | { name="Leandro Pereira de Lima e Silva", email="leandro@lls-software.com" }, 10 | ] 11 | description = "A library to easily create functions based on OpenAI's GPT" 12 | readme = "README.md" 13 | requires-python = ">=3.10" 14 | classifiers = [ 15 | "Development Status :: 4 - Beta", 16 | "Intended Audience :: Developers", 17 | "License :: OSI Approved :: MIT License", 18 | "Programming Language :: Python :: 3 :: Only", 19 | "Operating System :: OS Independent", 20 | "Topic :: Software Development", 21 | "Topic :: Software Development :: Libraries", 22 | "Topic :: Software Development :: Libraries :: Python Modules", 23 | "Topic :: Utilities", 24 | ] 25 | dependencies = [ 26 | "tiktoken>=0.3.3", 27 | ] 28 | 29 | [project.urls] 30 | "Homepage" = "https://github.com/leandropls/funcgpt" 31 | "Bug Tracker" = "https://github.com/leandropls/funcgpt/issues" 32 | 33 | [tool.isort] 34 | profile = "black" 35 | multi_line_output = 3 36 | 37 | [tool.black] 38 | line-length = 100 39 | 40 | [tool.setuptools.package-data] 41 | funcgpt = ["data/*"] 42 | --------------------------------------------------------------------------------