├── .flake8 ├── .github └── workflows │ └── build.yml ├── .gitignore ├── LICENSE ├── README.md ├── agentflow ├── __init__.py ├── flow.py ├── flows │ ├── example.json │ ├── example_with_variables.json │ └── summarize_url.json ├── function.py ├── functions │ ├── __init__.py │ ├── create_image.py │ ├── get_url.py │ ├── save_file.py │ └── summarize_text.py ├── llm.py ├── output.py └── outputs │ └── .gitignore ├── example.env ├── requirements.in ├── requirements.txt ├── run.py └── tests ├── __init__.py ├── test_flow.py ├── test_flow_basic.json ├── test_flow_with_functions.json ├── test_flow_with_variables.json ├── test_function.py ├── test_function_create_image.py ├── test_function_get_url.py ├── test_function_save_file.py ├── test_function_summarize_text.py ├── test_llm.py └── test_output.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | extend-ignore = E203,E501,W503,E231 3 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: build 5 | 6 | on: 7 | push: 8 | branches: [ "main" ] 9 | pull_request: 10 | branches: [ "main" ] 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | build: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Set up Python 3.10 23 | uses: actions/setup-python@v3 24 | with: 25 | python-version: "3.10" 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install flake8 pytest 30 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 31 | - name: Lint with flake8 32 | run: | 33 | # stop the build if there are Python syntax errors or undefined names 34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 37 | - name: Test with pytest 38 | run: | 39 | pytest 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Simon Smith 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Agentflow: Complex LLM Workflows from Simple JSON 2 | 3 | ![Python lint and test](https://github.com/simonmesmith/agentflow/actions/workflows/build.yml/badge.svg) 4 | 5 | Agentflow is a powerful yet user-friendly tool to run workflows powered by LLMs. You can: 6 | 7 | * **Write workflows in plain English** in human-readable JSON files. 8 | * **Use variables for dynamic outputs** that change based on user input. 9 | * **Build and execute custom functions** to go beyond text generation. 10 | 11 | ## Why Agentflow? 12 | 13 | Agentflow fills the gap between chat and autonomous interfaces: 14 | 15 | * **Chat (e.g. ChatGPT) can't run workflows** because they're conversational. 16 | * **Autonomous (e.g. Auto-GPT) run them unreliably** because they have too much freedom. 17 | 18 | Agentflow offers a balanced solution: Workflows that LLMs follow step-by-step. 19 | 20 | ## Install and Use 21 | 22 | Agentflow is currently in development. To try it: 23 | 24 | 1. Sign up for the [OpenAI API](https://platform.openai.com/overview) and get an [API key](https://help.openai.com/en/articles/4936850-where-do-i-find-my-secret-api-key) 25 | 2. Clone or download this repository. 26 | 3. Create a `.env` file from [example.env](https://github.com/simonmesmith/agentflow/blob/main/example.env) and add your OpenAI API key. 27 | 4. Run `pip install -r requirements.txt` to install dependencies. 
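Putting those steps together, a typical first-time setup might look something like this (an illustrative sketch only — it assumes you clone with `git` and optionally use a virtual environment; adjust the commands for your own setup):

```bash
# Clone the repository and enter it (assumed URL; adjust if you downloaded a release instead)
git clone https://github.com/simonmesmith/agentflow.git
cd agentflow

# (Optional) create and activate a virtual environment
python -m venv venv
source venv/bin/activate

# Create your .env from the example, then edit it to add your OpenAI API key
cp example.env .env

# Install dependencies
pip install -r requirements.txt
```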
28 | 29 | Now you can run flows from the command line, like this: 30 | ```bash 31 | python -m run --flow=example 32 | ``` 33 | 34 | ### Optional Arguments 35 | 36 | #### Use `variables` to pass variables to your flow 37 | 38 | ```bash 39 | python -m run --flow=example_with_variables --variables 'market=college students' 'price_point=$50' 40 | ``` 41 | 42 | #### Use `v` (verbose) to see task completion in real-time 43 | 44 | ```bash 45 | python -m run --flow=example -v 46 | ``` 47 | 48 | ## Create New Flows 49 | 50 | Copy [example.json](https://github.com/simonmesmith/agentflow/blob/main/agentflow/flows/example.json) or [example_with_variables.json](https://github.com/simonmesmith/agentflow/blob/main/agentflow/flows/example_with_variables.json) or create a flow from scratch in this format: 51 | 52 | ```json 53 | { 54 | "system_message": "An optional message that guides the model's behavior.", 55 | "tasks": [ 56 | { 57 | "action": "Instruct the LLM here!" 58 | }, 59 | { 60 | "action": "Actions can have settings, including function calls and temperature, like so:", 61 | "settings": { 62 | "function_call": "save_file", 63 | "temperature": 0.5 64 | } 65 | }, 66 | { 67 | "action": "..." 68 | } 69 | ] 70 | } 71 | ``` 72 | 73 | ## Create New Functions 74 | 75 | Copy [save_file.py](https://github.com/simonmesmith/agentflow/blob/main/agentflow/functions/save_file.py) and modify it, or follow these instructions (replace "function_name" with your function name): 76 | 77 | 1. **Create `function_name.py` in the [functions](https://github.com/simonmesmith/agentflow/tree/main/agentflow/functions) folder**. 78 | 2. **Create a class within called `FunctionName`** that inherits from `BaseFunction`. 79 | 3. **Add `get_definition()` and `execute()` in the class**. See descriptions of these in `BaseFunction`. 80 | 81 | That's it! You can now use your function in `function_call` as shown above. However, you should probably: 82 | 83 | 4. **Add tests in [tests](https://github.com/simonmesmith/agentflow/tree/main/tests)**! Then you'll know if workflows are failing because of your function. 84 | 85 | ## License 86 | 87 | Agentflow is licensed under the [MIT License](https://github.com/simonmesmith/agentflow/blob/main/LICENSE). 88 | -------------------------------------------------------------------------------- /agentflow/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonmesmith/agentflow/19759304ea9802fec8a4351a94aa4a6fb8fb156a/agentflow/__init__.py -------------------------------------------------------------------------------- /agentflow/flow.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module defines the Flow and Task classes which are used to load and execute a series of tasks defined in a JSON file. 3 | Each task is processed by the LLM (Large Language Model) and the results are saved in a JSON file. 4 | """ 5 | 6 | import json 7 | import logging 8 | import os 9 | import re 10 | 11 | from agentflow.function import Function 12 | from agentflow.llm import LLM, Settings 13 | from agentflow.output import Output 14 | 15 | 16 | class Task: 17 | """ 18 | Represents a task to be processed by the LLM. 19 | 20 | :param action: The action to be performed by the task. 21 | :type action: str 22 | :param settings: Settings for the task. Defaults to an empty Settings object. 
23 | :type settings: Settings, optional 24 | """ 25 | 26 | def __init__(self, action: str, settings: Settings = None): 27 | self.action = action 28 | self.settings = settings if settings else Settings() 29 | 30 | 31 | class Flow: 32 | """ 33 | Represents a flow of tasks loaded from a JSON file. 34 | 35 | :param name: The name of the flow. 36 | :type name: str 37 | :param variables: Variables to be used in the flow. Defaults to an empty dictionary. 38 | :type variables: dict, optional 39 | :param flows_path: The base path to the flows directory. If not set, will be agentflow/flows. 40 | :type flows_path: str, optional 41 | """ 42 | 43 | def __init__(self, name: str, variables: dict = None, flows_path: str = None): 44 | self.name = name 45 | self.flows_path = flows_path or os.path.join(os.path.dirname(__file__), "flows") 46 | self._load_flow(name) 47 | self._validate_and_format_messages(variables or {}) 48 | self.output = Output(name) 49 | self.messages = self._get_initial_messages() 50 | self.functions = self._get_functions() 51 | self.llm = LLM() 52 | 53 | def _load_flow(self, name: str) -> None: 54 | """ 55 | Load flow from a JSON file. 56 | 57 | :param name: The name of the flow. 58 | :type name: str 59 | :raises FileNotFoundError: If the JSON file does not exist. 60 | """ 61 | 62 | file_path = f"{self.flows_path}/{name}.json" 63 | 64 | if not os.path.exists(file_path): 65 | raise FileNotFoundError(f"File not found: {file_path}.") 66 | 67 | with open(file_path, "r") as file: 68 | data = json.load(file) 69 | 70 | self.system_message = data.get("system_message") 71 | self.tasks = [ 72 | Task(task["action"], Settings(**task.get("settings", {}))) 73 | for task in data.get("tasks", []) 74 | ] 75 | 76 | def _validate_and_format_messages(self, variables: dict) -> None: 77 | """ 78 | Validate and format messages with provided variables. 79 | 80 | :param variables: Variables to be used in the flow. 81 | :type variables: dict 82 | :raises ValueError: If there are extra or missing variables. 83 | """ 84 | all_messages = [self.system_message] + [task.action for task in self.tasks] 85 | all_variables = set( 86 | match.group(1) 87 | for message in all_messages 88 | if message 89 | for match in re.finditer( 90 | r"{([^{}]+)}", message.replace("{{", "").replace("}}", "") 91 | ) 92 | ) 93 | 94 | extra_variables = set(variables.keys()) - all_variables 95 | if extra_variables: 96 | raise ValueError(f"Extra variables provided: {extra_variables}.") 97 | 98 | missing_variables = all_variables - set(variables.keys()) 99 | if missing_variables: 100 | raise ValueError(f"Missing variable values for: {missing_variables}.") 101 | 102 | self._format_messages(variables) 103 | 104 | def _format_messages(self, variables: dict) -> None: 105 | """ 106 | Format messages with provided variables. 107 | 108 | :param variables: Variables to be used in the flow. 109 | :type variables: dict 110 | """ 111 | if self.system_message: 112 | self.system_message = self._format_message(self.system_message, variables) 113 | for task in self.tasks: 114 | if task.action: 115 | task.action = self._format_message(task.action, variables) 116 | 117 | @staticmethod 118 | def _format_message(message: str, variables: dict) -> str: 119 | """ 120 | Format a single message with provided variables. 121 | 122 | :param message: The message to be formatted. 123 | :type message: str 124 | :param variables: Variables to be used in the flow. 125 | :type variables: dict 126 | :return: The formatted message. 
127 | :rtype: str 128 | """ 129 | return message.format(**variables).replace("{{", "{").replace("}}", "}") 130 | 131 | def run(self): 132 | """ 133 | Run the flow. 134 | 135 | The flow is processed by the LLM and the results are saved in a JSON file. 136 | """ 137 | 138 | print(f"Running flow: {self.name}.") 139 | 140 | for task in self.tasks: 141 | pre_task_messages_length = len(self.messages) 142 | try: 143 | self._process_task(task) 144 | logging.info(self.messages[pre_task_messages_length:]) 145 | except Exception as e: 146 | logging.error(e) 147 | return 148 | 149 | self.output.save("messages.json", self.messages) 150 | print(f"Output folder: {self.output.output_path}") 151 | 152 | def _get_initial_messages(self) -> list: 153 | """ 154 | Get initial system and user messages. 155 | 156 | :return: A list of initial messages. 157 | :rtype: list 158 | """ 159 | messages = [] 160 | if self.system_message: 161 | messages.append({"role": "system", "content": self.system_message}) 162 | return messages 163 | 164 | def _get_functions(self) -> list: 165 | """ 166 | Get function definitions for tasks with function calls. 167 | """ 168 | return [ 169 | Function(task.settings.function_call, self.output).definition 170 | for task in self.tasks 171 | if task.settings.function_call is not None 172 | ] 173 | 174 | def _process_task(self, task: Task): 175 | """ 176 | Process a single task. 177 | 178 | :param task: The task to be processed. 179 | :type task: Task 180 | """ 181 | self.messages.append({"role": "user", "content": task.action}) 182 | 183 | task.settings.function_call = ( 184 | "none" 185 | if task.settings.function_call is None 186 | else {"name": task.settings.function_call} 187 | ) 188 | 189 | message = self.llm.respond(task.settings, self.messages, self.functions) 190 | 191 | if message.content: 192 | self._process_message(message) 193 | elif message.function_call: 194 | self._process_function_call(message, task) 195 | 196 | def _process_message(self, message) -> None: 197 | """ 198 | Process a message from the assistant. 199 | 200 | :param message: The message from the assistant. 201 | :type message: Message 202 | """ 203 | self.messages.append({"role": "assistant", "content": message.content}) 204 | 205 | def _process_function_call(self, message, task: Task) -> None: 206 | """ 207 | Process a function call from the assistant. 208 | 209 | :param message: The message from the assistant. 210 | :type message: Message 211 | :param task: The task to be processed. 212 | :type task: Task 213 | """ 214 | self.messages.append( 215 | { 216 | "role": "assistant", 217 | "content": message.content, 218 | "function_call": { 219 | "name": message.function_call.name, 220 | "arguments": message.function_call.arguments, 221 | }, 222 | } 223 | ) 224 | function = Function(message.function_call.name, self.output) 225 | function_content = function.execute(message.function_call.arguments) 226 | self.messages.append( 227 | { 228 | "role": "function", 229 | "content": function_content, 230 | "name": message.function_call.name, 231 | } 232 | ) 233 | task.settings.function_call = "none" 234 | message = self.llm.respond(task.settings, self.messages, self.functions) 235 | self._process_message(message) 236 | -------------------------------------------------------------------------------- /agentflow/flows/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "You are a brilliant entrepreneur. 
You are exceptional at generating new business ideas and marketing them.", 3 | "tasks": [ 4 | { 5 | "action": "Brainstorm five ideas for a product." 6 | }, 7 | { 8 | "action": "Choose the best idea from the list based on likely purchase intent." 9 | }, 10 | { 11 | "action": "Write a short description of the chosen product." 12 | }, 13 | { 14 | "action": "Create an image of the product based on the description.", 15 | "settings": { 16 | "function_call": "create_image" 17 | } 18 | }, 19 | { 20 | "action": "Come up with five name ideas for the chosen product." 21 | }, 22 | { 23 | "action": "Choose the best name from the list based on the attributes of top brands." 24 | }, 25 | { 26 | "action": "Write a tagline to go with the best name from the list." 27 | }, 28 | { 29 | "action": "Create a simple HTML page with the product name, tagline, description, and image." 30 | }, 31 | { 32 | "action": "Save the HTML to a .html file.", 33 | "settings": { 34 | "function_call": "save_file" 35 | } 36 | } 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /agentflow/flows/example_with_variables.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "You are a brilliant entrepreneur. You are exceptional at generating new business ideas and marketing them.", 3 | "tasks": [ 4 | { 5 | "action": "Brainstorm five ideas for a product that targets {market} at a price point of {price_point}." 6 | }, 7 | { 8 | "action": "Choose the best idea from the list based on likely purchase intent." 9 | }, 10 | { 11 | "action": "Write a short description of the chosen product." 12 | }, 13 | { 14 | "action": "Create an image of the product based on the description.", 15 | "settings": { 16 | "function_call": "create_image" 17 | } 18 | }, 19 | { 20 | "action": "Come up with five name ideas for the chosen product." 21 | }, 22 | { 23 | "action": "Choose the best name from the list based on the attributes of top brands." 24 | }, 25 | { 26 | "action": "Write a tagline to go with the best name from the list." 27 | }, 28 | { 29 | "action": "Create a simple HTML page with the product name, tagline, description, and image." 30 | }, 31 | { 32 | "action": "Save the HTML to a .html file.", 33 | "settings": { 34 | "function_call": "save_file" 35 | } 36 | } 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /agentflow/flows/summarize_url.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "You summarize URLs.", 3 | "tasks": [ 4 | { 5 | "action": "Get text from the URL {url}.", 6 | "settings": { 7 | "function_call": "get_url" 8 | } 9 | }, 10 | { 11 | "action": "Summarize the text.", 12 | "settings": { 13 | "function_call": "summarize_text" 14 | } 15 | }, 16 | { 17 | "action": "Save the summary as summary.txt.", 18 | "settings": { 19 | "function_call": "save_file" 20 | } 21 | } 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /agentflow/function.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides classes for managing functions. It includes an abstract base class for functions and a class for managing function instances. 
3 | """ 4 | 5 | import importlib 6 | import json 7 | from abc import ABC, abstractmethod 8 | 9 | from agentflow.output import Output 10 | 11 | 12 | class BaseFunction(ABC): 13 | """ 14 | This abstract base class defines the interface for functions. 15 | """ 16 | 17 | def __init__(self, output: Output): 18 | """ 19 | Initializes the BaseFunction object with an output object. 20 | 21 | :param output: The output object. 22 | :type output: Output 23 | """ 24 | self.output = output 25 | 26 | @abstractmethod 27 | def get_definition(self) -> dict: 28 | """ 29 | Returns the definition of the function. 30 | 31 | :return: The definition of the function. 32 | :rtype: dict 33 | """ 34 | pass 35 | 36 | @abstractmethod 37 | def execute(self, *args, **kwargs) -> str: 38 | """ 39 | Executes the function with the given arguments. 40 | 41 | :param args: The positional arguments. 42 | :param kwargs: The keyword arguments. 43 | :return: The result of the function execution. 44 | :rtype: str 45 | """ 46 | pass 47 | 48 | 49 | class Function: 50 | """ 51 | This class is responsible for managing function instances. 52 | """ 53 | 54 | def __init__(self, function_name: str, output: Output): 55 | """ 56 | Initializes the Function object by importing the function module and creating an instance of the function class. 57 | 58 | :param function_name: The name of the function. 59 | :type function_name: str 60 | :param output: The output object. 61 | :type output: Output 62 | """ 63 | self.module = importlib.import_module(f"agentflow.functions.{function_name}") 64 | function_class_name = function_name.replace("_", " ").title().replace(" ", "") 65 | self.function_class = getattr(self.module, function_class_name) 66 | self.instance = self.function_class(output) 67 | 68 | @property 69 | def definition(self) -> dict: 70 | """ 71 | Returns the definition of the function instance. 72 | 73 | :return: The definition of the function instance. 74 | :rtype: dict 75 | """ 76 | return self.instance.get_definition() 77 | 78 | def execute(self, args_json: str) -> str: 79 | """ 80 | Executes the function instance with the given arguments. 81 | 82 | :param args_json: The arguments in JSON format as a string. 83 | :type args_json: str 84 | :return: The result of the function execution. 85 | :rtype: str 86 | """ 87 | args_dict = json.loads(args_json) 88 | return self.instance.execute(**args_dict) 89 | -------------------------------------------------------------------------------- /agentflow/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonmesmith/agentflow/19759304ea9802fec8a4351a94aa4a6fb8fb156a/agentflow/functions/__init__.py -------------------------------------------------------------------------------- /agentflow/functions/create_image.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a class for creating an image from a description using OpenAI's API. It generates a unique image name based on the prompt and the current time, downloads the image, and saves it to a specified output path. 3 | """ 4 | 5 | import hashlib 6 | import time 7 | 8 | import openai 9 | import requests 10 | 11 | from agentflow.function import BaseFunction 12 | 13 | 14 | class CreateImage(BaseFunction): 15 | """ 16 | This class inherits from the BaseFunction class. It defines a function for creating an image from a description using OpenAI's API. 
17 | """ 18 | 19 | def get_definition(self) -> dict: 20 | """ 21 | Returns a dictionary that defines the function. It includes the function's name, description, and parameters. 22 | 23 | :return: A dictionary that defines the function. 24 | :rtype: dict 25 | """ 26 | return { 27 | "name": "create_image", 28 | "description": "Creates an image from a description. Returns the path to the image.", 29 | "parameters": { 30 | "type": "object", 31 | "properties": { 32 | "prompt": { 33 | "type": "string", 34 | "description": "The prompt that describes the image. Be specific and detailed about the content and style of the image.", 35 | } 36 | }, 37 | "required": ["prompt"], 38 | }, 39 | } 40 | 41 | def execute(self, prompt: str, n: int = 1, size: str = "1024x1024") -> str: 42 | """ 43 | Creates an image from a description using OpenAI's API, generates a unique image name based on the prompt and the current time, downloads the image, and saves it to a specified output path. 44 | 45 | :param prompt: The prompt that describes the image. 46 | :type prompt: str 47 | :param n: The number of images to generate. Defaults to 1. Currently, only 1 is supported. 48 | :type n: int, optional 49 | :param size: The size of the image. Defaults to "1024x1024". Currently, only "1024x1024" is supported. 50 | :type size: str, optional 51 | :return: The path to the image. 52 | :rtype: str 53 | """ 54 | image_name = self._generate_image_name(prompt) 55 | image_url = self._create_image(prompt, n, size) 56 | image_path = f"{self.output.output_path}/{image_name}" 57 | self._download_and_save_image(image_url, image_path) 58 | return image_path 59 | 60 | def _generate_image_name(self, prompt: str) -> str: 61 | """ 62 | Generates a unique image name based on the prompt and the current time. 63 | 64 | :param prompt: The prompt that describes the image. 65 | :type prompt: str 66 | :return: The name of the image file. 67 | :rtype: str 68 | """ 69 | timestamp = str(time.time()) 70 | return hashlib.sha256((prompt + timestamp).encode()).hexdigest() + ".png" 71 | 72 | def _create_image(self, prompt: str, n: int, size: str) -> str: 73 | """ 74 | Creates an image from a description using OpenAI's API. 75 | 76 | :param prompt: The prompt that describes the image. 77 | :type prompt: str 78 | :param n: The number of images to generate. 79 | :type n: int 80 | :param size: The size of the image. 81 | :type size: str 82 | :return: The URL of the image. 83 | :rtype: str 84 | """ 85 | response = openai.Image.create(prompt=prompt, n=n, size=size) 86 | return response["data"][0]["url"] 87 | 88 | def _download_and_save_image(self, image_url: str, image_path: str) -> None: 89 | """ 90 | Downloads the image and saves it to the specified path. 91 | 92 | :param image_url: The URL of the image. 93 | :type image_url: str 94 | :param image_path: The path to the image. 95 | :type image_path: str 96 | """ 97 | image_data = requests.get(image_url).content 98 | with open(image_path, "wb") as handler: 99 | handler.write(image_data) 100 | -------------------------------------------------------------------------------- /agentflow/functions/get_url.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from bs4 import BeautifulSoup 3 | 4 | from agentflow.function import BaseFunction 5 | 6 | 7 | class GetUrl(BaseFunction): 8 | """ 9 | This class inherits from the BaseFunction class. It defines a function for fetching the contents of a URL. 
10 | """ 11 | 12 | def get_definition(self) -> dict: 13 | """ 14 | Returns a dictionary that defines the function. It includes the function's name, description, and parameters. 15 | 16 | :return: A dictionary that defines the function. 17 | :rtype: dict 18 | """ 19 | return { 20 | "name": "get_url", 21 | "description": "Fetch the contents of a URL.", 22 | "parameters": { 23 | "type": "object", 24 | "properties": { 25 | "url": { 26 | "type": "string", 27 | "description": "The URL to fetch content from.", 28 | }, 29 | "format": { 30 | "type": "string", 31 | "enum": ["html", "text"], 32 | "default": "html", 33 | "description": "The format of the returned content. If 'html', the full HTML will be returned. If 'text', only the text will be returned.", 34 | }, 35 | }, 36 | "required": ["url"], 37 | }, 38 | } 39 | 40 | def execute(self, url: str, format: str = "html") -> str: 41 | """ 42 | Fetches the contents of a URL. The URL and the format of the returned content are provided as parameters. 43 | 44 | :param url: The URL to fetch content from. 45 | :type url: str 46 | :param format: The format of the returned content. If 'html', the full HTML will be returned. If 'text', only the text will be returned. 47 | :type format: str 48 | :return: The contents of the URL. 49 | :rtype: str 50 | """ 51 | headers = { 52 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3" 53 | } 54 | response = requests.get(url, headers=headers) 55 | if response.status_code == 200: 56 | if format == "html": 57 | return response.text 58 | elif format == "text": 59 | soup = BeautifulSoup(response.text, "html.parser") 60 | return soup.get_text() 61 | else: 62 | raise Exception( 63 | f"Failed to fetch URL. HTTP status code: {response.status_code}" 64 | ) 65 | -------------------------------------------------------------------------------- /agentflow/functions/save_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a class for saving a file to the output directory. The file's name and contents are provided as parameters. 3 | """ 4 | 5 | from agentflow.function import BaseFunction 6 | 7 | 8 | class SaveFile(BaseFunction): 9 | """ 10 | This class inherits from the BaseFunction class. It defines a function for saving a file to the output directory. 11 | """ 12 | 13 | def get_definition(self) -> dict: 14 | """ 15 | Returns a dictionary that defines the function. It includes the function's name, description, and parameters. 16 | 17 | :return: A dictionary that defines the function. 18 | :rtype: dict 19 | """ 20 | return { 21 | "name": "save_file", 22 | "description": "Saves a file. Returns the path to the file.", 23 | "parameters": { 24 | "type": "object", 25 | "properties": { 26 | "file_name": { 27 | "type": "string", 28 | "description": "The name of the file, including its extension. For example, test.txt.", 29 | }, 30 | "file_contents": { 31 | "type": "string", 32 | "description": "The contents of the file.", 33 | }, 34 | }, 35 | "required": ["file_name", "file_contents"], 36 | }, 37 | } 38 | 39 | def execute(self, file_name: str, file_contents: str) -> str: 40 | """ 41 | Saves a file to the output directory. The file's name and contents are provided as parameters. 42 | 43 | :param file_name: The name of the file, including its extension. For example, test.txt. 44 | :type file_name: str 45 | :param file_contents: The contents of the file. 
46 | :type file_contents: str 47 | :return: The name of the saved file. 48 | :rtype: str 49 | """ 50 | return self.output.save(file_name, file_contents) 51 | -------------------------------------------------------------------------------- /agentflow/functions/summarize_text.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a class for summarizing text. 3 | """ 4 | 5 | import os 6 | from typing import Dict, List, Tuple 7 | 8 | import openai 9 | 10 | from agentflow.function import BaseFunction 11 | from agentflow.llm import LLM, Settings 12 | from agentflow.output import Output 13 | 14 | 15 | class SummarizeText(BaseFunction): 16 | """ 17 | This class inherits from the BaseFunction class. It defines a function for summarizing text. 18 | """ 19 | 20 | def __init__(self, output: Output): 21 | """ 22 | Initializes the SummarizeText object. 23 | """ 24 | super().__init__(output) 25 | openai.api_key = os.getenv("OPENAI_API_KEY") 26 | self.default_instructions = ( 27 | "Return a summary that succinctly captures its main points." 28 | ) 29 | self.chars_per_token = 4 # See https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them 30 | self.max_tokens = 14000 # To allow room for the instructions and summary 31 | 32 | def get_definition(self) -> dict: 33 | """ 34 | Returns a dictionary that defines the function. It includes the function's name, description, and parameters. 35 | 36 | :return: A dictionary that defines the function. 37 | :rtype: dict 38 | """ 39 | return { 40 | "name": "summarize_text", 41 | "description": "Summarizes text.", 42 | "parameters": { 43 | "type": "object", 44 | "properties": { 45 | "text_to_summarize": { 46 | "type": "string", 47 | "description": "The text to summarize.", 48 | }, 49 | "instructions": { 50 | "type": "string", 51 | "description": "Instructions for summarizing the text.", 52 | "default": self.default_instructions, 53 | }, 54 | }, 55 | "required": ["text_to_summarize"], 56 | }, 57 | } 58 | 59 | def execute(self, text_to_summarize: str, instructions: str | None = None) -> str: 60 | """ 61 | Summarizes text. 62 | 63 | :param text_to_summarize: The text to summarize. 64 | :type text_to_summarize: str 65 | :param instructions: Optional instructions for summarizing the text. Defaults to default instructions. 66 | :type instructions: str 67 | :return: The summary of the text. 68 | :rtype: str 69 | """ 70 | truncated_text = self._truncate_text(text_to_summarize) 71 | messages = self._prepare_messages(truncated_text, instructions) 72 | model, max_return_tokens = self._select_model(messages) 73 | return self._summarize(model, messages, max_return_tokens) 74 | 75 | def _truncate_text(self, text: str) -> str: 76 | """ 77 | Truncates text. 78 | 79 | :param text: The text to truncate. 80 | :type text: str 81 | :return: The truncated text. 82 | :rtype: str 83 | """ 84 | return text[: self.max_tokens * self.chars_per_token] 85 | 86 | def _prepare_messages( 87 | self, truncated_text: str, instructions: str 88 | ) -> List[Dict[str, str]]: 89 | """ 90 | Prepares messages for the language model. 91 | 92 | :param truncated_text: The text to summarize. 93 | :type truncated_text: str 94 | :param instructions: Instructions for summarizing the text. 95 | :type instructions: str 96 | :return: The messages for the language model. 97 | :rtype: list[dict[str, str ]] 98 | """ 99 | system_content = f"You are an AI summarizer. 
{instructions}" 100 | user_content = f"Text to summarize: {truncated_text}" 101 | return [ 102 | {"role": "system", "content": system_content}, 103 | {"role": "user", "content": user_content}, 104 | ] 105 | 106 | def _select_model(self, messages: List[Dict[str, str]]) -> Tuple[str, int]: 107 | """ 108 | Selects the model to use for summarizing the text. 109 | 110 | :param messages: The messages for the language model. 111 | :type messages: list[dict[str, str]] 112 | :return: The model to use for summarizing the text and the maximum number of tokens to return. 113 | :rtype: tuple[str, int] 114 | """ 115 | messages_tokens = self._calculate_tokens(messages) 116 | if messages_tokens > 4000: 117 | return "gpt-3.5-turbo-16k", 16000 - messages_tokens 118 | else: 119 | return "gpt-3.5-turbo", 4000 - messages_tokens 120 | 121 | def _calculate_tokens(self, messages: List[Dict[str, str]]) -> int: 122 | return int(len(str(messages)) / self.chars_per_token) 123 | 124 | @staticmethod 125 | def _summarize( 126 | model: str, messages: List[Dict[str, str]], max_return_tokens: int 127 | ) -> str: 128 | """ 129 | Summarizes text. 130 | :param model: The model to use for summarizing the text. 131 | :type model: str 132 | :param messages: The messages for the language model. 133 | :type messages: list[dict[str, str]] 134 | :param max_return_tokens: The maximum number of tokens to return. 135 | :type max_return_tokens: int 136 | """ 137 | settings = Settings( 138 | model=model, 139 | max_tokens=max_return_tokens, 140 | temperature=0, 141 | ) 142 | llm = LLM() 143 | summary_message = llm.respond(settings, messages) 144 | return summary_message.content 145 | -------------------------------------------------------------------------------- /agentflow/llm.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a class for interacting with OpenAI's LLMs. It includes a dataclass for settings and a class for managing the interaction. 3 | """ 4 | 5 | import os 6 | from dataclasses import dataclass 7 | from typing import Any, Dict, List, Optional 8 | 9 | import openai 10 | from dotenv import load_dotenv 11 | from tenacity import retry, wait_exponential 12 | 13 | 14 | @dataclass 15 | class Settings: 16 | """ 17 | This dataclass holds the settings for interacting with OpenAI's LLMs. 18 | """ 19 | 20 | model: str = os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4") 21 | function_call: Optional[str] = None 22 | temperature: float = 1.0 23 | top_p: Optional[float] = None 24 | max_tokens: Optional[int] = None 25 | presence_penalty: Optional[float] = None 26 | frequency_penalty: Optional[float] = None 27 | 28 | 29 | class LLM: 30 | """ 31 | This class is responsible for managing the interaction with OpenAI's LLMs. 32 | """ 33 | 34 | def __init__(self): 35 | """ 36 | Initializes the LLM object by loading the environment variables and setting the OpenAI API key. 37 | """ 38 | load_dotenv() 39 | openai.api_key = os.getenv("OPENAI_API_KEY") 40 | 41 | @retry(wait=wait_exponential(multiplier=1, min=4, max=10)) 42 | def respond( 43 | self, 44 | settings: Settings, 45 | messages: List[Dict[str, str]], 46 | functions: Optional[List[Dict[str, str]]] = None, 47 | ) -> Any: 48 | """ 49 | Sends a request to OpenAI's LLM API and returns the response. 50 | 51 | :param settings: The settings for the interaction. 52 | :type settings: Settings 53 | :param messages: The messages to be processed by the language model. 
54 | :type messages: List[Dict[str, str]] 55 | :param functions: The functions to be processed by the language model. 56 | :type functions: Optional[List[Dict[str, str]]] 57 | :return: The response from the language model. 58 | :rtype: Any 59 | """ 60 | openai_args = {k: v for k, v in vars(settings).items() if v is not None} 61 | openai_args["messages"] = messages 62 | if functions: 63 | openai_args["functions"] = functions 64 | response = openai.ChatCompletion.create(**openai_args) 65 | return response.choices[0].message 66 | -------------------------------------------------------------------------------- /agentflow/output.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a class for managing output files. It creates a unique directory for each flow and allows saving files to that directory. 3 | """ 4 | 5 | import json 6 | import os 7 | from datetime import datetime 8 | from typing import Union 9 | 10 | 11 | class Output: 12 | """ 13 | This class is responsible for managing output files. It creates a unique directory for each flow and provides a method to save files to that directory. 14 | """ 15 | 16 | def __init__(self, flow_name: str): 17 | """ 18 | Initializes the Output object with a unique directory for the flow. 19 | 20 | :param flow_name: The name of the flow. 21 | :type flow_name: str 22 | """ 23 | self.base_path = os.path.join(os.path.dirname(__file__), "outputs") 24 | self.timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") 25 | self.output_path = os.path.join(self.base_path, f"{flow_name}_{self.timestamp}") 26 | os.makedirs(self.output_path, exist_ok=True) 27 | 28 | def save(self, file_name: str, file_contents: Union[str, list, dict]) -> str: 29 | """ 30 | Saves the file contents to a file in the flow's directory. 31 | 32 | :param file_name: The name of the file. 33 | :type file_name: str 34 | :param file_contents: The contents of the file. 35 | :type file_contents: Union[str, list, dict] 36 | :return: The path to the saved file. 
37 | :rtype: str 38 | """ 39 | file_path = os.path.join(self.output_path, file_name) 40 | mode = "w" 41 | if isinstance(file_contents, str): 42 | data_to_write = file_contents 43 | elif isinstance(file_contents, (list, dict)): 44 | data_to_write = json.dumps(file_contents, indent=4) 45 | else: 46 | raise TypeError("file_contents must be of type str, list, or dict") 47 | 48 | with open(file_path, mode) as f: 49 | f.write(data_to_write) 50 | 51 | return file_path 52 | -------------------------------------------------------------------------------- /agentflow/outputs/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore -------------------------------------------------------------------------------- /example.env: -------------------------------------------------------------------------------- 1 | # OpenAI API Key 2 | OPENAI_API_KEY=YourOpenAIAPIKey 3 | 4 | # Default model for OpenAI (options: gpt-4, gpt-3.5-turbo; default if not specified: gpt-4) 5 | OPENAI_DEFAULT_MODEL=gpt-4 6 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | openai 3 | python-dotenv 4 | pytest 5 | tenacity 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.10 3 | # by the following command: 4 | # 5 | # pip-compile 6 | # 7 | aiohttp==3.8.5 8 | # via openai 9 | aiosignal==1.3.1 10 | # via aiohttp 11 | async-timeout==4.0.2 12 | # via aiohttp 13 | attrs==23.1.0 14 | # via aiohttp 15 | beautifulsoup4==4.12.2 16 | # via -r requirements.in 17 | certifi==2023.7.22 18 | # via requests 19 | charset-normalizer==3.2.0 20 | # via 21 | # aiohttp 22 | # requests 23 | exceptiongroup==1.1.2 24 | # via pytest 25 | frozenlist==1.4.0 26 | # via 27 | # aiohttp 28 | # aiosignal 29 | idna==3.4 30 | # via 31 | # requests 32 | # yarl 33 | iniconfig==2.0.0 34 | # via pytest 35 | multidict==6.0.4 36 | # via 37 | # aiohttp 38 | # yarl 39 | openai==0.27.8 40 | # via -r requirements.in 41 | packaging==23.1 42 | # via pytest 43 | pluggy==1.2.0 44 | # via pytest 45 | pytest==7.4.0 46 | # via -r requirements.in 47 | python-dotenv==1.0.0 48 | # via -r requirements.in 49 | requests==2.31.0 50 | # via openai 51 | soupsieve==2.4.1 52 | # via beautifulsoup4 53 | tenacity==8.2.2 54 | # via -r requirements.in 55 | tomli==2.0.1 56 | # via pytest 57 | tqdm==4.65.0 58 | # via openai 59 | urllib3==2.0.4 60 | # via requests 61 | yarl==1.9.2 62 | # via aiohttp 63 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module is used to run Agentflow flows. To run one, use the following command: 3 | 4 | .. code-block:: bash 5 | 6 | python -m run --flow= --variables '=' '=' 7 | 8 | Optionally, use -v for verbose output. 9 | 10 | """ 11 | 12 | import argparse 13 | import logging 14 | 15 | from agentflow.flow import Flow 16 | 17 | 18 | def main() -> None: 19 | """ 20 | The main function that parses command line arguments and runs the specified flow. 
21 | """ 22 | parser = argparse.ArgumentParser(description="AgentFlow") 23 | parser.add_argument( 24 | "--flow", 25 | type=str, 26 | required=True, 27 | help="The name of the flow to run. (The part before .json.)", 28 | dest="flow_name", 29 | ) 30 | parser.add_argument( 31 | "--variables", 32 | nargs="*", 33 | help="Variables to be used in the flow. Should be in the format key1=value1 key2=value2. Put key=value pairs in quotes if they contain space.", 34 | dest="variables", 35 | ) 36 | parser.add_argument( 37 | "-v", "--verbose", action="store_true", help="Show detailed output." 38 | ) 39 | 40 | args = parser.parse_args() 41 | variables = parse_variables(args.variables) 42 | if args.verbose: 43 | logging.basicConfig(level=logging.INFO) 44 | logging.info("Verbose mode enabled.") 45 | 46 | flow = Flow(args.flow_name, variables) 47 | flow.run() 48 | 49 | 50 | def parse_variables(variables: list[str]) -> dict[str, str]: 51 | """ 52 | Parses the variables provided as command line arguments. 53 | 54 | :param variables: A list of strings where each string is a key-value pair in the format 'key=value'. 55 | :type variables: list[str] 56 | :return: A dictionary where the keys are the variable names and the values are the corresponding values. 57 | :rtype: dict[str, str] 58 | """ 59 | if not variables: 60 | return {} 61 | 62 | variable_dict = {} 63 | for variable in variables: 64 | key, value = variable.split("=") 65 | variable_dict[key] = value 66 | 67 | return variable_dict 68 | 69 | 70 | if __name__ == "__main__": 71 | main() 72 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonmesmith/agentflow/19759304ea9802fec8a4351a94aa4a6fb8fb156a/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_flow.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains tests for the Flow class. 3 | """ 4 | 5 | import json 6 | import os 7 | import shutil 8 | from types import SimpleNamespace 9 | from typing import Dict, List, Optional 10 | from unittest.mock import patch 11 | 12 | import pytest 13 | 14 | from agentflow.flow import Flow 15 | from agentflow.llm import Settings 16 | 17 | 18 | def mock_llm_respond( 19 | settings: Settings, 20 | messages: List[Dict[str, str]], 21 | functions: Optional[List[Dict[str, str]]] = None, 22 | ) -> SimpleNamespace: 23 | """ 24 | Mock the LLM respond method. 25 | """ 26 | if settings.function_call != "none": 27 | return SimpleNamespace( 28 | role="assistant", 29 | content=None, 30 | function_call=SimpleNamespace( 31 | name=settings.function_call["name"], 32 | arguments=json.dumps({"arg1": "value1", "arg2": "value2"}), 33 | ), 34 | ) 35 | elif messages[-1]["role"] == "function": 36 | return SimpleNamespace( 37 | role="assistant", 38 | content=f"Response to function call {messages[-1]['name']}.", 39 | ) 40 | else: 41 | return SimpleNamespace( 42 | role="assistant", 43 | content=f"Response to user message {messages[-1]['content']}.", 44 | ) 45 | 46 | 47 | def mock_function_definition(function_name: str) -> dict: 48 | """ 49 | Mock a function definition. 50 | """ 51 | return {"name": f"{function_name} definition"} 52 | 53 | 54 | def mock_function_execute(args_json: str) -> str: 55 | """ 56 | Mock a function execute method. 
57 | """ 58 | return f"Response to function call with these arguments: {args_json}." 59 | 60 | 61 | @pytest.fixture 62 | def flows_path(): 63 | """ 64 | Get the path to the test flows directory. 65 | """ 66 | return os.path.dirname(os.path.abspath(__file__)) 67 | 68 | 69 | def test_file_not_found(flows_path): 70 | """ 71 | Test that a FileNotFoundError is raised if the flow file does not exist. 72 | """ 73 | with pytest.raises(FileNotFoundError): 74 | _ = Flow("file_not_found", flows_path=flows_path) 75 | 76 | 77 | def test_flow_basic(flows_path): 78 | """ 79 | Test that we can load and run a basic flow. 80 | """ 81 | with patch("agentflow.flow.LLM") as MockLLM: 82 | mock_llm = MockLLM.return_value 83 | mock_llm.respond.side_effect = mock_llm_respond 84 | 85 | flow = Flow("test_flow_basic", flows_path=flows_path) 86 | 87 | assert flow.system_message == "Test system message." 88 | assert len(flow.tasks) == 3 89 | 90 | # Test that settings get loaded correctly 91 | with open(os.path.join(flows_path, "test_flow_basic.json"), "r") as file: 92 | flow_json = json.load(file) 93 | flow_json_test_settings = flow_json["tasks"][0]["settings"] 94 | flow_object_settings = [ 95 | s for s in flow.tasks[0].settings.__dict__ if s != "function_call" 96 | ] 97 | for setting in flow_object_settings: 98 | assert ( 99 | getattr(flow.tasks[0].settings, setting) 100 | == flow_json_test_settings[setting] 101 | ) 102 | 103 | # Test that we use default settings if there are none provided 104 | assert flow.tasks[1].settings == Settings() 105 | 106 | flow.run() 107 | 108 | assert MockLLM.call_count == 1 109 | assert mock_llm.respond.call_count > 0 110 | 111 | last_call = mock_llm.respond.call_args 112 | last_messages = last_call[0][1] 113 | assert ( 114 | last_messages[-1]["content"] 115 | == f"Response to user message {last_messages[-2]['content']}." 116 | ) 117 | 118 | shutil.rmtree(flow.output.output_path) 119 | 120 | 121 | def test_flow_with_variables(flows_path): 122 | """ 123 | Test that we can load and run a flow with variables. 124 | """ 125 | variables = { 126 | "system_message_variable": "system_message_variable_value", 127 | "task_1_variable": "task_1_variable_value", 128 | } 129 | flow = Flow("test_flow_with_variables", variables, flows_path) 130 | 131 | # Test that we set variables correctly 132 | assert flow.system_message == "System message with system_message_variable_value." 133 | assert flow.tasks[0].action == "Task 1 action with task_1_variable_value." 134 | assert ( 135 | flow.tasks[1].action 136 | == "Task 2 action with {task_2_curly_bracket_non_variable}." 137 | ) 138 | 139 | # Test that we raise an error if we provide extra variables 140 | variables["extra_variable"] = "extra_variable_value" 141 | with pytest.raises( 142 | ValueError, match="Extra variables provided: {'extra_variable'}." 143 | ): 144 | _ = Flow("test_flow_with_variables", variables, flows_path) 145 | 146 | # Test that we raise an error if we don't provide all variables 147 | variables.pop("extra_variable") 148 | variables.pop("system_message_variable") 149 | with pytest.raises( 150 | ValueError, match="Missing variable values for: {'system_message_variable'}." 151 | ): 152 | _ = Flow("test_flow_with_variables", variables, flows_path) 153 | 154 | shutil.rmtree(flow.output.output_path) 155 | 156 | 157 | def test_flow_with_functions(flows_path): 158 | """ 159 | Test that we can load and run a flow with functions. 
160 | """ 161 | with patch("agentflow.flow.Function") as MockFunction: 162 | with patch("agentflow.flow.LLM") as MockLLM: 163 | mock_function = MockFunction.return_value 164 | mock_function.definition.side_effect = mock_function_definition 165 | mock_function.execute.side_effect = mock_function_execute 166 | 167 | mock_llm = MockLLM.return_value 168 | mock_llm.respond.side_effect = mock_llm_respond 169 | 170 | flow = Flow("test_flow_with_functions", flows_path=flows_path) 171 | 172 | # Ensure that we have the correct number of functions 173 | assert len(flow.functions) == 2 174 | 175 | flow.run() 176 | 177 | last_call = mock_llm.respond.call_args 178 | last_messages = last_call[0][1] 179 | 180 | # Ensure that we're calling functions correctly 181 | assert last_messages[-3]["role"] == "assistant" 182 | assert last_messages[-3]["function_call"] == { 183 | "name": "test_function_for_task_3", 184 | "arguments": '{"arg1": "value1", "arg2": "value2"}', 185 | } 186 | assert last_messages[-2]["role"] == "function" 187 | assert last_messages[-2]["name"] == "test_function_for_task_3" 188 | assert last_messages[-2]["content"] == ( 189 | 'Response to function call with these arguments: {"arg1": "value1", "arg2": "value2"}.' 190 | ) 191 | 192 | # Ensure that we're responding to functions correctly 193 | assert last_messages[-1]["content"] == ( 194 | "Response to function call test_function_for_task_3." 195 | ) 196 | 197 | shutil.rmtree(flow.output.output_path) 198 | -------------------------------------------------------------------------------- /tests/test_flow_basic.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "Test system message.", 3 | "tasks": [ 4 | { 5 | "action": "Task 1 action.", 6 | "settings": { 7 | "model": "test_model", 8 | "temperature": 0.123, 9 | "top_p": 0.1234, 10 | "max_tokens": 12345, 11 | "presence_penalty": 0.123456, 12 | "frequency_penalty": 0.1234567 13 | } 14 | }, 15 | { 16 | "action": "Task 2 action." 17 | }, 18 | { 19 | "action": "Task 3 action." 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /tests/test_flow_with_functions.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "Test system message.", 3 | "tasks": [ 4 | { 5 | "action": "Task 1 action with no function." 6 | }, 7 | { 8 | "action": "Task 2 action with function.", 9 | "settings": { 10 | "function_call": "test_function_for_task_2" 11 | } 12 | }, 13 | { 14 | "action": "Task 3 action with function.", 15 | "settings": { 16 | "function_call": "test_function_for_task_3" 17 | } 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /tests/test_flow_with_variables.json: -------------------------------------------------------------------------------- 1 | { 2 | "system_message": "System message with {system_message_variable}.", 3 | "tasks": [ 4 | { 5 | "action": "Task 1 action with {task_1_variable}." 6 | }, 7 | { 8 | "action": "Task 2 action with {{task_2_curly_bracket_non_variable}}." 9 | } 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /tests/test_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains tests for the Function class. 
3 | """ 4 | 5 | import shutil 6 | 7 | from agentflow.function import Function 8 | from agentflow.output import Output 9 | 10 | 11 | def test_function(): 12 | """ 13 | Tests the execute method of the Function class. 14 | 15 | The test creates an Output object and a Function object. It then calls the execute 16 | method of the Function object with a JSON string containing the file name and file 17 | contents. The test checks if the execute method returns the correct file path and 18 | if the file is saved correctly. 19 | """ 20 | output = Output("test_function") 21 | function = Function("save_file", output) 22 | result = function.execute( 23 | '{"file_name": "test.txt", "file_contents": "Hello, world!"}' 24 | ) 25 | assert ( 26 | result == f"{output.output_path}/test.txt" 27 | ), "File path returned by execute method is incorrect" 28 | shutil.rmtree(output.output_path) 29 | -------------------------------------------------------------------------------- /tests/test_function_create_image.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a test for the CreateImage class in the agentflow.functions.create_image module. It uses the unittest.mock library to mock the OpenAI and requests APIs, and checks that the image creation process works correctly. 3 | """ 4 | 5 | import re 6 | import shutil 7 | from unittest.mock import MagicMock, patch 8 | 9 | from agentflow.functions.create_image import CreateImage 10 | from agentflow.output import Output 11 | 12 | 13 | @patch("openai.Image.create") 14 | @patch("requests.get") 15 | def test_execute(mock_get, mock_create): 16 | """ 17 | Tests the execute method of the CreateImage class. It mocks the OpenAI and requests APIs, and checks that the image creation process works correctly. 18 | """ 19 | # Mock the openai.Image.create call to return a mock response with a mock image URL 20 | mock_create.return_value = {"data": [{"url": "https://mockurl.com/mock_image.jpg"}]} 21 | 22 | # Mock the requests.get call to return a mock response with mock image content 23 | mock_response = MagicMock() 24 | mock_response.content = b"mock image content" 25 | mock_get.return_value = mock_response 26 | 27 | output = Output("test_create_image_execute") 28 | create_image = CreateImage(output) 29 | image_path = create_image.execute("a white siamese cat", 1, "1024x1024") 30 | 31 | # Check that the returned image name is a valid SHA-256 hash followed by ".png" 32 | image_file_name = image_path.split("/")[-1] 33 | assert re.match(r"[0-9a-f]{64}\.png$", image_file_name) is not None 34 | 35 | # Check that the image file was created with the correct content 36 | with open(image_path, "rb") as f: 37 | assert f.read() == b"mock image content" 38 | 39 | # Clean up the test environment by removing the created file and directory 40 | shutil.rmtree(output.output_path) 41 | -------------------------------------------------------------------------------- /tests/test_function_get_url.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains tests for the GetUrl function in the agentflow.functions.get_url module. It checks that the URL fetching process works correctly. 3 | """ 4 | 5 | import shutil 6 | from unittest.mock import patch 7 | 8 | from agentflow.functions.get_url import GetUrl 9 | from agentflow.output import Output 10 | 11 | 12 | def test_execute_html(): 13 | """ 14 | Tests the execute method of the GetUrl function with format set to 'html'. 
15 | """ 16 | output = Output("test_get_url_execute_html") 17 | get_url = GetUrl(output) 18 | 19 | with patch("requests.get") as mocked_get: 20 | # Mock the returned response 21 | mocked_get.return_value.status_code = 200 22 | mocked_get.return_value.text = "Hello, world!" 23 | 24 | # Execute the GetUrl function 25 | result = get_url.execute("http://test.com", "html") 26 | 27 | # Check that the returned content is correct 28 | assert result == "Hello, world!" 29 | 30 | # Clean up the test environment by removing the created directory 31 | shutil.rmtree(output.output_path) 32 | 33 | 34 | def test_execute_text(): 35 | """ 36 | Tests the execute method of the GetUrl function with format set to 'text'. 37 | """ 38 | output = Output("test_get_url_execute_text") 39 | get_url = GetUrl(output) 40 | 41 | with patch("requests.get") as mocked_get: 42 | # Mock the returned response 43 | mocked_get.return_value.status_code = 200 44 | mocked_get.return_value.text = "Hello, world!" 45 | 46 | # Execute the GetUrl function 47 | result = get_url.execute("http://test.com", "text") 48 | 49 | # Check that the returned content is correct 50 | assert result == "Hello, world!" 51 | 52 | # Clean up the test environment by removing the created directory 53 | shutil.rmtree(output.output_path) 54 | -------------------------------------------------------------------------------- /tests/test_function_save_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a test for the SaveFile class in the agentflow.functions.save_file module. It checks that the file saving process works correctly. 3 | """ 4 | 5 | import shutil 6 | 7 | from agentflow.functions.save_file import SaveFile 8 | from agentflow.output import Output 9 | 10 | 11 | def test_execute(): 12 | """ 13 | Tests the execute method of the SaveFile class. It checks that the file saving process works correctly. 14 | """ 15 | output = Output("test_save_file_execute") 16 | save_file = SaveFile(output) 17 | result = save_file.execute("test.txt", "Hello, world!") 18 | 19 | # Check that the returned file path is correct 20 | assert result == f"{output.output_path}/test.txt" 21 | 22 | # Clean up the test environment by removing the created file and directory 23 | shutil.rmtree(output.output_path) 24 | -------------------------------------------------------------------------------- /tests/test_function_summarize_text.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | from unittest.mock import patch 3 | 4 | import pytest 5 | 6 | from agentflow.functions.summarize_text import SummarizeText 7 | from agentflow.output import Output 8 | 9 | 10 | @pytest.fixture 11 | def output(): 12 | flow = "test_summarize_text" 13 | output = Output(flow) 14 | yield output 15 | shutil.rmtree(output.output_path) 16 | 17 | 18 | def test_get_definition(output): 19 | """ 20 | Tests the get_definition method of the SummarizeText class. It checks that the definition of the function is correct. 21 | """ 22 | summarizer = SummarizeText(output) 23 | definition = summarizer.get_definition() 24 | assert definition["name"] == "summarize_text" 25 | assert "text_to_summarize" in definition["parameters"]["properties"] 26 | 27 | 28 | def test_truncate_text(output): 29 | """ 30 | Tests the _truncate_text method of the SummarizeText class. It checks that the text is truncated correctly. 
31 | """ 32 | summarizer = SummarizeText(output) 33 | text = "a" * (summarizer.max_tokens * summarizer.chars_per_token + 10) 34 | truncated_text = summarizer._truncate_text(text) 35 | assert len(truncated_text) == summarizer.max_tokens * summarizer.chars_per_token 36 | 37 | 38 | def test_prepare_messages(output): 39 | """ 40 | Tests the _prepare_messages method of the SummarizeText class. It checks that the messages are prepared correctly. 41 | """ 42 | summarizer = SummarizeText(output) 43 | messages = summarizer._prepare_messages("text", "instructions") 44 | assert messages[0]["content"] == "You are an AI summarizer. instructions" 45 | assert messages[1]["content"] == "Text to summarize: text" 46 | 47 | 48 | def test_select_model_base(output): 49 | """ 50 | Tests the _select_model method of the SummarizeText class. It checks that the correct model is selected for 4000 tokens or less. 51 | """ 52 | summarizer = SummarizeText(output) 53 | # Constructing a message size that fits the base model 54 | messages = [{"content": "a" * (1000 * summarizer.chars_per_token)}] 55 | model, _ = summarizer._select_model(messages) 56 | assert model == "gpt-3.5-turbo" 57 | 58 | 59 | def test_select_model_16k(output): 60 | """ 61 | Tests the _select_model method of the SummarizeText class. It checks that the correct model is selected for more than 4000 tokens. 62 | """ 63 | summarizer = SummarizeText(output) 64 | # Constructing a message size that requires the larger 16k model 65 | messages = [{"content": "a" * (5000 * summarizer.chars_per_token)}] 66 | model, _ = summarizer._select_model(messages) 67 | assert model == "gpt-3.5-turbo-16k" 68 | 69 | 70 | def test_calculate_tokens(output): 71 | """ 72 | Tests the _calculate_tokens method of the SummarizeText class. It checks that the number of tokens is calculated correctly. 73 | """ 74 | summarizer = SummarizeText(output) 75 | tokens = summarizer._calculate_tokens([{"content": "a" * 1000}]) 76 | assert tokens == 254 77 | 78 | 79 | @patch("agentflow.functions.summarize_text.LLM") 80 | def test_execute(MockLLM, output): 81 | """ 82 | Tests the execute method of the SummarizeText class. 83 | """ 84 | mock_summary = "This is the summary." 85 | MockLLM.return_value.respond.return_value.content = mock_summary 86 | summarizer = SummarizeText(output) 87 | 88 | summary = summarizer.execute("Text to summarize.", "Instruction summary.") 89 | assert summary == mock_summary 90 | -------------------------------------------------------------------------------- /tests/test_llm.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains tests for the LLM class. 3 | """ 4 | 5 | import os 6 | from unittest.mock import MagicMock, patch 7 | 8 | from agentflow.llm import Settings 9 | 10 | 11 | def test_settings(monkeypatch): 12 | """ 13 | Tests that key settings defaults are properly set. 14 | """ 15 | 16 | # Test that we're properly setting the default model 17 | if "OPENAI_DEFAULT_MODEL" in os.environ: 18 | monkeypatch.delenv("OPENAI_DEFAULT_MODEL") 19 | settings = Settings() 20 | assert settings.model == "gpt-4" 21 | 22 | 23 | @patch("agentflow.llm.LLM") 24 | def test_respond(mock_llm_class): 25 | """ 26 | Tests the respond method of the LLM class. 27 | 28 | The LLM class and its respond method are mocked. The test checks if the respond method 29 | is called correctly and if it returns a non-None value when called with a settings object 30 | and a list of messages. 
31 | """ 32 | mock_llm_instance = MagicMock() 33 | mock_llm_instance.respond.return_value = { 34 | "role": "assistant", 35 | "content": "Yes, I am here!", 36 | } 37 | mock_llm_class.return_value = mock_llm_instance 38 | 39 | llm = mock_llm_class() 40 | settings = Settings() 41 | messages = [{"role": "user", "content": "This is a test. Are you there?"}] 42 | response = llm.respond(settings, messages) 43 | assert response is not None, "Response is None" 44 | mock_llm_instance.respond.assert_called_once_with(settings, messages) 45 | -------------------------------------------------------------------------------- /tests/test_output.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains tests for the Output class. 3 | """ 4 | 5 | import json 6 | import os 7 | import shutil 8 | 9 | from agentflow.output import Output 10 | 11 | 12 | def test_init(): 13 | """ 14 | Tests the initialization of the Output object by checking if the output directory is created. 15 | """ 16 | output = Output("test_output_init") 17 | assert os.path.exists(output.output_path), "Output path does not exist" 18 | os.rmdir(output.output_path) 19 | 20 | 21 | def test_save(): 22 | """ 23 | Tests the save method of the Output object by saving a text file and a JSON file, 24 | and then checking if these files exist in the output directory. 25 | """ 26 | output = Output("test") 27 | 28 | # Test saving a text file 29 | txt_file_path = os.path.join(output.output_path, "test.txt") 30 | output.save("test.txt", "test_output_save") 31 | assert os.path.exists(txt_file_path), "Text file was not saved correctly" 32 | with open(txt_file_path, "r") as f: 33 | assert f.read() == "test_output_save", "Text file content is incorrect" 34 | os.remove(txt_file_path) 35 | 36 | # Test saving a JSON file 37 | json_file_path = os.path.join(output.output_path, "test.json") 38 | output.save("test.json", [{"test": "test1"}, {"test": "test2"}]) 39 | assert os.path.exists(json_file_path), "JSON file was not saved correctly" 40 | with open(json_file_path, "r") as f: 41 | assert json.load(f) == [ 42 | {"test": "test1"}, 43 | {"test": "test2"}, 44 | ], "JSON file content is incorrect" 45 | 46 | shutil.rmtree(output.output_path) 47 | --------------------------------------------------------------------------------