├── .env.example
├── src
│   └── agentic_patterns
│       ├── utils
│       │   ├── __init__.py
│       │   ├── logging.py
│       │   ├── extraction.py
│       │   └── completions.py
│       ├── tool_pattern
│       │   ├── __init__.py
│       │   ├── tool.py
│       │   └── tool_agent.py
│       ├── multiagent_pattern
│       │   ├── __init__.py
│       │   ├── crew.py
│       │   └── agent.py
│       ├── planning_pattern
│       │   ├── __init__.py
│       │   └── react_agent.py
│       ├── __init__.py
│       └── reflection_pattern
│           ├── __init__.py
│           └── reflection_agent.py
├── notebooks
│   ├── tool_agent_example.txt
│   ├── poem.txt
│   ├── multiagent_pattern.ipynb
│   ├── reflection_pattern.ipynb
│   ├── planning_pattern.ipynb
│   └── tool_pattern.ipynb
├── .github
│   ├── workflows
│   │   ├── constraints.txt
│   │   └── release.yml
│   └── release-drafter.yml
├── img
│   ├── groq.png
│   ├── karpathy.png
│   ├── mergesort.png
│   ├── tool_pattern.png
│   ├── agentic_patterns.png
│   ├── multiagent_pattern.png
│   ├── planning_pattern.png
│   ├── reflection_pattern.png
│   └── dag.svg
├── .pre-commit-config.yaml
├── pyproject.toml
├── LICENSE
├── .gitignore
└── README.md
/.env.example: -------------------------------------------------------------------------------- 1 | GROQ_API_KEY="" 2 | -------------------------------------------------------------------------------- /src/agentic_patterns/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/agentic_patterns/tool_pattern/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /notebooks/tool_agent_example.txt: -------------------------------------------------------------------------------- 1 | This is a Tool Agent -------------------------------------------------------------------------------- /src/agentic_patterns/multiagent_pattern/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/agentic_patterns/planning_pattern/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/workflows/constraints.txt: -------------------------------------------------------------------------------- 1 | pip==24.1.2 2 | poetry==1.8.3 3 | virtualenv<21.0.0 -------------------------------------------------------------------------------- /src/agentic_patterns/__init__.py: -------------------------------------------------------------------------------- 1 | from .reflection_pattern import ReflectionAgent 2 | -------------------------------------------------------------------------------- /img/groq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/groq.png -------------------------------------------------------------------------------- /img/karpathy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/karpathy.png -------------------------------------------------------------------------------- /src/agentic_patterns/reflection_pattern/__init__.py: -------------------------------------------------------------------------------- 1 | from .reflection_agent import ReflectionAgent --------------------------------------------------------------------------------
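A minimal usage sketch for the `ReflectionAgent` exported by the package `__init__.py` above (the query, step count and verbosity are illustrative; a valid GROQ_API_KEY must be set, as shown in .env.example):

    from agentic_patterns import ReflectionAgent

    agent = ReflectionAgent()
    final = agent.run(user_msg="Write a merge sort implementation in Python", n_steps=3, verbose=1)
    print(final)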
/img/mergesort.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/mergesort.png -------------------------------------------------------------------------------- /img/tool_pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/tool_pattern.png -------------------------------------------------------------------------------- /img/agentic_patterns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/agentic_patterns.png -------------------------------------------------------------------------------- /img/multiagent_pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/multiagent_pattern.png -------------------------------------------------------------------------------- /img/planning_pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/planning_pattern.png -------------------------------------------------------------------------------- /img/reflection_pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ashishpatel26/agentic_patterns/main/img/reflection_pattern.png -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: local 3 | hooks: 4 | - id: black 5 | name: black 6 | entry: black 7 | language: system 8 | types: [python] 9 | require_serial: true 10 | - id: reorder-python-imports 11 | name: Reorder python imports 12 | entry: reorder-python-imports 13 | language: system 14 | types: [python] 15 | args: [--application-directories=src] 16 | - id: trailing-whitespace 17 | name: Trim Trailing Whitespace 18 | entry: trailing-whitespace-fixer 19 | language: system 20 | types: [text] 21 | stages: [commit, push, manual] -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "agentic-patterns" 3 | version = "1.0.0" 4 | description = "" 5 | authors = ["MichaelisTrofficus "] 6 | license = "MIT" 7 | readme = "README.md" 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.11" 11 | groq = "^0.9.0" 12 | jupyter = "^1.0.0" 13 | python-dotenv = "^1.0.1" 14 | colorama = "^0.4.6" 15 | types-colorama = "^0.4.15.20240311" 16 | graphviz = "^0.20.3" 17 | 18 | [tool.poetry.group.dev.dependencies] 19 | pre-commit = "^3.8.0" 20 | black = "^24.8.0" 21 | reorder-python-imports = "^3.13.0" 22 | pre-commit-hooks = "^4.6.0" 23 | 24 | [build-system] 25 | requires = ["poetry-core"] 26 | build-backend = "poetry.core.masonry.api" 27 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | categories: 2 | - title: ":boom: Breaking Changes" 3 | label: "breaking" 4 | - title: ":rocket: Features" 5 | label: "enhancement" 6 | - title: ":fire: Removals and Deprecations" 
7 | label: "removal" 8 | - title: ":beetle: Fixes" 9 | label: "bug" 10 | - title: ":racehorse: Performance" 11 | label: "performance" 12 | - title: ":rotating_light: Testing" 13 | label: "testing" 14 | - title: ":construction_worker: Continuous Integration" 15 | label: "ci" 16 | - title: ":books: Documentation" 17 | label: "documentation" 18 | - title: ":hammer: Refactoring" 19 | label: "refactoring" 20 | - title: ":lipstick: Style" 21 | label: "style" 22 | - title: ":package: Dependencies" 23 | labels: 24 | - "dependencies" 25 | - "build" 26 | template: | 27 | ## Changes 28 | 29 | $CHANGES -------------------------------------------------------------------------------- /src/agentic_patterns/utils/logging.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from colorama import Fore 4 | from colorama import Style 5 | 6 | 7 | def fancy_print(message: str) -> None: 8 | """ 9 | Displays a fancy print message. 10 | 11 | Args: 12 | message (str): The message to display. 13 | """ 14 | print(Style.BRIGHT + Fore.CYAN + f"\n{'=' * 50}") 15 | print(Fore.MAGENTA + f"{message}") 16 | print(Style.BRIGHT + Fore.CYAN + f"{'=' * 50}\n") 17 | time.sleep(0.5) 18 | 19 | 20 | def fancy_step_tracker(step: int, total_steps: int) -> None: 21 | """ 22 | Displays a fancy step tracker for each iteration of the generation-reflection loop. 23 | 24 | Args: 25 | step (int): The current step in the loop. 26 | total_steps (int): The total number of steps in the loop. 27 | """ 28 | fancy_print(f"STEP {step + 1}/{total_steps}") 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 The Neural Maze 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /notebooks/poem.txt: -------------------------------------------------------------------------------- 1 | En la gran tapicería del tiempo y el espacio, 2 | Un hilo de existencia teje su lugar, 3 | Un momento fugaz, un destello de fuerza, 4 | Una vida que arde y parpadea en la noche. 5 | 6 | Bailamos sobre la tierra, con pies de barro, 7 | Buscando respuestas, de noche y de día, 8 | Nos esforzamos y luchamos, reímos y jugamos, 9 | Pero al final, ¿qué significa todo esto? 
10 | 11 | Algunos dicen que es un propósito, otros dicen que es el destino, 12 | Algunos afirman saber, mientras que otros dudan, 13 | Pero mientras avanzamos por la puerta retorcida de la vida, 14 | Comenzamos a ver, nuestro propio camino crear. 15 | 16 | Quizás el significado se encuentra, no en el final, 17 | Sino en el viaje, el amor, el amigo, 18 | Los días bañados por el sol, las noches estrelladas, 19 | Los recuerdos que atesoramos, las risas, las peleas. 20 | 21 | O tal vez sea un misterio que nunca sabremos, 22 | Una verdad escondida, en el resplandor del cosmos, 23 | Un secreto que solo el universo puede compartir, 24 | Un enigma que solo se resuelve, en el silencio que guardamos. 25 | 26 | Pero esto sí lo sabemos, en esta vida que vivimos, 27 | Tenemos el poder, de sembrar la semilla, 28 | De nutrir el crecimiento, de cuidar la tierra, 29 | De dejar nuestra huella, de dar nuestra alegría. 30 | 31 | Y cuando nuestro tiempo se acabe y hayamos partido, 32 | Nuestro legado permanece, como el sol de la mañana, 33 | Una luz que brilla, un amor que es verdad, 34 | Un recuerdo que vive, en los corazones de ti y de mí. -------------------------------------------------------------------------------- /src/agentic_patterns/utils/extraction.py: -------------------------------------------------------------------------------- 1 | import re 2 | from dataclasses import dataclass 3 | 4 | 5 | @dataclass 6 | class TagContentResult: 7 | """ 8 | A data class to represent the result of extracting tag content. 9 | 10 | Attributes: 11 | content (list[str]): A list of strings containing the content found between the specified tags. 12 | found (bool): A flag indicating whether any content was found for the given tag. 13 | """ 14 | 15 | content: list[str] 16 | found: bool 17 | 18 | 19 | def extract_tag_content(text: str, tag: str) -> TagContentResult: 20 | """ 21 | Extracts all content enclosed by specified tags (e.g., <thought></thought>, <response></response>, etc.). 22 | 23 | Parameters: 24 | text (str): The input string containing multiple potential tags. 25 | tag (str): The name of the tag to search for (e.g., 'thought', 'response'). 26 | 27 | Returns: 28 | TagContentResult: A dataclass with the following fields: 29 | - 'content' (list[str]): A list of strings containing the content found between the specified tags. 30 | - 'found' (bool): A flag indicating whether any content was found for the given tag.
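Example (illustrative, simply exercising the regex below):
    >>> extract_tag_content("<thought>Check the docs</thought>", "thought")
    TagContentResult(content=['Check the docs'], found=True)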
31 | """ 32 | # Build the regex pattern dynamically to find multiple occurrences of the tag 33 | tag_pattern = rf"<{tag}>(.*?)" 34 | 35 | # Use findall to capture all content between the specified tag 36 | matched_contents = re.findall(tag_pattern, text, re.DOTALL) 37 | 38 | # Return the dataclass instance with the result 39 | return TagContentResult( 40 | content=[content.strip() for content in matched_contents], 41 | found=bool(matched_contents), 42 | ) 43 | -------------------------------------------------------------------------------- /img/dag.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 9 | 10 | 11 | 12 | 13 | Poet Agent 14 | 15 | Poet Agent 16 | 17 | 18 | 19 | Poem Translator Agent 20 | 21 | Poem Translator Agent 22 | 23 | 24 | 25 | Poet Agent->Poem Translator Agent 26 | 27 | 28 | 29 | 30 | 31 | Writer Agent 32 | 33 | Writer Agent 34 | 35 | 36 | 37 | Poem Translator Agent->Writer Agent 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | 9 | permissions: write-all 10 | 11 | jobs: 12 | release: 13 | name: Release 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Check out the repository 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 2 20 | 21 | - name: Set up Python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: "3.11" 25 | 26 | - name: Upgrade pip 27 | run: | 28 | pip install --constraint=.github/workflows/constraints.txt pip 29 | pip --version 30 | 31 | - name: Install Poetry 32 | run: | 33 | pip install --constraint=.github/workflows/constraints.txt poetry 34 | poetry --version 35 | 36 | - name: Check if there is a parent commit 37 | id: check-parent-commit 38 | run: | 39 | echo "::set-output name=sha::$(git rev-parse --verify --quiet HEAD^)" 40 | 41 | - name: Detect and tag new version 42 | id: check-version 43 | if: steps.check-parent-commit.outputs.sha 44 | uses: salsify/action-detect-and-tag-new-version@v2.0.3 45 | with: 46 | version-command: | 47 | bash -o pipefail -c "poetry version | awk '{ print \$2 }'" 48 | 49 | - name: Bump version for developmental release 50 | if: "! steps.check-version.outputs.tag" 51 | run: | 52 | poetry version patch && 53 | version=$(poetry version | awk '{ print $2 }') && 54 | poetry version $version.dev.$(date +%s) 55 | 56 | - name: Build package 57 | run: | 58 | poetry build --ansi 59 | 60 | - name: Publish package on PyPI 61 | if: steps.check-version.outputs.tag 62 | uses: pypa/gh-action-pypi-publish@v1.8.11 63 | with: 64 | user: __token__ 65 | password: ${{ secrets.PYPI_TOKEN }} 66 | 67 | - name: Publish package on TestPyPI 68 | if: "! 
steps.check-version.outputs.tag" 69 | uses: pypa/gh-action-pypi-publish@v1.8.11 70 | with: 71 | user: __token__ 72 | password: ${{ secrets.TEST_PYPI_TOKEN }} 73 | repository_url: https://test.pypi.org/legacy/ 74 | 75 | - name: Publish the release notes 76 | uses: release-drafter/release-drafter@v5.25.0 77 | with: 78 | publish: ${{ steps.check-version.outputs.tag != '' }} 79 | tag: ${{ steps.check-version.outputs.tag }} 80 | env: 81 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /src/agentic_patterns/utils/completions.py: -------------------------------------------------------------------------------- 1 | def completions_create(client, messages: list, model: str) -> str: 2 | """ 3 | Sends a request to the client's `completions.create` method to interact with the language model. 4 | 5 | Args: 6 | client (Groq): The Groq client object 7 | messages (list[dict]): A list of message objects containing chat history for the model. 8 | model (str): The model to use for generating tool calls and responses. 9 | 10 | Returns: 11 | str: The content of the model's response. 12 | """ 13 | response = client.chat.completions.create(messages=messages, model=model) 14 | return str(response.choices[0].message.content) 15 | 16 | 17 | def build_prompt_structure(prompt: str, role: str, tag: str = "") -> dict: 18 | """ 19 | Builds a structured prompt that includes the role and content. 20 | 21 | Args: 22 | prompt (str): The actual content of the prompt. 23 | role (str): The role of the speaker (e.g., user, assistant). tag (str, optional): An optional tag name; when provided, the prompt content is wrapped in <tag></tag>. 24 | 25 | Returns: 26 | dict: A dictionary representing the structured prompt. 27 | """ 28 | if tag: 29 | prompt = f"<{tag}>{prompt}</{tag}>" 30 | return {"role": role, "content": prompt} 31 | 32 | 33 | def update_chat_history(history: list, msg: str, role: str): 34 | """ 35 | Updates the chat history by appending the latest response. 36 | 37 | Args: 38 | history (list): The list representing the current chat history. 39 | msg (str): The message to append. 40 | role (str): The role type (e.g. 'user', 'assistant', 'system') 41 | """ 42 | history.append(build_prompt_structure(prompt=msg, role=role)) 43 | 44 | 45 | class ChatHistory(list): 46 | def __init__(self, messages: list | None = None, total_length: int = -1): 47 | """Initialise the queue with a fixed total length. 48 | 49 | Args: 50 | messages (list | None): A list of initial messages 51 | total_length (int): The maximum number of messages the chat history can hold. 52 | """ 53 | if messages is None: 54 | messages = [] 55 | 56 | super().__init__(messages) 57 | self.total_length = total_length 58 | 59 | def append(self, msg: str): 60 | """Add a message to the queue. 61 | 62 | Args: 63 | msg (str): The message to be added to the queue 64 | """ 65 | if len(self) == self.total_length: 66 | self.pop(0) 67 | super().append(msg) 68 | 69 | 70 | class FixedFirstChatHistory(ChatHistory): 71 | def __init__(self, messages: list | None = None, total_length: int = -1): 72 | """Initialise the queue with a fixed total length. 73 | 74 | Args: 75 | messages (list | None): A list of initial messages 76 | total_length (int): The maximum number of messages the chat history can hold. 77 | """ 78 | super().__init__(messages, total_length) 79 | 80 | def append(self, msg: str): 81 | """Add a message to the queue. The first message will always stay fixed.
82 | 83 | Args: 84 | msg (str): The message to be added to the queue 85 | """ 86 | if len(self) == self.total_length: 87 | self.pop(1) 88 | super().append(msg) 89 | -------------------------------------------------------------------------------- /src/agentic_patterns/tool_pattern/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Callable 3 | 4 | 5 | def get_fn_signature(fn: Callable) -> dict: 6 | """ 7 | Generates the signature for a given function. 8 | 9 | Args: 10 | fn (Callable): The function whose signature needs to be extracted. 11 | 12 | Returns: 13 | dict: A dictionary containing the function's name, description, 14 | and parameter types. 15 | """ 16 | fn_signature: dict = { 17 | "name": fn.__name__, 18 | "description": fn.__doc__, 19 | "parameters": {"properties": {}}, 20 | } 21 | schema = { 22 | k: {"type": v.__name__} for k, v in fn.__annotations__.items() if k != "return" 23 | } 24 | fn_signature["parameters"]["properties"] = schema 25 | return fn_signature 26 | 27 | 28 | def validate_arguments(tool_call: dict, tool_signature: dict) -> dict: 29 | """ 30 | Validates and converts arguments in the input dictionary to match the expected types. 31 | 32 | Args: 33 | tool_call (dict): A dictionary containing the arguments passed to the tool. 34 | tool_signature (dict): The expected function signature and parameter types. 35 | 36 | Returns: 37 | dict: The tool call dictionary with the arguments converted to the correct types if necessary. 38 | """ 39 | properties = tool_signature["parameters"]["properties"] 40 | 41 | # TODO: This is overly simplified but enough for simple Tools. 42 | type_mapping = { 43 | "int": int, 44 | "str": str, 45 | "bool": bool, 46 | "float": float, 47 | } 48 | 49 | for arg_name, arg_value in tool_call["arguments"].items(): 50 | expected_type = properties[arg_name].get("type") 51 | 52 | if not isinstance(arg_value, type_mapping[expected_type]): 53 | tool_call["arguments"][arg_name] = type_mapping[expected_type](arg_value) 54 | 55 | return tool_call 56 | 57 | 58 | class Tool: 59 | """ 60 | A class representing a tool that wraps a callable and its signature. 61 | 62 | Attributes: 63 | name (str): The name of the tool (function). 64 | fn (Callable): The function that the tool represents. 65 | fn_signature (str): JSON string representation of the function's signature. 66 | """ 67 | 68 | def __init__(self, name: str, fn: Callable, fn_signature: str): 69 | self.name = name 70 | self.fn = fn 71 | self.fn_signature = fn_signature 72 | 73 | def __str__(self): 74 | return self.fn_signature 75 | 76 | def run(self, **kwargs): 77 | """ 78 | Executes the tool (function) with provided arguments. 79 | 80 | Args: 81 | **kwargs: Keyword arguments passed to the function. 82 | 83 | Returns: 84 | The result of the function call. 85 | """ 86 | return self.fn(**kwargs) 87 | 88 | 89 | def tool(fn: Callable): 90 | """ 91 | A decorator that wraps a function into a Tool object. 92 | 93 | Args: 94 | fn (Callable): The function to be wrapped. 95 | 96 | Returns: 97 | Tool: A Tool object containing the function, its name, and its signature. 
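Example (a minimal sketch; `sum_two` is a hypothetical tool used for illustration):
    >>> @tool
    ... def sum_two(a: int, b: int) -> int:
    ...     '''Adds two integers.'''
    ...     return a + b
    >>> sum_two.name
    'sum_two'
    >>> sum_two.run(a=1, b=2)
    3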
98 | """ 99 | 100 | def wrapper(): 101 | fn_signature = get_fn_signature(fn) 102 | return Tool( 103 | name=fn_signature.get("name"), fn=fn, fn_signature=json.dumps(fn_signature) 104 | ) 105 | 106 | return wrapper() 107 | -------------------------------------------------------------------------------- /src/agentic_patterns/multiagent_pattern/crew.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | 3 | from colorama import Fore 4 | from graphviz import Digraph # type: ignore 5 | 6 | from agentic_patterns.utils.logging import fancy_print 7 | 8 | 9 | class Crew: 10 | """ 11 | A class representing a crew of agents working together. 12 | 13 | This class manages a group of agents, their dependencies, and provides methods 14 | for running the agents in a topologically sorted order. 15 | 16 | Attributes: 17 | current_crew (Crew): Class-level variable to track the active Crew context. 18 | agents (list): A list of agents in the crew. 19 | """ 20 | 21 | current_crew = None 22 | 23 | def __init__(self): 24 | self.agents = [] 25 | 26 | def __enter__(self): 27 | """ 28 | Enters the context manager, setting this crew as the current active context. 29 | 30 | Returns: 31 | Crew: The current Crew instance. 32 | """ 33 | Crew.current_crew = self 34 | return self 35 | 36 | def __exit__(self, exc_type, exc_val, exc_tb): 37 | """ 38 | Exits the context manager, clearing the active context. 39 | 40 | Args: 41 | exc_type: The exception type, if an exception was raised. 42 | exc_val: The exception value, if an exception was raised. 43 | exc_tb: The traceback, if an exception was raised. 44 | """ 45 | Crew.current_crew = None 46 | 47 | def add_agent(self, agent): 48 | """ 49 | Adds an agent to the crew. 50 | 51 | Args: 52 | agent: The agent to be added to the crew. 53 | """ 54 | self.agents.append(agent) 55 | 56 | @staticmethod 57 | def register_agent(agent): 58 | """ 59 | Registers an agent with the current active crew context. 60 | 61 | Args: 62 | agent: The agent to be registered. 63 | """ 64 | if Crew.current_crew is not None: 65 | Crew.current_crew.add_agent(agent) 66 | 67 | def topological_sort(self): 68 | """ 69 | Performs a topological sort of the agents based on their dependencies. 70 | 71 | Returns: 72 | list: A list of agents sorted in topological order. 73 | 74 | Raises: 75 | ValueError: If there's a circular dependency among the agents. 76 | """ 77 | in_degree = {agent: len(agent.dependencies) for agent in self.agents} 78 | queue = deque([agent for agent in self.agents if in_degree[agent] == 0]) 79 | 80 | sorted_agents = [] 81 | 82 | while queue: 83 | current_agent = queue.popleft() 84 | sorted_agents.append(current_agent) 85 | 86 | for dependent in current_agent.dependents: 87 | in_degree[dependent] -= 1 88 | if in_degree[dependent] == 0: 89 | queue.append(dependent) 90 | 91 | if len(sorted_agents) != len(self.agents): 92 | raise ValueError( 93 | "Circular dependencies detected among agents, preventing a valid topological sort" 94 | ) 95 | 96 | return sorted_agents 97 | 98 | def plot(self): 99 | """ 100 | Plots the Directed Acyclic Graph (DAG) of agents in the crew using Graphviz. 101 | 102 | Returns: 103 | Digraph: A Graphviz Digraph object representing the agent dependencies. 
104 | """ 105 | dot = Digraph(format="png") # Set format to PNG for inline display 106 | 107 | # Add nodes and edges for each agent in the crew 108 | for agent in self.agents: 109 | dot.node(agent.name) 110 | for dependency in agent.dependencies: 111 | dot.edge(dependency.name, agent.name) 112 | return dot 113 | 114 | def run(self): 115 | """ 116 | Runs all agents in the crew in topologically sorted order. 117 | 118 | This method executes each agent's run method and prints the results. 119 | """ 120 | sorted_agents = self.topological_sort() 121 | for agent in sorted_agents: 122 | fancy_print(f"RUNNING AGENT: {agent}") 123 | print(Fore.RED + f"{agent.run()}") 124 | -------------------------------------------------------------------------------- /src/agentic_patterns/tool_pattern/tool_agent.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from colorama import Fore 5 | from dotenv import load_dotenv 6 | from groq import Groq 7 | 8 | from agentic_patterns.tool_pattern.tool import Tool 9 | from agentic_patterns.tool_pattern.tool import validate_arguments 10 | from agentic_patterns.utils.completions import build_prompt_structure 11 | from agentic_patterns.utils.completions import ChatHistory 12 | from agentic_patterns.utils.completions import completions_create 13 | from agentic_patterns.utils.completions import update_chat_history 14 | from agentic_patterns.utils.extraction import extract_tag_content 15 | 16 | load_dotenv() 17 | 18 | 19 | TOOL_SYSTEM_PROMPT = """ 20 | You are a function calling AI model. You are provided with function signatures within XML tags. 21 | You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug 22 | into functions. Pay special attention to the properties 'types'. You should use those types as in a Python dict. 23 | For each function call return a json object with function name and arguments within 24 | XML tags as follows: 25 | 26 | 27 | {"name": ,"arguments": , "id": } 28 | 29 | 30 | Here are the available tools: 31 | 32 | 33 | %s 34 | 35 | """ 36 | 37 | 38 | class ToolAgent: 39 | """ 40 | The ToolAgent class represents an agent that can interact with a language model and use tools 41 | to assist with user queries. It generates function calls based on user input, validates arguments, 42 | and runs the respective tools. 43 | 44 | Attributes: 45 | tools (Tool | list[Tool]): A list of tools available to the agent. 46 | model (str): The model to be used for generating tool calls and responses. 47 | client (Groq): The Groq client used to interact with the language model. 48 | tools_dict (dict): A dictionary mapping tool names to their corresponding Tool objects. 49 | """ 50 | 51 | def __init__( 52 | self, 53 | tools: Tool | list[Tool], 54 | model: str = "llama3-groq-70b-8192-tool-use-preview", 55 | ) -> None: 56 | self.client = Groq() 57 | self.model = model 58 | self.tools = tools if isinstance(tools, list) else [tools] 59 | self.tools_dict = {tool.name: tool for tool in self.tools} 60 | 61 | def add_tool_signatures(self) -> str: 62 | """ 63 | Collects the function signatures of all available tools. 64 | 65 | Returns: 66 | str: A concatenated string of all tool function signatures in JSON format. 
67 | """ 68 | return "".join([tool.fn_signature for tool in self.tools]) 69 | 70 | def process_tool_calls(self, tool_calls_content: list) -> dict: 71 | """ 72 | Processes each tool call, validates arguments, executes the tools, and collects results. 73 | 74 | Args: 75 | tool_calls_content (list): List of strings, each representing a tool call in JSON format. 76 | 77 | Returns: 78 | dict: A dictionary where the keys are tool call IDs and values are the results from the tools. 79 | """ 80 | observations = {} 81 | for tool_call_str in tool_calls_content: 82 | tool_call = json.loads(tool_call_str) 83 | tool_name = tool_call["name"] 84 | tool = self.tools_dict[tool_name] 85 | 86 | print(Fore.GREEN + f"\nUsing Tool: {tool_name}") 87 | 88 | # Validate and execute the tool call 89 | validated_tool_call = validate_arguments( 90 | tool_call, json.loads(tool.fn_signature) 91 | ) 92 | print(Fore.GREEN + f"\nTool call dict: \n{validated_tool_call}") 93 | 94 | result = tool.run(**validated_tool_call["arguments"]) 95 | print(Fore.GREEN + f"\nTool result: \n{result}") 96 | 97 | # Store the result using the tool call ID 98 | observations[validated_tool_call["id"]] = result 99 | 100 | return observations 101 | 102 | def run( 103 | self, 104 | user_msg: str, 105 | ) -> str: 106 | """ 107 | Handles the full process of interacting with the language model and executing a tool based on user input. 108 | 109 | Args: 110 | user_msg (str): The user's message that prompts the tool agent to act. 111 | 112 | Returns: 113 | str: The final output after executing the tool and generating a response from the model. 114 | """ 115 | user_prompt = build_prompt_structure(prompt=user_msg, role="user") 116 | 117 | tool_chat_history = ChatHistory( 118 | [ 119 | build_prompt_structure( 120 | prompt=TOOL_SYSTEM_PROMPT % self.add_tool_signatures(), 121 | role="system", 122 | ), 123 | user_prompt, 124 | ] 125 | ) 126 | agent_chat_history = ChatHistory([user_prompt]) 127 | 128 | tool_call_response = completions_create( 129 | self.client, messages=tool_chat_history, model=self.model 130 | ) 131 | tool_calls = extract_tag_content(str(tool_call_response), "tool_call") 132 | 133 | if tool_calls.found: 134 | observations = self.process_tool_calls(tool_calls.content) 135 | update_chat_history( 136 | agent_chat_history, f'f"Observation: {observations}"', "user" 137 | ) 138 | 139 | return completions_create(self.client, agent_chat_history, self.model) 140 | -------------------------------------------------------------------------------- /src/agentic_patterns/reflection_pattern/reflection_agent.py: -------------------------------------------------------------------------------- 1 | from colorama import Fore 2 | from dotenv import load_dotenv 3 | from groq import Groq 4 | 5 | from agentic_patterns.utils.completions import build_prompt_structure 6 | from agentic_patterns.utils.completions import completions_create 7 | from agentic_patterns.utils.completions import FixedFirstChatHistory 8 | from agentic_patterns.utils.completions import update_chat_history 9 | from agentic_patterns.utils.logging import fancy_step_tracker 10 | 11 | load_dotenv() 12 | 13 | 14 | BASE_GENERATION_SYSTEM_PROMPT = """ 15 | Your task is to Generate the best content possible for the user's request. 16 | If the user provides critique, respond with a revised version of your previous attempt. 17 | You must always output the revised content. 
18 | """ 19 | 20 | BASE_REFLECTION_SYSTEM_PROMPT = """ 21 | You are tasked with generating critique and recommendations to the user's generated content. 22 | If the user content has something wrong or something to be improved, output a list of recommendations 23 | and critiques. If the user content is ok and there's nothing to change, output this: 24 | """ 25 | 26 | 27 | class ReflectionAgent: 28 | """ 29 | A class that implements a Reflection Agent, which generates responses and reflects 30 | on them using the LLM to iteratively improve the interaction. The agent first generates 31 | responses based on provided prompts and then critiques them in a reflection step. 32 | 33 | Attributes: 34 | model (str): The model name used for generating and reflecting on responses. 35 | client (Groq): An instance of the Groq client to interact with the language model. 36 | """ 37 | 38 | def __init__(self, model: str = "llama-3.1-70b-versatile"): 39 | self.client = Groq() 40 | self.model = model 41 | 42 | def _request_completion( 43 | self, 44 | history: list, 45 | verbose: int = 0, 46 | log_title: str = "COMPLETION", 47 | log_color: str = "", 48 | ): 49 | """ 50 | A private method to request a completion from the Groq model. 51 | 52 | Args: 53 | history (list): A list of messages forming the conversation or reflection history. 54 | verbose (int, optional): The verbosity level. Defaults to 0 (no output). 55 | 56 | Returns: 57 | str: The model-generated response. 58 | """ 59 | output = completions_create(self.client, history, self.model) 60 | 61 | if verbose > 0: 62 | print(log_color, f"\n\n{log_title}\n\n", output) 63 | 64 | return output 65 | 66 | def generate(self, generation_history: list, verbose: int = 0) -> str: 67 | """ 68 | Generates a response based on the provided generation history using the model. 69 | 70 | Args: 71 | generation_history (list): A list of messages forming the conversation or generation history. 72 | verbose (int, optional): The verbosity level, controlling printed output. Defaults to 0. 73 | 74 | Returns: 75 | str: The generated response. 76 | """ 77 | return self._request_completion( 78 | generation_history, verbose, log_title="GENERATION", log_color=Fore.BLUE 79 | ) 80 | 81 | def reflect(self, reflection_history: list, verbose: int = 0) -> str: 82 | """ 83 | Reflects on the generation history by generating a critique or feedback. 84 | 85 | Args: 86 | reflection_history (list): A list of messages forming the reflection history, typically based on 87 | the previous generation or interaction. 88 | verbose (int, optional): The verbosity level, controlling printed output. Defaults to 0. 89 | 90 | Returns: 91 | str: The critique or reflection response from the model. 92 | """ 93 | return self._request_completion( 94 | reflection_history, verbose, log_title="REFLECTION", log_color=Fore.GREEN 95 | ) 96 | 97 | def run( 98 | self, 99 | user_msg: str, 100 | generation_system_prompt: str = "", 101 | reflection_system_prompt: str = "", 102 | n_steps: int = 10, 103 | verbose: int = 0, 104 | ) -> str: 105 | """ 106 | Runs the ReflectionAgent over multiple steps, alternating between generating a response 107 | and reflecting on it for the specified number of steps. 108 | 109 | Args: 110 | user_msg (str): The user message or query that initiates the interaction. 111 | generation_system_prompt (str, optional): The system prompt for guiding the generation process. 112 | reflection_system_prompt (str, optional): The system prompt for guiding the reflection process. 
113 | n_steps (int, optional): The number of generate-reflect cycles to perform. Defaults to 10. 114 | verbose (int, optional): The verbosity level controlling printed output. Defaults to 0. 115 | 116 | Returns: 117 | str: The final generated response after all cycles are completed. 118 | """ 119 | generation_system_prompt += BASE_GENERATION_SYSTEM_PROMPT 120 | reflection_system_prompt += BASE_REFLECTION_SYSTEM_PROMPT 121 | 122 | # Given the iterative nature of the Reflection Pattern, we might exhaust the LLM context (or 123 | # make it really slow). That's the reason I'm limiting the chat history to three messages. 124 | # The `FixedFirstChatHistory` is a very simple class, that creates a Queue that always keeps 125 | # fixed the first message. I thought this would be useful for maintaining the system prompt 126 | # in the chat history. 127 | generation_history = FixedFirstChatHistory( 128 | [ 129 | build_prompt_structure(prompt=generation_system_prompt, role="system"), 130 | build_prompt_structure(prompt=user_msg, role="user"), 131 | ], 132 | total_length=3, 133 | ) 134 | 135 | reflection_history = FixedFirstChatHistory( 136 | [build_prompt_structure(prompt=reflection_system_prompt, role="system")], 137 | total_length=3, 138 | ) 139 | 140 | for step in range(n_steps): 141 | if verbose > 0: 142 | fancy_step_tracker(step, n_steps) 143 | 144 | # Generate the response 145 | generation = self.generate(generation_history, verbose=verbose) 146 | update_chat_history(generation_history, generation, "assistant") 147 | update_chat_history(reflection_history, generation, "user") 148 | 149 | # Reflect and critique the generation 150 | critique = self.reflect(reflection_history, verbose=verbose) 151 | 152 | if "<OK>" in critique: 153 | # If no additional suggestions are made, stop the loop 154 | print( 155 | Fore.RED, 156 | "\n\nStop Sequence found. Stopping the reflection loop ... \n\n", 157 | ) 158 | break 159 | 160 | update_chat_history(generation_history, critique, "user") 161 | update_chat_history(reflection_history, critique, "assistant") 162 | 163 | return generation 164 | -------------------------------------------------------------------------------- /src/agentic_patterns/planning_pattern/react_agent.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from colorama import Fore 5 | from dotenv import load_dotenv 6 | from groq import Groq 7 | 8 | from agentic_patterns.tool_pattern.tool import Tool 9 | from agentic_patterns.tool_pattern.tool import validate_arguments 10 | from agentic_patterns.utils.completions import build_prompt_structure 11 | from agentic_patterns.utils.completions import ChatHistory 12 | from agentic_patterns.utils.completions import completions_create 13 | from agentic_patterns.utils.completions import update_chat_history 14 | from agentic_patterns.utils.extraction import extract_tag_content 15 | 16 | load_dotenv() 17 | 18 | BASE_SYSTEM_PROMPT = "" 19 | 20 | 21 | REACT_SYSTEM_PROMPT = """ 22 | You operate by running a loop with the following steps: Thought, Action, Observation. 23 | You are provided with function signatures within <tools></tools> XML tags. 24 | You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug 25 | into functions. Pay special attention to the properties 'types'. You should use those types as in a Python dict.
26 | 27 | For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows: 28 | 29 | <tool_call> 30 | {"name": <function-name>,"arguments": <args-dict>, "id": <monotonically-increasing-id>} 31 | </tool_call> 32 | 33 | Here are the available tools / actions: 34 | 35 | <tools> 36 | %s 37 | </tools> 38 | 39 | Example session: 40 | 41 | <question>What's the current temperature in Madrid?</question> 42 | <thought>I need to get the current weather in Madrid</thought> 43 | <tool_call>{"name": "get_current_weather","arguments": {"location": "Madrid", "unit": "celsius"}, "id": 0}</tool_call> 44 | 45 | You will be called again with this: 46 | 47 | <observation>{0: {"temperature": 25, "unit": "celsius"}}</observation> 48 | 49 | You then output: 50 | 51 | <response>The current temperature in Madrid is 25 degrees Celsius</response> 52 | 53 | Additional constraints: 54 | 55 | - If the user asks you something unrelated to any of the tools above, answer freely enclosing your answer with <response></response> tags. 56 | """ 57 | 58 | 59 | class ReactAgent: 60 | """ 61 | A class that represents an agent using the ReAct logic that interacts with tools to process 62 | user inputs, make decisions, and execute tool calls. The agent can run interactive sessions, 63 | collect tool signatures, and process multiple tool calls in a given round of interaction. 64 | 65 | Attributes: 66 | client (Groq): The Groq client used to handle model-based completions. 67 | model (str): The name of the model used for generating responses. Default is "llama-3.1-70b-versatile". 68 | tools (list[Tool]): A list of Tool instances available for execution. 69 | tools_dict (dict): A dictionary mapping tool names to their corresponding Tool instances. 70 | """ 71 | 72 | def __init__( 73 | self, 74 | tools: Tool | list[Tool], 75 | model: str = "llama-3.1-70b-versatile", 76 | system_prompt: str = BASE_SYSTEM_PROMPT, 77 | ) -> None: 78 | self.client = Groq() 79 | self.model = model 80 | self.system_prompt = system_prompt 81 | self.tools = tools if isinstance(tools, list) else [tools] 82 | self.tools_dict = {tool.name: tool for tool in self.tools} 83 | 84 | def add_tool_signatures(self) -> str: 85 | """ 86 | Collects the function signatures of all available tools. 87 | 88 | Returns: 89 | str: A concatenated string of all tool function signatures in JSON format. 90 | """ 91 | return "".join([tool.fn_signature for tool in self.tools]) 92 | 93 | def process_tool_calls(self, tool_calls_content: list) -> dict: 94 | """ 95 | Processes each tool call, validates arguments, executes the tools, and collects results. 96 | 97 | Args: 98 | tool_calls_content (list): List of strings, each representing a tool call in JSON format. 99 | 100 | Returns: 101 | dict: A dictionary where the keys are tool call IDs and values are the results from the tools.
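Example (illustrative; assumes a registered tool named `sum_two` that adds two integers):
    >>> agent.process_tool_calls(['{"name": "sum_two", "arguments": {"a": 3, "b": 9}, "id": 0}'])
    {0: 12}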
102 | """ 103 | observations = {} 104 | for tool_call_str in tool_calls_content: 105 | tool_call = json.loads(tool_call_str) 106 | tool_name = tool_call["name"] 107 | tool = self.tools_dict[tool_name] 108 | 109 | print(Fore.GREEN + f"\nUsing Tool: {tool_name}") 110 | 111 | # Validate and execute the tool call 112 | validated_tool_call = validate_arguments( 113 | tool_call, json.loads(tool.fn_signature) 114 | ) 115 | print(Fore.GREEN + f"\nTool call dict: \n{validated_tool_call}") 116 | 117 | result = tool.run(**validated_tool_call["arguments"]) 118 | print(Fore.GREEN + f"\nTool result: \n{result}") 119 | 120 | # Store the result using the tool call ID 121 | observations[validated_tool_call["id"]] = result 122 | 123 | return observations 124 | 125 | def run( 126 | self, 127 | user_msg: str, 128 | max_rounds: int = 10, 129 | ) -> str: 130 | """ 131 | Executes a user interaction session, where the agent processes user input, generates responses, 132 | handles tool calls, and updates chat history until a final response is ready or the maximum 133 | number of rounds is reached. 134 | 135 | Args: 136 | user_msg (str): The user's input message to start the interaction. 137 | max_rounds (int, optional): Maximum number of interaction rounds the agent should perform. Default is 10. 138 | 139 | Returns: 140 | str: The final response generated by the agent after processing user input and any tool calls. 141 | """ 142 | user_prompt = build_prompt_structure( 143 | prompt=user_msg, role="user", tag="question" 144 | ) 145 | if self.tools: 146 | self.system_prompt += ( 147 | "\n" + REACT_SYSTEM_PROMPT % self.add_tool_signatures() 148 | ) 149 | 150 | chat_history = ChatHistory( 151 | [ 152 | build_prompt_structure( 153 | prompt=self.system_prompt, 154 | role="system", 155 | ), 156 | user_prompt, 157 | ] 158 | ) 159 | 160 | if self.tools: 161 | # Run the ReAct loop for max_rounds 162 | for _ in range(max_rounds): 163 | 164 | completion = completions_create(self.client, chat_history, self.model) 165 | 166 | response = extract_tag_content(str(completion), "response") 167 | if response.found: 168 | return response.content[0] 169 | 170 | thought = extract_tag_content(str(completion), "thought") 171 | tool_calls = extract_tag_content(str(completion), "tool_call") 172 | 173 | update_chat_history(chat_history, completion, "assistant") 174 | 175 | print(Fore.MAGENTA + f"\nThought: {thought.content[0]}") 176 | 177 | if tool_calls.found: 178 | observations = self.process_tool_calls(tool_calls.content) 179 | print(Fore.BLUE + f"\nObservations: {observations}") 180 | update_chat_history(chat_history, f"{observations}", "user") 181 | 182 | return completions_create(self.client, chat_history, self.model) 183 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/macos,python,visualstudiocode,pycharm+all 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos,python,visualstudiocode,pycharm+all 3 | 4 | ### macOS ### 5 | # General 6 | .DS_Store 7 | .AppleDouble 8 | .LSOverride 9 | 10 | # Icon must end with two \r 11 | Icon 12 | 13 | 14 | # Thumbnails 15 | ._* 16 | 17 | # Files that might appear in the root of a volume 18 | .DocumentRevisions-V100 19 | .fseventsd 20 | .Spotlight-V100 21 | .TemporaryItems 22 | .Trashes 23 | .VolumeIcon.icns 24 | .com.apple.timemachine.donotpresent 25 | 26 | # Directories potentially created 
on remote AFP share 27 | .AppleDB 28 | .AppleDesktop 29 | Network Trash Folder 30 | Temporary Items 31 | .apdisk 32 | 33 | ### macOS Patch ### 34 | # iCloud generated files 35 | *.icloud 36 | 37 | ### PyCharm+all ### 38 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 39 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 40 | 41 | # User-specific stuff 42 | .idea/**/workspace.xml 43 | .idea/**/tasks.xml 44 | .idea/**/usage.statistics.xml 45 | .idea/**/dictionaries 46 | .idea/**/shelf 47 | 48 | # AWS User-specific 49 | .idea/**/aws.xml 50 | 51 | # Generated files 52 | .idea/**/contentModel.xml 53 | 54 | # Sensitive or high-churn files 55 | .idea/**/dataSources/ 56 | .idea/**/dataSources.ids 57 | .idea/**/dataSources.local.xml 58 | .idea/**/sqlDataSources.xml 59 | .idea/**/dynamic.xml 60 | .idea/**/uiDesigner.xml 61 | .idea/**/dbnavigator.xml 62 | 63 | # Gradle 64 | .idea/**/gradle.xml 65 | .idea/**/libraries 66 | 67 | # Gradle and Maven with auto-import 68 | # When using Gradle or Maven with auto-import, you should exclude module files, 69 | # since they will be recreated, and may cause churn. Uncomment if using 70 | # auto-import. 71 | # .idea/artifacts 72 | # .idea/compiler.xml 73 | # .idea/jarRepositories.xml 74 | # .idea/modules.xml 75 | # .idea/*.iml 76 | # .idea/modules 77 | # *.iml 78 | # *.ipr 79 | 80 | # CMake 81 | cmake-build-*/ 82 | 83 | # Mongo Explorer plugin 84 | .idea/**/mongoSettings.xml 85 | 86 | # File-based project format 87 | *.iws 88 | 89 | # IntelliJ 90 | out/ 91 | 92 | # mpeltonen/sbt-idea plugin 93 | .idea_modules/ 94 | 95 | # JIRA plugin 96 | atlassian-ide-plugin.xml 97 | 98 | # Cursive Clojure plugin 99 | .idea/replstate.xml 100 | 101 | # SonarLint plugin 102 | .idea/sonarlint/ 103 | 104 | # Crashlytics plugin (for Android Studio and IntelliJ) 105 | com_crashlytics_export_strings.xml 106 | crashlytics.properties 107 | crashlytics-build.properties 108 | fabric.properties 109 | 110 | # Editor-based Rest Client 111 | .idea/httpRequests 112 | 113 | # Android studio 3.1+ serialized cache file 114 | .idea/caches/build_file_checksums.ser 115 | 116 | ### PyCharm+all Patch ### 117 | # Ignore everything but code style settings and run configurations 118 | # that are supposed to be shared within teams. 119 | 120 | .idea/* 121 | 122 | !.idea/codeStyles 123 | !.idea/runConfigurations 124 | 125 | ### Python ### 126 | # Byte-compiled / optimized / DLL files 127 | __pycache__/ 128 | *.py[cod] 129 | *$py.class 130 | 131 | # C extensions 132 | *.so 133 | 134 | # Distribution / packaging 135 | .Python 136 | build/ 137 | develop-eggs/ 138 | dist/ 139 | downloads/ 140 | eggs/ 141 | .eggs/ 142 | lib/ 143 | lib64/ 144 | parts/ 145 | sdist/ 146 | var/ 147 | wheels/ 148 | share/python-wheels/ 149 | *.egg-info/ 150 | .installed.cfg 151 | *.egg 152 | MANIFEST 153 | 154 | # PyInstaller 155 | # Usually these files are written by a python script from a template 156 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
157 | *.manifest 158 | *.spec 159 | 160 | # Installer logs 161 | pip-log.txt 162 | pip-delete-this-directory.txt 163 | 164 | # Unit test / coverage reports 165 | htmlcov/ 166 | .tox/ 167 | .nox/ 168 | .coverage 169 | .coverage.* 170 | .cache 171 | nosetests.xml 172 | coverage.xml 173 | *.cover 174 | *.py,cover 175 | .hypothesis/ 176 | .pytest_cache/ 177 | cover/ 178 | 179 | # Translations 180 | *.mo 181 | *.pot 182 | 183 | # Django stuff: 184 | *.log 185 | local_settings.py 186 | db.sqlite3 187 | db.sqlite3-journal 188 | 189 | # Flask stuff: 190 | instance/ 191 | .webassets-cache 192 | 193 | # Scrapy stuff: 194 | .scrapy 195 | 196 | # Sphinx documentation 197 | docs/_build/ 198 | 199 | # PyBuilder 200 | .pybuilder/ 201 | target/ 202 | 203 | # Jupyter Notebook 204 | .ipynb_checkpoints 205 | 206 | # IPython 207 | profile_default/ 208 | ipython_config.py 209 | 210 | # pyenv 211 | # For a library or package, you might want to ignore these files since the code is 212 | # intended to run in multiple environments; otherwise, check them in: 213 | # .python-version 214 | 215 | # pipenv 216 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 217 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 218 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 219 | # install all needed dependencies. 220 | #Pipfile.lock 221 | 222 | # poetry 223 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 224 | # This is especially recommended for binary packages to ensure reproducibility, and is more 225 | # commonly ignored for libraries. 226 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 227 | #poetry.lock 228 | 229 | # pdm 230 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 231 | #pdm.lock 232 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 233 | # in version control. 234 | # https://pdm.fming.dev/#use-with-ide 235 | .pdm.toml 236 | 237 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 238 | __pypackages__/ 239 | 240 | # Celery stuff 241 | celerybeat-schedule 242 | celerybeat.pid 243 | 244 | # SageMath parsed files 245 | *.sage.py 246 | 247 | # Environments 248 | .env 249 | .venv 250 | env/ 251 | venv/ 252 | ENV/ 253 | env.bak/ 254 | venv.bak/ 255 | 256 | # Spyder project settings 257 | .spyderproject 258 | .spyproject 259 | 260 | # Rope project settings 261 | .ropeproject 262 | 263 | # mkdocs documentation 264 | /site 265 | 266 | # mypy 267 | .mypy_cache/ 268 | .dmypy.json 269 | dmypy.json 270 | 271 | # Pyre type checker 272 | .pyre/ 273 | 274 | # pytype static type analyzer 275 | .pytype/ 276 | 277 | # Cython debug symbols 278 | cython_debug/ 279 | 280 | # PyCharm 281 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 282 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 283 | # and can be added to the global gitignore or merged into this file. For a more nuclear 284 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
285 | #.idea/ 286 | 287 | ### Python Patch ### 288 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration 289 | poetry.toml 290 | 291 | # ruff 292 | .ruff_cache/ 293 | 294 | # LSP config files 295 | pyrightconfig.json 296 | 297 | ### VisualStudioCode ### 298 | .vscode/* 299 | !.vscode/settings.json 300 | !.vscode/tasks.json 301 | !.vscode/launch.json 302 | !.vscode/extensions.json 303 | !.vscode/*.code-snippets 304 | 305 | # Local History for Visual Studio Code 306 | .history/ 307 | 308 | # Built Visual Studio Code Extensions 309 | *.vsix 310 | 311 | ### VisualStudioCode Patch ### 312 | # Ignore all local history of files 313 | .history 314 | .ionide 315 | 316 | # End of https://www.toptal.com/developers/gitignore/api/macos,python,visualstudiocode,pycharm+all 317 | 318 | # Additional ignored files / folders from hypermoder cookiecutter 319 | 320 | /.coverage 321 | /.coverage.* 322 | /.nox/ 323 | /.python-version 324 | /.pytype/ 325 | /dist/ 326 | /docs/_build/ 327 | /src/*.egg-info/ 328 | chatgpt_experiment.py 329 | chatgpt_battery.py 330 | chatgpt_battery_classes.py 331 | *.diff -------------------------------------------------------------------------------- /src/agentic_patterns/multiagent_pattern/agent.py: -------------------------------------------------------------------------------- 1 | from textwrap import dedent 2 | 3 | from agentic_patterns.multiagent_pattern.crew import Crew 4 | from agentic_patterns.planning_pattern.react_agent import ReactAgent 5 | from agentic_patterns.tool_pattern.tool import Tool 6 | 7 | 8 | class Agent: 9 | """ 10 | Represents an AI agent that can work as part of a team to complete tasks. 11 | 12 | This class implements an agent with dependencies, context handling, and task execution capabilities. 13 | It can be used in a multi-agent system where agents collaborate to solve complex problems. 14 | 15 | Attributes: 16 | name (str): The name of the agent. 17 | backstory (str): The backstory or background of the agent. 18 | task_description (str): A description of the task assigned to the agent. 19 | task_expected_output (str): The expected format or content of the task output. 20 | react_agent (ReactAgent): An instance of ReactAgent used for generating responses. 21 | dependencies (list[Agent]): A list of Agent instances that this agent depends on. 22 | dependents (list[Agent]): A list of Agent instances that depend on this agent. 23 | context (str): Accumulated context information from other agents. 24 | 25 | Args: 26 | name (str): The name of the agent. 27 | backstory (str): The backstory or background of the agent. 28 | task_description (str): A description of the task assigned to the agent. 29 | task_expected_output (str, optional): The expected format or content of the task output. Defaults to "". 30 | tools (list[Tool] | None, optional): A list of Tool instances available to the agent. Defaults to None. 31 | llm (str, optional): The name of the language model to use. Defaults to "llama-3.1-70b-versatile". 
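Example (a minimal sketch; names, backstories and tasks are illustrative):
    poet = Agent(name="Poet", backstory="You are a poet.", task_description="Write a haiku")
    translator = Agent(name="Translator", backstory="You translate poems.", task_description="Translate the poem into Spanish")
    poet >> translator  # the translator now depends on the poet's output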
32 | """ 33 | 34 | def __init__( 35 | self, 36 | name: str, 37 | backstory: str, 38 | task_description: str, 39 | task_expected_output: str = "", 40 | tools: list[Tool] | None = None, 41 | llm: str = "llama-3.1-70b-versatile", 42 | ): 43 | self.name = name 44 | self.backstory = backstory 45 | self.task_description = task_description 46 | self.task_expected_output = task_expected_output 47 | self.react_agent = ReactAgent( 48 | model=llm, system_prompt=self.backstory, tools=tools or [] 49 | ) 50 | 51 | self.dependencies: list[Agent] = [] # Agents that this agent depends on 52 | self.dependents: list[Agent] = [] # Agents that depend on this agent 53 | 54 | self.context = "" 55 | 56 | # Automatically register this agent to the active Crew context if one exists 57 | Crew.register_agent(self) 58 | 59 | def __repr__(self): 60 | return f"{self.name}" 61 | 62 | def __rshift__(self, other): 63 | """ 64 | Defines the '>>' operator. This operator is used to indicate agent dependency. 65 | 66 | Args: 67 | other (Agent): The agent that depends on this agent. 68 | """ 69 | self.add_dependent(other) 70 | return other # Allow chaining 71 | 72 | def __lshift__(self, other): 73 | """ 74 | Defines the '<<' operator to indicate agent dependency in reverse. 75 | 76 | Args: 77 | other (Agent): The agent that this agent depends on. 78 | 79 | Returns: 80 | Agent: The `other` agent to allow for chaining. 81 | """ 82 | self.add_dependency(other) 83 | return other # Allow chaining 84 | 85 | def __rrshift__(self, other): 86 | """ 87 | Defines the '<<' operator.This operator is used to indicate agent dependency. 88 | 89 | Args: 90 | other (Agent): The agent that this agent depends on. 91 | """ 92 | self.add_dependency(other) 93 | return self # Allow chaining 94 | 95 | def __rlshift__(self, other): 96 | """ 97 | Defines the '<<' operator when evaluated from right to left. 98 | This operator is used to indicate agent dependency in the normal order. 99 | 100 | Args: 101 | other (Agent): The agent that depends on this agent. 102 | 103 | Returns: 104 | Agent: The current agent (self) to allow for chaining. 105 | """ 106 | self.add_dependent(other) 107 | return self # Allow chaining 108 | 109 | def add_dependency(self, other): 110 | """ 111 | Adds a dependency to this agent. 112 | 113 | Args: 114 | other (Agent | list[Agent]): The agent(s) that this agent depends on. 115 | 116 | Raises: 117 | TypeError: If the dependency is not an Agent or a list of Agents. 118 | """ 119 | if isinstance(other, Agent): 120 | self.dependencies.append(other) 121 | other.dependents.append(self) 122 | elif isinstance(other, list) and all(isinstance(item, Agent) for item in other): 123 | for item in other: 124 | self.dependencies.append(item) 125 | item.dependents.append(self) 126 | else: 127 | raise TypeError("The dependency must be an instance or list of Agent.") 128 | 129 | def add_dependent(self, other): 130 | """ 131 | Adds a dependent to this agent. 132 | 133 | Args: 134 | other (Agent | list[Agent]): The agent(s) that depend on this agent. 135 | 136 | Raises: 137 | TypeError: If the dependent is not an Agent or a list of Agents. 
138 |         """
139 |         if isinstance(other, Agent):
140 |             other.dependencies.append(self)
141 |             self.dependents.append(other)
142 |         elif isinstance(other, list) and all(isinstance(item, Agent) for item in other):
143 |             for item in other:
144 |                 item.dependencies.append(self)
145 |                 self.dependents.append(item)
146 |         else:
147 |             raise TypeError("The dependent must be an instance or list of Agent.")
148 |
149 |     def receive_context(self, input_data):
150 |         """
151 |         Receives and stores context information from other agents.
152 |
153 |         Args:
154 |             input_data (str): The context information to be added.
155 |         """
156 |         self.context += f"{self.name} received context: \n{input_data}"
157 |
158 |     def create_prompt(self):
159 |         """
160 |         Creates a prompt for the agent based on its task description, expected output, and context.
161 |
162 |         Returns:
163 |             str: The formatted prompt string.
164 |         """
165 |         prompt = dedent(
166 |             f"""
167 |         You are an AI agent. You are part of a team of agents working together to complete a task.
168 |         I'm going to give you the task description enclosed in <task_description> tags. I'll also give
169 |         you the available context from the other agents in <context> tags. If the context
170 |         is not available, the <context> tags will be empty. You'll also receive the task
171 |         expected output enclosed in <task_expected_output> tags. With all this information
172 |         you need to create the best possible response, always respecting the format as described in
173 |         <task_expected_output> tags. If expected output is not available, just create
174 |         a meaningful response to complete the task.
175 |
176 |         <task_description>
177 |         {self.task_description}
178 |         </task_description>
179 |
180 |         <task_expected_output>
181 |         {self.task_expected_output}
182 |         </task_expected_output>
183 |
184 |         <context>
185 |         {self.context}
186 |         </context>
187 |
188 |         Your response:
189 |         """
190 |         ).strip()
191 |
192 |         return prompt
193 |
194 |     def run(self):
195 |         """
196 |         Runs the agent's task and generates the output.
197 |
198 |         This method creates a prompt, runs it through the ReactAgent, and passes the output to all dependent agents.
199 |
200 |         Returns:
201 |             str: The output generated by the agent.
202 |         """
203 |         msg = self.create_prompt()
204 |         output = self.react_agent.run(user_msg=msg)
205 |
206 |         # Pass the output to all dependents
207 |         for dependent in self.dependents:
208 |             dependent.receive_context(output)
209 |         return output
--------------------------------------------------------------------------------
/notebooks/multiagent_pattern.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "0acdc7c0-61bd-45a1-a040-df05c67ab0b2",
6 | "metadata": {},
7 | "source": [
8 | "# Multiagent Pattern - Multiagent Collaboration"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "7ba71f3e-5421-4dd7-bc63-e3fcdf2680a6",
14 | "metadata": {},
15 | "source": [
16 | "<img alt=\"Alt text\" src=\"../img/multiagent_pattern.png\">\n",
17 | "\n",
18 | "---\n",
19 | "\n",
20 | "You may have heard about frameworks like [CrewAI](https://www.crewai.com/) or [AutoGen](https://microsoft.github.io/autogen/0.2/), which allow you to create multi-agent applications.\n",
21 | "\n",
22 | "These frameworks implement different variations of the multi-agent pattern, in which tasks are divided into **smaller subtasks executed by different roles** (e.g. one agent can be a software engineer, another a project manager, etc.)\n",
23 | "\n",
24 | "For this final lesson, I wanted to build something more elaborate.
That's why I've been working on a 𝐦𝐢𝐧𝐢𝐦𝐚𝐥𝐢𝐬𝐭 𝐯𝐞𝐫𝐬𝐢𝐨𝐧 𝐨𝐟 𝐂𝐫𝐞𝐰𝐀𝐈, drawing inspiration from two of its key concepts: 𝐂𝐫𝐞𝐰 and 𝐀𝐠𝐞𝐧𝐭.\n",
25 | "\n",
26 | "Additionally, I've also borrowed ideas from [𝐀𝐢𝐫𝐟𝐥𝐨𝐰](https://airflow.apache.org/)'𝐬 𝐝𝐞𝐬𝐢𝐠𝐧 𝐩𝐡𝐢𝐥𝐨𝐬𝐨𝐩𝐡𝐲, using >> and << to define dependencies between my agents. In this micro-CrewAI, 𝐚𝐠𝐞𝐧𝐭𝐬 are equivalent to 𝐀𝐢𝐫𝐟𝐥𝐨𝐰 𝐓𝐚𝐬𝐤𝐬 and the 𝐂𝐫𝐞𝐰 is equivalent to an 𝐀𝐢𝐫𝐟𝐥𝐨𝐰 𝐃𝐀𝐆.\n",
27 | "\n",
28 | "Take a look at the previous lessons if you haven't!\n",
29 | "\n",
30 | "* [First Lesson: The Reflection Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/reflection_pattern.ipynb)\n",
31 | "* [Second Lesson: The Tool Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/tool_pattern.ipynb)\n",
32 | "* [Third Lesson: The Planning Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/planning_pattern.ipynb)\n",
33 | "\n",
34 | "**Let's begin!!** 💪"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "id": "f786e114-cde4-400e-8781-fb7f48bd072c",
40 | "metadata": {},
41 | "source": [
42 | "## The Agent Class"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "id": "ec27b59f-82c6-4b21-917c-fa6d64c1e614",
48 | "metadata": {},
49 | "source": [
50 | "First of all, we need an **Agent Class**. This class implements an\n",
51 | "Agent, and internally it implements the ReAct technique (check [Lesson 3](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/planning_pattern.ipynb) if you want to see this technique in detail!)."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "id": "69780ba5-c321-4d71-b0aa-35b1d6c55ff3",
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "from agentic_patterns.multiagent_pattern.agent import Agent"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "id": "6ba412fb-9604-4146-93bb-ed0451c529f8",
67 | "metadata": {},
68 | "source": [
69 | "Let's create an example agent to see how it works."
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "id": "3aa589d5-7825-4fe0-a269-f35474bf6da5",
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "agent_example = Agent(\n",
80 | "    name=\"Poet Agent\",\n",
81 | "    backstory=\"You are a well-known poet, who enjoys creating high quality poetry.\",\n",
82 | "    task_description=\"Write a poem about the meaning of life\",\n",
83 | "    task_expected_output=\"Just output the poem, without any title or introductory sentences\",\n",
84 | ")"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "id": "fd71e1e3-893f-40e1-b6c5-f15e42beb851",
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "print(agent_example.run())"
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "id": "a8e0e4aa-cce0-4648-a6fd-eac244662874",
100 | "metadata": {},
101 | "source": [
102 | "You can also associate tools with the agent. Let's create a tool for writing a string into a txt file."
103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "id": "b7fae777-3bf6-4be5-8ab8-de79f545d25a", 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "from agentic_patterns.tool_pattern.tool import tool" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "id": "4016dc0d-cc7f-4e7a-b971-2ecfd31b79cf", 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "@tool\n", 123 | "def write_str_to_txt(string_data: str, txt_filename: str):\n", 124 | " \"\"\"\n", 125 | " Writes a string to a txt file.\n", 126 | "\n", 127 | " This function takes a string and writes it to a text file. If the file already exists, \n", 128 | " it will be overwritten with the new data.\n", 129 | "\n", 130 | " Args:\n", 131 | " string_data (str): The string containing the data to be written to the file.\n", 132 | " txt_filename (str): The name of the text file to which the data should be written.\n", 133 | " \"\"\"\n", 134 | " # Write the string data to the text file\n", 135 | " with open(txt_filename, mode='w', encoding='utf-8') as file:\n", 136 | " file.write(string_data)\n", 137 | "\n", 138 | " print(f\"Data successfully written to {txt_filename}\")" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "id": "7ed9d5c5-6a1a-4510-85a1-5c03c2d2fa37", 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "agent_tool_example = Agent(\n", 149 | " name=\"Writer Agent\",\n", 150 | " backstory=\"You are a language model specialised in writing text into .txt files\",\n", 151 | " task_description=\"Write the string 'This is a Tool Agent' into './tool_agent_example.txt'\",\n", 152 | " task_expected_output=\"A .txt file containing the given string\",\n", 153 | " tools=write_str_to_txt,\n", 154 | ")" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": null, 160 | "id": "a6214dc3-e24f-430c-99f0-6a8fe484d8ba", 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "agent_tool_example.run()" 165 | ] 166 | }, 167 | { 168 | "cell_type": "markdown", 169 | "id": "026c6229-5833-4a99-afa6-e45cc72fb04d", 170 | "metadata": {}, 171 | "source": [ 172 | "## Defining Agent Dependencies" 173 | ] 174 | }, 175 | { 176 | "cell_type": "markdown", 177 | "id": "ab6688e9-1fdf-4b51-bb33-c63c80ce9bb7", 178 | "metadata": {}, 179 | "source": [ 180 | "Let's define two agents now." 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "id": "bf142278-b556-42cb-9a5e-2761af96f5c0", 187 | "metadata": {}, 188 | "outputs": [], 189 | "source": [ 190 | "agent_1 = Agent(\n", 191 | " name=\"Poet Agent\",\n", 192 | " backstory=\"You are a well-known poet, who enjoys creating high quality poetry.\",\n", 193 | " task_description=\"Write a poem about the meaning of life\",\n", 194 | " task_expected_output=\"Just output the poem, without any title or introductory sentences\",\n", 195 | ")\n", 196 | "\n", 197 | "agent_2 = Agent(\n", 198 | " name=\"Poem Translator Agent\",\n", 199 | " backstory=\"You are an expert translator especially skilled in Ancient Greek\",\n", 200 | " task_description=\"Translate a poem into Ancient Greek\", \n", 201 | " task_expected_output=\"Just output the translated poem and nothing else\"\n", 202 | ")" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "id": "565d174d-ab42-4318-b53a-1ed142512f4f", 208 | "metadata": {}, 209 | "source": [ 210 | "We can define the agent dependencies using the `>>` operator." 
211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "id": "f52347ac-99b3-4695-a6a4-cf31f7784826", 217 | "metadata": {}, 218 | "outputs": [], 219 | "source": [ 220 | "agent_1 >> agent_2" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "id": "603e5a0d-25ef-4fdf-8ed8-63bd50f90cfd", 226 | "metadata": {}, 227 | "source": [ 228 | "This means `agent_2` depends on `agent_1`. We can check the dependencies and dependents of both agents." 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "id": "560148d0-ae20-45ba-b22f-938eda0be64d", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "print(\"Agent 1 dependencies: \", agent_1.dependencies)\n", 239 | "print(\"Agent 1 dependents: \", agent_1.dependents)\n", 240 | "print(\"Agent 2 dependencies: \", agent_2.dependencies)\n", 241 | "print(\"Agent 2 dependents: \", agent_2.dependents)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "id": "e9276064-136c-49bc-ae95-4d2b2ba47187", 247 | "metadata": {}, 248 | "source": [ 249 | "Now, if we run `agent_1`, the results will be added to `agent_2`'s context." 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": null, 255 | "id": "26f58fda-3673-4067-ad0b-a4055a476a32", 256 | "metadata": {}, 257 | "outputs": [], 258 | "source": [ 259 | "print(agent_1.run())" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": null, 265 | "id": "cd3dccc9-c201-4a70-bebf-1b9b83b7f59d", 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "print(agent_2.context)" 270 | ] 271 | }, 272 | { 273 | "cell_type": "markdown", 274 | "id": "b4019dd7-b79f-4595-8104-9a47c64e4ba4", 275 | "metadata": {}, 276 | "source": [ 277 | "Now, if we run the second agent, it will use the context received from the previous agent to generate its output." 
278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": null, 283 | "id": "a686cefc-db4f-4eb4-9e93-11bdbdd30d6d", 284 | "metadata": {}, 285 | "outputs": [], 286 | "source": [ 287 | "print(agent_2.run())" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "id": "eb11c5a8-fc37-42e3-a521-8cf87ae86dbc", 293 | "metadata": {}, 294 | "source": [ 295 | "## The Crew" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "id": "66c83cc4-87cf-4afc-855b-e5263ad95a71", 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "from agentic_patterns.multiagent_pattern.crew import Crew" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "id": "e58d1cbd-3dbb-4a36-8fc8-9b4242a7b1d3", 312 | "metadata": {}, 313 | "outputs": [], 314 | "source": [ 315 | "with Crew() as crew:\n", 316 | " agent_1 = Agent(\n", 317 | " name=\"Poet Agent\",\n", 318 | " backstory=\"You are a well-known poet, who enjoys creating high quality poetry.\",\n", 319 | " task_description=\"Write a poem about the meaning of life\",\n", 320 | " task_expected_output=\"Just output the poem, without any title or introductory sentences\",\n", 321 | " )\n", 322 | "\n", 323 | " agent_2 = Agent(\n", 324 | " name=\"Poem Translator Agent\",\n", 325 | " backstory=\"You are an expert translator especially skilled in Spanish\",\n", 326 | " task_description=\"Translate a poem into Spanish\", \n", 327 | " task_expected_output=\"Just output the translated poem and nothing else\"\n", 328 | " )\n", 329 | "\n", 330 | " agent_3 = Agent(\n", 331 | " name=\"Writer Agent\",\n", 332 | " backstory=\"You are an expert transcriber, that loves writing poems into txt files\",\n", 333 | " task_description=\"You'll receive a Spanish poem in your context. 
You need to write the poem into './poem.txt' file\",\n",
334 | "        task_expected_output=\"A txt file containing the Spanish poem received from the context\",\n",
335 | "        tools=write_str_to_txt,\n",
336 | "    )\n",
337 | "\n",
338 | "    agent_1 >> agent_2 >> agent_3"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": null,
344 | "id": "9027cbdc-2bb0-419b-87a0-0352f59d3079",
345 | "metadata": {},
346 | "outputs": [],
347 | "source": [
348 | "crew.plot()"
349 | ]
350 | },
351 | {
352 | "cell_type": "code",
353 | "execution_count": null,
354 | "id": "b39f70a0-2c91-40c4-ab56-8318cef7ef97",
355 | "metadata": {},
356 | "outputs": [],
357 | "source": [
358 | "crew.run()"
359 | ]
360 | },
361 | {
362 | "cell_type": "code",
363 | "execution_count": null,
364 | "id": "02d92f91-47b1-4f86-ba72-408ed0989206",
365 | "metadata": {},
366 | "outputs": [],
367 | "source": []
368 | }
369 | ],
370 | "metadata": {
371 | "kernelspec": {
372 | "display_name": "Python 3 (ipykernel)",
373 | "language": "python",
374 | "name": "python3"
375 | },
376 | "language_info": {
377 | "codemirror_mode": {
378 | "name": "ipython",
379 | "version": 3
380 | },
381 | "file_extension": ".py",
382 | "mimetype": "text/x-python",
383 | "name": "python",
384 | "nbconvert_exporter": "python",
385 | "pygments_lexer": "ipython3",
386 | "version": "3.11.3"
387 | }
388 | },
389 | "nbformat": 4,
390 | "nbformat_minor": 5
391 | }
392 |
--------------------------------------------------------------------------------
/notebooks/reflection_pattern.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "id": "e1bc453d-c8d3-4503-b3da-52120ad92c74",
7 | "metadata": {
8 | "tags": []
9 | },
10 | "source": [
11 | "# Reflection Pattern\n",
12 | "\n",
13 | "The first pattern we are going to implement is the **reflection pattern**. \n",
14 | "\n",
15 | "---\n",
16 | "\n",
17 | "<img alt=\"Alt text\" src=\"../img/reflection_pattern.png\">\n",
18 | "\n",
19 | "---\n",
20 | "\n",
21 | "This pattern allows the LLM to reflect and critique its outputs, following these steps:\n",
22 | "\n",
23 | "1. The LLM **generates** a candidate output. If you look at the diagram above, it happens inside the **\"Generate\"** box.\n",
24 | "2. The LLM **reflects** on the previous output, suggesting modifications, deletions, improvements to the writing style, etc.\n",
25 | "3. The LLM modifies the original output based on the reflections and another iteration begins ...\n",
26 | "\n",
27 | "**Now, we are going to build, from scratch, each step, so that you can truly understand how this pattern works.**"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "id": "7898c34d-de9a-4970-b7f4-3d86b69d45a7",
33 | "metadata": {},
34 | "source": [
35 | "## Generation Step"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "id": "031f6b07-4f99-46f6-a53c-ff242585cbad",
41 | "metadata": {},
42 | "source": [
43 | "The first thing we need to consider is:\n",
44 | "\n",
45 | "> What do we want to generate? A poem? An essay? Python code?\n",
46 | "\n",
47 | "For this example, I've decided to test the Python coding skills of Llama3 70B (that's the LLM we are going to use for all the tutorials). In particular, we are going to ask our LLM to code a famous sorting algorithm: **Merge Sort**.
\n",
48 | "\n",
49 | "---\n",
50 | "\n",
51 | "<img alt=\"Alt text\" src=\"../img/mergesort.png\">"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "id": "73f4d7b7-40bf-43b9-a626-2a11d5529ac8",
57 | "metadata": {},
58 | "source": [
59 | "### Groq Client and relevant imports"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "id": "96731d2f-a079-4e41-9756-220f02d4ebd8",
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "import os\n",
70 | "from pprint import pprint\n",
71 | "from groq import Groq\n",
72 | "from dotenv import load_dotenv\n",
73 | "from IPython.display import display_markdown\n",
74 | "\n",
75 | "# Remember to load the environment variables. You should have the Groq API Key in there :)\n",
76 | "load_dotenv()\n",
77 | "\n",
78 | "client = Groq()"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "id": "e644a635-e035-44e2-8c25-cee0f2b56556",
84 | "metadata": {},
85 | "source": [
86 | "We will start the **\"generation\"** chat history with the system prompt, as we said before. In this case, let the LLM act like a Python \n",
87 | "programmer eager to receive feedback / critique from the user."
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "id": "12467256-c741-495a-9923-439c1fcf270d",
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "generation_chat_history = [\n",
98 | "    {\n",
99 | "        \"role\": \"system\",\n",
100 | "        \"content\": \"You are a Python programmer tasked with generating high quality Python code.\"\n",
101 | "        \"Your task is to generate the best content possible for the user's request. If the user provides critique,\" \n",
102 | "        \"respond with a revised version of your previous attempt.\"\n",
103 | "    }\n",
104 | "]"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "id": "43149b4f-54db-455f-9d39-6ad2f5c52b94",
110 | "metadata": {},
111 | "source": [
112 | "Now, as the user, we are going to ask the LLM to generate an implementation of the **Merge Sort** algorithm. Just add a new message with the **user** role to the chat history."
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": null,
118 | "id": "0742e7bd-4857-4ed1-a96b-37098d448bdd",
119 | "metadata": {},
120 | "outputs": [],
121 | "source": [
122 | "generation_chat_history.append(\n",
123 | "    {\n",
124 | "        \"role\": \"user\",\n",
125 | "        \"content\": \"Generate a Python implementation of the Merge Sort algorithm\"\n",
126 | "    }\n",
127 | ")"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "id": "4df1bffe-375f-4a9a-8433-e217eb94aea2",
133 | "metadata": {},
134 | "source": [
135 | "Let's generate the first version of the code."
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "id": "ff984277-733c-4495-b7fd-0669393380b8",
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "mergesort_code = client.chat.completions.create(\n",
146 | "    messages=generation_chat_history,\n",
147 | "    model=\"llama3-70b-8192\"\n",
148 | ").choices[0].message.content\n",
149 | "\n",
150 | "generation_chat_history.append(\n",
151 | "    {\n",
152 | "        \"role\": \"assistant\",\n",
153 | "        \"content\": mergesort_code\n",
154 | "    }\n",
155 | ")"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "id": "c03f208b-2234-4fd1-a02b-f4fff06c01a6",
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "display_markdown(mergesort_code, raw=True)"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "id": "6a04ebe5-0573-4520-a529-aff22d486b7d",
171 | "metadata": {},
172 | "source": [
173 | "## Reflection Step"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "id": "67aa69e4-632f-4a0c-a6f0-c5a7ced4849d",
179 | "metadata": {
180 | "tags": []
181 | },
182 | "source": [
183 | "Now, let's allow the LLM to reflect on its outputs by defining another system prompt. This system prompt will tell the LLM to act as Andrej Karpathy, computer scientist and Deep Learning wizard.\n",
184 | "\n",
185 | ">To be honest, I don't think acting like Andrej Karpathy will influence the LLM outputs, but it was fun :)"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "id": "166f626f-dc32-4a2a-920c-88bee73bdc8b",
191 | "metadata": {},
192 | "source": [
193 | "<img alt=\"Alt text\" src=\"../img/karpathy.png\">"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "id": "9d93c928-d585-48af-a74c-a5b8d84593c6",
200 | "metadata": {
201 | "tags": []
202 | },
203 | "outputs": [],
204 | "source": [
205 | "reflection_chat_history = [\n",
206 | "    {\n",
207 | "        \"role\": \"system\",\n",
208 | "        \"content\": \"You are Andrej Karpathy, an experienced computer scientist. You are tasked with generating critique and recommendations for the user's code\",\n",
209 | "    }\n",
210 | "]"
211 | ]
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "id": "c498175f-b3f9-40af-92a3-d5b36d77d1cf",
216 | "metadata": {},
217 | "source": [
218 | "The user message, in this case, is the code generated in the previous step. We simply add the `mergesort_code` to the `reflection_chat_history`."
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": null,
224 | "id": "26af1a73-4d91-40e8-a9bc-c34d32b2ab82",
225 | "metadata": {
226 | "tags": []
227 | },
228 | "outputs": [],
229 | "source": [
230 | "reflection_chat_history.append(\n",
231 | "    {\n",
232 | "        \"role\": \"user\",\n",
233 | "        \"content\": mergesort_code\n",
234 | "    }\n",
235 | ")"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "id": "bfa994c8-3612-47b0-9571-e21d0d73d896",
241 | "metadata": {},
242 | "source": [
243 | "Now, let's generate a critique of the Python code."
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "id": "40fee42f-d47a-41b1-a40d-7208ba76ce98",
250 | "metadata": {
251 | "tags": []
252 | },
253 | "outputs": [],
254 | "source": [
255 | "critique = client.chat.completions.create(\n",
256 | "    messages=reflection_chat_history,\n",
257 | "    model=\"llama3-70b-8192\"\n",
258 | ").choices[0].message.content"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": null,
264 | "id": "0fef3203-c7f1-407f-8b9b-4e8ae140a4cb",
265 | "metadata": {
266 | "tags": []
267 | },
268 | "outputs": [],
269 | "source": [
270 | "display_markdown(critique, raw=True)"
271 | ]
272 | },
273 | {
274 | "cell_type": "markdown",
275 | "id": "5df433b0-d662-4378-895e-6b09dd3201bc",
276 | "metadata": {},
277 | "source": [
278 | "Finally, we just need to add this *critique* to the `generation_chat_history`, in this case, as the `user` role."
279 | ]
280 | },
281 | {
282 | "cell_type": "code",
283 | "execution_count": null,
284 | "id": "27a85bb3-cf6a-4576-8caf-cd41e602a1f1",
285 | "metadata": {
286 | "tags": []
287 | },
288 | "outputs": [],
289 | "source": [
290 | "generation_chat_history.append(\n",
291 | "    {\n",
292 | "        \"role\": \"user\",\n",
293 | "        \"content\": critique\n",
294 | "    }\n",
295 | ")"
296 | ]
297 | },
298 | {
299 | "cell_type": "markdown",
300 | "id": "c3c1aefa-8454-41ab-af40-2675f340a577",
301 | "metadata": {},
302 | "source": [
303 | "## Generation Step (II)"
304 | ]
305 | },
306 | {
307 | "cell_type": "code",
308 | "execution_count": null,
309 | "id": "91d845cf-51c3-4cfd-b6a7-1b970413f6db",
310 | "metadata": {
311 | "tags": []
312 | },
313 | "outputs": [],
314 | "source": [
315 | "revised_code = client.chat.completions.create(\n",
316 | "    messages=generation_chat_history,\n",
317 | "    model=\"llama3-70b-8192\"\n",
318 | ").choices[0].message.content"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": null,
324 | "id": "ef14eaa8-f501-4efc-997f-8564ec8dccd8",
325 | "metadata": {
326 | "tags": []
327 | },
328 | "outputs": [],
329 | "source": [
330 | "display_markdown(revised_code, raw=True)"
331 | ]
332 | },
333 | {
334 | "cell_type": "markdown",
335 | "id": "75883af2-f31d-4c24-b1ff-315a0711f9fa",
336 | "metadata": {},
337 | "source": [
338 | "## And the iteration starts again ..."
339 | ]
340 | },
341 | {
342 | "cell_type": "markdown",
343 | "id": "a5b824d1-c17e-448c-bdd7-df543aa5a9fd",
344 | "metadata": {},
345 | "source": [
346 | "After **Generation Step (II)** the corrected Python code will be received, once again, by Karpathy. Then, the LLM will reflect on the corrected output, suggesting further improvements, and the loop will go on, over and over, for a total of **n** iterations.\n",
347 | "\n",
348 | "> There's another possibility. Suppose the Reflection step can't find any further improvement. In this case, we can tell the LLM to output some stop string, like \"OK\" or \"Good\", meaning the process can be stopped. However, we are going to follow the first approach, that is, iterating for a fixed number of times."
349 | ]
350 | },
351 | {
352 | "cell_type": "markdown",
353 | "id": "7cf2cf5b-d083-435c-914a-3ff484d53473",
354 | "metadata": {},
355 | "source": [
356 | "## Implementing a class"
357 | ]
358 | },
359 | {
360 | "cell_type": "markdown",
361 | "id": "15f9a9e6-29f3-4adf-863e-c49fbb9a6b44",
362 | "metadata": {},
363 | "source": [
364 | "Now that you understand the underlying loop of the Reflection Agent, let's implement this agent as a class."
365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": null, 370 | "id": "3f904241-29a1-4519-b6ab-15be0a7cfc53", 371 | "metadata": {}, 372 | "outputs": [], 373 | "source": [ 374 | "from agentic_patterns import ReflectionAgent" 375 | ] 376 | }, 377 | { 378 | "cell_type": "code", 379 | "execution_count": null, 380 | "id": "dd1a8071-c763-4dbf-8db7-60f9116f62e8", 381 | "metadata": {}, 382 | "outputs": [], 383 | "source": [ 384 | "agent = ReflectionAgent()" 385 | ] 386 | }, 387 | { 388 | "cell_type": "code", 389 | "execution_count": null, 390 | "id": "87c8cf16-0dfa-49b6-bc30-8f14bbe7860a", 391 | "metadata": {}, 392 | "outputs": [], 393 | "source": [ 394 | "generation_system_prompt = \"You are a Python programmer tasked with generating high quality Python code\"\n", 395 | "\n", 396 | "reflection_system_prompt = \"You are Andrej Karpathy, an experienced computer scientist\"\n", 397 | "\n", 398 | "user_msg = \"Generate a Python implementation of the Merge Sort algorithm\"" 399 | ] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "execution_count": null, 404 | "id": "6a9a3e5b-9b45-4a27-b391-f78b57ff94f1", 405 | "metadata": {}, 406 | "outputs": [], 407 | "source": [ 408 | "final_response = agent.run(\n", 409 | " user_msg=user_msg,\n", 410 | " generation_system_prompt=generation_system_prompt,\n", 411 | " reflection_system_prompt=reflection_system_prompt,\n", 412 | " n_steps=10,\n", 413 | " verbose=1,\n", 414 | ")" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "id": "4b69d182-d12e-40bb-8dfb-cbc8903218a1", 420 | "metadata": {}, 421 | "source": [ 422 | "## Final result" 423 | ] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": null, 428 | "id": "6e4663cd-61dd-4a38-866a-f032045a444a", 429 | "metadata": {}, 430 | "outputs": [], 431 | "source": [ 432 | "display_markdown(final_response, raw=True)" 433 | ] 434 | } 435 | ], 436 | "metadata": { 437 | "kernelspec": { 438 | "display_name": "Python 3 (ipykernel)", 439 | "language": "python", 440 | "name": "python3" 441 | }, 442 | "language_info": { 443 | "codemirror_mode": { 444 | "name": "ipython", 445 | "version": 3 446 | }, 447 | "file_extension": ".py", 448 | "mimetype": "text/x-python", 449 | "name": "python", 450 | "nbconvert_exporter": "python", 451 | "pygments_lexer": "ipython3", 452 | "version": "3.11.3" 453 | } 454 | }, 455 | "nbformat": 4, 456 | "nbformat_minor": 5 457 | } 458 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
<div align="center">
2 | <img alt="logo" src="img/agentic_patterns.png" />
3 | <h1>Agentic Patterns</h1>
4 | <h3>Implementing the agentic patterns using Groq</h3>
5 | </div>
6 |
7 | <div align="center">
8 | <img alt="logo" src="img/groq.png" />
9 | </div>
10 | 11 | > No LangChain, no LangGraph, no LlamaIndex, no CrewAI. Pure and simple API calls to Groq. 12 | 13 | ## Table of Contents 14 | 15 | - [1. Introduction](#introduction) 16 | - [1.1 Reflection Pattern](#reflection-pattern-) 17 | - [1.2 Tool Pattern](#tool-pattern--) 18 | - [1.3 Planning Pattern](#planning-pattern-) 19 | - [1.4 Multiagent Pattern](#multiagent-pattern-) 20 | 21 | - [2. The 4 Agentic Patterns](#the-4-agentic-patterns) 22 | 23 | - [3. Installation](#installation) 24 | - [4. Groq API Key](#groq-api-key) 25 | - [5. Usage](#usage) 26 | - [5.1 Using a Reflection Agent - Reflection Pattern](#using-a-reflection-agent---reflection-pattern) 27 | - [5.2 Creating and Using Tools - Tool Use Pattern](#creating-and-using-tools---tool-use-pattern) 28 | - [5.3 Reasoning with a ReAct Agent - Planning Pattern](#reasoning-with-a-react-agent---planning-pattern) 29 | - [5.4 Defining and running a Crew of Agents - MultiAgent Pattern](#defining-and-running-a-crew-of-agents---multiagent-pattern) 30 | - [6. Recommended Workflow](#recommended-workflow) 31 | - [7. Star History](#star-history) 32 | 33 | 34 | ## Introduction 35 | 36 | This repository contains an implementation of the 4 agentic patterns as defined by Andrew Ng in his DeepLearning.AI [blog article series](https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/?ref=dl-staging-website.ghost.io). 37 | 38 | Here’s a description of the four patterns we will be implementing. 39 | 40 | 41 | ## The 4 Agentic patterns 42 | 43 | ### Reflection Pattern 🤔 44 | 45 |
<div align="center">
46 | <img alt="logo" src="img/reflection_pattern.png" />
47 | </div>
48 |
49 | A very basic pattern but, despite its simplicity, it provides
50 | surprising performance gains in the quality of the LLM's responses.
51 |
52 | It allows the LLM to **reflect on its results**, suggesting
53 | modifications, additions, improvements in the writing style, etc.
54 |
55 | Want to see how this pattern is implemented? 💻
56 |
57 | - Check the [notebook](notebooks/reflection_pattern.ipynb) for a step by step explanation
58 | - Check the [`ReflectionAgent`](src/agentic_patterns/reflection_pattern/reflection_agent.py) for a complete Python implementation
59 |
60 |
61 | Take a look at the YouTube video! 👇
62 |
63 |
64 | 65 | Watch the video 66 | 67 |
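
If you'd rather see the idea in code first, here is a minimal from-scratch sketch of the loop described above. It assumes a `GROQ_API_KEY` in your environment and uses the same `llama3-70b-8192` model as the notebook; it's a bare-bones illustration, not the library's `ReflectionAgent`:

```python
from groq import Groq

client = Groq()  # assumes GROQ_API_KEY is set in the environment
MODEL = "llama3-70b-8192"

# Two chat histories: one for the generator, one for the critic
generation_history = [
    {"role": "system", "content": "You are a Python programmer. If the user provides critique, respond with a revised version of your previous attempt."},
    {"role": "user", "content": "Generate a Python implementation of the Merge Sort algorithm"},
]
reflection_history = [
    {"role": "system", "content": "You are an experienced computer scientist. Generate critique and recommendations for the user's code."},
]

for _ in range(3):  # a fixed number of iterations, as in the notebook
    # 1. Generate (or revise) a candidate output
    candidate = client.chat.completions.create(messages=generation_history, model=MODEL).choices[0].message.content
    generation_history.append({"role": "assistant", "content": candidate})

    # 2. Reflect: the candidate becomes the *user* message for the critic
    reflection_history.append({"role": "user", "content": candidate})
    critique = client.chat.completions.create(messages=reflection_history, model=MODEL).choices[0].message.content

    # 3. Feed the critique back to the generator and iterate
    generation_history.append({"role": "user", "content": critique})

# The last assistant message holds the most refined version
print(generation_history[-2]["content"])
```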
68 | 69 | --- 70 | 71 | ### Tool Pattern 🛠 72 | 73 |
<div align="center">
74 | <img alt="logo" src="img/tool_pattern.png" />
75 | </div>
76 |
77 | The information stored in the LLM weights is (usually) **not enough** to give accurate and insightful answers to our questions.
78 |
79 | That's why we need to provide the LLM with ways to access the outside world 🌍
80 |
81 | In practice, you can build tools for whatever you want (at the end of the day they are just functions the LLM can use): a tool that lets you access Wikipedia, another that analyses the content of YouTube videos, or one that calculates difficult integrals in Wolfram Alpha.
82 |
83 | **Tools** are the **secret sauce of agentic applications** and the possibilities are endless! 🥫
84 |
85 | Want to see how this pattern is implemented? 💻
86 |
87 | - Check the [notebook](notebooks/tool_pattern.ipynb) for a step by step explanation
88 | - Check the [`ToolAgent`](src/agentic_patterns/tool_pattern/tool_agent.py) for a complete Python implementation
89 | - Check the [`Tool`](src/agentic_patterns/tool_pattern/tool.py) to understand how Tools work under the hood.
90 |
91 | Take a look at the YouTube video! 👇
92 |
93 | 94 | Watch the video 95 | 96 |
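
To make the mechanism concrete before diving into the notebook, here is a tiny, self-contained sketch of the idea: expose a function's signature to the LLM, let it answer with a JSON "tool call", and dispatch that call in Python. It runs offline (the LLM reply is hard-coded so you can see the plumbing), and the signature format is illustrative rather than the library's actual `Tool` internals:

```python
import json

def get_current_weather(location: str, unit: str) -> str:
    """Returns a fake weather reading, just for demonstration purposes."""
    return json.dumps({"location": location, "temperature": 25, "unit": unit})

# What the LLM sees: the function's name, description and parameter types
fn_signature = {
    "name": "get_current_weather",
    "description": get_current_weather.__doc__,
    "parameters": {"location": "str", "unit": "str"},
}
system_prompt = f"You can call this tool by replying with a JSON object: {json.dumps(fn_signature)}"

# What the LLM might reply with (hard-coded here instead of a real completion)
llm_reply = '{"name": "get_current_weather", "arguments": {"location": "Madrid", "unit": "celsius"}}'

# Dispatch: look the tool up by name and run it with the provided arguments
tool_call = json.loads(llm_reply)
available_tools = {"get_current_weather": get_current_weather}
result = available_tools[tool_call["name"]](**tool_call["arguments"])
print(result)  # {"location": "Madrid", "temperature": 25, "unit": "celsius"}
```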
97 | 98 | --- 99 | 100 | ### Planning Pattern 🧠 101 | 102 |
<div align="center">
103 | <img alt="logo" src="img/planning_pattern.png" />
104 | </div>
105 |
106 | So, we've seen agents capable of reflecting and using tools to access the outside world. But ... **what about planning**,
107 | i.e. deciding what sequence of steps to follow to accomplish a large task?
108 |
109 | That is exactly what the Planning Pattern provides: ways for the LLM to break a task into **smaller, more easily accomplished subgoals** without losing track of the end goal.
110 |
111 | The most paradigmatic example of the planning pattern is the **ReAct** technique, displayed in the diagram above.
112 |
113 | Want to see how this pattern is implemented? 💻
114 |
115 | - Check the [notebook](notebooks/planning_pattern.ipynb) for a step by step explanation
116 | - Check the [`ReactAgent`](src/agentic_patterns/planning_pattern/react_agent.py) for a complete Python implementation
117 |
118 | Take a look at the YouTube video! 👇
119 |
120 |
121 | 122 | Watch the video 123 | 124 |
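
As a preview of what the notebook builds step by step, here is the whole ReAct loop compressed into one snippet. It assumes the `REACT_SYSTEM_PROMPT` constant and the `available_tools` dictionary defined in the planning notebook, plus a `GROQ_API_KEY` in your environment; the library's `ReactAgent` wraps this same loop in a class:

```python
import json
import re

from groq import Groq

client = Groq()  # assumes GROQ_API_KEY is set in the environment

chat_history = [
    {"role": "system", "content": REACT_SYSTEM_PROMPT},  # from the planning notebook
    {"role": "user", "content": "Sum 1234 and 5678, then take the logarithm of the result"},
]

for _ in range(10):  # hard cap on the number of Thought/Action/Observation rounds
    # Thought / Action: the model reasons and may request a tool call
    output = client.chat.completions.create(
        messages=chat_history, model="llama-3.1-70b-versatile"
    ).choices[0].message.content
    chat_history.append({"role": "assistant", "content": output})

    match = re.search(r"<tool_call>(.*?)</tool_call>", output, re.DOTALL)
    if match is None:
        break  # no tool requested: the model has produced its final response

    # Observation: run the requested tool and feed the result back
    tool_call = json.loads(match.group(1))
    tool_result = available_tools[tool_call["name"]].run(**tool_call["arguments"])
    chat_history.append({"role": "user", "content": f"{tool_result}"})

print(output)
```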
125 | 126 | --- 127 | 128 | ### Multiagent Pattern 🧑🏽‍🤝‍🧑🏻 129 | 130 |
<div align="center">
131 | <img alt="logo" src="img/multiagent_pattern.png" />
132 | </div>
133 |
134 | You may have heard about frameworks like CrewAI or AutoGen, which allow you to create multi-agent applications.
135 |
136 | These frameworks implement different variations of the multi-agent pattern, in which tasks are divided into **smaller subtasks executed by different roles** (e.g. one agent can be a software engineer, another a project manager, etc.)
137 |
138 | Want to see how this pattern is implemented? 💻
139 |
140 | - Check the [notebook](notebooks/multiagent_pattern.ipynb) for a step by step explanation
141 | - Check the [`Agent`](src/agentic_patterns/multiagent_pattern/agent.py) to see how to implement
142 | an `Agent`, a member of the `Crew`.
143 | - Check the [`Crew`](src/agentic_patterns/multiagent_pattern/crew.py) to see how to implement
144 | the `Crew`.
145 |
146 | Take a look at the YouTube video! 👇
147 |
148 |
149 | 150 | Watch the video 151 | 152 |
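
One detail worth understanding before reading `Crew` is how a crew can decide the order in which agents run: the `>>` operator builds a DAG of dependencies, so a topological sort yields a valid execution order. Here is a sketch using Kahn's algorithm over the `dependencies`/`dependents` lists that `Agent` maintains; it illustrates the idea, and is not necessarily how `Crew.run` is implemented internally:

```python
from collections import deque

def sort_agents(agents):
    """Return the agents in an order where every dependency runs first."""
    in_degree = {agent: len(agent.dependencies) for agent in agents}
    queue = deque(agent for agent in agents if in_degree[agent] == 0)
    ordered = []
    while queue:
        agent = queue.popleft()
        ordered.append(agent)
        for dependent in agent.dependents:
            in_degree[dependent] -= 1
            if in_degree[dependent] == 0:
                queue.append(dependent)
    if len(ordered) != len(agents):
        raise ValueError("Cycle detected: agent dependencies must form a DAG")
    return ordered

# Running agents in this order guarantees that each agent has already
# received context from all of its dependencies when run() is called:
#
#   for agent in sort_agents(crew_agents):
#       agent.run()
```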
153 |
154 | ---
155 |
156 | ## Installation
157 |
158 | If you take a look at any of the notebooks in the [notebooks/](notebooks) folder you'll see some helper methods and classes being imported from this library: `agentic-patterns`.
159 |
160 | This is the library implemented in the [src/](src) folder, and contains a full implementation of the 4 patterns and related helper methods.
161 |
162 | To install this library, you have two options.
163 |
164 | ### Option 1: Use [Poetry](https://python-poetry.org/):
165 |
166 | ```sh
167 | poetry install
168 | ```
169 |
170 | ### Option 2: Install the PyPI library
171 |
172 | ```sh
173 | pip install -U agentic-patterns
174 | ```
175 |
176 | ---
177 |
178 | ## Groq API Key
179 |
180 | This project works with [Groq](https://groq.com/) as the LLM provider, so you'll need to create an API Key on this platform.
181 |
182 | When you have your API Key, copy and paste it into an `.env` file.
183 |
184 | The content of this `.env` file should match the structure of the `.env.example` I've created in the repo, but instead of an empty string, the `GROQ_API_KEY` var will contain your API Key.
185 |
186 | ---
187 |
188 | ## Usage
189 |
190 | Once you have both the library installed and the `.env` file created, you can start playing with the 4 patterns implementation (I encourage you to take a look at the code, to fully understand what is happening under the hood).
191 |
192 | Let's see an example of how to put the 4 patterns into practice.
193 |
194 | ---
195 |
196 | ### Using a Reflection Agent - Reflection Pattern
197 |
198 | Here is an example of how to use a Reflection Agent.
199 |
200 | ```python
201 | from agentic_patterns import ReflectionAgent
202 |
203 | agent = ReflectionAgent()
204 |
205 | generation_system_prompt = "You are a Python programmer tasked with generating high quality Python code"
206 |
207 | reflection_system_prompt = "You are Andrej Karpathy, an experienced computer scientist"
208 |
209 | user_msg = "Generate a Python implementation of the Merge Sort algorithm"
210 |
211 |
212 | final_response = agent.run(
213 |     user_msg=user_msg,
214 |     generation_system_prompt=generation_system_prompt,
215 |     reflection_system_prompt=reflection_system_prompt,
216 |     n_steps=10,
217 |     verbose=1,
218 | )
219 |
220 | print(final_response)
221 | ```
222 |
223 | ### Creating and Using Tools - Tool Use Pattern
224 |
225 | An example of how to create a custom tool and bind it to a Tool Agent.
226 |
227 | First, let's create the tool. In this case, I'll be creating a tool for interacting with Hacker News. To define a tool, we just need to decorate the Python function with the `@tool` decorator.
228 |
229 | ```python
230 | import json
231 | import requests
232 | from agentic_patterns.tool_pattern.tool import tool
233 | from agentic_patterns.tool_pattern.tool_agent import ToolAgent
234 |
235 | @tool
236 | def fetch_top_hacker_news_stories(top_n: int):
237 |     """
238 |     Fetch the top stories from Hacker News.
239 |
240 |     This function retrieves the top `top_n` stories from Hacker News using the Hacker News API.
241 |     Each story contains the title, URL, score, author, and time of submission. The data is fetched
242 |     from the official Firebase Hacker News API, which returns story details in JSON format.
243 |
244 |     Args:
245 |         top_n (int): The number of top stories to retrieve.
246 |     """
247 |     top_stories_url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
248 |
249 |     try:
250 |         response = requests.get(top_stories_url)
251 |         response.raise_for_status()  # Check for HTTP errors
252 |
253 |         # Get the top story IDs
254 |         top_story_ids = response.json()[:top_n]
255 |
256 |         top_stories = []
257 |
258 |         # For each story ID, fetch the story details
259 |         for story_id in top_story_ids:
260 |             story_url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json'
261 |             story_response = requests.get(story_url)
262 |             story_response.raise_for_status()  # Check for HTTP errors
263 |             story_data = story_response.json()
264 |
265 |             # Append the story title and URL (or other relevant info) to the list
266 |             top_stories.append({
267 |                 'title': story_data.get('title', 'No title'),
268 |                 'url': story_data.get('url', 'No URL available'),
269 |             })
270 |
271 |         return json.dumps(top_stories)
272 |
273 |     except requests.exceptions.RequestException as e:
274 |         print(f"An error occurred: {e}")
275 |         return []
276 | ```
277 |
278 | Next, let's define the Agent.
279 |
280 | ```python
281 | tool_agent = ToolAgent(tools=[fetch_top_hacker_news_stories])
282 |
283 | output = tool_agent.run(user_msg="Tell me the top 5 Hacker News stories right now")
284 |
285 | print(output)
286 | ```
287 |
288 | ### Reasoning with a ReAct Agent - Planning Pattern
289 |
290 | As a paradigmatic example of the Planning Pattern, `agentic-patterns` offers an implementation of a ReAct Agent.
291 |
292 | The `ReactAgent` is an evolution of the `ToolAgent`, extending its planning and reasoning capabilities.
293 |
294 | As we did before, we'll begin by defining the tools. In this case, I'll create three (the snippet assumes `math` has been imported and that the `@tool` decorator is already in scope from the previous example).
295 |
296 | ```python
297 | @tool
298 | def sum_two_elements(a: int, b: int) -> int:
299 |     """
300 |     Computes the sum of two integers.
301 |
302 |     Args:
303 |         a (int): The first integer to be summed.
304 |         b (int): The second integer to be summed.
305 |
306 |     Returns:
307 |         int: The sum of `a` and `b`.
308 |     """
309 |     return a + b
310 |
311 |
312 | @tool
313 | def multiply_two_elements(a: int, b: int) -> int:
314 |     """
315 |     Multiplies two integers.
316 |
317 |     Args:
318 |         a (int): The first integer to multiply.
319 |         b (int): The second integer to multiply.
320 |
321 |     Returns:
322 |         int: The product of `a` and `b`.
323 |     """
324 |     return a * b
325 |
326 | @tool
327 | def compute_log(x: int) -> float | str:
328 |     """
329 |     Computes the natural logarithm of an integer `x`.
330 |
331 |     Args:
332 |         x (int): The integer value for which the logarithm is computed. Must be greater than 0.
333 |
334 |     Returns:
335 |         float | str: The natural logarithm of `x`, or an error message if `x` is not positive.
336 |     """
337 |     if x <= 0:
338 |         return "Logarithm is undefined for values less than or equal to 0."
339 |
340 |     return math.log(x)
341 | ```
342 |
343 | Now, let's create the agent.
344 |
345 | ```python
346 | from agentic_patterns.planning_pattern.react_agent import ReactAgent
347 |
348 | agent = ReactAgent(tools=[sum_two_elements, multiply_two_elements, compute_log])
349 |
350 | agent.run(user_msg="I want to calculate the sum of 1234 and 5678 and multiply the result by 5. Then, I want to take the logarithm of this result")
351 | ```
352 |
353 | ### Defining and running a Crew of Agents - MultiAgent Pattern
354 |
355 | For the Multiagent Pattern, I decided to use two of [CrewAI](https://www.crewai.com/)'s abstractions: the Agent and the Crew.
356 |
357 | Additionally, I've also borrowed some ideas from [Airflow](https://airflow.apache.org/), defining the dependency between agents using the `>>` operator.
358 |
359 | Let's see an example:
360 |
361 | ```python
362 | from agentic_patterns.multiagent_pattern.crew import Crew
363 | from agentic_patterns.multiagent_pattern.agent import Agent
364 | # write_str_to_txt is the @tool-decorated helper shown in the multiagent notebook
365 | with Crew() as crew:
366 |     agent_1 = Agent(
367 |         name="Poet Agent",
368 |         backstory="You are a well-known poet, who enjoys creating high quality poetry.",
369 |         task_description="Write a poem about the meaning of life",
370 |         task_expected_output="Just output the poem, without any title or introductory sentences",
371 |     )
372 |
373 |     agent_2 = Agent(
374 |         name="Poem Translator Agent",
375 |         backstory="You are an expert translator especially skilled in Spanish",
376 |         task_description="Translate a poem into Spanish",
377 |         task_expected_output="Just output the translated poem and nothing else"
378 |     )
379 |
380 |     agent_3 = Agent(
381 |         name="Writer Agent",
382 |         backstory="You are an expert transcriber, that loves writing poems into txt files",
383 |         task_description="You'll receive a Spanish poem in your context. You need to write the poem into './poem.txt' file",
384 |         task_expected_output="A txt file containing the Spanish poem received from the context",
385 |         tools=write_str_to_txt,
386 |     )
387 |
388 |     agent_1 >> agent_2 >> agent_3
389 | ```
390 |
391 | We can also plot the Crew, to see the DAG structure, like this:
392 |
393 | ```python
394 | crew.plot()
395 | ```
396 |
397 | For the previous Crew, you should get something like this:
398 |
399 | <div align="center">
400 | <img alt="alt text" src="img/dag.svg" />
401 | </div>
402 |
403 | To run the Crew, simply run:
404 |
405 | ```python
406 | crew.run()
407 | ```
408 |
409 | ## Recommended Workflow
410 |
411 | This is **an educational project** and not an agentic framework.
412 |
413 | The reason I've decided to implement the 4 Agentic Patterns from scratch was to deeply understand their basics and also to teach them to anyone interested.
414 |
415 | Given this, this is my recommended learning workflow:
416 |
417 | 1️⃣ Start with the YouTube video, ideally following my explanations with your own Jupyter Notebook.
418 |
419 | 2️⃣ Play with the code in the Jupyter Notebook: make changes, modify the prompts, create new examples etc. Get comfortable with the pattern fundamentals and basic concepts.
420 |
421 | 3️⃣ (Optional) Read through the library implementation of each pattern. This will require more effort and more Python knowledge, but if you take the time, I can assure you you'll benefit a lot from it.
422 |
423 | ```mermaid
424 | flowchart TD;
425 |     Video --> Notebook;
426 |     Notebook --> Code
427 | classDef centered text-align:center;
428 | ```
429 |
430 | ## Star History
431 |
432 | [![Star History Chart](https://api.star-history.com/svg?repos=neural-maze/agentic_patterns&type=Date)](https://star-history.com/#neural-maze/agentic_patterns&Date)
--------------------------------------------------------------------------------
/notebooks/planning_pattern.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "15ba0126-56d3-426e-977f-aee8a94646a6",
6 | "metadata": {},
7 | "source": [
8 | "# Planning Pattern - ReAct Technique"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "d71b47fa-4265-49ef-8e1f-cf8249e12a3f",
14 | "metadata": {},
15 | "source": [
16 | "<img alt=\"Alt text\" src=\"../img/planning_pattern.png\">\n",
17 | "\n",
18 | "---\n",
19 | "\n",
20 | "So, we've seen agents capable of reflecting and using tools to access the outside world. But ... **what about planning**,\n",
21 | "i.e. deciding what sequence of steps to follow to accomplish a large task?\n",
22 | "\n",
23 | "That is exactly what the Planning Pattern provides: ways for the LLM to break a task into **smaller, more easily accomplished subgoals** without losing track of the end goal.\n",
24 | "\n",
25 | "The most paradigmatic example of the planning pattern is the [**ReAct**](https://react-lm.github.io/) technique, displayed in the diagram above.\n",
26 | "\n",
27 | "In this notebook, you'll learn how this technique actually works. This is the **third lesson** of the \"Agentic Patterns from Scratch\" series. Take a look\n",
28 | "at the previous lessons if you haven't!\n",
29 | "\n",
30 | "* [First Lesson: The Reflection Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/reflection_pattern.ipynb)\n",
31 | "* [Second Lesson: The Tool Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/tool_pattern.ipynb)"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "id": "bd0dc83a-f11b-469d-bb2c-afbd91f39c5e",
37 | "metadata": {},
38 | "source": [
39 | "## Relevant imports and Groq Client"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "id": "605420e2-4bab-4d0a-90ac-7b9a95fd9976",
45 | "metadata": {},
46 | "source": [
47 | "We start by importing all the libraries we'll be using in this tutorial as well as the Groq client."
48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "056fc0ed-7ded-490b-ae0b-e7d1fd71430c", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "import os\n", 58 | "import re\n", 59 | "import math\n", 60 | "import json\n", 61 | "from dotenv import load_dotenv\n", 62 | "\n", 63 | "from groq import Groq\n", 64 | "\n", 65 | "from agentic_patterns.tool_pattern.tool import tool\n", 66 | "from agentic_patterns.utils.extraction import extract_tag_content\n", 67 | "\n", 68 | "\n", 69 | "# Remember to load the environment variables. You should have the Groq API Key in there :)\n", 70 | "load_dotenv()\n", 71 | "\n", 72 | "MODEL = \"llama-3.1-70b-versatile\"\n", 73 | "GROQ_CLIENT = Groq()" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "id": "46f4363c-b6e0-4c4a-aa08-ad243ddf7911", 79 | "metadata": {}, 80 | "source": [ 81 | "> If you are not familiar with the `tool` decorator, changes are you are missed the previous tutorial about the Tool Pattern. Check the video [here](https://www.youtube.com/watch?v=ApoDzZP8_ck&t=671s&ab_channel=TheNeuralMaze)." 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "id": "af6735f3-00d3-4a41-acb8-f4afb4a759d4", 87 | "metadata": {}, 88 | "source": [ 89 | "## A System Prompt for the ReAct Loop" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "id": "69b721b1-cedf-4e61-9d0b-0c090972efce", 95 | "metadata": {}, 96 | "source": [ 97 | "As we did with the Tool Pattern, we also need a System Prompt for the ReAct technique. This System Prompt is very similar, the difference is that it describes the ReAct loop, so that the LLM is aware of\n", 98 | "the three operations it's allowed to use:\n", 99 | "\n", 100 | "1. Thought: The LLM will think about which action to take\n", 101 | "2. Action: The LLM will use a Tool to \"act on the environment\"\n", 102 | "3. Observation: The LLM will observe the tool output and reflect on the next thing to do.\n", 103 | "\n", 104 | "Another key difference from the Tool Pattern System Prompt is that we are going to enclose all the messages with tags, like these: , . We could implement the ReAct logic without these tags, but I found it eeasier for the LLM to understand the instructions this way.\n", 105 | "\n", 106 | "Ok! So without further ado, there's the prompt!" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "id": "87279781-38d4-45df-b8b5-e41c587ba38b", 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "# Define the System Prompt as a constant\n", 117 | "REACT_SYSTEM_PROMPT = \"\"\"\n", 118 | "You are a function calling AI model. You operate by running a loop with the following steps: Thought, Action, Observation.\n", 119 | "You are provided with function signatures within XML tags.\n", 120 | "You may call one or more functions to assist with the user query. Don' make assumptions about what values to plug\n", 121 | "into functions. Pay special attention to the properties 'types'. 
You should use those types as in a Python dict.\n",
122 | "\n",
123 | "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n",
124 | "\n",
125 | "<tool_call>\n",
126 | "{\"name\": <function-name>,\"arguments\": <args-dict>, \"id\": <monotonically-increasing-id>}\n",
127 | "</tool_call>\n",
128 | "\n",
129 | "Here are the available tools / actions:\n",
130 | "\n",
131 | "<tools>\n",
132 | "%s\n",
133 | "</tools>\n",
134 | "\n",
135 | "Example session:\n",
136 | "\n",
137 | "<question>What's the current temperature in Madrid?</question>\n",
138 | "<thought>I need to get the current weather in Madrid</thought>\n",
139 | "<tool_call>{\"name\": \"get_current_weather\",\"arguments\": {\"location\": \"Madrid\", \"unit\": \"celsius\"}, \"id\": 0}</tool_call>\n",
140 | "\n",
141 | "You will be called again with this:\n",
142 | "\n",
143 | "<observation>{0: {\"temperature\": 25, \"unit\": \"celsius\"}}</observation>\n",
144 | "\n",
145 | "You then output:\n",
146 | "\n",
147 | "<response>The current temperature in Madrid is 25 degrees Celsius</response>\n",
148 | "\n",
149 | "Additional constraints:\n",
150 | "\n",
151 | "- If the user asks you something unrelated to any of the tools above, answer freely enclosing your answer with <response></response> tags.\n",
152 | "\"\"\""
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "id": "c3db5712-97dd-424c-bdf1-5da0970da03a",
158 | "metadata": {},
159 | "source": [
160 | "## Example step by step"
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "id": "ddd00354-7ebc-44ec-9606-0a178af59b44",
166 | "metadata": {},
167 | "source": [
168 | "### Defining the Tools"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "id": "4c6360cd-b9f2-4427-91bc-27f010147c04",
174 | "metadata": {},
175 | "source": [
176 | "Let's build an example that involves the use of three tools, like the following ones."
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "id": "c1d3bec1-679d-4d80-bf41-bf54b00e985c",
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "@tool\n",
187 | "def sum_two_elements(a: int, b: int) -> int:\n",
188 | "    \"\"\"\n",
189 | "    Computes the sum of two integers.\n",
190 | "\n",
191 | "    Args:\n",
192 | "        a (int): The first integer to be summed.\n",
193 | "        b (int): The second integer to be summed.\n",
194 | "\n",
195 | "    Returns:\n",
196 | "        int: The sum of `a` and `b`.\n",
197 | "    \"\"\"\n",
198 | "    return a + b\n",
199 | "\n",
200 | "\n",
201 | "@tool\n",
202 | "def multiply_two_elements(a: int, b: int) -> int:\n",
203 | "    \"\"\"\n",
204 | "    Multiplies two integers.\n",
205 | "\n",
206 | "    Args:\n",
207 | "        a (int): The first integer to multiply.\n",
208 | "        b (int): The second integer to multiply.\n",
209 | "\n",
210 | "    Returns:\n",
211 | "        int: The product of `a` and `b`.\n",
212 | "    \"\"\"\n",
213 | "    return a * b\n",
214 | "\n",
215 | "@tool\n",
216 | "def compute_log(x: int) -> float | str:\n",
217 | "    \"\"\"\n",
218 | "    Computes the natural logarithm of an integer `x`.\n",
219 | "\n",
220 | "    Args:\n",
221 | "        x (int): The integer value for which the logarithm is computed. 
Must be greater than 0.\n",
222 | "\n",
223 | "    Returns:\n",
224 | "        float | str: The natural logarithm of `x`, or an error message if `x` is not positive.\n",
225 | "    \"\"\"\n",
226 | "    if x <= 0:\n",
227 | "        return \"Logarithm is undefined for values less than or equal to 0.\"\n",
228 | "    \n",
229 | "    return math.log(x)\n",
230 | "\n",
231 | "\n",
232 | "available_tools = {\n",
233 | "    \"sum_two_elements\": sum_two_elements,\n",
234 | "    \"multiply_two_elements\": multiply_two_elements,\n",
235 | "    \"compute_log\": compute_log\n",
236 | "}"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "id": "98c3044e-5d5c-44d0-973b-0c7c31f9724b",
242 | "metadata": {},
243 | "source": [
244 | "Remember that the `@tool` decorator allows us to convert a Python function into a `Tool` automatically. We can check that very easily with some of the functions above."
245 | ]
246 | },
247 | {
248 | "cell_type": "code",
249 | "execution_count": null,
250 | "id": "2b7e2078-9c5d-4687-8c1b-56b36a0194db",
251 | "metadata": {},
252 | "outputs": [],
253 | "source": [
254 | "print(\"Tool name: \", sum_two_elements.name)\n",
255 | "print(\"Tool signature: \", sum_two_elements.fn_signature)"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "id": "292a747b-f48a-4c36-b62f-b528466c53a8",
261 | "metadata": {},
262 | "source": [
263 | "### Adding the Tools signature to the System Prompt"
264 | ]
265 | },
266 | {
267 | "cell_type": "markdown",
268 | "id": "67cabcdb-4dc4-49b3-9a9a-fc79e1ad12b0",
269 | "metadata": {},
270 | "source": [
271 | "Now, we just concatenate the tools signatures and add them to the System Prompt."
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": null,
277 | "id": "b464ea29-400c-482a-83f8-d64fba727ff8",
278 | "metadata": {},
279 | "outputs": [],
280 | "source": [
281 | "tools_signature = sum_two_elements.fn_signature + \",\\n\" + multiply_two_elements.fn_signature + \",\\n\" + compute_log.fn_signature"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": null,
287 | "id": "501dd7c6-3ef2-46fb-bc01-a888692b8fa5",
288 | "metadata": {},
289 | "outputs": [],
290 | "source": [
291 | "print(tools_signature)"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "id": "b52ea466-79c0-4898-9c0b-69a07c2ad1db",
298 | "metadata": {},
299 | "outputs": [],
300 | "source": [
301 | "REACT_SYSTEM_PROMPT = REACT_SYSTEM_PROMPT % tools_signature"
302 | ]
303 | },
304 | {
305 | "cell_type": "code",
306 | "execution_count": null,
307 | "id": "40801b47-0021-4800-8364-9c515db1e419",
308 | "metadata": {},
309 | "outputs": [],
310 | "source": [
311 | "print(REACT_SYSTEM_PROMPT)"
312 | ]
313 | },
314 | {
315 | "cell_type": "markdown",
316 | "id": "b2b1fd6c-253a-4a40-ab29-4d68fa976ac1",
317 | "metadata": {},
318 | "source": [
319 | "### ReAct Loop Step 1"
320 | ]
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": null,
325 | "id": "cb771338-8865-4e37-8b8b-02a949f67c41",
326 | "metadata": {},
327 | "outputs": [],
328 | "source": [
329 | "USER_QUESTION = \"I want to calculate the sum of 1234 and 5678 and multiply the result by 5.
Then, I want to take the logarithm of this result\"\n", 330 | "chat_history = [\n", 331 | " {\n", 332 | " \"role\": \"system\",\n", 333 | " \"content\": REACT_SYSTEM_PROMPT\n", 334 | " },\n", 335 | " {\n", 336 | " \"role\": \"user\",\n", 337 | " \"content\": f\"{USER_QUESTION}\"\n", 338 | " }\n", 339 | "]\n" 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": null, 345 | "id": "61faa368-e936-4bf2-9a88-df2a35bb205e", 346 | "metadata": {}, 347 | "outputs": [], 348 | "source": [ 349 | "output = GROQ_CLIENT.chat.completions.create(\n", 350 | " messages=chat_history,\n", 351 | " model=MODEL\n", 352 | ").choices[0].message.content\n", 353 | "\n", 354 | "print(output)" 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": null, 360 | "id": "9514f9ab-c162-44f6-bc51-247061f811de", 361 | "metadata": {}, 362 | "outputs": [], 363 | "source": [ 364 | "chat_history.append(\n", 365 | " {\n", 366 | " \"role\": \"assistant\",\n", 367 | " \"content\": output\n", 368 | " }\n", 369 | ")" 370 | ] 371 | }, 372 | { 373 | "cell_type": "markdown", 374 | "id": "cf7ce6ab-7325-4844-b4d1-1ba590d8aba9", 375 | "metadata": {}, 376 | "source": [ 377 | "### ReAct Loop Step 2" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "id": "5a207669-3ced-43ee-8aad-c8ebf347f797", 384 | "metadata": {}, 385 | "outputs": [], 386 | "source": [ 387 | "tool_call = extract_tag_content(output, tag=\"tool_call\")" 388 | ] 389 | }, 390 | { 391 | "cell_type": "code", 392 | "execution_count": null, 393 | "id": "4614fe15-efa3-4111-b4e7-eb04e160f5f0", 394 | "metadata": {}, 395 | "outputs": [], 396 | "source": [ 397 | "tool_call" 398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": null, 403 | "id": "3b7a6279-8cd6-4d18-8a9d-90ea04e552ed", 404 | "metadata": {}, 405 | "outputs": [], 406 | "source": [ 407 | "tool_call = json.loads(tool_call.content[0])" 408 | ] 409 | }, 410 | { 411 | "cell_type": "code", 412 | "execution_count": null, 413 | "id": "f5835594-0105-4452-9aac-7ca76fafaf6c", 414 | "metadata": {}, 415 | "outputs": [], 416 | "source": [ 417 | "tool_call" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": null, 423 | "id": "34259246-b8b6-4ae9-aa4b-1bf705fdb3d0", 424 | "metadata": {}, 425 | "outputs": [], 426 | "source": [ 427 | "tool_result = available_tools[tool_call[\"name\"]].run(**tool_call[\"arguments\"])" 428 | ] 429 | }, 430 | { 431 | "cell_type": "code", 432 | "execution_count": null, 433 | "id": "8724515b-4ba8-46a6-83fc-be472c2f1bd0", 434 | "metadata": {}, 435 | "outputs": [], 436 | "source": [ 437 | "assert tool_result == 1234 + 5678" 438 | ] 439 | }, 440 | { 441 | "cell_type": "code", 442 | "execution_count": null, 443 | "id": "a8196b58-708b-4c9c-a090-eb4ad213a927", 444 | "metadata": {}, 445 | "outputs": [], 446 | "source": [ 447 | "chat_history.append(\n", 448 | " {\n", 449 | " \"role\": \"user\",\n", 450 | " \"content\": f\"{tool_result}\"\n", 451 | " }\n", 452 | ")" 453 | ] 454 | }, 455 | { 456 | "cell_type": "markdown", 457 | "id": "608684c2-18c3-4e13-885e-a8f1b5e40caa", 458 | "metadata": {}, 459 | "source": [ 460 | "### ReAct Loop Step 3" 461 | ] 462 | }, 463 | { 464 | "cell_type": "code", 465 | "execution_count": null, 466 | "id": "4eb27636-9080-44bd-991c-5f89f96e2675", 467 | "metadata": {}, 468 | "outputs": [], 469 | "source": [ 470 | "output = GROQ_CLIENT.chat.completions.create(\n", 471 | " messages=chat_history,\n", 472 | " model=MODEL\n", 473 | ").choices[0].message.content\n", 474 | 
"\n", 475 | "print(output)" 476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": null, 481 | "id": "34603c55-b114-4ef7-be3a-ef0148eaddb8", 482 | "metadata": {}, 483 | "outputs": [], 484 | "source": [ 485 | "chat_history.append(\n", 486 | " {\n", 487 | " \"role\": \"assistant\",\n", 488 | " \"content\": output\n", 489 | " }\n", 490 | ")" 491 | ] 492 | }, 493 | { 494 | "cell_type": "markdown", 495 | "id": "0c855578-49e3-4bff-981c-16659ec1b4a4", 496 | "metadata": {}, 497 | "source": [ 498 | "### ReAct Loop Step 4" 499 | ] 500 | }, 501 | { 502 | "cell_type": "code", 503 | "execution_count": null, 504 | "id": "8d494dab-7e1d-44e8-a91d-685731d76864", 505 | "metadata": {}, 506 | "outputs": [], 507 | "source": [ 508 | "tool_call = extract_tag_content(output, tag=\"tool_call\")\n", 509 | "tool_call = json.loads(tool_call.content[0])\n", 510 | "tool_result = available_tools[tool_call[\"name\"]].run(**tool_call[\"arguments\"])" 511 | ] 512 | }, 513 | { 514 | "cell_type": "code", 515 | "execution_count": null, 516 | "id": "33420c29-428e-4392-9c52-57b77d2fcc32", 517 | "metadata": {}, 518 | "outputs": [], 519 | "source": [ 520 | "tool_result" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": null, 526 | "id": "0b61013e-be7a-457e-8a57-329266b29cc0", 527 | "metadata": {}, 528 | "outputs": [], 529 | "source": [ 530 | "assert tool_result == (1234 + 5678) * 5" 531 | ] 532 | }, 533 | { 534 | "cell_type": "code", 535 | "execution_count": null, 536 | "id": "d422e9d2-773b-461a-b04b-5b0de30d59e5", 537 | "metadata": {}, 538 | "outputs": [], 539 | "source": [ 540 | "chat_history.append(\n", 541 | " {\n", 542 | " \"role\": \"user\",\n", 543 | " \"content\": f\"{tool_result}\"\n", 544 | " }\n", 545 | ")" 546 | ] 547 | }, 548 | { 549 | "cell_type": "markdown", 550 | "id": "7b66d90d-6386-4026-8ea5-aa37a143c21c", 551 | "metadata": {}, 552 | "source": [ 553 | "### ReAct Loop Step 5" 554 | ] 555 | }, 556 | { 557 | "cell_type": "code", 558 | "execution_count": null, 559 | "id": "9c5beee0-8684-4948-8e34-084dcab98eb0", 560 | "metadata": {}, 561 | "outputs": [], 562 | "source": [ 563 | "output = GROQ_CLIENT.chat.completions.create(\n", 564 | " messages=chat_history,\n", 565 | " model=MODEL\n", 566 | ").choices[0].message.content\n", 567 | "\n", 568 | "print(output)" 569 | ] 570 | }, 571 | { 572 | "cell_type": "code", 573 | "execution_count": null, 574 | "id": "297ce32b-7460-477a-bd5f-2ddd8a45bf11", 575 | "metadata": {}, 576 | "outputs": [], 577 | "source": [ 578 | "chat_history.append(\n", 579 | " {\n", 580 | " \"role\": \"assistant\",\n", 581 | " \"content\": output\n", 582 | " }\n", 583 | ")" 584 | ] 585 | }, 586 | { 587 | "cell_type": "markdown", 588 | "id": "4c1a336d-2795-41b4-88af-6fed9cebea60", 589 | "metadata": {}, 590 | "source": [ 591 | "### ReAct Loop Step 6" 592 | ] 593 | }, 594 | { 595 | "cell_type": "code", 596 | "execution_count": null, 597 | "id": "97eeb277-1486-4d8f-8441-8adbde84389e", 598 | "metadata": {}, 599 | "outputs": [], 600 | "source": [ 601 | "tool_call = extract_tag_content(output, tag=\"tool_call\")\n", 602 | "tool_call = json.loads(tool_call.content[0])\n", 603 | "tool_result = available_tools[tool_call[\"name\"]].run(**tool_call[\"arguments\"])" 604 | ] 605 | }, 606 | { 607 | "cell_type": "code", 608 | "execution_count": null, 609 | "id": "d77818e5-ae29-42d4-8c6e-4527bcf56959", 610 | "metadata": {}, 611 | "outputs": [], 612 | "source": [ 613 | "tool_result" 614 | ] 615 | }, 616 | { 617 | "cell_type": "code", 618 | "execution_count": null, 619 | 
"id": "694284bb-64de-4f48-b0e2-c870b129a225", 620 | "metadata": {}, 621 | "outputs": [], 622 | "source": [ 623 | "assert tool_result == math.log((1234 + 5678) * 5)" 624 | ] 625 | }, 626 | { 627 | "cell_type": "code", 628 | "execution_count": null, 629 | "id": "c26c8ae7-a6f3-4d8c-b509-fb39054684bf", 630 | "metadata": {}, 631 | "outputs": [], 632 | "source": [ 633 | "chat_history.append(\n", 634 | " {\n", 635 | " \"role\": \"user\",\n", 636 | " \"content\": f\"{tool_result}\"\n", 637 | " }\n", 638 | ")" 639 | ] 640 | }, 641 | { 642 | "cell_type": "markdown", 643 | "id": "72c74a9e-edbe-487a-9cda-17260006e639", 644 | "metadata": {}, 645 | "source": [ 646 | "### ReAct Loop Step 7" 647 | ] 648 | }, 649 | { 650 | "cell_type": "code", 651 | "execution_count": null, 652 | "id": "d7faf46c-2ee2-4f40-b371-7a42a8885e8f", 653 | "metadata": {}, 654 | "outputs": [], 655 | "source": [ 656 | "output = GROQ_CLIENT.chat.completions.create(\n", 657 | " messages=chat_history,\n", 658 | " model=MODEL\n", 659 | ").choices[0].message.content\n", 660 | "\n", 661 | "print(output)" 662 | ] 663 | }, 664 | { 665 | "cell_type": "markdown", 666 | "id": "4960a1e6-7b49-48fe-b4f6-b6e54d9c03b8", 667 | "metadata": {}, 668 | "source": [ 669 | "## Doing the same but with `agentic_patterns` library" 670 | ] 671 | }, 672 | { 673 | "cell_type": "code", 674 | "execution_count": null, 675 | "id": "6a4cd7f2-cc28-4d8a-a227-8f1346a3b7a8", 676 | "metadata": {}, 677 | "outputs": [], 678 | "source": [ 679 | "from agentic_patterns.planning_pattern.react_agent import ReactAgent" 680 | ] 681 | }, 682 | { 683 | "cell_type": "code", 684 | "execution_count": null, 685 | "id": "f2462f18-f4ed-494e-8676-454e883ecc84", 686 | "metadata": {}, 687 | "outputs": [], 688 | "source": [ 689 | "agent = ReactAgent(tools=[sum_two_elements, multiply_two_elements, compute_log])" 690 | ] 691 | }, 692 | { 693 | "cell_type": "code", 694 | "execution_count": null, 695 | "id": "3bd048fc-1415-4ea1-a9b5-f488fc9a1ef2", 696 | "metadata": {}, 697 | "outputs": [], 698 | "source": [ 699 | "agent.run(user_msg=\"I want to calculate the sum of 1234 and 5678 and multiply the result by 5. Then, I want to take the logarithm of this result\")" 700 | ] 701 | }, 702 | { 703 | "cell_type": "markdown", 704 | "id": "817c78d6-760f-4161-b08b-7be9bf1fe010", 705 | "metadata": {}, 706 | "source": [ 707 | "---\n", 708 | "\n", 709 | "We did it!! A ReAct Agent working as expected, completely from Scratch! 
🚀🚀🚀🚀" 710 | ] 711 | } 712 | ], 713 | "metadata": { 714 | "kernelspec": { 715 | "display_name": "Python 3 (ipykernel)", 716 | "language": "python", 717 | "name": "python3" 718 | }, 719 | "language_info": { 720 | "codemirror_mode": { 721 | "name": "ipython", 722 | "version": 3 723 | }, 724 | "file_extension": ".py", 725 | "mimetype": "text/x-python", 726 | "name": "python", 727 | "nbconvert_exporter": "python", 728 | "pygments_lexer": "ipython3", 729 | "version": "3.11.3" 730 | } 731 | }, 732 | "nbformat": 4, 733 | "nbformat_minor": 5 734 | } 735 | -------------------------------------------------------------------------------- /notebooks/tool_pattern.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "7d105473-c0c6-4de6-acfb-ccf3054fd1a0", 6 | "metadata": {}, 7 | "source": [ 8 | "# Tool Pattern" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "39e9c48d-cac5-48a1-a6ff-e44b618b92c9", 14 | "metadata": {}, 15 | "source": [ 16 | "\"Alt\n", 17 | "\n", 18 | "---\n", 19 | "\n", 20 | "As you may already know, the information stored in LLM weights is (usually) 𝐧𝐨𝐭 𝐞𝐧𝐨𝐮𝐠𝐡 to give accurate and insightful answers to our questions.\n", 21 | " \n", 22 | "That's why we need to provide the LLM with ways to access the outside world. 🌍 \n", 23 | "\n", 24 | "In practice, you can build tools for whatever you want (at the end of the day they are just functions the LLM can use), from a tool that let's you access Wikipedia, another to analyse the content of YouTube videos or calculate difficult integrals using Wolfram Alpha. \n", 25 | "\n", 26 | "The second pattern we are going to implement is the **tool pattern**. \n", 27 | "\n", 28 | "In this notebook, you'll learn how **tools** actually work. This is the **second lesson** of the \"Agentic Patterns from Scratch\" series. Take a look at the first lesson if you haven't!\n", 29 | "\n", 30 | "* [First Lesson: The Reflection Pattern](https://github.com/neural-maze/agentic_patterns/blob/main/notebooks/reflection_pattern.ipynb)" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "id": "a6eb2bab-9a5b-4c92-b23a-18f757d44c06", 36 | "metadata": {}, 37 | "source": [ 38 | "## A simple function" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "id": "148df24a-4ac5-4d3d-9860-8ff0e7ed7c90", 44 | "metadata": {}, 45 | "source": [ 46 | "Take a look at this function 👇" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "id": "1c851271-9b5a-4b48-a0e0-bf889cfb303b", 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "import json\n", 57 | "\n", 58 | "def get_current_weather(location: str, unit: str):\n", 59 | "\t\"\"\"\n", 60 | "\tGet the current weather in a given location\n", 61 | "\n", 62 | "\tlocation (str): The city and state, e.g. Madrid, Barcelona\n", 63 | "\tunit (str): The unit. It can take two values; \"celsius\", \"fahrenheit\"\n", 64 | "\t\"\"\"\n", 65 | "\tif location == \"Madrid\":\n", 66 | "\t\treturn json.dumps({\"temperature\": 25, \"unit\": unit})\n", 67 | "\n", 68 | "\telse:\n", 69 | "\t\treturn json.dumps({\"temperature\": 58, \"unit\": unit})" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "id": "de31cb35-847f-458f-b7d7-603acf5a714a", 75 | "metadata": {}, 76 | "source": [ 77 | "Very simple, right? You provide a `location` and a `unit` and it returns the temperature." 
78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "id": "3f52e61e-be31-4e6f-9f4f-eeb7082ad827", 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "get_current_weather(location=\"Madrid\", unit=\"celsius\")" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "id": "e9d63a34-8a93-4551-a34a-a0e85c95aa6a", 93 | "metadata": {}, 94 | "source": [ 95 | "But the question is:\n", 96 | "\n", 97 | "**How can you make this function available to an LLM?**\n", 98 | "\n", 99 | "An LLM is a type of NLP system, so it expects text as input. But how can we transform this function into text?" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "id": "56a4f2f8-9fc2-4e3d-87cd-bdfca15e5ddc", 105 | "metadata": {}, 106 | "source": [ 107 | "## A System Prompt that works" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "id": "93bed242-75ca-4ab7-a159-114a9e1e7e67", 113 | "metadata": {}, 114 | "source": [ 115 | "For the LLM to be aware of this function, we need to provide some relevant information about it in the context. **I'm referring to the function name, attributes, description, etc.** Take a look at the following System Prompt." 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "id": "1ad89df0-d233-41cc-b002-19963e7740a1", 121 | "metadata": {}, 122 | "source": [ 123 | "```xml\n", 124 | "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. \n", 125 | "You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug \n", 126 | "into functions. Pay special attention to the properties 'types'. You should use those types as in a Python dict.\n", 127 | "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n", 128 | "\n", 129 | "<tool_call>\n", 130 | "{\"name\": <function-name>,\"arguments\": <args-dict>}\n", 131 | "</tool_call>\n", 132 | "\n", 133 | "Here are the available tools:\n", 134 | "\n", 135 | "<tools> {\n", 136 | "    \"name\": \"get_current_weather\",\n", 137 | "    \"description\": \"Get the current weather in a given location location (str): The city and state, e.g. Madrid, Barcelona unit (str): The unit. It can take two values; 'celsius', 'fahrenheit'\",\n", 138 | "    \"parameters\": {\n", 139 | "        \"properties\": {\n", 140 | "            \"location\": {\n", 141 | "                \"type\": \"string\"\n", 142 | "            },\n", 143 | "            \"unit\": {\n", 144 | "                \"type\": \"string\"\n", 145 | "            }\n", 146 | "        }\n", 147 | "    }\n", 148 | "}\n", 149 | "</tools>\n", 150 | "```\n", 151 | "\n", 152 | "\n", 153 | "As you can see, this System Prompt forces the LLM to behave as a `function calling AI model` that, given a list of function signatures inside the `<tools>` XML tags,\n", 154 | "will select which one to use. When the model decides on a function to use, it will return a JSON object like the following, representing a function call:\n", 155 | "\n", 156 | "```xml\n", 157 | "<tool_call>\n", 158 | "{\"name\": <function-name>,\"arguments\": <args-dict>}\n", 159 | "</tool_call>\n", 160 | "```\n" 161 | ] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "id": "0d2d8322-afc0-4469-90aa-23019bc929e7", 166 | "metadata": {}, 167 | "source": [ 168 | "Let's see how it works in practice! 👇" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "id": "692b5c16-77f3-4de0-b2b5-16bfc5812b7b", 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "import os\n", 179 | "import re\n", 180 | "from groq import Groq\n", 181 | "from dotenv import load_dotenv\n", 182 | "\n", 183 | "# Remember to load the environment variables. 
You should have the Groq API Key in there :)\n", 184 | "load_dotenv()\n", 185 | "\n", 186 | "MODEL = \"llama3-groq-70b-8192-tool-use-preview\"\n", 187 | "GROQ_CLIENT = Groq()\n", 188 | "\n", 189 | "# Define the System Prompt as a constant\n", 190 | "TOOL_SYSTEM_PROMPT = \"\"\"\n", 191 | "You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. \n", 192 | "You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug \n", 193 | "into functions. Pay special attention to the properties 'types'. You should use those types as in a Python dict.\n", 194 | "For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n", 195 | "\n", 196 | "<tool_call>\n", 197 | "{\"name\": <function-name>,\"arguments\": <args-dict>}\n", 198 | "</tool_call>\n", 199 | "\n", 200 | "Here are the available tools:\n", 201 | "\n", 202 | "<tools> {\n", 203 | "    \"name\": \"get_current_weather\",\n", 204 | "    \"description\": \"Get the current weather in a given location location (str): The city and state, e.g. Madrid, Barcelona unit (str): The unit. It can take two values; 'celsius', 'fahrenheit'\",\n", 205 | "    \"parameters\": {\n", 206 | "        \"properties\": {\n", 207 | "            \"location\": {\n", 208 | "                \"type\": \"str\"\n", 209 | "            },\n", 210 | "            \"unit\": {\n", 211 | "                \"type\": \"str\"\n", 212 | "            }\n", 213 | "        }\n", 214 | "    }\n", 215 | "}\n", 216 | "</tools>\n", 217 | "\"\"\"" 218 | ] 219 | }, 220 | { 221 | "cell_type": "markdown", 222 | "id": "e0da45c0-0b4b-4153-83c7-eed1c312dcec", 223 | "metadata": {}, 224 | "source": [ 225 | "Let's ask a very simple question: `\"What's the current temperature in Madrid, in Celsius?\"`" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": null, 231 | "id": "e00b09e8-55d3-4a59-a9cf-29329af78d9a", 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "tool_chat_history = [\n", 236 | " {\n", 237 | " \"role\": \"system\",\n", 238 | " \"content\": TOOL_SYSTEM_PROMPT\n", 239 | " }\n", 240 | "]\n", 241 | "agent_chat_history = []\n", 242 | "\n", 243 | "user_msg = {\n", 244 | " \"role\": \"user\",\n", 245 | " \"content\": \"What's the current temperature in Madrid, in Celsius?\"\n", 246 | "}\n", 247 | "\n", 248 | "tool_chat_history.append(user_msg)\n", 249 | "agent_chat_history.append(user_msg)\n", 250 | "\n", 251 | "output = GROQ_CLIENT.chat.completions.create(\n", 252 | " messages=tool_chat_history,\n", 253 | " model=MODEL\n", 254 | ").choices[0].message.content\n", 255 | "\n", 256 | "print(output)" 257 | ] 258 | }, 259 | { 260 | "cell_type": "markdown", 261 | "id": "3c26cf72-0b60-464e-9f83-af371a93b3d5", 262 | "metadata": {}, 263 | "source": [ 264 | "---\n", 265 | "\n", 266 | "**That's an improvement!** We may not have the *proper* answer but, with this information, we can obtain it! How? Well, we just need to:\n", 267 | "\n", 268 | "1. Parse the LLM output. By this I mean deleting the `<tool_call>` XML tags\n", 269 | "2. 
Load the output as a proper Python dict\n", 270 | "\n", 271 | "The function below does exactly this.\n", 272 | "\n", 273 | "---" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": null, 279 | "id": "4366ae38-055a-45ec-937b-dfec7eaad00b", 280 | "metadata": {}, 281 | "outputs": [], 282 | "source": [ 283 | "def parse_tool_call_str(tool_call_str: str):\n", 284 | " pattern = r'</?tool_call>'\n", 285 | " clean_tags = re.sub(pattern, '', tool_call_str)\n", 286 | " \n", 287 | " try:\n", 288 | " tool_call_json = json.loads(clean_tags)\n", 289 | " return tool_call_json\n", 290 | " except json.JSONDecodeError:\n", 291 | " return clean_tags\n", 292 | " except Exception as e:\n", 293 | " print(f\"Unexpected error: {e}\")\n", 294 | " return \"There was some error parsing the Tool's output\"" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": null, 300 | "id": "c5890ba4-3f2f-4dc8-9a62-dff0079f07bb", 301 | "metadata": {}, 302 | "outputs": [], 303 | "source": [ 304 | "parsed_output = parse_tool_call_str(output)\n", 305 | "parsed_output" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "id": "944b0373-f647-423a-bf00-914ffb03dcd7", 311 | "metadata": {}, 312 | "source": [ 313 | "We can simply run the function now by passing the arguments like this 👇" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": null, 319 | "id": "169f06bb-836d-4270-bd66-abc2aadc0757", 320 | "metadata": {}, 321 | "outputs": [], 322 | "source": [ 323 | "result = get_current_weather(**parsed_output[\"arguments\"])" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": null, 329 | "id": "ecdfbbc5-7cdf-4c21-8b75-055446658675", 330 | "metadata": {}, 331 | "outputs": [], 332 | "source": [ 333 | "result" 334 | ] 335 | }, 336 | { 337 | "cell_type": "markdown", 338 | "id": "272a337d-c193-4316-bed5-bc1ee4ccaae5", 339 | "metadata": {}, 340 | "source": [ 341 | "**That's it!** A temperature of 25 degrees Celsius. \n", 342 | "\n", 343 | "As you can see, we're dealing with a string, so we can simply add the `result` as an observation to the `agent_chat_history`, so that the LLM knows the information it has to return to the user. " 344 | ] 345 | }, 346 | { 347 | "cell_type": "code", 348 | "execution_count": null, 349 | "id": "3fb0fc08-dad9-42cd-a2a9-674b8191d06b", 350 | "metadata": {}, 351 | "outputs": [], 352 | "source": [ 353 | "agent_chat_history.append({\n", 354 | " \"role\": \"user\",\n", 355 | " \"content\": f\"Observation: {result}\"\n", 356 | "})" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": null, 362 | "id": "b610fb1f-24af-4cc1-b485-fa0c5bfca846", 363 | "metadata": {}, 364 | "outputs": [], 365 | "source": [ 366 | "GROQ_CLIENT.chat.completions.create(\n", 367 | " messages=agent_chat_history,\n", 368 | " model=MODEL\n", 369 | ").choices[0].message.content" 370 | ] 371 | }, 372 | { 373 | "cell_type": "markdown", 374 | "id": "72fa386e-edef-4e3f-903d-a2fc7008e5c3", 375 | "metadata": {}, 376 | "source": [ 377 | "## Implementing everything the right way" 378 | ] 379 | }, 380 | { 381 | "cell_type": "markdown", 382 | "id": "4217eb34-efac-4a05-bb23-ae780126c0ad", 383 | "metadata": {}, 384 | "source": [ 385 | "To recap, we have a way for the LLM to generate `tool_calls` that we can use later to *properly* run the functions. But, as you may imagine, there are some pieces missing:\n", 386 | "\n", 387 | "1. We need to automatically transform any function into a description like the one we saw in the initial system prompt.\n", 388 | "2. 
We need a way to tell the agent that this function is a tool.\n", 389 | "\n", 390 | "Let's do it!" 391 | ] 392 | }, 393 | { 394 | "cell_type": "markdown", 395 | "id": "df20db23-3c1a-4744-88b8-8d47d7875f18", 396 | "metadata": {}, 397 | "source": [ 398 | "### The `tool` decorator" 399 | ] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "id": "7c538804-a381-4552-94eb-c04720e897df", 404 | "metadata": {}, 405 | "source": [ 406 | "We are going to use the `tool` decorator to transform any Python function into a tool. You can see the implementation [here](https://github.com/neural-maze/agentic_patterns/blob/main/src/agentic_patterns/tool_pattern/tool.py). To test it out, let's make a more complex tool than before. For example, a tool that interacts with [Hacker News](https://news.ycombinator.com/), getting the current top stories. \n", 407 | "\n", 408 | "> Reminder: To automatically generate the function signature for the tool, we need a way to infer the argument types. For this reason, we need to add typing annotations. " 409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "id": "b9413902-e3ea-4c0a-bfd2-180d69ba5cd1", 415 | "metadata": {}, 416 | "outputs": [], 417 | "source": [ 418 | "import json\n", 419 | "import requests\n", 420 | "from agentic_patterns.tool_pattern.tool import tool\n", 421 | "from agentic_patterns.tool_pattern.tool_agent import ToolAgent\n", 422 | "\n", 423 | "def fetch_top_hacker_news_stories(top_n: int):\n", 424 | " \"\"\"\n", 425 | " Fetch the top stories from Hacker News.\n", 426 | "\n", 427 | " This function retrieves the top `top_n` stories from Hacker News using the Hacker News API. \n", 428 | " Each story contains the title, URL, score, author, and time of submission. The data is fetched \n", 429 | " from the official Firebase Hacker News API, which returns story details in JSON format.\n", 430 | "\n", 431 | " Args:\n", 432 | " top_n (int): The number of top stories to retrieve.\n", 433 | " \"\"\"\n", 434 | " top_stories_url = 'https://hacker-news.firebaseio.com/v0/topstories.json'\n", 435 | " \n", 436 | " try:\n", 437 | " response = requests.get(top_stories_url)\n", 438 | " response.raise_for_status() # Check for HTTP errors\n", 439 | " \n", 440 | " # Get the top story IDs\n", 441 | " top_story_ids = response.json()[:top_n]\n", 442 | " \n", 443 | " top_stories = []\n", 444 | " \n", 445 | " # For each story ID, fetch the story details\n", 446 | " for story_id in top_story_ids:\n", 447 | " story_url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json'\n", 448 | " story_response = requests.get(story_url)\n", 449 | " story_response.raise_for_status() # Check for HTTP errors\n", 450 | " story_data = story_response.json()\n", 451 | " \n", 452 | " # Append the story title and URL (or other relevant info) to the list\n", 453 | " top_stories.append({\n", 454 | " 'title': story_data.get('title', 'No title'),\n", 455 | " 'url': story_data.get('url', 'No URL available'),\n", 456 | " })\n", 457 | " \n", 458 | " return json.dumps(top_stories)\n", 459 | "\n", 460 | " except requests.exceptions.RequestException as e:\n", 461 | " print(f\"An error occurred: {e}\")\n", 462 | " return []" 463 | ] 464 | }, 465 | { 466 | "cell_type": "markdown", 467 | "id": "73f75359-1e8a-4317-92dd-40dd1cf36e97", 468 | "metadata": {}, 469 | "source": [ 470 | "If we run this Python function, we'll obtain the top HN stories, as you can see below (the top 5 in this case)."
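, "\n", "\n", "Each element of the decoded list keeps only the `title` and `url` keys (that is all the function appends), so the result has roughly this shape (the actual stories depend on the live front page):\n", "\n", "```python\n", "# [{'title': '...', 'url': '...'},\n", "#  {'title': '...', 'url': '...'},\n", "#  ...]\n", "```"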
471 | ] 472 | }, 473 | { 474 | "cell_type": "code", 475 | "execution_count": null, 476 | "id": "aad2bbed-549e-4c0e-91fd-37b4694e0b50", 477 | "metadata": {}, 478 | "outputs": [], 479 | "source": [ 480 | "json.loads(fetch_top_hacker_news_stories(top_n=5))" 481 | ] 482 | }, 483 | { 484 | "cell_type": "markdown", 485 | "id": "fb587d13-b312-45b5-af56-4f009c11eeda", 486 | "metadata": {}, 487 | "source": [ 488 | "To transform the `fetch_top_hacker_news_stories` function into a Tool, we can use the `tool` decorator." 489 | ] 490 | }, 491 | { 492 | "cell_type": "code", 493 | "execution_count": null, 494 | "id": "4616e412-d4a8-4fe5-bcb1-dd00ce48640a", 495 | "metadata": {}, 496 | "outputs": [], 497 | "source": [ 498 | "hn_tool = tool(fetch_top_hacker_news_stories)" 499 | ] 500 | }, 501 | { 502 | "cell_type": "markdown", 503 | "id": "3f438638-a933-414f-9d00-53c37f041f16", 504 | "metadata": {}, 505 | "source": [ 506 | "The Tool has the following attributes: a `name`, a `fn_signature` and the `fn` (this is the function we are going to call, in this case `fetch_top_hacker_news_stories`)." 507 | ] 508 | }, 509 | { 510 | "cell_type": "code", 511 | "execution_count": null, 512 | "id": "df16bfa5-0ed4-46e1-b262-006f36fb8e78", 513 | "metadata": {}, 514 | "outputs": [], 515 | "source": [ 516 | "hn_tool.name" 517 | ] 518 | }, 519 | { 520 | "cell_type": "markdown", 521 | "id": "3209e3e0-b59c-4b0e-b075-8fcbf9d21516", 522 | "metadata": {}, 523 | "source": [ 524 | "By default, the tool gets its name from the function name." 525 | ] 526 | }, 527 | { 528 | "cell_type": "code", 529 | "execution_count": null, 530 | "id": "e0da95e0-10a8-4d17-aae7-ed3cc20abb03", 531 | "metadata": {}, 532 | "outputs": [], 533 | "source": [ 534 | "json.loads(hn_tool.fn_signature)" 535 | ] 536 | }, 537 | { 538 | "cell_type": "markdown", 539 | "id": "d5760bf7-7d9a-4c79-bc87-6469040250b6", 540 | "metadata": {}, 541 | "source": [ 542 | "As you can see, the function signature has been automatically generated. It contains the `name`, a `description` (taken from the docstring) and the `parameters`, whose types come from the typing annotations. Now that we have a tool, let's run the agent." 543 | ] 544 | }, 545 | { 546 | "cell_type": "markdown", 547 | "id": "043ad8ba-7789-468a-aafd-60c10bd21135", 548 | "metadata": {}, 549 | "source": [ 550 | "### The `ToolAgent`" 551 | ] 552 | }, 553 | { 554 | "cell_type": "markdown", 555 | "id": "065e04e5-50af-4452-9086-eae08a12e8cf", 556 | "metadata": {}, 557 | "source": [ 558 | "To create the agent, we just need to pass a list of tools (in this case, just one)." 559 | ] 560 | }, 561 | { 562 | "cell_type": "code", 563 | "execution_count": null, 564 | "id": "4a303211-f2a6-43c0-85aa-081fb0be2bbe", 565 | "metadata": {}, 566 | "outputs": [], 567 | "source": [ 568 | "tool_agent = ToolAgent(tools=[hn_tool])" 569 | ] 570 | }, 571 | { 572 | "cell_type": "markdown", 573 | "id": "b9eabae6-2b5c-407e-9e43-88e5e4844e9e", 574 | "metadata": {}, 575 | "source": [ 576 | "A quick check to see that everything works fine. If we ask the agent something unrelated to Hacker News, it shouldn't use the tool."
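, "\n", "\n", "Conceptually, `ToolAgent.run` just automates the manual loop from the first half of this notebook. Here is a minimal sketch of the idea (illustrative only, not the library's actual code; `TOOL_SYSTEM_PROMPT_TEMPLATE` is an assumed name for a template like the one we defined above):\n", "\n", "```python\n", "# Illustrative sketch, NOT the library's implementation\n", "def run_sketch(user_msg: str, tools: list) -> str:\n", "    tools_dict = {t.name: t for t in tools}\n", "    # 1. Build the system prompt from the tools' signatures\n", "    signatures = \",\\n\".join(t.fn_signature for t in tools)\n", "    history = [\n", "        {\"role\": \"system\", \"content\": TOOL_SYSTEM_PROMPT_TEMPLATE % signatures},  # assumed template\n", "        {\"role\": \"user\", \"content\": user_msg},\n", "    ]\n", "    # 2. Ask the LLM\n", "    output = GROQ_CLIENT.chat.completions.create(messages=history, model=MODEL).choices[0].message.content\n", "    # 3. If the model emitted a tool call, run it and feed back an observation\n", "    if \"<tool_call>\" in output:\n", "        tool_call = json.loads(re.sub(r\"</?tool_call>\", \"\", output))\n", "        observation = tools_dict[tool_call[\"name\"]].run(**tool_call[\"arguments\"])\n", "        history.append({\"role\": \"user\", \"content\": f\"Observation: {observation}\"})\n", "        output = GROQ_CLIENT.chat.completions.create(messages=history, model=MODEL).choices[0].message.content\n", "    return output\n", "```"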
577 | ] 578 | }, 579 | { 580 | "cell_type": "code", 581 | "execution_count": null, 582 | "id": "92c706fd-0a4b-46be-bbb3-c02618dbf677", 583 | "metadata": {}, 584 | "outputs": [], 585 | "source": [ 586 | "output = tool_agent.run(user_msg=\"Tell me your name\")" 587 | ] 588 | }, 589 | { 590 | "cell_type": "code", 591 | "execution_count": null, 592 | "id": "be02a976-1e72-40ad-9ada-460148ca65d1", 593 | "metadata": {}, 594 | "outputs": [], 595 | "source": [ 596 | "print(output)" 597 | ] 598 | }, 599 | { 600 | "cell_type": "markdown", 601 | "id": "6c862a34-3cc9-428b-a246-d98effc998a5", 602 | "metadata": {}, 603 | "source": [ 604 | "Now, let's ask for specific information about Hacker News." 605 | ] 606 | }, 607 | { 608 | "cell_type": "code", 609 | "execution_count": null, 610 | "id": "b74bbc64-8943-4ae3-9928-6230ead61e77", 611 | "metadata": {}, 612 | "outputs": [], 613 | "source": [ 614 | "output = tool_agent.run(user_msg=\"Tell me the top 5 Hacker News stories right now\")" 615 | ] 616 | }, 617 | { 618 | "cell_type": "code", 619 | "execution_count": null, 620 | "id": "53476bff-812d-4e56-afb9-de21474f6580", 621 | "metadata": {}, 622 | "outputs": [], 623 | "source": [ 624 | "print(output)" 625 | ] 626 | }, 627 | { 628 | "cell_type": "markdown", 629 | "id": "f70a8059-9637-45b5-8050-ea7ba4995407", 630 | "metadata": {}, 631 | "source": [ 632 | "---\n", 633 | "There you have it!! A fully functional Tool!! 🛠️" 634 | ] 635 | } 636 | ], 637 | "metadata": { 638 | "kernelspec": { 639 | "display_name": "Python 3 (ipykernel)", 640 | "language": "python", 641 | "name": "python3" 642 | }, 643 | "language_info": { 644 | "codemirror_mode": { 645 | "name": "ipython", 646 | "version": 3 647 | }, 648 | "file_extension": ".py", 649 | "mimetype": "text/x-python", 650 | "name": "python", 651 | "nbconvert_exporter": "python", 652 | "pygments_lexer": "ipython3", 653 | "version": "3.11.3" 654 | } 655 | }, 656 | "nbformat": 4, 657 | "nbformat_minor": 5 658 | } 659 | --------------------------------------------------------------------------------