├── .gitignore
├── LICENSE
├── README.md
├── assets
├── agent.png
├── ai_engineer.png
├── dra_topology_0.png
├── dra_topology_1.png
├── planning.png
├── reflection.png
└── tool_use.png
└── building_agents_from_scratch
├── README.md
├── deep_research_agent
├── README.md
├── env.example
├── notebooks
│ └── deep_research_agent.ipynb
├── pyproject.toml
├── reports
│ └── .gitkeep
├── src
│ ├── agents.py
│ ├── config.py
│ ├── prompts.py
│ ├── state.py
│ ├── topology.py
│ └── utils.py
└── uv.lock
├── planning
├── README.md
└── reflection
│ ├── README.md
│ ├── notebooks
│ └── reflection.ipynb
│ ├── requirements.txt
│ └── src
│ ├── __init__.py
│ └── main.py
└── tool_use
├── README.md
├── notebooks
└── tool_use.ipynb
├── requirements.txt
└── src
├── __init__.py
├── main.py
├── tool_registry.py
└── tools.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.so
6 | .Python
7 | build/
8 | develop-eggs/
9 | dist/
10 | downloads/
11 | eggs/
12 | .eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | wheels/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 |
23 | # Virtual Environment
24 | .env
25 | .venv
26 | venv/
27 | env/
28 | ENV/
29 |
30 | # IDE
31 | .idea/
32 | .vscode/
33 | *.swp
34 | *.swo
35 |
36 | # OS
37 | .DS_Store
38 | Thumbs.db
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AI Engineering Resources
2 |
3 | A comprehensive collection of resources, implementations, and best practices for AI Engineering, focusing on building intelligent agents from the ground up.
4 |
5 |
6 |
7 |
8 |
9 | ## Projects
10 |
11 | ### [Building Agents from Scratch](building_agents_from_scratch)
12 | Learn how to build AI agents from fundamental principles:
13 | - [Tool Use](building_agents_from_scratch/tool_use)
14 | - Planning Strategies:
15 | - [Reflection](building_agents_from_scratch/planning/reflection)
16 | - Memory (Coming Soon)
17 | - Evaluation (Coming Soon)
18 | - [Deep Research Agent](building_agents_from_scratch/deep_research_agent)
19 |
20 | ## Stay Updated
21 |
22 | - ⭐ Star this repository to stay updated
23 | - 👀 Watch for releases and updates
24 | - 🔄 Check back regularly for new content
25 |
26 | ## Contact
27 |
28 | - 🔗 [LinkedIn](https://www.linkedin.com/in/aurimas-griciunas)
29 | - 🔗 [X](https://x.com/Aurimas_Gr)
30 | - 🔗 [Newsletter](https://www.newsletter.swirlai.com/)
31 |
--------------------------------------------------------------------------------
/assets/agent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/agent.png
--------------------------------------------------------------------------------
/assets/ai_engineer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/ai_engineer.png
--------------------------------------------------------------------------------
/assets/dra_topology_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/dra_topology_0.png
--------------------------------------------------------------------------------
/assets/dra_topology_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/dra_topology_1.png
--------------------------------------------------------------------------------
/assets/planning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/planning.png
--------------------------------------------------------------------------------
/assets/reflection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/reflection.png
--------------------------------------------------------------------------------
/assets/tool_use.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/assets/tool_use.png
--------------------------------------------------------------------------------
/building_agents_from_scratch/README.md:
--------------------------------------------------------------------------------
1 | This is the repository where I will hold all of the code examples of "Building Agents from scratch" series.
2 |
3 |
4 |
5 |
6 |
7 | The first part about implementing tool usage from scratch can be found in the [tool_use](tool_use) folder.
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Research Agent From Scratch
2 |
3 | This is part of the "Building Agents from scratch" series that may eventually be moved to its own separate project. We will build a Deep Research Agent from scratch without using any LLM orchestration frameworks. We will also learn about the nuances of the DeepSeek-R1 family of reasoning models.
4 |
5 | ### The topology of the system we will be building is as follows:
6 |
7 |
8 |
9 |
10 |
11 | 1. A user will provide a query or topic to be researched.
12 |
13 | 2. An LLM will create an outline of the final report that it will be aiming for. It will be instructed to produce no more than a certain number of paragraphs.
14 |
15 | 3. Each of the paragraph descriptions will be fed into a research process separately to produce a comprehensive set of information to be used in report construction. A detailed description of the research process is outlined in the next section.
16 |
17 | 4. All of the information will be fed into a summarisation step that will construct the final report, including a conclusion.
18 |
19 | 5. The report will then be delivered to the user in MarkDown form.
20 |
21 | ### Zooming into each of the research steps:
22 |
23 |
24 |
25 |
26 |
27 | 1. Once we have the outline of each paragraph, it will be passed to a LLM to construct Web Search queries in an attempt to best enrich the information needed.
28 |
29 | 2. The LLM will output the search query and the reasoning behind it.
30 |
31 | 3. We will execute Web search against the query and retrieve top relevant results.
32 |
33 | 4. The results will be passed to the Reflection step where a LLM will reason about any missed nuances to try and come up with a search query that would enrich the initial results.
34 |
35 | 5. This process will be repeated n times in an attempt to get the best set of information possible.
36 |
37 | You can find the detailed walkthrough of this project in my [Newsletter](https://www.newsletter.swirlai.com/p/building-deep-research-agent-from).
38 |
39 |
40 | ## Running the code
41 |
42 | [uv](https://github.com/astral-sh/uv) is a great tool for Python dependency management. To run the code:
43 |
44 | - Copy the `env.example` file to `.env` and set the correct values:
45 |
46 | ```bash
47 | cp env.example .env
48 | ```
49 | - Get your credentials for SambaNova API [here](https://fnf.dev/4aVUqro) and Tavily API [here](https://app.tavily.com/) and save them in the .env file under keys `SAMBANOVA_API_KEY` and `TAVILY_API_KEY` respectively.
50 |
51 | - We can now run the project:
52 |
53 | ```bash
54 | uv run --env-file .env src/topology.py --topic "Topic to be researched"
55 | ```
56 | For example:
57 |
58 | ```bash
59 | uv run --env-file .env src/topology.py --topic "Something interesting about humans"
60 | ```
61 | It will take some time to execute (~5 minutes) and you will get your research report in Markdown format in the `reports` folder.
62 |
63 | ## Interactive Notebook
64 |
65 | For a more interactive learning experience, you can follow along with the Jupyter notebook in the [notebooks](notebooks) folder. While detailed documentation is still being worked on, you can find the complete implementation and follow the code there.
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/env.example:
--------------------------------------------------------------------------------
1 | SAMBANOVA_API_KEY=
2 | SAMBANOVA_BASE_URL=https://api.sambanova.ai/v1
3 | LLM_REASONING=DeepSeek-R1-Distill-Llama-70B
4 | LLM_REGULAR=Meta-Llama-3.3-70B-Instruct
5 | TAVILY_API_KEY=
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/notebooks/deep_research_agent.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e67d5fef-280b-4147-936b-6bfdebe132ed",
6 | "metadata": {},
7 | "source": [
8 | "# Deep Research Agent"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "cd594550-b76f-492e-bb93-6b8e2d536108",
14 | "metadata": {},
15 | "source": [
16 | "## Imports"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "id": "8168e6f4-5015-4d85-a167-28fc278fcb0d",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import os\n",
27 | "import openai\n",
28 | "import json\n",
29 | "from dataclasses import dataclass, field\n",
30 | "from typing import List\n",
31 | "from tavily import TavilyClient\n",
32 | "from json.decoder import JSONDecodeError\n",
33 | "from pydantic_settings import BaseSettings\n",
34 | "from IPython.display import Markdown"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "id": "527271f6-8edc-44c2-913b-a808415a67e2",
40 | "metadata": {},
41 | "source": [
42 | "### Constructing application Configuration object"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "id": "91f1ab7e-8161-4f83-969f-8d6be94d4b0a",
48 | "metadata": {},
49 | "source": [
50 |     "Note that we have two LLMs configured: we will be using a reasoning model (DeepSeek-R1) for some of the sub-agents, while we will be using a regular instruction-tuned Llama model (Meta-Llama-3.3-70B-Instruct) for the others."
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "id": "b2693c91-4081-4839-8c8c-3bc9c243e762",
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "class Config(BaseSettings):\n",
61 | " SAMBANOVA_API_KEY: str\n",
62 | " SAMBANOVA_BASE_URL: str\n",
63 | " LLM_REASONING: str\n",
64 | " LLM_REGULAR: str\n",
65 | " TAVILY_API_KEY: str"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "id": "d87a818b-31f4-41a5-99ae-29f17208ec18",
71 | "metadata": {},
72 | "source": [
73 | "Be sure to have your SAMBANOVA_API_KEY (get it [here](https://fnf.dev/4aVUqro)) and TAVILY_API_KEY (get it [here](https://app.tavily.com/)) exported as environment variables before running the next cell."
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "id": "9dd56f21-55ad-483b-af91-3a5dfa5c2225",
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "config = Config(SAMBANOVA_API_KEY=os.environ[\"SAMBANOVA_API_KEY\"],\n",
84 | " SAMBANOVA_BASE_URL=\"https://api.sambanova.ai/v1\",\n",
85 | " LLM_REASONING=\"DeepSeek-R1-Distill-Llama-70B\",\n",
86 | " LLM_REGULAR=\"Meta-Llama-3.3-70B-Instruct\",\n",
87 | " TAVILY_API_KEY=os.environ[\"TAVILY_API_KEY\"])"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "id": "7133be4d-524d-40b8-91fc-53fb89cc6067",
93 | "metadata": {},
94 | "source": [
95 | "### Data Classes to define the System State"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "id": "f6d38df5-5c0e-47b7-902b-8b9c36cc7e21",
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "@dataclass\n",
106 | "class Search:\n",
107 | " url: str = \"\"\n",
108 | " content: str = \"\"\n",
109 | "\n",
110 | "@dataclass\n",
111 | "class Research:\n",
112 | " search_history: List[Search] = field(default_factory=list)\n",
113 | " latest_summary: str = \"\"\n",
114 | " reflection_iteration: int = 0\n",
115 | "\n",
116 | "@dataclass\n",
117 | "class Paragraph:\n",
118 | " title: str = \"\"\n",
119 | " content: str = \"\"\n",
120 | " research: Research = field(default_factory=Research)\n",
121 | "\n",
122 | "@dataclass\n",
123 | "class State:\n",
124 | " report_title: str = \"\"\n",
125 | " paragraphs: List[Paragraph] = field(default_factory=list)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "id": "0f226607-50ca-4dfc-b476-38f365857fee",
131 | "metadata": {},
132 | "source": [
133 | "### Helper functions for data cleaning"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "id": "9b435be4-2a9b-424e-9623-1926170e8e90",
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "def remove_reasoning_from_output(output):\n",
144 | "    return output.split(\"</think>\")[-1].strip()\n",
145 | "\n",
146 | "def clean_json_tags(text):\n",
147 | " return text.replace(\"```json\\n\", \"\").replace(\"\\n```\", \"\")\n",
148 | "\n",
149 | "def clean_markdown_tags(text):\n",
150 | " return text.replace(\"```markdown\\n\", \"\").replace(\"\\n```\", \"\")"
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "id": "b33a32ff-7c41-45c8-829b-44a0a4045a44",
156 | "metadata": {},
157 | "source": [
158 |     "### Search tool and a function to update System State with search results"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "id": "8dc6631b-66a1-41b4-95ee-59656fc9567e",
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "def tavily_search(query, include_raw_content=True, max_results=3):\n",
169 | "\n",
170 | " tavily_client = TavilyClient(api_key=config.TAVILY_API_KEY)\n",
171 | "\n",
172 | " return tavily_client.search(query,\n",
173 | " include_raw_content=include_raw_content,\n",
174 | " max_results=max_results)\n",
175 | "\n",
176 | "def update_state_with_search_results(search_results, idx_paragraph, state):\n",
177 | " \n",
178 | " for search_result in search_results[\"results\"]:\n",
179 | " search = Search(url=search_result[\"url\"], content=search_result[\"raw_content\"])\n",
180 | " state.paragraphs[idx_paragraph].research.search_history.append(search)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "id": "f1a8ec04-f822-4c9f-8a7d-b8e2836c9358",
186 | "metadata": {},
187 | "source": [
188 | "## Agents"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "id": "68d6e379-4c23-4e44-ada2-b712165aa7b3",
194 | "metadata": {},
195 | "source": [
196 | "Here we define LLM sub-Agents that will read the System State, perform computation and evolve the state."
197 | ]
198 | },
199 | {
200 | "cell_type": "markdown",
201 | "id": "f40f5af5-a137-44cb-acb3-13f8df8597f9",
202 | "metadata": {},
203 | "source": [
204 | "### Agent for Report structure creation"
205 | ]
206 | },
207 | {
208 | "cell_type": "code",
209 | "execution_count": null,
210 | "id": "a0e2bc1b-c315-4c78-a7f1-5b4f8df93d59",
211 | "metadata": {},
212 | "outputs": [],
213 | "source": [
214 | "output_schema_report_structure = {\n",
215 | " \"type\": \"array\",\n",
216 | " \"items\": {\n",
217 | " \"type\": \"object\",\n",
218 | " \"properties\": {\n",
219 | " \"title\": {\"type\": \"string\"},\n",
220 | " \"content\": {\"type\": \"string\"}\n",
221 | " }\n",
222 | " }\n",
223 | " }\n",
224 | "\n",
225 | "SYSTEM_PROMPT_REPORT_STRUCTURE = f\"\"\"\n",
226 | "You are a Deep Research assistan. Given a query, plan a structure for a report and the paragraphs to be included.\n",
227 | "Make sure that the ordering of paragraphs makes sense.\n",
228 | "Once the outline is created, you will be given tools to search the web and reflect for each of the section separately.\n",
229 | "Format the output in json with the following json schema definition:\n",
230 | "\n",
231 | "\n",
234 | "\n",
235 | "Title and content properties will be used for deeper research.\n",
236 | "Make sure that the output is a json object with an output json schema defined above.\n",
237 | "Only return the json object, no explanation or additional text.\n",
238 | "\"\"\""
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "id": "86944c78-5fb2-4bde-9325-dba96d06ca27",
245 | "metadata": {},
246 | "outputs": [],
247 | "source": [
248 | "class ReportStructureAgent:\n",
249 | "\n",
250 | " def __init__(self, query: str):\n",
251 | "\n",
252 | " self.openai_client = openai.OpenAI(\n",
253 | " api_key=config.SAMBANOVA_API_KEY,\n",
254 | " base_url=config.SAMBANOVA_BASE_URL\n",
255 | " )\n",
256 | " self.query = query\n",
257 | "\n",
258 | " def run(self) -> str:\n",
259 | "\n",
260 | " response = self.openai_client.chat.completions.create(\n",
261 | " model=config.LLM_REASONING,\n",
262 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_REPORT_STRUCTURE},\n",
263 | " {\"role\":\"user\",\"content\": self.query}]\n",
264 | " )\n",
265 | " return response.choices[0].message.content\n",
266 | "\n",
267 | " def mutate_state(self, state: State) -> State:\n",
268 | "\n",
269 | " report_structure = self.run()\n",
270 | " report_structure = remove_reasoning_from_output(report_structure)\n",
271 | " report_structure = clean_json_tags(report_structure)\n",
272 | "\n",
273 | " report_structure = json.loads(report_structure)\n",
274 | "\n",
275 | " for paragraph in report_structure:\n",
276 | " state.paragraphs.append(Paragraph(title=paragraph[\"title\"], content=paragraph[\"content\"]))\n",
277 | "\n",
278 | " return state"
279 | ]
280 | },
281 | {
282 | "cell_type": "markdown",
283 | "id": "cfa6b9c0-d47a-4297-bee6-f83ad8e4ac3a",
284 | "metadata": {},
285 | "source": [
286 | "### Agent to figure out the first search query for a given paragraph."
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": null,
292 | "id": "e4403fcd-7233-468e-b902-db740044a445",
293 | "metadata": {},
294 | "outputs": [],
295 | "source": [
296 | "input_schema_first_search = {\n",
297 | " \"type\": \"object\",\n",
298 | " \"properties\": {\n",
299 | " \"title\": {\"type\": \"string\"},\n",
300 | " \"content\": {\"type\": \"string\"}\n",
301 | " }\n",
302 | " }\n",
303 | "\n",
304 | "output_schema_first_search = {\n",
305 | " \"type\": \"object\",\n",
306 | " \"properties\": {\n",
307 | " \"search_query\": {\"type\": \"string\"},\n",
308 | " \"reasoning\": {\"type\": \"string\"}\n",
309 | " }\n",
310 | " }\n",
311 | "\n",
312 | "SYSTEM_PROMPT_FIRST_SEARCH = f\"\"\"\n",
313 | "You are a Deep Research assistan. You will be given a paragraph in a report, it's title and expected content in the following json schema definition:\n",
314 | "\n",
315 | "\n",
316 | "{json.dumps(input_schema_first_search, indent=2)}\n",
317 | "\n",
318 | "\n",
319 | "You can use a web search tool that takes a 'search_query' as parameter.\n",
320 | "Your job is to reflect on the topic and provide the most optimal web search query to enrich your current knowledge.\n",
321 | "Format the output in json with the following json schema definition:\n",
322 | "\n",
323 | "\n",
326 | "\n",
327 | "Make sure that the output is a json object with an output json schema defined above.\n",
328 | "Only return the json object, no explanation or additional text.\n",
329 | "\"\"\""
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": null,
335 | "id": "7725f6db-e1c5-48dd-b608-bc93c3a67010",
336 | "metadata": {},
337 | "outputs": [],
338 | "source": [
339 | "class FirstSearchAgent:\n",
340 | "\n",
341 | " def __init__(self):\n",
342 | "\n",
343 | " self.openai_client = openai.OpenAI(\n",
344 | " api_key=config.SAMBANOVA_API_KEY,\n",
345 | " base_url=config.SAMBANOVA_BASE_URL\n",
346 | " )\n",
347 | "\n",
348 | " def run(self, message) -> str:\n",
349 | "\n",
350 | " response = self.openai_client.chat.completions.create(\n",
351 | " model=config.LLM_REGULAR,\n",
352 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_FIRST_SEARCH},\n",
353 | " {\"role\":\"user\",\"content\": message}]\n",
354 | " )\n",
355 | "\n",
356 | " response = remove_reasoning_from_output(response.choices[0].message.content)\n",
357 | " response = clean_json_tags(response)\n",
358 | "\n",
359 | " response = json.loads(response)\n",
360 | "\n",
361 | " return response"
362 | ]
363 | },
364 | {
365 | "cell_type": "markdown",
366 | "id": "23acc12b-7106-4ccd-bcef-0a993da43b3e",
367 | "metadata": {},
368 | "source": [
369 | "### Agent to summarise search results of the first search."
370 | ]
371 | },
372 | {
373 | "cell_type": "code",
374 | "execution_count": null,
375 | "id": "17175034-449b-4cc8-b536-614e130c9e75",
376 | "metadata": {},
377 | "outputs": [],
378 | "source": [
379 | "input_schema_first_summary = {\n",
380 | " \"type\": \"object\",\n",
381 | " \"properties\": {\n",
382 | " \"title\": {\"type\": \"string\"},\n",
383 | " \"content\": {\"type\": \"string\"},\n",
384 | " \"search_query\": {\"type\": \"string\"},\n",
385 | " \"search_results\": {\n",
386 | " \"type\": \"array\",\n",
387 | " \"items\": {\"type\": \"string\"}\n",
388 | " }\n",
389 | " }\n",
390 | " }\n",
391 | "\n",
392 | "output_schema_first_summary = {\n",
393 | " \"type\": \"object\",\n",
394 | " \"properties\": {\n",
395 | " \"paragraph_latest_state\": {\"type\": \"string\"}\n",
396 | " }\n",
397 | " }\n",
398 | "\n",
399 | "SYSTEM_PROMPT_FIRST_SUMMARY = f\"\"\"\n",
400 | "You are a Deep Research assistan. You will be given a search query, search results and the paragraph a report that you are researching following json schema definition:\n",
401 | "\n",
402 | "\n",
403 | "{json.dumps(input_schema_first_summary, indent=2)}\n",
404 | "\n",
405 | "\n",
406 | "Your job is to write the paragraph as a researcher using the search results to align with the paragraph topic and structure it properly to be included in the report.\n",
407 | "Format the output in json with the following json schema definition:\n",
408 | "\n",
409 | "\n",
412 | "\n",
413 | "Make sure that the output is a json object with an output json schema defined above.\n",
414 | "Only return the json object, no explanation or additional text.\n",
415 | "\"\"\""
416 | ]
417 | },
418 | {
419 | "cell_type": "code",
420 | "execution_count": null,
421 | "id": "a7a3a30a-ecb6-4805-9e75-15d04da7036e",
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "class FirstSummaryAgent:\n",
426 | "\n",
427 | " def __init__(self):\n",
428 | "\n",
429 | " self.openai_client = openai.OpenAI(\n",
430 | " api_key=config.SAMBANOVA_API_KEY,\n",
431 | " base_url=config.SAMBANOVA_BASE_URL\n",
432 | " )\n",
433 | "\n",
434 | " def run(self, message) -> str:\n",
435 | "\n",
436 | " response = self.openai_client.chat.completions.create(\n",
437 | " model=config.LLM_REGULAR,\n",
438 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_FIRST_SUMMARY},\n",
439 | " {\"role\":\"user\",\"content\": message}]\n",
440 | " )\n",
441 | " return response.choices[0].message.content\n",
442 | "\n",
443 | " def mutate_state(self, message: str, idx_paragraph: int, state: State) -> State:\n",
444 | "\n",
445 | " summary = self.run(message)\n",
446 | " summary = remove_reasoning_from_output(summary)\n",
447 | " summary = clean_json_tags(summary)\n",
448 | " \n",
449 | " try:\n",
450 | " summary = json.loads(summary)\n",
451 | " except JSONDecodeError:\n",
452 | " summary = {\"paragraph_latest_state\": summary}\n",
453 | "\n",
454 | " state.paragraphs[idx_paragraph].research.latest_summary = summary[\"paragraph_latest_state\"]\n",
455 | "\n",
456 | " return state"
457 | ]
458 | },
459 | {
460 | "cell_type": "markdown",
461 | "id": "55f41e15-706b-460f-9bdf-c38159a20d16",
462 | "metadata": {},
463 | "source": [
464 | "### Agent to Reflect on the latest state of the paragraph."
465 | ]
466 | },
467 | {
468 | "cell_type": "code",
469 | "execution_count": null,
470 | "id": "b26a3828-6975-44d3-9188-02a17bc5617b",
471 | "metadata": {},
472 | "outputs": [],
473 | "source": [
474 | "input_schema_reflection = {\n",
475 | " \"type\": \"object\",\n",
476 | " \"properties\": {\n",
477 | " \"title\": {\"type\": \"string\"},\n",
478 | " \"content\": {\"type\": \"string\"},\n",
479 | " \"paragraph_latest_state\": {\"type\": \"string\"}\n",
480 | " }\n",
481 | " }\n",
482 | "\n",
483 | "output_schema_reflection = {\n",
484 | " \"type\": \"object\",\n",
485 | " \"properties\": {\n",
486 | " \"search_query\": {\"type\": \"string\"},\n",
487 | " \"reasoning\": {\"type\": \"string\"}\n",
488 | " }\n",
489 | " }\n",
490 | "\n",
491 | "SYSTEM_PROMPT_REFLECTION = f\"\"\"\n",
492 | "You are a Deep Research assistan. You are responsible for constructing comprehensife paragraphs for a research report. You will be provided paragraph title and planned content summary, also the latest state of the paragraph that you have already created all in the following json schema definition:\n",
493 | "\n",
494 | "\n",
495 | "{json.dumps(input_schema_reflection, indent=2)}\n",
496 | "\n",
497 | "\n",
498 | "You can use a web search tool that takes a 'search_query' as parameter.\n",
499 | "Your job is to reflect on the current state of the paragraph text and think if you havent missed some critical aspect of the topic and provide the most optimal web search query to enrich the latest state.\n",
500 | "Format the output in json with the following json schema definition:\n",
501 | "\n",
502 | "\n",
505 | "\n",
506 | "Make sure that the output is a json object with an output json schema defined above.\n",
507 | "Only return the json object, no explanation or additional text.\n",
508 | "\"\"\""
509 | ]
510 | },
511 | {
512 | "cell_type": "code",
513 | "execution_count": null,
514 | "id": "fd2e65b7-fab6-4dfd-b12f-b269dd815944",
515 | "metadata": {},
516 | "outputs": [],
517 | "source": [
518 | "class ReflectionAgent:\n",
519 | "\n",
520 | " def __init__(self):\n",
521 | "\n",
522 | " self.openai_client = openai.OpenAI(\n",
523 | " api_key=config.SAMBANOVA_API_KEY,\n",
524 | " base_url=config.SAMBANOVA_BASE_URL\n",
525 | " )\n",
526 | "\n",
527 | " def run(self, message) -> str:\n",
528 | "\n",
529 | " response = self.openai_client.chat.completions.create(\n",
530 | " model=config.LLM_REGULAR,\n",
531 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_REFLECTION},\n",
532 | " {\"role\":\"user\",\"content\": message}]\n",
533 | " )\n",
534 | "\n",
535 | " response = remove_reasoning_from_output(response.choices[0].message.content)\n",
536 | " response = clean_json_tags(response)\n",
537 | " response = json.loads(response)\n",
538 | "\n",
539 | " return response"
540 | ]
541 | },
542 | {
543 | "cell_type": "markdown",
544 | "id": "24ce5813-bd97-4da0-a36b-16eb73536a51",
545 | "metadata": {},
546 | "source": [
547 | "### Agent to summarise search results after Reflection."
548 | ]
549 | },
550 | {
551 | "cell_type": "code",
552 | "execution_count": null,
553 | "id": "db7299fe-ed97-4210-8fe1-21ebbdaa395a",
554 | "metadata": {},
555 | "outputs": [],
556 | "source": [
557 | "input_schema_reflection_summary = {\n",
558 | " \"type\": \"object\",\n",
559 | " \"properties\": {\n",
560 | " \"title\": {\"type\": \"string\"},\n",
561 | " \"content\": {\"type\": \"string\"},\n",
562 | " \"search_query\": {\"type\": \"string\"},\n",
563 | " \"search_results\": {\n",
564 | " \"type\": \"array\",\n",
565 | " \"items\": {\"type\": \"string\"}\n",
566 | " },\n",
567 | " \"paragraph_latest_state\": {\"type\": \"string\"}\n",
568 | " }\n",
569 | " }\n",
570 | "\n",
571 | "output_schema_reflection_summary = {\n",
572 | " \"type\": \"object\",\n",
573 | " \"properties\": {\n",
574 | " \"updated_paragraph_latest_state\": {\"type\": \"string\"}\n",
575 | " }\n",
576 | " }\n",
577 | "\n",
578 | "SYSTEM_PROMPT_REFLECTION_SUMMARY = f\"\"\"\n",
579 | "You are a Deep Research assistan.\n",
580 | "You will be given a search query, search results, paragraph title and expected content for the paragraph in a report that you are researching.\n",
581 | "You are iterating on the paragraph and the latest state of the paragraph is also provided.\n",
582 | "The data will be in the following json schema definition:\n",
583 | "\n",
584 | "\n",
585 | "{json.dumps(input_schema_reflection_summary, indent=2)}\n",
586 | "\n",
587 | "\n",
588 | "Your job is to enrich the current latest state of the paragraph with the search results considering expected content.\n",
589 | "Do not remove key information from the latest state and try to enrich it, only add information that is missing.\n",
590 | "Structure the paragraph properly to be included in the report.\n",
591 | "Format the output in json with the following json schema definition:\n",
592 | "\n",
593 | "\n",
596 | "\n",
597 | "Make sure that the output is a json object with an output json schema defined above.\n",
598 | "Only return the json object, no explanation or additional text.\n",
599 | "\"\"\""
600 | ]
601 | },
602 | {
603 | "cell_type": "code",
604 | "execution_count": null,
605 | "id": "a661e117-6e7d-497f-ab4c-a9a6e13709c7",
606 | "metadata": {},
607 | "outputs": [],
608 | "source": [
609 | "class ReflectionSummaryAgent:\n",
610 | "\n",
611 | " def __init__(self):\n",
612 | "\n",
613 | " self.openai_client = openai.OpenAI(\n",
614 | " api_key=config.SAMBANOVA_API_KEY,\n",
615 | " base_url=config.SAMBANOVA_BASE_URL\n",
616 | " )\n",
617 | "\n",
618 | " def run(self, message) -> str:\n",
619 | "\n",
620 | " response = self.openai_client.chat.completions.create(\n",
621 | " model=config.LLM_REGULAR,\n",
622 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_REFLECTION_SUMMARY},\n",
623 | " {\"role\":\"user\",\"content\": message}]\n",
624 | " )\n",
625 | " return response.choices[0].message.content\n",
626 | "\n",
627 | " def mutate_state(self, message: str, idx_paragraph: int, state: State) -> State:\n",
628 | "\n",
629 | " summary = self.run(message)\n",
630 | " summary = remove_reasoning_from_output(summary)\n",
631 | " summary = clean_json_tags(summary)\n",
632 | "\n",
633 | " try:\n",
634 | " summary = json.loads(summary)\n",
635 | " except JSONDecodeError:\n",
636 | " summary = {\"updated_paragraph_latest_state\": summary}\n",
637 | "\n",
638 | " state.paragraphs[idx_paragraph].research.latest_summary = summary[\"updated_paragraph_latest_state\"]\n",
639 | "\n",
640 | " return state"
641 | ]
642 | },
643 | {
644 | "cell_type": "markdown",
645 | "id": "fbf9e53a-5455-463c-82aa-60261bd7762d",
646 | "metadata": {},
647 | "source": [
648 | "### Agent to summarise results and produce the formatted report"
649 | ]
650 | },
651 | {
652 | "cell_type": "code",
653 | "execution_count": null,
654 | "id": "bdd8e72d-b050-4e6e-a860-65caeb627049",
655 | "metadata": {},
656 | "outputs": [],
657 | "source": [
658 | "input_schema_report_formatting = {\n",
659 | " \"type\": \"array\",\n",
660 | " \"items\": {\n",
661 | " \"type\": \"object\",\n",
662 | " \"properties\": {\n",
663 | " \"title\": {\"type\": \"string\"},\n",
664 | " \"paragraph_latest_state\": {\"type\": \"string\"}\n",
665 | " }\n",
666 | " }\n",
667 | " }\n",
668 | "\n",
669 | "SYSTEM_PROMPT_REPORT_FORMATTING = f\"\"\"\n",
670 | "You are a Deep Research assistan. You have already performed the research and construted final versions of all paragraphs in the report.\n",
671 | "You will get the data in the following json format:\n",
672 | "\n",
673 | "\n",
674 | "{json.dumps(input_schema_report_formatting, indent=2)}\n",
675 | "\n",
676 | "\n",
677 | "Your job is to format the Report nicely and return it in MarkDown.\n",
678 | "If Conclusion paragraph is not present, add it to the end of the report from the latest state of the other paragraphs.\n",
679 | "Use titles of the paragraphs to create a title for the report.\n",
680 | "\"\"\""
681 | ]
682 | },
683 | {
684 | "cell_type": "code",
685 | "execution_count": null,
686 | "id": "43924fb3-fe3d-4c17-b1e8-6488b4c35839",
687 | "metadata": {},
688 | "outputs": [],
689 | "source": [
690 | "class ReportFormattingAgent:\n",
691 | "\n",
692 | " def __init__(self):\n",
693 | "\n",
694 | " self.openai_client = openai.OpenAI(\n",
695 | " api_key=config.SAMBANOVA_API_KEY,\n",
696 | " base_url=config.SAMBANOVA_BASE_URL\n",
697 | " )\n",
698 | "\n",
699 | " def run(self, message) -> str:\n",
700 | "\n",
701 | " response = self.openai_client.chat.completions.create(\n",
702 | " model=config.LLM_REASONING,\n",
703 | " messages=[{\"role\": \"system\", \"content\": SYSTEM_PROMPT_REPORT_FORMATTING},\n",
704 | " {\"role\":\"user\",\"content\": message}]\n",
705 | " )\n",
706 | " summary = response.choices[0].message.content\n",
707 | " summary = remove_reasoning_from_output(summary)\n",
708 | " summary = clean_markdown_tags(summary)\n",
709 | " \n",
710 | " return summary"
711 | ]
712 | },
713 | {
714 | "cell_type": "markdown",
715 | "id": "068919b8-3798-4896-80c1-9863ccbc9b6e",
716 | "metadata": {},
717 | "source": [
718 | "## The Topology of the System"
719 | ]
720 | },
721 | {
722 | "cell_type": "code",
723 | "execution_count": null,
724 | "id": "e34692f4-b385-4461-912d-65c7038befec",
725 | "metadata": {},
726 | "outputs": [],
727 | "source": [
728 | "STATE = State()\n",
729 | "QUERY=\"Tell me something interesting about human species\"\n",
730 | "NUM_REFLECTIONS = 2\n",
731 | "NUM_RESULTS_PER_SEARCH = 3\n",
732 | "CAP_SEARCH_LENGTH = 20000"
733 | ]
734 | },
735 | {
736 | "cell_type": "code",
737 | "execution_count": null,
738 | "id": "d46750f1-73a3-4320-b246-94a62f7c304f",
739 | "metadata": {},
740 | "outputs": [],
741 | "source": [
742 | "report_structure_agent = ReportStructureAgent(topic)\n",
743 | "\n",
744 | "_ = report_structure_agent.mutate_state(STATE)\n",
745 | "\n",
746 | "first_search_agent = FirstSearchAgent()\n",
747 | "first_summary_agent = FirstSummaryAgent()\n",
748 | "reflection_agent = ReflectionAgent()\n",
749 | "reflection_summary_agent = ReflectionSummaryAgent()\n",
750 | "report_formatting_agent = ReportFormattingAgent()\n",
751 | "\n",
752 | "print(f\"Total Number of Paragraphs: {len(STATE.paragraphs)}\")\n",
753 | "\n",
754 | "idx = 1\n",
755 | "\n",
756 | "for paragraph in STATE.paragraphs:\n",
757 | "\n",
758 | " print(f\"\\nParagraph {idx}: {paragraph.title}\")\n",
759 | "\n",
760 | " idx += 1\n",
761 | "\n",
762 | "\n",
763 | "################## Iterate through paragraphs ##################\n",
764 | "\n",
765 | "for j in range(len(STATE.paragraphs)):\n",
766 | "\n",
767 | " print(f\"\\n\\n==============Paragraph: {j+1}==============\\n\")\n",
768 | " print(f\"=============={STATE.paragraphs[j].title}==============\\n\")\n",
769 | "\n",
770 | " ################## First Search ##################\n",
771 | " \n",
772 | " message = json.dumps(\n",
773 | " {\n",
774 | " \"title\": STATE.paragraphs[j].title, \n",
775 | " \"content\": STATE.paragraphs[j].content\n",
776 | " }\n",
777 | " )\n",
778 | " \n",
779 | " output = first_search_agent.run(message)\n",
780 | " \n",
781 | " search_results = tavily_search(output[\"search_query\"], max_results=NUM_RESULTS_PER_SEARCH)\n",
782 | " \n",
783 | " _ = update_state_with_search_results(search_results, j, STATE)\n",
784 | " \n",
785 | " ################## First Search Summary ##################\n",
786 | " \n",
787 | " message = {\n",
788 | " \"title\": STATE.paragraphs[j].title,\n",
789 | " \"content\": STATE.paragraphs[j].content,\n",
790 | " \"search_query\": search_results[\"query\"],\n",
791 | " \"search_results\": [result[\"raw_content\"][0:CAP_SEARCH_LENGTH] for result in search_results[\"results\"] if result[\"raw_content\"]]\n",
792 | " }\n",
793 | " \n",
794 | " \n",
795 | " _ = first_summary_agent.mutate_state(message=json.dumps(message), idx_paragraph=j, state=STATE)\n",
796 | " \n",
797 | " ################## Run NUM_REFLECTIONS Reflection steps ##################\n",
798 | " \n",
799 | " for i in range(NUM_REFLECTIONS):\n",
800 | " \n",
801 | " print(f\"Running reflection: {i+1}\")\n",
802 | "\n",
803 | " ################## Reflection Step ##################\n",
804 | " \n",
805 | " message = {\"paragraph_latest_state\": STATE.paragraphs[j].research.latest_summary,\n",
806 | " \"title\": STATE.paragraphs[j].title,\n",
807 | " \"content\": STATE.paragraphs[j].content}\n",
808 | " \n",
809 | " output = reflection_agent.run(message=json.dumps(message))\n",
810 | "\n",
811 | " ################## Reflection Search ##################\n",
812 | " \n",
813 | " search_results = tavily_search(output[\"search_query\"])\n",
814 | " \n",
815 | " _ = update_state_with_search_results(search_results, j, STATE)\n",
816 | "\n",
817 | " ################## Reflection Search Summary ##################\n",
818 | " \n",
819 | " message = {\n",
820 | " \"title\": STATE.paragraphs[j].title,\n",
821 | " \"content\": STATE.paragraphs[j].content,\n",
822 | " \"search_query\": search_results[\"query\"],\n",
823 | " \"search_results\": [result[\"raw_content\"][0:20000] for result in search_results[\"results\"] if result[\"raw_content\"]],\n",
824 | " \"paragraph_latest_state\": STATE.paragraphs[j].research.latest_summary\n",
825 | " }\n",
826 | " \n",
827 | " _ = reflection_summary_agent.mutate_state(message=json.dumps(message), idx_paragraph=j, state=STATE)\n",
828 | "\n",
829 | "################## Generate Final Report ##################\n",
830 | "\n",
831 | "report_data = [{\"title\": paragraph.title, \"paragraph_latest_state\": paragraph.research.latest_summary} for paragraph in STATE.paragraphs]\n",
832 | "\n",
833 | "final_report = report_formatting_agent.run(json.dumps(report_data))"
834 | ]
835 | },
836 | {
837 | "cell_type": "markdown",
838 | "id": "b3d303a7-4939-4af9-af83-45847b9a95db",
839 | "metadata": {},
840 | "source": [
841 | "### Render the final Report"
842 | ]
843 | },
844 | {
845 | "cell_type": "code",
846 | "execution_count": null,
847 | "id": "12f53788-7446-4408-9b48-a2cf98222c9e",
848 | "metadata": {},
849 | "outputs": [],
850 | "source": [
851 | "display(Markdown(final_report))"
852 | ]
853 | }
854 | ],
855 | "metadata": {
856 | "kernelspec": {
857 | "display_name": "Python 3 (ipykernel)",
858 | "language": "python",
859 | "name": "python3"
860 | },
861 | "language_info": {
862 | "codemirror_mode": {
863 | "name": "ipython",
864 | "version": 3
865 | },
866 | "file_extension": ".py",
867 | "mimetype": "text/x-python",
868 | "name": "python",
869 | "nbconvert_exporter": "python",
870 | "pygments_lexer": "ipython3",
871 | "version": "3.12.8"
872 | }
873 | },
874 | "nbformat": 4,
875 | "nbformat_minor": 5
876 | }
877 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "deep-research-agent"
3 | version = "0.1.0"
4 | description = ""
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "openai",
9 | "tavily-python",
10 | "pydantic",
11 | "pydantic-settings"
12 | ]
13 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/reports/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/building_agents_from_scratch/deep_research_agent/reports/.gitkeep
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/agents.py:
--------------------------------------------------------------------------------
1 | import json
2 | from json.decoder import JSONDecodeError
3 | import openai
4 | from config import config
5 | from prompts import SYSTEM_PROMPT_REPORT_STRUCTURE, SYSTEM_PROMPT_FIRST_SEARCH, SYSTEM_PROMPT_FIRST_SUMMARY, SYSTEM_PROMPT_REFLECTION, \
6 | SYSTEM_PROMPT_REPORT_FORMATTING, SYSTEM_PROMPT_REFLECTION_SUMMARY
7 | from state import State, Paragraph, Research, Search
8 | from utils import clean_json_tags, remove_reasoning_from_output, clean_markdown_tags
9 |
10 |
class ReportStructureAgent:
    """Plans the report outline for a query and seeds the state with paragraphs."""

    def __init__(self, query: str):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL,
        )
        self.query = query

    def run(self) -> str:
        """Ask the reasoning model for a report structure; return the raw reply text."""
        chat = [
            {"role": "system", "content": SYSTEM_PROMPT_REPORT_STRUCTURE},
            {"role": "user", "content": self.query},
        ]
        completion = self.openai_client.chat.completions.create(
            model=config.LLM_REASONING,
            messages=chat,
        )
        return completion.choices[0].message.content

    def mutate_state(self, state: State) -> State:
        """Parse the planned structure and append one Paragraph per planned entry."""
        # Strip model reasoning and JSON code fences (see utils) before parsing.
        raw = clean_json_tags(remove_reasoning_from_output(self.run()))
        for entry in json.loads(raw):
            state.paragraphs.append(
                Paragraph(title=entry["title"], content=entry["content"])
            )
        return state
42 |
43 |
class FirstSearchAgent:
    """Generates the initial web-search query for a single report paragraph."""

    def __init__(self):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL
        )

    def run(self, message: str) -> dict:
        """Return the parsed model output, e.g. {"search_query": ..., "reasoning": ...}.

        The original annotation said ``-> str`` but the method returns the
        ``json.loads`` result (a dict) — the annotation is corrected here.
        Raises ``json.JSONDecodeError`` when the cleaned reply is not valid JSON.
        """
        response = self.openai_client.chat.completions.create(
            model=config.LLM_REGULAR,
            messages=[{"role": "system", "content": SYSTEM_PROMPT_FIRST_SEARCH},
                      {"role": "user", "content": message}]
        )

        # Strip model reasoning and JSON code fences (see utils) before parsing.
        output = remove_reasoning_from_output(response.choices[0].message.content)
        output = clean_json_tags(output)

        return json.loads(output)
67 |
68 |
class FirstSummaryAgent:
    """Writes the first draft of a paragraph from the initial search results."""

    def __init__(self):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL,
        )

    def run(self, message) -> str:
        """Return the raw LLM reply for the first-summary prompt."""
        chat = [
            {"role": "system", "content": SYSTEM_PROMPT_FIRST_SUMMARY},
            {"role": "user", "content": message},
        ]
        completion = self.openai_client.chat.completions.create(
            model=config.LLM_REGULAR,
            messages=chat,
        )
        return completion.choices[0].message.content

    def mutate_state(self, message: str, idx_paragraph: int, state: State) -> State:
        """Store the generated summary on the paragraph at ``idx_paragraph``."""
        raw = clean_json_tags(remove_reasoning_from_output(self.run(message)))
        try:
            parsed = json.loads(raw)
        except JSONDecodeError:
            # Fall back: treat the whole cleaned reply as the paragraph text.
            parsed = {"paragraph_latest_state": raw}
        state.paragraphs[idx_paragraph].research.latest_summary = parsed["paragraph_latest_state"]
        return state
101 |
102 |
class ReflectionAgent:
    """Reflects on a paragraph draft and proposes a follow-up search query."""

    def __init__(self):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL
        )

    def run(self, message: str) -> dict:
        """Return the parsed model output, e.g. {"search_query": ..., "reasoning": ...}.

        The original annotation said ``-> str`` but the method returns the
        ``json.loads`` result (a dict) — the annotation is corrected here.
        Raises ``json.JSONDecodeError`` when the cleaned reply is not valid JSON.
        """
        response = self.openai_client.chat.completions.create(
            model=config.LLM_REGULAR,
            messages=[{"role": "system", "content": SYSTEM_PROMPT_REFLECTION},
                      {"role": "user", "content": message}]
        )

        # Strip model reasoning and JSON code fences (see utils) before parsing.
        output = remove_reasoning_from_output(response.choices[0].message.content)
        output = clean_json_tags(output)

        return json.loads(output)
125 |
126 |
class ReflectionSummaryAgent:
    """Enriches an existing paragraph draft with reflection search results."""

    def __init__(self):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL,
        )

    def run(self, message) -> str:
        """Return the raw LLM reply for the reflection-summary prompt."""
        chat = [
            {"role": "system", "content": SYSTEM_PROMPT_REFLECTION_SUMMARY},
            {"role": "user", "content": message},
        ]
        completion = self.openai_client.chat.completions.create(
            model=config.LLM_REGULAR,
            messages=chat,
        )
        return completion.choices[0].message.content

    def mutate_state(self, message: str, idx_paragraph: int, state: State) -> State:
        """Write the enriched summary back onto the paragraph at ``idx_paragraph``."""
        raw = clean_json_tags(remove_reasoning_from_output(self.run(message)))
        try:
            parsed = json.loads(raw)
        except JSONDecodeError:
            # Fall back: treat the whole cleaned reply as the updated paragraph.
            parsed = {"updated_paragraph_latest_state": raw}
        state.paragraphs[idx_paragraph].research.latest_summary = parsed["updated_paragraph_latest_state"]
        return state
159 |
160 |
class ReportFormattingAgent:
    """Renders the final researched paragraphs into a Markdown report."""

    def __init__(self):
        # Client pointed at the SambaNova OpenAI-compatible endpoint.
        self.openai_client = openai.OpenAI(
            api_key=config.SAMBANOVA_API_KEY,
            base_url=config.SAMBANOVA_BASE_URL,
        )

    def run(self, message) -> str:
        """Return the formatted Markdown report for the given paragraph data."""
        completion = self.openai_client.chat.completions.create(
            model=config.LLM_REASONING,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT_REPORT_FORMATTING},
                {"role": "user", "content": message},
            ],
        )
        # Strip model reasoning and Markdown code fences (see utils).
        report = remove_reasoning_from_output(completion.choices[0].message.content)
        return clean_markdown_tags(report)
182 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/config.py:
--------------------------------------------------------------------------------
1 | from pydantic_settings import BaseSettings
2 |
3 |
class Config(BaseSettings):
    """Application settings, loaded from the environment by pydantic-settings."""

    # SambaNova OpenAI-compatible API credentials and endpoint.
    SAMBANOVA_API_KEY: str
    SAMBANOVA_BASE_URL: str
    # Model names: a reasoning-capable model and a regular chat model.
    LLM_REASONING: str
    LLM_REGULAR: str
    # API key for the Tavily web-search service.
    TAVILY_API_KEY: str

# Module-level singleton: importing this module validates the environment once.
config = Config()
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/prompts.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | ## Report Structure
4 |
# JSON schema the planner must emit: a list of {title, content} paragraphs.
output_schema_report_structure = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "content": {"type": "string"}
        }
    }
}

# The output schema is interpolated into the prompt so the model returns a
# machine-parseable paragraph list; the schema line was missing before.
SYSTEM_PROMPT_REPORT_STRUCTURE = f"""
You are a Deep Research assistant. Given a query, plan a structure for a report and the paragraphs to be included.
Make sure that the ordering of paragraphs makes sense.
Once the outline is created, you will be given tools to search the web and reflect for each of the section separately.
Format the output in json with the following json schema definition:


{json.dumps(output_schema_report_structure, indent=2)}


Title and content properties will be used for deeper research.
Make sure that the output is a json object with an output json schema defined above.
Only return the json object, no explanation or additional text.
"""
30 |
31 | ## First Search per paragraph
32 |
# Schema of the data handed to the first-search agent for one paragraph.
input_schema_first_search = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "content": {"type": "string"}
    }
}

# Schema the first-search agent must emit back.
output_schema_first_search = {
    "type": "object",
    "properties": {
        "search_query": {"type": "string"},
        "reasoning": {"type": "string"}
    }
}

# Both schemas are interpolated; the output-schema line was missing before.
SYSTEM_PROMPT_FIRST_SEARCH = f"""
You are a Deep Research assistant. You will be given a paragraph in a report, its title and expected content in the following json schema definition:


{json.dumps(input_schema_first_search, indent=2)}


You can use a web search tool that takes a 'search_query' as parameter.
Your job is to reflect on the topic and provide the most optimal web search query to enrich your current knowledge.
Format the output in json with the following json schema definition:


{json.dumps(output_schema_first_search, indent=2)}


Make sure that the output is a json object with an output json schema defined above.
Only return the json object, no explanation or additional text.
"""
67 |
68 | ## First Summary per paragraph
69 |
# Schema of the data handed to the first-summary agent: paragraph plan plus
# the query and raw results of the first search.
input_schema_first_summary = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "content": {"type": "string"},
        "search_query": {"type": "string"},
        "search_results": {
            "type": "array",
            "items": {"type": "string"}
        }
    }
}

# Schema the first-summary agent must emit back.
output_schema_first_summary = {
    "type": "object",
    "properties": {
        "paragraph_latest_state": {"type": "string"}
    }
}

# Both schemas are interpolated; the output-schema line was missing before.
SYSTEM_PROMPT_FIRST_SUMMARY = f"""
You are a Deep Research assistant. You will be given a search query, search results and the paragraph of a report that you are researching, following this json schema definition:


{json.dumps(input_schema_first_summary, indent=2)}


Your job is to write the paragraph as a researcher using the search results to align with the paragraph topic and structure it properly to be included in the report.
Format the output in json with the following json schema definition:


{json.dumps(output_schema_first_summary, indent=2)}


Make sure that the output is a json object with an output json schema defined above.
Only return the json object, no explanation or additional text.
"""
107 |
108 | ## Reflection per paragraph
109 |
# Schema of the data handed to the reflection agent: paragraph plan plus its
# latest drafted text.
input_schema_reflection = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "content": {"type": "string"},
        "paragraph_latest_state": {"type": "string"}
    }
}

# Schema the reflection agent must emit back.
output_schema_reflection = {
    "type": "object",
    "properties": {
        "search_query": {"type": "string"},
        "reasoning": {"type": "string"}
    }
}

# Both schemas are interpolated; the output-schema line was missing before.
SYSTEM_PROMPT_REFLECTION = f"""
You are a Deep Research assistant. You are responsible for constructing comprehensive paragraphs for a research report. You will be provided paragraph title and planned content summary, also the latest state of the paragraph that you have already created all in the following json schema definition:


{json.dumps(input_schema_reflection, indent=2)}


You can use a web search tool that takes a 'search_query' as parameter.
Your job is to reflect on the current state of the paragraph text and think if you haven't missed some critical aspect of the topic and provide the most optimal web search query to enrich the latest state.
Format the output in json with the following json schema definition:


{json.dumps(output_schema_reflection, indent=2)}


Make sure that the output is a json object with an output json schema defined above.
Only return the json object, no explanation or additional text.
"""
145 |
146 |
147 | ## Reflection Summary per paragraph
148 |
# Schema of the data handed to the reflection-summary agent: paragraph plan,
# the reflection search query/results, and the current draft.
input_schema_reflection_summary = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "content": {"type": "string"},
        "search_query": {"type": "string"},
        "search_results": {
            "type": "array",
            "items": {"type": "string"}
        },
        "paragraph_latest_state": {"type": "string"}
    }
}

# Schema the reflection-summary agent must emit back.
output_schema_reflection_summary = {
    "type": "object",
    "properties": {
        "updated_paragraph_latest_state": {"type": "string"}
    }
}

# Both schemas are interpolated; the output-schema line was missing before.
SYSTEM_PROMPT_REFLECTION_SUMMARY = f"""
You are a Deep Research assistant.
You will be given a search query, search results, paragraph title and expected content for the paragraph in a report that you are researching.
You are iterating on the paragraph and the latest state of the paragraph is also provided.
The data will be in the following json schema definition:


{json.dumps(input_schema_reflection_summary, indent=2)}


Your job is to enrich the current latest state of the paragraph with the search results considering expected content.
Do not remove key information from the latest state and try to enrich it, only add information that is missing.
Structure the paragraph properly to be included in the report.
Format the output in json with the following json schema definition:


{json.dumps(output_schema_reflection_summary, indent=2)}


Make sure that the output is a json object with an output json schema defined above.
Only return the json object, no explanation or additional text.
"""
192 |
193 | ## Report Formatting
194 |
# JSON schema for the formatter input: the list of finished paragraphs,
# each with its title and final researched text.
input_schema_report_formatting = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "paragraph_latest_state": {"type": "string"}
        }
    }
}

# BUGFIX: fixed typos in the prompt text: "assistan" -> "assistant",
# "construted" -> "constructed".
SYSTEM_PROMPT_REPORT_FORMATTING = f"""
You are a Deep Research assistant. You have already performed the research and constructed final versions of all paragraphs in the report.
You will get the data in the following json format:


{json.dumps(input_schema_report_formatting, indent=2)}


Your job is to format the Report nicely and return it in MarkDown.
If Conclusion paragraph is not present, add it to the end of the report from the latest state of the other paragraphs.
Use titles of the paragraphs to create a title for the report.
"""
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/state.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import List
3 |
@dataclass
class Search:
    """A single web-search hit: the source URL and its raw page content."""
    url: str = ""
    content: str = ""
8 |
@dataclass
class Research:
    """Accumulated research progress for one paragraph."""
    search_history: List[Search] = field(default_factory=list)  # every search hit gathered so far
    latest_summary: str = ""  # most recent synthesized text for the paragraph
    reflection_iteration: int = 0  # reflection round counter (presumably advanced by the agents — TODO confirm)
14 |
@dataclass
class Paragraph:
    """One planned report paragraph plus its research state."""
    title: str = ""
    content: str = ""  # description of the expected content for this paragraph
    research: Research = field(default_factory=Research)
20 |
@dataclass
class State:
    """Top-level pipeline state: the report title and all of its paragraphs."""
    report_title: str = ""
    paragraphs: List[Paragraph] = field(default_factory=list)
25 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/topology.py:
--------------------------------------------------------------------------------
import argparse
import json
import os
from datetime import datetime

from agents import ReportStructureAgent, FirstSearchAgent, FirstSummaryAgent, ReflectionAgent, ReflectionSummaryAgent, ReportFormattingAgent
from state import State
from utils import tavily_search, update_state_with_search_results
9 |
10 |
# Shared mutable pipeline state; populated by ReportStructureAgent and then
# enriched in place by the search/summary/reflection agents in main().
STATE = State()
QUERY="Tell me something interesting about human species"  # default research topic
NUM_REFLECTIONS = 2  # reflection rounds performed per paragraph
NUM_RESULTS_PER_SEARCH = 3  # Tavily results requested per query
CAP_SEARCH_LENGTH = 20000  # max characters of raw content kept per search result
16 |
17 |
def main(topic: str = QUERY):
    """Run the deep-research pipeline end to end for *topic*.

    Plans the report structure, then for every paragraph performs an initial
    search + summary followed by NUM_REFLECTIONS reflection rounds, and
    finally formats the report and writes it to ``reports/``.

    Args:
        topic: The research topic; defaults to the module-level QUERY.
    """
    report_structure_agent = ReportStructureAgent(topic)

    # Populate STATE.paragraphs with the planned report structure.
    _ = report_structure_agent.mutate_state(STATE)

    first_search_agent = FirstSearchAgent()
    first_summary_agent = FirstSummaryAgent()
    reflection_agent = ReflectionAgent()
    reflection_summary_agent = ReflectionSummaryAgent()
    report_formatting_agent = ReportFormattingAgent()

    print(f"Total Number of Paragraphs: {len(STATE.paragraphs)}")

    for idx, paragraph in enumerate(STATE.paragraphs, start=1):
        print(f"\nParagraph {idx}: {paragraph.title}")

    for j, paragraph in enumerate(STATE.paragraphs):

        print(f"\n\n==============Paragraph: {j+1}==============\n")
        print(f"=============={paragraph.title}==============\n")

        # ---- initial search ----------------------------------------------

        message = json.dumps(
            {
                "title": paragraph.title,
                "content": paragraph.content
            }
        )

        output = first_search_agent.run(message)

        search_results = tavily_search(output["search_query"], max_results=NUM_RESULTS_PER_SEARCH)

        _ = update_state_with_search_results(search_results, j, STATE)

        # ---- initial summary ---------------------------------------------

        message = {
            "title": paragraph.title,
            "content": paragraph.content,
            "search_query": search_results["query"],
            "search_results": [result["raw_content"][0:CAP_SEARCH_LENGTH] for result in search_results["results"] if result["raw_content"]]
        }

        _ = first_summary_agent.mutate_state(message=json.dumps(message), idx_paragraph=j, state=STATE)

        # ---- reflection rounds -------------------------------------------

        for i in range(NUM_REFLECTIONS):

            print(f"Running reflection: {i+1}")

            message = {"paragraph_latest_state": paragraph.research.latest_summary,
                       "title": paragraph.title,
                       "content": paragraph.content}

            output = reflection_agent.run(message=json.dumps(message))

            # BUGFIX: honour NUM_RESULTS_PER_SEARCH here too (previously used
            # tavily_search's default of 5, inconsistent with the first search).
            search_results = tavily_search(output["search_query"], max_results=NUM_RESULTS_PER_SEARCH)

            _ = update_state_with_search_results(search_results, j, STATE)

            message = {
                "title": paragraph.title,
                "content": paragraph.content,
                "search_query": search_results["query"],
                # BUGFIX: use CAP_SEARCH_LENGTH instead of a hard-coded 20000.
                "search_results": [result["raw_content"][0:CAP_SEARCH_LENGTH] for result in search_results["results"] if result["raw_content"]],
                "paragraph_latest_state": paragraph.research.latest_summary
            }

            _ = reflection_summary_agent.mutate_state(message=json.dumps(message), idx_paragraph=j, state=STATE)

    report_data = [{"title": p.title, "paragraph_latest_state": p.research.latest_summary} for p in STATE.paragraphs]

    final_report = report_formatting_agent.run(json.dumps(report_data))

    print(final_report)

    # Ensure the output directory exists before writing the report.
    os.makedirs("reports", exist_ok=True)
    with open(f"reports/report_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.md", "w") as f:
        f.write(final_report)
106 |
if __name__ == "__main__":
    # CLI entry point: accept an optional --topic flag, defaulting to QUERY.
    cli = argparse.ArgumentParser()
    cli.add_argument("--topic", type=str, default=QUERY)
    cli_args = cli.parse_args()

    main(cli_args.topic)
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/src/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from tavily import TavilyClient
4 | from state import Search
5 |
6 |
def remove_reasoning_from_output(output):
    """Strip a reasoning model's thinking preamble from *output*.

    Returns the text after the closing ``</think>`` tag, with surrounding
    whitespace removed; if no tag is present the whole stripped output is
    returned.

    BUGFIX: the separator passed to split() was empty (the tag text was
    lost), which raised ``ValueError: empty separator`` on every call.
    """
    return output.split("</think>")[-1].strip()
9 |
def clean_json_tags(text):
    """Remove the ```json fenced-code markers an LLM may wrap its reply in."""
    for marker in ("```json\n", "\n```"):
        text = text.replace(marker, "")
    return text
12 |
def clean_markdown_tags(text):
    """Remove the ```markdown fenced-code markers an LLM may wrap its reply in."""
    for marker in ("```markdown\n", "\n```"):
        text = text.replace(marker, "")
    return text
15 |
def tavily_search(query, include_raw_content=True, max_results=5):
    """Run a Tavily web search and return the raw API response dict.

    The API key is read from the TAVILY_API_KEY environment variable.
    """
    client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
    return client.search(
        query,
        include_raw_content=include_raw_content,
        max_results=max_results,
    )
23 |
def update_state_with_search_results(search_results, idx_paragraph, state):
    """Record every hit from *search_results* in the paragraph's search history.

    Mutates *state* in place and returns it for convenience.
    """
    history = state.paragraphs[idx_paragraph].research.search_history
    history.extend(
        Search(url=hit["url"], content=hit["raw_content"])
        for hit in search_results["results"]
    )

    return state
31 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/deep_research_agent/uv.lock:
--------------------------------------------------------------------------------
1 | version = 1
2 | requires-python = ">=3.12"
3 |
4 | [[package]]
5 | name = "annotated-types"
6 | version = "0.7.0"
7 | source = { registry = "https://pypi.org/simple" }
8 | sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
9 | wheels = [
10 | { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
11 | ]
12 |
13 | [[package]]
14 | name = "anyio"
15 | version = "4.8.0"
16 | source = { registry = "https://pypi.org/simple" }
17 | dependencies = [
18 | { name = "idna" },
19 | { name = "sniffio" },
20 | { name = "typing-extensions", marker = "python_full_version < '3.13'" },
21 | ]
22 | sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 }
23 | wheels = [
24 | { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 },
25 | ]
26 |
27 | [[package]]
28 | name = "certifi"
29 | version = "2025.1.31"
30 | source = { registry = "https://pypi.org/simple" }
31 | sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
32 | wheels = [
33 | { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
34 | ]
35 |
36 | [[package]]
37 | name = "charset-normalizer"
38 | version = "3.4.1"
39 | source = { registry = "https://pypi.org/simple" }
40 | sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 }
41 | wheels = [
42 | { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 },
43 | { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 },
44 | { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 },
45 | { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 },
46 | { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 },
47 | { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 },
48 | { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 },
49 | { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 },
50 | { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 },
51 | { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 },
52 | { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 },
53 | { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 },
54 | { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 },
55 | { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 },
56 | { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 },
57 | { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 },
58 | { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 },
59 | { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 },
60 | { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 },
61 | { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 },
62 | { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 },
63 | { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 },
64 | { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 },
65 | { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 },
66 | { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 },
67 | { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 },
68 | { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 },
69 | ]
70 |
71 | [[package]]
72 | name = "colorama"
73 | version = "0.4.6"
74 | source = { registry = "https://pypi.org/simple" }
75 | sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
76 | wheels = [
77 | { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
78 | ]
79 |
80 | [[package]]
81 | name = "deep-research-agent"
82 | version = "0.1.0"
83 | source = { virtual = "." }
84 | dependencies = [
85 | { name = "openai" },
86 | { name = "pydantic" },
87 | { name = "pydantic-settings" },
88 | { name = "tavily-python" },
89 | ]
90 |
91 | [package.metadata]
92 | requires-dist = [
93 | { name = "openai" },
94 | { name = "pydantic" },
95 | { name = "pydantic-settings" },
96 | { name = "tavily-python" },
97 | ]
98 |
99 | [[package]]
100 | name = "distro"
101 | version = "1.9.0"
102 | source = { registry = "https://pypi.org/simple" }
103 | sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 }
104 | wheels = [
105 | { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 },
106 | ]
107 |
108 | [[package]]
109 | name = "h11"
110 | version = "0.14.0"
111 | source = { registry = "https://pypi.org/simple" }
112 | sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 }
113 | wheels = [
114 | { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 },
115 | ]
116 |
117 | [[package]]
118 | name = "httpcore"
119 | version = "1.0.7"
120 | source = { registry = "https://pypi.org/simple" }
121 | dependencies = [
122 | { name = "certifi" },
123 | { name = "h11" },
124 | ]
125 | sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 }
126 | wheels = [
127 | { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 },
128 | ]
129 |
130 | [[package]]
131 | name = "httpx"
132 | version = "0.28.1"
133 | source = { registry = "https://pypi.org/simple" }
134 | dependencies = [
135 | { name = "anyio" },
136 | { name = "certifi" },
137 | { name = "httpcore" },
138 | { name = "idna" },
139 | ]
140 | sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 }
141 | wheels = [
142 | { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 },
143 | ]
144 |
145 | [[package]]
146 | name = "idna"
147 | version = "3.10"
148 | source = { registry = "https://pypi.org/simple" }
149 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
150 | wheels = [
151 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
152 | ]
153 |
154 | [[package]]
155 | name = "jiter"
156 | version = "0.8.2"
157 | source = { registry = "https://pypi.org/simple" }
158 | sdist = { url = "https://files.pythonhosted.org/packages/f8/70/90bc7bd3932e651486861df5c8ffea4ca7c77d28e8532ddefe2abc561a53/jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d", size = 163007 }
159 | wheels = [
160 | { url = "https://files.pythonhosted.org/packages/a1/17/c8747af8ea4e045f57d6cfd6fc180752cab9bc3de0e8a0c9ca4e8af333b1/jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f", size = 302027 },
161 | { url = "https://files.pythonhosted.org/packages/3c/c1/6da849640cd35a41e91085723b76acc818d4b7d92b0b6e5111736ce1dd10/jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44", size = 310326 },
162 | { url = "https://files.pythonhosted.org/packages/06/99/a2bf660d8ccffee9ad7ed46b4f860d2108a148d0ea36043fd16f4dc37e94/jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f", size = 334242 },
163 | { url = "https://files.pythonhosted.org/packages/a7/5f/cea1c17864828731f11427b9d1ab7f24764dbd9aaf4648a7f851164d2718/jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60", size = 356654 },
164 | { url = "https://files.pythonhosted.org/packages/e9/13/62774b7e5e7f5d5043efe1d0f94ead66e6d0f894ae010adb56b3f788de71/jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57", size = 379967 },
165 | { url = "https://files.pythonhosted.org/packages/ec/fb/096b34c553bb0bd3f2289d5013dcad6074948b8d55212aa13a10d44c5326/jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e", size = 389252 },
166 | { url = "https://files.pythonhosted.org/packages/17/61/beea645c0bf398ced8b199e377b61eb999d8e46e053bb285c91c3d3eaab0/jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887", size = 345490 },
167 | { url = "https://files.pythonhosted.org/packages/d5/df/834aa17ad5dcc3cf0118821da0a0cf1589ea7db9832589278553640366bc/jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d", size = 376991 },
168 | { url = "https://files.pythonhosted.org/packages/67/80/87d140399d382fb4ea5b3d56e7ecaa4efdca17cd7411ff904c1517855314/jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152", size = 510822 },
169 | { url = "https://files.pythonhosted.org/packages/5c/37/3394bb47bac1ad2cb0465601f86828a0518d07828a650722e55268cdb7e6/jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29", size = 503730 },
170 | { url = "https://files.pythonhosted.org/packages/f9/e2/253fc1fa59103bb4e3aa0665d6ceb1818df1cd7bf3eb492c4dad229b1cd4/jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e", size = 203375 },
171 | { url = "https://files.pythonhosted.org/packages/41/69/6d4bbe66b3b3b4507e47aa1dd5d075919ad242b4b1115b3f80eecd443687/jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c", size = 204740 },
172 | { url = "https://files.pythonhosted.org/packages/6c/b0/bfa1f6f2c956b948802ef5a021281978bf53b7a6ca54bb126fd88a5d014e/jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84", size = 301190 },
173 | { url = "https://files.pythonhosted.org/packages/a4/8f/396ddb4e292b5ea57e45ade5dc48229556b9044bad29a3b4b2dddeaedd52/jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4", size = 309334 },
174 | { url = "https://files.pythonhosted.org/packages/7f/68/805978f2f446fa6362ba0cc2e4489b945695940656edd844e110a61c98f8/jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587", size = 333918 },
175 | { url = "https://files.pythonhosted.org/packages/b3/99/0f71f7be667c33403fa9706e5b50583ae5106d96fab997fa7e2f38ee8347/jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c", size = 356057 },
176 | { url = "https://files.pythonhosted.org/packages/8d/50/a82796e421a22b699ee4d2ce527e5bcb29471a2351cbdc931819d941a167/jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18", size = 379790 },
177 | { url = "https://files.pythonhosted.org/packages/3c/31/10fb012b00f6d83342ca9e2c9618869ab449f1aa78c8f1b2193a6b49647c/jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6", size = 388285 },
178 | { url = "https://files.pythonhosted.org/packages/c8/81/f15ebf7de57be488aa22944bf4274962aca8092e4f7817f92ffa50d3ee46/jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef", size = 344764 },
179 | { url = "https://files.pythonhosted.org/packages/b3/e8/0cae550d72b48829ba653eb348cdc25f3f06f8a62363723702ec18e7be9c/jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1", size = 376620 },
180 | { url = "https://files.pythonhosted.org/packages/b8/50/e5478ff9d82534a944c03b63bc217c5f37019d4a34d288db0f079b13c10b/jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9", size = 510402 },
181 | { url = "https://files.pythonhosted.org/packages/8e/1e/3de48bbebbc8f7025bd454cedc8c62378c0e32dd483dece5f4a814a5cb55/jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05", size = 503018 },
182 | { url = "https://files.pythonhosted.org/packages/d5/cd/d5a5501d72a11fe3e5fd65c78c884e5164eefe80077680533919be22d3a3/jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a", size = 203190 },
183 | { url = "https://files.pythonhosted.org/packages/51/bf/e5ca301245ba951447e3ad677a02a64a8845b185de2603dabd83e1e4b9c6/jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865", size = 203551 },
184 | { url = "https://files.pythonhosted.org/packages/2f/3c/71a491952c37b87d127790dd7a0b1ebea0514c6b6ad30085b16bbe00aee6/jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca", size = 308347 },
185 | { url = "https://files.pythonhosted.org/packages/a0/4c/c02408042e6a7605ec063daed138e07b982fdb98467deaaf1c90950cf2c6/jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0", size = 342875 },
186 | { url = "https://files.pythonhosted.org/packages/91/61/c80ef80ed8a0a21158e289ef70dac01e351d929a1c30cb0f49be60772547/jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566", size = 202374 },
187 | ]
188 |
189 | [[package]]
190 | name = "openai"
191 | version = "1.65.4"
192 | source = { registry = "https://pypi.org/simple" }
193 | dependencies = [
194 | { name = "anyio" },
195 | { name = "distro" },
196 | { name = "httpx" },
197 | { name = "jiter" },
198 | { name = "pydantic" },
199 | { name = "sniffio" },
200 | { name = "tqdm" },
201 | { name = "typing-extensions" },
202 | ]
203 | sdist = { url = "https://files.pythonhosted.org/packages/fa/8d/1f7aace801afbbe4d6b8c7fa89b76eb9a3a8eeff38b84d4005d47b226b30/openai-1.65.4.tar.gz", hash = "sha256:0b08c58625d556f5c6654701af1023689c173eb0989ce8f73c7fd0eb22203c76", size = 359365 }
204 | wheels = [
205 | { url = "https://files.pythonhosted.org/packages/ba/db/7bab832be24631a793492c1c61ecbf029018b99696f435db3b63d690bf1c/openai-1.65.4-py3-none-any.whl", hash = "sha256:15566d46574b94eae3d18efc2f9a4ebd1366d1d44bfc1bdafeea7a5cf8271bcb", size = 473523 },
206 | ]
207 |
208 | [[package]]
209 | name = "pydantic"
210 | version = "2.10.6"
211 | source = { registry = "https://pypi.org/simple" }
212 | dependencies = [
213 | { name = "annotated-types" },
214 | { name = "pydantic-core" },
215 | { name = "typing-extensions" },
216 | ]
217 | sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 }
218 | wheels = [
219 | { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 },
220 | ]
221 |
222 | [[package]]
223 | name = "pydantic-core"
224 | version = "2.27.2"
225 | source = { registry = "https://pypi.org/simple" }
226 | dependencies = [
227 | { name = "typing-extensions" },
228 | ]
229 | sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 }
230 | wheels = [
231 | { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 },
232 | { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 },
233 | { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 },
234 | { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 },
235 | { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 },
236 | { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 },
237 | { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 },
238 | { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 },
239 | { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 },
240 | { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 },
241 | { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 },
242 | { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 },
243 | { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 },
244 | { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 },
245 | { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 },
246 | { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 },
247 | { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 },
248 | { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 },
249 | { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 },
250 | { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 },
251 | { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 },
252 | { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 },
253 | { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 },
254 | { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 },
255 | { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 },
256 | { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 },
257 | { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 },
258 | { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 },
259 | ]
260 |
261 | [[package]]
262 | name = "pydantic-settings"
263 | version = "2.8.1"
264 | source = { registry = "https://pypi.org/simple" }
265 | dependencies = [
266 | { name = "pydantic" },
267 | { name = "python-dotenv" },
268 | ]
269 | sdist = { url = "https://files.pythonhosted.org/packages/88/82/c79424d7d8c29b994fb01d277da57b0a9b09cc03c3ff875f9bd8a86b2145/pydantic_settings-2.8.1.tar.gz", hash = "sha256:d5c663dfbe9db9d5e1c646b2e161da12f0d734d422ee56f567d0ea2cee4e8585", size = 83550 }
270 | wheels = [
271 | { url = "https://files.pythonhosted.org/packages/0b/53/a64f03044927dc47aafe029c42a5b7aabc38dfb813475e0e1bf71c4a59d0/pydantic_settings-2.8.1-py3-none-any.whl", hash = "sha256:81942d5ac3d905f7f3ee1a70df5dfb62d5569c12f51a5a647defc1c3d9ee2e9c", size = 30839 },
272 | ]
273 |
274 | [[package]]
275 | name = "python-dotenv"
276 | version = "1.0.1"
277 | source = { registry = "https://pypi.org/simple" }
278 | sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 }
279 | wheels = [
280 | { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 },
281 | ]
282 |
283 | [[package]]
284 | name = "regex"
285 | version = "2024.11.6"
286 | source = { registry = "https://pypi.org/simple" }
287 | sdist = { url = "https://files.pythonhosted.org/packages/8e/5f/bd69653fbfb76cf8604468d3b4ec4c403197144c7bfe0e6a5fc9e02a07cb/regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519", size = 399494 }
288 | wheels = [
289 | { url = "https://files.pythonhosted.org/packages/ba/30/9a87ce8336b172cc232a0db89a3af97929d06c11ceaa19d97d84fa90a8f8/regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a", size = 483781 },
290 | { url = "https://files.pythonhosted.org/packages/01/e8/00008ad4ff4be8b1844786ba6636035f7ef926db5686e4c0f98093612add/regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9", size = 288455 },
291 | { url = "https://files.pythonhosted.org/packages/60/85/cebcc0aff603ea0a201667b203f13ba75d9fc8668fab917ac5b2de3967bc/regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2", size = 284759 },
292 | { url = "https://files.pythonhosted.org/packages/94/2b/701a4b0585cb05472a4da28ee28fdfe155f3638f5e1ec92306d924e5faf0/regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4", size = 794976 },
293 | { url = "https://files.pythonhosted.org/packages/4b/bf/fa87e563bf5fee75db8915f7352e1887b1249126a1be4813837f5dbec965/regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577", size = 833077 },
294 | { url = "https://files.pythonhosted.org/packages/a1/56/7295e6bad94b047f4d0834e4779491b81216583c00c288252ef625c01d23/regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3", size = 823160 },
295 | { url = "https://files.pythonhosted.org/packages/fb/13/e3b075031a738c9598c51cfbc4c7879e26729c53aa9cca59211c44235314/regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e", size = 796896 },
296 | { url = "https://files.pythonhosted.org/packages/24/56/0b3f1b66d592be6efec23a795b37732682520b47c53da5a32c33ed7d84e3/regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe", size = 783997 },
297 | { url = "https://files.pythonhosted.org/packages/f9/a1/eb378dada8b91c0e4c5f08ffb56f25fcae47bf52ad18f9b2f33b83e6d498/regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e", size = 781725 },
298 | { url = "https://files.pythonhosted.org/packages/83/f2/033e7dec0cfd6dda93390089864732a3409246ffe8b042e9554afa9bff4e/regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29", size = 789481 },
299 | { url = "https://files.pythonhosted.org/packages/83/23/15d4552ea28990a74e7696780c438aadd73a20318c47e527b47a4a5a596d/regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39", size = 852896 },
300 | { url = "https://files.pythonhosted.org/packages/e3/39/ed4416bc90deedbfdada2568b2cb0bc1fdb98efe11f5378d9892b2a88f8f/regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51", size = 860138 },
301 | { url = "https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692 },
302 | { url = "https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135 },
303 | { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567 },
304 | { url = "https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525 },
305 | { url = "https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324 },
306 | { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617 },
307 | { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023 },
308 | { url = "https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072 },
309 | { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130 },
310 | { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857 },
311 | { url = "https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006 },
312 | { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650 },
313 | { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545 },
314 | { url = "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", size = 853045 },
315 | { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182 },
316 | { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733 },
317 | { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122 },
318 | { url = "https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545 },
319 | ]
320 |
321 | [[package]]
322 | name = "requests"
323 | version = "2.32.3"
324 | source = { registry = "https://pypi.org/simple" }
325 | dependencies = [
326 | { name = "certifi" },
327 | { name = "charset-normalizer" },
328 | { name = "idna" },
329 | { name = "urllib3" },
330 | ]
331 | sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
332 | wheels = [
333 | { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
334 | ]
335 |
336 | [[package]]
337 | name = "sniffio"
338 | version = "1.3.1"
339 | source = { registry = "https://pypi.org/simple" }
340 | sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
341 | wheels = [
342 | { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
343 | ]
344 |
345 | [[package]]
346 | name = "tavily-python"
347 | version = "0.5.1"
348 | source = { registry = "https://pypi.org/simple" }
349 | dependencies = [
350 | { name = "httpx" },
351 | { name = "requests" },
352 | { name = "tiktoken" },
353 | ]
354 | sdist = { url = "https://files.pythonhosted.org/packages/db/ff/ba1a3769c34d022aeba544ff7b18cbcd0d23a6358fc3566b2101c6bf2817/tavily_python-0.5.1.tar.gz", hash = "sha256:44b0eefe79a057cd11d3cd03780b63b4913400122350e38285acfb502c2fffc1", size = 107503 }
355 | wheels = [
356 | { url = "https://files.pythonhosted.org/packages/a5/cd/71088461d7720128c78802289b3b36298f42745e5f8c334b0ffc157b881e/tavily_python-0.5.1-py3-none-any.whl", hash = "sha256:169601f703c55cf338758dcacfa7102473b479a9271d65a3af6fc3668990f757", size = 43767 },
357 | ]
358 |
359 | [[package]]
360 | name = "tiktoken"
361 | version = "0.9.0"
362 | source = { registry = "https://pypi.org/simple" }
363 | dependencies = [
364 | { name = "regex" },
365 | { name = "requests" },
366 | ]
367 | sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991 }
368 | wheels = [
369 | { url = "https://files.pythonhosted.org/packages/cf/e5/21ff33ecfa2101c1bb0f9b6df750553bd873b7fb532ce2cb276ff40b197f/tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03", size = 1065073 },
370 | { url = "https://files.pythonhosted.org/packages/8e/03/a95e7b4863ee9ceec1c55983e4cc9558bcfd8f4f80e19c4f8a99642f697d/tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210", size = 1008075 },
371 | { url = "https://files.pythonhosted.org/packages/40/10/1305bb02a561595088235a513ec73e50b32e74364fef4de519da69bc8010/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794", size = 1140754 },
372 | { url = "https://files.pythonhosted.org/packages/1b/40/da42522018ca496432ffd02793c3a72a739ac04c3794a4914570c9bb2925/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22", size = 1196678 },
373 | { url = "https://files.pythonhosted.org/packages/5c/41/1e59dddaae270ba20187ceb8aa52c75b24ffc09f547233991d5fd822838b/tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2", size = 1259283 },
374 | { url = "https://files.pythonhosted.org/packages/5b/64/b16003419a1d7728d0d8c0d56a4c24325e7b10a21a9dd1fc0f7115c02f0a/tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16", size = 894897 },
375 | { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919 },
376 | { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877 },
377 | { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095 },
378 | { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649 },
379 | { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465 },
380 | { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669 },
381 | ]
382 |
383 | [[package]]
384 | name = "tqdm"
385 | version = "4.67.1"
386 | source = { registry = "https://pypi.org/simple" }
387 | dependencies = [
388 | { name = "colorama", marker = "sys_platform == 'win32'" },
389 | ]
390 | sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 }
391 | wheels = [
392 | { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 },
393 | ]
394 |
395 | [[package]]
396 | name = "typing-extensions"
397 | version = "4.12.2"
398 | source = { registry = "https://pypi.org/simple" }
399 | sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 }
400 | wheels = [
401 | { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 },
402 | ]
403 |
404 | [[package]]
405 | name = "urllib3"
406 | version = "2.3.0"
407 | source = { registry = "https://pypi.org/simple" }
408 | sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 }
409 | wheels = [
410 | { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 },
411 | ]
412 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/README.md:
--------------------------------------------------------------------------------
1 | # Planning Strategies
2 |
3 |
4 |
5 |
6 |
7 | Implementation of various planning strategies from scratch for AI agents.
8 |
9 | ## Implementations
10 |
11 | - [Reflection](reflection/)
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/reflection/README.md:
--------------------------------------------------------------------------------
1 | This is the second part of the "Building Agents from scratch" series where we will implement Reflection pattern together with a simple version of Working Memory from scratch without using any LLM orchestration frameworks.
2 |
3 |
4 |
5 |
6 |
7 | You can find the detailed walkthrough of this project in my [Newsletter](https://www.newsletter.swirlai.com/p/building-ai-agents-from-scratch-part-8ca).
8 |
9 |
10 | ## Installing dependencies
11 |
12 | [uv](https://github.com/astral-sh/uv) is a great tool for Python dependency management. To install the requirements:
13 |
14 | ```bash
15 | uv pip install -r requirements.txt
16 | ```
17 |
18 | ## Running the code
19 |
20 | First, export your OpenAI API key:
21 |
22 | ```bash
23 | export OPENAI_API_KEY=your_api_key
24 | ```
25 |
26 | > Note: Support for other LLM providers (like Anthropic, Mistral, etc.) is coming soon! This will allow you to run the examples with your preferred model provider.
27 |
28 | Then you can run the example:
29 |
30 | ```bash
31 | python3 src/main.py
32 | ```
33 |
34 | ## Interactive Notebook
35 |
36 | For a more interactive learning experience, you can follow along with the Jupyter notebook in the [notebooks](notebooks) folder. While detailed documentation is still being worked on, you can find the complete implementation and follow the code there.
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/reflection/notebooks/reflection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "id": "fcffe431-d534-4d3f-b057-164b0554fbd6",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from typing import Dict, List, Any\n",
11 | "from dataclasses import dataclass\n",
12 | "from datetime import datetime\n",
13 | "import openai\n",
14 | "import os\n",
15 | "import json"
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "id": "7632875d-4090-4a18-8012-8d325d3c75d0",
21 | "metadata": {},
22 | "source": [
23 | "#### You will need to set OPENAI_API_KEY environment variable in order to run the example successfully"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "id": "d59e853a-bd3a-46ea-88c2-7be07246ca2e",
29 | "metadata": {},
30 | "source": [
31 | "### Interaction (element of Working Memory) dataclass"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "id": "433305a5-ba6b-4b1e-9144-7d11100e486a",
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "@dataclass\n",
42 | "class Interaction:\n",
43 | " \"\"\"Record of a single interaction with the agent\"\"\"\n",
44 | " timestamp: datetime\n",
45 | " query: str\n",
46 | " plan: Dict[str, Any]"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "id": "1d538c64-8434-4a18-ba8b-cc33c9b2cb50",
52 | "metadata": {},
53 | "source": [
54 | "### Agent class"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "id": "cea8f690-9e51-498e-bfae-bb60415bdfd3",
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "class Agent:\n",
65 | " def __init__(self, model: str = \"gpt-4o-mini\"):\n",
66 | " \"\"\"Initialize Agent with empty interaction history.\"\"\"\n",
67 | " self.client = openai.OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
68 | " self.interactions: List[Interaction] = [] # Working memory\n",
69 | " self.model = model\n",
70 | "\n",
71 | " def create_system_prompt(self) -> str:\n",
72 | " \"\"\"Create the system prompt for the LLM with available tools.\"\"\"\n",
73 | " tools_json = {\n",
74 | " \"role\": \"AI Assistant\",\n",
75 | " \"capabilities\": [\n",
76 | " \"Using provided tools to help users when necessary\",\n",
77 | " \"Responding directly without tools for questions that don't require tool usage\",\n",
78 | " \"Planning efficient tool usage sequences\",\n",
79 | " \"If asked by the user, reflecting on the plan and suggesting changes if needed\"\n",
80 | " ],\n",
81 | " \"instructions\": [\n",
82 | " \"Use tools only when they are necessary for the task\",\n",
83 | " \"If a query can be answered directly, respond with a simple message instead of using tools\",\n",
84 | " \"When tools are needed, plan their usage efficiently to minimize tool calls\",\n",
85 | " \"If asked by the user, reflect on the plan and suggest changes if needed\"\n",
86 | " ],\n",
87 | " \"tools\": [\n",
88 | " {\n",
89 | " \"name\": \"convert_currency\",\n",
90 | " \"description\": \"Converts currency using latest exchange rates.\",\n",
91 | " \"parameters\": {\n",
92 | " \"amount\": {\n",
93 | " \"type\": \"float\",\n",
94 | " \"description\": \"Amount to convert\"\n",
95 | " },\n",
96 | " \"from_currency\": {\n",
97 | " \"type\": \"str\",\n",
98 | " \"description\": \"Source currency code (e.g., USD)\"\n",
99 | " },\n",
100 | " \"to_currency\": {\n",
101 | " \"type\": \"str\",\n",
102 | " \"description\": \"Target currency code (e.g., EUR)\"\n",
103 | " }\n",
104 | " }\n",
105 | " }\n",
106 | " ],\n",
107 | " \"response_format\": {\n",
108 | " \"type\": \"json\",\n",
109 | " \"schema\": {\n",
110 | " \"requires_tools\": {\n",
111 | " \"type\": \"boolean\",\n",
112 | " \"description\": \"whether tools are needed for this query\"\n",
113 | " },\n",
114 | " \"direct_response\": {\n",
115 | " \"type\": \"string\",\n",
116 | " \"description\": \"response when no tools are needed\",\n",
117 | " \"optional\": True\n",
118 | " },\n",
119 | " \"thought\": {\n",
120 | " \"type\": \"string\", \n",
121 | " \"description\": \"reasoning about how to solve the task (when tools are needed)\",\n",
122 | " \"optional\": True\n",
123 | " },\n",
124 | " \"plan\": {\n",
125 | " \"type\": \"array\",\n",
126 | " \"items\": {\"type\": \"string\"},\n",
127 | " \"description\": \"steps to solve the task (when tools are needed)\",\n",
128 | " \"optional\": True\n",
129 | " },\n",
130 | " \"tool_calls\": {\n",
131 | " \"type\": \"array\",\n",
132 | " \"items\": {\n",
133 | " \"type\": \"object\",\n",
134 | " \"properties\": {\n",
135 | " \"tool\": {\n",
136 | " \"type\": \"string\",\n",
137 | " \"description\": \"name of the tool\"\n",
138 | " },\n",
139 | " \"args\": {\n",
140 | " \"type\": \"object\",\n",
141 | " \"description\": \"parameters for the tool\"\n",
142 | " }\n",
143 | " }\n",
144 | " },\n",
145 | " \"description\": \"tools to call in sequence (when tools are needed)\",\n",
146 | " \"optional\": True\n",
147 | " }\n",
148 | " },\n",
149 | " \"examples\": [\n",
150 | " {\n",
151 | " \"query\": \"Convert 100 USD to EUR\",\n",
152 | " \"response\": {\n",
153 | " \"requires_tools\": True,\n",
154 | " \"thought\": \"I need to use the currency conversion tool to convert USD to EUR\",\n",
155 | " \"plan\": [\n",
156 | " \"Use convert_currency tool to convert 100 USD to EUR\",\n",
157 | " \"Return the conversion result\"\n",
158 | " ],\n",
159 | " \"tool_calls\": [\n",
160 | " {\n",
161 | " \"tool\": \"convert_currency\",\n",
162 | " \"args\": {\n",
163 | " \"amount\": 100,\n",
164 | " \"from_currency\": \"USD\", \n",
165 | " \"to_currency\": \"EUR\"\n",
166 | " }\n",
167 | " }\n",
168 | " ]\n",
169 | " }\n",
170 | " },\n",
171 | " {\n",
172 | " \"query\": \"What's 500 Japanese Yen in British Pounds?\",\n",
173 | " \"response\": {\n",
174 | " \"requires_tools\": True,\n",
175 | " \"thought\": \"I need to convert JPY to GBP using the currency converter\",\n",
176 | " \"plan\": [\n",
177 | " \"Use convert_currency tool to convert 500 JPY to GBP\",\n",
178 | " \"Return the conversion result\"\n",
179 | " ],\n",
180 | " \"tool_calls\": [\n",
181 | " {\n",
182 | " \"tool\": \"convert_currency\",\n",
183 | " \"args\": {\n",
184 | " \"amount\": 500,\n",
185 | " \"from_currency\": \"JPY\",\n",
186 | " \"to_currency\": \"GBP\"\n",
187 | " }\n",
188 | " }\n",
189 | " ]\n",
190 | " }\n",
191 | " },\n",
192 | " {\n",
193 | " \"query\": \"What currency does Japan use?\",\n",
194 | " \"response\": {\n",
195 | " \"requires_tools\": False,\n",
196 | " \"direct_response\": \"Japan uses the Japanese Yen (JPY) as its official currency. This is common knowledge that doesn't require using the currency conversion tool.\"\n",
197 | " }\n",
198 | " }\n",
199 | " ]\n",
200 | " }\n",
201 | " }\n",
202 | " \n",
203 | " return f\"\"\"You are an AI assistant that helps users by providing direct answers or using tools when necessary.\n",
204 | "Configuration, instructions, and available tools are provided in JSON format below:\n",
205 | "\n",
206 | "{json.dumps(tools_json, indent=2)}\n",
207 | "\n",
208 | "Always respond with a JSON object following the response_format schema above. \n",
209 | "Remember to use tools only when they are actually needed for the task.\"\"\"\n",
210 | "\n",
211 | " def plan(self, user_query: str) -> Dict:\n",
212 | " \"\"\"Use LLM to create a plan and store it in memory.\"\"\"\n",
213 | " messages = [\n",
214 | " {\"role\": \"system\", \"content\": self.create_system_prompt()},\n",
215 | " {\"role\": \"user\", \"content\": user_query}\n",
216 | " ]\n",
217 | " \n",
218 | " response = self.client.chat.completions.create(\n",
219 | " model=self.model,\n",
220 | " messages=messages,\n",
221 | " temperature=0\n",
222 | " )\n",
223 | " \n",
224 | " try:\n",
225 | " plan = json.loads(response.choices[0].message.content)\n",
226 | " # Store the interaction immediately after planning\n",
227 | " interaction = Interaction(\n",
228 | " timestamp=datetime.now(),\n",
229 | " query=user_query,\n",
230 | " plan=plan\n",
231 | " )\n",
232 | " self.interactions.append(interaction)\n",
233 | " return plan\n",
234 | " except json.JSONDecodeError:\n",
235 | " raise ValueError(\"Failed to parse LLM response as JSON\")\n",
236 | "\n",
237 | " def reflect_on_plan(self) -> Dict[str, Any]:\n",
238 | " \"\"\"Reflect on the most recent plan using interaction history.\"\"\"\n",
239 | " if not self.interactions:\n",
240 | " return {\"reflection\": \"No plan to reflect on\", \"requires_changes\": False}\n",
241 | " \n",
242 | " latest_interaction = self.interactions[-1]\n",
243 | " \n",
244 | " reflection_prompt = {\n",
245 | " \"task\": \"reflection\",\n",
246 | " \"context\": {\n",
247 | " \"user_query\": latest_interaction.query,\n",
248 | " \"generated_plan\": latest_interaction.plan\n",
249 | " },\n",
250 | " \"instructions\": [\n",
251 | " \"Review the generated plan for potential improvements\",\n",
252 | " \"Consider if the chosen tools are appropriate\",\n",
253 | " \"Verify tool parameters are correct\",\n",
254 | " \"Check if the plan is efficient\",\n",
255 | " \"Determine if tools are actually needed\"\n",
256 | " ],\n",
257 | " \"response_format\": {\n",
258 | " \"type\": \"json\",\n",
259 | " \"schema\": {\n",
260 | " \"requires_changes\": {\n",
261 | " \"type\": \"boolean\",\n",
262 | " \"description\": \"whether the plan needs modifications\"\n",
263 | " },\n",
264 | " \"reflection\": {\n",
265 | " \"type\": \"string\",\n",
266 | " \"description\": \"explanation of what changes are needed or why no changes are needed\"\n",
267 | " },\n",
268 | " \"suggestions\": {\n",
269 | " \"type\": \"array\",\n",
270 | " \"items\": {\"type\": \"string\"},\n",
271 | " \"description\": \"specific suggestions for improvements\",\n",
272 | " \"optional\": True\n",
273 | " }\n",
274 | " }\n",
275 | " }\n",
276 | " }\n",
277 | " \n",
278 | " messages = [\n",
279 | " {\"role\": \"system\", \"content\": self.create_system_prompt()},\n",
280 | " {\"role\": \"user\", \"content\": json.dumps(reflection_prompt, indent=2)}\n",
281 | " ]\n",
282 | " \n",
283 | " response = self.client.chat.completions.create(\n",
284 | " model=self.model,\n",
285 | " messages=messages,\n",
286 | " temperature=0\n",
287 | " )\n",
288 | " \n",
289 | " try:\n",
290 | " return json.loads(response.choices[0].message.content)\n",
291 | " except json.JSONDecodeError:\n",
292 | " return {\"reflection\": response.choices[0].message.content}\n",
293 | "\n",
294 | " def execute(self, user_query: str) -> str:\n",
295 | " \"\"\"Execute the full pipeline: plan, reflect, and potentially replan.\"\"\"\n",
296 | " try:\n",
297 | " # Create initial plan (this also stores it in memory)\n",
298 | " initial_plan = self.plan(user_query)\n",
299 | " \n",
300 | " # Reflect on the plan using memory\n",
301 | " reflection = self.reflect_on_plan()\n",
302 | " \n",
303 | " # Check if reflection suggests changes\n",
304 | " if reflection.get(\"requires_changes\", False):\n",
305 | " # Generate new plan based on reflection\n",
306 | " messages = [\n",
307 | " {\"role\": \"system\", \"content\": self.create_system_prompt()},\n",
308 | " {\"role\": \"user\", \"content\": user_query},\n",
309 | " {\"role\": \"assistant\", \"content\": json.dumps(initial_plan)},\n",
310 | " {\"role\": \"user\", \"content\": f\"Please revise the plan based on this feedback: {json.dumps(reflection)}\"}\n",
311 | " ]\n",
312 | " \n",
313 | " response = self.client.chat.completions.create(\n",
314 | " model=self.model,\n",
315 | " messages=messages,\n",
316 | " temperature=0\n",
317 | " )\n",
318 | " \n",
319 | " try:\n",
320 | " final_plan = json.loads(response.choices[0].message.content)\n",
321 | " except json.JSONDecodeError:\n",
322 | " final_plan = initial_plan # Fallback to initial plan if parsing fails\n",
323 | " else:\n",
324 | " final_plan = initial_plan\n",
325 | " \n",
326 | " # Update the stored interaction with all information\n",
327 | " self.interactions[-1].plan = {\n",
328 | " \"initial_plan\": initial_plan,\n",
329 | " \"reflection\": reflection,\n",
330 | " \"final_plan\": final_plan\n",
331 | " }\n",
332 | " \n",
333 | " # Return the appropriate response\n",
334 | " if final_plan.get(\"requires_tools\", True):\n",
335 | " return f\"\"\"Initial Thought: {initial_plan['thought']}\n",
336 | "Initial Plan: {'. '.join(initial_plan['plan'])}\n",
337 | "Reflection: {reflection.get('reflection', 'No improvements suggested')}\n",
338 | "Final Plan: {'. '.join(final_plan['plan'])}\"\"\"\n",
339 | " else:\n",
340 | " return f\"\"\"Response: {final_plan['direct_response']}\n",
341 | "Reflection: {reflection.get('reflection', 'No improvements suggested')}\"\"\"\n",
342 | " \n",
343 | " except Exception as e:\n",
344 | " return f\"Error executing plan: {str(e)}\""
345 | ]
346 | },
347 | {
348 | "cell_type": "markdown",
349 | "id": "a38f7147-c451-45d1-b6bf-31ce5cd845aa",
350 | "metadata": {},
351 | "source": [
352 | "### Create and run the Agent"
353 | ]
354 | },
355 | {
356 | "cell_type": "code",
357 | "execution_count": null,
358 | "id": "e197bb48-d910-4f89-b4e4-5e5fc542b90b",
359 | "metadata": {},
360 | "outputs": [],
361 | "source": [
362 | "agent = Agent(model=\"gpt-4o-mini\")\n",
363 | " \n",
364 | "query_list = [\"I am traveling to Japan from Lithuania, I have 1500 of local currency, how much of Japanese currency will I be able to get?\"]\n",
365 | "\n",
366 | "for query in query_list:\n",
367 | " print(f\"\\nQuery: {query}\")\n",
368 | " result = agent.execute(query)\n",
369 | " print(result)"
370 | ]
371 | }
372 | ],
373 | "metadata": {
374 | "kernelspec": {
375 | "display_name": "Python 3 (ipykernel)",
376 | "language": "python",
377 | "name": "python3"
378 | },
379 | "language_info": {
380 | "codemirror_mode": {
381 | "name": "ipython",
382 | "version": 3
383 | },
384 | "file_extension": ".py",
385 | "mimetype": "text/x-python",
386 | "name": "python",
387 | "nbconvert_exporter": "python",
388 | "pygments_lexer": "ipython3",
389 | "version": "3.11.5"
390 | }
391 | },
392 | "nbformat": 4,
393 | "nbformat_minor": 5
394 | }
395 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/reflection/requirements.txt:
--------------------------------------------------------------------------------
1 | openai>=1.0.0
2 | python-dotenv>=1.0.0
3 | pydantic>=2.0.0
4 | typing-extensions>=4.0.0
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/reflection/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/building_agents_from_scratch/planning/reflection/src/__init__.py
--------------------------------------------------------------------------------
/building_agents_from_scratch/planning/reflection/src/main.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Any
2 | from dataclasses import dataclass
3 | from datetime import datetime
4 | import openai
5 | import os
6 | import json
7 |
@dataclass
class Interaction:
    """Record of a single interaction with the agent"""
    # When the plan was created (set via datetime.now() in Agent.plan)
    timestamp: datetime
    # The raw user query that triggered the plan
    query: str
    # The JSON plan parsed from the LLM response; Agent.execute later
    # overwrites this with {"initial_plan", "reflection", "final_plan"}
    plan: Dict[str, Any]
14 |
class Agent:
    """Plan-and-reflect agent.

    Drafts a JSON tool-use plan with an LLM, asks the LLM to critique
    the plan, and regenerates it when the critique requests changes.
    Past interactions are kept in a simple in-memory list.
    """

    def __init__(self, model: str = "gpt-4o-mini"):
        """Initialize Agent with empty interaction history.

        Parameters:
        - model: OpenAI chat model name used for all completions.
        """
        self.client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.interactions: List[Interaction] = []  # Working memory
        self.model = model

    def _call_llm(self, messages: List[Dict[str, str]]) -> str:
        """Run one chat completion and return the raw message text.

        Centralized so the model choice and temperature=0 (deterministic
        output) stay consistent across planning, reflection and revision.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0
        )
        return response.choices[0].message.content

    def create_system_prompt(self) -> str:
        """Create the system prompt for the LLM with available tools."""
        # The whole agent contract (capabilities, tool schema, expected
        # response schema and few-shot examples) is embedded as JSON so
        # the model can be told to reply with a matching JSON object.
        tools_json = {
            "role": "AI Assistant",
            "capabilities": [
                "Using provided tools to help users when necessary",
                "Responding directly without tools for questions that don't require tool usage",
                "Planning efficient tool usage sequences",
                "If asked by the user, reflecting on the plan and suggesting changes if needed"
            ],
            "instructions": [
                "Use tools only when they are necessary for the task",
                "If a query can be answered directly, respond with a simple message instead of using tools",
                "When tools are needed, plan their usage efficiently to minimize tool calls",
                "If asked by the user, reflect on the plan and suggest changes if needed"
            ],
            "tools": [
                {
                    "name": "convert_currency",
                    "description": "Converts currency using latest exchange rates.",
                    "parameters": {
                        "amount": {
                            "type": "float",
                            "description": "Amount to convert"
                        },
                        "from_currency": {
                            "type": "str",
                            "description": "Source currency code (e.g., USD)"
                        },
                        "to_currency": {
                            "type": "str",
                            "description": "Target currency code (e.g., EUR)"
                        }
                    }
                }
            ],
            "response_format": {
                "type": "json",
                "schema": {
                    "requires_tools": {
                        "type": "boolean",
                        "description": "whether tools are needed for this query"
                    },
                    "direct_response": {
                        "type": "string",
                        "description": "response when no tools are needed",
                        "optional": True
                    },
                    "thought": {
                        "type": "string",
                        "description": "reasoning about how to solve the task (when tools are needed)",
                        "optional": True
                    },
                    "plan": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "steps to solve the task (when tools are needed)",
                        "optional": True
                    },
                    "tool_calls": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "tool": {
                                    "type": "string",
                                    "description": "name of the tool"
                                },
                                "args": {
                                    "type": "object",
                                    "description": "parameters for the tool"
                                }
                            }
                        },
                        "description": "tools to call in sequence (when tools are needed)",
                        "optional": True
                    }
                },
                "examples": [
                    {
                        "query": "Convert 100 USD to EUR",
                        "response": {
                            "requires_tools": True,
                            "thought": "I need to use the currency conversion tool to convert USD to EUR",
                            "plan": [
                                "Use convert_currency tool to convert 100 USD to EUR",
                                "Return the conversion result"
                            ],
                            "tool_calls": [
                                {
                                    "tool": "convert_currency",
                                    "args": {
                                        "amount": 100,
                                        "from_currency": "USD",
                                        "to_currency": "EUR"
                                    }
                                }
                            ]
                        }
                    },
                    {
                        "query": "What's 500 Japanese Yen in British Pounds?",
                        "response": {
                            "requires_tools": True,
                            "thought": "I need to convert JPY to GBP using the currency converter",
                            "plan": [
                                "Use convert_currency tool to convert 500 JPY to GBP",
                                "Return the conversion result"
                            ],
                            "tool_calls": [
                                {
                                    "tool": "convert_currency",
                                    "args": {
                                        "amount": 500,
                                        "from_currency": "JPY",
                                        "to_currency": "GBP"
                                    }
                                }
                            ]
                        }
                    },
                    {
                        "query": "What currency does Japan use?",
                        "response": {
                            "requires_tools": False,
                            "direct_response": "Japan uses the Japanese Yen (JPY) as its official currency. This is common knowledge that doesn't require using the currency conversion tool."
                        }
                    }
                ]
            }
        }

        return f"""You are an AI assistant that helps users by providing direct answers or using tools when necessary.
Configuration, instructions, and available tools are provided in JSON format below:

{json.dumps(tools_json, indent=2)}

Always respond with a JSON object following the response_format schema above.
Remember to use tools only when they are actually needed for the task."""

    def plan(self, user_query: str) -> Dict:
        """Use LLM to create a plan and store it in memory.

        Raises:
            ValueError: if the LLM response cannot be parsed as JSON.
        """
        messages = [
            {"role": "system", "content": self.create_system_prompt()},
            {"role": "user", "content": user_query}
        ]

        content = self._call_llm(messages)
        try:
            plan = json.loads(content)
        except json.JSONDecodeError:
            raise ValueError("Failed to parse LLM response as JSON")

        # Store the interaction immediately after planning
        self.interactions.append(Interaction(
            timestamp=datetime.now(),
            query=user_query,
            plan=plan
        ))
        return plan

    def reflect_on_plan(self) -> Dict[str, Any]:
        """Reflect on the most recent plan using interaction history."""
        if not self.interactions:
            return {"reflection": "No plan to reflect on", "requires_changes": False}

        latest_interaction = self.interactions[-1]

        reflection_prompt = {
            "task": "reflection",
            "context": {
                "user_query": latest_interaction.query,
                "generated_plan": latest_interaction.plan
            },
            "instructions": [
                "Review the generated plan for potential improvements",
                "Consider if the chosen tools are appropriate",
                "Verify tool parameters are correct",
                "Check if the plan is efficient",
                "Determine if tools are actually needed"
            ],
            "response_format": {
                "type": "json",
                "schema": {
                    "requires_changes": {
                        "type": "boolean",
                        "description": "whether the plan needs modifications"
                    },
                    "reflection": {
                        "type": "string",
                        "description": "explanation of what changes are needed or why no changes are needed"
                    },
                    "suggestions": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "specific suggestions for improvements",
                        "optional": True
                    }
                }
            }
        }

        messages = [
            {"role": "system", "content": self.create_system_prompt()},
            {"role": "user", "content": json.dumps(reflection_prompt, indent=2)}
        ]

        content = self._call_llm(messages)
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            # Fall back to treating the raw text as the reflection; the
            # caller's .get("requires_changes", False) tolerates the
            # missing key.
            return {"reflection": content}

    def execute(self, user_query: str) -> str:
        """Execute the full pipeline: plan, reflect, and potentially replan.

        Returns a human-readable summary string; on failure an error
        string is returned instead of raising.
        """
        try:
            # Create initial plan (this also stores it in memory)
            initial_plan = self.plan(user_query)

            # Reflect on the plan using memory
            reflection = self.reflect_on_plan()

            final_plan = initial_plan
            # Check if reflection suggests changes
            if reflection.get("requires_changes", False):
                # Generate new plan based on reflection
                messages = [
                    {"role": "system", "content": self.create_system_prompt()},
                    {"role": "user", "content": user_query},
                    {"role": "assistant", "content": json.dumps(initial_plan)},
                    {"role": "user", "content": f"Please revise the plan based on this feedback: {json.dumps(reflection)}"}
                ]
                try:
                    final_plan = json.loads(self._call_llm(messages))
                except json.JSONDecodeError:
                    final_plan = initial_plan  # Fallback to initial plan if parsing fails

            # Update the stored interaction with all information
            self.interactions[-1].plan = {
                "initial_plan": initial_plan,
                "reflection": reflection,
                "final_plan": final_plan
            }

            # Return the appropriate response.  The response schema marks
            # 'thought', 'plan' and 'direct_response' as optional, so use
            # .get() with defaults instead of [] indexing to avoid a
            # KeyError when the model omits one of them.
            if final_plan.get("requires_tools", True):
                return f"""Initial Thought: {initial_plan.get('thought', '')}
Initial Plan: {'. '.join(initial_plan.get('plan', []))}
Reflection: {reflection.get('reflection', 'No improvements suggested')}
Final Plan: {'. '.join(final_plan.get('plan', []))}"""
            else:
                return f"""Response: {final_plan.get('direct_response', '')}
Reflection: {reflection.get('reflection', 'No improvements suggested')}"""

        except Exception as e:
            return f"Error executing plan: {str(e)}"
296 |
def main():
    """Run the agent on a couple of sample queries and print the results.

    One query should trigger tool planning (currency conversion), the
    other should be answered directly without tools.
    """
    agent = Agent(model="gpt-4o-mini")

    # Fixed typo in the sample query: "Japaese" -> "Japanese".
    query_list = ["I am traveling to Japan from Lithuania, I have 1500 of local currency, how much of Japanese currency will I be able to get?",
                  "How are you doing?"]

    for query in query_list:
        print(f"\nQuery: {query}")
        result = agent.execute(query)
        print(result)

if __name__ == "__main__":
    main()
310 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/README.md:
--------------------------------------------------------------------------------
1 | This is the first part of the "Building Agents from scratch" series where we will implement tool use for an AI Agent from scratch without using any LLM orchestration frameworks. In this example, we'll build a currency conversion tool and configure our agent to intelligently determine when currency conversion is needed and use the tool appropriately.
2 |
3 |
4 |
5 |
6 |
7 | You can find the detailed walkthrough of this project in my [Newsletter](https://www.newsletter.swirlai.com/p/building-ai-agents-from-scratch-part).
8 |
9 |
10 | ## Installing dependencies
11 |
12 | [uv](https://github.com/astral-sh/uv) is a great tool for Python dependency management. To install the requirements:
13 |
14 | ```bash
15 | uv pip install -r requirements.txt
16 | ```
17 |
18 | ## Running the code
19 |
20 | First, export your OpenAI API key:
21 |
22 | ```bash
23 | export OPENAI_API_KEY=your_api_key
24 | ```
25 |
26 | > Note: Support for other LLM providers (like Anthropic, Mistral, etc.) is coming soon! This will allow you to run the examples with your preferred model provider.
27 |
28 | Then you can run the example:
29 |
30 | ```bash
31 | python3 src/main.py
32 | ```
33 |
34 | ## Interactive Notebook
35 |
36 | For a more interactive learning experience, you can follow along with the Jupyter notebook in the [notebooks](notebooks) folder. While detailed documentation is still being worked on, you can find the complete implementation and follow the code there.
37 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/notebooks/tool_use.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "id": "fd7ee009-e66d-41f2-8563-9439fa629a70",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from typing import Callable, Any, Dict, get_type_hints, Optional, List\n",
11 | "from dataclasses import dataclass\n",
12 | "import inspect\n",
13 | "from typing import _GenericAlias\n",
14 | "import urllib.request\n",
15 | "import json\n",
16 | "import openai\n",
17 | "import os"
18 | ]
19 | },
20 | {
21 | "cell_type": "markdown",
22 | "id": "b19fb7ca-6905-4f51-b949-ac5500db94c6",
23 | "metadata": {},
24 | "source": [
25 | "#### You will need to set OPENAI_API_KEY environment variable in order to run the example successfully"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "id": "abe2cab0-673a-4831-b5a2-1517c8fb3389",
31 | "metadata": {},
32 | "source": [
33 | "### Tool creation utilities"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": null,
39 | "id": "ee937d49-7d76-419e-bb96-c38c421acb12",
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "@dataclass\n",
44 | "class Tool:\n",
45 | " name: str\n",
46 | " description: str\n",
47 | " func: Callable[..., str]\n",
48 | " parameters: Dict[str, Dict[str, str]]\n",
49 | " \n",
50 | " def __call__(self, *args, **kwargs) -> str:\n",
51 | " return self.func(*args, **kwargs)\n",
52 | "\n",
53 | "def parse_docstring_params(docstring: str) -> Dict[str, str]:\n",
54 | " \"\"\"Extract parameter descriptions from docstring.\"\"\"\n",
55 | " if not docstring:\n",
56 | " return {}\n",
57 | " \n",
58 | " params = {}\n",
59 | " lines = docstring.split('\\n')\n",
60 | " in_params = False\n",
61 | " current_param = None\n",
62 | " \n",
63 | " for line in lines:\n",
64 | " line = line.strip()\n",
65 | " if line.startswith('Parameters:'):\n",
66 | " in_params = True\n",
67 | " elif in_params:\n",
68 | " if line.startswith('-') or line.startswith('*'):\n",
69 | " current_param = line.lstrip('- *').split(':')[0].strip()\n",
70 | " params[current_param] = line.lstrip('- *').split(':')[1].strip()\n",
71 | " elif current_param and line:\n",
72 | " params[current_param] += ' ' + line.strip()\n",
73 | " elif not line:\n",
74 | " in_params = False\n",
75 | " \n",
76 | " return params\n",
77 | "\n",
78 | "def get_type_description(type_hint: Any) -> str:\n",
79 | " \"\"\"Get a human-readable description of a type hint.\"\"\"\n",
80 | " if isinstance(type_hint, _GenericAlias):\n",
81 | " if type_hint._name == 'Literal':\n",
82 | " return f\"one of {type_hint.__args__}\"\n",
83 | " return type_hint.__name__"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "id": "994da7c1-7323-47bc-b40c-20a121cef778",
89 | "metadata": {},
90 | "source": [
91 | "### Tool creation decorator"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "id": "bfb9feb7-a372-40b7-8801-c94b5327f607",
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "def tool(name: str = None):\n",
102 | " def decorator(func: Callable[..., str]) -> Tool:\n",
103 | " tool_name = name or func.__name__\n",
104 | " description = inspect.getdoc(func) or \"No description available\"\n",
105 | " \n",
106 | " type_hints = get_type_hints(func)\n",
107 | " param_docs = parse_docstring_params(description)\n",
108 | " sig = inspect.signature(func)\n",
109 | " \n",
110 | " params = {}\n",
111 | " for param_name, param in sig.parameters.items():\n",
112 | " params[param_name] = {\n",
113 | " \"type\": get_type_description(type_hints.get(param_name, Any)),\n",
114 | " \"description\": param_docs.get(param_name, \"No description available\")\n",
115 | " }\n",
116 | " \n",
117 | " return Tool(\n",
118 | " name=tool_name,\n",
119 | " description=description.split('\\n\\n')[0],\n",
120 | " func=func,\n",
121 | " parameters=params\n",
122 | " )\n",
123 | " return decorator"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "5601aa5d-cf17-478a-ae1a-a2b78098323e",
129 | "metadata": {},
130 | "source": [
131 | "### Creating the currency conversion tool"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "e7e59069-682e-4ad1-9fb9-7984dc96ec27",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "@tool()\n",
142 | "def convert_currency(amount: float, from_currency: str, to_currency: str) -> str:\n",
143 | " \"\"\"Converts currency using latest exchange rates.\n",
144 | " \n",
145 | " Parameters:\n",
146 | " - amount: Amount to convert\n",
147 | " - from_currency: Source currency code (e.g., USD)\n",
148 | " - to_currency: Target currency code (e.g., EUR)\n",
149 | " \"\"\"\n",
150 | " try:\n",
151 | " url = f\"https://open.er-api.com/v6/latest/{from_currency.upper()}\"\n",
152 | " with urllib.request.urlopen(url) as response:\n",
153 | " data = json.loads(response.read())\n",
154 | " \n",
155 | " if \"rates\" not in data:\n",
156 | " return \"Error: Could not fetch exchange rates\"\n",
157 | " \n",
158 | " rate = data[\"rates\"].get(to_currency.upper())\n",
159 | " if not rate:\n",
160 | " return f\"Error: No rate found for {to_currency}\"\n",
161 | " \n",
162 | " converted = amount * rate\n",
163 | " return f\"{amount} {from_currency.upper()} = {converted:.2f} {to_currency.upper()}\"\n",
164 | " \n",
165 | " except Exception as e:\n",
166 | " return f\"Error converting currency: {str(e)}\""
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "id": "e43441da-9f35-490b-8950-5e83d7a22b65",
172 | "metadata": {},
173 | "source": [
174 | "### Agent class"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": null,
180 | "id": "a13ac4d0-fd54-4757-976d-bbbad4840645",
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "class Agent:\n",
185 | " def __init__(self):\n",
186 | " \"\"\"Initialize Agent with empty tool registry.\"\"\"\n",
187 | " self.client = openai.OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
188 | " self.tools: Dict[str, Tool] = {}\n",
189 | " \n",
190 | " def add_tool(self, tool: Tool) -> None:\n",
191 | " \"\"\"Register a new tool with the agent.\"\"\"\n",
192 | " self.tools[tool.name] = tool\n",
193 | " \n",
194 | " def get_available_tools(self) -> List[str]:\n",
195 | " \"\"\"Get list of available tool descriptions.\"\"\"\n",
196 | " return [f\"{tool.name}: {tool.description}\" for tool in self.tools.values()]\n",
197 | " \n",
198 | " def use_tool(self, tool_name: str, **kwargs: Any) -> str:\n",
199 | " \"\"\"Execute a specific tool with given arguments.\"\"\"\n",
200 | " if tool_name not in self.tools:\n",
201 | " raise ValueError(f\"Tool '{tool_name}' not found. Available tools: {list(self.tools.keys())}\")\n",
202 | " \n",
203 | " tool = self.tools[tool_name]\n",
204 | " return tool.func(**kwargs)\n",
205 | "\n",
206 | " def create_system_prompt(self) -> str:\n",
207 | " \"\"\"Create the system prompt for the LLM with available tools.\"\"\"\n",
208 | " tools_json = {\n",
209 | " \"role\": \"AI Assistant\",\n",
210 | " \"capabilities\": [\n",
211 | " \"Using provided tools to help users when necessary\",\n",
212 | " \"Responding directly without tools for questions that don't require tool usage\",\n",
213 | " \"Planning efficient tool usage sequences\"\n",
214 | " ],\n",
215 | " \"instructions\": [\n",
216 | " \"Use tools only when they are necessary for the task\",\n",
217 | " \"If a query can be answered directly, respond with a simple message instead of using tools\",\n",
218 | " \"When tools are needed, plan their usage efficiently to minimize tool calls\"\n",
219 | " ],\n",
220 | " \"tools\": [\n",
221 | " {\n",
222 | " \"name\": tool.name,\n",
223 | " \"description\": tool.description,\n",
224 | " \"parameters\": {\n",
225 | " name: {\n",
226 | " \"type\": info[\"type\"],\n",
227 | " \"description\": info[\"description\"]\n",
228 | " }\n",
229 | " for name, info in tool.parameters.items()\n",
230 | " }\n",
231 | " }\n",
232 | " for tool in self.tools.values()\n",
233 | " ],\n",
234 | " \"response_format\": {\n",
235 | " \"type\": \"json\",\n",
236 | " \"schema\": {\n",
237 | " \"requires_tools\": {\n",
238 | " \"type\": \"boolean\",\n",
239 | " \"description\": \"whether tools are needed for this query\"\n",
240 | " },\n",
241 | " \"direct_response\": {\n",
242 | " \"type\": \"string\",\n",
243 | " \"description\": \"response when no tools are needed\",\n",
244 | " \"optional\": True\n",
245 | " },\n",
246 | " \"thought\": {\n",
247 | " \"type\": \"string\", \n",
248 | " \"description\": \"reasoning about how to solve the task (when tools are needed)\",\n",
249 | " \"optional\": True\n",
250 | " },\n",
251 | " \"plan\": {\n",
252 | " \"type\": \"array\",\n",
253 | " \"items\": {\"type\": \"string\"},\n",
254 | " \"description\": \"steps to solve the task (when tools are needed)\",\n",
255 | " \"optional\": True\n",
256 | " },\n",
257 | " \"tool_calls\": {\n",
258 | " \"type\": \"array\",\n",
259 | " \"items\": {\n",
260 | " \"type\": \"object\",\n",
261 | " \"properties\": {\n",
262 | " \"tool\": {\n",
263 | " \"type\": \"string\",\n",
264 | " \"description\": \"name of the tool\"\n",
265 | " },\n",
266 | " \"args\": {\n",
267 | " \"type\": \"object\",\n",
268 | " \"description\": \"parameters for the tool\"\n",
269 | " }\n",
270 | " }\n",
271 | " },\n",
272 | " \"description\": \"tools to call in sequence (when tools are needed)\",\n",
273 | " \"optional\": True\n",
274 | " }\n",
275 | " },\n",
276 | " \"examples\": [\n",
277 | " {\n",
278 | " \"query\": \"Convert 100 USD to EUR\",\n",
279 | " \"response\": {\n",
280 | " \"requires_tools\": True,\n",
281 | " \"thought\": \"I need to use the currency conversion tool to convert USD to EUR\",\n",
282 | " \"plan\": [\n",
283 | " \"Use convert_currency tool to convert 100 USD to EUR\",\n",
284 | " \"Return the conversion result\"\n",
285 | " ],\n",
286 | " \"tool_calls\": [\n",
287 | " {\n",
288 | " \"tool\": \"convert_currency\",\n",
289 | " \"args\": {\n",
290 | " \"amount\": 100,\n",
291 | " \"from_currency\": \"USD\", \n",
292 | " \"to_currency\": \"EUR\"\n",
293 | " }\n",
294 | " }\n",
295 | " ]\n",
296 | " }\n",
297 | " },\n",
298 | " {\n",
299 | " \"query\": \"What's 500 Japanese Yen in British Pounds?\",\n",
300 | " \"response\": {\n",
301 | " \"requires_tools\": True,\n",
302 | " \"thought\": \"I need to convert JPY to GBP using the currency converter\",\n",
303 | " \"plan\": [\n",
304 | " \"Use convert_currency tool to convert 500 JPY to GBP\",\n",
305 | " \"Return the conversion result\"\n",
306 | " ],\n",
307 | " \"tool_calls\": [\n",
308 | " {\n",
309 | " \"tool\": \"convert_currency\",\n",
310 | " \"args\": {\n",
311 | " \"amount\": 500,\n",
312 | " \"from_currency\": \"JPY\",\n",
313 | " \"to_currency\": \"GBP\"\n",
314 | " }\n",
315 | " }\n",
316 | " ]\n",
317 | " }\n",
318 | " },\n",
319 | " {\n",
320 | " \"query\": \"What currency does Japan use?\",\n",
321 | " \"response\": {\n",
322 | " \"requires_tools\": False,\n",
323 | " \"direct_response\": \"Japan uses the Japanese Yen (JPY) as its official currency. This is common knowledge that doesn't require using the currency conversion tool.\"\n",
324 | " }\n",
325 | " }\n",
326 | " ]\n",
327 | " }\n",
328 | " }\n",
329 | " \n",
330 | " return f\"\"\"You are an AI assistant that helps users by providing direct answers or using tools when necessary.\n",
331 | "Configuration, instructions, and available tools are provided in JSON format below:\n",
332 | "\n",
333 | "{json.dumps(tools_json, indent=2)}\n",
334 | "\n",
335 | "Always respond with a JSON object following the response_format schema above. \n",
336 | "Remember to use tools only when they are actually needed for the task.\"\"\"\n",
337 | "\n",
338 | " def plan(self, user_query: str) -> Dict:\n",
339 | " \"\"\"Use LLM to create a plan for tool usage.\"\"\"\n",
340 | " messages = [\n",
341 | " {\"role\": \"system\", \"content\": self.create_system_prompt()},\n",
342 | " {\"role\": \"user\", \"content\": user_query}\n",
343 | " ]\n",
344 | " \n",
345 | " response = self.client.chat.completions.create(\n",
346 | " model=\"gpt-4o-mini\",\n",
347 | " messages=messages,\n",
348 | " temperature=0\n",
349 | " )\n",
350 | " \n",
351 | " try:\n",
352 | " return json.loads(response.choices[0].message.content)\n",
353 | " except json.JSONDecodeError:\n",
354 | " raise ValueError(\"Failed to parse LLM response as JSON\")\n",
355 | "\n",
356 | " def execute(self, user_query: str) -> str:\n",
357 | " \"\"\"Execute the full pipeline: plan and execute tools.\"\"\"\n",
358 | " try:\n",
359 | " plan = self.plan(user_query)\n",
360 | " \n",
361 | " if not plan.get(\"requires_tools\", True):\n",
362 | " return plan[\"direct_response\"]\n",
363 | " \n",
364 | " # Execute each tool in sequence\n",
365 | " results = []\n",
366 | " for tool_call in plan[\"tool_calls\"]:\n",
367 | " tool_name = tool_call[\"tool\"]\n",
368 | " tool_args = tool_call[\"args\"]\n",
369 | " result = self.use_tool(tool_name, **tool_args)\n",
370 | " results.append(result)\n",
371 | " \n",
372 | " # Combine results\n",
373 | " return f\"\"\"Thought: {plan['thought']}\n",
374 | "Plan: {'. '.join(plan['plan'])}\n",
375 | "Results: {'. '.join(results)}\"\"\"\n",
376 | " \n",
377 | " except Exception as e:\n",
378 | " return f\"Error executing plan: {str(e)}\""
379 | ]
380 | },
381 | {
382 | "cell_type": "markdown",
383 | "id": "3f42026f-6db2-4b72-b745-417814d0641d",
384 | "metadata": {},
385 | "source": [
386 | "### Create and run the Agent"
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": null,
392 | "id": "a641ae1c-e8e6-423d-abe7-b4a0826c9b93",
393 | "metadata": {},
394 | "outputs": [],
395 | "source": [
396 | "agent = Agent()\n",
397 | "agent.add_tool(convert_currency)\n",
398 | "\n",
399 | "query_list = [\"I am traveling to Japan from Serbia, I have 1500 of local currency, how much of Japanese currency will I be able to get?\",\n",
400 | " \"How are you doing?\"]\n",
401 | "\n",
402 | "for query in query_list:\n",
403 | " print(f\"\\nQuery: {query}\")\n",
404 | " result = agent.execute(query)\n",
405 | " print(result)"
406 | ]
407 | }
408 | ],
409 | "metadata": {
410 | "kernelspec": {
411 | "display_name": "Python 3 (ipykernel)",
412 | "language": "python",
413 | "name": "python3"
414 | },
415 | "language_info": {
416 | "codemirror_mode": {
417 | "name": "ipython",
418 | "version": 3
419 | },
420 | "file_extension": ".py",
421 | "mimetype": "text/x-python",
422 | "name": "python",
423 | "nbconvert_exporter": "python",
424 | "pygments_lexer": "ipython3",
425 | "version": "3.11.5"
426 | }
427 | },
428 | "nbformat": 4,
429 | "nbformat_minor": 5
430 | }
431 |
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/requirements.txt:
--------------------------------------------------------------------------------
1 | openai>=1.0.0
2 | python-dotenv>=1.0.0
3 | pydantic>=2.0.0
4 | typing-extensions>=4.0.0
5 | tenacity>=8.0.0
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swirl-ai/ai-angineers-handbook/8b32995b841889b6d160a7756fc0b6bc5521818a/building_agents_from_scratch/tool_use/src/__init__.py
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/src/main.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Any
2 | from tool_registry import Tool
3 | import openai
4 | import os
5 | import json
6 |
7 |
class Agent:
    """LLM-driven agent that plans tool usage with an OpenAI model and
    executes the planned calls against a local tool registry."""

    def __init__(self):
        """Initialize Agent with an OpenAI client and an empty tool registry."""
        self.client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.tools: Dict[str, Tool] = {}

    def add_tool(self, tool: Tool) -> None:
        """Register a new tool with the agent (keyed by the tool's name)."""
        self.tools[tool.name] = tool

    def get_available_tools(self) -> List[str]:
        """Get list of available tool descriptions ("name: description")."""
        return [f"{tool.name}: {tool.description}" for tool in self.tools.values()]

    def use_tool(self, tool_name: str, **kwargs: Any) -> str:
        """Execute a specific tool with given arguments.

        Raises:
            ValueError: if `tool_name` is not a registered tool.
        """
        if tool_name not in self.tools:
            raise ValueError(f"Tool '{tool_name}' not found. Available tools: {list(self.tools.keys())}")

        tool = self.tools[tool_name]
        return tool.func(**kwargs)

    def create_system_prompt(self) -> str:
        """Create the system prompt for the LLM with available tools.

        The prompt embeds the registered tools' metadata plus the expected
        JSON response schema and few-shot examples, serialized as JSON.
        """
        tools_json = {
            "role": "AI Assistant",
            "capabilities": [
                "Using provided tools to help users when necessary",
                "Responding directly without tools for questions that don't require tool usage",
                "Planning efficient tool usage sequences"
            ],
            "instructions": [
                "Use tools only when they are necessary for the task",
                "If a query can be answered directly, respond with a simple message instead of using tools",
                "When tools are needed, plan their usage efficiently to minimize tool calls"
            ],
            "tools": [
                {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": {
                        name: {
                            "type": info["type"],
                            "description": info["description"]
                        }
                        for name, info in tool.parameters.items()
                    }
                }
                for tool in self.tools.values()
            ],
            "response_format": {
                "type": "json",
                "schema": {
                    "requires_tools": {
                        "type": "boolean",
                        "description": "whether tools are needed for this query"
                    },
                    "direct_response": {
                        "type": "string",
                        "description": "response when no tools are needed",
                        "optional": True
                    },
                    "thought": {
                        "type": "string",
                        "description": "reasoning about how to solve the task (when tools are needed)",
                        "optional": True
                    },
                    "plan": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "steps to solve the task (when tools are needed)",
                        "optional": True
                    },
                    "tool_calls": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "tool": {
                                    "type": "string",
                                    "description": "name of the tool"
                                },
                                "args": {
                                    "type": "object",
                                    "description": "parameters for the tool"
                                }
                            }
                        },
                        "description": "tools to call in sequence (when tools are needed)",
                        "optional": True
                    }
                },
                "examples": [
                    {
                        "query": "Convert 100 USD to EUR",
                        "response": {
                            "requires_tools": True,
                            "thought": "I need to use the currency conversion tool to convert USD to EUR",
                            "plan": [
                                "Use convert_currency tool to convert 100 USD to EUR",
                                "Return the conversion result"
                            ],
                            "tool_calls": [
                                {
                                    "tool": "convert_currency",
                                    "args": {
                                        "amount": 100,
                                        "from_currency": "USD",
                                        "to_currency": "EUR"
                                    }
                                }
                            ]
                        }
                    },
                    {
                        "query": "What's 500 Japanese Yen in British Pounds?",
                        "response": {
                            "requires_tools": True,
                            "thought": "I need to convert JPY to GBP using the currency converter",
                            "plan": [
                                "Use convert_currency tool to convert 500 JPY to GBP",
                                "Return the conversion result"
                            ],
                            "tool_calls": [
                                {
                                    "tool": "convert_currency",
                                    "args": {
                                        "amount": 500,
                                        "from_currency": "JPY",
                                        "to_currency": "GBP"
                                    }
                                }
                            ]
                        }
                    },
                    {
                        "query": "What currency does Japan use?",
                        "response": {
                            "requires_tools": False,
                            "direct_response": "Japan uses the Japanese Yen (JPY) as its official currency. This is common knowledge that doesn't require using the currency conversion tool."
                        }
                    }
                ]
            }
        }

        return f"""You are an AI assistant that helps users by providing direct answers or using tools when necessary.
Configuration, instructions, and available tools are provided in JSON format below:

{json.dumps(tools_json, indent=2)}

Always respond with a JSON object following the response_format schema above. 
Remember to use tools only when they are actually needed for the task."""

    def plan(self, user_query: str) -> Dict:
        """Use LLM to create a plan for tool usage.

        Returns:
            The parsed JSON plan as a dict.

        Raises:
            ValueError: if the model's reply cannot be parsed as JSON.
        """
        messages = [
            {"role": "system", "content": self.create_system_prompt()},
            {"role": "user", "content": user_query}
        ]

        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            # JSON mode forces a syntactically valid JSON object, so the
            # model cannot wrap its answer in markdown fences that would
            # break json.loads below.  (The system prompt already mentions
            # "JSON", which JSON mode requires.)
            response_format={"type": "json_object"},
            temperature=0
        )

        try:
            return json.loads(response.choices[0].message.content)
        except json.JSONDecodeError as e:
            # Chain the original error so the parse position is preserved.
            raise ValueError("Failed to parse LLM response as JSON") from e

    def execute(self, user_query: str) -> str:
        """Execute the full pipeline: plan and execute tools."""
        try:
            plan = self.plan(user_query)

            if not plan.get("requires_tools", True):
                return plan["direct_response"]

            # Execute each tool in sequence; default to an empty list so a
            # malformed plan without "tool_calls" yields an empty result
            # instead of a KeyError.
            results = []
            for tool_call in plan.get("tool_calls", []):
                tool_name = tool_call["tool"]
                tool_args = tool_call["args"]
                # str() guards the join below against tools that return a
                # non-string value despite the declared interface.
                results.append(str(self.use_tool(tool_name, **tool_args)))

            # Combine results
            return f"""Thought: {plan['thought']}
Plan: {'. '.join(plan['plan'])}
Results: {'. '.join(results)}"""

        except Exception as e:
            # Boundary handler: surface any failure as a readable message
            # instead of crashing the caller.
            return f"Error executing plan: {str(e)}"
203 |
def main():
    """Demo entry point: register the currency tool and run sample queries."""
    from tools import convert_currency

    agent = Agent()
    agent.add_tool(convert_currency)

    # One query that requires the currency tool and one that the model
    # should answer directly without any tool call.
    query_list = ["I am traveling to Japan from Serbia, I have 1500 of local currency, how much of Japanese currency will I be able to get?",
                  "How are you doing?"]

    for query in query_list:
        print(f"\nQuery: {query}")
        result = agent.execute(query)
        print(result)

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/src/tool_registry.py:
--------------------------------------------------------------------------------
import inspect
from dataclasses import dataclass
from typing import (
    Any,
    Callable,
    Dict,
    Literal,
    Optional,
    get_args,
    get_origin,
    get_type_hints,
)
from typing import _GenericAlias  # noqa: F401 -- kept for backward compatibility
5 |
@dataclass
class Tool:
    """A named, documented, callable tool.

    Instances are normally produced by the @tool decorator, which fills in
    the name, the description (first docstring paragraph), the wrapped
    callable, and the per-parameter type/description metadata.
    """
    name: str
    description: str
    func: Callable[..., str]
    parameters: Dict[str, Dict[str, str]]

    def __call__(self, *args, **kwargs) -> str:
        # Calling the Tool delegates straight to the wrapped function.
        return self.func(*args, **kwargs)
15 |
def parse_docstring_params(docstring: str) -> Dict[str, str]:
    """Extract parameter descriptions from a docstring.

    Scans for a 'Parameters:' section and collects bullets of the form
    '- name: description' (or '* name: description').  Indented
    continuation lines are appended to the most recent parameter's
    description; a blank line ends the section.

    Returns:
        Mapping of parameter name -> description text ({} if no docstring).
    """
    if not docstring:
        return {}

    params: Dict[str, str] = {}
    in_params = False
    current_param = None

    for line in docstring.split('\n'):
        line = line.strip()
        if line.startswith('Parameters:'):
            in_params = True
        elif in_params:
            if line.startswith('-') or line.startswith('*'):
                # Split on the FIRST colon only, so descriptions may
                # themselves contain colons (str.split(':')[1] would
                # truncate them, and crash on a bullet with no colon).
                name, sep, desc = line.lstrip('- *').partition(':')
                if not sep:
                    # Malformed bullet without a colon: skip it rather
                    # than raising IndexError.
                    continue
                current_param = name.strip()
                params[current_param] = desc.strip()
            elif current_param and line:
                params[current_param] += ' ' + line
            elif not line:
                in_params = False

    return params
40 |
def get_type_description(type_hint: Any) -> str:
    """Get a human-readable description of a type hint.

    Literal types are rendered as "one of (...)"; anything else falls
    back to the type's __name__ (or its str() form when __name__ is
    missing, e.g. for some typing aliases).
    """
    # Use the public typing.get_origin/get_args API instead of the private
    # _GenericAlias/_name internals: those changed across Python versions
    # and do not reliably identify Literal aliases (whose _name is None
    # on modern CPython), which made the original check miss them.
    if get_origin(type_hint) is Literal:
        return f"one of {get_args(type_hint)}"
    return getattr(type_hint, "__name__", str(type_hint))
47 |
def tool(name: Optional[str] = None):
    """Decorator factory that turns a plain function into a Tool.

    The tool's name defaults to the wrapped function's __name__; its
    description is the first paragraph of the function's docstring, and
    per-parameter metadata is assembled from type hints plus the
    docstring's 'Parameters:' section.
    """
    def decorator(func: Callable[..., str]) -> Tool:
        # Fall back to the function's own name when no explicit name is given.
        tool_name = name or func.__name__
        description = inspect.getdoc(func) or "No description available"

        type_hints = get_type_hints(func)
        # The FULL docstring is parsed for parameter descriptions here,
        # but the Tool below only keeps the first paragraph as its
        # description.
        param_docs = parse_docstring_params(description)
        sig = inspect.signature(func)

        params = {}
        for param_name, param in sig.parameters.items():
            params[param_name] = {
                # Parameters without a type hint are described as Any.
                "type": get_type_description(type_hints.get(param_name, Any)),
                "description": param_docs.get(param_name, "No description available")
            }

        return Tool(
            name=tool_name,
            # First paragraph only: text before the first blank line.
            description=description.split('\n\n')[0],
            func=func,
            parameters=params
        )
    return decorator
--------------------------------------------------------------------------------
/building_agents_from_scratch/tool_use/src/tools.py:
--------------------------------------------------------------------------------
1 | from tool_registry import tool
2 | import urllib.request
3 | import json
4 |
@tool()
def convert_currency(amount: float, from_currency: str, to_currency: str) -> str:
    """Converts currency using latest exchange rates.

    Parameters:
    - amount: Amount to convert
    - from_currency: Source currency code (e.g., USD)
    - to_currency: Target currency code (e.g., EUR)
    """
    try:
        url = f"https://open.er-api.com/v6/latest/{from_currency.upper()}"
        # A timeout prevents the agent from hanging indefinitely on a
        # slow or unreachable exchange-rate service.
        with urllib.request.urlopen(url, timeout=10) as response:
            data = json.loads(response.read())

        if "rates" not in data:
            return "Error: Could not fetch exchange rates"

        rate = data["rates"].get(to_currency.upper())
        # Explicit None check: a missing currency code yields None, while
        # a (theoretical) rate of 0 would still be a real value.
        if rate is None:
            return f"Error: No rate found for {to_currency}"

        converted = amount * rate
        return f"{amount} {from_currency.upper()} = {converted:.2f} {to_currency.upper()}"

    except Exception as e:
        # Tools report failures as strings so the agent can surface them
        # in its combined result instead of crashing.
        return f"Error converting currency: {str(e)}"
--------------------------------------------------------------------------------