├── .gitignore
├── Guide to Jupyter.ipynb
├── Intermediate Python.ipynb
├── LICENSE
├── README.md
├── SETUP-PC.md
├── SETUP-mac.md
├── business.jpg
├── diagnostics.ipynb
├── diagnostics.py
├── environment.yml
├── handson.jpg
├── important.jpg
├── intro
│   ├── lab1.ipynb
│   └── lab2.ipynb
├── outputs
│   ├── agent1 (1).ipynb
│   ├── agent2 (1).ipynb
│   ├── agent3 (1).ipynb
│   ├── agent4 (1).ipynb
│   ├── agent5 (1).ipynb
│   ├── lab1_output.ipynb
│   ├── lab2_output.ipynb
│   └── new_agent5_output.ipynb
├── price_agent.jpg
├── requirements.txt
├── resources.jpg
├── troubleshooting.ipynb
└── workshop
    ├── agent1.ipynb
    ├── agent2.ipynb
    ├── agent3.ipynb
    ├── agent4.ipynb
    ├── agent5.ipynb
    ├── deal_agent_framework.py
    ├── items.py
    ├── keep_warm.py
    ├── log_utils.py
    ├── memory.json
    ├── old_agent5.ipynb
    ├── previous_hardcoded_agent5.ipynb
    ├── price_agents
    │   ├── agent.py
    │   ├── autonomous_planning_agent.py
    │   ├── deals.py
    │   ├── frontier_agent.py
    │   ├── messaging_agent.py
    │   ├── planning_agent.py
    │   ├── scanner_agent.py
    │   └── specialist_agent.py
    ├── price_is_right.py
    ├── pricer_service.py
    ├── sandbox
    │   └── deals.md
    └── testing.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # GitHub's default gitignore for Python
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | *.py,cover
52 | .hypothesis/
53 | .pytest_cache/
54 | cover/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | .pybuilder/
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | # For a library or package, you might want to ignore these files since the code is
89 | # intended to run in multiple environments; otherwise, check them in:
90 | # .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # poetry
100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101 | # This is especially recommended for binary packages to ensure reproducibility, and is more
102 | # commonly ignored for libraries.
103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104 | #poetry.lock
105 |
106 | # pdm
107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108 | #pdm.lock
109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110 | # in version control.
111 | # https://pdm.fming.dev/#use-with-ide
112 | .pdm.toml
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 | llms/
133 | llms.bak/
134 | agentic/
135 | agentic.bak/
136 |
137 | # Spyder project settings
138 | .spyderproject
139 | .spyproject
140 |
141 | # Rope project settings
142 | .ropeproject
143 |
144 | # mkdocs documentation
145 | /site
146 |
147 | # mypy
148 | .mypy_cache/
149 | .dmypy.json
150 | dmypy.json
151 |
152 | # Pyre type checker
153 | .pyre/
154 |
155 | # pytype static type analyzer
156 | .pytype/
157 |
158 | # Cython debug symbols
159 | cython_debug/
160 |
161 | # PyCharm
162 | .idea/
163 |
164 | # Added this to ignore models downloaded from HF
165 | model_cache/
166 | # Ignore finder files
167 | .DS_Store
168 | /.DS_Store
169 |
170 | # Ignore Chroma vector database
171 | vector_db/
172 | products_vectorstore/
173 |
174 | # And ignore any pickle files made during the course
175 | *.pkl
176 |
177 | # ignore gradio private files
178 | .gradio
179 | /.gradio
180 |
181 | # ignore diagnostics reports
182 | **/report.txt
183 |
--------------------------------------------------------------------------------
/Guide to Jupyter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "5c291475-8c7c-461c-9b12-545a887b2432",
6 | "metadata": {},
7 | "source": [
8 | "# Jupyter Lab\n",
9 | "\n",
10 | "## A Quick Start Guide\n",
11 | "\n",
12 | "Welcome to the wonderful world of Jupyter lab! \n",
13 | "This is a Data Science playground where you can easily write code and investigate the results. It's an ideal environment for: \n",
14 | "- Research & Development\n",
15 | "- Prototyping\n",
16 | "- Learning (that's us!)\n",
17 | "\n",
18 | "It's not typically used for shipping production code, and in Week 8 we'll explore the bridge between Jupyter and python code.\n",
19 | "\n",
20 | "A file in Jupyter Lab, like this one, is called a **Notebook**.\n",
21 | "\n",
22 | "A long time ago, Jupyter used to be called \"IPython\", and so the extensions of notebooks are \".ipynb\" which stands for \"IPython Notebook\".\n",
23 | "\n",
24 | "On the left is a File Browser that lets you navigate around the directories and choose different notebooks. But you probably know that already, or you wouldn't have got here!\n",
25 | "\n",
26 | "The notebook consists of a series of square boxes called \"cells\". Some of them contain text, like this cell, and some of them contain code, like the cell below.\n",
27 | "\n",
28 | "Click in a cell with code and press `Shift + Return` (or `Shift + Enter`) to run the code and print the output.\n",
29 | "\n",
30 | "Do that now for the cell below this:"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "id": "33d37cd8-55c9-4e03-868c-34aa9cab2c80",
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "# Click anywhere in this cell and press Shift + Return\n",
41 | "\n",
42 | "2 + 2"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "id": "9e95df7b-55c6-4204-b8f9-cae83360fc23",
48 | "metadata": {},
49 | "source": [
50 | "## Congrats!\n",
51 | "\n",
52 | "Now run the next cell which sets a value, followed by the cells after it to print the value"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "id": "585eb9c1-85ee-4c27-8dc2-b4d8d022eda0",
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "# Set a value for a variable\n",
63 | "\n",
64 | "favorite_fruit = \"bananas\""
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "id": "07792faa-761d-46cb-b9b7-2bbf70bb1628",
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "# The result of the last statement is shown after you run it\n",
75 | "\n",
76 | "favorite_fruit"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "id": "a067d2b1-53d5-4aeb-8a3c-574d39ff654a",
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "# Use the variable\n",
87 | "\n",
88 | "print(f\"My favorite fruit is {favorite_fruit}\")"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "id": "4c5a4e60-b7f4-4953-9e80-6d84ba4664ad",
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "# Now change the variable\n",
99 | "\n",
100 | "favorite_fruit = f\"anything but {favorite_fruit}\""
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "9442d5c9-f57d-4839-b0af-dce58646c04f",
106 | "metadata": {},
107 | "source": [
108 | "## Now go back and rerun the cell with the print statement, two cells back\n",
109 | "\n",
110 | "See how it prints something different, even though favorite_fruit was changed further down in the notebook? \n",
111 | "\n",
112 | "The order that code appears in the notebook doesn't matter. What matters is the order that the code is **executed**. There's a python process sitting behind this notebook in which the variables are being changed.\n",
113 | "\n",
114 | "This catches some people out when they first use Jupyter."
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": null,
120 | "id": "8e5ec81d-7c5b-4025-bd2e-468d67b581b6",
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "# Then run this cell twice, and see if you understand what's going on\n",
125 | "\n",
126 | "print(f\"My favorite fruit is {favorite_fruit}\")\n",
127 | "\n",
128 | "favorite_fruit = \"apples\""
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "id": "a29dab2d-bab9-4a54-8504-05e62594cc6f",
134 | "metadata": {},
135 | "source": [
136 | "# Explaining the 'kernel'\n",
137 | "\n",
138 | "Sitting behind this notebook is a Python process which executes each cell when you run it. That Python process is known as the Kernel. Each notebook has its own separate Kernel.\n",
139 | "\n",
140 | "You can go to the Kernel menu and select \"Restart Kernel\".\n",
141 | "\n",
142 | "If you then try to run the next cell, you'll get an error, because favorite_fruit is no longer defined. You'll need to run the cells from the top of the notebook again. Then the next cell should run fine."
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "id": "84b1e410-5eda-4e2c-97ce-4eebcff816c5",
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "print(f\"My favorite fruit is {favorite_fruit}\")"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "id": "4d4188fc-d9cc-42be-8b4e-ae8630456764",
158 | "metadata": {},
159 | "source": [
160 | "# Adding and moving cells\n",
161 | "\n",
162 | "Click in this cell, then click the \\[+\\] button in the toolbar above to create a new cell immediately below this one. Copy and paste in the code in the prior cell, then run it! There are also icons in the top right of the selected cell to delete it (bin), duplicate it, and move it up and down.\n"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "id": "ce258424-40c3-49a7-9462-e6fa25014b03",
169 | "metadata": {},
170 | "outputs": [],
171 | "source": []
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "id": "30e71f50-8f01-470a-9d7a-b82a6cef4236",
176 | "metadata": {},
177 | "source": [
178 | "# Cell output\n",
179 | "\n",
180 | "When you execute a cell, the standard output and the result of the last statement is written to the area immediately under the code, known as the 'cell output'. When you save a Notebook from the file menu (or command+S), the output is also saved, making it a useful record of what happened.\n",
181 | "\n",
182 | "You can clean this up by going to Edit menu >> Clear Outputs of All Cells, or Kernel menu >> Restart Kernel and Clear Outputs of All Cells."
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "id": "a4d021e2-c284-411f-8ab1-030530cfbe72",
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "spams = [\"spam\"] * 1000\n",
193 | "print(spams)\n",
194 | "\n",
195 | "# Might be worth clearing output after running this!"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "id": "eac060f2-7a71-46e7-8235-b6ad0a76f5f8",
201 | "metadata": {},
202 | "source": [
203 | "# Using markdown\n",
204 | "\n",
205 | "So what's going on with these areas with writing in them, like this one? Well, there's actually a different kind of cell called a 'Markdown' cell for adding explanations like this. Click the + button to add a cell. Then in the toolbar, click where it says 'Code' and change it to 'Markdown'.\n",
206 | "\n",
207 | "Add some comments using Markdown format, perhaps copying and pasting from here:\n",
208 | "\n",
209 | "```\n",
210 | "# This is a heading\n",
211 | "## This is a sub-head\n",
212 | "### And a sub-sub-head\n",
213 | "\n",
214 | "I like Jupyter Lab because it's\n",
215 | "- Easy\n",
216 | "- Flexible\n",
217 | "- Satisfying\n",
218 | "```\n",
219 | "\n",
220 | "And to turn this into formatted text simply with Shift+Return in the cell.\n",
221 | "Click in the cell and press the Bin icon if you want to remove it."
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "id": "e1586320-c90f-4f22-8b39-df6865484950",
228 | "metadata": {},
229 | "outputs": [],
230 | "source": []
231 | },
232 | {
233 | "cell_type": "markdown",
234 | "id": "1330c83c-67ac-4ca0-ac92-a71699e0c31b",
235 | "metadata": {},
236 | "source": [
237 | "# The exclamation point\n",
238 | "\n",
239 | "There's a super useful feature of jupyter labs; you can type a command with a ! in front of it in a code cell, like:\n",
240 | "\n",
241 | "!pip install \\[some_package\\]\n",
242 | "\n",
243 | "And it will run it at the command line (as if in Windows Powershell or Mac Terminal) and print the result"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "id": "82042fc5-a907-4381-a4b8-eb9386df19cd",
250 | "metadata": {},
251 | "outputs": [],
252 | "source": [
253 | "# list the current directory\n",
254 | "\n",
255 | "!ls"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": null,
261 | "id": "4fc3e3da-8a55-40cc-9706-48bf12a0e20e",
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "# ping cnn.com - press the stop button in the toolbar when you're bored\n",
266 | "\n",
267 | "!ping cnn.com"
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "execution_count": null,
273 | "id": "a58e9462-89a2-4b4f-b4aa-51c4bd9f796b",
274 | "metadata": {},
275 | "outputs": [],
276 | "source": [
277 | "# This is a useful command that ensures your Anaconda environment \n",
278 | "# is up to date with any new upgrades to packages;\n",
279 | "# But it might take a minute and will print a lot to output\n",
280 | "\n",
281 | "!conda env update -f ../environment.yml --prune"
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "id": "4688baaf-a72c-41b5-90b6-474cb24790a7",
287 | "metadata": {},
288 | "source": [
289 | "# Minor things we encounter on the course\n",
290 | "\n",
291 | "This isn't necessarily a feature of Jupyter, but it's a nice package to know about that is useful in Jupyter Labs, and I use it in the course.\n",
292 | "\n",
293 | "The package `tqdm` will print a nice progress bar if you wrap any iterable."
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": null,
299 | "id": "2646a4e5-3c23-4aee-a34d-d623815187d2",
300 | "metadata": {},
301 | "outputs": [],
302 | "source": [
303 | "# Here's some code with no progress bar\n",
304 | "# It will take 10 seconds while you wonder what's happpening..\n",
305 | "\n",
306 | "import time\n",
307 | "\n",
308 | "spams = [\"spam\"] * 1000\n",
309 | "\n",
310 | "for spam in spams:\n",
311 | " time.sleep(0.01)"
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": null,
317 | "id": "6e96be3d-fa82-42a3-a8aa-b81dd20563a5",
318 | "metadata": {},
319 | "outputs": [],
320 | "source": [
321 | "# And now, with a nice little progress bar:\n",
322 | "\n",
323 | "import time\n",
324 | "from tqdm import tqdm\n",
325 | "\n",
326 | "spams = [\"spam\"] * 1000\n",
327 | "\n",
328 | "for spam in tqdm(spams):\n",
329 | " time.sleep(0.01)"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": null,
335 | "id": "63c788dd-4618-4bb4-a5ce-204411a38ade",
336 | "metadata": {},
337 | "outputs": [],
338 | "source": [
339 | "# On a different topic, here's a useful way to print output in markdown\n",
340 | "\n",
341 | "from IPython.display import Markdown, display\n",
342 | "\n",
343 | "display(Markdown(\"# This is a big heading!\\n\\n- And this is a bullet-point\\n- So is this\\n- Me, too!\"))\n"
344 | ]
345 | },
346 | {
347 | "cell_type": "markdown",
348 | "id": "9d14c1fb-3321-4387-b6ca-9af27676f980",
349 | "metadata": {},
350 | "source": [
351 | "# That's it! You're up to speed on Jupyter Lab.\n",
352 | "\n",
353 | "## Want to be even more advanced?\n",
354 | "\n",
355 | "If you want to become a pro at Jupyter Lab, you can read their tutorial [here](https://jupyterlab.readthedocs.io/en/latest/). But this isn't required for our course; just a good technique for hitting Shift + Return and enjoying the result!"
356 | ]
357 | }
358 | ],
359 | "metadata": {
360 | "kernelspec": {
361 | "display_name": "Python 3 (ipykernel)",
362 | "language": "python",
363 | "name": "python3"
364 | },
365 | "language_info": {
366 | "codemirror_mode": {
367 | "name": "ipython",
368 | "version": 3
369 | },
370 | "file_extension": ".py",
371 | "mimetype": "text/x-python",
372 | "name": "python",
373 | "nbconvert_exporter": "python",
374 | "pygments_lexer": "ipython3",
375 | "version": "3.11.11"
376 | }
377 | },
378 | "nbformat": 4,
379 | "nbformat_minor": 5
380 | }
381 |
--------------------------------------------------------------------------------
/Intermediate Python.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "5c291475-8c7c-461c-9b12-545a887b2432",
6 | "metadata": {},
7 | "source": [
8 | "# Intermediate Level Python\n",
9 | "\n",
10 | "## Getting you up to speed\n",
11 | "\n",
12 | "This course assumes that you're at an intermediate level of python. For example, you should have a decent idea what something like this might do:\n",
13 | "\n",
14 | "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n",
15 | "\n",
16 | "If not - then you've come to the right place! Welcome to the crash course in intermediate level python. The best way to learn is by doing!\n"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "id": "542f0577-a826-4613-a5d7-4170e9666d04",
22 | "metadata": {},
23 | "source": [
24 | "## First: if you need a refresher on the foundations\n",
25 | "\n",
26 | "I'm going to defer to an AI friend for this, because these explanations are so well written with great examples. Copy and paste the code examples into a new cell to give them a try. Pick whichever section(s) you'd like to brush up on.\n",
27 | "\n",
28 | "**Python imports:** \n",
29 | "https://chatgpt.com/share/672f9f31-8114-8012-be09-29ef0d0140fb\n",
30 | "\n",
31 | "**Python functions** including default arguments: \n",
32 | "https://chatgpt.com/share/672f9f99-7060-8012-bfec-46d4cf77d672\n",
33 | "\n",
34 | "**Python strings**, including slicing, split/join, replace and literals: \n",
35 | "https://chatgpt.com/share/672fb526-0aa0-8012-9e00-ad1687c04518\n",
36 | "\n",
37 | "**Python f-strings** including number and date formatting: \n",
38 | "https://chatgpt.com/share/672fa125-0de0-8012-8e35-27918cbb481c\n",
39 | "\n",
40 | "**Python lists, dicts and sets**, including the `get()` method: \n",
41 | "https://chatgpt.com/share/672fa225-3f04-8012-91af-f9c95287da8d\n",
42 | "\n",
43 | "**Python files** including modes, encoding, context managers, Path, glob.glob: \n",
44 | "https://chatgpt.com/share/673b53b2-6d5c-8012-a344-221056c2f960\n",
45 | "\n",
46 | "**Python classes:** \n",
47 | "https://chatgpt.com/share/672fa07a-1014-8012-b2ea-6dc679552715\n",
48 | "\n",
49 | "**Pickling Python objects and converting to JSON:** \n",
50 | "https://chatgpt.com/share/673b553e-9d0c-8012-9919-f3bb5aa23e31"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "id": "5802e2f0-0ea0-4237-bbb7-f375a34260f0",
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "# Next let's create some things:\n",
61 | "\n",
62 | "fruits = [\"Apples\", \"Bananas\", \"Pears\"]\n",
63 | "\n",
64 | "book1 = {\"title\": \"Great Expectations\", \"author\": \"Charles Dickens\"}\n",
65 | "book2 = {\"title\": \"Bleak House\", \"author\": \"Charles Dickens\"}\n",
66 | "book3 = {\"title\": \"An Book By No Author\"}\n",
67 | "book4 = {\"title\": \"Moby Dick\", \"author\": \"Herman Melville\"}\n",
68 | "\n",
69 | "books = [book1, book2, book3, book4]"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "id": "9b941e6a-3658-4144-a8d4-72f5e72f3707",
75 | "metadata": {},
76 | "source": [
77 | "# Part 1: List and dict comprehensions"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "id": "61992bb8-735d-4dad-8747-8c10b63aec82",
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "# Simple enough to start\n",
88 | "\n",
89 | "for fruit in fruits:\n",
90 | " print(fruit)"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "id": "c89c3842-9b74-47fa-8424-0fcb08e4177c",
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "# Let's make a new version of fruits\n",
101 | "\n",
102 | "fruits_shouted = []\n",
103 | "for fruit in fruits:\n",
104 | " fruits_shouted.append(fruit.upper())\n",
105 | "\n",
106 | "fruits_shouted"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "id": "4ec13b3a-9545-44f1-874a-2910a0663560",
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "# You probably already know this\n",
117 | "# There's a nice Python construct called \"list comprehension\" that does this:\n",
118 | "\n",
119 | "fruits_shouted2 = [fruit.upper() for fruit in fruits]\n",
120 | "fruits_shouted2"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "id": "ecc08c3c-181d-4b64-a3e1-b0ccffc6c0cd",
127 | "metadata": {},
128 | "outputs": [],
129 | "source": [
130 | "# But you may not know that you can do this to create dictionaries, too:\n",
131 | "\n",
132 | "fruit_mapping = {fruit: fruit.upper() for fruit in fruits}\n",
133 | "fruit_mapping"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "id": "500c2406-00d2-4793-b57b-f49b612760c8",
140 | "metadata": {},
141 | "outputs": [],
142 | "source": [
143 | "# you can also use the if statement to filter the results\n",
144 | "\n",
145 | "fruits_with_longer_names_shouted = [fruit.upper() for fruit in fruits if len(fruit)>5]\n",
146 | "fruits_with_longer_names_shouted"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "id": "38c11c34-d71e-45ba-945b-a3d37dc29793",
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "fruit_mapping_unless_starts_with_a = {fruit: fruit.upper() for fruit in fruits if not fruit.startswith('A')}\n",
157 | "fruit_mapping_unless_starts_with_a"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "id": "5c97d8e8-31de-4afa-973e-28d8e5cab749",
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "# Another comprehension\n",
168 | "\n",
169 | "[book['title'] for book in books]"
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "id": "50be0edc-a4cd-493f-a680-06080bb497b4",
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "# This code will fail with an error because one of our books doesn't have an author\n",
180 | "\n",
181 | "[book['author'] for book in books]"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "id": "53794083-cc09-4edb-b448-2ffb7e8495c2",
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "# But this will work, because get() returns None\n",
192 | "\n",
193 | "[book.get('author') for book in books]"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "id": "b8e4b859-24f8-4016-8d74-c2cef226d049",
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "# And this variation will filter out the None\n",
204 | "\n",
205 | "[book.get('author') for book in books if book.get('author')]"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "id": "c44bb999-52b4-4dee-810b-8a400db8f25f",
212 | "metadata": {},
213 | "outputs": [],
214 | "source": [
215 | "# And this version will convert it into a set, removing duplicates\n",
216 | "\n",
217 | "set([book.get('author') for book in books if book.get('author')])"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "id": "80a65156-6192-4bb4-b4e6-df3fdc933891",
224 | "metadata": {},
225 | "outputs": [],
226 | "source": [
227 | "# And finally, this version is even nicer\n",
228 | "# curly braces creates a set, so this is a set comprehension\n",
229 | "\n",
230 | "{book.get('author') for book in books if book.get('author')}"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "id": "c100e5db-5438-4715-921c-3f7152f83f4a",
236 | "metadata": {},
237 | "source": [
238 | "# Part 2: Generators\n",
239 | "\n",
240 | "We use Generators in the course because AI models can stream back results.\n",
241 | "\n",
242 | "If you've not used Generators before, please start with this excellent intro from ChatGPT:\n",
243 | "\n",
244 | "https://chatgpt.com/share/672faa6e-7dd0-8012-aae5-44fc0d0ec218\n",
245 | "\n",
246 | "Try pasting some of its examples into a cell."
247 | ]
248 | },
249 | {
250 | "cell_type": "code",
251 | "execution_count": null,
252 | "id": "1efc26fa-9144-4352-9a17-dfec1d246aad",
253 | "metadata": {},
254 | "outputs": [],
255 | "source": [
256 | "# First define a generator; it looks like a function, but it has yield instead of return\n",
257 | "\n",
258 | "import time\n",
259 | "\n",
260 | "def come_up_with_fruit_names():\n",
261 | " for fruit in fruits:\n",
262 | " time.sleep(1) # thinking of a fruit\n",
263 | " yield fruit"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "id": "eac338bb-285c-45c8-8a3e-dbfc41409ca3",
270 | "metadata": {},
271 | "outputs": [],
272 | "source": [
273 | "# Then use it\n",
274 | "\n",
275 | "for fruit in come_up_with_fruit_names():\n",
276 | " print(fruit)"
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "id": "f6880578-a3de-4502-952a-4572b95eb9ff",
283 | "metadata": {},
284 | "outputs": [],
285 | "source": [
286 | "# Here's another one\n",
287 | "\n",
288 | "def authors_generator():\n",
289 | " for book in books:\n",
290 | " if book.get(\"author\"):\n",
291 | " yield book.get(\"author\")"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "id": "9e316f02-f87f-441d-a01f-024ade949607",
298 | "metadata": {},
299 | "outputs": [],
300 | "source": [
301 | "# Use it\n",
302 | "\n",
303 | "for author in authors_generator():\n",
304 | " print(author)"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": null,
310 | "id": "7535c9d0-410e-4e56-a86c-ae6c0e16053f",
311 | "metadata": {},
312 | "outputs": [],
313 | "source": [
314 | "# Here's the same thing written with list comprehension\n",
315 | "\n",
316 | "def authors_generator():\n",
317 | " for author in [book.get(\"author\") for book in books if book.get(\"author\")]:\n",
318 | " yield author"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": null,
324 | "id": "dad34494-0f6c-4edb-b03f-b8d49ee186f2",
325 | "metadata": {},
326 | "outputs": [],
327 | "source": [
328 | "# Use it\n",
329 | "\n",
330 | "for author in authors_generator():\n",
331 | " print(author)"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": null,
337 | "id": "abeb7e61-d8aa-4af0-b05a-ae17323e678c",
338 | "metadata": {},
339 | "outputs": [],
340 | "source": [
341 | "# Here's a nice shortcut\n",
342 | "# You can use \"yield from\" to yield each item of an iterable\n",
343 | "\n",
344 | "def authors_generator():\n",
345 | " yield from [book.get(\"author\") for book in books if book.get(\"author\")]"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": null,
351 | "id": "05b0cb43-aa83-4762-a797-d3beb0f22c44",
352 | "metadata": {},
353 | "outputs": [],
354 | "source": [
355 | "# Use it\n",
356 | "\n",
357 | "for author in authors_generator():\n",
358 | " print(author)"
359 | ]
360 | },
361 | {
362 | "cell_type": "code",
363 | "execution_count": null,
364 | "id": "fdfea58e-d809-4dd4-b7b0-c26427f8be55",
365 | "metadata": {},
366 | "outputs": [],
367 | "source": [
368 | "# And finally - we can replace the list comprehension with a set comprehension\n",
369 | "\n",
370 | "def unique_authors_generator():\n",
371 | " yield from {book.get(\"author\") for book in books if book.get(\"author\")}"
372 | ]
373 | },
374 | {
375 | "cell_type": "code",
376 | "execution_count": null,
377 | "id": "3e821d08-97be-4db9-9a5b-ce5dced3eff8",
378 | "metadata": {},
379 | "outputs": [],
380 | "source": [
381 | "# Use it\n",
382 | "\n",
383 | "for author in unique_authors_generator():\n",
384 | " print(author)"
385 | ]
386 | },
387 | {
388 | "cell_type": "code",
389 | "execution_count": null,
390 | "id": "905ba603-15d8-4d01-9a79-60ec293d7ca1",
391 | "metadata": {},
392 | "outputs": [],
393 | "source": [
394 | "# And for some fun - press the stop button in the toolbar when bored!\n",
395 | "# It's like we've made our own Large Language Model... although not particularly large..\n",
396 | "# See if you understand why it prints a letter at a time, instead of a word at a time. If you're unsure, try removing the keyword \"from\" everywhere in the code.\n",
397 | "\n",
398 | "import random\n",
399 | "import time\n",
400 | "\n",
401 | "pronouns = [\"I\", \"You\", \"We\", \"They\"]\n",
402 | "verbs = [\"eat\", \"detest\", \"bathe in\", \"deny the existence of\", \"resent\", \"pontificate about\", \"juggle\", \"impersonate\", \"worship\", \"misplace\", \"conspire with\", \"philosophize about\", \"tap dance on\", \"dramatically renounce\", \"secretly collect\"]\n",
403 | "adjectives = [\"turqoise\", \"smelly\", \"arrogant\", \"festering\", \"pleasing\", \"whimsical\", \"disheveled\", \"pretentious\", \"wobbly\", \"melodramatic\", \"pompous\", \"fluorescent\", \"bewildered\", \"suspicious\", \"overripe\"]\n",
404 | "nouns = [\"turnips\", \"rodents\", \"eels\", \"walruses\", \"kumquats\", \"monocles\", \"spreadsheets\", \"bagpipes\", \"wombats\", \"accordions\", \"mustaches\", \"calculators\", \"jellyfish\", \"thermostats\"]\n",
405 | "\n",
406 | "def infinite_random_sentences():\n",
407 | " while True:\n",
408 | " yield from random.choice(pronouns)\n",
409 | " yield \" \"\n",
410 | " yield from random.choice(verbs)\n",
411 | " yield \" \"\n",
412 | " yield from random.choice(adjectives)\n",
413 | " yield \" \"\n",
414 | " yield from random.choice(nouns)\n",
415 | " yield \". \"\n",
416 | "\n",
417 | "for letter in infinite_random_sentences():\n",
418 | " print(letter, end=\"\", flush=True)\n",
419 | " time.sleep(0.02)"
420 | ]
421 | },
422 | {
423 | "cell_type": "markdown",
424 | "id": "04832ea2-2447-4473-a449-104f80e24d85",
425 | "metadata": {},
426 | "source": [
427 | "# Exercise\n",
428 | "\n",
429 | "Write some python classes for the books example.\n",
430 | "\n",
431 | "Write a Book class with a title and author. Include a method has_author()\n",
432 | "\n",
433 | "Write a BookShelf class with a list of books. Include a generator method unique_authors()"
434 | ]
435 | },
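436 | {
437 | "cell_type": "markdown",
438 | "id": "exercise-solution-note",
439 | "metadata": {},
440 | "source": [
441 | "Have a go at the exercise yourself first! If you'd like to compare notes afterwards, the next cell is one possible sketch of a solution - there are many equally good ways to write it. It reuses the `books` list defined near the top of this notebook, so make sure that cell has been run."
442 | ]
443 | },
444 | {
445 | "cell_type": "code",
446 | "execution_count": null,
447 | "id": "exercise-solution-sketch",
448 | "metadata": {},
449 | "outputs": [],
450 | "source": [
451 | "# One possible sketch of the exercise - no peeking until you've tried it!\n",
452 | "\n",
453 | "class Book:\n",
454 | "    def __init__(self, title, author=None):\n",
455 | "        self.title = title\n",
456 | "        self.author = author\n",
457 | "\n",
458 | "    def has_author(self):\n",
459 | "        return self.author is not None\n",
460 | "\n",
461 | "class BookShelf:\n",
462 | "    def __init__(self, books):\n",
463 | "        self.books = books\n",
464 | "\n",
465 | "    def unique_authors(self):\n",
466 | "        # A generator method: yields each distinct author exactly once\n",
467 | "        yield from {book.author for book in self.books if book.has_author()}\n",
468 | "\n",
469 | "shelf = BookShelf([Book(b[\"title\"], b.get(\"author\")) for b in books])\n",
470 | "for author in shelf.unique_authors():\n",
471 | "    print(author)"
472 | ]
473 | },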
436 | {
437 | "cell_type": "markdown",
438 | "id": "35760406-fe6c-41f9-b0c0-3e8cf73aafd0",
439 | "metadata": {},
440 | "source": [
441 | "# Finally\n",
442 | "\n",
443 | "Here are some intermediate level details of Classes from our AI friend, including use of type hints, inheritance and class methods. This includes a Book example.\n",
444 | "\n",
445 | "https://chatgpt.com/share/67348aca-65fc-8012-a4a9-fd1b8f04ba59"
446 | ]
447 | }
448 | ],
449 | "metadata": {
450 | "kernelspec": {
451 | "display_name": "Python 3 (ipykernel)",
452 | "language": "python",
453 | "name": "python3"
454 | },
455 | "language_info": {
456 | "codemirror_mode": {
457 | "name": "ipython",
458 | "version": 3
459 | },
460 | "file_extension": ".py",
461 | "mimetype": "text/x-python",
462 | "name": "python",
463 | "nbconvert_exporter": "python",
464 | "pygments_lexer": "ipython3",
465 | "version": "3.11.11"
466 | }
467 | },
468 | "nbformat": 4,
469 | "nbformat_minor": 5
470 | }
471 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Ed Donner
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LLM Engineering including Agentic AI Project
2 |
3 | ## Hands-on with LLMs and Agents
4 |
5 | 
6 |
7 | Welcome to the code to accompany the Hands-on LLM Engineering Live Event.
8 |
9 | ### A note before you begin
10 |
11 | I'm here to help you be most successful with your learning! If you hit any snafus, please do reach out by emailing me directly (ed@edwarddonner.com). It's always great to connect with people on LinkedIn - you'll find me here:
12 | https://www.linkedin.com/in/eddonner/
13 |
14 | If you'd like to go more deeply into LLMs and Agents:
15 | - I'm running a number of [Live Events](https://www.oreilly.com/search/?q=author%3A%20%22Ed%20Donner%22) with O'Reilly and Pearson
16 | - I also have a comprehensive, hands-on 8-week [Mastering LLM engineering](https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models/?referralCode=35EB41EBB11DD247CF54) course that builds this entire Agentic AI platform from the ground up (and more!), including RAG and fine-tuning.
17 |
18 | ## Pre-Setup: running Ollama locally with Open-Source
19 |
20 | Before the full setup, try installing Ollama so you can see results immediately!
21 | 1. Download and install Ollama from https://ollama.com, noting that on a PC you might need administrator permissions for the install to work properly
22 | 2. On a PC, start a Command prompt / Powershell (Press Win + R, type `cmd`, and press Enter). On a Mac, start a Terminal (Applications > Utilities > Terminal).
23 | 3. Run `ollama run llama3.2` or for smaller machines try `ollama run llama3.2:1b` - **please note** steer clear of Meta's latest model llama3.3 because at 70B parameters that's way too large for most home computers!
24 | 4. If this doesn't work, you may need to run `ollama serve` in another Powershell (Windows) or Terminal (Mac), and try step 3 again
25 | 5. And if that doesn't work on your box, I've set up this on the cloud. This is on Google Colab, which will need you to have a Google account to sign in, but is free: https://colab.research.google.com/drive/1-_f5XZPsChvfU1sJ0QqCePtIuc55LSdu?usp=sharing
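26 | 
27 | If the local install worked, you can also double-check that the Ollama server is up by visiting http://localhost:11434 in your browser - you should see a short confirmation message that Ollama is running.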
26 |
27 | ## Setup instructions
28 |
29 | Hopefully I've done a decent job of making these guides bulletproof - but please contact me right away if you hit roadblocks:
30 |
31 | - PC people please follow the instructions in [SETUP-PC.md](SETUP-PC.md)
32 | - Mac people please follow the instructions in [SETUP-mac.md](SETUP-mac.md)
33 | - Linux people, the Mac instructions should be close enough!
34 |
35 | ### An important point on API costs (which are optional! No need to spend if you don't wish)
36 |
37 | During this example project, I'll suggest you try out the leading models at the forefront of progress, known as the Frontier models. These services have some charges, but I'll keep costs minimal - like, a few cents at a time. And I'll provide alternatives if you'd prefer not to use them.
38 |
39 | Please do monitor your API usage to ensure you're comfortable with spend; I've included links below. There's no need to spend anything more than a couple of dollars for the entire course. Some AI providers such as OpenAI require a minimum credit like \$5 or local equivalent; we should only spend a fraction of it, and you'll have plenty of opportunity to put it to good use in your own projects. But it's not necessary in the least; the important part is that you focus on learning.
40 |
41 | ### Free alternative to Paid APIs
42 |
43 | Here is an alternative if you'd rather not spend anything on APIs:
44 | Any time that we have code like:
45 | `openai = OpenAI()`
46 | You can use this as a direct replacement:
47 | `openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')`
48 |
49 | Below is a full example:
50 |
51 | ```
52 | # You need to do this one time on your computer
53 | !ollama pull llama3.2
54 |
55 | from openai import OpenAI
56 | MODEL = "llama3.2"
57 | openai = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
58 |
59 | response = openai.chat.completions.create(
60 | model=MODEL,
61 | messages=[{"role": "user", "content": "What is 2 + 2?"}]
62 | )
63 |
64 | print(response.choices[0].message.content)
65 | ```
66 |
67 | ### The most important part
68 |
69 | The best way to learn is by **DOING**. I don't type all the code during the workshop; I execute it for you to see the results. You should work through it afterwards, running each cell and inspecting the objects to get a detailed understanding of what's happening. Then tweak the code and make it your own.
70 |
71 | ### Monitoring API charges
72 |
73 | You can keep your API spend very low throughout this course; you can monitor spend at the OpenAI dashboard [here](https://platform.openai.com/usage).
74 |
75 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on.
76 |
--------------------------------------------------------------------------------
/SETUP-PC.md:
--------------------------------------------------------------------------------
1 | ## Setup instructions for Windows
2 |
3 | Welcome, PC people!
4 |
5 | I should confess up-front: setting up a powerful environment to work at the forefront of AI is not as simple as I'd like. For most people these instructions will go great; but in some cases, for whatever reason, you'll hit a problem. Please don't hesitate to reach out - I am here to get you up and running quickly. There's nothing worse than feeling _stuck_. Message me, email me or LinkedIn message me and I will unstick you quickly!
6 |
7 | Email: ed@edwarddonner.com
8 | LinkedIn: https://www.linkedin.com/in/eddonner/
9 |
10 | I use a platform called Anaconda to set up your environment. It's a powerful tool that builds a complete data science environment. Anaconda ensures that you're working with the right version of Python and that all your packages are compatible with mine, even if our systems are completely different. It takes more time to set up, and it uses more hard drive space (5+ GB), but it's very reliable once it's working.
11 |
12 | Having said that: if you have any problems with Anaconda, I've provided an alternative approach. It's faster and simpler and should have you running quickly, with less of a guarantee around compatibility.
13 |
14 | ### Part 1: Clone the Repo
15 |
16 | This gets you a local copy of the code on your box.
17 |
18 | 1. **Install Git** (if not already installed):
19 |
20 | - Download Git from https://git-scm.com/download/win
21 | - Run the installer and follow the prompts, using default options (press OK lots of times!)
22 |
23 | 2. **Open Command Prompt:**
24 |
25 | - Press Win + R, type `cmd`, and press Enter
26 |
27 | 3. **Navigate to your projects folder:**
28 |
29 | If you have a specific folder for projects, navigate to it using the cd command. For example:
30 | `cd C:\Users\YourUsername\Documents\Projects`
31 | Replacing YourUsername with your actual Windows user
32 |
33 | If you don't have a projects folder, you can create one:
34 | ```
35 | mkdir C:\Users\YourUsername\Documents\Projects
36 | cd C:\Users\YourUsername\Documents\Projects
37 | ```
38 |
39 | 4. **Clone the repository:**
40 |
41 | Enter this in the command prompt in the Projects folder:
42 |
43 | `git clone https://github.com/ed-donner/agentic.git`
44 |
45 | This creates a new directory `agentic` within your Projects folder and downloads the code for the class. Do `cd agentic` to go into it. This `agentic` directory is known as the "project root directory".
46 |
47 | ### Part 2: Install Anaconda environment
48 |
49 | If this Part 2 gives you any problems, there is an alternative Part 2B below that can be used instead.
50 |
51 | 1. **Install Anaconda:**
52 |
53 | - Download Anaconda from https://docs.anaconda.com/anaconda/install/windows/
54 | - Run the installer and follow the prompts. Note that it takes up several GB and takes a while to install, but it will be a powerful platform for you to use in the future.
55 |
56 | 2. **Set up the environment:**
57 |
58 | - Open **Anaconda Prompt** (search for it in the Start menu)
59 | - Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\agentic` using the actual path to your agentic project root directory. Do a `dir` and check you can see the project's files and folders, including `workshop`.
60 | - Create the environment: `conda env create -f environment.yml`
61 | - Wait for a few minutes for all packages to be installed - in some cases, this can literally take 20-30 minutes if you've not used Anaconda before, and even longer depending on your internet connection. Important stuff is happening! If this runs for more than 1 hour 15 mins, or gives you other problems, please go to Part 2B instead.
62 | - You have now built an isolated, dedicated AI environment for engineering LLMs, running vector datastores, and so much more! You now need to **activate** it using this command: `conda activate agentic`
63 |
64 | You should see `(agentic)` in your prompt, which indicates you've activated your new environment.
65 |
66 | 3. **Start Jupyter Lab:**
67 |
68 | - In the Anaconda Prompt, from within the `agentic` folder, type: `jupyter lab`
69 |
70 | ...and Jupyter Lab should open up in a browser. If you've not seen Jupyter Lab before, I'll explain it in a moment! Now close the jupyter lab browser tab, and close the Anaconda prompt, and move on to Part 3.
71 |
72 | ### Part 2B - Alternative to Part 2 if Anaconda gives you trouble
73 |
74 | 1. **Open Command Prompt**
75 |
76 | Press Win + R, type `cmd`, and press Enter
77 |
78 | Run `python --version` to find out which python you're on. Ideally you'd be using a version of Python 3.11, so we're completely in sync.
79 | If not, it's not a big deal, but we might need to come back to this later if you have compatibility issues.
80 | You can download python here:
81 | https://www.python.org/downloads/
82 |
83 | 2. Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\agentic` using the actual path to your agentic project root directory. Do a `dir` and check you can see the project's files and folders, including `workshop`.
84 |
85 | Then, create a new virtual environment with this command:
86 | `python -m venv agentic`
87 |
88 | 3. Activate the virtual environment with
89 | `agentic\Scripts\activate`
90 | You should see (agentic) in your command prompt, which is your sign that things are going well.
91 |
92 | 4. Run `pip install -r requirements.txt`
93 | This may take a few minutes to install.
94 |
95 | 5. **Start Jupyter Lab:**
96 |
97 | From within the `agentic` folder, type: `jupyter lab`
98 | ...and Jupyter Lab should open up, ready for you to get started. Open the `workshop` folder and double click on `agent1.ipynb`. Success! Now close down jupyter lab and move on to Part 3.
99 |
100 | If there are any problems, contact me!
101 |
102 | ### Part 3 - OpenAI key (OPTIONAL but recommended)
103 |
104 | You'll be writing code to call the APIs of Frontier models (models at the forefront of AI).
105 |
106 | 1. Create an OpenAI account if you don't have one by visiting:
107 | https://platform.openai.com/
108 |
109 | 2. OpenAI asks for a minimum credit to use the API. For me in the US, it's \$5. The API calls will spend against this \$5. On this course, we'll only use a small portion of this. I do recommend you make the investment as you'll be able to put it to excellent use.
110 |
111 | You can add your credit balance to OpenAI at Settings > Billing:
112 | https://platform.openai.com/settings/organization/billing/overview
113 |
114 | I recommend you disable the automatic recharge!
115 |
116 | 3. Create your API key
117 |
118 | The webpage where you set up your OpenAI key is at https://platform.openai.com/api-keys - press the green 'Create new secret key' button and press 'Create secret key'. Keep a record of the API key somewhere private; you won't be able to retrieve it from the OpenAI screens in the future. It should start `sk-proj-`.
119 |
120 | You'll be using the fabulous HuggingFace platform; an account is available for free at https://huggingface.co - you can create an API token from the Avatar menu >> Settings >> Access Tokens.
121 |
122 | ### PART 4 - .env file
123 |
124 | When you have these keys, please create a new file called `.env` in your project root directory. The filename needs to be exactly the four characters ".env" rather than "my-keys.env" or ".env.txt". Here's how to do it:
125 |
126 | 1. Open the Notepad (Windows + R to open the Run box, enter `notepad`)
127 |
128 | 2. In the Notepad, type this, replacing xxxx with your API key (starting `sk-proj-`).
129 |
130 | ```
131 | OPENAI_API_KEY=xxxx
132 | HF_TOKEN=xxxx
133 | ```
134 |
135 | Double check there are no spaces before or after the `=` sign, and no spaces at the end of the key.
136 |
137 | 3. Go to File > Save As. In the "Save as type" dropdown, select All Files. In the "File name" field, type exactly **.env** as the filename. Choose to save this in the project root directory (the folder called `agentic`) and click Save.
138 |
139 | 4. Navigate to the folder where you saved the file in Explorer and ensure it was saved as ".env" not ".env.txt" - if necessary rename it to ".env" - you might need to ensure that "Show file extensions" is set to "On" so that you see the file extensions. Message or email me if that doesn't make sense!
140 |
141 | This file won't appear in Jupyter Lab because jupyter hides files starting with a dot. This file is listed in the `.gitignore` file, so it won't get checked in and your keys stay safe.
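142 | 
143 | If you'd like to sanity-check that your keys will be picked up, here's a minimal sketch you can paste into a notebook cell once you're up and running. It assumes the `python-dotenv` package (if it isn't in your environment, install it with `pip install python-dotenv`):
144 | 
145 | ```
146 | import os
147 | from dotenv import load_dotenv
148 | 
149 | # Searches the current directory and its parents for a .env file
150 | load_dotenv(override=True)
151 | 
152 | # Print only a yes/no, so the full key is never revealed
153 | key = os.getenv("OPENAI_API_KEY")
154 | print("OpenAI key found" if key and key.startswith("sk-proj-") else "Key missing or in an unexpected format")
155 | ```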
142 |
143 | ### Part 5 - Showtime!!
144 |
145 | - Open **Anaconda Prompt** (search for it in the Start menu) if you used Anaconda, otherwise open a Powershell if you used the alternative approach in Part 2B
146 |
147 | - Navigate to the "project root directory" by entering something like `cd C:\Users\YourUsername\Documents\Projects\agentic` using the actual path to your agentic project root directory. Do a `dir` and check you can see the project's files and folders, including `workshop`.
148 |
149 | - Activate your environment with `conda activate agentic` if you used Anaconda or `agentic\Scripts\activate` if you used the alternative approach in Part 2B
150 |
151 | - You should see (agentic) in your prompt which is your sign that all is well. And now, type: `jupyter lab` and Jupyter Lab should open up, ready for you to get started. Open the `workshop` folder and double click on `agent1.ipynb`.
152 |
153 | And you're off to the races!
154 |
155 | Note that any time you start jupyter lab in the future, you'll need to follow these Part 5 instructions to start it from within the `agentic` directory with the `agentic` environment activated.
156 |
157 | For those new to Jupyter Lab / Jupyter Notebook, it's a delightful Data Science environment where you can simply hit shift+return in any cell to run it; start at the top and work your way down! There's a notebook in the project root with a [Guide to Jupyter Lab](Guide%20to%20Jupyter.ipynb), and an [Intermediate Python](Intermediate%20Python.ipynb) tutorial, if that would be helpful.
158 |
159 | If you have any problems, I've included a notebook called [troubleshooting.ipynb](troubleshooting.ipynb) to help you figure it out.
160 |
161 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on.
--------------------------------------------------------------------------------
/SETUP-mac.md:
--------------------------------------------------------------------------------
1 | ## Setup instructions for Mac
2 |
3 | Welcome, Mac people!
4 |
5 | I should confess up-front: setting up a powerful environment to work at the forefront of AI is not as simple as I'd like. For most people these instructions will go great; but in some cases, for whatever reason, you'll hit a problem. Please don't hesitate to reach out - I am here to get you up and running quickly. There's nothing worse than feeling _stuck_. Message me, email me or LinkedIn message me and I will unstick you quickly!
6 |
7 | Email: ed@edwarddonner.com
8 | LinkedIn: https://www.linkedin.com/in/eddonner/
9 |
10 | I use a platform called Anaconda to set up your environment. It's a powerful tool that builds a complete data science environment. Anaconda ensures that you're working with the right version of Python and that all your packages are compatible with mine, even if our systems are completely different. It takes more time to set up, and it uses more hard drive space (5+ GB), but it's very reliable once it's working.
11 |
12 | Having said that: if you have any problems with Anaconda, I've provided an alternative approach. It's faster and simpler and should have you running quickly, with less of a guarantee around compatibility.
13 |
14 | ### Part 1: Clone the Repo
15 |
16 | This gets you a local copy of the code on your box.
17 |
18 | 1. **Install Git** if not already installed (it will be in most cases)
19 |
20 | - Open Terminal (Applications > Utilities > Terminal)
21 | - Type `git --version` If not installed, you'll be prompted to install it
22 |
23 | 2. **Navigate to your projects folder:**
24 |
25 | If you have a specific folder for projects, navigate to it using the cd command. For example:
26 | `cd ~/Documents/Projects`
27 |
28 | If you don't have a projects folder, you can create one:
29 | ```
30 | mkdir ~/Documents/Projects
31 | cd ~/Documents/Projects
32 | ```
33 |
34 | 3. **Clone the repository:**
35 |
36 | Enter this in the terminal in the Projects folder:
37 |
38 | `git clone https://github.com/ed-donner/agentic.git`
39 |
40 | This creates a new directory `agentic` within your Projects folder and downloads the code for the class. Do `cd agentic` to go into it. This `agentic` directory is known as the "project root directory".
41 |
42 | ### Part 2: Install Anaconda environment
43 |
44 | If this Part 2 gives you any problems, there is an alternative Part 2B below that can be used instead.
45 |
46 | 1. **Install Anaconda:**
47 |
48 | - Download Anaconda from https://docs.anaconda.com/anaconda/install/mac-os/
49 | - Double-click the downloaded file and follow the installation prompts. Note that it takes up several GB and takes a while to install, but it will be a powerful platform for you to use in the future.
50 |
51 | 2. **Set up the environment:**
52 |
53 | - Open a new Terminal (Applications > Utilities > Terminal)
54 | - Navigate to the "project root directory" using `cd ~/Documents/Projects/agentic` (replace this path as needed with the actual path to the agentic directory, your locally cloned version of the repo). Do `ls` and check you can see the project's files and folders, including `workshop`.
55 | - Create the environment: `conda env create -f environment.yml`
56 | - Wait for a few minutes for all packages to be installed - in some cases, this can literally take 20-30 minutes if you've not used Anaconda before, and even longer depending on your internet connection. Important stuff is happening! If this runs for more than 1 hour 15 mins, or gives you other problems, please go to Part 2B instead.
57 | - You have now built an isolated, dedicated AI environment for engineering LLMs, running vector datastores, and so much more! You now need to **activate** it using this command: `conda activate agentic`
58 |
59 | You should see `(agentic)` in your prompt, which indicates you've activated your new environment.
60 |
61 | 3. **Start Jupyter Lab:**
62 |
63 | - In the Terminal window, from within the `agentic` folder, type: `jupyter lab`
64 |
65 | ...and Jupyter Lab should open up in a browser. If you've not seen Jupyter Lab before, I'll explain it in a moment! Now close the jupyter lab browser tab, and close the Terminal, and move on to Part 3.
66 |
67 | ### Part 2B - Alternative to Part 2 if Anaconda gives you trouble
68 |
69 | 1. **Open a new Terminal** (Applications > Utilities > Terminal)
70 |
71 | Run `python --version` to find out which python you're on. Ideally you'd be using a version of Python 3.11, so we're completely in sync.
72 | If not, it's not a big deal, but we might need to come back to this later if you have compatibility issues.
73 | You can download python here:
74 | https://www.python.org/downloads/
75 |
76 | 2. Navigate to the "project root directory" using `cd ~/Documents/Projects/agentic` (replace this path with the actual path to the agentic directory, your locally cloned version of the repo). Do `ls` and check you can see the project's files and folders, including `workshop`.
77 |
78 | Then, create a new virtual environment with this command:
79 | `python -m venv agentic`
80 |
81 | 3. Activate the virtual environment with
82 | `source agentic/bin/activate`
83 | You should see (agentic) in your command prompt, which is your sign that things are going well.
84 |
85 | 4. Run `pip install -r requirements.txt`
86 | This may take a few minutes to install.
87 |
88 | 5. **Start Jupyter Lab:**
89 |
90 | From within the `agentic` folder, type: `jupyter lab`
91 | ...and Jupyter Lab should open up, ready for you to get started. Open the `workshop` folder and double click on `agent1.ipynb`. Success! Now close down jupyter lab and move on to Part 3.
92 |
93 | If there are any problems, contact me!
94 |
95 | ### Part 3 - OpenAI key (OPTIONAL but recommended)
96 |
97 | 1. Create an OpenAI account if you don't have one by visiting:
98 | https://platform.openai.com/
99 |
100 | 2. OpenAI asks for a minimum credit to use the API. For me in the US, it's \$5. The API calls will spend against this \$5. On this course, we'll only use a small portion of this. I do recommend you make the investment as you'll be able to put it to excellent use.
101 |
102 | You can add your credit balance to OpenAI at Settings > Billing:
103 | https://platform.openai.com/settings/organization/billing/overview
104 |
105 | I recommend you disable the automatic recharge!
106 |
107 | 3. Create your API key
108 |
109 | The webpage where you set up your OpenAI key is at https://platform.openai.com/api-keys - press the green 'Create new secret key' button and press 'Create secret key'. Keep a record of the API key somewhere private; you won't be able to retrieve it from the OpenAI screens in the future. It should start `sk-proj-`.
110 |
111 | You'll be using the fabulous HuggingFace platform; an account is available for free at https://huggingface.co - you can create an API token from the Avatar menu >> Settings >> Access Tokens.
112 |
113 | ### Part 4 - .env file
114 |
115 | When you have these keys, please create a new file called `.env` in your project root directory. The filename needs to be exactly the four characters ".env" rather than "my-keys.env" or ".env.txt". Here's how to do it:
116 |
117 | 1. Open Terminal (Applications > Utilities > Terminal)
118 |
119 | 2. Navigate to the "project root directory" using `cd ~/Documents/Projects/agentic` (replace this path with the actual path to the agentic directory, your locally cloned version of the repo).
120 |
121 | 3. Create the .env file with
122 |
123 | nano .env
124 |
125 | 4. Then type your API keys into nano, replacing xxxx with each key (the OpenAI key starts `sk-proj-`).
126 |
127 | ```
128 | OPENAI_API_KEY=xxxx
129 | HF_TOKEN=xxxx
130 | ```
131 |
132 | 5. Save the file:
133 |
134 | Control + O
135 |    Enter (to confirm saving the file)
136 | Control + X to exit the editor
137 |
138 | 6. Use this command to list files in your project root directory:
139 |
140 | `ls -a`
141 |
142 | And confirm that the `.env` file is there.
143 |
144 | This file won't appear in Jupyter Lab because Jupyter hides files starting with a dot. The file is listed in `.gitignore`, so it won't get checked in, and your keys stay safe.
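
Optionally, once your environment is activated, you can confirm the keys are being picked up. Here's a minimal sketch (not part of the course code) that reads the `.env` file with python-dotenv from the project root and prints only the first few characters of each key:

```
# Quick check that the .env file is being read - run from the project root, in the activated environment
import os
from dotenv import load_dotenv

load_dotenv(override=True)

for key in ("OPENAI_API_KEY", "HF_TOKEN"):
    value = os.getenv(key)
    if value:
        print(f"{key} found, starting {value[:8]}...")   # never print or share the full key
    else:
        print(f"{key} not found - double-check the .env file")
```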
145 |
146 | ### Part 5 - Showtime!!
147 |
148 | - Open Terminal (Applications > Utilities > Terminal)
149 |
150 | - Navigate to the "project root directory" using `cd ~/Documents/Projects/agentic` (replace this path with the actual path to the agentic directory, your locally cloned version of the repo). Do `ls` and check you can see the project files, including subdirectories like `intro` and `workshop`.
151 |
152 | - Activate your environment with `conda activate agentic` (or `source agentic/bin/activate` if you used the alternative approach in Part 2B)
153 |
154 | - You should see (agentic) in your prompt, which is your sign that all is well. Now type `jupyter lab`, and Jupyter Lab should open up, ready for you to get started. Open the `workshop` folder and double-click on `agent1.ipynb`.
155 |
156 | And you're off to the races!
157 |
158 | Note that any time you start jupyter lab in the future, you'll need to follow these Part 5 instructions to start it from within the `agentic` directory with the `agentic` environment activated.
159 |
160 | For those new to Jupyter Lab / Jupyter Notebook, it's a delightful Data Science environment where you can simply hit shift+return in any cell to run it; start at the top and work your way down! In the root of the repo there's a [Guide to Jupyter Lab](Guide%20to%20Jupyter.ipynb) notebook, and an [Intermediate Python](Intermediate%20Python.ipynb) tutorial, if that would be helpful.
161 |
162 | If you have any problems, I've included a notebook called [troubleshooting.ipynb](troubleshooting.ipynb) to help you figure them out.
163 |
164 | Please do message me or email me at ed@edwarddonner.com if this doesn't work or if I can help with anything. I can't wait to hear how you get on.
--------------------------------------------------------------------------------
/business.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ed-donner/agentic/8bd4b605ebbca8a3eaa394ca5f95abe55629962e/business.jpg
--------------------------------------------------------------------------------
/diagnostics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "73287ed4-81e3-496a-9e47-f0e8c3770ce9",
6 | "metadata": {},
7 | "source": [
8 | "# Gathering Essential Diagnostic information\n",
9 | "\n",
10 | "## Please run this next cell to gather some important data\n",
11 | "\n",
12 | "Please run the next cell; it should take a minute or so to run (mostly the network test).\n",
13 |     "Then email me the output of the last cell to ed@edwarddonner.com. \n",
14 | "Alternatively: this will create a file called report.txt - just attach the file to your email."
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "id": "ed8056e8-efa2-4b6f-a4bb-e7ceb733c517",
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "# Run my diagnostics report to collect key information for debugging\n",
25 | "# Please email me the results. Either copy & paste the output, or attach the file report.txt\n",
26 | "\n",
27 | "!pip install -q requests speedtest-cli psutil setuptools\n",
28 | "from diagnostics import Diagnostics\n",
29 | "Diagnostics().run()"
30 | ]
31 | }
32 | ],
33 | "metadata": {
34 | "kernelspec": {
35 | "display_name": "Python 3 (ipykernel)",
36 | "language": "python",
37 | "name": "python3"
38 | },
39 | "language_info": {
40 | "codemirror_mode": {
41 | "name": "ipython",
42 | "version": 3
43 | },
44 | "file_extension": ".py",
45 | "mimetype": "text/x-python",
46 | "name": "python",
47 | "nbconvert_exporter": "python",
48 | "pygments_lexer": "ipython3",
49 | "version": "3.11.10"
50 | }
51 | },
52 | "nbformat": 4,
53 | "nbformat_minor": 5
54 | }
55 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: agentic
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - python=3.11
7 | - pip
8 | - python-dotenv
9 | - requests
10 | - beautifulsoup4
11 | - numpy
12 | - pandas
13 | - scipy
14 | - pytorch
15 | - jupyterlab
16 | - ipywidgets
17 | - pyarrow
18 | - matplotlib
19 | - scikit-learn
20 | - tiktoken
21 | - jupyter-dash
22 | - plotly
23 | - feedparser
24 | - pip:
25 | - transformers
26 | - sentence-transformers
27 | - datasets
28 | - accelerate
29 | - sentencepiece
30 | - bitsandbytes
31 | - openai
32 | - anthropic
33 | - google-generativeai
34 | - gradio
35 | - gensim
36 | - modal
37 | - chromadb
38 | - ollama
39 | - psutil
40 | - setuptools
41 | - speedtest-cli
42 | - python-multipart
43 |
--------------------------------------------------------------------------------
/handson.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ed-donner/agentic/8bd4b605ebbca8a3eaa394ca5f95abe55629962e/handson.jpg
--------------------------------------------------------------------------------
/important.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ed-donner/agentic/8bd4b605ebbca8a3eaa394ca5f95abe55629962e/important.jpg
--------------------------------------------------------------------------------
/intro/lab2.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2",
6 | "metadata": {},
7 | "source": [
8 | "# Segment 1 Lab 2\n",
9 | "\n",
10 | "## Making our own Customer Support Chatbot\n",
11 | "\n",
12 | "One of the most common business use cases of Gen AI.\n",
13 | "\n",
14 | "We'll even make our own User Interface - no frontend skills required!\n",
15 | "\n",
16 | "We will use the delightful `gradio` framework which makes it remarkably easy for data scientists to build great UIs.\n",
17 | "\n",
18 | "We are going to move quickly as this is just a teaser - but please come back and look at this later!"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "# imports\n",
29 | "\n",
30 | "import os\n",
31 | "import json\n",
32 | "from dotenv import load_dotenv\n",
33 | "from openai import OpenAI\n",
34 | "import gradio as gr"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "id": "231605aa-fccb-447e-89cf-8b187444536a",
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "# Load environment variables in a file called .env\n",
45 | "# Print the key prefixes to help with any debugging\n",
46 | "\n",
47 | "load_dotenv(override=True)\n",
48 | "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
49 | "\n",
50 | "if openai_api_key:\n",
51 | " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
52 | "else:\n",
53 | " print(\"OpenAI API Key not set\")"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "# Initialize\n",
64 | "\n",
65 | "openai = OpenAI()\n",
66 | "MODEL = 'gpt-4o-mini'"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "system_prompt = \"\"\"\n",
77 | "You are a customer support assistant for an airline.\n",
78 | "You give short, courteous, engaging answers in a humorous way, no more than 2-3 sentences.\n",
79 | "For context, if it's relevant: a return ticket to London costs $499.\n",
80 | "\"\"\"\n",
81 | "\n",
82 | "system_message = {\"role\": \"system\", \"content\": system_prompt}"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "id": "98e97227-f162-4d1a-a0b2-345ff248cbe7",
88 | "metadata": {},
89 | "source": [
90 | "Reminder of the structure of prompt messages to OpenAI:\n",
91 | "\n",
92 | "```\n",
93 | "[\n",
94 | " {\"role\": \"system\", \"content\": \"system message here\"},\n",
95 | " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
96 | " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
97 | " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
98 | "]\n",
99 | "```\n",
100 | "\n"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "9b73eb80-c1ec-465e-8d8a-f57e75df5cba",
106 | "metadata": {},
107 | "source": [
108 | "## The chat function\n",
109 | "\n",
110 | "In order to use Gradio's out-of-the-box Chat User Interface, we need to write a single function, `chat(message, history)`"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "def chat(message, history):\n",
121 | " messages = [system_message] + history + [{\"role\": \"user\", \"content\": message}]\n",
122 | " results = openai.chat.completions.create(model=MODEL, messages=messages)\n",
123 | " return results.choices[0].message.content"
124 | ]
125 | },
126 | {
127 | "cell_type": "markdown",
128 | "id": "1334422a-808f-4147-9c4c-57d63d9780d0",
129 | "metadata": {},
130 | "source": [
131 | "## And then enter Gradio's magic!"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "id": "bc005e6d-174b-4d4a-a9d7-6b7648b27b26",
147 | "metadata": {},
148 | "source": [
149 | "# Let's go multi-modal!!\n",
150 | "\n",
151 |     "We can use DALL-E-3, OpenAI's image generation model, to make us some images\n",
152 | "\n",
153 | "Let's put this in a function called artist.\n",
154 | "\n",
155 | "### Price alert: each time I generate an image it costs about 4c - don't go crazy with images!"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "id": "4ae95c0e-db8d-4631-b3bb-353ef13baf0d",
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "# Some imports for handling images\n",
166 | "\n",
167 | "import base64\n",
168 | "from io import BytesIO\n",
169 | "from PIL import Image"
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "id": "ddbc7959-9d5b-4b69-93ab-f12d57d9cc6a",
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "def artist(city):\n",
180 | " prompt = f\"An image representing a vacation in {city}, showing tourist spots and everything unique about {city}, in a vibrant pop-art style\"\n",
181 | " image_response = openai.images.generate(\n",
182 | " model=\"dall-e-3\",\n",
183 | " prompt=prompt,\n",
184 | " size=\"1024x1024\",\n",
185 | " n=1,\n",
186 | " response_format=\"b64_json\",\n",
187 | " )\n",
188 | " image_base64 = image_response.data[0].b64_json\n",
189 | " image_data = base64.b64decode(image_base64)\n",
190 | " return Image.open(BytesIO(image_data))"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": null,
196 | "id": "ae324773-7c3c-4ef5-9174-de96a247cd75",
197 | "metadata": {},
198 | "outputs": [],
199 | "source": [
200 | "image = artist(\"New York City\")\n",
201 | "display(image)"
202 | ]
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "id": "abb4110d-15cf-4f7f-9ed6-411f83a21d08",
207 | "metadata": {},
208 | "source": [
209 | "# Bringing it together\n",
210 | "\n",
211 | "This is the start of what you might call an \"agent framework\", in that we will use multiple LLM calls to solve a complex problem.\n",
212 | "\n",
213 | "We'll work on a full agent framework in the final project today!"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "id": "28e7d62e-2984-4148-984b-2b729b64997d",
220 | "metadata": {},
221 | "outputs": [],
222 | "source": [
223 | "def chat(history):\n",
224 | " message = history[-1][\"content\"]\n",
225 | " messages = [system_message] + history\n",
226 | " results = openai.chat.completions.create(model=MODEL, messages=messages)\n",
227 | " image = artist(\"London\") if \"london\" in message.lower() else None\n",
228 | " response = results.choices[0].message.content\n",
229 | " history += [{\"role\":\"assistant\", \"content\":response}]\n",
230 | " return history, image"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "id": "b3ef3d8f-d5fe-44ce-a6a4-d8cd961c82d2",
237 | "metadata": {},
238 | "outputs": [],
239 | "source": [
240 | "# More involved Gradio code as we're not using the preset Chat interface\n",
241 | "\n",
242 | "with gr.Blocks() as ui:\n",
243 | " with gr.Row():\n",
244 | " chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
245 | " image_output = gr.Image(height=500)\n",
246 | " with gr.Row():\n",
247 | " entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
248 | "\n",
249 | " def do_entry(message, history):\n",
250 | " history += [{\"role\":\"user\", \"content\":message}]\n",
251 | " return \"\", history\n",
252 | "\n",
253 | " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
254 | " chat, inputs=chatbot, outputs=[chatbot, image_output]\n",
255 | " )\n",
256 | "\n",
257 | "ui.launch(inbrowser=True)"
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": null,
263 | "id": "f16a600e-1f6d-4fb0-b196-6be23181c902",
264 | "metadata": {},
265 | "outputs": [],
266 | "source": [
267 | "# More involved Gradio code as we're not using the preset Chat interface\n",
268 | "\n",
269 | "with gr.Blocks() as ui:\n",
270 | " with gr.Row():\n",
271 | " chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
272 | " image_output = gr.Image(height=500)\n",
273 | " with gr.Row():\n",
274 | " entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
275 | " with gr.Row():\n",
276 | " clear = gr.Button(\"Clear\")\n",
277 | "\n",
278 | " def do_entry(message, history):\n",
279 | " history += [{\"role\":\"user\", \"content\":message}]\n",
280 | " return \"\", history\n",
281 | "\n",
282 | " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
283 | " chat, inputs=chatbot, outputs=[chatbot, image_output]\n",
284 | " )\n",
285 | " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
286 | "\n",
287 | "ui.launch(inbrowser=True)"
288 | ]
289 | },
290 | {
291 | "cell_type": "markdown",
292 | "id": "44bb6218-2f43-4049-8954-43e60e275a53",
293 | "metadata": {},
294 | "source": [
295 | "# A multi-modal customer support chatbot - in minutes!\n",
296 | "\n",
297 | "This illustrated how easy it is to build a chatbot with character and knowledge.\n",
298 | "\n",
299 | "# Exercise\n",
300 | "\n",
301 | "Take this further - have it generate audio for its responses, and use Tools to look up costs of flights. \n",
302 | "See my companion repo llm_engineering in week2 folder for the solution.\n",
303 | "\n",
304 | "## And.. apply this to your business! Make an AI Assistant for your domain"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": null,
310 | "id": "c9bde540-0513-4ae0-8fbd-ffe8ff688087",
311 | "metadata": {},
312 | "outputs": [],
313 | "source": []
314 | }
315 | ],
316 | "metadata": {
317 | "kernelspec": {
318 | "display_name": "Python 3 (ipykernel)",
319 | "language": "python",
320 | "name": "python3"
321 | },
322 | "language_info": {
323 | "codemirror_mode": {
324 | "name": "ipython",
325 | "version": 3
326 | },
327 | "file_extension": ".py",
328 | "mimetype": "text/x-python",
329 | "name": "python",
330 | "nbconvert_exporter": "python",
331 | "pygments_lexer": "ipython3",
332 | "version": "3.11.11"
333 | }
334 | },
335 | "nbformat": 4,
336 | "nbformat_minor": 5
337 | }
338 |
--------------------------------------------------------------------------------
/outputs/agent3 (1).ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e426cd04-c053-43e8-b505-63cee7956a53",
6 | "metadata": {},
7 | "source": [
8 | "# The Third Agent\n",
9 | "\n",
10 | "## Fine-tuned LLM using QLoRA with Llama 3.1 as the base model\n",
11 | "\n",
12 | "First, here's a link to Google Colab set up for training with QLoRA\n",
13 | "\n",
14 | "https://colab.research.google.com/drive/1IqxWtUzuV5ks2kS1oO4Mge3Mf1o3rhRj\n",
15 | "\n",
16 | "And here's a link to Google Colab set up for inference:\n",
17 | "\n",
18 | "https://colab.research.google.com/drive/1shI0i5QiMWL8fSmM-VcBI7RT5NjzZJ17\n",
19 | "\n",
20 | "Once this is set up, I have this running on Modal\n",
21 | "\n",
22 | "If you want to do this too, head over to modal.com to set up your free starter account with free credit"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 1,
28 | "id": "bc0e1c1c-be6a-4395-bbbd-eeafc9330d7e",
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "# Just one import to start with!!\n",
33 | "\n",
34 | "import modal"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "id": "80fe1f83-986d-49ca-a8d6-74c27fe7ef20",
40 | "metadata": {},
41 | "source": [
42 | "During the class I might visit this URL to show the code deployed on Modal:\n",
43 | "\n",
44 | "https://modal.com/apps/ed-donner/main/ap-stiZMq9syc9zikKRoLnRor?functionId=fu-LumBocLb9rvkzuIUBQGn42&activeTab=functions"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 2,
50 | "id": "3c9db702-4337-46ff-a6ba-8e1f213740b9",
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "133.0\n"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "# For you to experiment after the class: below we set up and deploy our proprietary LLM over modal\n",
63 | "# Here we execute it directly\n",
64 | "\n",
65 | "Pricer = modal.Cls.lookup(\"pricer-service\", \"Pricer\")\n",
66 | "pricer = Pricer()\n",
67 | "\n",
68 | "reply = pricer.price.remote(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")\n",
69 | "print(reply)"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 3,
75 | "id": "dcc44afd-07e4-485e-b6d2-f96908a7726e",
76 | "metadata": {},
77 | "outputs": [
78 | {
79 | "name": "stdout",
80 | "output_type": "stream",
81 | "text": [
82 | "iPad Pro 1st gen estimate: 299.0\n",
83 | "iPad Pro 6th gen estimate: 799.0\n"
84 | ]
85 | }
86 | ],
87 | "source": [
88 | "# Generations of iPad pro\n",
89 | "\n",
90 | "print(\"iPad Pro 1st gen estimate:\", pricer.price.remote(\"iPad pro 1st generation\"))\n",
91 | "print(\"iPad Pro 6th gen estimate:\", pricer.price.remote(\"iPad pro 6th generation\"))"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 4,
97 | "id": "f4a8ee05-4c85-4407-8c9d-384159752dcc",
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "import logging\n",
102 | "root = logging.getLogger()\n",
103 | "root.setLevel(logging.INFO)"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 5,
109 | "id": "b7e60466-25b7-4f0d-8136-a94997ef41b1",
110 | "metadata": {},
111 | "outputs": [
112 | {
113 | "name": "stderr",
114 | "output_type": "stream",
115 | "text": [
116 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent is initializing - connecting to modal\u001b[0m\n",
117 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent is ready\u001b[0m\n",
118 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent is calling remote fine-tuned model\u001b[0m\n",
119 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent completed - predicting $133.00\u001b[0m\n"
120 | ]
121 | },
122 | {
123 | "data": {
124 | "text/plain": [
125 | "133.0"
126 | ]
127 | },
128 | "execution_count": 5,
129 | "metadata": {},
130 | "output_type": "execute_result"
131 | }
132 | ],
133 | "source": [
134 | "from agents.specialist_agent import SpecialistAgent\n",
135 | "\n",
136 | "agent = SpecialistAgent()\n",
137 | "agent.price(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 6,
143 | "id": "40039e10-6e19-4a9e-a58f-617dca05e544",
144 | "metadata": {},
145 | "outputs": [
146 | {
147 | "name": "stderr",
148 | "output_type": "stream",
149 | "text": [
150 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent is calling remote fine-tuned model\u001b[0m\n",
151 | "INFO:root:\u001b[40m\u001b[31m[Specialist Agent] Specialist Agent completed - predicting $299.00\u001b[0m\n"
152 | ]
153 | },
154 | {
155 | "data": {
156 | "text/plain": [
157 | "299.0"
158 | ]
159 | },
160 | "execution_count": 6,
161 | "metadata": {},
162 | "output_type": "execute_result"
163 | }
164 | ],
165 | "source": [
166 | "agent.price(\"Shure MV7+ professional podcaster microphone with usb-c and XLR outputs\")"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "id": "22e8d804-c027-45fb-8fef-06e7bba6295a",
172 | "metadata": {},
173 | "source": [
174 | "# For you to get this to work yourself\n",
175 | "\n",
176 | "## We need to set your HuggingFace Token as a secret in Modal\n",
177 | "\n",
178 | "1. Go to modal.com, sign in and go to your dashboard\n",
179 | "2. Click on Secrets in the nav bar\n",
180 | "3. Create new secret, click on Hugging Face\n",
181 | "4. Fill in your HF_TOKEN where it prompts you\n"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "id": "5788a3a4-f7a7-4c9f-9c88-55ba1afe0d2e",
188 | "metadata": {},
189 | "outputs": [],
190 | "source": [
191 | "# First time: uncomment and run the line below\n",
192 | "# !modal setup"
193 | ]
194 | },
195 | {
196 | "cell_type": "markdown",
197 | "id": "04d8747f-8452-4077-8af6-27e03888508a",
198 | "metadata": {},
199 | "source": [
200 | "# Deploying and running:\n",
201 | "\n",
202 | "From a command line, `modal deploy xxx` will deploy your code as a Deployed App\n",
203 | "\n",
204 | "This is how you could package your AI service behind an API to be used in a Production System.\n",
205 | "\n",
206 | "You can also build REST endpoints easily, although we won't cover that as we'll be calling direct from Python."
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "id": "f56d1e55-2a03-4ce2-bb47-2ab6b9175a02",
213 | "metadata": {},
214 | "outputs": [],
215 | "source": [
216 | "!modal deploy pricer_service"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "id": "9e19daeb-1281-484b-9d2f-95cc6fed2622",
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "Pricer = modal.Cls.lookup(\"pricer-service\", \"Pricer\")\n",
227 | "pricer = Pricer()\n",
228 | "reply = pricer.price.remote(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")\n",
229 | "print(reply)"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "id": "f5a3181b-1310-4102-8d7d-52caf4c00538",
236 | "metadata": {},
237 | "outputs": [],
238 | "source": []
239 | }
240 | ],
241 | "metadata": {
242 | "kernelspec": {
243 | "display_name": "Python 3 (ipykernel)",
244 | "language": "python",
245 | "name": "python3"
246 | },
247 | "language_info": {
248 | "codemirror_mode": {
249 | "name": "ipython",
250 | "version": 3
251 | },
252 | "file_extension": ".py",
253 | "mimetype": "text/x-python",
254 | "name": "python",
255 | "nbconvert_exporter": "python",
256 | "pygments_lexer": "ipython3",
257 | "version": "3.11.11"
258 | }
259 | },
260 | "nbformat": 4,
261 | "nbformat_minor": 5
262 | }
263 |
--------------------------------------------------------------------------------
/outputs/agent4 (1).ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "23f53670-1a73-46ba-a754-4a497e8e0e64",
6 | "metadata": {},
7 | "source": [
8 | "# Fourth Agent\n",
9 | "\n",
10 | "Use Claude to craft a message and send it as a Push Notification\n",
11 | "\n",
12 | "## Messaging Agent\n",
13 | "\n",
14 | "For the Push Notification, we will be using a nifty platform called Pushover. \n",
15 | "You'll need to set up a free account and add 2 tokens to your `.env` file:\n",
16 | "\n",
17 | "```\n",
18 | "PUSHOVER_USER=xxx\n",
19 | "PUSHOVER_TOKEN=xxx\n",
20 | "```\n",
21 | "\n",
22 | "## Just to say one more time...\n",
23 | "\n",
24 | "I realize I'm going through tons of material very quickly! \n",
25 | "The idea is to give you general intuition so you can come back and try for yourself."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 1,
31 | "id": "80d683d9-9e92-44ae-af87-a413ca84db21",
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "import os\n",
36 | "import sys\n",
37 | "import logging\n",
38 | "from agents.deals import Opportunity\n",
39 | "import http.client\n",
40 | "import urllib\n",
41 | "from dotenv import load_dotenv"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 2,
47 | "id": "e05cc427-3d2c-4792-ade1-d356f95a82a9",
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "load_dotenv()\n",
52 | "pushover_user = os.getenv('PUSHOVER_USER', 'your-pushover-user-if-not-using-env')\n",
53 |     "pushover_token = os.getenv('PUSHOVER_TOKEN', 'your-pushover-token-if-not-using-env')"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": 3,
59 | "id": "5ec518f5-dae4-44b1-a185-d7eaf853ec00",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "def push(text):\n",
64 | " conn = http.client.HTTPSConnection(\"api.pushover.net:443\")\n",
65 | " conn.request(\"POST\", \"/1/messages.json\",\n",
66 | " urllib.parse.urlencode({\n",
67 | " \"token\": pushover_token,\n",
68 | " \"user\": pushover_user,\n",
69 | " \"message\": text,\n",
70 | " \"sound\": \"cashregister\"\n",
71 | " }), { \"Content-type\": \"application/x-www-form-urlencoded\" })\n",
72 | " conn.getresponse()"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 6,
78 | "id": "0056a02f-06a3-4acc-99f3-cbe919ee936b",
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "push(\"MASSIVE DEAL!!\")"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": 7,
88 | "id": "fbcb97c0-3f5f-48a0-a414-8a1c93e53a7f",
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "import logging\n",
93 | "root = logging.getLogger()\n",
94 | "root.setLevel(logging.INFO)"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 8,
100 | "id": "093776c1-078c-42fb-8cb4-92a1e9481061",
101 | "metadata": {},
102 | "outputs": [
103 | {
104 | "name": "stderr",
105 | "output_type": "stream",
106 | "text": [
107 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent is initializing\u001b[0m\n",
108 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent has initialized Pushover and Claude\u001b[0m\n",
109 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent is sending a push notification\u001b[0m\n"
110 | ]
111 | }
112 | ],
113 | "source": [
114 | "from agents.messaging_agent import MessagingAgent\n",
115 | "\n",
116 | "agent = MessagingAgent()\n",
117 | "agent.push(\"SUCH A MASSIVE DEAL!!\")"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 9,
123 | "id": "85a5a527-06ec-4d19-98cf-9ca7cc68f85c",
124 | "metadata": {},
125 | "outputs": [
126 | {
127 | "name": "stderr",
128 | "output_type": "stream",
129 | "text": [
130 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent is using Claude to craft the message\u001b[0m\n",
131 | "INFO:httpx:HTTP Request: POST https://api.anthropic.com/v1/messages \"HTTP/1.1 200 OK\"\n",
132 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent is sending a push notification\u001b[0m\n",
133 | "INFO:root:\u001b[40m\u001b[35m[Messaging Agent] Messaging Agent has completed\u001b[0m\n"
134 | ]
135 | }
136 | ],
137 | "source": [
138 |     "agent.notify(\"A special deal on Samsung 60 inch LED TV going at a great bargain\", 300, 1000, \"www.samsung.com\")"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "id": "943e180b-7f8b-4833-adfc-6a5712d369fd",
145 | "metadata": {},
146 | "outputs": [],
147 | "source": []
148 | }
149 | ],
150 | "metadata": {
151 | "kernelspec": {
152 | "display_name": "Python 3 (ipykernel)",
153 | "language": "python",
154 | "name": "python3"
155 | },
156 | "language_info": {
157 | "codemirror_mode": {
158 | "name": "ipython",
159 | "version": 3
160 | },
161 | "file_extension": ".py",
162 | "mimetype": "text/x-python",
163 | "name": "python",
164 | "nbconvert_exporter": "python",
165 | "pygments_lexer": "ipython3",
166 | "version": "3.11.11"
167 | }
168 | },
169 | "nbformat": 4,
170 | "nbformat_minor": 5
171 | }
172 |
--------------------------------------------------------------------------------
/price_agent.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ed-donner/agentic/8bd4b605ebbca8a3eaa394ca5f95abe55629962e/price_agent.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | python-dotenv
2 | jupyterlab
3 | ipywidgets
4 | requests
5 | numpy
6 | pandas
7 | scipy
8 | scikit-learn
9 | matplotlib
10 | gensim
11 | torch
12 | transformers
13 | tqdm
14 | openai
15 | gradio
16 | datasets
17 | sentencepiece
18 | anthropic
19 | google-generativeai
20 | unstructured
21 | chromadb
22 | plotly
23 | jupyter-dash
24 | beautifulsoup4
25 | modal
26 | ollama
27 | accelerate
28 | bitsandbytes
29 | psutil
30 | setuptools
31 | speedtest-cli
--------------------------------------------------------------------------------
/resources.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ed-donner/agentic/8bd4b605ebbca8a3eaa394ca5f95abe55629962e/resources.jpg
--------------------------------------------------------------------------------
/troubleshooting.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "2a793b1d-a0a9-404c-ada6-58937227cfce",
6 | "metadata": {},
7 | "source": [
8 | "# Oh dear!\n",
9 | "\n",
10 | "If you've got here, then you're still having problems setting up your environment. I'm so sorry! Hang in there and we should have you up and running in no time.\n",
11 | "\n",
12 | "Setting up a Data Science environment can be challenging because there's a lot going on under the hood. But we will get there.\n",
13 | "\n",
14 | "And please remember - I'm standing by to help out. Message me or email ed@edwarddonner.com and I'll get on the case. The very last cell in this notebook has some diagnostics that will help me figure out what's happening.\n"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "id": "f5190688-205a-46d1-a0dc-9136a42ad0db",
20 | "metadata": {},
21 | "source": [
22 | "# Step 1\n",
23 | "\n",
24 | "Try running the next 2 cells (click in the cell under this one and hit shift+return, then shift+return again).\n",
25 | "\n",
26 | "If this gives an error, then you're likely not running in an \"activated\" environment. Please check back in Part 5 of the SETUP guide for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) for setting up the Anaconda (or virtualenv) environment and activating it, before running `jupyter lab`.\n",
27 | "\n",
28 | "If you look in the Anaconda prompt (PC) or the Terminal (Mac), you should see `(agentic)` in your prompt where you launch `jupyter lab` - that's your clue that the agentic environment is activated.\n",
29 | "\n",
30 | "If you are in an activated environment, the next thing to try is to restart everything:\n",
31 | "1. Close down all Jupyter windows, like this one\n",
32 | "2. Exit all command prompts / Terminals / Anaconda\n",
33 | "3. Repeat Part 5 from the SETUP instructions to begin a new activated environment and launch `jupyter lab` from the `agentic` directory \n",
34 | "4. Come back to this notebook, and do Kernel menu >> Restart Kernel and Clear Outputs of All Cells\n",
35 | "5. Try the cell below again.\n",
36 | "\n",
37 | "If **that** doesn't work, then please contact me! I'll respond quickly, and we'll figure it out. Please run the diagnostics (last cell in this notebook) so I can debug. If you used Anaconda, it might be that for some reason your environment is corrupted, in which case the simplest fix is to use the virtualenv approach instead (Part 2B in the setup guides)."
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "7c8c0bb3-0e94-466e-8d1a-4dfbaa014cbe",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "# Some quick checks that your Conda environment or VirtualEnv is as expected\n",
48 |     "# The Environment Name should be: agentic\n",
49 | "\n",
50 | "import os\n",
51 | "\n",
52 | "conda_prefix = os.environ.get('CONDA_PREFIX')\n",
53 | "if conda_prefix:\n",
54 | " print(\"Anaconda environment is active:\")\n",
55 | " print(f\"Environment Path: {conda_prefix}\")\n",
56 | " print(f\"Environment Name: {os.path.basename(conda_prefix)}\")\n",
57 | "\n",
58 | "virtual_env = os.environ.get('VIRTUAL_ENV')\n",
59 | "if virtual_env:\n",
60 | " print(\"Virtualenv is active:\")\n",
61 | " print(f\"Environment Path: {virtual_env}\")\n",
62 | " print(f\"Environment Name: {os.path.basename(virtual_env)}\")\n",
63 | "\n",
64 | "if not conda_prefix and not virtual_env:\n",
65 | " print(\"Neither Anaconda nor Virtualenv seems to be active. Did you start jupyter lab in an Activated environment? See Setup Part 5.\")"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "id": "6c78b7d9-1eea-412d-8751-3de20c0f6e2f",
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "# And now, this should run with no output - no import errors.\n",
76 | "# Import errors might indicate that you started jupyter lab without your environment activated? See SETUP part 5.\n",
77 | "# Or you might need to restart your Kernel and Jupyter Lab.\n",
78 | "# Or it's possible that something is wrong with Anaconda, in which case we may have to use virtualenv instead.\n",
79 | "# If you're unsure, please run the diagnostics (last cell in this notebook) and then email me at ed@edwarddonner.com\n",
80 | "\n",
81 | "from openai import OpenAI"
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "id": "b66a8460-7b37-4b4c-a64b-24ae45cf07eb",
87 | "metadata": {},
88 | "source": [
89 | "# Step 2\n",
90 | "\n",
91 | "Let's check your .env file exists and has the OpenAI key set properly inside it. \n",
92 | "Please run this code and check that it prints a successful message, otherwise follow its instructions.\n",
93 | "\n",
94 | "Note that the `.env` file won't show up in your Jupyter Lab file browser, because Jupyter hides files that start with a dot for your security; they're considered hidden files. If you need to change the name, you'll need to use a command terminal or File Explorer (PC) / Finder Window (Mac). Ask ChatGPT if that's giving you problems, or email me!\n",
95 | "\n",
96 | "If you're having challenges creating the `.env` file, we can also do it with code! See the cell after the next one.\n",
97 | "\n",
98 | "It's important to launch `jupyter lab` from the project root directory, `agentic`. If you didn't do that, this cell might give you problems."
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "id": "caa4837e-b970-4f89-aa9a-8aa793c754fd",
105 | "metadata": {},
106 | "outputs": [],
107 | "source": [
108 | "from pathlib import Path\n",
109 | "\n",
110 | "this_dir = Path(\".\")\n",
111 | "env_path = this_dir / \".env\"\n",
112 | "\n",
113 | "if env_path.exists() and env_path.is_file():\n",
114 | " print(\".env file found.\")\n",
115 | "\n",
116 | " # Read the contents of the .env file\n",
117 | " with env_path.open(\"r\") as env_file:\n",
118 | " contents = env_file.readlines()\n",
119 | "\n",
120 | " key_exists = any(line.startswith(\"OPENAI_API_KEY=\") for line in contents)\n",
121 | " good_key = any(line.startswith(\"OPENAI_API_KEY=sk-proj-\") for line in contents)\n",
122 | " \n",
123 | " if key_exists and good_key:\n",
124 | " print(\"SUCCESS! OPENAI_API_KEY found and it has the right prefix\")\n",
125 | " elif key_exists:\n",
126 | " print(\"Found an OPENAI_API_KEY although it didn't have the expected prefix sk-proj- \\nPlease double check your key in the file..\")\n",
127 | " else:\n",
128 | " print(\"Didn't find an OPENAI_API_KEY in the .env file\")\n",
129 | "else:\n",
130 |     "    print(\".env file not found in the project root directory. It needs to have exactly the name: .env\")\n",
131 | " \n",
132 |     "    possible_misnamed_files = list(this_dir.glob(\"*.env*\"))\n",
133 | " \n",
134 | " if possible_misnamed_files:\n",
135 |     "        print(\"\\nWarning: No '.env' file found, but the following files were found in the project root directory that contain '.env' in the name. Perhaps this needs to be renamed?\")\n",
136 | " for file in possible_misnamed_files:\n",
137 | " print(file.name)"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "id": "105f9e0a-9ff4-4344-87c8-e3e41bc50869",
143 | "metadata": {},
144 | "source": [
145 | "## Fallback plan - python code to create the .env file for you\n",
146 | "\n",
147 | "Only run the next cell if you're having problems making the .env file. \n",
148 | "Replace the text in the first line of code with your key from OpenAI."
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "id": "ab9ea6ef-49ee-4899-a1c7-75a8bd9ac36b",
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "# Only run this code in this cell if you want to have a .env file created for you!\n",
159 | "\n",
160 | "make_me_a_file_with_this_key = \"put your key here inside these quotes.. it should start sk-proj-\"\n",
161 | "\n",
162 | "from pathlib import Path\n",
163 | "\n",
164 | "this_dir = Path(\".\")\n",
165 | "env_path = this_dir / \".env\"\n",
166 | "\n",
167 | "if env_path.exists():\n",
168 | " print(\"There is already a .env file - if you want me to create a new one, please delete the existing one first\")\n",
169 | "else:\n",
170 | " try:\n",
171 | " with env_path.open(mode='w', encoding='utf-8') as env_file:\n",
172 | " env_file.write(f\"OPENAI_API_KEY={make_me_a_file_with_this_key}\")\n",
173 | " print(f\"Successfully created the .env file at {env_path}\")\n",
174 | " print(\"Now rerun the previous cell to confirm that the file is created and the key is correct.\")\n",
175 | " except Exception as e:\n",
176 | " print(f\"An error occurred while creating the .env file: {e}\")"
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "id": "0ba9420d-3bf0-4e08-abac-f2fbf0e9c7f1",
182 | "metadata": {},
183 | "source": [
184 | "# Step 3\n",
185 | "\n",
186 |     "Now let's check that your API key is correctly set up in your `.env` file, and available using the dotenv package.\n",
187 | "Try running the next cell."
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "0ee8e613-5a6e-4d1f-96ef-91132da545c8",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "# This should print your API key to the output\n",
198 | "\n",
199 | "import os\n",
200 | "from dotenv import load_dotenv\n",
201 | "load_dotenv()\n",
202 | "\n",
203 | "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
204 | "\n",
205 | "if not api_key:\n",
206 | " print(\"No API key was found - please try Kernel menu >> Restart Kernel And Clear Outputs of All Cells\")\n",
207 | "elif api_key[:8]!=\"sk-proj-\":\n",
208 | " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key\")\n",
209 | "elif api_key.strip() != api_key:\n",
210 | " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them\")\n",
211 | "else:\n",
212 | " print(\"API key found and looks good so far!\")\n",
213 | "\n",
214 | "print(\"My key is\", os.getenv(\"OPENAI_API_KEY\"))"
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "id": "f403e515-0e7d-4be4-bb79-5a102dbd6c94",
220 | "metadata": {},
221 | "source": [
222 | "## It should print something like:\n",
223 | "\n",
224 | "`My key is sk-proj-blahblahblah`\n",
225 | "\n",
226 |     "If it didn't print a key, then it's not able to find a file called `.env` in the `agentic` project root folder. \n",
227 | "The name of the file must be exactly `.env` - it won't work if it's called `my-keys.env` or `.env.doc`. \n",
228 |     "Double check those steps in the instructions. Is it possible that `.env` is actually called `.env.txt`? In Windows, you may need to change a setting in the File Explorer to ensure that file extensions are showing (\"Show file extensions\" set to \"On\"). You should also see file extensions if you type `dir` in the `agentic` directory.\n",
229 | "\n",
230 | "Nasty gotchas to watch out for: \n",
231 | "- In the .env file, there should be no space between the equals sign and the key. Like: `OPENAI_API_KEY=sk-proj-...`\n",
232 | "- If you copied and pasted your API key from another application, make sure that it didn't replace hyphens in your key with long dashes \n",
233 | "- If you changed your .env file, you might need to restart your Jupyter \"kernel\" (the python process) to pick up the change via the Kernel menu >> Restart kernel, then rerun the cells from the top.\n",
234 | "\n",
235 | "Worst case, if you're not able to get this part to work, it's not a big deal. You'll just have to paste your key into the Jupyter Notebook (see below for an example), and be sure to remove it before you share the Notebook with anybody else."
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "id": "42afad1f-b0bf-4882-b469-7709060fee3a",
241 | "metadata": {},
242 | "source": [
243 | "# Step 4\n",
244 | "\n",
245 | "Now run the below code and you will hopefully see that GPT can handle basic arithmetic!!\n",
246 | "\n",
247 | "If not, see the cell below."
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "id": "cccb58e7-6626-4033-9dc1-e7e3ff742f6b",
254 | "metadata": {},
255 | "outputs": [],
256 | "source": [
257 |     "import os\nfrom openai import OpenAI\n",
258 | "from dotenv import load_dotenv\n",
259 | "load_dotenv()\n",
260 | "\n",
261 | "# EITHER:\n",
262 | "my_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
263 | "\n",
264 | "# OR if you haven't been able to get .env working, uncomment this next line and paste your key inside the quote marks\n",
265 | "# my_api_key = \"REPLACE THIS TEXT WITH YOUR OPENAI API KEY WITHIN THE QUOTE MARKS - it should start sk-proj-\"\n",
266 | "\n",
267 | "print(f\"Using API key {my_api_key}\")\n",
268 | "\n",
269 | "openai = OpenAI(api_key=my_api_key)\n",
270 | "completion = openai.chat.completions.create(\n",
271 | " model='gpt-4o-mini',\n",
272 | " messages=[{\"role\":\"user\", \"content\": \"What's 2+2?\"}],\n",
273 | ")\n",
274 | "print(completion.choices[0].message.content)"
275 | ]
276 | },
277 | {
278 | "cell_type": "markdown",
279 | "id": "81046a77-c359-4388-929f-ffc8ad5cb93c",
280 | "metadata": {},
281 | "source": [
282 | "# If the key was set correctly, and this still didn't work, perhaps with a RateLimit error\n",
283 | "\n",
284 | "Then there's something up with your API key!\n",
285 | "\n",
286 | "First check this webpage to make sure you have a positive credit balance.\n",
287 |     "OpenAI requires that you have a positive credit balance and it has minimums. My sales pitch for OpenAI is that this is well worth it for your education: for less than the price of a music album, you will build so much valuable commercial experience. But it's not required for this course at all; you can watch me running OpenAI code and use open-source models instead. I'll also show you how to use Ollama to run open-source models locally.\n",
288 | "\n",
289 | "https://platform.openai.com/settings/organization/billing/overview\n",
290 | "\n",
291 | "Also try creating a new key (button on the top right) here:\n",
292 | "\n",
293 | "https://platform.openai.com/api-keys\n",
294 | "\n",
295 | "Sometimes OpenAI may take a few minutes to give you access after you try.\n",
296 | "\n",
297 | "## If all else fails:\n",
298 | "\n",
299 | "(1) Try pasting your error into ChatGPT or Claude! It's amazing how often they can figure things out\n",
300 | "\n",
301 | "(2) Contact me! Please run the diagnostics in the cell below, then email me your problems to ed@edwarddonner.com\n",
302 | "\n",
303 | "Thanks so much, and I'm sorry this is giving you bother!"
304 | ]
305 | },
306 | {
307 | "cell_type": "markdown",
308 | "id": "dc83f944-6ce0-4b5c-817f-952676e284ec",
309 | "metadata": {},
310 | "source": [
311 | "# Gathering Essential Diagnostic information\n",
312 | "\n",
313 | "## Please run this next cell to gather some important data\n",
314 | "\n",
315 | "Please run the next cell; it should take a minute or so to run. Most of the time is checking your network bandwidth.\n",
316 | "Then email me the output of the last cell to ed@edwarddonner.com. \n",
317 | "Alternatively: this will create a file called report.txt - just attach the file to your email."
318 | ]
319 | },
320 | {
321 | "cell_type": "code",
322 | "execution_count": null,
323 | "id": "248204f0-7bad-482a-b715-fb06a3553916",
324 | "metadata": {},
325 | "outputs": [],
326 | "source": [
327 | "# Run my diagnostics report to collect key information for debugging\n",
328 | "# Please email me the results. Either copy & paste the output, or attach the file report.txt\n",
329 | "\n",
330 | "!pip install -q requests speedtest-cli psutil setuptools\n",
331 | "from diagnostics import Diagnostics\n",
332 | "Diagnostics().run()"
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "execution_count": null,
338 | "id": "15379c98-3c64-4227-99eb-48f16e91b7d5",
339 | "metadata": {},
340 | "outputs": [],
341 | "source": []
342 | }
343 | ],
344 | "metadata": {
345 | "kernelspec": {
346 | "display_name": "Python 3 (ipykernel)",
347 | "language": "python",
348 | "name": "python3"
349 | },
350 | "language_info": {
351 | "codemirror_mode": {
352 | "name": "ipython",
353 | "version": 3
354 | },
355 | "file_extension": ".py",
356 | "mimetype": "text/x-python",
357 | "name": "python",
358 | "nbconvert_exporter": "python",
359 | "pygments_lexer": "ipython3",
360 | "version": "3.11.11"
361 | }
362 | },
363 | "nbformat": 4,
364 | "nbformat_minor": 5
365 | }
366 |
--------------------------------------------------------------------------------
/workshop/agent1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e06c733a-1124-44b5-a634-37d0887fdfe6",
6 | "metadata": {},
7 | "source": [
8 | "# Agentic AI Workshop\n",
9 | "\n",
10 | "# Introducing: The Price Is Right\n",
11 | "\n",
12 | "We are going to build a multi-agent framework that will:\n",
13 | "1. Pull RSS feeds from the web and interpret them, looking for promising deals (multi-shot prompting, structured outputs)\n",
14 | "2. Use RAG and a Frontier Model to estimate the price of a product, using a knowledge base\n",
15 | "3. Use a QLoRA fine tuned LLM to estimate the price\n",
16 | "4. Send a push notification when it finds a deal that's worth significantly less than it estimates\n",
17 | "5. A planning agent will orchestrate this\n",
18 | "\n",
19 | "If you're having any set up problems, please see the [troubleshooting](../troubleshooting.ipynb) notebook in the parent directory. \n",
20 | "For more details on using Jupyter Lab, please see the [Guide To Jupyter](../Guide%20to%20Jupyter.ipynb) notebook. \n",
21 | "If you'd like to brush up your Python skills, there's an [Intermediate Python](../Intermediate%20Python.ipynb) notebook.\n",
22 | "\n",
23 | "\n",
24 | "## First: Let's look at the Agent code for interpreting RSS feeds, including figuring out the pricing, using multi-shot prompting\n",
25 | "\n",
26 | "## Remember: just get some intuition for this, and run it yourself later.."
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "id": "2db71ba5-55a8-48b7-97d5-9db8dc872837",
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "# Some imports\n",
37 | "\n",
38 | "from dotenv import load_dotenv\n",
39 | "from price_agents.deals import ScrapedDeal\n",
40 | "from openai import OpenAI\n",
41 | "import logging"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "id": "b044d040-e467-4463-a3a5-119939ca8199",
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "# Set up our env variables\n",
52 | "\n",
53 | "load_dotenv()\n",
54 | "openai = OpenAI()\n",
55 | "MODEL = \"gpt-4o-mini\""
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "a6b8a0f3-af5c-4f21-8a5f-a4df4fa420ad",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "# I wrote this code to fetch RSS feeds\n",
66 | "\n",
67 | "scraped_deals = ScrapedDeal.fetch(show_progress=True)"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "e50266c2-10b6-4e36-a2cb-a5297c1d8e7a",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "print(scraped_deals[3].describe())"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "id": "800ae99a-03cc-4c8f-ac01-1c87e31923ac",
83 | "metadata": {},
84 | "source": [
85 | "## We are going to ask GPT-4o-mini to summarize deals and identify their price"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "id": "84278b54-5289-4d60-8ea6-7de9f8067663",
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "SYSTEM_PROMPT = \"\"\"You identify and summarize the 5 most detailed deals from a list, by selecting deals that have the most detailed, high quality description and the most clear price.\n",
96 | "Respond strictly in JSON with no explanation, using this format. You should provide the price as a number derived from the description. If the price of a deal isn't clear, do not include that deal in your response.\n",
97 | "Most important is that you respond with the 5 deals that have the most detailed product description with price. It's not important to mention the terms of the deal; most important is a thorough description of the product.\n",
98 | "Be careful with products that are described as \"$XXX off\" or \"reduced by $XXX\" - this isn't the actual price of the product. Only respond with products when you are highly confident about the price. \n",
99 | " \n",
100 | "{\n",
101 | " \"deals\": [\n",
102 | " {\n",
103 |     "    \"product_description\": \"Your clearly expressed summary of the product in 3-4 sentences. Details of the item are much more important than why it's a good deal. Avoid mentioning discounts and coupons; focus on the item itself. There should be a paragraph of text for each item you choose.\",\n",
104 | " \"price\": 99.99,\n",
105 | " \"url\": \"the url as provided\"\n",
106 | " },\n",
107 | " ...\n",
108 | " ]\n",
109 | "}\"\"\""
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "id": "af2545a0-e160-41db-8914-f77b1c7eff26",
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "USER_PROMPT_PREFIX = \"\"\"Respond with the most promising 5 deals from this list, selecting those which have the most detailed, high quality product description and a clear price that is greater than 0.\n",
120 | "Respond strictly in JSON, and only JSON. You should rephrase the description to be a summary of the product itself, not the terms of the deal.\n",
121 | "Remember to respond with a paragraph of text in the product_description field for each of the 5 items that you select.\n",
122 | "Be careful with products that are described as \"$XXX off\" or \"reduced by $XXX\" - this isn't the actual price of the product. Only respond with products when you are highly confident about the price. \n",
123 | " \n",
124 | "Deals:\n",
125 | " \n",
126 | "\"\"\"\n",
127 | "\n",
128 | "USER_PROMPT_SUFFIX = \"\"\"\\n\\nStrictly respond in JSON and include exactly 5 deals, no more.\n",
129 | "Your response must follow this format:\n",
130 | "\n",
131 | "{\n",
132 | " \"deals\": [\n",
133 | " {\n",
134 |     "      \"product_description\": \"Your summary of the product in 3-4 sentences. Details of the item are much more important than why it's a good deal. Avoid mentioning discounts and coupons; focus on the item itself. There should be a paragraph of text for each item you choose.\",\n",
135 | " \"price\": 99.99,\n",
136 | " \"url\": \"the url as provided\"\n",
137 | " },\n",
138 | " {\n",
139 |     "      \"product_description\": \"Your summary of the product in 3-4 sentences. Details of the item are much more important than why it's a good deal. Avoid mentioning discounts and coupons; focus on the item itself. There should be a paragraph of text for each item you choose.\",\n",
140 | " \"price\": 210.30,\n",
141 | " \"url\": \"the url as provided\"\n",
142 | " },\n",
143 | " ...\n",
144 | "]}\n",
145 | "\"\"\""
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "id": "cf00a4e4-62c7-47a4-8415-2b7b4f86391a",
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "# this makes a suitable user prompt given scraped deals\n",
156 | "\n",
157 | "def make_user_prompt(scraped):\n",
158 | " user_prompt = USER_PROMPT_PREFIX\n",
159 | " user_prompt += '\\n\\n'.join([scrape.describe() for scrape in scraped])\n",
160 | " user_prompt += USER_PROMPT_SUFFIX\n",
161 | " return user_prompt"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "id": "504ecdac-7228-48eb-80b4-6e31d2dc8f5a",
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "# Let's create a user prompt for the deals we just scraped, and look at how it begins\n",
172 | "\n",
173 | "user_prompt = make_user_prompt(scraped_deals)\n",
174 | "print(user_prompt[:2000])"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "id": "9f7dcd64-9f16-4a6c-b822-ca3601c2f631",
180 | "metadata": {},
181 | "source": [
182 | "## Calling OpenAI using their Python client\n",
183 | "\n",
184 | "Recall the format of OpenAI calls:\n",
185 | "\n",
186 | "When you call OpenAI, you pass in your conversation as a list of python dictionaries:\n",
187 | "\n",
188 | "```\n",
189 | " [\n",
190 | " {\"role\": \"system\", \"content\": \"system_message_goes_here\"},\n",
191 | " {\"role\": \"user\", \"content\": \"user_prompt_goes_here\"}\n",
192 | " ]\n",
193 | "```"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "id": "20401a75-c12a-466b-9e66-7cd46ac03443",
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "# Create the user prompt that reflects the deals we have scraped\n",
204 | "\n",
205 | "user_prompt = make_user_prompt(scraped_deals)\n",
206 | "\n",
207 | "# Call OpenAI using their Python client. Request a response in JSON and stream it back.\n",
208 | "\n",
209 | "stream = openai.chat.completions.create(\n",
210 | " model=MODEL,\n",
211 | " messages=[\n",
212 | " {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
213 | " {\"role\": \"user\", \"content\": user_prompt}\n",
214 | " ],\n",
215 | " max_tokens=1000,\n",
216 | " stream=True,\n",
217 | " response_format={\"type\": \"json_object\"}\n",
218 | ")\n",
219 | "\n",
220 | "response = \"\"\n",
221 | "for chunk in stream:\n",
222 | " chunk_text = chunk.choices[0].delta.content or ''\n",
223 | " response += chunk_text\n",
224 | " print(chunk_text, end=\"\", flush=True)"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "id": "70e74543-5aa6-4b92-bfde-a40451db295f",
231 | "metadata": {},
232 | "outputs": [],
233 | "source": [
234 | "import json\n",
235 | "json.loads(response)"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "id": "a99b6d4b-6223-4901-902a-14f1bda34244",
241 | "metadata": {},
242 | "source": [
243 | "# Putting this into an \"Agent\"\n",
244 | "\n",
245 | "I've packaged this code into a class called `ScannerAgent`\n",
246 | "\n",
247 | "There are various Agent frameworks that add an abstraction layer, but in our case it's easy just to write the code directly."
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "id": "81eb5f68-168c-4d6c-936a-40b79f08f26a",
254 | "metadata": {},
255 | "outputs": [],
256 | "source": [
257 | "root = logging.getLogger()\n",
258 | "root.setLevel(logging.INFO)"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": null,
264 | "id": "9d0af2d8-4419-49a7-af66-218a2d1f986c",
265 | "metadata": {},
266 | "outputs": [],
267 | "source": [
268 | "from price_agents.scanner_agent import ScannerAgent\n",
269 | "\n",
270 | "scanner = ScannerAgent()\n",
271 | "scanner.scan()"
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": null,
277 | "id": "425b0702-7b96-4b37-a6d1-4f5dc9deb1ad",
278 | "metadata": {},
279 | "outputs": [],
280 | "source": []
281 | }
282 | ],
283 | "metadata": {
284 | "kernelspec": {
285 | "display_name": "Python 3 (ipykernel)",
286 | "language": "python",
287 | "name": "python3"
288 | },
289 | "language_info": {
290 | "codemirror_mode": {
291 | "name": "ipython",
292 | "version": 3
293 | },
294 | "file_extension": ".py",
295 | "mimetype": "text/x-python",
296 | "name": "python",
297 | "nbconvert_exporter": "python",
298 | "pygments_lexer": "ipython3",
299 | "version": "3.11.11"
300 | }
301 | },
302 | "nbformat": 4,
303 | "nbformat_minor": 5
304 | }
305 |
--------------------------------------------------------------------------------
/workshop/agent3.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "e426cd04-c053-43e8-b505-63cee7956a53",
6 | "metadata": {},
7 | "source": [
8 | "# The Third Agent\n",
9 | "\n",
10 | "## Fine-tuned LLM using QLoRA with Llama 3.1 as the base model\n",
11 | "\n",
12 | "First, here's a link to Google Colab set up for training with QLoRA\n",
13 | "\n",
14 | "https://colab.research.google.com/drive/1IqxWtUzuV5ks2kS1oO4Mge3Mf1o3rhRj\n",
15 | "\n",
16 | "And here's a link to Google Colab set up for inference:\n",
17 | "\n",
18 | "https://colab.research.google.com/drive/1shI0i5QiMWL8fSmM-VcBI7RT5NjzZJ17\n",
19 | "\n",
20 | "Once this is set up, I have this running on Modal\n",
21 | "\n",
22 | "If you want to do this too, head over to modal.com to set up your free starter account with free credit"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "id": "bc0e1c1c-be6a-4395-bbbd-eeafc9330d7e",
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "# Just one import to start with!!\n",
33 | "\n",
34 | "import modal"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "id": "80fe1f83-986d-49ca-a8d6-74c27fe7ef20",
40 | "metadata": {},
41 | "source": [
42 | "During the class I might visit this URL to show the code deployed on Modal:\n",
43 | "\n",
44 | "https://modal.com/apps/ed-donner/main/ap-stiZMq9syc9zikKRoLnRor?functionId=fu-LumBocLb9rvkzuIUBQGn42&activeTab=functions"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "id": "3c9db702-4337-46ff-a6ba-8e1f213740b9",
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "# For you to experiment after the class: below we set up and deploy our proprietary LLM over modal\n",
55 | "# Here we execute it directly\n",
56 | "\n",
57 | "Pricer = modal.Cls.from_name(\"pricer-service\", \"Pricer\")\n",
58 | "pricer = Pricer()\n",
59 | "\n",
60 | "reply = pricer.price.remote(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")\n",
61 | "print(reply)"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "id": "dcc44afd-07e4-485e-b6d2-f96908a7726e",
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "# Generations of iPad pro\n",
72 | "\n",
73 | "print(\"iPad Pro 1st gen estimate:\", pricer.price.remote(\"iPad pro 1st generation\"))\n",
74 | "print(\"iPad Pro 6th gen estimate:\", pricer.price.remote(\"iPad pro 6th generation\"))"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "id": "f4a8ee05-4c85-4407-8c9d-384159752dcc",
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "import logging\n",
85 | "root = logging.getLogger()\n",
86 | "root.setLevel(logging.INFO)"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "id": "b7e60466-25b7-4f0d-8136-a94997ef41b1",
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "from price_agents.specialist_agent import SpecialistAgent\n",
97 | "\n",
98 | "agent = SpecialistAgent()\n",
99 | "agent.price(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "id": "40039e10-6e19-4a9e-a58f-617dca05e544",
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "agent.price(\"Shure MV7+ professional podcaster microphone with usb-c and XLR outputs\")"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "id": "22e8d804-c027-45fb-8fef-06e7bba6295a",
115 | "metadata": {},
116 | "source": [
117 | "# For you to get this to work yourself\n",
118 | "\n",
119 | "## We need to set your HuggingFace Token as a secret in Modal\n",
120 | "\n",
121 | "1. Go to modal.com, sign in and go to your dashboard\n",
122 | "2. Click on Secrets in the nav bar\n",
123 | "3. Create new secret, click on Hugging Face\n",
124 | "4. Fill in your HF_TOKEN where it prompts you\n"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "id": "5788a3a4-f7a7-4c9f-9c88-55ba1afe0d2e",
131 | "metadata": {},
132 | "outputs": [],
133 | "source": [
134 | "# First time: uncomment and run the line below\n",
135 | "# !modal setup"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "id": "04d8747f-8452-4077-8af6-27e03888508a",
141 | "metadata": {},
142 | "source": [
143 | "# Deploying and running:\n",
144 | "\n",
145 | "From a command line, `modal deploy xxx` will deploy your code as a Deployed App\n",
146 | "\n",
147 | "This is how you could package your AI service behind an API to be used in a Production System.\n",
148 | "\n",
149 | "You can also build REST endpoints easily, although we won't cover that as we'll be calling direct from Python."
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "id": "f56d1e55-2a03-4ce2-bb47-2ab6b9175a02",
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "!modal deploy pricer_service"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "id": "9e19daeb-1281-484b-9d2f-95cc6fed2622",
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "Pricer = modal.Cls.lookup(\"pricer-service\", \"Pricer\")\n",
170 | "pricer = Pricer()\n",
171 | "reply = pricer.price.remote(\"Quadcast HyperX condenser mic, connects via usb-c to your computer for crystal clear audio\")\n",
172 | "print(reply)"
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": null,
178 | "id": "f5a3181b-1310-4102-8d7d-52caf4c00538",
179 | "metadata": {},
180 | "outputs": [],
181 | "source": []
182 | }
183 | ],
184 | "metadata": {
185 | "kernelspec": {
186 | "display_name": "Python 3 (ipykernel)",
187 | "language": "python",
188 | "name": "python3"
189 | },
190 | "language_info": {
191 | "codemirror_mode": {
192 | "name": "ipython",
193 | "version": 3
194 | },
195 | "file_extension": ".py",
196 | "mimetype": "text/x-python",
197 | "name": "python",
198 | "nbconvert_exporter": "python",
199 | "pygments_lexer": "ipython3",
200 | "version": "3.11.11"
201 | }
202 | },
203 | "nbformat": 4,
204 | "nbformat_minor": 5
205 | }
206 |
--------------------------------------------------------------------------------
/workshop/agent4.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "23f53670-1a73-46ba-a754-4a497e8e0e64",
6 | "metadata": {},
7 | "source": [
8 | "# Fourth Agent\n",
9 | "\n",
10 | "Use Claude to craft a message and send it as a Push Notification\n",
11 | "\n",
12 | "## Messaging Agent\n",
13 | "\n",
14 | "For the Push Notification, we will be using a nifty platform called Pushover. \n",
15 | "You'll need to set up a free account and add 2 tokens to your `.env` file:\n",
16 | "\n",
17 | "```\n",
18 | "PUSHOVER_USER=xxx\n",
19 | "PUSHOVER_TOKEN=xxx\n",
20 | "```\n",
21 | "\n",
22 | "## Just to say one more time...\n",
23 | "\n",
24 | "I realize I'm going through tons of material very quickly! \n",
25 | "The idea is to give you general intuition so you can come back and try for yourself."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": null,
31 | "id": "80d683d9-9e92-44ae-af87-a413ca84db21",
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "import os\n",
36 | "import sys\n",
37 | "import logging\n",
38 | "from price_agents.deals import Opportunity\n",
39 | "import http.client\n",
40 | "import urllib\n",
41 | "from dotenv import load_dotenv"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "id": "e05cc427-3d2c-4792-ade1-d356f95a82a9",
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "load_dotenv()\n",
52 | "pushover_user = os.getenv('PUSHOVER_USER', 'your-pushover-user-if-not-using-env')\n",
53 | "pushover_token = os.getenv('PUSHOVER_TOKEN', 'your-pushover-user-if-not-using-env')"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "id": "5ec518f5-dae4-44b1-a185-d7eaf853ec00",
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "def push(text):\n",
64 | " conn = http.client.HTTPSConnection(\"api.pushover.net:443\")\n",
65 | " conn.request(\"POST\", \"/1/messages.json\",\n",
66 | " urllib.parse.urlencode({\n",
67 | " \"token\": pushover_token,\n",
68 | " \"user\": pushover_user,\n",
69 | " \"message\": text,\n",
70 | " \"sound\": \"cashregister\"\n",
71 | " }), { \"Content-type\": \"application/x-www-form-urlencoded\" })\n",
72 | " conn.getresponse()"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": null,
78 | "id": "0056a02f-06a3-4acc-99f3-cbe919ee936b",
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "push(\"MASSIVE DEAL!!\")"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "id": "fbcb97c0-3f5f-48a0-a414-8a1c93e53a7f",
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "import logging\n",
93 | "root = logging.getLogger()\n",
94 | "root.setLevel(logging.INFO)"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "id": "093776c1-078c-42fb-8cb4-92a1e9481061",
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "from price_agents.messaging_agent import MessagingAgent\n",
105 | "\n",
106 | "agent = MessagingAgent()\n",
107 | "agent.push(\"SUCH A MASSIVE DEAL!!\")"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "id": "85a5a527-06ec-4d19-98cf-9ca7cc68f85c",
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "agent.notify(\"A special deal on Sumsung 60 inch LED TV going at a great bargain\", 300, 1000, \"www.samsung.com\")"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "id": "943e180b-7f8b-4833-adfc-6a5712d369fd",
124 | "metadata": {},
125 | "outputs": [],
126 | "source": []
127 | }
128 | ],
129 | "metadata": {
130 | "kernelspec": {
131 | "display_name": "Python 3 (ipykernel)",
132 | "language": "python",
133 | "name": "python3"
134 | },
135 | "language_info": {
136 | "codemirror_mode": {
137 | "name": "ipython",
138 | "version": 3
139 | },
140 | "file_extension": ".py",
141 | "mimetype": "text/x-python",
142 | "name": "python",
143 | "nbconvert_exporter": "python",
144 | "pygments_lexer": "ipython3",
145 | "version": "3.11.11"
146 | }
147 | },
148 | "nbformat": 4,
149 | "nbformat_minor": 5
150 | }
151 |
--------------------------------------------------------------------------------
/workshop/agent5.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "eae2b3a2-02d4-4061-9639-3a6f09810a44",
6 | "metadata": {},
7 | "source": [
8 | "# Fifth Agent Agent\n",
9 | "\n",
10 | "Introducing a critical agent - the agent that brings it all together.\n",
11 | "\n",
12 | "# Planning Agent\n",
13 | "\n",
14 | "There are a number of frameworks out there that support building Agentic Workflows.\n",
15 | "\n",
16 | "OpenAI has OpenAI Agents SDK, LangChain has LangGraph, and there's Autogen from Microsoft, Crew.ai and many others. \n",
17 | "\n",
18 | "Each of these are abstractions on top of APIs to LLMs; some are lightweight, others come with significant functionality.\n",
19 | "\n",
20 | "It's also perfectly possible - and sometimes considerably easier - to build an agentic solution by calling LLMs directly.\n",
21 | "\n",
22 | "There's been considerable convergence on LLM APIs, and it's not clear that there's a need to sign up for one of the agent ecosystems for many use cases.\n",
23 | "\n",
24 | "Anthropic has an [insightful post](https://www.anthropic.com/research/building-effective-agents) on building effective Agentic architectures that's well worth a read.\n",
25 | "\n",
26 | "# We are going to use OpenAI Agents SDK for this Agent\n",
27 | "\n",
28 | "## And we're using Tools to give our Agent autonomy\n",
29 | "\n",
30 | "In our case, we're going to create an Agent that uses Tools to make decisions about what to do next.\n",
31 | "\n",
32 | "Let's see how it works"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "id": "8f171c2b-1943-43a5-85c6-0bcd84bdd3e7",
39 | "metadata": {},
40 | "outputs": [],
41 | "source": [
42 | "# imports\n",
43 | "\n",
44 | "import os\n",
45 | "import json\n",
46 | "from dotenv import load_dotenv\n",
47 | "from openai import OpenAI"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "id": "c6a21ae3-da90-46a2-97f0-156cdd48542c",
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "!pip install openai-agents"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "id": "2bc09be7-0666-4fd4-8699-78e2c0cac93c",
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "from agents import Agent, Runner, trace, function_tool"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "612c8116-e4b6-4332-8bdb-d7c90b4aa9de",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "# Initialization\n",
78 | "\n",
79 | "load_dotenv()\n",
80 | "\n",
81 | "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
82 | "if openai_api_key:\n",
83 | " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
84 | "else:\n",
85 | " print(\"OpenAI API Key not set\")\n",
86 | " \n",
87 | "MODEL = \"gpt-4o\"\n"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "id": "0896c5f3-1ecc-4464-b913-2e7cfe29c365",
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "# Use the Scanner agent from before\n",
98 | "\n",
99 | "from price_agents.scanner_agent import ScannerAgent\n",
100 | "scanner = ScannerAgent()"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "id": "04f25eed-e48a-4f9a-9817-4c0451378b40",
106 | "metadata": {},
107 | "source": [
108 | "# Our tools\n",
109 | "\n",
110 | "The next 3 cells have 3 **fake** functions that we will allow our LLM to call"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "id": "5f920a35-c58d-4961-8c3c-40d70157da22",
117 | "metadata": {},
118 | "outputs": [],
119 | "source": [
120 | "@function_tool\n",
121 | "def scan_the_internet_for_bargains():\n",
122 | " \"\"\" This tool scans the internet for great deals and gets a curated list of promising deals \"\"\"\n",
123 | " print(f\"Scanning the internet\")\n",
124 | " results = scanner.test_scan()\n",
125 | " return results.model_dump()"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "id": "8f885983-e054-43f3-86b4-6db9323216da",
132 | "metadata": {},
133 | "outputs": [],
134 | "source": [
135 | "@function_tool\n",
136 | "def estimate_true_value(description: str) -> str:\n",
137 | " \"\"\" This tool estimates how much a product is actually work, given a text description of the product \"\"\"\n",
138 | " print(f\"Estimating true value of {description[:20]}...\")\n",
139 | " return {\"description\": description, \"estimated_true_value\": 300}"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "id": "93a42f55-0f75-44b1-830f-ee13d161cdd1",
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "@function_tool\n",
150 | "def notify_user_of_deal(description: str, deal_price: float, estimated_true_value: float):\n",
151 | " \"\"\" This tool notified the user of a deal, given a description, a price and an estimated actual value \"\"\"\n",
152 | " print(f\"Notifying user of {description} which costs {deal_price} and estimate is {estimated_true_value}\")\n",
153 | " return {\"notification_sent\": \"ok\"}"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "id": "aa1dd672-e77e-4f91-a66d-fe67f10ac093",
159 | "metadata": {},
160 | "source": [
161 | "## Telling the LLM about the tools it can use, with JSON\n",
162 | "\n",
163 | "\"Tool calling\" is giving an LLM the power to run code on your computer!\n",
164 | "\n",
165 | "Sounds a bit spooky?\n",
166 | "\n",
167 | "The way it works is a little more mundane. We give the LLM a description of each Tool and the parameters, and we let it inform us if it wants any tool to be run.\n",
168 | "\n",
169 | "It's not like OpenAI reaches in and runs a function. In the end, we have an if statement that calls our function if the model requests it.\n",
170 | "\n",
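"For illustration, here's a minimal sketch of that manual dispatch - hypothetical names, not code we run in this notebook:\n",
"\n",
"```python\n",
"# Sketch only: look up the requested tool by name and run it with the model's arguments\n",
"import json\n",
"\n",
"def dispatch(tool_call, my_tools: dict):\n",
"    fn = my_tools.get(tool_call.function.name)\n",
"    args = json.loads(tool_call.function.arguments)\n",
"    return fn(**args) if fn else {\"error\": \"unknown tool\"}\n",
"```\n",
"\n",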
171 | "## OpenAI Agents SDK has made this easy for us\n",
172 | "\n",
173 | "The decorator `function_tools` around each of our functions automatically generates the description we need for the LLM"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "54c1d76a-8744-46d0-afb3-d881820d876c",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "tools = [scan_the_internet_for_bargains, estimate_true_value, notify_user_of_deal]\n",
184 | "tools"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "id": "e088a008-fed8-4bc6-8041-8801adb3754c",
190 | "metadata": {},
191 | "source": [
192 | "## And.. MCP\n",
193 | "\n",
194 | "The Model Context Protocol from Anthropic is causing a lot of excitement.\n",
195 | "\n",
196 | "It gives us a really easy way to integrate new capabilities with our agent, as more tools.\n",
197 | "\n",
198 | "Here we will give our agent powers to write to our local filesystem in a directory \"sandbox\""
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": null,
204 | "id": "018b825e-df74-412f-a829-2a4c92cf1d92",
205 | "metadata": {},
206 | "outputs": [],
207 | "source": [
208 | "import os\n",
209 | "from agents.mcp import MCPServerStdio\n",
210 | "sandbox_path = os.path.abspath(os.path.join(os.getcwd(), \"sandbox\"))\n",
211 | "\n",
212 | "# parameters describe an MCP server\n",
213 | "files_params = {\"command\": \"npx\", \"args\": [\"-y\", \"@modelcontextprotocol/server-filesystem\", sandbox_path]}\n",
214 | "\n",
215 | "\n",
216 | "async with MCPServerStdio(params=files_params) as server:\n",
217 | " file_tools = await server.list_tools()"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": null,
223 | "id": "83173856-54d4-4afd-ac13-83fa15eca766",
224 | "metadata": {},
225 | "outputs": [],
226 | "source": [
227 | "file_tools"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": null,
233 | "id": "d195e6ad-c838-4a5f-ab3c-f3cb7f470fce",
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "system_message = \"You are an Autonomous AI Agent that makes use of tools to carry out your mission. Your mission is to find great deals on bargain products, and notify the user when you find them with a push notification and a written file.\"\n",
238 | "user_message = \"Your mission is to discover great deals on products. First you should scan the internet for bargain deals. Then for each deal, you should estimate its true value - how much it's actually worth. \"\n",
239 | "user_message += \"Finally, you should pick the single most compelling deal where the deal price is much lower than the estimated true value, and \"\n",
240 | "user_message += \"use your tools to send the user a push notification about that deal, and also use your tools to write or update a file called sandbox/deals.md with a description in markdown. \"\n",
241 | "user_message += \"You must only notify the user about one deal, and be sure to pick the most compelling deal, where the deal price is much lower than the estimated true value. Then just respond OK to indicate success.\"\n",
242 | "messages = [{\"role\": \"system\", \"content\": system_message},{\"role\": \"user\", \"content\": user_message}]"
243 | ]
244 | },
245 | {
246 | "cell_type": "markdown",
247 | "id": "215cad2e-a470-4cb7-ad78-3a13352bf4c5",
248 | "metadata": {},
249 | "source": [
250 | "### And here's where it comes together - just 2 lines of code"
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": null,
256 | "id": "a3021830-b216-4013-8456-671a370f4450",
257 | "metadata": {},
258 | "outputs": [],
259 | "source": [
260 | "async with MCPServerStdio(params=files_params) as server:\n",
261 | " agent = Agent(name=\"Planner\", instructions=system_message, model=\"gpt-4o\", tools=tools, mcp_servers=[server])\n",
262 | " result = await Runner.run(agent, user_message)\n",
263 | "\n",
264 | "print(result)"
265 | ]
266 | },
267 | {
268 | "cell_type": "markdown",
269 | "id": "67020429-93a3-4c26-b4ae-7c9c9f1d41a2",
270 | "metadata": {},
271 | "source": [
272 | "## And now - putting all of this into a Planning Agent"
273 | ]
274 | },
275 | {
276 | "cell_type": "code",
277 | "execution_count": null,
278 | "id": "e30b1875-3a42-41c0-b217-9789090347b8",
279 | "metadata": {},
280 | "outputs": [],
281 | "source": [
282 | "from price_agents.autonomous_planning_agent import AutonomousPlanningAgent"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": null,
288 | "id": "767db7b9-8d78-4d02-9b79-6c5e2dd8ddd2",
289 | "metadata": {},
290 | "outputs": [],
291 | "source": [
292 | "import logging\n",
293 | "root = logging.getLogger()\n",
294 | "root.setLevel(logging.INFO)"
295 | ]
296 | },
297 | {
298 | "cell_type": "code",
299 | "execution_count": null,
300 | "id": "83fbf6c0-301e-4da0-b4e3-8f91ab4d686f",
301 | "metadata": {},
302 | "outputs": [],
303 | "source": [
304 | "import chromadb\n",
305 | "DB = \"products_vectorstore\"\n",
306 | "client = chromadb.PersistentClient(path=DB)\n",
307 | "collection = client.get_or_create_collection('products')"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "id": "208067d9-8396-4f95-8dc8-a614c9a455df",
314 | "metadata": {},
315 | "outputs": [],
316 | "source": [
317 | "agent = AutonomousPlanningAgent(collection)"
318 | ]
319 | },
320 | {
321 | "cell_type": "code",
322 | "execution_count": null,
323 | "id": "0121edc8-c309-4d04-b737-16a4235a83fb",
324 | "metadata": {},
325 | "outputs": [],
326 | "source": [
327 | "result = agent.plan()"
328 | ]
329 | },
330 | {
331 | "cell_type": "markdown",
332 | "id": "f3c43c38-46d5-4186-880c-439ec975bb4b",
333 | "metadata": {},
334 | "source": [
335 | "### Check out the trace\n",
336 | "\n",
337 | "https://platform.openai.com/traces"
338 | ]
339 | },
340 | {
341 | "cell_type": "markdown",
342 | "id": "dc3e15e5-dddc-4f2e-bbb4-ab9a5392eca7",
343 | "metadata": {},
344 | "source": [
345 | "# Finally - with a Gradio UI"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": null,
351 | "id": "b6f9da59-43c4-409f-8a93-5993e1d9e180",
352 | "metadata": {},
353 | "outputs": [],
354 | "source": [
355 | "# Reset memory back to 2 deals discovered in the past\n",
356 | "\n",
357 | "from deal_agent_framework import DealAgentFramework\n",
358 | "DealAgentFramework.reset_memory()"
359 | ]
360 | },
361 | {
362 | "cell_type": "code",
363 | "execution_count": null,
364 | "id": "805095ad-9d07-4869-9432-338f87fb65ca",
365 | "metadata": {},
366 | "outputs": [],
367 | "source": [
368 | "!python price_is_right.py"
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": null,
374 | "id": "9df197f7-7ed4-4b24-a333-80ffda9d7032",
375 | "metadata": {},
376 | "outputs": [],
377 | "source": []
378 | }
379 | ],
380 | "metadata": {
381 | "kernelspec": {
382 | "display_name": "Python 3 (ipykernel)",
383 | "language": "python",
384 | "name": "python3"
385 | },
386 | "language_info": {
387 | "codemirror_mode": {
388 | "name": "ipython",
389 | "version": 3
390 | },
391 | "file_extension": ".py",
392 | "mimetype": "text/x-python",
393 | "name": "python",
394 | "nbconvert_exporter": "python",
395 | "pygments_lexer": "ipython3",
396 | "version": "3.11.11"
397 | }
398 | },
399 | "nbformat": 4,
400 | "nbformat_minor": 5
401 | }
402 |
--------------------------------------------------------------------------------
/workshop/deal_agent_framework.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import logging
4 | import json
5 | from typing import List, Optional
6 | from dotenv import load_dotenv
7 | import chromadb
8 | from price_agents.autonomous_planning_agent import AutonomousPlanningAgent
9 | from price_agents.deals import Opportunity
10 | from sklearn.manifold import TSNE
11 | import numpy as np
12 |
13 |
14 | # Colors for logging
15 | BG_BLUE = '\033[44m'
16 | WHITE = '\033[37m'
17 | RESET = '\033[0m'
18 |
19 | # Colors for plot
20 | CATEGORIES = ['Appliances', 'Automotive', 'Cell_Phones_and_Accessories', 'Electronics','Musical_Instruments', 'Office_Products', 'Tools_and_Home_Improvement', 'Toys_and_Games']
21 | COLORS = ['red', 'blue', 'brown', 'orange', 'yellow', 'green' , 'purple', 'cyan']
22 |
23 | def init_logging():
24 | root = logging.getLogger()
25 | root.setLevel(logging.INFO)
26 |
27 | handler = logging.StreamHandler(sys.stdout)
28 | handler.setLevel(logging.INFO)
29 | formatter = logging.Formatter(
30 | "[%(asctime)s] [Agents] [%(levelname)s] %(message)s",
31 | datefmt="%Y-%m-%d %H:%M:%S %z",
32 | )
33 | handler.setFormatter(formatter)
34 | root.addHandler(handler)
35 |
36 | class DealAgentFramework:
37 |
38 | DB = "products_vectorstore"
39 | MEMORY_FILENAME = "memory.json"
40 |
41 | def __init__(self):
42 | init_logging()
43 | load_dotenv()
44 | client = chromadb.PersistentClient(path=self.DB)
45 | self.memory = self.read_memory()
46 | self.collection = client.get_or_create_collection('products')
47 | self.planner = None
48 |
49 | def init_agents_as_needed(self):
50 | if not self.planner:
51 | self.log("Initializing Agent Framework")
52 | self.planner = AutonomousPlanningAgent(self.collection)
53 | self.log("Agent Framework is ready")
54 |
55 | def read_memory(self) -> List[Opportunity]:
56 | if os.path.exists(self.MEMORY_FILENAME):
57 | with open(self.MEMORY_FILENAME, "r") as file:
58 | data = json.load(file)
59 | opportunities = [Opportunity(**item) for item in data]
60 | return opportunities
61 | return []
62 |
63 | def write_memory(self) -> None:
64 |         data = [opportunity.model_dump() for opportunity in self.memory]
65 | with open(self.MEMORY_FILENAME, "w") as file:
66 | json.dump(data, file, indent=2)
67 |
68 | @classmethod
69 | def reset_memory(cls) -> None:
70 | data = []
71 | if os.path.exists(cls.MEMORY_FILENAME):
72 | with open(cls.MEMORY_FILENAME, "r") as file:
73 | data = json.load(file)
74 | truncated = data[:2]
75 | with open(cls.MEMORY_FILENAME, "w") as file:
76 | json.dump(truncated, file, indent=2)
77 |
78 | def log(self, message: str):
79 | text = BG_BLUE + WHITE + "[Agent Framework] " + message + RESET
80 | logging.info(text)
81 |
82 | def run(self) -> List[Opportunity]:
83 | self.init_agents_as_needed()
84 | logging.info("Kicking off Planning Agent")
85 | result = self.planner.plan(memory=self.memory)
86 | logging.info(f"Planning Agent has completed and returned: {result}")
87 | if result:
88 | self.memory.append(result)
89 | self.write_memory()
90 | return self.memory
91 |
92 | @classmethod
93 | def get_plot_data(cls, max_datapoints=2000):
94 | client = chromadb.PersistentClient(path=cls.DB)
95 | collection = client.get_or_create_collection('products')
96 | result = collection.get(include=['embeddings', 'documents', 'metadatas'], limit=max_datapoints)
97 | vectors = np.array(result['embeddings'])
98 | documents = result['documents']
99 | categories = [metadata['category'] for metadata in result['metadatas']]
100 | colors = [COLORS[CATEGORIES.index(c)] for c in categories]
101 | tsne = TSNE(n_components=3, random_state=42, n_jobs=-1)
102 | reduced_vectors = tsne.fit_transform(vectors)
103 | return documents, reduced_vectors, colors
104 |
105 |
106 | if __name__=="__main__":
107 | DealAgentFramework().run()
108 |
--------------------------------------------------------------------------------
/workshop/items.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from transformers import AutoTokenizer
3 | import re
4 |
5 | BASE_MODEL = "meta-llama/Meta-Llama-3.1-8B"
6 | MIN_TOKENS = 150
7 | MAX_TOKENS = 160
8 | MIN_CHARS = 300
9 | CEILING_CHARS = MAX_TOKENS * 7
10 |
11 | class Item:
12 | """
13 | An Item is a cleaned, curated datapoint of a Product with a Price
14 | """
15 |
16 |     # Commented out here; note that parse() and make_prompt() below use self.tokenizer, so uncomment this line before building Items from raw data
17 |     # tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
18 |
19 | PREFIX = "Price is $"
20 | QUESTION = "How much does this cost to the nearest dollar?"
21 | REMOVALS = ['"Batteries Included?": "No"', '"Batteries Included?": "Yes"', '"Batteries Required?": "No"', '"Batteries Required?": "Yes"', "By Manufacturer", "Item", "Date First", "Package", ":", "Number of", "Best Sellers", "Number", "Product "]
22 |
23 | title: str
24 | price: float
25 | text: str
26 | category: str
27 | token_count: int = 0
28 | details: Optional[str]
29 | prompt: Optional[str] = None
30 | include = False
31 |
32 | def __init__(self, data, price):
33 | self.title = data['title']
34 | self.price = price
35 | self.parse(data)
36 |
37 | def scrub_details(self):
38 | """
39 | Clean up the details string by removing common text that doesn't add value
40 | """
41 | details = self.details
42 | for remove in self.REMOVALS:
43 | details = details.replace(remove, "")
44 | return details
45 |
46 | def scrub(self, stuff):
47 | """
48 | Clean up the provided text by removing unnecessary characters and whitespace
49 | Also remove words that are 7+ chars and contain numbers, as these are likely irrelevant product numbers
50 | """
51 | stuff = re.sub(r'[:\[\]"{}【】\s]+', ' ', stuff).strip()
52 | stuff = stuff.replace(" ,", ",").replace(",,,",",").replace(",,",",")
53 | words = stuff.split(' ')
54 | select = [word for word in words if len(word)<7 or not any(char.isdigit() for char in word)]
55 | return " ".join(select)
56 |
57 | def parse(self, data):
58 | """
59 | Parse this datapoint and if it fits within the allowed Token range,
60 | then set include to True
61 | """
62 | contents = '\n'.join(data['description'])
63 | if contents:
64 | contents += '\n'
65 | features = '\n'.join(data['features'])
66 | if features:
67 | contents += features + '\n'
68 | self.details = data['details']
69 | if self.details:
70 | contents += self.scrub_details() + '\n'
71 | if len(contents) > MIN_CHARS:
72 | contents = contents[:CEILING_CHARS]
73 | text = f"{self.scrub(self.title)}\n{self.scrub(contents)}"
74 | tokens = self.tokenizer.encode(text, add_special_tokens=False)
75 | if len(tokens) > MIN_TOKENS:
76 | tokens = tokens[:MAX_TOKENS]
77 | text = self.tokenizer.decode(tokens)
78 | self.make_prompt(text)
79 | self.include = True
80 |
81 | def make_prompt(self, text):
82 | """
83 | Set the prompt instance variable to be a prompt appropriate for training
84 | """
85 | self.prompt = f"{self.QUESTION}\n\n{text}\n\n"
86 | self.prompt += f"{self.PREFIX}{str(round(self.price))}.00"
87 | self.token_count = len(self.tokenizer.encode(self.prompt, add_special_tokens=False))
88 |
89 | def test_prompt(self):
90 | """
91 | Return a prompt suitable for testing, with the actual price removed
92 | """
93 | return self.prompt.split(self.PREFIX)[0] + self.PREFIX
94 |
95 | def __repr__(self):
96 | """
97 | Return a String version of this Item
98 | """
99 | return f"<{self.title} = ${self.price}>"
100 |
101 |
102 |
103 |
104 |
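105 | # Illustrative usage only (assumes the tokenizer line above has been uncommented):
106 | #   data = {"title": "USB-C cable", "description": ["2m braided cable"], "features": [], "details": ""}
107 | #   item = Item(data, 7.99)
108 | #   item.include        # True only if the cleaned text clears MIN_CHARS and MIN_TOKENS
109 | #   item.test_prompt()  # if included: the prompt with the price removed, ending in "Price is $"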
--------------------------------------------------------------------------------
/workshop/keep_warm.py:
--------------------------------------------------------------------------------
1 | import time
2 | import modal
3 | from datetime import datetime
4 |
5 | Pricer = modal.Cls.lookup("pricer-service", "Pricer")
6 | pricer = Pricer()
7 | while True:
8 | reply = pricer.wake_up.remote()
9 | print(f"{datetime.now()}: {reply}")
10 | time.sleep(30)
--------------------------------------------------------------------------------
/workshop/log_utils.py:
--------------------------------------------------------------------------------
1 | # Foreground colors
2 | RED = '\033[31m'
3 | GREEN = '\033[32m'
4 | YELLOW = '\033[33m'
5 | BLUE = '\033[34m'
6 | MAGENTA = '\033[35m'
7 | CYAN = '\033[36m'
8 | WHITE = '\033[37m'
9 |
10 | # Background color
11 | BG_BLACK = '\033[40m'
12 | BG_BLUE = '\033[44m'
13 |
14 | # Reset code to return to default color
15 | RESET = '\033[0m'
16 |
17 | mapper = {
18 | BG_BLACK+RED: "#dd0000",
19 | BG_BLACK+GREEN: "#00dd00",
20 | BG_BLACK+YELLOW: "#dddd00",
21 | BG_BLACK+BLUE: "#0000ee",
22 | BG_BLACK+MAGENTA: "#aa00dd",
23 | BG_BLACK+CYAN: "#00dddd",
24 | BG_BLACK+WHITE: "#87CEEB",
25 | BG_BLUE+WHITE: "#ff7800"
26 | }
27 |
28 |
29 | def reformat(message):
30 | for key, value in mapper.items():
31 |         message = message.replace(key, '')
32 | message = message.replace(RESET, '')
33 | return message
34 |
35 |
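36 | # Illustrative example (not called anywhere in the workshop): as written, reformat() simply
37 | # strips the ANSI codes used by the agents' log methods, e.g.
38 | #   reformat('\033[40m\033[36m[Scanner Agent] Found a deal\033[0m')
39 | #   returns '[Scanner Agent] Found a deal'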
--------------------------------------------------------------------------------
/workshop/memory.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "deal": {
4 | "product_description": "The Samsung Galaxy Watch Ultra is a premium 47mm LTE Titanium smartwatch designed for both style and functionality. It features a circular display made with durable materials suitable for outdoor activities, providing GPS tracking, health monitoring, and custom apps for various needs. The robust design integrates a range of smart features including notifications, music control, and heart rate tracking, making it an ideal companion for fitness enthusiasts and tech-savvy users alike.",
5 | "price": 350.0,
6 | "url": "https://www.dealnews.com/Samsung-Galaxy-Watch-Ultra-47-mm-LTE-Titanium-Smartwatch-up-to-350-off-w-Trade-in-free-shipping/21663266.html?iref=rss-c142"
7 | },
8 | "estimate": 773.8138460593241,
9 | "discount": 423.8138460593241
10 | },
11 | {
12 | "deal": {
13 | "product_description": "The Refurbished Unlocked Apple iPhone 14 Pro Max offers an impressive 256GB storage and a huge display, perfect for both media consumption and productivity. Enjoy advanced camera technology for stunning photos. This model is designed to provide a seamless user experience with 5G capabilities for faster downloads and streaming. Refurbished to high standards, it comes in various colors and can support all the latest apps from the App Store, accommodating any Apple enthusiast's needs.",
14 | "price": 705.0,
15 | "url": "https://www.dealnews.com/products/Apple/Unlocked-Apple-iPhone-14-Pro-Max-256-GB-Smartphone/462808.html?iref=rss-c142"
16 | },
17 | "estimate": 930.8824204895075,
18 | "discount": 225.88242048950747
19 | },
20 | {
21 | "deal": {
22 | "product_description": "The Unlocked Motorola razr+ is a cutting-edge smartphone featuring a Qualcomm Snapdragon 8 Gen 1 processor, 8GB of RAM, and 256GB of storage. With a stunning 6.9-inch AMOLED display, the device runs on Android 13, offering a sleek design and premium function. Perfect for users looking for a powerful, stylish, and modern phone experience.",
23 | "price": 300.0,
24 | "url": "https://www.dealnews.com/products/Motorola/Unlocked-Motorola-razr-256-GB-Phone-2023/456024.html?iref=rss-c142"
25 | },
26 | "estimate": 749.995,
27 | "discount": 449.995
28 | }
29 | ]
--------------------------------------------------------------------------------
/workshop/old_agent5.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "eae2b3a2-02d4-4061-9639-3a6f09810a44",
6 | "metadata": {},
7 | "source": [
8 | "# Fifth Agent Agent\n",
9 | "\n",
10 | "Introducing a critical agent - the agent that brings it all together.\n",
11 | "\n",
12 | "# Planning Agent\n",
13 | "\n",
14 | "There are a number of frameworks out there that support building Agentic Workflows.\n",
15 | "\n",
16 | "OpenAI has Swarm, LangChain has LangGraph, Gradio and HuggingFace have offerings, and there's Autogen from Microsoft, Crew.ai and many others. \n",
17 | "\n",
18 | "Each of these are abstractions on top of APIs to LLMs; some are lightweight, others come with significant functionality.\n",
19 | "\n",
20 | "It's also perfectly possible - and sometimes considerably easier - to build an agentic solution by calling LLMs directly.\n",
21 | "\n",
22 | "There's been considerable convergence on LLM APIs, and it's not clear that there's a need to sign up for one of the agent ecosystems for many use cases.\n",
23 | "\n",
24 | "Anthropic has an [insightful post](https://www.anthropic.com/research/building-effective-agents) on building effective Agentic architectures that's well worth a read.\n",
25 | "\n",
26 | "## Using Tools to give our Agent autonomy\n",
27 | "\n",
28 | "In our case, we're going to create an Agent that uses Tools to make decisions about what to do next.\n",
29 | "\n",
30 | "This is a bit over the top for this simple example, because we know exactly what the Agent is supposed to do. But it allows us to give the Agent some freedom..\n",
31 | "\n",
32 | "Let's see how it works:"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "id": "8f171c2b-1943-43a5-85c6-0bcd84bdd3e7",
39 | "metadata": {},
40 | "outputs": [],
41 | "source": [
42 | "# imports\n",
43 | "\n",
44 | "import os\n",
45 | "import json\n",
46 | "from dotenv import load_dotenv\n",
47 | "from openai import OpenAI"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "id": "612c8116-e4b6-4332-8bdb-d7c90b4aa9de",
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "# Initialization\n",
58 | "\n",
59 | "load_dotenv()\n",
60 | "\n",
61 | "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
62 | "if openai_api_key:\n",
63 | " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
64 | "else:\n",
65 | " print(\"OpenAI API Key not set\")\n",
66 | " \n",
67 | "MODEL = \"gpt-4o-mini\"\n",
68 | "openai = OpenAI()\n"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "id": "0896c5f3-1ecc-4464-b913-2e7cfe29c365",
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "# Use the Scanner agent from before\n",
79 | "\n",
80 | "from agents.scanner_agent import ScannerAgent\n",
81 | "scanner = ScannerAgent()"
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "id": "04f25eed-e48a-4f9a-9817-4c0451378b40",
87 | "metadata": {},
88 | "source": [
89 | "# Our tools\n",
90 | "\n",
91 | "The next 3 cells have 3 **fake** functions that we will allow our LLM to call"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "id": "5f920a35-c58d-4961-8c3c-40d70157da22",
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "def scan_the_internet_for_bargains():\n",
102 | " print(f\"Scanning the internet\")\n",
103 | " results = scanner.test_scan()\n",
104 | " return results.model_dump()"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "id": "8f885983-e054-43f3-86b4-6db9323216da",
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "def estimate_true_value(description: str) -> str:\n",
115 | " print(f\"Estimating true value of {description[:20]}...\")\n",
116 | " return {\"description\": description, \"estimated_true_value\": 300}"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "id": "93a42f55-0f75-44b1-830f-ee13d161cdd1",
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "def notify_user_of_deal(description: str, deal_price: float, estimated_true_value: float):\n",
127 | " print(f\"Notifying user of {description} which costs {deal_price} and estimate is {estimated_true_value}\")\n",
128 | " return {\"notification_sent\": \"ok\"}"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "id": "aa1dd672-e77e-4f91-a66d-fe67f10ac093",
134 | "metadata": {},
135 | "source": [
136 | "## Telling the LLM about the tools it can use, with JSON\n",
137 | "\n",
138 | "\"Tool calling\" is giving an LLM the power to run code on your computer!\n",
139 | "\n",
140 | "Sounds a bit spooky?\n",
141 | "\n",
142 | "The way it works is a little more mundane. We give the LLM a description of each Tool and the parameters, and we let it inform us if it wants any tool to be run.\n",
143 | "\n",
144 | "It's not like OpenAI reaches in and runs a function. In the end, we have an if statement that calls our function if the model requests it."
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "id": "fb4a0727-d971-44bb-9c90-59f9e1323261",
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "scan_function = {\n",
155 | " \"name\": \"scan_the_internet_for_bargains\",\n",
156 | " \"description\": \"Returns top bargains scraped from the internet along with the price each item is being offered for\",\n",
157 | " \"parameters\": {\n",
158 | " \"type\": \"object\",\n",
159 | " \"properties\": {},\n",
160 | " \"required\": [],\n",
161 | " \"additionalProperties\": False\n",
162 | " }\n",
163 | "}"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": null,
169 | "id": "ef4c9cea-fd6f-45ac-b8fe-cc15722ec231",
170 | "metadata": {},
171 | "outputs": [],
172 | "source": [
173 | "estimate_function = {\n",
174 | " \"name\": \"estimate_true_value\",\n",
175 | " \"description\": \"Given the description of an item, estimate how much it is actually worth\",\n",
176 | " \"parameters\": {\n",
177 | " \"type\": \"object\",\n",
178 | " \"properties\": {\n",
179 | " \"description\": {\n",
180 | " \"type\": \"string\",\n",
181 | " \"description\": \"The description of the item to be estimated\"\n",
182 | " },\n",
183 | " },\n",
184 | " \"required\": [\"description\"],\n",
185 | " \"additionalProperties\": False\n",
186 | " }\n",
187 | "}"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "22b863aa-b86b-45db-8ac0-0fe6fb101284",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "notify_function = {\n",
198 | " \"name\": \"notify_user_of_deal\",\n",
199 | " \"description\": \"Send the user a push notification about the most compelling deal\",\n",
200 | " \"parameters\": {\n",
201 | " \"type\": \"object\",\n",
202 | " \"properties\": {\n",
203 | " \"description\": {\n",
204 | " \"type\": \"string\",\n",
205 | " \"description\": \"The description of the item\"\n",
206 | " },\n",
207 | " \"deal_price\": {\n",
208 | " \"type\": \"number\",\n",
209 | " \"description\": \"The price offered by this deal scraped from the internet\"\n",
210 | " }\n",
211 | " ,\n",
212 | " \"estimated_true_value\": {\n",
213 | " \"type\": \"number\",\n",
214 | " \"description\": \"The estimated actual value that this is worth\"\n",
215 | " }\n",
216 | " },\n",
217 | " \"required\": [\"description\", \"deal_price\", \"estimated_true_value\"],\n",
218 | " \"additionalProperties\": False\n",
219 | " }\n",
220 | "}"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "id": "54c1d76a-8744-46d0-afb3-d881820d876c",
227 | "metadata": {},
228 | "outputs": [],
229 | "source": [
230 | "tools = [{\"type\": \"function\", \"function\": scan_function},\n",
231 | " {\"type\": \"function\", \"function\": estimate_function},\n",
232 | " {\"type\": \"function\", \"function\": notify_function}]"
233 | ]
234 | },
235 | {
236 | "cell_type": "markdown",
237 | "id": "e088a008-fed8-4bc6-8041-8801adb3754c",
238 | "metadata": {},
239 | "source": [
240 | "## And now to bring it together - Tool calling in action"
241 | ]
242 | },
243 | {
244 | "cell_type": "code",
245 | "execution_count": null,
246 | "id": "018b825e-df74-412f-a829-2a4c92cf1d92",
247 | "metadata": {},
248 | "outputs": [],
249 | "source": [
250 | "mapping = {\"scan_the_internet_for_bargains\": scan_the_internet_for_bargains, \"estimate_true_value\": estimate_true_value, \"notify_user_of_deal\": notify_user_of_deal}\n",
251 | "\n",
252 | "def handle_tool_call(message):\n",
253 | " results = []\n",
254 | " for tool_call in message.tool_calls:\n",
255 | " tool_name = tool_call.function.name\n",
256 | " arguments = json.loads(tool_call.function.arguments)\n",
257 | " print(f\"Tool called: {tool_name}\", flush=True)\n",
258 | " tool = globals().get(tool_name)\n",
259 | " if tool:\n",
260 | " result = tool(**arguments) if tool else {}\n",
261 | " results.append({\"role\": \"tool\",\"content\": json.dumps(result),\"tool_call_id\": tool_call.id})\n",
262 | " else:\n",
263 | " print(\"Error calling tool\")\n",
264 | " results.append({\"role\": \"tool\",\"content\": json.dumps({\"result\":\"error\"}),\"tool_call_id\": tool_call.id})\n",
265 | " return results"
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": null,
271 | "id": "d195e6ad-c838-4a5f-ab3c-f3cb7f470fce",
272 | "metadata": {},
273 | "outputs": [],
274 | "source": [
275 | "system_message = \"You are an Autonomous AI Agent that makes use of tools to carry out your mission. Your mission is to find great deals on bargain products, and notify the user when you find them.\"\n",
276 | "user_message = \"Your mission is to discover great deals on products. First you should scan the internet for bargain deals. Then for each deal, you should estimate its true value - how much it's actually worth. \"\n",
277 | "user_message += \"Finally, you should pick the single most compelling deal where the deal price is much lower than the estimated true value, and send the user a push notification about that deal. \"\n",
278 | "user_message += \"You must only notify the user about one deal, and be sure to pick the most compelling deal, where the deal price is much lower than the estimated true value. Then just respond OK to indicate success.\"\n",
279 | "messages = [{\"role\": \"system\", \"content\": system_message},{\"role\": \"user\", \"content\": user_message}]"
280 | ]
281 | },
282 | {
283 | "cell_type": "code",
284 | "execution_count": null,
285 | "id": "a3021830-b216-4013-8456-671a370f4450",
286 | "metadata": {},
287 | "outputs": [],
288 | "source": [
289 | "# A loop that repeatedly calls gpt-4o-mini until it has no more tools to call\n",
290 | "\n",
291 | "done = False\n",
292 | "while not done:\n",
293 | " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
294 | " if response.choices[0].finish_reason==\"tool_calls\":\n",
295 | " message = response.choices[0].message\n",
296 | " results = handle_tool_call(message)\n",
297 | " messages.append(message)\n",
298 | " messages.extend(results)\n",
299 | " else:\n",
300 | " done = True\n",
301 | "\n",
302 | "print(response.choices[0].message.content)"
303 | ]
304 | },
305 | {
306 | "cell_type": "markdown",
307 | "id": "67020429-93a3-4c26-b4ae-7c9c9f1d41a2",
308 | "metadata": {},
309 | "source": [
310 | "## And now - putting all of this into a Planning Agent"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "id": "e30b1875-3a42-41c0-b217-9789090347b8",
317 | "metadata": {},
318 | "outputs": [],
319 | "source": [
320 | "from agents.autonomous_planning_agent import AutonomousPlanningAgent"
321 | ]
322 | },
323 | {
324 | "cell_type": "code",
325 | "execution_count": null,
326 | "id": "767db7b9-8d78-4d02-9b79-6c5e2dd8ddd2",
327 | "metadata": {},
328 | "outputs": [],
329 | "source": [
330 | "import logging\n",
331 | "root = logging.getLogger()\n",
332 | "root.setLevel(logging.INFO)"
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "execution_count": null,
338 | "id": "83fbf6c0-301e-4da0-b4e3-8f91ab4d686f",
339 | "metadata": {},
340 | "outputs": [],
341 | "source": [
342 | "import chromadb\n",
343 | "DB = \"products_vectorstore\"\n",
344 | "client = chromadb.PersistentClient(path=DB)\n",
345 | "collection = client.get_or_create_collection('products')"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": null,
351 | "id": "208067d9-8396-4f95-8dc8-a614c9a455df",
352 | "metadata": {},
353 | "outputs": [],
354 | "source": [
355 | "agent = AutonomousPlanningAgent(collection)"
356 | ]
357 | },
358 | {
359 | "cell_type": "code",
360 | "execution_count": null,
361 | "id": "0121edc8-c309-4d04-b737-16a4235a83fb",
362 | "metadata": {},
363 | "outputs": [],
364 | "source": [
365 | "result = agent.plan()"
366 | ]
367 | },
368 | {
369 | "cell_type": "markdown",
370 | "id": "dc3e15e5-dddc-4f2e-bbb4-ab9a5392eca7",
371 | "metadata": {},
372 | "source": [
373 | "# Finally - with a Gradio UI"
374 | ]
375 | },
376 | {
377 | "cell_type": "code",
378 | "execution_count": null,
379 | "id": "b6f9da59-43c4-409f-8a93-5993e1d9e180",
380 | "metadata": {},
381 | "outputs": [],
382 | "source": [
383 | "# Reset memory back to 2 deals discovered in the past\n",
384 | "\n",
385 | "from deal_agent_framework import DealAgentFramework\n",
386 | "DealAgentFramework.reset_memory()"
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": null,
392 | "id": "805095ad-9d07-4869-9432-338f87fb65ca",
393 | "metadata": {},
394 | "outputs": [],
395 | "source": [
396 | "!python price_is_right.py"
397 | ]
398 | },
399 | {
400 | "cell_type": "code",
401 | "execution_count": null,
402 | "id": "9df197f7-7ed4-4b24-a333-80ffda9d7032",
403 | "metadata": {},
404 | "outputs": [],
405 | "source": []
406 | }
407 | ],
408 | "metadata": {
409 | "kernelspec": {
410 | "display_name": "Python 3 (ipykernel)",
411 | "language": "python",
412 | "name": "python3"
413 | },
414 | "language_info": {
415 | "codemirror_mode": {
416 | "name": "ipython",
417 | "version": 3
418 | },
419 | "file_extension": ".py",
420 | "mimetype": "text/x-python",
421 | "name": "python",
422 | "nbconvert_exporter": "python",
423 | "pygments_lexer": "ipython3",
424 | "version": "3.11.11"
425 | }
426 | },
427 | "nbformat": 4,
428 | "nbformat_minor": 5
429 | }
430 |
--------------------------------------------------------------------------------
/workshop/previous_hardcoded_agent5.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "23f53670-1a73-46ba-a754-4a497e8e0e64",
6 | "metadata": {},
7 | "source": [
8 | "# Fifth Agent Agent\n",
9 | "\n",
10 | "Introducing a critical agent - the agent that brings it all together.\n",
11 | "\n",
12 | "# Planning Agent\n",
13 | "\n",
14 | "There are a number of frameworks out there that support building Agentic Workflows.\n",
15 | "\n",
16 | "OpenAI recently announced Swarm, LangChain has LangGraph, Gradio and HuggingFace have offerings, and there's Autogen from Microsoft, Crew.ai and many others. \n",
17 | "\n",
18 | "Each of these are abstractions on top of APIs to LLMs; some are lightweight, others come with significant functionality.\n",
19 | "\n",
20 | "It's also perfectly possible - and sometimes considerably easier - to build an agentic solution by calling LLMs directly.\n",
21 | "\n",
22 | "There's been considerable convergence on LLM APIs, and it's not clear that there's a need to sign up for one of the agent ecosystems for many use cases.\n",
23 | "\n",
24 | "In our case, we're simply going to make direct calls to our models to build our Agentic workflows.\n",
25 | "\n",
26 | "Now let's browse the Agents folder and look at the Planning Agent.\n",
27 | "\n",
28 | "Then we will run it here:"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "id": "80d683d9-9e92-44ae-af87-a413ca84db21",
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "import os\n",
39 | "import sys\n",
40 | "import logging\n",
41 | "from agents.deals import Opportunity\n",
42 | "import http.client\n",
43 | "import urllib\n",
44 | "from dotenv import load_dotenv"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "id": "5ba769cc-5301-4810-b01f-cab584cfb3b3",
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "load_dotenv()\n",
55 | "DB = \"products_vectorstore\""
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "57b3a014-0b15-425a-a29b-6fefc5006dee",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "import chromadb\n",
66 | "client = chromadb.PersistentClient(path=DB)\n",
67 | "collection = client.get_or_create_collection('products')"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "d59931e4-386c-4f51-940a-c30d09fd27b9",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "import logging\n",
78 | "root = logging.getLogger()\n",
79 | "root.setLevel(logging.INFO)"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "id": "a5c31c39-e357-446e-9cec-b4775c298941",
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "from agents.planning_agent import PlanningAgent\n",
90 | "\n",
91 | "planner = PlanningAgent(collection)"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "id": "d9ac771b-ea12-41c0-a7ce-05f12e27ad9e",
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "result = planner.plan()"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "id": "8dd94a70-3202-452b-9ef0-551d6feb159b",
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "result"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "id": "fcf589c3-6ada-4116-b39d-df13e653879e",
117 | "metadata": {},
118 | "source": [
119 | "# And now - unveiling the Agent framework\n",
120 | "\n",
121 | "- Look at `deal_agent_framework.py` for the framework\n",
122 | "- Look at `memory.json` for the memory\n",
123 | "- And finally: look at the gradio UI in `price_is_right.py`\n"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "id": "41432c45-697a-4ffe-8324-8cea18878dd2",
130 | "metadata": {},
131 | "outputs": [],
132 | "source": [
133 | "!python price_is_right.py"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "id": "70d6bed5-d7ae-4f8e-a0d6-25980d1e11e3",
140 | "metadata": {},
141 | "outputs": [],
142 | "source": []
143 | }
144 | ],
145 | "metadata": {
146 | "kernelspec": {
147 | "display_name": "Python 3 (ipykernel)",
148 | "language": "python",
149 | "name": "python3"
150 | },
151 | "language_info": {
152 | "codemirror_mode": {
153 | "name": "ipython",
154 | "version": 3
155 | },
156 | "file_extension": ".py",
157 | "mimetype": "text/x-python",
158 | "name": "python",
159 | "nbconvert_exporter": "python",
160 | "pygments_lexer": "ipython3",
161 | "version": "3.11.11"
162 | }
163 | },
164 | "nbformat": 4,
165 | "nbformat_minor": 5
166 | }
167 |
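The markdown cell above argues that an agentic workflow can be built with direct LLM calls rather than an agent framework. Below is a minimal, hedged sketch of what that looks like with a single tool-calling round trip using the OpenAI SDK. It is not part of the workshop code: the tool, its schema, and the model name are illustrative assumptions.

```python
# Minimal direct tool-calling loop (sketch only, assumes OPENAI_API_KEY is set).
import json
from openai import OpenAI

client = OpenAI()

def estimate_true_value(description: str) -> dict:
    # Hypothetical stand-in for the pricing agents used later in the workshop
    return {"description": description, "estimated_true_value": 99.0}

tools = [{
    "type": "function",
    "function": {
        "name": "estimate_true_value",
        "description": "Estimate the true value of a product from its description",
        "parameters": {
            "type": "object",
            "properties": {"description": {"type": "string"}},
            "required": ["description"],
        },
    },
}]

messages = [{"role": "user", "content": "How much is a 55-inch 4K Roku TV worth?"}]
response = client.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
message = response.choices[0].message

if message.tool_calls:
    messages.append(message)  # keep the assistant's tool-call turn in the transcript
    for call in message.tool_calls:
        args = json.loads(call.function.arguments)
        result = estimate_true_value(**args)
        messages.append({"role": "tool", "tool_call_id": call.id, "content": json.dumps(result)})
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)

print(response.choices[0].message.content)
```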
--------------------------------------------------------------------------------
/workshop/price_agents/agent.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | class Agent:
4 | """
5 | An abstract superclass for Agents
6 | Used to log messages in a way that can identify each Agent
7 | """
8 |
9 | # Foreground colors
10 | RED = '\033[31m'
11 | GREEN = '\033[32m'
12 | YELLOW = '\033[33m'
13 | BLUE = '\033[34m'
14 | MAGENTA = '\033[35m'
15 | CYAN = '\033[36m'
16 | WHITE = '\033[37m'
17 |
18 | # Background color
19 | BG_BLACK = '\033[40m'
20 |
21 | # Reset code to return to default color
22 | RESET = '\033[0m'
23 |
24 | name: str = ""
25 | color: str = '\033[37m'
26 |
27 | def log(self, message):
28 | """
29 | Log this as an info message, identifying the agent
30 | """
31 | color_code = self.BG_BLACK + self.color
32 | message = f"[{self.name}] {message}"
33 | logging.info(color_code + message + self.RESET)
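A quick hedged usage sketch of this base class: any agent that sets `name` and `color` gets name-tagged, colour-coded log lines for free, which is what the Gradio UI later surfaces. The subclass below is illustrative only, not part of the repo.

```python
# Hedged sketch: a throwaway subclass exercising the shared log() helper.
import logging
from price_agents.agent import Agent

logging.basicConfig(level=logging.INFO)  # the notebooks set INFO on the root logger

class DemoAgent(Agent):        # illustrative subclass, not part of the workshop code
    name = "Demo Agent"
    color = Agent.CYAN

DemoAgent().log("hello world")  # logs "[Demo Agent] hello world" in cyan on black
```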
--------------------------------------------------------------------------------
/workshop/price_agents/autonomous_planning_agent.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List, Dict
2 | from price_agents.agent import Agent as BaseAgent
3 | from price_agents.deals import ScrapedDeal, DealSelection, Deal, Opportunity
4 | from price_agents.scanner_agent import ScannerAgent
5 | from price_agents.frontier_agent import FrontierAgent
6 | from price_agents.specialist_agent import SpecialistAgent
7 | from price_agents.messaging_agent import MessagingAgent
8 | from agents import Agent, Runner, function_tool
9 | import json
10 | import asyncio
11 | import os
12 | from agents.mcp import MCPServerStdio
13 |
14 | sandbox_path = os.path.abspath(os.path.join(os.getcwd(), "sandbox"))
15 | files_params = {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", sandbox_path]}
16 |
17 | planner = None
18 |
19 | @function_tool
20 | def scan_the_internet_for_bargains() -> Dict:
21 | """
22 | This tool scans the internet for bargains and returns a curated list of top deals
23 | """
24 | planner.log("Autonomous Planning agent is calling scanner")
25 | results = planner.scanner.scan(memory=planner.memory)
26 | return results.model_dump() if results else {}
27 |
28 | @function_tool
29 | def estimate_true_value(description: str) -> Dict:
30 | """
31 | This tool estimates the true value of a product based on a text description of it
32 | """
33 | planner.log(f"Autonomous Planning agent is estimating value")
34 | estimate1 = planner.frontier.price(description)
35 | estimate2 = planner.specialist.price(description)
36 | estimate = (estimate1 + estimate2) / 2.0
37 | return {"description": description, "estimated_true_value": estimate}
38 |
39 | @function_tool
40 | def notify_user_of_deal(description: str, deal_price: float, estimated_true_value: float, url: str) -> Dict:
41 | """
42 | This tool notifies the user of a great deal, given a description of it, the price of the deal, and the estimated true value
43 | """
44 | if planner.opportunity:
45 | planner.log("Autonomous Planning agent is trying to notify the user a 2nd time; ignoring")
46 | else:
47 | planner.log("Autonomous Planning agent is notifying user")
48 | planner.messenger.notify(description, deal_price, estimated_true_value, url)
49 | deal = Deal(product_description=description, price=deal_price, url=url)
50 | discount = estimated_true_value - deal_price
51 | planner.opportunity = Opportunity(deal=deal, estimate=estimated_true_value, discount=discount)
52 | return {"notification_sent": "ok"}
53 |
54 |
55 | class AutonomousPlanningAgent(BaseAgent):
56 |
57 | name = "Autonomous Planning Agent"
58 | color = BaseAgent.GREEN
59 | MODEL = "gpt-4o"
60 |
61 | def __init__(self, collection):
62 | """
63 | Create instances of the Agents that this planner coordinates across
64 | """
65 | self.log("Autonomous Planning Agent is initializing")
66 | self.scanner = ScannerAgent()
67 | self.frontier = FrontierAgent(collection)
68 | self.specialist = SpecialistAgent()
69 | self.messenger = MessagingAgent()
70 | self.memory = None
71 | self.opportunity = None
72 | self.log("Autonomous Planning Agent is ready")
73 |
74 | def get_tools(self):
75 | """
76 | Return the json for the tools to be used
77 | """
78 | return [scan_the_internet_for_bargains, estimate_true_value, notify_user_of_deal]
79 |
80 | system_message = """
81 | Your mission is to find great deals on bargain products using your tools, and notify the user when you find them
82 | by sending a push notification and by writing a file in markdown with a summary.
83 | """
84 | user_message = """
85 | Your mission is to discover great deals on products. First you should use your tool to scan the internet for bargain deals.
86 | Then for each deal, you should use your tool to estimate its true value - how much it's actually worth.
87 | Finally, you should pick the single most compelling deal where the deal price is much lower than the estimated true value,
88 | and use your tool to send the user a push notification about that deal, and also use your tool to write this to the file sandbox/deals.md with a summary in markdown,
89 | adding to the end of the file if it already exists.
90 |
91 | You must only notify the user about one deal, and be sure to pick the most compelling deal, where the deal price is much lower than the estimated true value.
92 | Only notify the user for the one best deal. Then just respond OK to indicate success."""
93 |
94 | messages = [{"role": "system", "content": system_message},{"role": "user", "content": user_message}]
95 |
96 | def run_async_task(self, coro):
97 | try:
98 | loop = asyncio.get_running_loop()
99 | except RuntimeError:
100 | loop = asyncio.new_event_loop()
101 | asyncio.set_event_loop(loop)
102 | return loop.run_until_complete(coro)
103 | else:
104 | import nest_asyncio
105 | nest_asyncio.apply()
106 | return loop.run_until_complete(coro)
107 |
108 | async def go(self):
109 | async with MCPServerStdio(params=files_params) as server:
110 | file_tools = await server.list_tools()
111 | agent = Agent(name="Planner", instructions=self.system_message, model=self.MODEL, tools=self.get_tools(), mcp_servers=[server])
112 | reply = await Runner.run(agent, self.user_message)
113 | return reply
114 |
115 |
116 | def plan(self, memory: List[Opportunity] = []) -> Optional[Opportunity]:
117 | """
118 | Run the full workflow, providing the LLM with tools to surface scraped deals to the user
119 | :param memory: a list of Opportunities that have been surfaced in the past
120 | :return: an Opportunity if one was surfaced, otherwise None
121 | """
122 | self.log("Autonomous Planning Agent is kicking off a run")
123 | self.memory = memory
124 | self.opportunity = None
125 | global planner # TODO find a better way to do this without globals!!
126 | planner = self
127 | reply = self.run_async_task(self.go())
128 | self.log(f"Autonomous Planning Agent completed with: {reply}")
129 | return self.opportunity
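A hedged sketch of driving this agent directly, mirroring what the notebook and `deal_agent_framework.py` do: construct it with the Chroma products collection and call `plan()`. It assumes the vectorstore from earlier steps exists at `products_vectorstore`, the relevant API keys are in the environment, and `npx` is available for the MCP filesystem server.

```python
# Hedged usage sketch, assuming the products vectorstore and API keys are already set up.
import chromadb
from price_agents.autonomous_planning_agent import AutonomousPlanningAgent

client = chromadb.PersistentClient(path="products_vectorstore")
collection = client.get_or_create_collection("products")

agent = AutonomousPlanningAgent(collection)
opportunity = agent.plan(memory=[])   # returns an Opportunity, or None if nothing was surfaced
print(opportunity)
```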
--------------------------------------------------------------------------------
/workshop/price_agents/deals.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import List, Dict, Self
3 | from bs4 import BeautifulSoup
4 | import re
5 | import feedparser
6 | from tqdm import tqdm
7 | import requests
8 | import time
9 |
10 | feeds = [
11 | "https://www.dealnews.com/c142/Electronics/?rss=1",
12 | "https://www.dealnews.com/c39/Computers/?rss=1",
13 | # "https://www.dealnews.com/c238/Automotive/?rss=1",
14 | "https://www.dealnews.com/f1912/Smart-Home/?rss=1",
15 | ]
16 |
17 | def extract(html_snippet: str) -> str:
18 | """
19 | Use Beautiful Soup to clean up this HTML snippet and extract useful text
20 | """
21 | soup = BeautifulSoup(html_snippet, 'html.parser')
22 | snippet_div = soup.find('div', class_='snippet summary')
23 |
24 | if snippet_div:
25 | description = snippet_div.get_text(strip=True)
26 | description = BeautifulSoup(description, 'html.parser').get_text()
27 | description = re.sub('<[^<]+?>', '', description)
28 | result = description.strip()
29 | else:
30 | result = html_snippet
31 | return result.replace('\n', ' ')
32 |
33 | class ScrapedDeal:
34 | """
35 | A class to represent a Deal retrieved from an RSS feed
36 | """
37 | category: str
38 | title: str
39 | summary: str
40 | url: str
41 | details: str
42 | features: str
43 |
44 | def __init__(self, entry: Dict[str, str]):
45 | """
46 | Populate this instance based on the provided dict
47 | """
48 | self.title = entry['title']
49 | self.summary = extract(entry['summary'])
50 | self.url = entry['links'][0]['href']
51 | stuff = requests.get(self.url).content
52 | soup = BeautifulSoup(stuff, 'html.parser')
53 | content = soup.find('div', class_='content-section').get_text()
54 | content = content.replace('\nmore', '').replace('\n', ' ')
55 | if "Features" in content:
56 | splits = content.split("Features")
57 | self.details = splits[0]
58 | self.features = splits[1]
59 | else:
60 | self.details = content
61 | self.features = ""
62 | self.truncate()
63 |
64 | def truncate(self):
65 | """
66 | Limit the fields to a sensible length to avoid sending too much info to the model
67 | """
68 | self.title = self.title[:100]
69 | self.details = self.details[:500]
70 | self.features = self.features[:500]
71 |
72 | def __repr__(self):
73 | """
74 | Return a string to describe this deal
75 | """
76 | return f"<{self.title}>"
77 |
78 | def describe(self):
79 | """
80 | Return a longer string to describe this deal for use in calling a model
81 | """
82 | return f"Title: {self.title}\nDetails: {self.details.strip()}\nFeatures: {self.features.strip()}\nURL: {self.url}"
83 |
84 | @classmethod
85 | def fetch(cls, show_progress : bool = False) -> List[Self]:
86 | """
87 | Retrieve all deals from the selected RSS feeds
88 | Include a slight pause to ensure we don't overload the deals website
89 | """
90 | deals = []
91 | feed_iter = tqdm(feeds) if show_progress else feeds
92 | for feed_url in feed_iter:
93 | feed = feedparser.parse(feed_url)
94 | for entry in feed.entries[:5]:
95 | deals.append(cls(entry))
96 | time.sleep(0.05)
97 | return deals
98 |
99 | class Deal(BaseModel):
100 | """
101 | A class to represent a Deal with a summary description
102 | """
103 | product_description: str
104 | price: float
105 | url: str
106 |
107 | class DealSelection(BaseModel):
108 | """
109 | A class to represent a list of Deals
110 | """
111 | deals: List[Deal]
112 |
113 | class Opportunity(BaseModel):
114 | """
115 | A class to represent a possible opportunity: a Deal where we estimate
116 | it should cost more than it's being offered
117 | """
118 | deal: Deal
119 | estimate: float
120 | discount: float
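A small, hedged sketch of the data model in isolation (the prices below are made up). `Deal`, `DealSelection` and `Opportunity` are plain Pydantic models, so they validate and serialize cleanly; `ScrapedDeal.fetch()` is the only piece that touches the network.

```python
# Hedged sketch: exercising the Pydantic models without any scraping or LLM calls.
from price_agents.deals import Deal, DealSelection, Opportunity, ScrapedDeal

deal = Deal(product_description="55-inch 4K Roku smart TV", price=178.0,
            url="https://www.dealnews.com/example")
selection = DealSelection(deals=[deal])
opportunity = Opportunity(deal=deal, estimate=280.0, discount=102.0)
print(opportunity.model_dump_json(indent=2))   # Pydantic v2 serialization

# Live scrape of the RSS feeds (slow and network-dependent):
# scraped = ScrapedDeal.fetch(show_progress=True)
```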
--------------------------------------------------------------------------------
/workshop/price_agents/frontier_agent.py:
--------------------------------------------------------------------------------
1 | # imports
2 |
3 | import os
4 | import re
5 | import math
6 | import json
7 | from typing import List, Dict
8 | from openai import OpenAI
9 | from sentence_transformers import SentenceTransformer
10 | from datasets import load_dataset
11 | import chromadb
12 | from items import Item
13 | from testing import Tester
14 | from price_agents.agent import Agent
15 |
16 |
17 | class FrontierAgent(Agent):
18 |
19 | name = "Frontier Agent"
20 | color = Agent.YELLOW
21 |
22 | MODEL = "deepseek-chat"
23 | PREPROCESS_MODEL = "llama3.2"
24 |
25 | def __init__(self, collection):
26 | """
27 | Set up this instance by connecting to DeepSeek or OpenAI, to the Chroma datastore,
28 | and setting up the vector encoding model
29 | """
30 | self.log("Initializing Frontier Agent")
31 | deepseek_api_key = os.getenv("DEEPSEEK_API_KEY")
32 | if deepseek_api_key:
33 | self.client = OpenAI(api_key=deepseek_api_key, base_url="https://api.deepseek.com")
34 | self.MODEL = "deepseek-chat"
35 | self.log("Frontier Agent is set up with DeepSeek")
36 | else:
37 | self.client = OpenAI()
38 | self.MODEL = "gpt-4o-mini"
39 | self.log("Frontier Agent is setting up with OpenAI")
40 | self.ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
41 | self.collection = collection
42 | self.model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
43 | self.log("Frontier Agent is ready")
44 |
45 | def make_context(self, similars: List[str], prices: List[float]) -> str:
46 | """
47 | Create context that can be inserted into the prompt
48 | :param similars: similar products to the one being estimated
49 | :param prices: prices of the similar products
50 | :return: text to insert in the prompt that provides context
51 | """
52 | message = "To provide some context, here are some other items that might be similar to the item you need to estimate.\n\n"
53 | for similar, price in zip(similars, prices):
54 | message += f"Potentially related product:\n{similar}\nPrice is ${price:.2f}\n\n"
55 | return message
56 |
57 | def messages_for(self, description: str, similars: List[str], prices: List[float]) -> List[Dict[str, str]]:
58 | """
59 | Create the message list to be included in a call to OpenAI
60 | With the system and user prompt
61 | :param description: a description of the product
62 | :param similars: similar products to this one
63 | :param prices: prices of similar products
64 | :return: the list of messages in the format expected by OpenAI
65 | """
66 | system_message = "You estimate prices of items. Reply only with the price, no explanation"
67 | user_prompt = self.make_context(similars, prices)
68 | user_prompt += "And now the question for you:\n\n"
69 | user_prompt += "How much does this cost?\n\n" + description
70 | return [
71 | {"role": "system", "content": system_message},
72 | {"role": "user", "content": user_prompt},
73 | {"role": "assistant", "content": "Price is $"}
74 | ]
75 |
76 | def preprocess(self, item: str):
77 | """
78 | Run the description through llama3.2 running locally to make it most suitable for RAG lookup
79 | """
80 | system_message = "You rewrite product descriptions in a format most suitable for finding similar products in a Knowledge Base"
81 | user_message = "Please write a short 2-3 sentence description of the following product; your description will be used to find similar products so it should be comprehensive and only about the product. Details:\n"
82 | user_message += item
83 | user_message += "\n\nNow please reply only with the short description, with no introduction"
84 | messages = [{"role": "system", "content": system_message}, {"role": "user", "content": user_message}]
85 | response = self.ollama_via_openai.chat.completions.create(model="llama3.2", messages=messages, seed=42)
86 | return response.choices[0].message.content
87 |
88 | def find_similars(self, description: str):
89 | """
90 | Return a list of items similar to the given one by looking in the Chroma datastore
91 | """
92 | self.log("Frontier Agent is using Llama 3.2 to preprocess the description")
93 | preprocessed = self.preprocess(description)
94 | self.log("Frontier Agent is vectorizing using all-MiniLM-L6-v2")
95 | vector = self.model.encode([preprocessed])
96 | self.log("Frontier Agent is performing a RAG search of the Chroma datastore to find 5 similar products")
97 | results = self.collection.query(query_embeddings=vector.astype(float).tolist(), n_results=5)
98 | documents = results['documents'][0][:]
99 | prices = [m['price'] for m in results['metadatas'][0][:]]
100 | self.log("Frontier Agent has found similar products")
101 | return documents, prices
102 |
103 | def get_price(self, s) -> float:
104 | """
105 | A utility that plucks a floating point number out of a string
106 | """
107 | s = s.replace('$','').replace(',','')
108 | match = re.search(r"[-+]?\d*\.\d+|\d+", s)
109 | return float(match.group()) if match else 0.0
110 |
111 | def price(self, description: str) -> float:
112 | """
113 | Make a call to OpenAI or DeepSeek to estimate the price of the described product,
114 | by looking up 5 similar products and including them in the prompt to give context
115 | :param description: a description of the product
116 | :return: an estimate of the price
117 | """
118 | documents, prices = self.find_similars(description)
119 | self.log("Frontier Agent is about to call DeepSeek with context including 5 similar products")
120 | response = self.client.chat.completions.create(
121 | model=self.MODEL,
122 | messages=self.messages_for(description, documents, prices),
123 | seed=42,
124 | max_tokens=5
125 | )
126 | reply = response.choices[0].message.content
127 | result = self.get_price(reply)
128 | self.log(f"Frontier Agent completed - predicting ${result:.2f}")
129 | return result
130 |
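The `get_price` helper never touches `self`, so it can be exercised on its own without building a full `FrontierAgent` (which needs API keys and the Chroma collection). A hedged sketch, assuming it is run from the workshop folder with the requirements installed:

```python
# Hedged sketch: borrowing the price-parsing helper without constructing the agent.
from price_agents.frontier_agent import FrontierAgent

extract_price = FrontierAgent.get_price            # plain function; self is unused
print(extract_price(None, "Price is $1,299.99"))   # -> 1299.99
print(extract_price(None, "reduced to 450"))       # -> 450.0
print(extract_price(None, "call for pricing"))     # -> 0.0
```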
--------------------------------------------------------------------------------
/workshop/price_agents/messaging_agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | from price_agents.deals import Opportunity
3 | import http.client
4 | import urllib
5 | from price_agents.agent import Agent
6 | from anthropic import Anthropic
7 |
8 |
9 | class MessagingAgent(Agent):
10 |
11 | name = "Messaging Agent"
12 | color = Agent.MAGENTA
13 | MODEL = "claude-3-7-sonnet-latest"
14 |
15 | def __init__(self):
16 | """
17 | Set up this object to send push notifications via Pushover,
18 | reading the Pushover credentials from environment variables,
19 | and to use Claude to craft the notification text
20 | """
21 | self.log(f"Messaging Agent is initializing")
22 | self.pushover_user = os.getenv('PUSHOVER_USER', 'your-pushover-user-if-not-using-env')
23 | self.pushover_token = os.getenv('PUSHOVER_TOKEN', 'your-pushover-token-if-not-using-env')
24 | self.claude = Anthropic()
25 | self.log("Messaging Agent has initialized Pushover and Claude")
26 |
27 | def push(self, text):
28 | """
29 | Send a Push Notification using the Pushover API
30 | """
31 | self.log("Messaging Agent is sending a push notification")
32 | conn = http.client.HTTPSConnection("api.pushover.net:443")
33 | conn.request("POST", "/1/messages.json",
34 | urllib.parse.urlencode({
35 | "token": self.pushover_token,
36 | "user": self.pushover_user,
37 | "message": text,
38 | "sound": "cashregister"
39 | }), { "Content-type": "application/x-www-form-urlencoded" })
40 | conn.getresponse()
41 |
42 | def alert(self, opportunity: Opportunity):
43 | """
44 | Make an alert about the specified Opportunity
45 | """
46 | text = f"Deal Alert! Price=${opportunity.deal.price:.2f}, "
47 | text += f"Estimate=${opportunity.estimate:.2f}, "
48 | text += f"Discount=${opportunity.discount:.2f} :"
49 | text += opportunity.deal.product_description[:10]+'... '
50 | text += opportunity.deal.url
51 | self.push(text)
52 | self.log("Messaging Agent has completed")
53 |
54 | def craft_message(self, description: str, deal_price: float, estimated_true_value: float) -> str:
55 | system_prompt = "You are given details of a great deal on special offer, "
56 | system_prompt += "and you summarise it in a short message of 2-3 sentences"
57 | user_prompt = "Please summarize this great deal in 2-3 sentences.\n"
58 | user_prompt += f"Item Description: {description}\nOffered Price: {deal_price}\nEstimated true value: {estimated_true_value}"
59 | user_prompt += "\n\nRespond only with the 2-3 sentence message which will be used to alert the user about this deal"
60 | message = self.claude.messages.create(
61 | model=self.MODEL,
62 | max_tokens=200,
63 | temperature=0.7,
64 | system=system_prompt,
65 | messages=[
66 | {"role": "user", "content": user_prompt},
67 | ])
68 | return message.content[0].text
69 |
70 | def notify(self, description: str, deal_price: float, estimated_true_value: float, url: str):
71 | """
72 | Make an alert about the specified details
73 | """
74 | self.log("Messaging Agent is using Claude to craft the message")
75 | text = self.craft_message(description, deal_price, estimated_true_value)
76 | self.push(text[:200]+"... "+url)
77 | self.log("Messaging Agent has completed")
78 |
79 |
80 |
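For checking Pushover credentials in isolation, the same call can be made with `requests`. This is a hedged equivalent of `push()` above, not part of the workshop code; it assumes PUSHOVER_USER and PUSHOVER_TOKEN are set in the environment.

```python
# Hedged sketch: a standalone Pushover test using requests instead of http.client.
import os
import requests

response = requests.post(
    "https://api.pushover.net/1/messages.json",
    data={
        "token": os.environ["PUSHOVER_TOKEN"],
        "user": os.environ["PUSHOVER_USER"],
        "message": "Test notification from the workshop",
        "sound": "cashregister",
    },
    timeout=10,
)
print(response.status_code, response.text)   # expect 200 and a JSON body with "status": 1
```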
--------------------------------------------------------------------------------
/workshop/price_agents/planning_agent.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 | from price_agents.agent import Agent as BaseAgent
3 | from price_agents.deals import ScrapedDeal, DealSelection, Deal, Opportunity
4 | from price_agents.scanner_agent import ScannerAgent
5 | from price_agents.frontier_agent import FrontierAgent
6 | from price_agents.specialist_agent import SpecialistAgent
7 | from price_agents.messaging_agent import MessagingAgent
8 |
9 |
10 | class PlanningAgent(BaseAgent):
11 |
12 | name = "Planning Agent"
13 | color = BaseAgent.GREEN
14 | DEAL_THRESHOLD = 50
15 |
16 | def __init__(self, collection):
17 | """
18 | Create instances of the Agents that this planner coordinates across
19 | """
20 | self.log("Planning Agent is initializing")
21 | self.scanner = ScannerAgent()
22 | self.frontier = FrontierAgent(collection)
23 | self.specialist = SpecialistAgent()
24 | self.messenger = MessagingAgent()
25 | self.log("Planning Agent is ready")
26 |
27 | def run(self, deal: Deal) -> Opportunity:
28 | """
29 | Run the workflow for a particular deal
30 | :param deal: the deal, summarized from an RSS scrape
31 | :returns: an opportunity including the discount
32 | """
33 | self.log("Planning Agent is pricing up a potential deal")
34 | estimate1 = self.frontier.price(deal.product_description)
35 | estimate2 = self.specialist.price(deal.product_description)
36 | estimate = (estimate1 + estimate2) / 2.0
37 | discount = estimate - deal.price
38 | self.log(f"Planning Agent has processed a deal with discount ${discount:.2f}")
39 | return Opportunity(deal=deal, estimate=estimate, discount=discount)
40 |
41 | def plan(self, memory: List[Opportunity] = []) -> Optional[Opportunity]:
42 | """
43 | Run the full workflow:
44 | 1. Use the ScannerAgent to find deals from RSS feeds
45 | 2. Use the SpecialistAgent and FrontierAgent to estimate them
46 | 3. Use the MessagingAgent to send a notification of deals
47 | We could have an LLM come up with this workflow, providing it with the Tools for each step
48 | But that would be overkill in this case as the workflow is simple and fixed; no intelligent triaging is required.
49 | :param memory: a list of Opportunities that have been surfaced in the past
50 | :return: an Opportunity if one was surfaced, otherwise None
51 | """
52 | self.log("Planning Agent is kicking off a run")
53 | selection = self.scanner.scan(memory=memory)
54 | if selection:
55 | opportunities = [self.run(deal) for deal in selection.deals[:5]]
56 | opportunities.sort(key=lambda opp: opp.discount, reverse=True)
57 | best = opportunities[0]
58 | self.log(f"Planning Agent has identified the best deal has discount ${best.discount:.2f}")
59 | if best.discount > self.DEAL_THRESHOLD:
60 | self.messenger.alert(best)
61 | self.log("Planning Agent has completed a run")
62 | return best if best.discount > self.DEAL_THRESHOLD else None
63 | return None
--------------------------------------------------------------------------------
/workshop/price_agents/scanner_agent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from typing import Optional, List
4 | from openai import OpenAI
5 | from price_agents.deals import ScrapedDeal, DealSelection, Opportunity
6 | from price_agents.agent import Agent
7 |
8 |
9 | class ScannerAgent(Agent):
10 |
11 | MODEL = "gpt-4o-mini"
12 |
13 | SYSTEM_PROMPT = """You identify and summarize the 5 most detailed deals from a list, by selecting deals that have the most detailed, high quality description and the most clear price.
14 | Respond strictly in JSON with no explanation, using this format. You should provide the price as a number derived from the description. If the price of a deal isn't clear, do not include that deal in your response.
15 | Most important is that you respond with the 5 deals that have the most detailed product description with price. It's not important to mention the terms of the deal; most important is a thorough description of the product.
16 | Be careful with products that are described as "$XXX off" or "reduced by $XXX" - this isn't the actual price of the product. Only respond with products when you are highly confident about the price.
17 |
18 | {"deals": [
19 | {
20 | "product_description": "Your clearly expressed summary of the product in 3-4 sentences. Details of the item are much more important than why it's a good deal. Avoid mentioning discounts and coupons; focus on the item itself. There should be a short paragraph of text for each item you choose.",
21 | "price": 99.99,
22 | "url": "the url as provided"
23 | },
24 | ...
25 | ]}"""
26 |
27 | USER_PROMPT_PREFIX = """Respond with the most promising 5 deals from this list, selecting those which have the most detailed, high quality product description and a clear price that is greater than 0.
28 | Respond strictly in JSON, and only JSON. You should rephrase the description to be a summary of the product itself, not the terms of the deal.
29 | Remember to respond with a short paragraph of text in the product_description field for each of the 5 items that you select.
30 | Be careful with products that are described as "$XXX off" or "reduced by $XXX" - this isn't the actual price of the product. Only respond with products when you are highly confident about the price.
31 |
32 | Deals:
33 |
34 | """
35 |
36 | USER_PROMPT_SUFFIX = "\n\nStrictly respond in JSON and include exactly 5 deals, no more."
37 |
38 | name = "Scanner Agent"
39 | color = Agent.CYAN
40 |
41 | def __init__(self):
42 | """
43 | Set up this instance by initializing OpenAI
44 | """
45 | self.log("Scanner Agent is initializing")
46 | self.openai = OpenAI()
47 | self.log("Scanner Agent is ready")
48 |
49 | def fetch_deals(self, memory: List[Opportunity]) -> List[ScrapedDeal]:
50 | """
51 | Look up deals published on RSS feeds
52 | Return any new deals that are not already in the memory provided
53 | """
54 | self.log("Scanner Agent is about to fetch deals from RSS feed")
55 | urls = [opp.deal.url for opp in memory]
56 | scraped = ScrapedDeal.fetch()
57 | result = [scrape for scrape in scraped if scrape.url not in urls]
58 | self.log(f"Scanner Agent received {len(result)} deals not already scraped")
59 | return result
60 |
61 | def make_user_prompt(self, scraped) -> str:
62 | """
63 | Create a user prompt for OpenAI based on the scraped deals provided
64 | """
65 | user_prompt = self.USER_PROMPT_PREFIX
66 | user_prompt += '\n\n'.join([scrape.describe() for scrape in scraped])
67 | user_prompt += self.USER_PROMPT_SUFFIX
68 | return user_prompt
69 |
70 | def scan(self, memory: List[Opportunity]=[]) -> Optional[DealSelection]:
71 | """
72 | Call OpenAI to provide a high potential list of deals with good descriptions and prices
73 | Use StructuredOutputs to ensure it conforms to our specifications
74 | :param memory: a list of Opportunities already surfaced in previous runs
75 | :return: a selection of good deals, or None if there aren't any
76 | """
77 | scraped = self.fetch_deals(memory)
78 | if scraped:
79 | user_prompt = self.make_user_prompt(scraped)
80 | self.log("Scanner Agent is calling OpenAI using Structured Output")
81 | result = self.openai.beta.chat.completions.parse(
82 | model=self.MODEL,
83 | messages=[
84 | {"role": "system", "content": self.SYSTEM_PROMPT},
85 | {"role": "user", "content": user_prompt}
86 | ],
87 | max_tokens=2000,
88 | response_format=DealSelection
89 | )
90 | result = result.choices[0].message.parsed
91 | result.deals = [deal for deal in result.deals if deal.price>0]
92 | self.log(f"Scanner Agent received {len(result.deals)} selected deals with price>0 from OpenAI")
93 | return result
94 | return None
95 |
96 | def test_scan(self, memory: List[Opportunity]=[]) -> Optional[DealSelection]:
97 | """
98 | Return a test DealSelection, to be used during testing
99 | """
100 | results = {'deals': [{'product_description': "The Hisense R6 Series 55R6030N is a 55-inch 4K UHD Roku Smart TV that offers stunning picture quality with 3840x2160 resolution. It features Dolby Vision HDR and HDR10 compatibility, ensuring a vibrant and dynamic viewing experience. The TV runs on Roku's operating system, allowing easy access to streaming services and voice control compatibility with Google Assistant and Alexa. With three HDMI ports available, connecting multiple devices is simple and efficient.",
101 | 'price': 178,
102 | 'url': 'https://www.dealnews.com/products/Hisense/Hisense-R6-Series-55-R6030-N-55-4-K-UHD-Roku-Smart-TV/484824.html?iref=rss-c142'},
103 | {'product_description': 'The Poly Studio P21 is a 21.5-inch LED personal meeting display designed specifically for remote work and video conferencing. With a native resolution of 1080p, it provides crystal-clear video quality, featuring a privacy shutter and stereo speakers. This display includes a 1080p webcam with manual pan, tilt, and zoom control, along with an ambient light sensor to adjust the vanity lighting as needed. It also supports 5W wireless charging for mobile devices, making it an all-in-one solution for home offices.',
104 | 'price': 30,
105 | 'url': 'https://www.dealnews.com/products/Poly-Studio-P21-21-5-1080-p-LED-Personal-Meeting-Display/378335.html?iref=rss-c39'},
106 | {'product_description': 'The Lenovo IdeaPad Slim 5 laptop is powered by a 7th generation AMD Ryzen 5 8645HS 6-core CPU, offering efficient performance for multitasking and demanding applications. It features a 16-inch touch display with a resolution of 1920x1080, ensuring bright and vivid visuals. Accompanied by 16GB of RAM and a 512GB SSD, the laptop provides ample speed and storage for all your files. This model is designed to handle everyday tasks with ease while delivering an enjoyable user experience.',
107 | 'price': 446,
108 | 'url': 'https://www.dealnews.com/products/Lenovo/Lenovo-Idea-Pad-Slim-5-7-th-Gen-Ryzen-5-16-Touch-Laptop/485068.html?iref=rss-c39'},
109 | {'product_description': 'The Dell G15 gaming laptop is equipped with a 6th-generation AMD Ryzen 5 7640HS 6-Core CPU, providing powerful performance for gaming and content creation. It features a 15.6-inch 1080p display with a 120Hz refresh rate, allowing for smooth and responsive gameplay. With 16GB of RAM and a substantial 1TB NVMe M.2 SSD, this laptop ensures speedy performance and plenty of storage for games and applications. Additionally, it includes the Nvidia GeForce RTX 3050 GPU for enhanced graphics and gaming experiences.',
110 | 'price': 650,
111 | 'url': 'https://www.dealnews.com/products/Dell/Dell-G15-Ryzen-5-15-6-Gaming-Laptop-w-Nvidia-RTX-3050/485067.html?iref=rss-c39'}]}
112 | return DealSelection(**results)
113 |
114 |
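A self-contained, hedged sketch of the Structured Outputs call that `scan()` relies on, using a throwaway schema so it can be run without any scraped deals. The model name and schema here are illustrative assumptions, not part of the workshop code.

```python
# Hedged sketch: Structured Outputs with a minimal Pydantic schema (assumes OPENAI_API_KEY).
from typing import List
from openai import OpenAI
from pydantic import BaseModel

class MiniDeal(BaseModel):
    product_description: str
    price: float
    url: str

class MiniSelection(BaseModel):
    deals: List[MiniDeal]

client = OpenAI()
completion = client.beta.chat.completions.parse(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "Select the single best deal and respond in the given schema."},
        {"role": "user", "content": "A 55-inch TV for $178 at https://example.com/tv, "
                                    "or a mousepad for $9 at https://example.com/pad"},
    ],
    response_format=MiniSelection,
)
print(completion.choices[0].message.parsed)   # a MiniSelection instance
```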
--------------------------------------------------------------------------------
/workshop/price_agents/specialist_agent.py:
--------------------------------------------------------------------------------
1 | import modal
2 | from price_agents.agent import Agent
3 |
4 |
5 | class SpecialistAgent(Agent):
6 | """
7 | An Agent that runs our fine-tuned LLM that's running remotely on Modal
8 | """
9 |
10 | name = "Specialist Agent"
11 | color = Agent.RED
12 |
13 | def __init__(self):
14 | """
15 | Set up this Agent by creating an instance of the modal class
16 | """
17 | self.log("Specialist Agent is initializing - connecting to modal")
18 | Pricer = modal.Cls.from_name("pricer-service", "Pricer")
19 | self.pricer = Pricer()
20 | self.log("Specialist Agent is ready")
21 |
22 | def price(self, description: str) -> float:
23 | """
24 | Make a remote call to return the estimate of the price of this item
25 | """
26 | self.log("Specialist Agent is calling remote fine-tuned model")
27 | result = self.pricer.price.remote(description)
28 | self.log(f"Specialist Agent completed - predicting ${result:.2f}")
29 | return result
30 |
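A hedged usage sketch: the Modal class can also be called directly from a Python session, assuming `pricer_service.py` has been deployed (for example with `modal deploy pricer_service.py`) and you have authenticated with Modal. The product description below is just an example input.

```python
# Hedged sketch: the same remote lookup the SpecialistAgent performs, outside the agent framework.
import modal

Pricer = modal.Cls.from_name("pricer-service", "Pricer")
pricer = Pricer()
print(pricer.price.remote("Quadcast HyperX condenser mic for podcasting"))
```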
--------------------------------------------------------------------------------
/workshop/price_is_right.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import queue
3 | import threading
4 | import time
5 | import gradio as gr
6 | from deal_agent_framework import DealAgentFramework
7 | from price_agents.deals import Opportunity, Deal
8 | from log_utils import reformat
9 | import plotly.graph_objects as go
10 |
11 |
12 | class QueueHandler(logging.Handler):
13 | def __init__(self, log_queue):
14 | super().__init__()
15 | self.log_queue = log_queue
16 |
17 | def emit(self, record):
18 | self.log_queue.put(self.format(record))
19 |
20 | def html_for(log_data):
21 | output = '<br>'.join(log_data[-18:])
22 | return f"""
23 |