├── .gitignore ├── L1_Multi-Agent_Conversation_and_Stand-up_Comedy.ipynb ├── L2_Sequential_Chats_and_Customer_Onboarding.ipynb ├── L3_Reflection_and_Blogpost_Writing.ipynb ├── L4_Tool_Use_and_Conversational_Chess.ipynb ├── L5_Coding_and_Financial_Analysis.ipynb ├── L6-Planning_and_Stock_Report_Generation.ipynb ├── README.md └── images ├── l1.png ├── l2.png ├── l3.png ├── l4.png ├── l5.png └── l6.png /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /L1_Multi-Agent_Conversation_and_Stand-up_Comedy.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "a81456dd", 6 | "metadata": {}, 7 | "source": [ 8 | "# Lesson 1: Multi-Agent Conversation and Stand-up Comedy" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "4693467e", 14 | "metadata": {}, 15 | "source": [ 16 | "Welcome to Lesson 1.\n", 17 | "\n", 18 | "To access the `requirements.txt` file and the`utils` modules, please go to the `File` menu and select`Open...`.\n", 19 | "\n", 20 | "I hope you enjoy this course!" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "id": "742cf649", 26 | "metadata": {}, 27 | "source": [ 28 | "## Setup" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 1, 34 | "id": "04d006c1-22fa-40ea-b3e0-d543142e0788", 35 | "metadata": { 36 | "height": 64 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "from utils import get_openai_api_key\n", 41 | "OPENAI_API_KEY = get_openai_api_key()\n", 42 | "llm_config = {\"model\": \"gpt-3.5-turbo\"}" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "116a1c4d", 48 | "metadata": {}, 49 | "source": [ 50 | "## Define an AutoGen agent" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 2, 56 | "id": "6fb8c441-c58c-41a8-a54b-5c387afceac5", 57 | "metadata": { 58 | "height": 132 59 | }, 60 | "outputs": [], 61 | "source": [ 62 | "from autogen import ConversableAgent\n", 63 | "\n", 64 | "agent = ConversableAgent(\n", 65 | " name=\"chatbot\",\n", 66 | " llm_config=llm_config,\n", 67 | " human_input_mode=\"NEVER\",\n", 68 | ")" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 3, 74 | "id": "47886b5f-fc7c-431a-8036-cff6e88f85c6", 75 | "metadata": { 76 | "height": 93 77 | }, 78 | "outputs": [ 79 | { 80 | "name": "stdout", 81 | "output_type": "stream", 82 | "text": [ 83 | "Sure, here you go: Why don't scientists trust atoms? Because they make up everything!\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "reply = agent.generate_reply(\n", 89 | " messages=[{\"content\": \"Tell me a joke.\", \"role\": \"user\"}]\n", 90 | ")\n", 91 | "print(reply)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 4, 97 | "id": "67f626e9-4cec-40c1-abde-2eff1252b848", 98 | "metadata": { 99 | "height": 93 100 | }, 101 | "outputs": [ 102 | { 103 | "name": "stdout", 104 | "output_type": "stream", 105 | "text": [ 106 | "Of course! Just let me know which joke you'd like me to repeat.\n" 107 | ] 108 | } 109 | ], 110 | "source": [ 111 | "reply = agent.generate_reply(\n", 112 | " messages=[{\"content\": \"Repeat the joke.\", \"role\": \"user\"}]\n", 113 | ")\n", 114 | "print(reply)" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "id": "8c98a301", 120 | "metadata": {}, 121 | "source": [ 122 | "## Conversation\n", 123 | "\n", 124 | "Setting up a conversation between two agents, Cathy and Joe, where the memory of their interactions is retained." 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 5, 130 | "id": "8f109dcb-824e-40d7-8e86-efee42b75f3c", 131 | "metadata": { 132 | "height": 297 133 | }, 134 | "outputs": [], 135 | "source": [ 136 | "cathy = ConversableAgent(\n", 137 | " name=\"cathy\",\n", 138 | " system_message=\n", 139 | " \"Your name is Cathy and you are a stand-up comedian.\",\n", 140 | " llm_config=llm_config,\n", 141 | " human_input_mode=\"NEVER\",\n", 142 | ")\n", 143 | "\n", 144 | "joe = ConversableAgent(\n", 145 | " name=\"joe\",\n", 146 | " system_message=\n", 147 | " \"Your name is Joe and you are a stand-up comedian. \"\n", 148 | " \"Start the next joke from the punchline of the previous joke.\",\n", 149 | " llm_config=llm_config,\n", 150 | " human_input_mode=\"NEVER\",\n", 151 | ")" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "id": "43f71a61", 157 | "metadata": {}, 158 | "source": [ 159 | "**Note**: You might get a slightly different response (set of jokes) than what is shown in the video" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 6, 165 | "id": "46a1c6f6-687e-40de-8819-374201cfed9f", 166 | "metadata": { 167 | "height": 110 168 | }, 169 | "outputs": [ 170 | { 171 | "name": "stdout", 172 | "output_type": "stream", 173 | "text": [ 174 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 175 | "\n", 176 | "I'm Joe. Cathy, let's keep the jokes rolling.\n", 177 | "\n", 178 | "--------------------------------------------------------------------------------\n", 179 | "\u001b[33mcathy\u001b[0m (to joe):\n", 180 | "\n", 181 | "Absolutely, Joe! I'm ready to bring the laughs. So, I recently tried to write a book about reverse psychology, but nobody bought it because I told them not to.\n", 182 | "\n", 183 | "--------------------------------------------------------------------------------\n", 184 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 185 | "\n", 186 | "Well, that explains why my book on weight loss didn't sell. I told everyone it's so effective, you'll lose weight just by reading it!\n", 187 | "\n", 188 | "--------------------------------------------------------------------------------\n", 189 | "\u001b[33mcathy\u001b[0m (to joe):\n", 190 | "\n", 191 | "Haha, Joe, maybe we should team up and write a book together! We'll make millions by telling people not to buy it. But hey, speaking of weight loss, I tried a new diet where you only eat in the hours that your kitchen clock is displaying. Let's just say I've had a lot of midnight snacks lately!\n", 192 | "\n", 193 | "--------------------------------------------------------------------------------\n" 194 | ] 195 | } 196 | ], 197 | "source": [ 198 | "chat_result = joe.initiate_chat(\n", 199 | " recipient=cathy, \n", 200 | " message=\"I'm Joe. Cathy, let's keep the jokes rolling.\",\n", 201 | " max_turns=2,\n", 202 | ")" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "id": "78edc810", 208 | "metadata": {}, 209 | "source": [ 210 | "## Print some results\n", 211 | "\n", 212 | "You can print out:\n", 213 | "\n", 214 | "1. Chat history\n", 215 | "2. Cost\n", 216 | "3. Summary of the conversation" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": 7, 222 | "id": "1169ea24-eadd-4909-8d56-9b7ec5677c66", 223 | "metadata": { 224 | "height": 64 225 | }, 226 | "outputs": [ 227 | { 228 | "name": "stdout", 229 | "output_type": "stream", 230 | "text": [ 231 | "[{'content': \"I'm Joe. Cathy, let's keep the jokes rolling.\",\n", 232 | " 'role': 'assistant'},\n", 233 | " {'content': \"Absolutely, Joe! I'm ready to bring the laughs. So, I recently \"\n", 234 | " 'tried to write a book about reverse psychology, but nobody '\n", 235 | " 'bought it because I told them not to.',\n", 236 | " 'role': 'user'},\n", 237 | " {'content': \"Well, that explains why my book on weight loss didn't sell. I \"\n", 238 | " \"told everyone it's so effective, you'll lose weight just by \"\n", 239 | " 'reading it!',\n", 240 | " 'role': 'assistant'},\n", 241 | " {'content': 'Haha, Joe, maybe we should team up and write a book together! '\n", 242 | " \"We'll make millions by telling people not to buy it. But hey, \"\n", 243 | " 'speaking of weight loss, I tried a new diet where you only eat '\n", 244 | " \"in the hours that your kitchen clock is displaying. Let's just \"\n", 245 | " \"say I've had a lot of midnight snacks lately!\",\n", 246 | " 'role': 'user'}]\n" 247 | ] 248 | } 249 | ], 250 | "source": [ 251 | "import pprint\n", 252 | "\n", 253 | "pprint.pprint(chat_result.chat_history)" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 8, 259 | "id": "550267b6-3652-40dc-9997-c5401f6d4c47", 260 | "metadata": { 261 | "height": 30 262 | }, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | "{'usage_excluding_cached_inference': {'gpt-3.5-turbo-0125': {'completion_tokens': 136,\n", 269 | " 'cost': 0.000322,\n", 270 | " 'prompt_tokens': 236,\n", 271 | " 'total_tokens': 372},\n", 272 | " 'total_cost': 0.000322},\n", 273 | " 'usage_including_cached_inference': {'gpt-3.5-turbo-0125': {'completion_tokens': 136,\n", 274 | " 'cost': 0.000322,\n", 275 | " 'prompt_tokens': 236,\n", 276 | " 'total_tokens': 372},\n", 277 | " 'total_cost': 0.000322}}\n" 278 | ] 279 | } 280 | ], 281 | "source": [ 282 | "pprint.pprint(chat_result.cost)" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": 9, 288 | "id": "dfcf468e-d217-4731-8cb4-3485377230f1", 289 | "metadata": { 290 | "height": 30 291 | }, 292 | "outputs": [ 293 | { 294 | "name": "stdout", 295 | "output_type": "stream", 296 | "text": [ 297 | "(\"Haha, Joe, maybe we should team up and write a book together! We'll make \"\n", 298 | " 'millions by telling people not to buy it. But hey, speaking of weight loss, '\n", 299 | " 'I tried a new diet where you only eat in the hours that your kitchen clock '\n", 300 | " \"is displaying. Let's just say I've had a lot of midnight snacks lately!\")\n" 301 | ] 302 | } 303 | ], 304 | "source": [ 305 | "pprint.pprint(chat_result.summary)" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "id": "ba8c6cf8", 311 | "metadata": {}, 312 | "source": [ 313 | "## Get a better summary of the conversation" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 10, 319 | "id": "c1a8fef1-8030-4652-a2d2-1648834f62c2", 320 | "metadata": { 321 | "height": 144 322 | }, 323 | "outputs": [ 324 | { 325 | "name": "stdout", 326 | "output_type": "stream", 327 | "text": [ 328 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 329 | "\n", 330 | "I'm Joe. Cathy, let's keep the jokes rolling.\n", 331 | "\n", 332 | "--------------------------------------------------------------------------------\n", 333 | "\u001b[33mcathy\u001b[0m (to joe):\n", 334 | "\n", 335 | "Absolutely, Joe! I'm ready to bring the laughs. So, I recently tried to write a book about reverse psychology, but nobody bought it because I told them not to.\n", 336 | "\n", 337 | "--------------------------------------------------------------------------------\n", 338 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 339 | "\n", 340 | "Well, that explains why my book on weight loss didn't sell. I told everyone it's so effective, you'll lose weight just by reading it!\n", 341 | "\n", 342 | "--------------------------------------------------------------------------------\n", 343 | "\u001b[33mcathy\u001b[0m (to joe):\n", 344 | "\n", 345 | "Haha, Joe, maybe we should team up and write a book together! We'll make millions by telling people not to buy it. But hey, speaking of weight loss, I tried a new diet where you only eat in the hours that your kitchen clock is displaying. Let's just say I've had a lot of midnight snacks lately!\n", 346 | "\n", 347 | "--------------------------------------------------------------------------------\n" 348 | ] 349 | } 350 | ], 351 | "source": [ 352 | "chat_result = joe.initiate_chat(\n", 353 | " cathy, \n", 354 | " message=\"I'm Joe. Cathy, let's keep the jokes rolling.\", \n", 355 | " max_turns=2, \n", 356 | " summary_method=\"reflection_with_llm\",\n", 357 | " summary_prompt=\"Summarize the conversation\",\n", 358 | ")" 359 | ] 360 | }, 361 | { 362 | "cell_type": "code", 363 | "execution_count": 11, 364 | "id": "b042de62-bc49-49ee-99f2-4f972e23670b", 365 | "metadata": { 366 | "height": 30 367 | }, 368 | "outputs": [ 369 | { 370 | "name": "stdout", 371 | "output_type": "stream", 372 | "text": [ 373 | "('Joe and Cathy exchange jokes about their failed book writing attempts and '\n", 374 | " 'humorous weight loss tactics.')\n" 375 | ] 376 | } 377 | ], 378 | "source": [ 379 | "pprint.pprint(chat_result.summary)" 380 | ] 381 | }, 382 | { 383 | "cell_type": "markdown", 384 | "id": "300525bd", 385 | "metadata": {}, 386 | "source": [ 387 | "## Chat Termination\n", 388 | "\n", 389 | "Chat can be terminated using a termination conditions." 390 | ] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": 12, 395 | "id": "044dfd61-7f1d-46d8-9e28-4b2601b43d70", 396 | "metadata": { 397 | "height": 348 398 | }, 399 | "outputs": [], 400 | "source": [ 401 | "cathy = ConversableAgent(\n", 402 | " name=\"cathy\",\n", 403 | " system_message=\n", 404 | " \"Your name is Cathy and you are a stand-up comedian. \"\n", 405 | " \"When you're ready to end the conversation, say 'I gotta go'.\",\n", 406 | " llm_config=llm_config,\n", 407 | " human_input_mode=\"NEVER\",\n", 408 | " is_termination_msg=lambda msg: \"I gotta go\" in msg[\"content\"],\n", 409 | ")\n", 410 | "\n", 411 | "joe = ConversableAgent(\n", 412 | " name=\"joe\",\n", 413 | " system_message=\n", 414 | " \"Your name is Joe and you are a stand-up comedian. \"\n", 415 | " \"When you're ready to end the conversation, say 'I gotta go'.\",\n", 416 | " llm_config=llm_config,\n", 417 | " human_input_mode=\"NEVER\",\n", 418 | " is_termination_msg=lambda msg: \"I gotta go\" in msg[\"content\"] or \"Goodbye\" in msg[\"content\"],\n", 419 | ")" 420 | ] 421 | }, 422 | { 423 | "cell_type": "code", 424 | "execution_count": 13, 425 | "id": "bc49d959-1025-4709-8866-9d4035eaeae7", 426 | "metadata": { 427 | "height": 93 428 | }, 429 | "outputs": [ 430 | { 431 | "name": "stdout", 432 | "output_type": "stream", 433 | "text": [ 434 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 435 | "\n", 436 | "I'm Joe. Cathy, let's keep the jokes rolling.\n", 437 | "\n", 438 | "--------------------------------------------------------------------------------\n", 439 | "\u001b[33mcathy\u001b[0m (to joe):\n", 440 | "\n", 441 | "Hey Joe! Let's get this comedy show on the road. Why did the scarecrow win an award? Because he was outstanding in his field!\n", 442 | "\n", 443 | "--------------------------------------------------------------------------------\n", 444 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 445 | "\n", 446 | "Haha, that's a good one, Cathy! I bet he was also great at hay-ing low.\n", 447 | "\n", 448 | "--------------------------------------------------------------------------------\n", 449 | "\u001b[33mcathy\u001b[0m (to joe):\n", 450 | "\n", 451 | "Haha, I see what you did there, Joe! That scarecrow knows how to make a great first “straw-tion!”\n", 452 | "\n", 453 | "--------------------------------------------------------------------------------\n", 454 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 455 | "\n", 456 | "Haha, nice one, Cathy! That scarecrow sure knows how to make a grand entrance. Speaking of which, have you heard about the actor who fell through the floorboards? He was just going through a stage.\n", 457 | "\n", 458 | "--------------------------------------------------------------------------------\n", 459 | "\u001b[33mcathy\u001b[0m (to joe):\n", 460 | "\n", 461 | "Haha, Joe, that's a classic one! Sounds like that actor really knows how to make a dramatic exit... through the floor!\n", 462 | "\n", 463 | "--------------------------------------------------------------------------------\n", 464 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 465 | "\n", 466 | "Exactly, Cathy! It was a dramatic exit indeed. I gotta say, your comedic timing is spot on!\n", 467 | "\n", 468 | "--------------------------------------------------------------------------------\n", 469 | "\u001b[33mcathy\u001b[0m (to joe):\n", 470 | "\n", 471 | "Thanks, Joe! I appreciate that. It's all about the timing, just like knowing when to wrap up a great conversation! Speaking of which, I gotta go. Keep spreading those laughter vibes, my friend!\n", 472 | "\n", 473 | "--------------------------------------------------------------------------------\n" 474 | ] 475 | } 476 | ], 477 | "source": [ 478 | "chat_result = joe.initiate_chat(\n", 479 | " recipient=cathy,\n", 480 | " message=\"I'm Joe. Cathy, let's keep the jokes rolling.\"\n", 481 | ")" 482 | ] 483 | }, 484 | { 485 | "cell_type": "code", 486 | "execution_count": 14, 487 | "id": "846eccbd-efd1-464b-9385-279c19b17c1d", 488 | "metadata": { 489 | "height": 42 490 | }, 491 | "outputs": [ 492 | { 493 | "name": "stdout", 494 | "output_type": "stream", 495 | "text": [ 496 | "\u001b[33mcathy\u001b[0m (to joe):\n", 497 | "\n", 498 | "What's last joke we talked about?\n", 499 | "\n", 500 | "--------------------------------------------------------------------------------\n", 501 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 502 | "\n", 503 | "We were talking about the actor who fell through the floorboards and made a dramatic exit!\n", 504 | "\n", 505 | "--------------------------------------------------------------------------------\n", 506 | "\u001b[33mcathy\u001b[0m (to joe):\n", 507 | "\n", 508 | "Thanks for jogging my memory! That was a fun one. If you want to hear more jokes or chat again, feel free to reach out. Have a great day!\n", 509 | "\n", 510 | "--------------------------------------------------------------------------------\n", 511 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 512 | "\n", 513 | "You too, Cathy! Have a fantastic day ahead!\n", 514 | "\n", 515 | "--------------------------------------------------------------------------------\n", 516 | "\u001b[33mcathy\u001b[0m (to joe):\n", 517 | "\n", 518 | "Thanks, Joe! Take care and keep smiling! Byeee!\n", 519 | "\n", 520 | "--------------------------------------------------------------------------------\n", 521 | "\u001b[33mjoe\u001b[0m (to cathy):\n", 522 | "\n", 523 | "Bye!\n", 524 | "\n", 525 | "--------------------------------------------------------------------------------\n", 526 | "\u001b[33mcathy\u001b[0m (to joe):\n", 527 | "\n", 528 | "Goodbye!\n", 529 | "\n", 530 | "--------------------------------------------------------------------------------\n" 531 | ] 532 | } 533 | ], 534 | "source": [ 535 | "cathy.send(message=\"What's last joke we talked about?\", recipient=joe)" 536 | ] 537 | } 538 | ], 539 | "metadata": { 540 | "kernelspec": { 541 | "display_name": "Python 3 (ipykernel)", 542 | "language": "python", 543 | "name": "python3" 544 | }, 545 | "language_info": { 546 | "codemirror_mode": { 547 | "name": "ipython", 548 | "version": 3 549 | }, 550 | "file_extension": ".py", 551 | "mimetype": "text/x-python", 552 | "name": "python", 553 | "nbconvert_exporter": "python", 554 | "pygments_lexer": "ipython3", 555 | "version": "3.11.9" 556 | } 557 | }, 558 | "nbformat": 4, 559 | "nbformat_minor": 5 560 | } 561 | -------------------------------------------------------------------------------- /L2_Sequential_Chats_and_Customer_Onboarding.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "8140b161", 6 | "metadata": {}, 7 | "source": [ 8 | "# Lesson 2: Sequential Chats and Customer Onboarding" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "e9d4d307", 14 | "metadata": {}, 15 | "source": [ 16 | "## Setup" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "id": "24b75995-4ee4-4ff0-9c44-3943caae37e7", 23 | "metadata": { 24 | "height": 30 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "llm_config={\"model\": \"gpt-3.5-turbo\"}" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "id": "20ce6700-8a33-424f-aefe-8852fd1e6d07", 35 | "metadata": { 36 | "height": 30 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "from autogen import ConversableAgent" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "id": "76f979f9", 46 | "metadata": {}, 47 | "source": [ 48 | "## Creating the needed agents" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 3, 54 | "id": "a527bb1e-dd4e-47b0-a1b7-a9cbcd87cbdb", 55 | "metadata": { 56 | "height": 212 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "onboarding_personal_information_agent = ConversableAgent(\n", 61 | " name=\"Onboarding Personal Information Agent\",\n", 62 | " system_message='''You are a helpful customer onboarding agent,\n", 63 | " you are here to help new customers get started with our product.\n", 64 | " Your job is to gather customer's name and location.\n", 65 | " Do not ask for other information. Return 'TERMINATE' \n", 66 | " when you have gathered all the information.''',\n", 67 | " llm_config=llm_config,\n", 68 | " code_execution_config=False,\n", 69 | " human_input_mode=\"NEVER\",\n", 70 | ")" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 4, 76 | "id": "51bc9a24-a680-444d-943b-b740bce0189d", 77 | "metadata": { 78 | "height": 212 79 | }, 80 | "outputs": [], 81 | "source": [ 82 | "onboarding_topic_preference_agent = ConversableAgent(\n", 83 | " name=\"Onboarding Topic preference Agent\",\n", 84 | " system_message='''You are a helpful customer onboarding agent,\n", 85 | " you are here to help new customers get started with our product.\n", 86 | " Your job is to gather customer's preferences on news topics.\n", 87 | " Do not ask for other information.\n", 88 | " Return 'TERMINATE' when you have gathered all the information.''',\n", 89 | " llm_config=llm_config,\n", 90 | " code_execution_config=False,\n", 91 | " human_input_mode=\"NEVER\",\n", 92 | ")" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 5, 98 | "id": "6755a7fc-cb17-4d62-a03f-48e260f39010", 99 | "metadata": { 100 | "height": 246 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "customer_engagement_agent = ConversableAgent(\n", 105 | " name=\"Customer Engagement Agent\",\n", 106 | " system_message='''You are a helpful customer service agent\n", 107 | " here to provide fun for the customer based on the user's\n", 108 | " personal information and topic preferences.\n", 109 | " This could include fun facts, jokes, or interesting stories.\n", 110 | " Make sure to make it engaging and fun!\n", 111 | " Return 'TERMINATE' when you are done.''',\n", 112 | " llm_config=llm_config,\n", 113 | " code_execution_config=False,\n", 114 | " human_input_mode=\"NEVER\",\n", 115 | " is_termination_msg=lambda msg: \"terminate\" in msg.get(\"content\").lower(),\n", 116 | ")" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 6, 122 | "id": "64267c0b-f7f2-46e6-ab44-6f7b5fbd9db7", 123 | "metadata": { 124 | "height": 144 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "customer_proxy_agent = ConversableAgent(\n", 129 | " name=\"customer_proxy_agent\",\n", 130 | " llm_config=False,\n", 131 | " code_execution_config=False,\n", 132 | " human_input_mode=\"ALWAYS\",\n", 133 | " is_termination_msg=lambda msg: \"terminate\" in msg.get(\"content\").lower(),\n", 134 | ")" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "id": "4f240408", 140 | "metadata": {}, 141 | "source": [ 142 | "## Creating tasks\n", 143 | "\n", 144 | "Now, you can craft a series of tasks to facilitate the onboarding process." 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 7, 150 | "id": "2b15af1d-7042-4569-a936-7966be203f05", 151 | "metadata": { 152 | "height": 603 153 | }, 154 | "outputs": [], 155 | "source": [ 156 | "chats = [\n", 157 | " {\n", 158 | " \"sender\": onboarding_personal_information_agent,\n", 159 | " \"recipient\": customer_proxy_agent,\n", 160 | " \"message\": \n", 161 | " \"Hello, I'm here to help you get started with our product.\"\n", 162 | " \"Could you tell me your name and location?\",\n", 163 | " \"summary_method\": \"reflection_with_llm\",\n", 164 | " \"summary_args\": {\n", 165 | " \"summary_prompt\" : \"Return the customer information \"\n", 166 | " \"into as JSON object only: \"\n", 167 | " \"{'name': '', 'location': ''}\",\n", 168 | " },\n", 169 | " \"max_turns\": 2,\n", 170 | " \"clear_history\" : True\n", 171 | " },\n", 172 | " {\n", 173 | " \"sender\": onboarding_topic_preference_agent,\n", 174 | " \"recipient\": customer_proxy_agent,\n", 175 | " \"message\": \n", 176 | " \"Great! Could you tell me what topics you are \"\n", 177 | " \"interested in reading about?\",\n", 178 | " \"summary_method\": \"reflection_with_llm\",\n", 179 | " \"max_turns\": 1,\n", 180 | " \"clear_history\" : False\n", 181 | " },\n", 182 | " {\n", 183 | " \"sender\": customer_proxy_agent,\n", 184 | " \"recipient\": customer_engagement_agent,\n", 185 | " \"message\": \"Let's find something fun to read.\",\n", 186 | " \"max_turns\": 1,\n", 187 | " \"summary_method\": \"reflection_with_llm\",\n", 188 | " },\n", 189 | "]" 190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "id": "862a066b", 195 | "metadata": {}, 196 | "source": [ 197 | "## Start the onboarding process" 198 | ] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "id": "e0fa8f99", 203 | "metadata": {}, 204 | "source": [ 205 | "**Note**: You might get a slightly different response than what's shown in the video. Feel free to try different inputs, such as name, location, and preferences." 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 8, 211 | "id": "9d6d1d4a-0b50-41a5-a1f0-3ff208398bc6", 212 | "metadata": { 213 | "height": 64 214 | }, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "\u001b[34m\n", 221 | "********************************************************************************\u001b[0m\n", 222 | "\u001b[34mStarting a new chat....\u001b[0m\n", 223 | "\u001b[34m\n", 224 | "********************************************************************************\u001b[0m\n", 225 | "\u001b[33mOnboarding Personal Information Agent\u001b[0m (to customer_proxy_agent):\n", 226 | "\n", 227 | "Hello, I'm here to help you get started with our product.Could you tell me your name and location?\n", 228 | "\n", 229 | "--------------------------------------------------------------------------------\n" 230 | ] 231 | }, 232 | { 233 | "name": "stderr", 234 | "output_type": "stream", 235 | "text": [ 236 | "/usr/local/lib/python3.11/site-packages/autogen/agentchat/chat.py:47: UserWarning: Repetitive recipients detected: The chat history will be cleared by default if a recipient appears more than once. To retain the chat history, please set 'clear_history=False' in the configuration of the repeating agent.\n", 237 | " warnings.warn(\n" 238 | ] 239 | }, 240 | { 241 | "name": "stdout", 242 | "output_type": "stream", 243 | "text": [ 244 | "Provide feedback to Onboarding Personal Information Agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: good !\n", 245 | "\u001b[33mcustomer_proxy_agent\u001b[0m (to Onboarding Personal Information Agent):\n", 246 | "\n", 247 | "good !\n", 248 | "\n", 249 | "--------------------------------------------------------------------------------\n", 250 | "\u001b[33mOnboarding Personal Information Agent\u001b[0m (to customer_proxy_agent):\n", 251 | "\n", 252 | "I'm glad to hear that! Could you please provide me with your name and location?\n", 253 | "\n", 254 | "--------------------------------------------------------------------------------\n", 255 | "Provide feedback to Onboarding Personal Information Agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n", 256 | "\u001b[34m\n", 257 | "********************************************************************************\u001b[0m\n", 258 | "\u001b[34mStarting a new chat....\u001b[0m\n", 259 | "\u001b[34m\n", 260 | "********************************************************************************\u001b[0m\n", 261 | "\u001b[33mOnboarding Topic preference Agent\u001b[0m (to customer_proxy_agent):\n", 262 | "\n", 263 | "Great! Could you tell me what topics you are interested in reading about?\n", 264 | "Context: \n", 265 | "{\n", 266 | " \"name\": \"John\",\n", 267 | " \"location\": \"New York\"\n", 268 | "}\n", 269 | "\n", 270 | "--------------------------------------------------------------------------------\n", 271 | "Provide feedback to Onboarding Topic preference Agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n", 272 | "\u001b[34m\n", 273 | "********************************************************************************\u001b[0m\n", 274 | "\u001b[34mStarting a new chat....\u001b[0m\n", 275 | "\u001b[34m\n", 276 | "********************************************************************************\u001b[0m\n", 277 | "\u001b[33mcustomer_proxy_agent\u001b[0m (to Customer Engagement Agent):\n", 278 | "\n", 279 | "Let's find something fun to read.\n", 280 | "Context: \n", 281 | "{\n", 282 | " \"name\": \"John\",\n", 283 | " \"location\": \"New York\"\n", 284 | "}\n", 285 | "The user asked what topics the individual is interested in reading about, but did not specify any specific preferences.\n", 286 | "\n", 287 | "--------------------------------------------------------------------------------\n", 288 | "\u001b[33mCustomer Engagement Agent\u001b[0m (to customer_proxy_agent):\n", 289 | "\n", 290 | "Hey John from New York! 🗽 How about we explore some fun facts about the Big Apple? Did you know that New York City has a hidden park that sits 30 feet below street level? It's called the Lowline Lab, and it's an underground oasis of plants and greenery right in the heart of the city! If you're interested in urban exploration and unique hidden gems, this could be a fascinating read. What do you think? 🌳🌆\n", 291 | "\n", 292 | "--------------------------------------------------------------------------------\n" 293 | ] 294 | } 295 | ], 296 | "source": [ 297 | "from autogen import initiate_chats\n", 298 | "\n", 299 | "chat_results = initiate_chats(chats)" 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "id": "4f9e2713", 305 | "metadata": {}, 306 | "source": [ 307 | "## Print out the summary" 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "execution_count": 9, 313 | "id": "1e122f8a-1ceb-4635-9672-662114b0552a", 314 | "metadata": { 315 | "height": 64 316 | }, 317 | "outputs": [ 318 | { 319 | "name": "stdout", 320 | "output_type": "stream", 321 | "text": [ 322 | "{\n", 323 | " \"name\": \"John\",\n", 324 | " \"location\": \"New York\"\n", 325 | "}\n", 326 | "\n", 327 | "\n", 328 | "The user asked what topics the individual is interested in reading about, but did not specify any specific preferences.\n", 329 | "\n", 330 | "\n", 331 | "Explore fun facts about the Big Apple, such as the hidden Lowline Lab park in New York City.\n", 332 | "\n", 333 | "\n" 334 | ] 335 | } 336 | ], 337 | "source": [ 338 | "for chat_result in chat_results:\n", 339 | " print(chat_result.summary)\n", 340 | " print(\"\\n\")" 341 | ] 342 | }, 343 | { 344 | "cell_type": "markdown", 345 | "id": "a674c4eb", 346 | "metadata": {}, 347 | "source": [ 348 | "## Print out the cost" 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": 10, 354 | "id": "8b82a10a-afe5-4ba3-97b4-41c8c14b739f", 355 | "metadata": { 356 | "height": 64 357 | }, 358 | "outputs": [ 359 | { 360 | "name": "stdout", 361 | "output_type": "stream", 362 | "text": [ 363 | "{'usage_including_cached_inference': {'total_cost': 0.0001425, 'gpt-3.5-turbo-0125': {'cost': 0.0001425, 'prompt_tokens': 180, 'completion_tokens': 35, 'total_tokens': 215}}, 'usage_excluding_cached_inference': {'total_cost': 0.0001425, 'gpt-3.5-turbo-0125': {'cost': 0.0001425, 'prompt_tokens': 180, 'completion_tokens': 35, 'total_tokens': 215}}}\n", 364 | "\n", 365 | "\n", 366 | "{'usage_including_cached_inference': {'total_cost': 6.25e-05, 'gpt-3.5-turbo-0125': {'cost': 6.25e-05, 'prompt_tokens': 62, 'completion_tokens': 21, 'total_tokens': 83}}, 'usage_excluding_cached_inference': {'total_cost': 6.25e-05, 'gpt-3.5-turbo-0125': {'cost': 6.25e-05, 'prompt_tokens': 62, 'completion_tokens': 21, 'total_tokens': 83}}}\n", 367 | "\n", 368 | "\n", 369 | "{'usage_including_cached_inference': {'total_cost': 0.0003295, 'gpt-3.5-turbo-0125': {'cost': 0.0003295, 'prompt_tokens': 302, 'completion_tokens': 119, 'total_tokens': 421}}, 'usage_excluding_cached_inference': {'total_cost': 0.0003295, 'gpt-3.5-turbo-0125': {'cost': 0.0003295, 'prompt_tokens': 302, 'completion_tokens': 119, 'total_tokens': 421}}}\n", 370 | "\n", 371 | "\n" 372 | ] 373 | } 374 | ], 375 | "source": [ 376 | "for chat_result in chat_results:\n", 377 | " print(chat_result.cost)\n", 378 | " print(\"\\n\")" 379 | ] 380 | } 381 | ], 382 | "metadata": { 383 | "kernelspec": { 384 | "display_name": "Python 3 (ipykernel)", 385 | "language": "python", 386 | "name": "python3" 387 | }, 388 | "language_info": { 389 | "codemirror_mode": { 390 | "name": "ipython", 391 | "version": 3 392 | }, 393 | "file_extension": ".py", 394 | "mimetype": "text/x-python", 395 | "name": "python", 396 | "nbconvert_exporter": "python", 397 | "pygments_lexer": "ipython3", 398 | "version": "3.11.9" 399 | } 400 | }, 401 | "nbformat": 4, 402 | "nbformat_minor": 5 403 | } 404 | -------------------------------------------------------------------------------- /L3_Reflection_and_Blogpost_Writing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "fc3024e2", 6 | "metadata": {}, 7 | "source": [ 8 | "# Lesson 3: Reflection and Blogpost Writing" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "3b0cc42f", 14 | "metadata": {}, 15 | "source": [ 16 | "## Setup" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "id": "96d39be0-eaf3-456d-8613-ba21099ed36b", 23 | "metadata": { 24 | "height": 29 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "llm_config = {\"model\": \"gpt-3.5-turbo\"}" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "id": "0969e6bb", 34 | "metadata": {}, 35 | "source": [ 36 | "## The task!" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 2, 42 | "id": "e8074032-3690-4de9-ad08-ea8323cb441b", 43 | "metadata": { 44 | "height": 114 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "task = '''\n", 49 | " Write a concise but engaging blogpost about\n", 50 | " DeepLearning.AI. Make sure the blogpost is\n", 51 | " within 100 words.\n", 52 | " '''\n" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "id": "1987f023", 58 | "metadata": {}, 59 | "source": [ 60 | "## Create a writer agent" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 3, 66 | "id": "fe0f0a47-a9fe-43a0-b7b1-79922e4c4ac8", 67 | "metadata": { 68 | "height": 194 69 | }, 70 | "outputs": [], 71 | "source": [ 72 | "import autogen\n", 73 | "\n", 74 | "writer = autogen.AssistantAgent(\n", 75 | " name=\"Writer\",\n", 76 | " system_message=\"You are a writer. You write engaging and concise \" \n", 77 | " \"blogpost (with title) on given topics. You must polish your \"\n", 78 | " \"writing based on the feedback you receive and give a refined \"\n", 79 | " \"version. Only return your final work without additional comments.\",\n", 80 | " llm_config=llm_config,\n", 81 | ")" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 4, 87 | "id": "7c7b4d8d-40f7-4a05-8958-25d20054de3a", 88 | "metadata": { 89 | "height": 41 90 | }, 91 | "outputs": [], 92 | "source": [ 93 | "reply = writer.generate_reply(messages=[{\"content\": task, \"role\": \"user\"}])" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 5, 99 | "id": "c501c97d-e338-4f36-a384-6ec45983cf77", 100 | "metadata": { 101 | "height": 29 102 | }, 103 | "outputs": [ 104 | { 105 | "name": "stdout", 106 | "output_type": "stream", 107 | "text": [ 108 | "Title: Unveiling the Power of DeepLearning.AI\n", 109 | "\n", 110 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n" 111 | ] 112 | } 113 | ], 114 | "source": [ 115 | "print(reply)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "id": "49658114", 121 | "metadata": {}, 122 | "source": [ 123 | "## Adding reflection \n", 124 | "\n", 125 | "Create a critic agent to reflect on the work of the writer agent." 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 6, 131 | "id": "c7fcd1c7-51ec-4915-8e97-bac03565c4c7", 132 | "metadata": { 133 | "height": 160 134 | }, 135 | "outputs": [], 136 | "source": [ 137 | "critic = autogen.AssistantAgent(\n", 138 | " name=\"Critic\",\n", 139 | " is_termination_msg=lambda x: x.get(\"content\", \"\").find(\"TERMINATE\") >= 0,\n", 140 | " llm_config=llm_config,\n", 141 | " system_message=\"You are a critic. You review the work of \"\n", 142 | " \"the writer and provide constructive \"\n", 143 | " \"feedback to help improve the quality of the content.\",\n", 144 | ")" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 7, 150 | "id": "899d5fdb-6081-470b-b287-8cf8b8142d0d", 151 | "metadata": { 152 | "height": 114 153 | }, 154 | "outputs": [ 155 | { 156 | "name": "stdout", 157 | "output_type": "stream", 158 | "text": [ 159 | "\u001b[33mCritic\u001b[0m (to Writer):\n", 160 | "\n", 161 | "\n", 162 | " Write a concise but engaging blogpost about\n", 163 | " DeepLearning.AI. Make sure the blogpost is\n", 164 | " within 100 words.\n", 165 | " \n", 166 | "\n", 167 | "--------------------------------------------------------------------------------\n", 168 | "\u001b[33mWriter\u001b[0m (to Critic):\n", 169 | "\n", 170 | "Title: Unveiling the Power of DeepLearning.AI\n", 171 | "\n", 172 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n", 173 | "\n", 174 | "--------------------------------------------------------------------------------\n", 175 | "\u001b[33mCritic\u001b[0m (to Writer):\n", 176 | "\n", 177 | "Your blogpost is off to a great start with a captivating introduction and a clear overview of DeepLearning.AI. To enhance the piece, consider incorporating specific examples of the courses offered or highlighting success stories of individuals who have benefited from the platform. Additionally, providing some insights into the methodology or unique features of DeepLearning.AI could make the content more informative and engaging for readers interested in AI. Overall, expanding on the program's impact or potential future developments would enrich the blogpost and make it even more enticing to readers. Great job so far!\n", 178 | "\n", 179 | "--------------------------------------------------------------------------------\n", 180 | "\u001b[33mWriter\u001b[0m (to Critic):\n", 181 | "\n", 182 | "Title: Unleashing the Potential of DeepLearning.AI: A Game-Changer in AI Education\n", 183 | "\n", 184 | "Step into the dynamic world of DeepLearning.AI, where innovation meets education to shape the future of artificial intelligence. Curated by the visionary Andrew Ng, this platform offers a diverse range of courses, from \"AI For Everyone\" to \"Deep Learning Specialization,\" catering to beginners and experts alike. Dive into hands-on projects, learn from industry leaders, and witness real-world applications of AI. Explore success stories of individuals who have transformed their careers with DeepLearning.AI. Experience the interactive learning approach and unlock the endless possibilities that AI has to offer. Join the AI revolution today with DeepLearning.AI.\n", 185 | "\n", 186 | "--------------------------------------------------------------------------------\n" 187 | ] 188 | } 189 | ], 190 | "source": [ 191 | "res = critic.initiate_chat(\n", 192 | " recipient=writer,\n", 193 | " message=task,\n", 194 | " max_turns=2,\n", 195 | " summary_method=\"last_msg\"\n", 196 | ")" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "e7b76449", 202 | "metadata": {}, 203 | "source": [ 204 | "## Nested chat" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 8, 210 | "id": "401ecf92-63e9-40ff-aeed-1c404352e4ab", 211 | "metadata": { 212 | "height": 211 213 | }, 214 | "outputs": [], 215 | "source": [ 216 | "SEO_reviewer = autogen.AssistantAgent(\n", 217 | " name=\"SEO Reviewer\",\n", 218 | " llm_config=llm_config,\n", 219 | " system_message=\"You are an SEO reviewer, known for \"\n", 220 | " \"your ability to optimize content for search engines, \"\n", 221 | " \"ensuring that it ranks well and attracts organic traffic. \" \n", 222 | " \"Make sure your suggestion is concise (within 3 bullet points), \"\n", 223 | " \"concrete and to the point. \"\n", 224 | " \"Begin the review by stating your role.\",\n", 225 | ")\n" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": 9, 231 | "id": "f85acb81-7ab9-4c84-b8bb-6fbae3dce848", 232 | "metadata": { 233 | "height": 194 234 | }, 235 | "outputs": [], 236 | "source": [ 237 | "legal_reviewer = autogen.AssistantAgent(\n", 238 | " name=\"Legal Reviewer\",\n", 239 | " llm_config=llm_config,\n", 240 | " system_message=\"You are a legal reviewer, known for \"\n", 241 | " \"your ability to ensure that content is legally compliant \"\n", 242 | " \"and free from any potential legal issues. \"\n", 243 | " \"Make sure your suggestion is concise (within 3 bullet points), \"\n", 244 | " \"concrete and to the point. \"\n", 245 | " \"Begin the review by stating your role.\",\n", 246 | ")" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": 10, 252 | "id": "d46a177a-8088-4956-8d2b-3e916b8ca5e9", 253 | "metadata": { 254 | "height": 194 255 | }, 256 | "outputs": [], 257 | "source": [ 258 | "ethics_reviewer = autogen.AssistantAgent(\n", 259 | " name=\"Ethics Reviewer\",\n", 260 | " llm_config=llm_config,\n", 261 | " system_message=\"You are an ethics reviewer, known for \"\n", 262 | " \"your ability to ensure that content is ethically sound \"\n", 263 | " \"and free from any potential ethical issues. \" \n", 264 | " \"Make sure your suggestion is concise (within 3 bullet points), \"\n", 265 | " \"concrete and to the point. \"\n", 266 | " \"Begin the review by stating your role. \",\n", 267 | ")" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": 11, 273 | "id": "a7b2ad6f-8ba6-436a-9459-14ffbe8a32d3", 274 | "metadata": { 275 | "height": 126 276 | }, 277 | "outputs": [], 278 | "source": [ 279 | "meta_reviewer = autogen.AssistantAgent(\n", 280 | " name=\"Meta Reviewer\",\n", 281 | " llm_config=llm_config,\n", 282 | " system_message=\"You are a meta reviewer, you aggragate and review \"\n", 283 | " \"the work of other reviewers and give a final suggestion on the content.\",\n", 284 | ")" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "id": "913beca1", 290 | "metadata": {}, 291 | "source": [ 292 | "## Orchestrate the nested chats to solve the task" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 12, 298 | "id": "a11a70c7-19ca-4e5a-ad3d-f2b481fb5915", 299 | "metadata": { 300 | "height": 551 301 | }, 302 | "outputs": [], 303 | "source": [ 304 | "def reflection_message(recipient, messages, sender, config):\n", 305 | " return f'''Review the following content. \n", 306 | " \\n\\n {recipient.chat_messages_for_summary(sender)[-1]['content']}'''\n", 307 | "\n", 308 | "review_chats = [\n", 309 | " {\n", 310 | " \"recipient\": SEO_reviewer, \n", 311 | " \"message\": reflection_message, \n", 312 | " \"summary_method\": \"reflection_with_llm\",\n", 313 | " \"summary_args\": {\"summary_prompt\" : \n", 314 | " \"Return review into as JSON object only:\"\n", 315 | " \"{'Reviewer': '', 'Review': ''}. Here Reviewer should be your role\",},\n", 316 | " \"max_turns\": 1},\n", 317 | " {\n", 318 | " \"recipient\": legal_reviewer, \"message\": reflection_message, \n", 319 | " \"summary_method\": \"reflection_with_llm\",\n", 320 | " \"summary_args\": {\"summary_prompt\" : \n", 321 | " \"Return review into as JSON object only:\"\n", 322 | " \"{'Reviewer': '', 'Review': ''}.\",},\n", 323 | " \"max_turns\": 1},\n", 324 | " {\"recipient\": ethics_reviewer, \"message\": reflection_message, \n", 325 | " \"summary_method\": \"reflection_with_llm\",\n", 326 | " \"summary_args\": {\"summary_prompt\" : \n", 327 | " \"Return review into as JSON object only:\"\n", 328 | " \"{'reviewer': '', 'review': ''}\",},\n", 329 | " \"max_turns\": 1},\n", 330 | " {\"recipient\": meta_reviewer, \n", 331 | " \"message\": \"Aggregrate feedback from all reviewers and give final suggestions on the writing.\", \n", 332 | " \"max_turns\": 1},\n", 333 | "]\n" 334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "execution_count": 13, 339 | "id": "b3a40b66-5061-460d-ad9d-c0dbcfbba2e9", 340 | "metadata": { 341 | "height": 80 342 | }, 343 | "outputs": [], 344 | "source": [ 345 | "critic.register_nested_chats(\n", 346 | " review_chats,\n", 347 | " trigger=writer,\n", 348 | ")" 349 | ] 350 | }, 351 | { 352 | "cell_type": "markdown", 353 | "id": "43b8797d", 354 | "metadata": {}, 355 | "source": [ 356 | "**Note**: You might get a slightly different response than what's shown in the video. Feel free to try different task." 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 14, 362 | "id": "3b8dcac3-1e72-43b7-9d5a-1be740f6efd5", 363 | "metadata": { 364 | "height": 114 365 | }, 366 | "outputs": [ 367 | { 368 | "name": "stdout", 369 | "output_type": "stream", 370 | "text": [ 371 | "\u001b[33mCritic\u001b[0m (to Writer):\n", 372 | "\n", 373 | "\n", 374 | " Write a concise but engaging blogpost about\n", 375 | " DeepLearning.AI. Make sure the blogpost is\n", 376 | " within 100 words.\n", 377 | " \n", 378 | "\n", 379 | "--------------------------------------------------------------------------------\n", 380 | "\u001b[33mWriter\u001b[0m (to Critic):\n", 381 | "\n", 382 | "Title: Unveiling the Power of DeepLearning.AI\n", 383 | "\n", 384 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n", 385 | "\n", 386 | "--------------------------------------------------------------------------------\n", 387 | "\u001b[34m\n", 388 | "********************************************************************************\u001b[0m\n", 389 | "\u001b[34mStarting a new chat....\u001b[0m\n", 390 | "\u001b[34m\n", 391 | "********************************************************************************\u001b[0m\n", 392 | "\u001b[33mCritic\u001b[0m (to SEO Reviewer):\n", 393 | "\n", 394 | "Review the following content. \n", 395 | " \n", 396 | "\n", 397 | " Title: Unveiling the Power of DeepLearning.AI\n", 398 | "\n", 399 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n", 400 | "\n", 401 | "--------------------------------------------------------------------------------\n", 402 | "\u001b[33mSEO Reviewer\u001b[0m (to Critic):\n", 403 | "\n", 404 | "As an SEO reviewer:\n", 405 | "\n", 406 | "- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\n", 407 | " \n", 408 | "- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\n", 409 | " \n", 410 | "- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.\n", 411 | "\n", 412 | "--------------------------------------------------------------------------------\n", 413 | "\u001b[34m\n", 414 | "********************************************************************************\u001b[0m\n", 415 | "\u001b[34mStarting a new chat....\u001b[0m\n", 416 | "\u001b[34m\n", 417 | "********************************************************************************\u001b[0m\n", 418 | "\u001b[33mCritic\u001b[0m (to Legal Reviewer):\n", 419 | "\n", 420 | "Review the following content. \n", 421 | " \n", 422 | "\n", 423 | " Title: Unveiling the Power of DeepLearning.AI\n", 424 | "\n", 425 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n", 426 | "Context: \n", 427 | "{'Reviewer': 'SEO Reviewer', 'Review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 428 | "\n", 429 | "--------------------------------------------------------------------------------\n", 430 | "\u001b[33mLegal Reviewer\u001b[0m (to Critic):\n", 431 | "\n", 432 | "As a Legal Reviewer:\n", 433 | "\n", 434 | "1. Ensure compliance with trademark laws by verifying the use of the term \"DeepLearning.AI\" aligns with any existing copyrights or trademarks held by Andrew Ng or the DeepLearning.AI platform.\n", 435 | "2. Confirm the accuracy of any claims made regarding the effectiveness or outcomes of the DeepLearning.AI program to avoid potential false advertising or deceptive marketing implications.\n", 436 | "3. Evaluate the disclosure of any disclaimers or terms of use related to the platform's courses and projects to address liability and clarify user expectations.\n", 437 | "\n", 438 | "--------------------------------------------------------------------------------\n", 439 | "\u001b[34m\n", 440 | "********************************************************************************\u001b[0m\n", 441 | "\u001b[34mStarting a new chat....\u001b[0m\n", 442 | "\u001b[34m\n", 443 | "********************************************************************************\u001b[0m\n", 444 | "\u001b[33mCritic\u001b[0m (to Ethics Reviewer):\n", 445 | "\n", 446 | "Review the following content. \n", 447 | " \n", 448 | "\n", 449 | " Title: Unveiling the Power of DeepLearning.AI\n", 450 | "\n", 451 | "Welcome to the world of DeepLearning.AI, a transformative platform revolutionizing the way we perceive artificial intelligence. Created by the renowned Andrew Ng, this cutting-edge program offers a gateway to the realms of deep learning, empowering enthusiasts worldwide. From convolutional neural networks to natural language processing, DeepLearning.AI equips you with the skills needed to thrive in this data-driven era. Whether you're a novice or an expert in AI, this platform caters to all, providing interactive courses and hands-on projects. Embark on this enlightening journey and unlock the endless possibilities of AI with DeepLearning.AI.\n", 452 | "Context: \n", 453 | "{'Reviewer': 'SEO Reviewer', 'Review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 454 | "{'Reviewer': 'SEO Reviewer', 'Review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 455 | "\n", 456 | "--------------------------------------------------------------------------------\n", 457 | "\u001b[33mEthics Reviewer\u001b[0m (to Critic):\n", 458 | "\n", 459 | "As an ethics reviewer:\n", 460 | "\n", 461 | "- Ensure that the content accurately represents the capabilities and limitations of the DeepLearning.AI platform, avoiding any exaggerated or misleading claims about its effectiveness or outcomes.\n", 462 | "- Include a disclaimer highlighting any potential biases or conflicts of interest, particularly if there are any financial relationships between the content creator and the platform being promoted.\n", 463 | "- Respect user privacy by clearly stating how any personal data collected through the platform will be used and protected, following best practices for data security and transparency.\n", 464 | "\n", 465 | "--------------------------------------------------------------------------------\n", 466 | "\u001b[34m\n", 467 | "********************************************************************************\u001b[0m\n", 468 | "\u001b[34mStarting a new chat....\u001b[0m\n", 469 | "\u001b[34m\n", 470 | "********************************************************************************\u001b[0m\n" 471 | ] 472 | }, 473 | { 474 | "name": "stdout", 475 | "output_type": "stream", 476 | "text": [ 477 | "\u001b[33mCritic\u001b[0m (to Meta Reviewer):\n", 478 | "\n", 479 | "Aggregrate feedback from all reviewers and give final suggestions on the writing.\n", 480 | "Context: \n", 481 | "{'Reviewer': 'SEO Reviewer', 'Review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 482 | "{'Reviewer': 'SEO Reviewer', 'Review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 483 | "{'reviewer': 'SEO Reviewer', 'review': '- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\\n- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\\n- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.'}\n", 484 | "\n", 485 | "--------------------------------------------------------------------------------\n", 486 | "\u001b[33mMeta Reviewer\u001b[0m (to Critic):\n", 487 | "\n", 488 | "Aggregated Feedback from SEO Reviewers:\n", 489 | "- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\n", 490 | "- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\n", 491 | "- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.\n", 492 | "\n", 493 | "Final Suggestion:\n", 494 | "The SEO Reviewers provided consistent feedback on using relevant keywords, such as \"DeepLearning.AI,\" \"Andrew Ng,\" and related terms to improve search engine visibility. Implementing these suggestions will likely increase the visibility of the content and attract users interested in AI education. Applying SEO strategies effectively can enhance the organic reach of the page and potentially drive more traffic. It's important to carefully integrate these keywords naturally within the content to maintain readability and engagement while boosting search rankings.\n", 495 | "\n", 496 | "--------------------------------------------------------------------------------\n", 497 | "\u001b[33mCritic\u001b[0m (to Writer):\n", 498 | "\n", 499 | "Aggregated Feedback from SEO Reviewers:\n", 500 | "- Consider using keywords related to \"DeepLearning.AI\" in the title, such as \"DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\" to improve search engine visibility.\n", 501 | "- Include relevant terms like \"deep learning courses\" and \"AI training\" in the content to enhance organic search rankings and attract users looking for AI learning opportunities.\n", 502 | "- Add meta tags with keywords like \"Andrew Ng,\" \"DeepLearning.AI courses,\" and \"artificial intelligence\" to optimize the page further for search engines.\n", 503 | "\n", 504 | "Final Suggestion:\n", 505 | "The SEO Reviewers provided consistent feedback on using relevant keywords, such as \"DeepLearning.AI,\" \"Andrew Ng,\" and related terms to improve search engine visibility. Implementing these suggestions will likely increase the visibility of the content and attract users interested in AI education. Applying SEO strategies effectively can enhance the organic reach of the page and potentially drive more traffic. It's important to carefully integrate these keywords naturally within the content to maintain readability and engagement while boosting search rankings.\n", 506 | "\n", 507 | "--------------------------------------------------------------------------------\n", 508 | "\u001b[33mWriter\u001b[0m (to Critic):\n", 509 | "\n", 510 | "Title: DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\n", 511 | "\n", 512 | "Welcome to DeepLearning.AI, the ultimate hub for deep learning courses and AI training created by the renowned Andrew Ng. Dive into a world of convolutional neural networks and natural language processing, mastering the art of artificial intelligence. Whether you're a beginner or an AI expert, our interactive platform caters to all levels. Explore our curated courses, hands-on projects, and gain practical skills essential for the data-driven future. Join us on this transformative journey and unleash the power of AI with DeepLearning.AI. Embrace the future of technology today!\n", 513 | "\n", 514 | "--------------------------------------------------------------------------------\n" 515 | ] 516 | } 517 | ], 518 | "source": [ 519 | "res = critic.initiate_chat(\n", 520 | " recipient=writer,\n", 521 | " message=task,\n", 522 | " max_turns=2,\n", 523 | " summary_method=\"last_msg\"\n", 524 | ")" 525 | ] 526 | }, 527 | { 528 | "cell_type": "markdown", 529 | "id": "c5c833b0", 530 | "metadata": {}, 531 | "source": [ 532 | "## Get the summary" 533 | ] 534 | }, 535 | { 536 | "cell_type": "code", 537 | "execution_count": 15, 538 | "id": "68ef82ed-f102-4964-b7be-60e2f258a39b", 539 | "metadata": { 540 | "height": 29 541 | }, 542 | "outputs": [ 543 | { 544 | "name": "stdout", 545 | "output_type": "stream", 546 | "text": [ 547 | "Title: DeepLearning.AI: Revolutionizing AI Education by Andrew Ng\n", 548 | "\n", 549 | "Welcome to DeepLearning.AI, the ultimate hub for deep learning courses and AI training created by the renowned Andrew Ng. Dive into a world of convolutional neural networks and natural language processing, mastering the art of artificial intelligence. Whether you're a beginner or an AI expert, our interactive platform caters to all levels. Explore our curated courses, hands-on projects, and gain practical skills essential for the data-driven future. Join us on this transformative journey and unleash the power of AI with DeepLearning.AI. Embrace the future of technology today!\n" 550 | ] 551 | } 552 | ], 553 | "source": [ 554 | "print(res.summary)" 555 | ] 556 | } 557 | ], 558 | "metadata": { 559 | "kernelspec": { 560 | "display_name": "Python 3 (ipykernel)", 561 | "language": "python", 562 | "name": "python3" 563 | }, 564 | "language_info": { 565 | "codemirror_mode": { 566 | "name": "ipython", 567 | "version": 3 568 | }, 569 | "file_extension": ".py", 570 | "mimetype": "text/x-python", 571 | "name": "python", 572 | "nbconvert_exporter": "python", 573 | "pygments_lexer": "ipython3", 574 | "version": "3.11.9" 575 | } 576 | }, 577 | "nbformat": 4, 578 | "nbformat_minor": 5 579 | } 580 | -------------------------------------------------------------------------------- /L5_Coding_and_Financial_Analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "eacbb70c", 6 | "metadata": {}, 7 | "source": [ 8 | "# Lesson 5: Coding and Financial Analysis" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "babbc472", 14 | "metadata": {}, 15 | "source": [ 16 | "## Setup" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "id": "b5613ba9-d387-4b7d-9670-ec2b6e4353be", 23 | "metadata": { 24 | "height": 29 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "llm_config = {\"model\": \"gpt-4-turbo\"}" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "id": "e4e8fd85", 34 | "metadata": {}, 35 | "source": [ 36 | "## Define a code executor" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 2, 42 | "id": "fb90a672-4318-47de-a061-384e68dd75c9", 43 | "metadata": { 44 | "height": 29 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "from autogen.coding import LocalCommandLineCodeExecutor" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 3, 54 | "id": "b5cafee7-a0ac-4583-8201-5e19eb603138", 55 | "metadata": { 56 | "height": 80 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "executor = LocalCommandLineCodeExecutor(\n", 61 | " timeout=60,\n", 62 | " work_dir=\"coding\",\n", 63 | ")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "id": "3b62a1a0", 69 | "metadata": {}, 70 | "source": [ 71 | "## Create agents " 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 4, 77 | "id": "e6d9b3d6-9915-4fea-a853-512dfb77c4a5", 78 | "metadata": { 79 | "height": 29 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "from autogen import ConversableAgent, AssistantAgent" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "id": "b741b947", 89 | "metadata": {}, 90 | "source": [ 91 | "### 1. Agent with code executor configuration" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 5, 97 | "id": "57d286e5-b7b7-405a-b1d7-212c0cc84497", 98 | "metadata": { 99 | "height": 148 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "code_executor_agent = ConversableAgent(\n", 104 | " name=\"code_executor_agent\",\n", 105 | " llm_config=False,\n", 106 | " code_execution_config={\"executor\": executor},\n", 107 | " human_input_mode=\"ALWAYS\",\n", 108 | " default_auto_reply=\n", 109 | " \"Please continue. If everything is done, reply 'TERMINATE'.\",\n", 110 | ")" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "id": "c1eb1094", 116 | "metadata": {}, 117 | "source": [ 118 | "### 2. Agent with code writing capability" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 6, 124 | "id": "99c38c33-d335-48cd-96eb-cafe9ac70142", 125 | "metadata": { 126 | "height": 114 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "code_writer_agent = AssistantAgent(\n", 131 | " name=\"code_writer_agent\",\n", 132 | " llm_config=llm_config,\n", 133 | " code_execution_config=False,\n", 134 | " human_input_mode=\"NEVER\",\n", 135 | ")" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": 7, 141 | "id": "0fb1a62f-8dd2-4636-9103-b26ea27a29bd", 142 | "metadata": { 143 | "height": 41 144 | }, 145 | "outputs": [], 146 | "source": [ 147 | "code_writer_agent_system_message = code_writer_agent.system_message" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 8, 153 | "id": "7c8e9a04-55fb-4124-a9d0-3431274476d2", 154 | "metadata": { 155 | "height": 29 156 | }, 157 | "outputs": [ 158 | { 159 | "name": "stdout", 160 | "output_type": "stream", 161 | "text": [ 162 | "You are a helpful AI assistant.\n", 163 | "Solve tasks using your coding and language skills.\n", 164 | "In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n", 165 | " 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n", 166 | " 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\n", 167 | "Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\n", 168 | "When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\n", 169 | "If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\n", 170 | "If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\n", 171 | "When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.\n", 172 | "Reply \"TERMINATE\" in the end when everything is done.\n", 173 | " \n" 174 | ] 175 | } 176 | ], 177 | "source": [ 178 | "print(code_writer_agent_system_message)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "id": "f487b75a", 184 | "metadata": {}, 185 | "source": [ 186 | "## The task!\n", 187 | "\n", 188 | "Ask the two agents to collaborate on a stock analysis task." 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": 9, 194 | "id": "9cce0ba4-9fea-4d36-b4a0-a1e51fcd09a6", 195 | "metadata": { 196 | "height": 143 197 | }, 198 | "outputs": [], 199 | "source": [ 200 | "import datetime\n", 201 | "\n", 202 | "today = datetime.datetime.now().date()\n", 203 | "message = f\"Today is {today}. \"\\\n", 204 | "\"Create a plot showing stock gain YTD for NVDA and TLSA. \"\\\n", 205 | "\"Make sure the code is in markdown code block and save the figure\"\\\n", 206 | "\" to a file ytd_stock_gains.png.\"\"\"" 207 | ] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "id": "4d02df6c", 212 | "metadata": {}, 213 | "source": [ 214 | "

Note: In this lesson, you will use GPT 4 for better results. Please note that the lesson has a quota limit. If you want to explore the code in this lesson further, we recommend trying it locally with your own API key." 215 | ] 216 | }, 217 | { 218 | "cell_type": "markdown", 219 | "id": "77fc5f1b", 220 | "metadata": {}, 221 | "source": [ 222 | "**Note**: You might see a different set of outputs than those shown in the video. The agents collaborate to generate the code needed for your task, and they might produce code with errors in the process. However, they will ultimately provide a correct code in the end." 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 10, 228 | "id": "230bb9c0-9c80-498f-a05f-96cef11ddb4e", 229 | "metadata": { 230 | "height": 80 231 | }, 232 | "outputs": [ 233 | { 234 | "name": "stdout", 235 | "output_type": "stream", 236 | "text": [ 237 | "\u001b[33mcode_executor_agent\u001b[0m (to code_writer_agent):\n", 238 | "\n", 239 | "Today is 2024-06-04. Create a plot showing stock gain YTD for NVDA and TLSA. Make sure the code is in markdown code block and save the figure to a file ytd_stock_gains.png.\n", 240 | "\n", 241 | "--------------------------------------------------------------------------------\n", 242 | "\u001b[33mcode_writer_agent\u001b[0m (to code_executor_agent):\n", 243 | "\n", 244 | "We need to perform the following steps to achieve this:\n", 245 | "\n", 246 | "1. Fetch the historical stock data for NVDA (Nvidia Corporation) and TSLA (Tesla, Inc) starting from the first trading day of 2024.\n", 247 | "2. Calculate the Year-To-Date (YTD) gains for each of these stocks.\n", 248 | "3. Plot the data and save the figure.\n", 249 | "\n", 250 | "For fetching the historical stock data, we can use the `yfinance` library which provides easy-to-use access to Yahoo Finance's API. We will then use `matplotlib` to plot the data and save it to a file.\n", 251 | "\n", 252 | "Please run the following Python code to generate and save the plot showing YTD stock gains:\n", 253 | "\n", 254 | "```python\n", 255 | "# filename: ytd_stock_plot.py\n", 256 | "\n", 257 | "import yfinance as yf\n", 258 | "import matplotlib.pyplot as plt\n", 259 | "import pandas as pd\n", 260 | "from datetime import datetime\n", 261 | "\n", 262 | "# Fetch historical data from the start of 2024.\n", 263 | "start_date = \"2024-01-01\"\n", 264 | "today = \"2024-06-04\" # As mentioned by the user, using this fixed date\n", 265 | "\n", 266 | "# Symbols for Nvidia and Tesla\n", 267 | "symbols = ['NVDA', 'TSLA']\n", 268 | "\n", 269 | "# Download the stock data\n", 270 | "stocks = yf.download(symbols, start=start_date, end=today)\n", 271 | "\n", 272 | "# Calculate the percentage change from the year's open\n", 273 | "stock_close_prices = stocks['Close']\n", 274 | "ytd_gains = (stock_close_prices - stock_warice(stocks['Close'].iloc[0])) / stock_close_prices.iloc[0] * 100\n", 275 | "\n", 276 | "# Create a plot\n", 277 | "plt.figure(figsize=(10, 5))\n", 278 | "plt.plot(ytd_gains, label=ytd_gains.columns)\n", 279 | "plt.title('YTD Stock Gains for NVDA and TSLA as of 2024-06-04')\n", 280 | "plt.xlabel('Date')\n", 281 | "plt.ylabel('Gain (%)')\n", 282 | "plt.legend()\n", 283 | "plt.grid(True)\n", 284 | "\n", 285 | "# Save the plot to a file\n", 286 | "plt.savefig('ytd_stock_gains.png')\n", 287 | "plt.show()\n", 288 | "```\n", 289 | "\n", 290 | "Ensure you have both `yfinance` and `matplotlib` installed. You can install them using `pip` if you haven't already:\n", 291 | "\n", 292 | "```sh\n", 293 | "pip install yfinance matplotlib\n", 294 | "```\n", 295 | "\n", 296 | "Once the setup is complete, run the code using a Python interpreter. This will produce the plot and save it as `ytd_stock_gains.png` as requested.\n", 297 | "\n", 298 | "--------------------------------------------------------------------------------\n", 299 | "Provide feedback to code_writer_agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: good\n", 300 | "\u001b[33mcode_executor_agent\u001b[0m (to code_writer_agent):\n", 301 | "\n", 302 | "good\n", 303 | "\n", 304 | "--------------------------------------------------------------------------------\n", 305 | "\u001b[33mcode_writer_agent\u001b[0m (to code_executor_agent):\n", 306 | "\n", 307 | "TERMINATE\n", 308 | "\n", 309 | "--------------------------------------------------------------------------------\n", 310 | "Provide feedback to code_writer_agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n" 311 | ] 312 | } 313 | ], 314 | "source": [ 315 | "chat_result = code_executor_agent.initiate_chat(\n", 316 | " code_writer_agent,\n", 317 | " message=message,\n", 318 | ")" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "id": "9b0cbbdd", 324 | "metadata": {}, 325 | "source": [ 326 | "## Let's see the plot!\n", 327 | "\n", 328 | "**Note**: \n", 329 | "* Your plot might differ from the one shown in the video because the LLM's freestyle code generation could choose a different plot type, such as a bar plot. \n", 330 | "* You can re-run the previous cell and check the generated code. If it produces a bar plot, remember you can directly specify your preference by asking for a specific plot type instead of a bar plot." 331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": null, 336 | "id": "a6e97526", 337 | "metadata": { 338 | "height": 80 339 | }, 340 | "outputs": [], 341 | "source": [ 342 | "import os\n", 343 | "from IPython.display import Image\n", 344 | "\n", 345 | "Image(os.path.join(\"coding\", \"ytd_stock_gains.png\"))" 346 | ] 347 | }, 348 | { 349 | "cell_type": "markdown", 350 | "id": "ccf49d7e", 351 | "metadata": {}, 352 | "source": [ 353 | "**Note**: The agent will automatically save the code in a .py file and the plot in a .png file. To access and check the files generated by the agents, go to the `File` menu and select `Open....` Then, open the folder named `coding` to find all the generated files." 354 | ] 355 | }, 356 | { 357 | "cell_type": "markdown", 358 | "id": "5aded8dc", 359 | "metadata": {}, 360 | "source": [ 361 | "## User-Defined Functions\n", 362 | "\n", 363 | "Instead of asking LLM to generate the code for downloading stock data \n", 364 | "and plotting charts each time, you can define functions for these two tasks and have LLM call these functions in the code." 365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": 12, 370 | "id": "bca089a7-d85a-40ad-b33d-6a5318076c6e", 371 | "metadata": { 372 | "height": 386 373 | }, 374 | "outputs": [], 375 | "source": [ 376 | "def get_stock_prices(stock_symbols, start_date, end_date):\n", 377 | " \"\"\"Get the stock prices for the given stock symbols between\n", 378 | " the start and end dates.\n", 379 | "\n", 380 | " Args:\n", 381 | " stock_symbols (str or list): The stock symbols to get the\n", 382 | " prices for.\n", 383 | " start_date (str): The start date in the format \n", 384 | " 'YYYY-MM-DD'.\n", 385 | " end_date (str): The end date in the format 'YYYY-MM-DD'.\n", 386 | " \n", 387 | " Returns:\n", 388 | " pandas.DataFrame: The stock prices for the given stock\n", 389 | " symbols indexed by date, with one column per stock \n", 390 | " symbol.\n", 391 | " \"\"\"\n", 392 | " import yfinance\n", 393 | "\n", 394 | " stock_data = yfinance.download(\n", 395 | " stock_symbols, start=start_date, end=end_date\n", 396 | " )\n", 397 | " return stock_data.get(\"Close\")" 398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": 13, 403 | "id": "5dff50a6-39ae-46a4-9b9c-e98c1550472b", 404 | "metadata": { 405 | "height": 335 406 | }, 407 | "outputs": [], 408 | "source": [ 409 | "def plot_stock_prices(stock_prices, filename):\n", 410 | " \"\"\"Plot the stock prices for the given stock symbols.\n", 411 | "\n", 412 | " Args:\n", 413 | " stock_prices (pandas.DataFrame): The stock prices for the \n", 414 | " given stock symbols.\n", 415 | " \"\"\"\n", 416 | " import matplotlib.pyplot as plt\n", 417 | "\n", 418 | " plt.figure(figsize=(10, 5))\n", 419 | " for column in stock_prices.columns:\n", 420 | " plt.plot(\n", 421 | " stock_prices.index, stock_prices[column], label=column\n", 422 | " )\n", 423 | " plt.title(\"Stock Prices\")\n", 424 | " plt.xlabel(\"Date\")\n", 425 | " plt.ylabel(\"Price\")\n", 426 | " plt.grid(True)\n", 427 | " plt.savefig(filename)" 428 | ] 429 | }, 430 | { 431 | "cell_type": "markdown", 432 | "id": "63ff7175", 433 | "metadata": {}, 434 | "source": [ 435 | "### Create a new executor with the user-defined functions" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": 14, 441 | "id": "452d315f-681d-4418-9cd0-653cc8d6a668", 442 | "metadata": { 443 | "height": 97 444 | }, 445 | "outputs": [], 446 | "source": [ 447 | "executor = LocalCommandLineCodeExecutor(\n", 448 | " timeout=60,\n", 449 | " work_dir=\"coding\",\n", 450 | " functions=[get_stock_prices, plot_stock_prices],\n", 451 | ")" 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": 15, 457 | "id": "b95b30ad-1e6d-4d88-9a6c-2b0f11b625e4", 458 | "metadata": { 459 | "height": 58 460 | }, 461 | "outputs": [ 462 | { 463 | "name": "stdout", 464 | "output_type": "stream", 465 | "text": [ 466 | "You are a helpful AI assistant.\n", 467 | "Solve tasks using your coding and language skills.\n", 468 | "In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n", 469 | " 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n", 470 | " 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\n", 471 | "Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\n", 472 | "When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\n", 473 | "If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\n", 474 | "If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\n", 475 | "When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.\n", 476 | "Reply \"TERMINATE\" in the end when everything is done.\n", 477 | " You have access to the following user defined functions. They can be accessed from the module called `functions` by their function names.\n", 478 | "\n", 479 | "For example, if there was a function called `foo` you could import it by writing `from functions import foo`\n", 480 | "\n", 481 | "def get_stock_prices(stock_symbols, start_date, end_date):\n", 482 | " \"\"\"Get the stock prices for the given stock symbols between\n", 483 | " the start and end dates.\n", 484 | "\n", 485 | " Args:\n", 486 | " stock_symbols (str or list): The stock symbols to get the\n", 487 | " prices for.\n", 488 | " start_date (str): The start date in the format \n", 489 | " 'YYYY-MM-DD'.\n", 490 | " end_date (str): The end date in the format 'YYYY-MM-DD'.\n", 491 | "\n", 492 | " Returns:\n", 493 | " pandas.DataFrame: The stock prices for the given stock\n", 494 | " symbols indexed by date, with one column per stock \n", 495 | " symbol.\n", 496 | " \"\"\"\n", 497 | " ...\n", 498 | "\n", 499 | "def plot_stock_prices(stock_prices, filename):\n", 500 | " \"\"\"Plot the stock prices for the given stock symbols.\n", 501 | "\n", 502 | " Args:\n", 503 | " stock_prices (pandas.DataFrame): The stock prices for the \n", 504 | " given stock symbols.\n", 505 | " \"\"\"\n", 506 | " ...\n" 507 | ] 508 | } 509 | ], 510 | "source": [ 511 | "code_writer_agent_system_message += executor.format_functions_for_prompt()\n", 512 | "print(code_writer_agent_system_message)" 513 | ] 514 | }, 515 | { 516 | "cell_type": "markdown", 517 | "id": "64d3cc77", 518 | "metadata": {}, 519 | "source": [ 520 | "### Let's update the agents with the new system message" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": 16, 526 | "id": "615c02b0-0175-442e-bed2-52bb35b8945c", 527 | "metadata": { 528 | "height": 131 529 | }, 530 | "outputs": [], 531 | "source": [ 532 | "code_writer_agent = ConversableAgent(\n", 533 | " name=\"code_writer_agent\",\n", 534 | " system_message=code_writer_agent_system_message,\n", 535 | " llm_config=llm_config,\n", 536 | " code_execution_config=False,\n", 537 | " human_input_mode=\"NEVER\",\n", 538 | ")" 539 | ] 540 | }, 541 | { 542 | "cell_type": "code", 543 | "execution_count": 17, 544 | "id": "a9a725b0-9bb6-47e3-85df-27d9aadc9290", 545 | "metadata": { 546 | "height": 148 547 | }, 548 | "outputs": [], 549 | "source": [ 550 | "code_executor_agent = ConversableAgent(\n", 551 | " name=\"code_executor_agent\",\n", 552 | " llm_config=False,\n", 553 | " code_execution_config={\"executor\": executor},\n", 554 | " human_input_mode=\"ALWAYS\",\n", 555 | " default_auto_reply=\n", 556 | " \"Please continue. If everything is done, reply 'TERMINATE'.\",\n", 557 | ")" 558 | ] 559 | }, 560 | { 561 | "cell_type": "markdown", 562 | "id": "e8d7b57c", 563 | "metadata": {}, 564 | "source": [ 565 | "### Start the same task again!" 566 | ] 567 | }, 568 | { 569 | "cell_type": "code", 570 | "execution_count": 18, 571 | "id": "8895bb51-bcdd-4f63-a49f-858fc25c474f", 572 | "metadata": { 573 | "height": 131 574 | }, 575 | "outputs": [ 576 | { 577 | "name": "stdout", 578 | "output_type": "stream", 579 | "text": [ 580 | "\u001b[33mcode_executor_agent\u001b[0m (to code_writer_agent):\n", 581 | "\n", 582 | "Today is 2024-06-04.Download the stock prices YTD for NVDA and TSLA and createa plot. Make sure the code is in markdown code block and save the figure to a file stock_prices_YTD_plot.png.\n", 583 | "\n", 584 | "--------------------------------------------------------------------------------\n", 585 | "\u001b[33mcode_writer_agent\u001b[0m (to code_executor_agent):\n", 586 | "\n", 587 | "To accomplish the task, we will follow these steps:\n", 588 | "\n", 589 | "1. Use the `get_stock_prices` function to retrieve the year-to-date (YTD) stock prices for NVIDIA (NVDA) and Tesla (TSLA), starting from 2024-01-01 to today's date 2024-06-04.\n", 590 | "2. Use the `plot_stock_prices` function to plot these prices and save the output to a file named `stock_prices_YTD_plot.png`.\n", 591 | "\n", 592 | "Here is the Python code to execute these steps:\n", 593 | "\n", 594 | "```python\n", 595 | "# filename: fetch_and_plot_stocks.py\n", 596 | "from functions import get_stock_prices, plot_stock_spacing_prices\n", 597 | "\n", 598 | "# Set the stock symbols and date range\n", 599 | "stock_symbols = ['NVDA', 'TSLA']\n", 600 | "start_date = '2024-01-01'\n", 601 | "end_date = '2024-06-04'\n", 602 | "\n", 603 | "# Fetch the YTD stock prices\n", 604 | "stock_prices = get_stock_prices(stock_symbols, start_date, end_date)\n", 605 | "\n", 606 | "# Plot the stock prices and save to a file\n", 607 | "plot_stock_prices(stock_prices, 'stock_prices_YTD_plot.png')\n", 608 | "```\n", 609 | "\n", 610 | "Please save this script and execute it to generate the plot. The plot will be saved as 'stock Prices_YTD_plot.png', and you can view it in your current directory after execution.\n", 611 | "\n", 612 | "--------------------------------------------------------------------------------\n", 613 | "Provide feedback to code_writer_agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: good\n", 614 | "\u001b[33mcode_executor_agent\u001b[0m (to code_writer_agent):\n", 615 | "\n", 616 | "good\n", 617 | "\n", 618 | "--------------------------------------------------------------------------------\n", 619 | "\u001b[33mcode_writer_agent\u001b[0m (to code_executor_agent):\n", 620 | "\n", 621 | "Great! If you need further assistance or have additional questions, feel free to ask. Otherwise, if everything is completed to your satisfaction, you may conclude this session.\n", 622 | "\n", 623 | "TERMINATE\n", 624 | "\n", 625 | "--------------------------------------------------------------------------------\n", 626 | "Provide feedback to code_writer_agent. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: exit\n" 627 | ] 628 | } 629 | ], 630 | "source": [ 631 | "chat_result = code_executor_agent.initiate_chat(\n", 632 | " code_writer_agent,\n", 633 | " message=f\"Today is {today}.\"\n", 634 | " \"Download the stock prices YTD for NVDA and TSLA and create\"\n", 635 | " \"a plot. Make sure the code is in markdown code block and \"\n", 636 | " \"save the figure to a file stock_prices_YTD_plot.png.\",\n", 637 | ")" 638 | ] 639 | }, 640 | { 641 | "cell_type": "markdown", 642 | "id": "3c4bb718", 643 | "metadata": {}, 644 | "source": [ 645 | "### Plot the results" 646 | ] 647 | }, 648 | { 649 | "cell_type": "markdown", 650 | "id": "c65bac5e", 651 | "metadata": {}, 652 | "source": [ 653 | "**Note**: The agent will automatically save the code in a .py file and the plot in a .png file. To access and check the files generated by the agents, go to the `File` menu and select `Open....` Then, open the folder named `coding` to find all the generated files." 654 | ] 655 | }, 656 | { 657 | "cell_type": "code", 658 | "execution_count": null, 659 | "id": "9d635ff7", 660 | "metadata": { 661 | "height": 29 662 | }, 663 | "outputs": [], 664 | "source": [ 665 | "Image(os.path.join(\"coding\", \"stock_prices_YTD_plot.png\"))" 666 | ] 667 | } 668 | ], 669 | "metadata": { 670 | "kernelspec": { 671 | "display_name": "Python 3 (ipykernel)", 672 | "language": "python", 673 | "name": "python3" 674 | }, 675 | "language_info": { 676 | "codemirror_mode": { 677 | "name": "ipython", 678 | "version": 3 679 | }, 680 | "file_extension": ".py", 681 | "mimetype": "text/x-python", 682 | "name": "python", 683 | "nbconvert_exporter": "python", 684 | "pygments_lexer": "ipython3", 685 | "version": "3.11.9" 686 | } 687 | }, 688 | "nbformat": 4, 689 | "nbformat_minor": 5 690 | } 691 | -------------------------------------------------------------------------------- /L6-Planning_and_Stock_Report_Generation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "0b9a3d83", 6 | "metadata": {}, 7 | "source": [ 8 | "# Lesson 6: Planning and Stock Report Generation" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "8d5f1491", 14 | "metadata": {}, 15 | "source": [ 16 | "## Setup" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "id": "ad72cb6c-a916-40f7-960c-501c6309037f", 23 | "metadata": { 24 | "height": 29 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "llm_config={\"model\": \"gpt-4-turbo\"}" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "id": "d09b03c3", 34 | "metadata": {}, 35 | "source": [ 36 | "## The task!" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 2, 42 | "id": "01c1772f-2583-4c7a-a8a0-dd2946eea022", 43 | "metadata": { 44 | "height": 46 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "task = \"Write a blogpost about the stock price performance of \"\\\n", 49 | "\"Nvidia in the past month. Today's date is 2024-04-23.\"" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "id": "be3ca91b", 55 | "metadata": {}, 56 | "source": [ 57 | "## Build a group chat\n", 58 | "\n", 59 | "This group chat will include these agents:\n", 60 | "\n", 61 | "1. **User_proxy** or **Admin**: to allow the user to comment on the report and ask the writer to refine it.\n", 62 | "2. **Planner**: to determine relevant information needed to complete the task.\n", 63 | "3. **Engineer**: to write code using the defined plan by the planner.\n", 64 | "4. **Executor**: to execute the code written by the engineer.\n", 65 | "5. **Writer**: to write the report." 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 3, 71 | "id": "bd7befab-9487-4c50-8eca-1e2e30dbf7d6", 72 | "metadata": { 73 | "height": 29 74 | }, 75 | "outputs": [], 76 | "source": [ 77 | "import autogen" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 4, 83 | "id": "3258cb72-cb74-4eab-a387-ee409baf4947", 84 | "metadata": { 85 | "height": 148 86 | }, 87 | "outputs": [], 88 | "source": [ 89 | "user_proxy = autogen.ConversableAgent(\n", 90 | " name=\"Admin\",\n", 91 | " system_message=\"Give the task, and send \"\n", 92 | " \"instructions to writer to refine the blog post.\",\n", 93 | " code_execution_config=False,\n", 94 | " llm_config=llm_config,\n", 95 | " human_input_mode=\"ALWAYS\",\n", 96 | ")" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": 5, 102 | "id": "0832b856-f13d-4e16-b61f-69eb23e0ae71", 103 | "metadata": { 104 | "height": 284 105 | }, 106 | "outputs": [], 107 | "source": [ 108 | "planner = autogen.ConversableAgent(\n", 109 | " name=\"Planner\",\n", 110 | " system_message=\"Given a task, please determine \"\n", 111 | " \"what information is needed to complete the task. \"\n", 112 | " \"Please note that the information will all be retrieved using\"\n", 113 | " \" Python code. Please only suggest information that can be \"\n", 114 | " \"retrieved using Python code. \"\n", 115 | " \"After each step is done by others, check the progress and \"\n", 116 | " \"instruct the remaining steps. If a step fails, try to \"\n", 117 | " \"workaround\",\n", 118 | " description=\"Planner. Given a task, determine what \"\n", 119 | " \"information is needed to complete the task. \"\n", 120 | " \"After each step is done by others, check the progress and \"\n", 121 | " \"instruct the remaining steps\",\n", 122 | " llm_config=llm_config,\n", 123 | ")" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 6, 129 | "id": "2427f55d-fb7b-4f3c-85d4-dcd35547b397", 130 | "metadata": { 131 | "height": 114 132 | }, 133 | "outputs": [], 134 | "source": [ 135 | "engineer = autogen.AssistantAgent(\n", 136 | " name=\"Engineer\",\n", 137 | " llm_config=llm_config,\n", 138 | " description=\"An engineer that writes code based on the plan \"\n", 139 | " \"provided by the planner.\",\n", 140 | ")" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "id": "429ed9f1", 146 | "metadata": {}, 147 | "source": [ 148 | "**Note**: In this lesson, you'll use an alternative method of code execution by providing a dict config. However, you can always use the LocalCommandLineCodeExecutor if you prefer. For more details about code_execution_config, check this: https://microsoft.github.io/autogen/docs/reference/agentchat/conversable_agent/#__init__" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 7, 154 | "id": "040ac21f-b4bf-4bd6-ac58-fdb2c8f8de14", 155 | "metadata": { 156 | "height": 199 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "executor = autogen.ConversableAgent(\n", 161 | " name=\"Executor\",\n", 162 | " system_message=\"Execute the code written by the \"\n", 163 | " \"engineer and report the result.\",\n", 164 | " human_input_mode=\"NEVER\",\n", 165 | " code_execution_config={\n", 166 | " \"last_n_messages\": 3,\n", 167 | " \"work_dir\": \"coding\",\n", 168 | " \"use_docker\": False,\n", 169 | " },\n", 170 | ")" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 8, 176 | "id": "d6367fb4-a81b-411c-a339-8dd3d1d41483", 177 | "metadata": { 178 | "height": 199 179 | }, 180 | "outputs": [], 181 | "source": [ 182 | "writer = autogen.ConversableAgent(\n", 183 | " name=\"Writer\",\n", 184 | " llm_config=llm_config,\n", 185 | " system_message=\"Writer.\"\n", 186 | " \"Please write blogs in markdown format (with relevant titles)\"\n", 187 | " \" and put the content in pseudo ```md``` code block. \"\n", 188 | " \"You take feedback from the admin and refine your blog.\",\n", 189 | " description=\"Writer.\"\n", 190 | " \"Write blogs based on the code execution results and take \"\n", 191 | " \"feedback from the admin to refine the blog.\"\n", 192 | ")" 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "id": "a2ea86b5", 198 | "metadata": {}, 199 | "source": [ 200 | "## Define the group chat" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 9, 206 | "id": "cbf3c4b7-d4b8-4f97-8a25-e23107904ad5", 207 | "metadata": { 208 | "height": 97 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "groupchat = autogen.GroupChat(\n", 213 | " agents=[user_proxy, engineer, writer, executor, planner],\n", 214 | " messages=[],\n", 215 | " max_round=10,\n", 216 | ")" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": 10, 222 | "id": "bfcabc20-9316-45b7-852d-42e14550a956", 223 | "metadata": { 224 | "height": 80 225 | }, 226 | "outputs": [], 227 | "source": [ 228 | "manager = autogen.GroupChatManager(\n", 229 | " groupchat=groupchat, llm_config=llm_config\n", 230 | ")\n" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "id": "b51eaffa", 236 | "metadata": {}, 237 | "source": [ 238 | "## Start the group chat!" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "id": "5dd69615", 244 | "metadata": {}, 245 | "source": [ 246 | "

Note: In this lesson, you will use GPT 4 for better results. Please note that the lesson has a quota limit. If you want to explore the code in this lesson further, we recommend trying it locally with your own API key." 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": 11, 252 | "id": "a9899e8f-2971-487c-9d0e-932c36b3b75a", 253 | "metadata": { 254 | "height": 80 255 | }, 256 | "outputs": [ 257 | { 258 | "name": "stdout", 259 | "output_type": "stream", 260 | "text": [ 261 | "\u001b[33mAdmin\u001b[0m (to chat_manager):\n", 262 | "\n", 263 | "Write a blogpost about the stock price performance of Nvidia in the past month. Today's date is 2024-04-23.\n", 264 | "\n", 265 | "--------------------------------------------------------------------------------\n", 266 | "\u001b[31m\n", 267 | ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", 268 | "\u001b[33mPlanner\u001b[0m (to chat_manager):\n", 269 | "\n", 270 | "To write a blog post about Nvidia's stock price performance over the past month, we will need to gather, analyze, and present the following information using Python:\n", 271 | "\n", 272 | "1. **Historical Stock Prices for Nvidia**: Retrieve the daily closing prices of Nvidia stock from March 23, 2024, to April 23, 2024. This data will enable us to analyze the price movement and significant fluctuations within the timeframe.\n", 273 | "\n", 274 | "2. **Significant Events**: Identify any significant events within the past month that might have influenced the stock prices of Nvidia. This could include product launches, financial earnings reports, major partnerships, or industry changes.\n", 275 | "\n", 276 | "3. **Comparison with Market Trends**: Compare Nvidia's stock performance with general market indices such as the S&P 500 or NASDAQ to see if the stock's movements align with or diverge from broader market trends.\n", 277 | "\n", 278 | "### Steps to Complete Using Python:\n", 279 | "\n", 280 | "#### Step 1: Retrieve Historical Stock Prices\n", 281 | "- **Tools and Libraries Needed**: `yfinance` library can be used to fetch historical stock data for Nvidia (ticker: NVDA).\n", 282 | "- **Expected Output**: DataFrame with dates and corresponding closing stock prices.\n", 283 | "\n", 284 | "#### Step 2: Identify Significant Events\n", 285 | "- **Tools and Libraries Needed**: Web scraping tools like `BeautifulSoup` or APIs that provide news headlines or events related to Nvidia.\n", 286 | "- **Expected Output**: List or summary of significant events affecting Nvidia in the last month.\n", 287 | "\n", 288 | "#### Step 3: Comparison with Market Trends\n", 289 | "- **Tools and Libraries Needed**: Use `yfinance` to also fetch historical data for market indices (e.g., NASDAQ).\n", 290 | "- **Expected Output**: Comparative analysis, possibly visualized using libraries like `matplotlib` or `seaborn`.\n", 291 | "\n", 292 | "Please start with Step 1: Retrieving the historical stock prices for Nvidia using `yfinance`. After this task is completed, please share the data so we can proceed with analyzing the information and preparing the draft of the blog post. Let me know once you have the data or if you encounter any issues!\n", 293 | "\n", 294 | "--------------------------------------------------------------------------------\n", 295 | "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", 296 | "\n", 297 | "To proceed with retrieving the historical stock prices for Nvidia, you can use the following Python code. This script utilizes the `yfinance` library to fetch stock prices for Nvidia (ticker: NVDA) from March 23, 2024, to April 23, 2024. Please ensure that you have the `yfinance` library installed; if not, you can install it using `pip install yfinance`.\n", 298 | "\n", 299 | "### Python Code to Fetch Historical Stock Prices\n", 300 | "\n", 301 | "```python\n", 302 | "# Import the required library\n", 303 | "import yfinance as yf\n", 304 | "\n", 305 | "# Fetch the historical data for Nvidia for the specified period\n", 306 | "def fetch_stock_data():\n", 307 | " # Define the ticker symbol for Nvidia\n", 308 | " ticker_symbol = 'NVDA'\n", 309 | " \n", 310 | " # Set the time period for data retrieval\n", 311 | " start_date = '2024-03-23'\n", 312 | " end_date = '2024-04-23'\n", 313 | " \n", 314 | " # Get the historical data from Yahoo Finance\n", 315 | " data = yf.download(ticker_symbol, start=start_date, end=end_date)\n", 316 | " \n", 317 | " # Display the closing prices\n", 318 | " print(data['Close'])\n", 319 | "\n", 320 | "# Execute the function\n", 321 | "fetch_stock_data()\n", 322 | "```\n", 323 | "\n", 324 | "### Instructions:\n", 325 | "1. Copy the above code block.\n", 326 | "2. Run it in your Python environment.\n", 327 | "3. Share the results with me after you have successfully executed the code to continue with the analysis and the drafting of the blog post.\n", 328 | "\n", 329 | "If you run into any issues executing this code or installing `yfinance`, please let me know!\n", 330 | "\n", 331 | "--------------------------------------------------------------------------------\n", 332 | "\u001b[31m\n", 333 | ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", 334 | "\u001b[33mExecutor\u001b[0m (to chat_manager):\n", 335 | "\n", 336 | "exitcode: 0 (execution succeeded)\n", 337 | "Code output: \n", 338 | "Date\n", 339 | "2024-03-25 950.020020\n", 340 | "2024-03-26 925.609985\n", 341 | "2024-03-27 902.500000\n", 342 | "2024-03-28 903.559998\n", 343 | "2024-04-01 903.630005\n", 344 | "2024-04-02 894.520020\n", 345 | "2024-04-03 889.640015\n", 346 | "2024-04-04 859.049988\n", 347 | "2024-04-05 880.080017\n", 348 | "2024-04-08 871.330017\n", 349 | "2024-04-09 853.539978\n", 350 | "2024-04-10 870.390015\n", 351 | "2024-04-11 906.159973\n", 352 | "2024-04-12 881.859985\n", 353 | "2024-04-15 860.010010\n", 354 | "2024-04-16 874.150024\n", 355 | "2024-04-17 840.349976\n", 356 | "2024-04-18 846.710022\n", 357 | "2024-04-19 762.000000\n", 358 | "2024-04-22 795.179993\n", 359 | "Name: Close, dtype: float64\n", 360 | "\n", 361 | "\n", 362 | "--------------------------------------------------------------------------------\n", 363 | "\u001b[31m\n", 364 | ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", 365 | "\u001b[33mPlanner\u001b[0m (to chat_manager):\n", 366 | "\n", 367 | "Great! You have successfully retrieved the historical closing prices for Nvidia stock from March 23, 2024, to April 23, 2024. The next step is to identify any significant events during this period that may have influenced the stock's price.\n", 368 | "\n", 369 | "### Step 2: Identify Significant Events\n", 370 | "\n", 371 | "For this next task, you will either scrape news websites or use an API that can provide news headlines or relevant company events. Here’s a suggested approach using a Python library to potentially retrieve news:\n", 372 | "\n", 373 | "- **Using `newsapi-python`**: This library interfaces with the NewsAPI for fetching news headlines about a specific company. You will need an API key, which you can get by registering at [NewsAPI](https://newsapi.org/).\n", 374 | "\n", 375 | "#### Python Code to Fetch News Headlines for Nvidia\n", 376 | "\n", 377 | "```python\n", 378 | "from newsapi import NewsApiClient\n", 379 | "\n", 380 | "# Initialize the client with your API key\n", 381 | "newsapi = NewsApiClient(api_key='your_api_key_here')\n", 382 | "\n", 383 | "# Fetch news articles about Nvidia in the last month\n", 384 | "def fetch_news():\n", 385 | " all_articles = newsapi.get_everything(q='NVIDIA',\n", 386 | " from_param='2024-03-25',\n", 387 | " to='2024-04-22',\n", 388 | " language='en',\n", 389 | " sort_by='relevancy',\n", 390 | " page_size=10)\n", 391 | " # Print the titles of the articles\n", 392 | " for article in all_articles['articles']:\n", 393 | " print(article['title'], ' - ', article['publishedAt'])\n", 394 | "\n", 395 | "# Execute the function\n", 396 | "fetch_news()\n", 397 | "```\n", 398 | "\n", 399 | "### Instructions:\n", 400 | "1. Acquire an API key from NewsAPI and replace `'your_api_key_here'` with your key in the script.\n", 401 | "2. Copy and execute the provided Python code in your environment to retrieve news related to Nvidia.\n", 402 | "3. Share the news headlines along with their publication dates with me.\n", 403 | "\n", 404 | "This information will help analyze the potential impact of external factors on Nvidia's stock price movements.\n", 405 | "\n", 406 | "If you encounter difficulties in retrieving news, please inform me so we can explore alternative sources or methods.\n", 407 | "\n", 408 | "--------------------------------------------------------------------------------\n", 409 | "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", 410 | "\n", 411 | "TERMINATE\n", 412 | "\n", 413 | "--------------------------------------------------------------------------------\n" 414 | ] 415 | } 416 | ], 417 | "source": [ 418 | "groupchat_result = user_proxy.initiate_chat(\n", 419 | " manager,\n", 420 | " message=task,\n", 421 | ")" 422 | ] 423 | }, 424 | { 425 | "cell_type": "markdown", 426 | "id": "ba5d14b6", 427 | "metadata": {}, 428 | "source": [ 429 | "## Add a speaker selection policy" 430 | ] 431 | }, 432 | { 433 | "cell_type": "code", 434 | "execution_count": 12, 435 | "id": "c17e5a76-3c40-4e2a-b8f1-378f901f5b74", 436 | "metadata": { 437 | "height": 964 438 | }, 439 | "outputs": [], 440 | "source": [ 441 | "user_proxy = autogen.ConversableAgent(\n", 442 | " name=\"Admin\",\n", 443 | " system_message=\"Give the task, and send \"\n", 444 | " \"instructions to writer to refine the blog post.\",\n", 445 | " code_execution_config=False,\n", 446 | " llm_config=llm_config,\n", 447 | " human_input_mode=\"ALWAYS\",\n", 448 | ")\n", 449 | "\n", 450 | "planner = autogen.ConversableAgent(\n", 451 | " name=\"Planner\",\n", 452 | " system_message=\"Given a task, please determine \"\n", 453 | " \"what information is needed to complete the task. \"\n", 454 | " \"Please note that the information will all be retrieved using\"\n", 455 | " \" Python code. Please only suggest information that can be \"\n", 456 | " \"retrieved using Python code. \"\n", 457 | " \"After each step is done by others, check the progress and \"\n", 458 | " \"instruct the remaining steps. If a step fails, try to \"\n", 459 | " \"workaround\",\n", 460 | " description=\"Given a task, determine what \"\n", 461 | " \"information is needed to complete the task. \"\n", 462 | " \"After each step is done by others, check the progress and \"\n", 463 | " \"instruct the remaining steps\",\n", 464 | " llm_config=llm_config,\n", 465 | ")\n", 466 | "\n", 467 | "engineer = autogen.AssistantAgent(\n", 468 | " name=\"Engineer\",\n", 469 | " llm_config=llm_config,\n", 470 | " description=\"Write code based on the plan \"\n", 471 | " \"provided by the planner.\",\n", 472 | ")\n", 473 | "\n", 474 | "writer = autogen.ConversableAgent(\n", 475 | " name=\"Writer\",\n", 476 | " llm_config=llm_config,\n", 477 | " system_message=\"Writer. \"\n", 478 | " \"Please write blogs in markdown format (with relevant titles)\"\n", 479 | " \" and put the content in pseudo ```md``` code block. \"\n", 480 | " \"You take feedback from the admin and refine your blog.\",\n", 481 | " description=\"After all the info is available, \"\n", 482 | " \"write blogs based on the code execution results and take \"\n", 483 | " \"feedback from the admin to refine the blog. \",\n", 484 | ")\n", 485 | "\n", 486 | "executor = autogen.ConversableAgent(\n", 487 | " name=\"Executor\",\n", 488 | " description=\"Execute the code written by the \"\n", 489 | " \"engineer and report the result.\",\n", 490 | " human_input_mode=\"NEVER\",\n", 491 | " code_execution_config={\n", 492 | " \"last_n_messages\": 3,\n", 493 | " \"work_dir\": \"coding\",\n", 494 | " \"use_docker\": False,\n", 495 | " },\n", 496 | ")" 497 | ] 498 | }, 499 | { 500 | "cell_type": "code", 501 | "execution_count": 13, 502 | "id": "d1ea0a1b-78f9-4be3-9479-228d6e679268", 503 | "metadata": { 504 | "height": 233 505 | }, 506 | "outputs": [], 507 | "source": [ 508 | "groupchat = autogen.GroupChat(\n", 509 | " agents=[user_proxy, engineer, writer, executor, planner],\n", 510 | " messages=[],\n", 511 | " max_round=10,\n", 512 | " allowed_or_disallowed_speaker_transitions={\n", 513 | " user_proxy: [engineer, writer, executor, planner],\n", 514 | " engineer: [user_proxy, executor],\n", 515 | " writer: [user_proxy, planner],\n", 516 | " executor: [user_proxy, engineer, planner],\n", 517 | " planner: [user_proxy, engineer, writer],\n", 518 | " },\n", 519 | " speaker_transitions_type=\"allowed\",\n", 520 | ")" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": 14, 526 | "id": "a62255bb-644f-4af6-a640-08297e55de45", 527 | "metadata": { 528 | "height": 148 529 | }, 530 | "outputs": [ 531 | { 532 | "name": "stdout", 533 | "output_type": "stream", 534 | "text": [ 535 | "\u001b[33mAdmin\u001b[0m (to chat_manager):\n", 536 | "\n", 537 | "Write a blogpost about the stock price performance of Nvidia in the past month. Today's date is 2024-04-23.\n", 538 | "\n", 539 | "--------------------------------------------------------------------------------\n", 540 | "\u001b[31m\n", 541 | ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", 542 | "\u001b[33mPlanner\u001b[0m (to chat_manager):\n", 543 | "\n", 544 | "To write a blog post about Nvidia's stock price performance over the past month, we will need to gather, analyze, and present the following information using Python:\n", 545 | "\n", 546 | "1. **Historical Stock Prices for Nvidia**: Retrieve the daily closing prices of Nvidia stock from March 23, 2024, to April 23, 2024. This data will enable us to analyze the price movement and significant fluctuations within the timeframe.\n", 547 | "\n", 548 | "2. **Significant Events**: Identify any significant events within the past month that might have influenced the stock prices of Nvidia. This could include product launches, financial earnings reports, major partnerships, or industry changes.\n", 549 | "\n", 550 | "3. **Comparison with Market Trends**: Compare Nvidia's stock performance with general market indices such as the S&P 500 or NASDAQ to see if the stock's movements align with or diverge from broader market trends.\n", 551 | "\n", 552 | "### Steps to Complete Using Python:\n", 553 | "\n", 554 | "#### Step 1: Retrieve Historical Stock Prices\n", 555 | "- **Tools and Libraries Needed**: `yfinance` library can be used to fetch historical stock data for Nvidia (ticker: NVDA).\n", 556 | "- **Expected Output**: DataFrame with dates and corresponding closing stock prices.\n", 557 | "\n", 558 | "#### Step 2: Identify Significant Events\n", 559 | "- **Tools and Libraries Needed**: Web scraping tools like `BeautifulSoup` or APIs that provide news headlines or events related to Nvidia.\n", 560 | "- **Expected Output**: List or summary of significant events affecting Nvidia in the last month.\n", 561 | "\n", 562 | "#### Step 3: Comparison with Market Trends\n", 563 | "- **Tools and Libraries Needed**: Use `yfinance` to also fetch historical data for market indices (e.g., NASDAQ).\n", 564 | "- **Expected Output**: Comparative analysis, possibly visualized using libraries like `matplotlib` or `seaborn`.\n", 565 | "\n", 566 | "Please start with Step 1: Retrieving the historical stock prices for Nvidia using `yfinance`. After this task is completed, please share the data so we can proceed with analyzing the information and preparing the draft of the blog post. Let me know once you have the data or if you encounter any issues!\n", 567 | "\n", 568 | "--------------------------------------------------------------------------------\n", 569 | "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", 570 | "\n", 571 | "To proceed with retrieving the historical stock prices for Nvidia, you can use the following Python code. This script utilizes the `yfinance` library to fetch stock prices for Nvidia (ticker: NVDA) from March 23, 2024, to April 23, 2024. Please ensure that you have the `yfinance` library installed; if not, you can install it using `pip install yfinance`.\n", 572 | "\n", 573 | "### Python Code to Fetch Historical Stock Prices\n", 574 | "\n", 575 | "```python\n", 576 | "# Import the required library\n", 577 | "import yfinance as yf\n", 578 | "\n", 579 | "# Fetch the historical data for Nvidia for the specified period\n", 580 | "def fetch_stock_data():\n", 581 | " # Define the ticker symbol for Nvidia\n", 582 | " ticker_symbol = 'NVDA'\n", 583 | " \n", 584 | " # Set the time period for data retrieval\n", 585 | " start_date = '2024-03-23'\n", 586 | " end_date = '2024-04-23'\n", 587 | " \n", 588 | " # Get the historical data from Yahoo Finance\n", 589 | " data = yf.download(ticker_symbol, start=start_date, end=end_date)\n", 590 | " \n", 591 | " # Display the closing prices\n", 592 | " print(data['Close'])\n", 593 | "\n", 594 | "# Execute the function\n", 595 | "fetch_stock_data()\n", 596 | "```\n", 597 | "\n", 598 | "### Instructions:\n", 599 | "1. Copy the above code block.\n", 600 | "2. Run it in your Python environment.\n", 601 | "3. Share the results with me after you have successfully executed the code to continue with the analysis and the drafting of the blog post.\n", 602 | "\n", 603 | "If you run into any issues executing this code or installing `yfinance`, please let me know!\n", 604 | "\n", 605 | "--------------------------------------------------------------------------------\n", 606 | "\u001b[31m\n", 607 | ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", 608 | "\u001b[33mExecutor\u001b[0m (to chat_manager):\n", 609 | "\n", 610 | "exitcode: 0 (execution succeeded)\n", 611 | "Code output: \n", 612 | "Date\n", 613 | "2024-03-25 950.020020\n", 614 | "2024-03-26 925.609985\n", 615 | "2024-03-27 902.500000\n", 616 | "2024-03-28 903.559998\n", 617 | "2024-04-01 903.630005\n", 618 | "2024-04-02 894.520020\n", 619 | "2024-04-03 889.640015\n", 620 | "2024-04-04 859.049988\n", 621 | "2024-04-05 880.080017\n", 622 | "2024-04-08 871.330017\n", 623 | "2024-04-09 853.539978\n", 624 | "2024-04-10 870.390015\n", 625 | "2024-04-11 906.159973\n", 626 | "2024-04-12 881.859985\n", 627 | "2024-04-15 860.010010\n", 628 | "2024-04-16 874.150024\n", 629 | "2024-04-17 840.349976\n", 630 | "2024-04-18 846.710022\n", 631 | "2024-04-19 762.000000\n", 632 | "2024-04-22 795.179993\n", 633 | "Name: Close, dtype: float64\n", 634 | "\n", 635 | "\n", 636 | "--------------------------------------------------------------------------------\n", 637 | "\u001b[31m\n", 638 | ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", 639 | "\u001b[33mPlanner\u001b[0m (to chat_manager):\n", 640 | "\n", 641 | "Great! You have successfully retrieved the historical closing prices for Nvidia stock from March 23, 2024, to April 23, 2024. The next step is to identify any significant events during this period that may have influenced the stock's price.\n", 642 | "\n", 643 | "### Step 2: Identify Significant Events\n", 644 | "\n", 645 | "For this next task, you will either scrape news websites or use an API that can provide news headlines or relevant company events. Here’s a suggested approach using a Python library to potentially retrieve news:\n", 646 | "\n", 647 | "- **Using `newsapi-python`**: This library interfaces with the NewsAPI for fetching news headlines about a specific company. You will need an API key, which you can get by registering at [NewsAPI](https://newsapi.org/).\n", 648 | "\n", 649 | "#### Python Code to Fetch News Headlines for Nvidia\n", 650 | "\n", 651 | "```python\n", 652 | "from newsapi import NewsApiClient\n", 653 | "\n", 654 | "# Initialize the client with your API key\n", 655 | "newsapi = NewsApiClient(api_key='your_api_key_here')\n", 656 | "\n", 657 | "# Fetch news articles about Nvidia in the last month\n", 658 | "def fetch_news():\n", 659 | " all_articles = newsapi.get_everything(q='NVIDIA',\n", 660 | " from_param='2024-03-25',\n", 661 | " to='2024-04-22',\n", 662 | " language='en',\n", 663 | " sort_by='relevancy',\n", 664 | " page_size=10)\n", 665 | " # Print the titles of the articles\n", 666 | " for article in all_articles['articles']:\n", 667 | " print(article['title'], ' - ', article['publishedAt'])\n", 668 | "\n", 669 | "# Execute the function\n", 670 | "fetch_news()\n", 671 | "```\n", 672 | "\n", 673 | "### Instructions:\n", 674 | "1. Acquire an API key from NewsAPI and replace `'your_api_key_here'` with your key in the script.\n", 675 | "2. Copy and execute the provided Python code in your environment to retrieve news related to Nvidia.\n", 676 | "3. Share the news headlines along with their publication dates with me.\n", 677 | "\n", 678 | "This information will help analyze the potential impact of external factors on Nvidia's stock price movements.\n", 679 | "\n", 680 | "If you encounter difficulties in retrieving news, please inform me so we can explore alternative sources or methods.\n", 681 | "\n", 682 | "--------------------------------------------------------------------------------\n", 683 | "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", 684 | "\n", 685 | "TERMINATE\n", 686 | "\n", 687 | "--------------------------------------------------------------------------------\n" 688 | ] 689 | } 690 | ], 691 | "source": [ 692 | "manager = autogen.GroupChatManager(\n", 693 | " groupchat=groupchat, llm_config=llm_config\n", 694 | ")\n", 695 | "\n", 696 | "groupchat_result = user_proxy.initiate_chat(\n", 697 | " manager,\n", 698 | " message=task,\n", 699 | ")" 700 | ] 701 | }, 702 | { 703 | "cell_type": "markdown", 704 | "id": "e738fd8b", 705 | "metadata": {}, 706 | "source": [ 707 | "**Note**: You might experience slightly different interactions between the agents. The engineer agent may write incorrect code, which the executor agent will report and send back for correction. This process could go through multiple rounds." 708 | ] 709 | } 710 | ], 711 | "metadata": { 712 | "kernelspec": { 713 | "display_name": "Python 3 (ipykernel)", 714 | "language": "python", 715 | "name": "python3" 716 | }, 717 | "language_info": { 718 | "codemirror_mode": { 719 | "name": "ipython", 720 | "version": 3 721 | }, 722 | "file_extension": ".py", 723 | "mimetype": "text/x-python", 724 | "name": "python", 725 | "nbconvert_exporter": "python", 726 | "pygments_lexer": "ipython3", 727 | "version": "3.11.9" 728 | } 729 | }, 730 | "nbformat": 4, 731 | "nbformat_minor": 5 732 | } 733 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤖 [AI Agentic Design Patterns with AutoGen](https://www.deeplearning.ai/short-courses/ai-agentic-design-patterns-with-autogen/) 2 | 3 | 💡 Welcome to the "AI Agentic Design Patterns with AutoGen" course! The course will equip you with the knowledge and skills to build and customize multi-agent systems using AutoGen. 4 | 5 | ## Course Summary 6 | In this course, you'll explore key principles of designing multi-agent systems and enabling agents to collaborate on complex tasks using the AutoGen framework. Here's what you can expect to learn and experience: 7 | 8 | 1. 🎭 **Conversational Agents**: Create a two-agent chat showing a conversation between two standup comedians using “ConversableAgent,” a built-in agent class of AutoGen. 9 |

10 | 11 |

12 | 13 | 2. 🎉 **Customer Onboarding**: Develop a sequence of chats between agents to provide a fun customer onboarding experience for a product using the multi-agent collaboration design pattern. 14 |

15 | 16 |

17 | 18 | 3. 📝 **Blog Post Creation**: Use the agent reflection framework to create a high-quality blog post with nested chats, where reviewer agents reflect on the blog post written by another agent. 19 |

20 | 21 |

22 | 23 | 4. ♟️ **Chess Game**: Implement a conversational chess game where two agent players can call a tool and make legal moves on the chessboard using the tool use design pattern. 24 |

25 | 26 |

27 | 28 | 5. 💻 **Coding Agent**: Develop a coding agent capable of generating the necessary code to plot stock gains for financial analysis and integrating user-defined functions into the code. 29 |

30 | 31 |

32 | 33 | 6. 📊 **Financial Analysis**: Create systems where agents collaborate and seek human feedback to complete a financial analysis task, generating code from scratch or using user-provided code. 34 |

35 | 36 |

37 | 38 | By the end of the course, you’ll have hands-on experience with AutoGen’s core components and a solid understanding of agentic design patterns, ready to implement multi-agent systems in your workflows. 39 | 40 | ## Key Points 41 | - 🛠️ Use the AutoGen framework to build multi-agent systems with diverse roles and capabilities for implementing complex AI applications. 42 | - 📚 Implement agentic design patterns such as Reflection, Tool Use, Planning, and Multi-agent Collaboration using AutoGen. 43 | - 🌟 Learn directly from the creators of AutoGen, Chi Wang and Qingyun Wu. 44 | 45 | ## About the Instructors 46 | 🌟 **Chi Wang** is a Principal Researcher at Microsoft Research, bringing extensive expertise in AI and multi-agent systems to guide you through this course. 47 | 48 | 🌟 **Qingyun Wu** is an Assistant Professor at Penn State University, specializing in AI and multi-agent collaboration, to help you master agentic design patterns. 49 | 50 | 🔗 To enroll in the course or for further information, visit [deeplearning.ai](https://www.deeplearning.ai/short-courses/). 51 | -------------------------------------------------------------------------------- /images/l1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l1.png -------------------------------------------------------------------------------- /images/l2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l2.png -------------------------------------------------------------------------------- /images/l3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l3.png -------------------------------------------------------------------------------- /images/l4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l4.png -------------------------------------------------------------------------------- /images/l5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l5.png -------------------------------------------------------------------------------- /images/l6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ksm26/AI-Agentic-Design-Patterns-with-AutoGen/b038b3b38b6d937cfda9e21e1c5f07049f7de8a5/images/l6.png --------------------------------------------------------------------------------